Merge branch 'dev' into s3
This commit is contained in:
commit
5cd8f3426c
@ -1,5 +1,5 @@
|
||||
# CORE ALPINE BASE IMAGE
|
||||
OS_NAME=alpine
|
||||
OS_VERSION=3.15
|
||||
GOLANG_IMAGE=golang:1.18.4-alpine3.15
|
||||
CORE_VERSION=16.9.1
|
||||
GOLANG_IMAGE=golang:1.18.6-alpine3.15
|
||||
CORE_VERSION=16.10.1
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
# CORE UBUNTU BASE IMAGE
|
||||
OS_NAME=ubuntu
|
||||
OS_VERSION=20.04
|
||||
GOLANG_IMAGE=golang:1.18.4-alpine3.15
|
||||
CORE_VERSION=16.9.1
|
||||
GOLANG_IMAGE=golang:1.18.6-alpine3.15
|
||||
CORE_VERSION=16.10.1
|
||||
|
||||
24
CHANGELOG.md
24
CHANGELOG.md
@ -1,5 +1,29 @@
|
||||
# Core
|
||||
|
||||
### Core v16.10.0 > v16.10.1
|
||||
|
||||
- Add email address in TLS config for Let's Encrypt
|
||||
- Fix use of Let's Encrypt production CA
|
||||
|
||||
### Core v16.9.1 > v16.10.0
|
||||
|
||||
- Add HLS session middleware to diskfs
|
||||
- Add /v3/metrics (get) endpoint to list all known metrics
|
||||
- Add logging HTTP request and response body sizes
|
||||
- Add process id and reference glob pattern matching
|
||||
- Add cache block list for extensions not to cache
|
||||
- Mod exclude .m3u8 and .mpd files from disk cache by default
|
||||
- Mod replaces x/crypto/acme/autocert with caddyserver/certmagic
|
||||
- Mod exposes ports (Docker desktop)
|
||||
- Fix assigning cleanup rules for diskfs
|
||||
- Fix wrong path for swagger definition
|
||||
- Fix process cleanup on delete, remove empty directories from disk
|
||||
- Fix SRT blocking port on restart (upgrade datarhei/gosrt)
|
||||
- Fix RTMP communication (Blackmagic Web Presenter, thx 235 MEDIA)
|
||||
- Fix RTMP communication (Blackmagic ATEM Mini, datarhei/restreamer#385)
|
||||
- Fix injecting commit, branch, and build info
|
||||
- Fix API metadata endpoints responses
|
||||
|
||||
#### Core v16.9.0 > v16.9.1
|
||||
|
||||
- Fix v1 import app
|
||||
|
||||
@ -14,6 +14,12 @@ ENV CORE_CONFIGFILE=/core/config/config.json
|
||||
ENV CORE_STORAGE_DISK_DIR=/core/data
|
||||
ENV CORE_DB_DIR=/core/config
|
||||
|
||||
EXPOSE 8080/tcp
|
||||
EXPOSE 8181/tcp
|
||||
EXPOSE 1935/tcp
|
||||
EXPOSE 1936/tcp
|
||||
EXPOSE 6000/udp
|
||||
|
||||
VOLUME ["/core/data", "/core/config"]
|
||||
ENTRYPOINT ["/core/bin/run.sh"]
|
||||
WORKDIR /core
|
||||
|
||||
13
Makefile
13
Makefile
@ -6,6 +6,13 @@ BINSUFFIX := $(shell if [ "${GOOS}" -a "${GOARCH}" ]; then echo "-${GOOS}-${GOAR
|
||||
|
||||
all: build
|
||||
|
||||
## init: Install required apps
|
||||
init:
|
||||
go install honnef.co/go/tools/cmd/staticcheck@latest
|
||||
go install github.com/swaggo/swag/cmd/swag@latest
|
||||
go install github.com/99designs/gqlgen@latest
|
||||
go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||
|
||||
## build: Build core (default)
|
||||
build:
|
||||
CGO_ENABLED=${CGO_ENABLED} GOOS=${GOOS} GOARCH=${GOARCH} go build -o core${BINSUFFIX}
|
||||
@ -34,6 +41,10 @@ vet:
|
||||
fmt:
|
||||
go fmt ./...
|
||||
|
||||
## vulncheck: Check for known vulnerabilities in dependencies
|
||||
vulncheck:
|
||||
govulncheck ./...
|
||||
|
||||
## update: Update dependencies
|
||||
update:
|
||||
go get -u
|
||||
@ -85,7 +96,7 @@ release_linux:
|
||||
docker:
|
||||
docker build -t core:$(SHORTCOMMIT) .
|
||||
|
||||
.PHONY: help build swagger test vet fmt vendor commit coverage lint release import update
|
||||
.PHONY: help init build swagger test vet fmt vulncheck vendor commit coverage lint release import update
|
||||
|
||||
## help: Show all commands
|
||||
help: Makefile
|
||||
|
||||
@ -37,7 +37,7 @@ import (
|
||||
"github.com/datarhei/core/v16/srt"
|
||||
"github.com/datarhei/core/v16/update"
|
||||
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
"github.com/caddyserver/certmagic"
|
||||
)
|
||||
|
||||
// The API interface is the implementation for the restreamer API.
|
||||
@ -505,7 +505,12 @@ func (a *api) start() error {
|
||||
host = "localhost"
|
||||
}
|
||||
|
||||
template := "rtmp://" + host + ":" + port + cfg.RTMP.App + "/{name}"
|
||||
template := "rtmp://" + host + ":" + port
|
||||
if cfg.RTMP.App != "/" {
|
||||
template += cfg.RTMP.App
|
||||
}
|
||||
template += "/{name}"
|
||||
|
||||
if len(cfg.RTMP.Token) != 0 {
|
||||
template += "?token=" + cfg.RTMP.Token
|
||||
}
|
||||
@ -704,23 +709,51 @@ func (a *api) start() error {
|
||||
a.cache = cache
|
||||
}
|
||||
|
||||
var autocertManager *autocert.Manager
|
||||
var autocertManager *certmagic.Config
|
||||
|
||||
if cfg.TLS.Enable && cfg.TLS.Auto {
|
||||
if len(cfg.Host.Name) == 0 {
|
||||
return fmt.Errorf("at least one host must be provided in host.name or RS_HOST_NAME")
|
||||
}
|
||||
|
||||
autocertManager = &autocert.Manager{
|
||||
Prompt: autocert.AcceptTOS,
|
||||
HostPolicy: autocert.HostWhitelist(cfg.Host.Name...),
|
||||
Cache: autocert.DirCache(cfg.DB.Dir + "/cert"),
|
||||
certmagic.DefaultACME.Agreed = true
|
||||
certmagic.DefaultACME.Email = cfg.TLS.Email
|
||||
certmagic.DefaultACME.CA = certmagic.LetsEncryptProductionCA
|
||||
certmagic.DefaultACME.DisableHTTPChallenge = false
|
||||
certmagic.DefaultACME.DisableTLSALPNChallenge = true
|
||||
certmagic.DefaultACME.Logger = nil
|
||||
|
||||
certmagic.Default.Storage = &certmagic.FileStorage{
|
||||
Path: cfg.DB.Dir + "/cert",
|
||||
}
|
||||
certmagic.Default.DefaultServerName = cfg.Host.Name[0]
|
||||
certmagic.Default.Logger = nil
|
||||
certmagic.Default.OnEvent = func(event string, data interface{}) {
|
||||
message := ""
|
||||
|
||||
switch data := data.(type) {
|
||||
case string:
|
||||
message = data
|
||||
case fmt.Stringer:
|
||||
message = data.String()
|
||||
}
|
||||
|
||||
if len(message) != 0 {
|
||||
a.log.logger.core.WithComponent("certmagic").Info().WithField("event", event).Log(message)
|
||||
}
|
||||
}
|
||||
|
||||
magic := certmagic.NewDefault()
|
||||
acme := certmagic.NewACMEIssuer(magic, certmagic.DefaultACME)
|
||||
|
||||
magic.Issuers = []certmagic.Issuer{acme}
|
||||
|
||||
autocertManager = magic
|
||||
|
||||
// Start temporary http server on configured port
|
||||
tempserver := &gohttp.Server{
|
||||
Addr: cfg.Address,
|
||||
Handler: autocertManager.HTTPHandler(gohttp.HandlerFunc(func(w gohttp.ResponseWriter, r *gohttp.Request) {
|
||||
Handler: acme.HTTPChallengeHandler(gohttp.HandlerFunc(func(w gohttp.ResponseWriter, r *gohttp.Request) {
|
||||
w.WriteHeader(gohttp.StatusNotFound)
|
||||
})),
|
||||
ReadTimeout: 10 * time.Second,
|
||||
@ -743,9 +776,12 @@ func (a *api) start() error {
|
||||
logger := a.log.logger.core.WithComponent("Let's Encrypt").WithField("host", host)
|
||||
logger.Info().Log("Acquiring certificate ...")
|
||||
|
||||
_, err := autocertManager.GetCertificate(&tls.ClientHelloInfo{
|
||||
ServerName: host,
|
||||
})
|
||||
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Minute))
|
||||
|
||||
err := autocertManager.ManageSync(ctx, []string{host})
|
||||
|
||||
cancel()
|
||||
|
||||
if err != nil {
|
||||
logger.Error().WithField("error", err).Log("Failed to acquire certificate")
|
||||
certerror = true
|
||||
@ -977,7 +1013,8 @@ func (a *api) start() error {
|
||||
GetCertificate: autocertManager.GetCertificate,
|
||||
}
|
||||
|
||||
a.sidecarserver.Handler = autocertManager.HTTPHandler(sidecarserverhandler)
|
||||
acme := autocertManager.Issuers[0].(*certmagic.ACMEIssuer)
|
||||
a.sidecarserver.Handler = acme.HTTPChallengeHandler(sidecarserverhandler)
|
||||
}
|
||||
|
||||
wgStart.Add(1)
|
||||
|
||||
@ -30,7 +30,7 @@ func (v versionInfo) MinorString() string {
|
||||
var Version = versionInfo{
|
||||
Major: 16,
|
||||
Minor: 10,
|
||||
Patch: 0,
|
||||
Patch: 1,
|
||||
}
|
||||
|
||||
// Commit is the git commit the app is build from. It should be filled in during compilation
|
||||
|
||||
@ -192,6 +192,7 @@ func (d *Config) init() {
|
||||
d.val(newAddressValue(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false)
|
||||
d.val(newBoolValue(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
|
||||
d.val(newBoolValue(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
|
||||
d.val(newEmailValue(&d.TLS.Email, "cert@datarhei.com"), "tls.email", "CORE_TLS_EMAIL", nil, "Email for Let's Encrypt registration", false, false)
|
||||
d.val(newFileValue(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
|
||||
d.val(newFileValue(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
|
||||
|
||||
@ -206,7 +207,7 @@ func (d *Config) init() {
|
||||
d.val(newInt64Value(&d.Storage.Disk.Cache.TTL, 300), "storage.disk.cache.ttl_seconds", "CORE_STORAGE_DISK_CACHE_TTLSECONDS", nil, "Seconds to keep files in cache", false, false)
|
||||
d.val(newUint64Value(&d.Storage.Disk.Cache.FileSize, 1), "storage.disk.cache.max_file_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXFILESIZEMBYTES", nil, "Max. file size to put in cache", false, false)
|
||||
d.val(newStringListValue(&d.Storage.Disk.Cache.Types.Allow, []string{}, " "), "storage.disk.cache.type.allow", "CORE_STORAGE_DISK_CACHE_TYPES_ALLOW", []string{"CORE_STORAGE_DISK_CACHE_TYPES"}, "File extensions to cache, empty for all", false, false)
|
||||
d.val(newStringListValue(&d.Storage.Disk.Cache.Types.Block, []string{}, " "), "storage.disk.cache.type.block", "CORE_STORAGE_DISK_CACHE_TYPES_BLOCK", nil, "File extensions not to cache, empty for none", false, false)
|
||||
d.val(newStringListValue(&d.Storage.Disk.Cache.Types.Block, []string{".m3u8", ".mpd"}, " "), "storage.disk.cache.type.block", "CORE_STORAGE_DISK_CACHE_TYPES_BLOCK", nil, "File extensions not to cache, empty for none", false, false)
|
||||
|
||||
// Storage (Memory)
|
||||
d.val(newBoolValue(&d.Storage.Memory.Auth.Enable, true), "storage.memory.auth.enable", "CORE_STORAGE_MEMORY_AUTH_ENABLE", nil, "Enable basic auth for PUT,POST, and DELETE on /memfs", false, false)
|
||||
@ -438,6 +439,14 @@ func (d *Config) Validate(resetLogs bool) {
|
||||
}
|
||||
}
|
||||
|
||||
// If TLS and Let's Encrypt certificate is enabled, we require a non-empty email address
|
||||
if d.TLS.Enable && d.TLS.Auto {
|
||||
if len(d.TLS.Email) == 0 {
|
||||
v := d.findVariable("tls.email")
|
||||
v.value.Set(v.defVal)
|
||||
}
|
||||
}
|
||||
|
||||
// If TLS for RTMP is enabled, TLS must be enabled
|
||||
if d.RTMP.EnableTLS {
|
||||
if !d.RTMP.Enable {
|
||||
|
||||
@ -54,6 +54,7 @@ type Data struct {
|
||||
Address string `json:"address"`
|
||||
Enable bool `json:"enable"`
|
||||
Auto bool `json:"auto"`
|
||||
Email string `json:"email"`
|
||||
CertFile string `json:"cert_file"`
|
||||
KeyFile string `json:"key_file"`
|
||||
} `json:"tls"`
|
||||
@ -175,7 +176,6 @@ func NewV3FromV2(d *dataV2) (*Data, error) {
|
||||
data.DB = d.DB
|
||||
data.Host = d.Host
|
||||
data.API = d.API
|
||||
data.TLS = d.TLS
|
||||
data.RTMP = d.RTMP
|
||||
data.SRT = d.SRT
|
||||
data.FFmpeg = d.FFmpeg
|
||||
@ -212,6 +212,13 @@ func NewV3FromV2(d *dataV2) (*Data, error) {
|
||||
data.Router.Routes = copyStringMap(d.Router.Routes)
|
||||
|
||||
// Actual changes
|
||||
data.TLS.Enable = d.TLS.Enable
|
||||
data.TLS.Address = d.TLS.Address
|
||||
data.TLS.Auto = d.TLS.Auto
|
||||
data.TLS.CertFile = d.TLS.CertFile
|
||||
data.TLS.KeyFile = d.TLS.KeyFile
|
||||
data.TLS.Email = "cert@datarhei.com"
|
||||
|
||||
data.Storage.MimeTypes = d.Storage.MimeTypes
|
||||
|
||||
data.Storage.CORS = d.Storage.CORS
|
||||
|
||||
@ -5,6 +5,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/mail"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
@ -957,3 +958,39 @@ func (s *absolutePathValue) Validate() error {
|
||||
func (s *absolutePathValue) IsEmpty() bool {
|
||||
return len(string(*s)) == 0
|
||||
}
|
||||
|
||||
// email address
|
||||
|
||||
type emailValue string
|
||||
|
||||
func newEmailValue(p *string, val string) *emailValue {
|
||||
*p = val
|
||||
return (*emailValue)(p)
|
||||
}
|
||||
|
||||
func (s *emailValue) Set(val string) error {
|
||||
addr, err := mail.ParseAddress(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*s = emailValue(addr.Address)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *emailValue) String() string {
|
||||
return string(*s)
|
||||
}
|
||||
|
||||
func (s *emailValue) Validate() error {
|
||||
if len(s.String()) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err := mail.ParseAddress(s.String())
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *emailValue) IsEmpty() bool {
|
||||
return len(string(*s)) == 0
|
||||
}
|
||||
|
||||
41
docs/docs.go
41
docs/docs.go
@ -659,6 +659,30 @@ const docTemplate = `{
|
||||
}
|
||||
},
|
||||
"/api/v3/metrics": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"ApiKeyAuth": []
|
||||
}
|
||||
],
|
||||
"description": "List all known metrics with their description and labels",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"summary": "List all known metrics with their description and labels",
|
||||
"operationId": "metrics-3-describe",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/api.MetricsDescription"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
@ -2626,6 +2650,23 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"api.MetricsDescription": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"description": {
|
||||
"type": "string"
|
||||
},
|
||||
"labels": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"api.MetricsQuery": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
||||
@ -651,6 +651,30 @@
|
||||
}
|
||||
},
|
||||
"/api/v3/metrics": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"ApiKeyAuth": []
|
||||
}
|
||||
],
|
||||
"description": "List all known metrics with their description and labels",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"summary": "List all known metrics with their description and labels",
|
||||
"operationId": "metrics-3-describe",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/api.MetricsDescription"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
@ -2618,6 +2642,23 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"api.MetricsDescription": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"description": {
|
||||
"type": "string"
|
||||
},
|
||||
"labels": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"api.MetricsQuery": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
||||
@ -497,6 +497,17 @@ definitions:
|
||||
- password
|
||||
- username
|
||||
type: object
|
||||
api.MetricsDescription:
|
||||
properties:
|
||||
description:
|
||||
type: string
|
||||
labels:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
name:
|
||||
type: string
|
||||
type: object
|
||||
api.MetricsQuery:
|
||||
properties:
|
||||
interval_sec:
|
||||
@ -2179,6 +2190,21 @@ paths:
|
||||
- ApiKeyAuth: []
|
||||
summary: Add JSON metadata under the given key
|
||||
/api/v3/metrics:
|
||||
get:
|
||||
description: List all known metrics with their description and labels
|
||||
operationId: metrics-3-describe
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
items:
|
||||
$ref: '#/definitions/api.MetricsDescription'
|
||||
type: array
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
summary: List all known metrics with their description and labels
|
||||
post:
|
||||
consumes:
|
||||
- application/json
|
||||
|
||||
30
go.mod
30
go.mod
@ -3,31 +3,31 @@ module github.com/datarhei/core/v16
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/99designs/gqlgen v0.17.15
|
||||
github.com/99designs/gqlgen v0.17.16
|
||||
github.com/atrox/haikunatorgo/v2 v2.0.1
|
||||
github.com/caddyserver/certmagic v0.16.2
|
||||
github.com/datarhei/gosrt v0.2.1-0.20220817080252-d44df04a3845
|
||||
github.com/datarhei/joy4 v0.0.0-20220728180719-f752080f4a36
|
||||
github.com/datarhei/joy4 v0.0.0-20220914170649-23c70d207759
|
||||
github.com/go-playground/validator/v10 v10.11.0
|
||||
github.com/gobwas/glob v0.2.3
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/invopop/jsonschema v0.4.0
|
||||
github.com/joho/godotenv v1.4.0
|
||||
github.com/labstack/echo/v4 v4.8.0
|
||||
github.com/labstack/echo/v4 v4.9.0
|
||||
github.com/lithammer/shortuuid/v4 v4.0.0
|
||||
github.com/mattn/go-isatty v0.0.16
|
||||
github.com/minio/minio-go/v7 v7.0.34
|
||||
github.com/minio/minio-go/v7 v7.0.39
|
||||
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
|
||||
github.com/prometheus/client_golang v1.13.0
|
||||
github.com/shirou/gopsutil/v3 v3.22.7
|
||||
github.com/shirou/gopsutil/v3 v3.22.8
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/swaggo/echo-swagger v1.3.4
|
||||
github.com/swaggo/swag v1.8.4
|
||||
github.com/vektah/gqlparser/v2 v2.4.8
|
||||
github.com/swaggo/swag v1.8.5
|
||||
github.com/vektah/gqlparser/v2 v2.5.0
|
||||
github.com/xeipuuv/gojsonschema v1.2.0
|
||||
golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
|
||||
golang.org/x/net v0.0.0-20220822230855-b0a4917ee28c
|
||||
golang.org/x/net v0.0.0-20220907135653-1e95f45603a7
|
||||
)
|
||||
|
||||
require (
|
||||
@ -57,10 +57,13 @@ require (
|
||||
github.com/klauspost/cpuid/v2 v2.1.0 // indirect
|
||||
github.com/labstack/gommon v0.3.1 // indirect
|
||||
github.com/leodido/go-urn v1.2.1 // indirect
|
||||
github.com/libdns/libdns v0.2.1 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/mholt/acmez v1.0.4 // indirect
|
||||
github.com/miekg/dns v1.1.46 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/minio/sha256-simd v1.0.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
@ -84,12 +87,15 @@ require (
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/sys v0.0.0-20220823224334-20c2bfdbfe24 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.21.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
|
||||
golang.org/x/sys v0.0.0-20220907062415-87db552b00fd // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 // indirect
|
||||
golang.org/x/tools v0.1.12 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/ini.v1 v1.66.6 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
74
go.sum
74
go.sum
@ -31,8 +31,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/99designs/gqlgen v0.17.15 h1:5YgNFd46NhO/VltM4ENc6m26mj8GJxQg2ZKOy5s83tA=
|
||||
github.com/99designs/gqlgen v0.17.15/go.mod h1:IXeS/mdPf7JPkmqvbRKjCAV+CLxMKe6vXw6yD9vamB8=
|
||||
github.com/99designs/gqlgen v0.17.16 h1:tTIw/cQ/uvf3iXIb2I6YSkdaDkmHmH2W2eZkVe0IVLA=
|
||||
github.com/99designs/gqlgen v0.17.16/go.mod h1:dnJdUkgfh8iw8CEx2hhTdgTQO/GvVWKLcm/kult5gwI=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
@ -55,12 +55,16 @@ github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig
|
||||
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
|
||||
github.com/atrox/haikunatorgo/v2 v2.0.1 h1:FCVx2KL2YvZtI1rI9WeEHxeLRrKGr0Dd4wfCJiUXupc=
|
||||
github.com/atrox/haikunatorgo/v2 v2.0.1/go.mod h1:BBQmx2o+1Z5poziaHRgddAZKOpijwfKdAmMnSYlFK70=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c h1:8XZeJrs4+ZYhJeJ2aZxADI2tGADS15AzIF8MQ8XAhT4=
|
||||
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c/go.mod h1:x1vxHcL/9AVzuk5HOloOEPrtJY0MaalYr78afXZ+pWI=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/caddyserver/certmagic v0.16.2 h1:k2n3LkkUG3aMUK/kckMuF9/0VFo+0FtMX3drPYESbmQ=
|
||||
github.com/caddyserver/certmagic v0.16.2/go.mod h1:PgLIr/dSJa+WA7t7z6Je5xuS/e5A/GFCPHRuZ1QP+MQ=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
@ -76,8 +80,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/datarhei/gosrt v0.2.1-0.20220817080252-d44df04a3845 h1:nlVb4EVMwdVUwH6e10WZrx4lW0n2utnlE+4ILMPyD5o=
|
||||
github.com/datarhei/gosrt v0.2.1-0.20220817080252-d44df04a3845/go.mod h1:wyoTu+DG45XRuCgEq/y+R8nhZCrJbOyQKn+SwNrNVZ8=
|
||||
github.com/datarhei/joy4 v0.0.0-20220728180719-f752080f4a36 h1:ppjcv7wazy4d7vANREERXkSAUnhV/nfT2a+13u4ZijQ=
|
||||
github.com/datarhei/joy4 v0.0.0-20220728180719-f752080f4a36/go.mod h1:Jcw/6jZDQQmPx8A7INEkXmuEF7E9jjBbSTfVSLwmiQw=
|
||||
github.com/datarhei/joy4 v0.0.0-20220914170649-23c70d207759 h1:h8NyekuQSDvLIsZVTV172m5/RVArXkEM/cnHaUzszQU=
|
||||
github.com/datarhei/joy4 v0.0.0-20220914170649-23c70d207759/go.mod h1:Jcw/6jZDQQmPx8A7INEkXmuEF7E9jjBbSTfVSLwmiQw=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@ -237,12 +241,14 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/labstack/echo/v4 v4.7.2/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks=
|
||||
github.com/labstack/echo/v4 v4.8.0 h1:wdc6yKVaHxkNOEdz4cRZs1pQkwSXPiRjq69yWP4QQS8=
|
||||
github.com/labstack/echo/v4 v4.8.0/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks=
|
||||
github.com/labstack/echo/v4 v4.9.0 h1:wPOF1CE6gvt/kmbMR4dGzWvHMPT+sAEUJOwOTtvITVY=
|
||||
github.com/labstack/echo/v4 v4.9.0/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks=
|
||||
github.com/labstack/gommon v0.3.1 h1:OomWaJXm7xR6L1HmEtGyQf26TEn7V6X88mktX9kee9o=
|
||||
github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
|
||||
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/libdns/libdns v0.2.1 h1:Wu59T7wSHRgtA0cfxC+n1c/e+O3upJGWytknkmFEDis=
|
||||
github.com/libdns/libdns v0.2.1/go.mod h1:yQCXzk1lEZmmCPa857bnk4TsOiqYasqpyOEeSObbb40=
|
||||
github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c=
|
||||
github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y=
|
||||
github.com/logrusorgru/aurora/v3 v3.0.0/go.mod h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxAlxggSjiwMnCUc=
|
||||
@ -264,10 +270,14 @@ github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peK
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mholt/acmez v1.0.4 h1:N3cE4Pek+dSolbsofIkAYz6H1d3pE+2G0os7QHslf80=
|
||||
github.com/mholt/acmez v1.0.4/go.mod h1:qFGLZ4u+ehWINeJZjzPlsnjJBCPAADWTcIqE/7DAYQY=
|
||||
github.com/miekg/dns v1.1.46 h1:uzwpxRtSVxtcIZmz/4Uz6/Rn7G11DvsaslXoy5LxQio=
|
||||
github.com/miekg/dns v1.1.46/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.34 h1:JMfS5fudx1mN6V2MMNyCJ7UMrjEzZzIvMgfkWc1Vnjk=
|
||||
github.com/minio/minio-go/v7 v7.0.34/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
|
||||
github.com/minio/minio-go/v7 v7.0.39 h1:upnbu1jCGOqEvrGSpRauSN9ZG7RCHK7VHxXS8Vmg2zk=
|
||||
github.com/minio/minio-go/v7 v7.0.39/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
|
||||
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
|
||||
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
|
||||
github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
@ -291,6 +301,7 @@ github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
@ -336,8 +347,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shirou/gopsutil/v3 v3.22.7 h1:flKnuCMfUUrO+oAvwAd6GKZgnPzr098VA/UJ14nhJd4=
|
||||
github.com/shirou/gopsutil/v3 v3.22.7/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI=
|
||||
github.com/shirou/gopsutil/v3 v3.22.8 h1:a4s3hXogo5mE2PfdfJIonDbstO/P+9JszdfhAHSzD9Y=
|
||||
github.com/shirou/gopsutil/v3 v3.22.8/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
@ -364,8 +375,8 @@ github.com/swaggo/echo-swagger v1.3.4/go.mod h1:vh8QAdbHtTXwTSaWzc1Nby7zMYJd/g0F
|
||||
github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a h1:kAe4YSu0O0UFn1DowNo2MY5p6xzqtJ/wQ7LZynSvGaY=
|
||||
github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w=
|
||||
github.com/swaggo/swag v1.8.1/go.mod h1:ugemnJsPZm/kRwFUnzBlbHRd0JY9zE1M4F+uy2pAaPQ=
|
||||
github.com/swaggo/swag v1.8.4 h1:oGB351qH1JqUqK1tsMYEE5qTBbPk394BhsZxmUfebcI=
|
||||
github.com/swaggo/swag v1.8.4/go.mod h1:jMLeXOOmYyjk8PvHTsXBdrubsNd9gUJTTCzL5iBnseg=
|
||||
github.com/swaggo/swag v1.8.5 h1:7NgtfXsXE+jrcOwRyiftGKW7Ppydj7tZiVenuRf1fE4=
|
||||
github.com/swaggo/swag v1.8.5/go.mod h1:jMLeXOOmYyjk8PvHTsXBdrubsNd9gUJTTCzL5iBnseg=
|
||||
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
|
||||
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
|
||||
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
|
||||
@ -378,8 +389,8 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
|
||||
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/vektah/gqlparser/v2 v2.4.8 h1:O0G2I4xEi7J0/b/qRCWGNXEiU9EQ+hGBmlIU1LXLUfY=
|
||||
github.com/vektah/gqlparser/v2 v2.4.8/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0=
|
||||
github.com/vektah/gqlparser/v2 v2.5.0 h1:GwEwy7AJsqPWrey0bHnn+3JLaHLZVT66wY/+O+Tf9SU=
|
||||
github.com/vektah/gqlparser/v2 v2.5.0/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRFZrvOnp3hmCs=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
@ -392,6 +403,7 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
|
||||
@ -401,6 +413,14 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
|
||||
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
|
||||
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
@ -412,8 +432,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8 h1:GIAS/yBem/gq2MUqgNIzUHW7cJMmx3TGZOrnyYaNQ6c=
|
||||
golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM=
|
||||
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
@ -445,7 +465,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
@ -478,16 +497,19 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220822230855-b0a4917ee28c h1:JVAXQ10yGGVbSyoer5VILysz6YKjdNT2bsvlayjqhes=
|
||||
golang.org/x/net v0.0.0-20220822230855-b0a4917ee28c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220907135653-1e95f45603a7 h1:1WGATo9HAhkWMbfyuVU0tEFP88OIkUvwaHFveQPvzCQ=
|
||||
golang.org/x/net v0.0.0-20220907135653-1e95f45603a7/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@ -505,6 +527,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -540,8 +563,10 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@ -559,8 +584,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220823224334-20c2bfdbfe24 h1:TyKJRhyo17yWxOMCTHKWrc5rddHORMlnZ/j57umaUd8=
|
||||
golang.org/x/sys v0.0.0-20220823224334-20c2bfdbfe24/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220907062415-87db552b00fd h1:AZeIEzg+8RCELJYq8w+ODLVxFgLMMigSwO/ffKPEd9U=
|
||||
golang.org/x/sys v0.0.0-20220907062415-87db552b00fd/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
@ -618,8 +643,9 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
|
||||
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
@ -713,8 +739,8 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI=
|
||||
gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
||||
@ -7,6 +7,12 @@ import (
|
||||
"github.com/datarhei/core/v16/monitor"
|
||||
)
|
||||
|
||||
type MetricsDescription struct {
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
Labels []string `json:"labels"`
|
||||
}
|
||||
|
||||
type MetricsQueryMetric struct {
|
||||
Name string `json:"name"`
|
||||
Labels map[string]string `json:"labels"`
|
||||
|
||||
@ -175,9 +175,10 @@ func (cfg *ProcessConfig) Unmarshal(c *app.Config) {
|
||||
|
||||
for _, c := range x.Cleanup {
|
||||
io.Cleanup = append(io.Cleanup, ProcessConfigIOCleanup{
|
||||
Pattern: c.Pattern,
|
||||
MaxFiles: c.MaxFiles,
|
||||
MaxFileAge: c.MaxFileAge,
|
||||
Pattern: c.Pattern,
|
||||
MaxFiles: c.MaxFiles,
|
||||
MaxFileAge: c.MaxFileAge,
|
||||
PurgeOnDelete: c.PurgeOnDelete,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@ -2,6 +2,7 @@ package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/core/v16/http/api"
|
||||
@ -28,6 +29,34 @@ func NewMetrics(config MetricsConfig) *MetricsHandler {
|
||||
}
|
||||
}
|
||||
|
||||
// Describe the known metrics
|
||||
// @Summary List all known metrics with their description and labels
|
||||
// @Description List all known metrics with their description and labels
|
||||
// @ID metrics-3-describe
|
||||
// @Produce json
|
||||
// @Success 200 {array} api.MetricsDescription
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/metrics [get]
|
||||
func (r *MetricsHandler) Describe(c echo.Context) error {
|
||||
response := []api.MetricsDescription{}
|
||||
|
||||
descriptors := r.metrics.Describe()
|
||||
|
||||
for _, d := range descriptors {
|
||||
response = append(response, api.MetricsDescription{
|
||||
Name: d.Name(),
|
||||
Description: d.Description(),
|
||||
Labels: d.Labels(),
|
||||
})
|
||||
}
|
||||
|
||||
sort.Slice(response, func(i, j int) bool {
|
||||
return response[i].Name < response[j].Name
|
||||
})
|
||||
|
||||
return c.JSON(http.StatusOK, response)
|
||||
}
|
||||
|
||||
// Query the collected metrics
|
||||
// @Summary Query the collected metrics
|
||||
// @Description Query the collected metrics
|
||||
|
||||
@ -1,65 +0,0 @@
|
||||
// Package bodysize is an echo middleware that fixes the final number of body bytes sent on the wire
|
||||
package bodysize
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/labstack/echo/v4/middleware"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Skipper middleware.Skipper
|
||||
}
|
||||
|
||||
var DefaultConfig = Config{
|
||||
Skipper: middleware.DefaultSkipper,
|
||||
}
|
||||
|
||||
func New() echo.MiddlewareFunc {
|
||||
return NewWithConfig(DefaultConfig)
|
||||
}
|
||||
|
||||
// New return a new bodysize middleware handler
|
||||
func NewWithConfig(config Config) echo.MiddlewareFunc {
|
||||
if config.Skipper == nil {
|
||||
config.Skipper = DefaultConfig.Skipper
|
||||
}
|
||||
|
||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
if config.Skipper(c) {
|
||||
return next(c)
|
||||
}
|
||||
|
||||
res := c.Response()
|
||||
|
||||
writer := res.Writer
|
||||
w := &fakeWriter{
|
||||
ResponseWriter: res.Writer,
|
||||
}
|
||||
res.Writer = w
|
||||
|
||||
defer func() {
|
||||
res.Writer = writer
|
||||
res.Size = w.size
|
||||
}()
|
||||
|
||||
return next(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type fakeWriter struct {
|
||||
http.ResponseWriter
|
||||
|
||||
size int64
|
||||
}
|
||||
|
||||
func (w *fakeWriter) Write(body []byte) (int, error) {
|
||||
n, err := w.ResponseWriter.Write(body)
|
||||
|
||||
w.size += int64(n)
|
||||
|
||||
return n, err
|
||||
}
|
||||
@ -2,6 +2,7 @@ package gzip
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"net"
|
||||
@ -25,15 +26,17 @@ type Config struct {
|
||||
// Length threshold before gzip compression
|
||||
// is used. Optional. Default value 0
|
||||
MinLength int
|
||||
|
||||
// Content-Types to compress. Empty for all
|
||||
// files. Optional. Default value "text/plain" and "text/html"
|
||||
ContentTypes []string
|
||||
}
|
||||
|
||||
type gzipResponseWriter struct {
|
||||
io.Writer
|
||||
http.ResponseWriter
|
||||
wroteHeader bool
|
||||
wroteBody bool
|
||||
minLength int
|
||||
minLengthExceeded bool
|
||||
buffer *bytes.Buffer
|
||||
code int
|
||||
}
|
||||
|
||||
const gzipScheme = "gzip"
|
||||
@ -47,10 +50,32 @@ const (
|
||||
|
||||
// DefaultConfig is the default Gzip middleware config.
|
||||
var DefaultConfig = Config{
|
||||
Skipper: middleware.DefaultSkipper,
|
||||
Level: -1,
|
||||
MinLength: 0,
|
||||
ContentTypes: []string{"text/plain", "text/html"},
|
||||
Skipper: middleware.DefaultSkipper,
|
||||
Level: DefaultCompression,
|
||||
MinLength: 0,
|
||||
}
|
||||
|
||||
// ContentTypesSkipper returns a Skipper based on the list of content types
|
||||
// that should be compressed. If the list is empty, all responses will be
|
||||
// compressed.
|
||||
func ContentTypeSkipper(contentTypes []string) middleware.Skipper {
|
||||
return func(c echo.Context) bool {
|
||||
// If no allowed content types are given, compress all
|
||||
if len(contentTypes) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Iterate through the allowed content types and don't skip if the content type matches
|
||||
responseContentType := c.Response().Header().Get(echo.HeaderContentType)
|
||||
|
||||
for _, contentType := range contentTypes {
|
||||
if strings.Contains(responseContentType, contentType) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// New returns a middleware which compresses HTTP response using gzip compression
|
||||
@ -75,11 +100,8 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
|
||||
config.MinLength = DefaultConfig.MinLength
|
||||
}
|
||||
|
||||
if config.ContentTypes == nil {
|
||||
config.ContentTypes = DefaultConfig.ContentTypes
|
||||
}
|
||||
|
||||
pool := gzipPool(config)
|
||||
bpool := bufferPool()
|
||||
|
||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
@ -89,8 +111,8 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
|
||||
|
||||
res := c.Response()
|
||||
res.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
|
||||
if shouldCompress(c, config.ContentTypes) {
|
||||
res.Header().Set(echo.HeaderContentEncoding, gzipScheme) // Issue #806
|
||||
|
||||
if strings.Contains(c.Request().Header.Get(echo.HeaderAcceptEncoding), gzipScheme) {
|
||||
i := pool.Get()
|
||||
w, ok := i.(*gzip.Writer)
|
||||
if !ok {
|
||||
@ -98,8 +120,14 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
|
||||
}
|
||||
rw := res.Writer
|
||||
w.Reset(rw)
|
||||
|
||||
buf := bpool.Get().(*bytes.Buffer)
|
||||
buf.Reset()
|
||||
|
||||
grw := &gzipResponseWriter{Writer: w, ResponseWriter: rw, minLength: config.MinLength, buffer: buf}
|
||||
|
||||
defer func() {
|
||||
if res.Size == 0 {
|
||||
if !grw.wroteBody {
|
||||
if res.Header().Get(echo.HeaderContentEncoding) == gzipScheme {
|
||||
res.Header().Del(echo.HeaderContentEncoding)
|
||||
}
|
||||
@ -108,49 +136,38 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
|
||||
// See issue #424, #407.
|
||||
res.Writer = rw
|
||||
w.Reset(io.Discard)
|
||||
} else if !grw.minLengthExceeded {
|
||||
// If the minimum content length hasn't exceeded, write the uncompressed response
|
||||
res.Writer = rw
|
||||
if grw.wroteHeader {
|
||||
grw.ResponseWriter.WriteHeader(grw.code)
|
||||
}
|
||||
grw.buffer.WriteTo(rw)
|
||||
w.Reset(io.Discard)
|
||||
}
|
||||
w.Close()
|
||||
bpool.Put(buf)
|
||||
pool.Put(w)
|
||||
}()
|
||||
grw := &gzipResponseWriter{Writer: w, ResponseWriter: rw}
|
||||
|
||||
res.Writer = grw
|
||||
}
|
||||
|
||||
return next(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func shouldCompress(c echo.Context, contentTypes []string) bool {
|
||||
if !strings.Contains(c.Request().Header.Get(echo.HeaderAcceptEncoding), gzipScheme) ||
|
||||
strings.Contains(c.Request().Header.Get("Connection"), "Upgrade") ||
|
||||
strings.Contains(c.Request().Header.Get(echo.HeaderContentType), "text/event-stream") {
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// If no allowed content types are given, compress all
|
||||
if len(contentTypes) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
// Iterate through the allowed content types and return true if the content type matches
|
||||
responseContentType := c.Response().Header().Get(echo.HeaderContentType)
|
||||
|
||||
for _, contentType := range contentTypes {
|
||||
if strings.Contains(responseContentType, contentType) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (w *gzipResponseWriter) WriteHeader(code int) {
|
||||
if code == http.StatusNoContent { // Issue #489
|
||||
w.ResponseWriter.Header().Del(echo.HeaderContentEncoding)
|
||||
}
|
||||
w.Header().Del(echo.HeaderContentLength) // Issue #444
|
||||
w.ResponseWriter.WriteHeader(code)
|
||||
|
||||
w.wroteHeader = true
|
||||
|
||||
// Delay writing of the header until we know if we'll actually compress the response
|
||||
w.code = code
|
||||
}
|
||||
|
||||
func (w *gzipResponseWriter) Write(b []byte) (int, error) {
|
||||
@ -158,10 +175,41 @@ func (w *gzipResponseWriter) Write(b []byte) (int, error) {
|
||||
w.Header().Set(echo.HeaderContentType, http.DetectContentType(b))
|
||||
}
|
||||
|
||||
w.wroteBody = true
|
||||
|
||||
if !w.minLengthExceeded {
|
||||
n, err := w.buffer.Write(b)
|
||||
|
||||
if w.buffer.Len() >= w.minLength {
|
||||
w.minLengthExceeded = true
|
||||
|
||||
// The minimum length is exceeded, add Content-Encoding header and write the header
|
||||
w.Header().Set(echo.HeaderContentEncoding, gzipScheme) // Issue #806
|
||||
if w.wroteHeader {
|
||||
w.ResponseWriter.WriteHeader(w.code)
|
||||
}
|
||||
|
||||
return w.Writer.Write(w.buffer.Bytes())
|
||||
} else {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
|
||||
return w.Writer.Write(b)
|
||||
}
|
||||
|
||||
func (w *gzipResponseWriter) Flush() {
|
||||
if !w.minLengthExceeded {
|
||||
// Enforce compression
|
||||
w.minLengthExceeded = true
|
||||
w.Header().Set(echo.HeaderContentEncoding, gzipScheme) // Issue #806
|
||||
if w.wroteHeader {
|
||||
w.ResponseWriter.WriteHeader(w.code)
|
||||
}
|
||||
|
||||
w.Writer.Write(w.buffer.Bytes())
|
||||
}
|
||||
|
||||
w.Writer.(*gzip.Writer).Flush()
|
||||
if flusher, ok := w.ResponseWriter.(http.Flusher); ok {
|
||||
flusher.Flush()
|
||||
@ -190,3 +238,12 @@ func gzipPool(config Config) sync.Pool {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func bufferPool() sync.Pool {
|
||||
return sync.Pool{
|
||||
New: func() interface{} {
|
||||
b := &bytes.Buffer{}
|
||||
return b
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
240
http/middleware/gzip/gzip_test.go
Normal file
240
http/middleware/gzip/gzip_test.go
Normal file
@ -0,0 +1,240 @@
|
||||
package gzip
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGzip(t *testing.T) {
|
||||
e := echo.New()
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
rec := httptest.NewRecorder()
|
||||
c := e.NewContext(req, rec)
|
||||
|
||||
// Skip if no Accept-Encoding header
|
||||
h := New()(func(c echo.Context) error {
|
||||
c.Response().Write([]byte("test")) // For Content-Type sniffing
|
||||
return nil
|
||||
})
|
||||
h(c)
|
||||
|
||||
assert := assert.New(t)
|
||||
|
||||
assert.Equal("test", rec.Body.String())
|
||||
|
||||
// Gzip
|
||||
req = httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec = httptest.NewRecorder()
|
||||
c = e.NewContext(req, rec)
|
||||
h(c)
|
||||
assert.Equal(gzipScheme, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
assert.Contains(rec.Header().Get(echo.HeaderContentType), echo.MIMETextPlain)
|
||||
r, err := gzip.NewReader(rec.Body)
|
||||
if assert.NoError(err) {
|
||||
buf := new(bytes.Buffer)
|
||||
defer r.Close()
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal("test", buf.String())
|
||||
}
|
||||
|
||||
chunkBuf := make([]byte, 5)
|
||||
|
||||
// Gzip chunked
|
||||
req = httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec = httptest.NewRecorder()
|
||||
|
||||
c = e.NewContext(req, rec)
|
||||
New()(func(c echo.Context) error {
|
||||
c.Response().Header().Set("Content-Type", "text/event-stream")
|
||||
c.Response().Header().Set("Transfer-Encoding", "chunked")
|
||||
|
||||
// Write and flush the first part of the data
|
||||
c.Response().Write([]byte("test\n"))
|
||||
c.Response().Flush()
|
||||
|
||||
// Read the first part of the data
|
||||
assert.True(rec.Flushed)
|
||||
assert.Equal(gzipScheme, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
r.Reset(rec.Body)
|
||||
|
||||
_, err = io.ReadFull(r, chunkBuf)
|
||||
assert.NoError(err)
|
||||
assert.Equal("test\n", string(chunkBuf))
|
||||
|
||||
// Write and flush the second part of the data
|
||||
c.Response().Write([]byte("test\n"))
|
||||
c.Response().Flush()
|
||||
|
||||
_, err = io.ReadFull(r, chunkBuf)
|
||||
assert.NoError(err)
|
||||
assert.Equal("test\n", string(chunkBuf))
|
||||
|
||||
// Write the final part of the data and return
|
||||
c.Response().Write([]byte("test"))
|
||||
return nil
|
||||
})(c)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
defer r.Close()
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal("test", buf.String())
|
||||
}
|
||||
|
||||
func TestGzipWithMinLength(t *testing.T) {
|
||||
e := echo.New()
|
||||
// Invalid level
|
||||
e.Use(NewWithConfig(Config{MinLength: 5}))
|
||||
e.GET("/", func(c echo.Context) error {
|
||||
c.Response().Write([]byte("test"))
|
||||
return nil
|
||||
})
|
||||
e.GET("/foobar", func(c echo.Context) error {
|
||||
c.Response().Write([]byte("foobar"))
|
||||
return nil
|
||||
})
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec := httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding))
|
||||
assert.Contains(t, rec.Body.String(), "test")
|
||||
|
||||
req = httptest.NewRequest(http.MethodGet, "/foobar", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec = httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, gzipScheme, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
r, err := gzip.NewReader(rec.Body)
|
||||
if assert.NoError(t, err) {
|
||||
buf := new(bytes.Buffer)
|
||||
defer r.Close()
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal(t, "foobar", buf.String())
|
||||
}
|
||||
}
|
||||
|
||||
func TestGzipNoContent(t *testing.T) {
|
||||
e := echo.New()
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec := httptest.NewRecorder()
|
||||
c := e.NewContext(req, rec)
|
||||
h := New()(func(c echo.Context) error {
|
||||
return c.NoContent(http.StatusNoContent)
|
||||
})
|
||||
if assert.NoError(t, h(c)) {
|
||||
assert.Empty(t, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
assert.Empty(t, rec.Header().Get(echo.HeaderContentType))
|
||||
assert.Equal(t, 0, len(rec.Body.Bytes()))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGzipEmpty(t *testing.T) {
|
||||
e := echo.New()
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec := httptest.NewRecorder()
|
||||
c := e.NewContext(req, rec)
|
||||
h := New()(func(c echo.Context) error {
|
||||
return c.String(http.StatusOK, "")
|
||||
})
|
||||
if assert.NoError(t, h(c)) {
|
||||
assert.Equal(t, gzipScheme, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
assert.Equal(t, "text/plain; charset=UTF-8", rec.Header().Get(echo.HeaderContentType))
|
||||
r, err := gzip.NewReader(rec.Body)
|
||||
if assert.NoError(t, err) {
|
||||
var buf bytes.Buffer
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal(t, "", buf.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGzipErrorReturned(t *testing.T) {
|
||||
e := echo.New()
|
||||
e.Use(New())
|
||||
e.GET("/", func(c echo.Context) error {
|
||||
return echo.ErrNotFound
|
||||
})
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec := httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, http.StatusNotFound, rec.Code)
|
||||
assert.Empty(t, rec.Header().Get(echo.HeaderContentEncoding))
|
||||
}
|
||||
|
||||
func TestGzipErrorReturnedInvalidConfig(t *testing.T) {
|
||||
e := echo.New()
|
||||
// Invalid level
|
||||
e.Use(NewWithConfig(Config{Level: 12}))
|
||||
e.GET("/", func(c echo.Context) error {
|
||||
c.Response().Write([]byte("test"))
|
||||
return nil
|
||||
})
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec := httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, http.StatusInternalServerError, rec.Code)
|
||||
assert.Contains(t, rec.Body.String(), "gzip")
|
||||
}
|
||||
|
||||
// Issue #806
|
||||
func TestGzipWithStatic(t *testing.T) {
|
||||
e := echo.New()
|
||||
e.Use(New())
|
||||
e.Static("/test", "./")
|
||||
req := httptest.NewRequest(http.MethodGet, "/test/gzip.go", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
rec := httptest.NewRecorder()
|
||||
e.ServeHTTP(rec, req)
|
||||
assert.Equal(t, http.StatusOK, rec.Code)
|
||||
// Data is written out in chunks when Content-Length == "", so only
|
||||
// validate the content length if it's not set.
|
||||
if cl := rec.Header().Get("Content-Length"); cl != "" {
|
||||
assert.Equal(t, cl, rec.Body.Len())
|
||||
}
|
||||
r, err := gzip.NewReader(rec.Body)
|
||||
if assert.NoError(t, err) {
|
||||
defer r.Close()
|
||||
want, err := os.ReadFile("./gzip.go")
|
||||
if assert.NoError(t, err) {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(r)
|
||||
assert.Equal(t, want, buf.Bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkGzip(b *testing.B) {
|
||||
e := echo.New()
|
||||
|
||||
req := httptest.NewRequest(http.MethodGet, "/", nil)
|
||||
req.Header.Set(echo.HeaderAcceptEncoding, gzipScheme)
|
||||
|
||||
h := New()(func(c echo.Context) error {
|
||||
c.Response().Write([]byte("test")) // For Content-Type sniffing
|
||||
return nil
|
||||
})
|
||||
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
// Gzip
|
||||
rec := httptest.NewRecorder()
|
||||
c := e.NewContext(req, rec)
|
||||
h(c)
|
||||
}
|
||||
}
|
||||
164
http/middleware/hlsrewrite/hlsrewrite.go
Normal file
164
http/middleware/hlsrewrite/hlsrewrite.go
Normal file
@ -0,0 +1,164 @@
|
||||
package hlsrewrite
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/labstack/echo/v4/middleware"
|
||||
)
|
||||
|
||||
type HLSRewriteConfig struct {
|
||||
// Skipper defines a function to skip middleware.
|
||||
Skipper middleware.Skipper
|
||||
PathPrefix string
|
||||
}
|
||||
|
||||
var DefaultHLSRewriteConfig = HLSRewriteConfig{
|
||||
Skipper: func(c echo.Context) bool {
|
||||
req := c.Request()
|
||||
|
||||
return !strings.HasSuffix(req.URL.Path, ".m3u8")
|
||||
},
|
||||
PathPrefix: "",
|
||||
}
|
||||
|
||||
// NewHTTP returns a new HTTP session middleware with default config
|
||||
func NewHLSRewrite() echo.MiddlewareFunc {
|
||||
return NewHLSRewriteWithConfig(DefaultHLSRewriteConfig)
|
||||
}
|
||||
|
||||
type hlsrewrite struct {
|
||||
pathPrefix string
|
||||
}
|
||||
|
||||
func NewHLSRewriteWithConfig(config HLSRewriteConfig) echo.MiddlewareFunc {
|
||||
if config.Skipper == nil {
|
||||
config.Skipper = DefaultHLSRewriteConfig.Skipper
|
||||
}
|
||||
|
||||
pathPrefix := config.PathPrefix
|
||||
if len(pathPrefix) != 0 {
|
||||
if !strings.HasSuffix(pathPrefix, "/") {
|
||||
pathPrefix += "/"
|
||||
}
|
||||
}
|
||||
|
||||
hls := hlsrewrite{
|
||||
pathPrefix: pathPrefix,
|
||||
}
|
||||
|
||||
return func(next echo.HandlerFunc) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
if config.Skipper(c) {
|
||||
return next(c)
|
||||
}
|
||||
|
||||
req := c.Request()
|
||||
|
||||
if req.Method == "GET" || req.Method == "HEAD" {
|
||||
return hls.rewrite(c, next)
|
||||
}
|
||||
|
||||
return next(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// rewrite buffers the downstream handler's response for a .m3u8 request,
// strips the configured path prefix from each URI line, and then writes
// the rewritten playlist to the client. Non-.m3u8 requests and non-200
// responses are passed through unmodified.
func (h *hlsrewrite) rewrite(c echo.Context, next echo.HandlerFunc) error {
	req := c.Request()
	res := c.Response()

	path := req.URL.Path

	isM3U8 := strings.HasSuffix(path, ".m3u8")

	rewrite := false

	if isM3U8 {
		rewrite = true
	}

	var rewriter *hlsRewriter

	// Keep the current writer for later
	writer := res.Writer

	if rewrite {
		// Put a buffering rewriter in the middle. This will collect
		// the data that we need to rewrite.
		rewriter = &hlsRewriter{
			ResponseWriter: res.Writer,
		}

		res.Writer = rewriter
	}

	if err := next(c); err != nil {
		c.Error(err)
	}

	// Restore the original writer
	res.Writer = writer

	if rewrite {
		if res.Status != 200 {
			// Forward non-OK responses untouched.
			res.Write(rewriter.buffer.Bytes())
			return nil
		}

		// Rewrite the data before sending it to the client
		rewriter.rewrite(h.pathPrefix)

		res.Header().Set("Cache-Control", "private")
		res.Write(rewriter.buffer.Bytes())
	}

	return nil
}
|
||||
|
||||
// hlsRewriter buffers everything written to it so that an HLS playlist
// can be modified before it is sent to the client.
type hlsRewriter struct {
	http.ResponseWriter
	buffer bytes.Buffer
}

// Write stores the data in the internal buffer instead of forwarding it
// to the wrapped ResponseWriter. The buffered data is processed later
// by rewrite.
func (g *hlsRewriter) Write(data []byte) (int, error) {
	return g.buffer.Write(data)
}

// rewrite walks through the buffered playlist line by line and removes
// the given path prefix from every URI line. Empty lines and comment
// lines (starting with "#") are kept as they are. If scanning fails,
// the rewritten content is discarded.
func (g *hlsRewriter) rewrite(pathPrefix string) {
	var rewritten bytes.Buffer

	scanner := bufio.NewScanner(&g.buffer)
	for scanner.Scan() {
		line := scanner.Text()

		// Pass empty lines and comments through unmodified.
		if len(line) == 0 || strings.HasPrefix(line, "#") {
			rewritten.WriteString(line)
			rewritten.WriteString("\n")
			continue
		}

		// Strip the prefix from URI lines.
		rewritten.WriteString(strings.TrimPrefix(line, pathPrefix))
		rewritten.WriteString("\n")
	}

	if err := scanner.Err(); err != nil {
		return
	}

	g.buffer = rewritten
}
|
||||
@ -2,6 +2,7 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
@ -45,40 +46,92 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
|
||||
start := time.Now()
|
||||
|
||||
req := c.Request()
|
||||
|
||||
var reader io.ReadCloser
|
||||
r := &sizeReadCloser{}
|
||||
|
||||
if req.Body != nil {
|
||||
reader = req.Body
|
||||
r.ReadCloser = req.Body
|
||||
req.Body = r
|
||||
}
|
||||
|
||||
res := c.Response()
|
||||
|
||||
writer := res.Writer
|
||||
w := &sizeWriter{
|
||||
ResponseWriter: res.Writer,
|
||||
}
|
||||
res.Writer = w
|
||||
|
||||
path := req.URL.Path
|
||||
raw := req.URL.RawQuery
|
||||
|
||||
if err := next(c); err != nil {
|
||||
c.Error(err)
|
||||
}
|
||||
defer func() {
|
||||
res.Writer = writer
|
||||
req.Body = reader
|
||||
|
||||
latency := time.Since(start)
|
||||
latency := time.Since(start)
|
||||
|
||||
if raw != "" {
|
||||
path = path + "?" + raw
|
||||
}
|
||||
if raw != "" {
|
||||
path = path + "?" + raw
|
||||
}
|
||||
|
||||
logger := config.Logger.WithFields(log.Fields{
|
||||
"client": c.RealIP(),
|
||||
"method": req.Method,
|
||||
"path": path,
|
||||
"proto": req.Proto,
|
||||
"status": res.Status,
|
||||
"status_text": http.StatusText(res.Status),
|
||||
"size_bytes": res.Size,
|
||||
"latency_ms": latency.Milliseconds(),
|
||||
"user_agent": req.Header.Get("User-Agent"),
|
||||
})
|
||||
logger := config.Logger.WithFields(log.Fields{
|
||||
"client": c.RealIP(),
|
||||
"method": req.Method,
|
||||
"path": path,
|
||||
"proto": req.Proto,
|
||||
"status": res.Status,
|
||||
"status_text": http.StatusText(res.Status),
|
||||
"tx_size_bytes": w.size,
|
||||
"rx_size_bytes": r.size,
|
||||
"latency_ms": latency.Milliseconds(),
|
||||
"user_agent": req.Header.Get("User-Agent"),
|
||||
})
|
||||
|
||||
if res.Status >= 400 {
|
||||
logger.Warn().Log("")
|
||||
}
|
||||
if res.Status >= 400 {
|
||||
logger.Warn().Log("")
|
||||
}
|
||||
|
||||
logger.Debug().Log("")
|
||||
logger.Debug().Log("")
|
||||
}()
|
||||
|
||||
return nil
|
||||
return next(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type sizeWriter struct {
|
||||
http.ResponseWriter
|
||||
|
||||
size int64
|
||||
}
|
||||
|
||||
func (w *sizeWriter) Write(body []byte) (int, error) {
|
||||
n, err := w.ResponseWriter.Write(body)
|
||||
|
||||
w.size += int64(n)
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
type sizeReadCloser struct {
|
||||
io.ReadCloser
|
||||
|
||||
size int64
|
||||
}
|
||||
|
||||
func (r *sizeReadCloser) Read(p []byte) (int, error) {
|
||||
n, err := r.ReadCloser.Read(p)
|
||||
|
||||
r.size += int64(n)
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (r *sizeReadCloser) Close() error {
|
||||
err := r.ReadCloser.Close()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
@ -51,7 +51,7 @@ type hls struct {
|
||||
// NewHLS returns a new HLS session middleware
|
||||
func NewHLSWithConfig(config HLSConfig) echo.MiddlewareFunc {
|
||||
if config.Skipper == nil {
|
||||
config.Skipper = DefaultHTTPConfig.Skipper
|
||||
config.Skipper = DefaultHLSConfig.Skipper
|
||||
}
|
||||
|
||||
if config.EgressCollector == nil {
|
||||
|
||||
@ -51,10 +51,10 @@ import (
|
||||
"github.com/datarhei/core/v16/session"
|
||||
"github.com/datarhei/core/v16/srt"
|
||||
|
||||
mwbodysize "github.com/datarhei/core/v16/http/middleware/bodysize"
|
||||
mwcache "github.com/datarhei/core/v16/http/middleware/cache"
|
||||
mwcors "github.com/datarhei/core/v16/http/middleware/cors"
|
||||
mwgzip "github.com/datarhei/core/v16/http/middleware/gzip"
|
||||
mwhlsrewrite "github.com/datarhei/core/v16/http/middleware/hlsrewrite"
|
||||
mwiplimit "github.com/datarhei/core/v16/http/middleware/iplimit"
|
||||
mwlog "github.com/datarhei/core/v16/http/middleware/log"
|
||||
mwmime "github.com/datarhei/core/v16/http/middleware/mime"
|
||||
@ -132,6 +132,7 @@ type server struct {
|
||||
cors echo.MiddlewareFunc
|
||||
cache echo.MiddlewareFunc
|
||||
session echo.MiddlewareFunc
|
||||
hlsrewrite echo.MiddlewareFunc
|
||||
}
|
||||
|
||||
gzip struct {
|
||||
@ -192,6 +193,12 @@ func NewServer(config Config) (Server, error) {
|
||||
}
|
||||
|
||||
s.filesystems[filesystem.Name] = filesystem
|
||||
|
||||
if fs.Filesystem.Type() == "disk" {
|
||||
s.middleware.hlsrewrite = mwhlsrewrite.NewHLSRewriteWithConfig(mwhlsrewrite.HLSRewriteConfig{
|
||||
PathPrefix: fs.Filesystem.Base(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := corsPrefixes["/"]; !ok {
|
||||
@ -330,7 +337,6 @@ func NewServer(config Config) (Server, error) {
|
||||
return nil
|
||||
},
|
||||
}))
|
||||
s.router.Use(mwbodysize.New())
|
||||
s.router.Use(mwsession.NewHTTPWithConfig(mwsession.HTTPConfig{
|
||||
Collector: config.Sessions.Collector("http"),
|
||||
}))
|
||||
@ -394,9 +400,9 @@ func (s *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
func (s *server) setRoutes() {
|
||||
gzipMiddleware := mwgzip.NewWithConfig(mwgzip.Config{
|
||||
Level: mwgzip.BestSpeed,
|
||||
MinLength: 1000,
|
||||
ContentTypes: []string{""},
|
||||
Level: mwgzip.BestSpeed,
|
||||
MinLength: 1000,
|
||||
Skipper: mwgzip.ContentTypeSkipper(nil),
|
||||
})
|
||||
|
||||
// API router group
|
||||
@ -440,9 +446,9 @@ func (s *server) setRoutes() {
|
||||
|
||||
if filesystem.Gzip {
|
||||
fs.Use(mwgzip.NewWithConfig(mwgzip.Config{
|
||||
Level: mwgzip.BestSpeed,
|
||||
MinLength: 1000,
|
||||
ContentTypes: s.gzip.mimetypes,
|
||||
Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes),
|
||||
Level: mwgzip.BestSpeed,
|
||||
MinLength: 1000,
|
||||
}))
|
||||
}
|
||||
|
||||
@ -629,6 +635,7 @@ func (s *server) setRoutesV3(v3 *echo.Group) {
|
||||
// v3 Log
|
||||
v3.GET("/log", s.v3handler.log.Log)
|
||||
|
||||
// v3 Resources
|
||||
// v3 Metrics
|
||||
v3.GET("/metrics", s.v3handler.resources.Describe)
|
||||
v3.POST("/metrics", s.v3handler.resources.Metrics)
|
||||
}
|
||||
|
||||
@ -309,11 +309,19 @@ func (fs *diskFilesystem) List(pattern string) []FileInfo {
|
||||
files := []FileInfo{}
|
||||
|
||||
fs.walk(func(path string, info os.FileInfo) {
|
||||
if path == fs.dir {
|
||||
return
|
||||
}
|
||||
|
||||
name := strings.TrimPrefix(path, fs.dir)
|
||||
if name[0] != os.PathSeparator {
|
||||
name = string(os.PathSeparator) + name
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
name += "/"
|
||||
}
|
||||
|
||||
if len(pattern) != 0 {
|
||||
if ok, _ := glob.Match(pattern, name, '/'); !ok {
|
||||
return
|
||||
@ -337,6 +345,7 @@ func (fs *diskFilesystem) walk(walkfn func(path string, info os.FileInfo)) {
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
walkfn(path, info)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@ -20,11 +20,11 @@ func NewCPUCollector() metric.Collector {
|
||||
ncpu: 1,
|
||||
}
|
||||
|
||||
c.ncpuDescr = metric.NewDesc("cpu_ncpu", "", nil)
|
||||
c.systemDescr = metric.NewDesc("cpu_system", "", nil)
|
||||
c.userDescr = metric.NewDesc("cpu_user", "", nil)
|
||||
c.idleDescr = metric.NewDesc("cpu_idle", "", nil)
|
||||
c.otherDescr = metric.NewDesc("cpu_other", "", nil)
|
||||
c.ncpuDescr = metric.NewDesc("cpu_ncpu", "Number of logical CPUs in the system", nil)
|
||||
c.systemDescr = metric.NewDesc("cpu_system", "Percentage of CPU used for the system", nil)
|
||||
c.userDescr = metric.NewDesc("cpu_user", "Percentage of CPU used for the user", nil)
|
||||
c.idleDescr = metric.NewDesc("cpu_idle", "Percentage of idle CPU", nil)
|
||||
c.otherDescr = metric.NewDesc("cpu_other", "Percentage of CPU used for other subsystems", nil)
|
||||
|
||||
if ncpu, err := psutil.CPUCounts(true); err == nil {
|
||||
c.ncpu = ncpu
|
||||
|
||||
@ -17,8 +17,8 @@ func NewDiskCollector(path string) metric.Collector {
|
||||
path: path,
|
||||
}
|
||||
|
||||
c.totalDescr = metric.NewDesc("disk_total", "", []string{"path"})
|
||||
c.usageDescr = metric.NewDesc("disk_usage", "", []string{"path"})
|
||||
c.totalDescr = metric.NewDesc("disk_total", "Total size of the disk in bytes", []string{"path"})
|
||||
c.usageDescr = metric.NewDesc("disk_usage", "Number of used bytes on the disk", []string{"path"})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
@ -17,7 +17,7 @@ func NewFFmpegCollector(f ffmpeg.FFmpeg) metric.Collector {
|
||||
ffmpeg: f,
|
||||
}
|
||||
|
||||
c.processDescr = metric.NewDesc("ffmpeg_process", "", []string{"state"})
|
||||
c.processDescr = metric.NewDesc("ffmpeg_process", "State of the ffmpeg process", []string{"state"})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
@ -19,9 +19,9 @@ func NewFilesystemCollector(name string, fs fs.Filesystem) metric.Collector {
|
||||
name: name,
|
||||
}
|
||||
|
||||
c.limitDescr = metric.NewDesc("filesystem_limit", "", []string{"name"})
|
||||
c.usageDescr = metric.NewDesc("filesystem_usage", "", []string{"name"})
|
||||
c.filesDescr = metric.NewDesc("filesystem_files", "", []string{"name"})
|
||||
c.limitDescr = metric.NewDesc("filesystem_limit", "Total size of the filesystem in bytes, negative if unlimited", []string{"name"})
|
||||
c.usageDescr = metric.NewDesc("filesystem_usage", "Number of used bytes on the filesystem", []string{"name"})
|
||||
c.filesDescr = metric.NewDesc("filesystem_files", "Number of files on the filesystem (excluding directories)", []string{"name"})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
@ -13,8 +13,8 @@ type memCollector struct {
|
||||
func NewMemCollector() metric.Collector {
|
||||
c := &memCollector{}
|
||||
|
||||
c.totalDescr = metric.NewDesc("mem_total", "", nil)
|
||||
c.freeDescr = metric.NewDesc("mem_free", "", nil)
|
||||
c.totalDescr = metric.NewDesc("mem_total", "Total available memory in bytes", nil)
|
||||
c.freeDescr = metric.NewDesc("mem_free", "Free memory in bytes", nil)
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Pattern interface {
|
||||
@ -304,6 +305,10 @@ func NewDesc(name, description string, labels []string) *Description {
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Description) String() string {
|
||||
return fmt.Sprintf("%s: %s (%s)", d.name, d.description, strings.Join(d.labels, ","))
|
||||
}
|
||||
|
||||
func (d *Description) Name() string {
|
||||
return d.name
|
||||
}
|
||||
@ -312,6 +317,13 @@ func (d *Description) Description() string {
|
||||
return d.description
|
||||
}
|
||||
|
||||
func (d *Description) Labels() []string {
|
||||
labels := make([]string, len(d.labels))
|
||||
copy(labels, d.labels)
|
||||
|
||||
return labels
|
||||
}
|
||||
|
||||
type Collector interface {
|
||||
Prefix() string
|
||||
Describe() []*Description
|
||||
|
||||
@ -10,9 +10,26 @@ import (
|
||||
"github.com/datarhei/core/v16/monitor/metric"
|
||||
)
|
||||
|
||||
type Monitor interface {
|
||||
Register(c metric.Collector)
|
||||
type Reader interface {
|
||||
Collect(patterns []metric.Pattern) metric.Metrics
|
||||
Describe() []*metric.Description
|
||||
}
|
||||
|
||||
type Monitor interface {
|
||||
Reader
|
||||
Register(c metric.Collector)
|
||||
UnregisterAll()
|
||||
}
|
||||
|
||||
type HistoryReader interface {
|
||||
Reader
|
||||
History(timerange, interval time.Duration, patterns []metric.Pattern) []HistoryMetrics
|
||||
Resolution() (timerange, interval time.Duration)
|
||||
}
|
||||
|
||||
type HistoryMonitor interface {
|
||||
HistoryReader
|
||||
Register(c metric.Collector)
|
||||
UnregisterAll()
|
||||
}
|
||||
|
||||
@ -75,6 +92,26 @@ func (m *monitor) Collect(patterns []metric.Pattern) metric.Metrics {
|
||||
return metrics
|
||||
}
|
||||
|
||||
func (m *monitor) Describe() []*metric.Description {
|
||||
descriptors := []*metric.Description{}
|
||||
collectors := map[metric.Collector]struct{}{}
|
||||
|
||||
m.lock.RLock()
|
||||
defer m.lock.RUnlock()
|
||||
|
||||
for _, c := range m.collectors {
|
||||
if _, ok := collectors[c]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
collectors[c] = struct{}{}
|
||||
|
||||
descriptors = append(descriptors, c.Describe()...)
|
||||
}
|
||||
|
||||
return descriptors
|
||||
}
|
||||
|
||||
func (m *monitor) UnregisterAll() {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
@ -86,12 +123,6 @@ func (m *monitor) UnregisterAll() {
|
||||
m.collectors = make(map[string]metric.Collector)
|
||||
}
|
||||
|
||||
type HistoryMonitor interface {
|
||||
Monitor
|
||||
History(timerange, interval time.Duration, patterns []metric.Pattern) []HistoryMetrics
|
||||
Resolution() (timerange, interval time.Duration)
|
||||
}
|
||||
|
||||
type historyMonitor struct {
|
||||
monitor Monitor
|
||||
|
||||
@ -209,6 +240,10 @@ func (m *historyMonitor) Collect(patterns []metric.Pattern) metric.Metrics {
|
||||
return m.monitor.Collect(patterns)
|
||||
}
|
||||
|
||||
// Describe delegates to the wrapped monitor's Describe.
func (m *historyMonitor) Describe() []*metric.Description {
	return m.monitor.Describe()
}
|
||||
|
||||
func (m *historyMonitor) UnregisterAll() {
|
||||
m.monitor.UnregisterAll()
|
||||
|
||||
@ -327,13 +362,3 @@ func (m *historyMonitor) resample(values []HistoryMetrics, timerange, interval t
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
type Reader interface {
|
||||
Collect(patterns []metric.Pattern) metric.Metrics
|
||||
}
|
||||
|
||||
type HistoryReader interface {
|
||||
Reader
|
||||
History(timerange, interval time.Duration, patterns []metric.Pattern) []HistoryMetrics
|
||||
Resolution() (timerange, interval time.Duration)
|
||||
}
|
||||
|
||||
@ -13,8 +13,8 @@ type netCollector struct {
|
||||
func NewNetCollector() metric.Collector {
|
||||
c := &netCollector{}
|
||||
|
||||
c.rxDescr = metric.NewDesc("net_rx", "", []string{"interface"})
|
||||
c.txDescr = metric.NewDesc("net_tx", "", []string{"interface"})
|
||||
c.rxDescr = metric.NewDesc("net_rx", "Number of received bytes", []string{"interface"})
|
||||
c.txDescr = metric.NewDesc("net_tx", "Number of transmitted bytes", []string{"interface"})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
@ -22,10 +22,10 @@ func NewRestreamCollector(r restream.Restreamer) metric.Collector {
|
||||
r: r,
|
||||
}
|
||||
|
||||
c.restreamProcessDescr = metric.NewDesc("restream_process", "", []string{"processid", "state", "order", "name"})
|
||||
c.restreamProcessStatesDescr = metric.NewDesc("restream_process_states", "", []string{"processid", "state"})
|
||||
c.restreamProcessIODescr = metric.NewDesc("restream_io", "", []string{"processid", "type", "id", "address", "index", "stream", "media", "name"})
|
||||
c.restreamStatesDescr = metric.NewDesc("restream_state", "", []string{"state"})
|
||||
c.restreamProcessDescr = metric.NewDesc("restream_process", "Current process values by name", []string{"processid", "state", "order", "name"})
|
||||
c.restreamProcessStatesDescr = metric.NewDesc("restream_process_states", "Current process state", []string{"processid", "state"})
|
||||
c.restreamProcessIODescr = metric.NewDesc("restream_io", "Current process IO values by name", []string{"processid", "type", "id", "address", "index", "stream", "media", "name"})
|
||||
c.restreamStatesDescr = metric.NewDesc("restream_state", "Summarized process states", []string{"state"})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
@ -31,17 +31,17 @@ func NewSessionCollector(r session.RegistryReader, collectors []string) metric.C
|
||||
c.collectors = r.Collectors()
|
||||
}
|
||||
|
||||
c.totalDescr = metric.NewDesc("session_total", "", []string{"collector"})
|
||||
c.limitDescr = metric.NewDesc("session_limit", "", []string{"collector"})
|
||||
c.activeDescr = metric.NewDesc("session_active", "", []string{"collector"})
|
||||
c.rxBytesDescr = metric.NewDesc("session_rxbytes", "", []string{"collector"})
|
||||
c.txBytesDescr = metric.NewDesc("session_txbytes", "", []string{"collector"})
|
||||
c.totalDescr = metric.NewDesc("session_total", "Total sessions", []string{"collector"})
|
||||
c.limitDescr = metric.NewDesc("session_limit", "Max. number of concurrent sessions", []string{"collector"})
|
||||
c.activeDescr = metric.NewDesc("session_active", "Number of current sessions", []string{"collector"})
|
||||
c.rxBytesDescr = metric.NewDesc("session_rxbytes", "Number of received bytes", []string{"collector"})
|
||||
c.txBytesDescr = metric.NewDesc("session_txbytes", "Number of transmitted bytes", []string{"collector"})
|
||||
|
||||
c.rxBitrateDescr = metric.NewDesc("session_rxbitrate", "", []string{"collector"})
|
||||
c.txBitrateDescr = metric.NewDesc("session_txbitrate", "", []string{"collector"})
|
||||
c.rxBitrateDescr = metric.NewDesc("session_rxbitrate", "Current receiving bitrate in bit per second", []string{"collector"})
|
||||
c.txBitrateDescr = metric.NewDesc("session_txbitrate", "Current transmitting bitrate in bit per second", []string{"collector"})
|
||||
|
||||
c.maxTxBitrateDescr = metric.NewDesc("session_maxtxbitrate", "", []string{"collector"})
|
||||
c.maxRxBitrateDescr = metric.NewDesc("session_maxrxbitrate", "", []string{"collector"})
|
||||
c.maxRxBitrateDescr = metric.NewDesc("session_maxrxbitrate", "Max. allowed receiving bitrate in bit per second", []string{"collector"})
|
||||
c.maxTxBitrateDescr = metric.NewDesc("session_maxtxbitrate", "Max. allowed transmitting bitrate in bit per second", []string{"collector"})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
@ -16,7 +16,7 @@ func NewUptimeCollector() metric.Collector {
|
||||
t: time.Now(),
|
||||
}
|
||||
|
||||
c.uptimeDescr = metric.NewDesc("uptime_uptime", "", nil)
|
||||
c.uptimeDescr = metric.NewDesc("uptime_uptime", "Current uptime in seconds", nil)
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
@ -53,57 +53,57 @@ type filesystem struct {
|
||||
}
|
||||
|
||||
func New(config Config) Filesystem {
|
||||
fs := &filesystem{
|
||||
rfs := &filesystem{
|
||||
Filesystem: config.FS,
|
||||
logger: config.Logger,
|
||||
}
|
||||
|
||||
if fs.logger == nil {
|
||||
fs.logger = log.New("")
|
||||
if rfs.logger == nil {
|
||||
rfs.logger = log.New("")
|
||||
}
|
||||
|
||||
fs.logger = fs.logger.WithFields(log.Fields{
|
||||
rfs.logger = rfs.logger.WithFields(log.Fields{
|
||||
"name": config.FS.Name(),
|
||||
"type": config.FS.Type(),
|
||||
})
|
||||
|
||||
fs.cleanupPatterns = make(map[string][]Pattern)
|
||||
rfs.cleanupPatterns = make(map[string][]Pattern)
|
||||
|
||||
// already drain the stop
|
||||
fs.stopOnce.Do(func() {})
|
||||
rfs.stopOnce.Do(func() {})
|
||||
|
||||
return fs
|
||||
return rfs
|
||||
}
|
||||
|
||||
func (fs *filesystem) Start() {
|
||||
fs.startOnce.Do(func() {
|
||||
func (rfs *filesystem) Start() {
|
||||
rfs.startOnce.Do(func() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
fs.stopTicker = cancel
|
||||
go fs.cleanupTicker(ctx, time.Second)
|
||||
rfs.stopTicker = cancel
|
||||
go rfs.cleanupTicker(ctx, time.Second)
|
||||
|
||||
fs.stopOnce = sync.Once{}
|
||||
rfs.stopOnce = sync.Once{}
|
||||
|
||||
fs.logger.Debug().Log("Starting cleanup")
|
||||
rfs.logger.Debug().Log("Starting cleanup")
|
||||
})
|
||||
}
|
||||
|
||||
func (fs *filesystem) Stop() {
|
||||
fs.stopOnce.Do(func() {
|
||||
fs.stopTicker()
|
||||
func (rfs *filesystem) Stop() {
|
||||
rfs.stopOnce.Do(func() {
|
||||
rfs.stopTicker()
|
||||
|
||||
fs.startOnce = sync.Once{}
|
||||
rfs.startOnce = sync.Once{}
|
||||
|
||||
fs.logger.Debug().Log("Stopping cleanup")
|
||||
rfs.logger.Debug().Log("Stopping cleanup")
|
||||
})
|
||||
}
|
||||
|
||||
func (fs *filesystem) SetCleanup(id string, patterns []Pattern) {
|
||||
func (rfs *filesystem) SetCleanup(id string, patterns []Pattern) {
|
||||
if len(patterns) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
for _, p := range patterns {
|
||||
fs.logger.Debug().WithFields(log.Fields{
|
||||
rfs.logger.Debug().WithFields(log.Fields{
|
||||
"id": id,
|
||||
"pattern": p.Pattern,
|
||||
"max_files": p.MaxFiles,
|
||||
@ -111,38 +111,47 @@ func (fs *filesystem) SetCleanup(id string, patterns []Pattern) {
|
||||
}).Log("Add pattern")
|
||||
}
|
||||
|
||||
fs.cleanupLock.Lock()
|
||||
defer fs.cleanupLock.Unlock()
|
||||
rfs.cleanupLock.Lock()
|
||||
defer rfs.cleanupLock.Unlock()
|
||||
|
||||
fs.cleanupPatterns[id] = append(fs.cleanupPatterns[id], patterns...)
|
||||
rfs.cleanupPatterns[id] = append(rfs.cleanupPatterns[id], patterns...)
|
||||
}
|
||||
|
||||
func (fs *filesystem) UnsetCleanup(id string) {
|
||||
fs.logger.Debug().WithField("id", id).Log("Remove pattern group")
|
||||
func (rfs *filesystem) UnsetCleanup(id string) {
|
||||
rfs.logger.Debug().WithField("id", id).Log("Remove pattern group")
|
||||
|
||||
fs.cleanupLock.Lock()
|
||||
defer fs.cleanupLock.Unlock()
|
||||
rfs.cleanupLock.Lock()
|
||||
defer rfs.cleanupLock.Unlock()
|
||||
|
||||
patterns := fs.cleanupPatterns[id]
|
||||
delete(fs.cleanupPatterns, id)
|
||||
patterns := rfs.cleanupPatterns[id]
|
||||
delete(rfs.cleanupPatterns, id)
|
||||
|
||||
fs.purge(patterns)
|
||||
rfs.purge(patterns)
|
||||
}
|
||||
|
||||
func (fs *filesystem) cleanup() {
|
||||
fs.cleanupLock.RLock()
|
||||
defer fs.cleanupLock.RUnlock()
|
||||
func (rfs *filesystem) cleanup() {
|
||||
rfs.cleanupLock.RLock()
|
||||
defer rfs.cleanupLock.RUnlock()
|
||||
|
||||
for _, patterns := range fs.cleanupPatterns {
|
||||
for _, patterns := range rfs.cleanupPatterns {
|
||||
for _, pattern := range patterns {
|
||||
files := fs.Filesystem.List(pattern.Pattern)
|
||||
filesAndDirs := rfs.Filesystem.List(pattern.Pattern)
|
||||
|
||||
files := []fs.FileInfo{}
|
||||
for _, f := range filesAndDirs {
|
||||
if f.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
files = append(files, f)
|
||||
}
|
||||
|
||||
sort.Slice(files, func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) })
|
||||
|
||||
if pattern.MaxFiles > 0 && uint(len(files)) > pattern.MaxFiles {
|
||||
for i := uint(0); i < uint(len(files))-pattern.MaxFiles; i++ {
|
||||
fs.logger.Debug().WithField("path", files[i].Name()).Log("Remove file because MaxFiles is exceeded")
|
||||
fs.Filesystem.Delete(files[i].Name())
|
||||
rfs.logger.Debug().WithField("path", files[i].Name()).Log("Remove file because MaxFiles is exceeded")
|
||||
rfs.Filesystem.Delete(files[i].Name())
|
||||
}
|
||||
}
|
||||
|
||||
@ -151,8 +160,8 @@ func (fs *filesystem) cleanup() {
|
||||
|
||||
for _, f := range files {
|
||||
if f.ModTime().Before(bestBefore) {
|
||||
fs.logger.Debug().WithField("path", f.Name()).Log("Remove file because MaxFileAge is exceeded")
|
||||
fs.Filesystem.Delete(f.Name())
|
||||
rfs.logger.Debug().WithField("path", f.Name()).Log("Remove file because MaxFileAge is exceeded")
|
||||
rfs.Filesystem.Delete(f.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -160,16 +169,17 @@ func (fs *filesystem) cleanup() {
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *filesystem) purge(patterns []Pattern) (nfiles uint64) {
|
||||
func (rfs *filesystem) purge(patterns []Pattern) (nfiles uint64) {
|
||||
for _, pattern := range patterns {
|
||||
if !pattern.PurgeOnDelete {
|
||||
continue
|
||||
}
|
||||
|
||||
files := fs.Filesystem.List(pattern.Pattern)
|
||||
files := rfs.Filesystem.List(pattern.Pattern)
|
||||
sort.Slice(files, func(i, j int) bool { return len(files[i].Name()) > len(files[j].Name()) })
|
||||
for _, f := range files {
|
||||
fs.logger.Debug().WithField("path", f.Name()).Log("Purging file")
|
||||
fs.Filesystem.Delete(f.Name())
|
||||
rfs.logger.Debug().WithField("path", f.Name()).Log("Purging file")
|
||||
rfs.Filesystem.Delete(f.Name())
|
||||
nfiles++
|
||||
}
|
||||
}
|
||||
@ -177,7 +187,7 @@ func (fs *filesystem) purge(patterns []Pattern) (nfiles uint64) {
|
||||
return
|
||||
}
|
||||
|
||||
func (fs *filesystem) cleanupTicker(ctx context.Context, interval time.Duration) {
|
||||
func (rfs *filesystem) cleanupTicker(ctx context.Context, interval time.Duration) {
|
||||
ticker := time.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
@ -186,7 +196,7 @@ func (fs *filesystem) cleanupTicker(ctx context.Context, interval time.Duration)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
fs.cleanup()
|
||||
rfs.cleanup()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -30,28 +30,28 @@ import (
|
||||
type Restreamer interface {
|
||||
ID() string // ID of this instance
|
||||
Name() string // Arbitrary name of this instance
|
||||
CreatedAt() time.Time // time of when this instance has been created
|
||||
Start() // start all processes that have a "start" order
|
||||
Stop() // stop all running process but keep their "start" order
|
||||
AddProcess(config *app.Config) error // add a new process
|
||||
GetProcessIDs(idpattern, refpattern string) []string // get a list of process IDs based on patterns for ID and reference
|
||||
DeleteProcess(id string) error // delete a process
|
||||
UpdateProcess(id string, config *app.Config) error // update a process
|
||||
StartProcess(id string) error // start a process
|
||||
StopProcess(id string) error // stop a process
|
||||
RestartProcess(id string) error // restart a process
|
||||
ReloadProcess(id string) error // reload a process
|
||||
GetProcess(id string) (*app.Process, error) // get a process
|
||||
GetProcessState(id string) (*app.State, error) // get the state of a process
|
||||
GetProcessLog(id string) (*app.Log, error) // get the logs of a process
|
||||
GetPlayout(id, inputid string) (string, error) // get the URL of the playout API for a process
|
||||
Probe(id string) app.Probe // probe a process
|
||||
Skills() skills.Skills // get the ffmpeg skills
|
||||
ReloadSkills() error // reload the ffmpeg skills
|
||||
SetProcessMetadata(id, key string, data interface{}) error // set metatdata to a process
|
||||
GetProcessMetadata(id, key string) (interface{}, error) // get previously set metadata from a process
|
||||
SetMetadata(key string, data interface{}) error // set general metadata
|
||||
GetMetadata(key string) (interface{}, error) // get previously set general metadata
|
||||
CreatedAt() time.Time // Time of when this instance has been created
|
||||
Start() // Start all processes that have a "start" order
|
||||
Stop() // Stop all running process but keep their "start" order
|
||||
AddProcess(config *app.Config) error // Add a new process
|
||||
GetProcessIDs(idpattern, refpattern string) []string // Get a list of process IDs based on patterns for ID and reference
|
||||
DeleteProcess(id string) error // Delete a process
|
||||
UpdateProcess(id string, config *app.Config) error // Update a process
|
||||
StartProcess(id string) error // Start a process
|
||||
StopProcess(id string) error // Stop a process
|
||||
RestartProcess(id string) error // Restart a process
|
||||
ReloadProcess(id string) error // Reload a process
|
||||
GetProcess(id string) (*app.Process, error) // Get a process
|
||||
GetProcessState(id string) (*app.State, error) // Get the state of a process
|
||||
GetProcessLog(id string) (*app.Log, error) // Get the logs of a process
|
||||
GetPlayout(id, inputid string) (string, error) // Get the URL of the playout API for a process
|
||||
Probe(id string) app.Probe // Probe a process
|
||||
Skills() skills.Skills // Get the ffmpeg skills
|
||||
ReloadSkills() error // Reload the ffmpeg skills
|
||||
SetProcessMetadata(id, key string, data interface{}) error // Set metatdata to a process
|
||||
GetProcessMetadata(id, key string) (interface{}, error) // Get previously set metadata from a process
|
||||
SetMetadata(key string, data interface{}) error // Set general metadata
|
||||
GetMetadata(key string) (interface{}, error) // Get previously set general metadata
|
||||
}
|
||||
|
||||
// Config is the required configuration for a new restreamer instance.
|
||||
@ -1349,6 +1349,8 @@ func (r *restream) GetPlayout(id, inputid string) (string, error) {
|
||||
return "127.0.0.1:" + strconv.Itoa(port), nil
|
||||
}
|
||||
|
||||
var ErrMetadataKeyNotFound = errors.New("unknown key")
|
||||
|
||||
func (r *restream) SetProcessMetadata(id, key string, data interface{}) error {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
@ -1395,11 +1397,11 @@ func (r *restream) GetProcessMetadata(id, key string) (interface{}, error) {
|
||||
}
|
||||
|
||||
data, ok := task.metadata[key]
|
||||
if ok {
|
||||
return data, nil
|
||||
if !ok {
|
||||
return nil, ErrMetadataKeyNotFound
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (r *restream) SetMetadata(key string, data interface{}) error {
|
||||
@ -1438,9 +1440,9 @@ func (r *restream) GetMetadata(key string) (interface{}, error) {
|
||||
}
|
||||
|
||||
data, ok := r.metadata[key]
|
||||
if ok {
|
||||
return data, nil
|
||||
if !ok {
|
||||
return nil, ErrMetadataKeyNotFound
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
return data, nil
|
||||
}
|
||||
|
||||
8
vendor/github.com/99designs/gqlgen/codegen/config/config.go
generated
vendored
8
vendor/github.com/99designs/gqlgen/codegen/config/config.go
generated
vendored
@ -1,6 +1,7 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@ -11,7 +12,7 @@ import (
|
||||
"github.com/99designs/gqlgen/internal/code"
|
||||
"github.com/vektah/gqlparser/v2"
|
||||
"github.com/vektah/gqlparser/v2/ast"
|
||||
"gopkg.in/yaml.v2"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
@ -102,7 +103,10 @@ func LoadConfig(filename string) (*Config, error) {
|
||||
return nil, fmt.Errorf("unable to read config: %w", err)
|
||||
}
|
||||
|
||||
if err := yaml.UnmarshalStrict(b, config); err != nil {
|
||||
dec := yaml.NewDecoder(bytes.NewReader(b))
|
||||
dec.KnownFields(true)
|
||||
|
||||
if err := dec.Decode(config); err != nil {
|
||||
return nil, fmt.Errorf("unable to parse config: %w", err)
|
||||
}
|
||||
|
||||
|
||||
9
vendor/github.com/99designs/gqlgen/graphql/errcode/codes.go
generated
vendored
9
vendor/github.com/99designs/gqlgen/graphql/errcode/codes.go
generated
vendored
@ -29,12 +29,13 @@ func RegisterErrorType(code string, kind ErrorKind) {
|
||||
}
|
||||
|
||||
// Set the error code on a given graphql error extension
|
||||
func Set(err *gqlerror.Error, value string) {
|
||||
if err.Extensions == nil {
|
||||
err.Extensions = map[string]interface{}{}
|
||||
func Set(err error, value string) {
|
||||
gqlErr, _ := err.(*gqlerror.Error)
|
||||
if gqlErr.Extensions == nil {
|
||||
gqlErr.Extensions = map[string]interface{}{}
|
||||
}
|
||||
|
||||
err.Extensions["code"] = value
|
||||
gqlErr.Extensions["code"] = value
|
||||
}
|
||||
|
||||
// get the kind of the first non User error, defaults to User if no errors have a custom extension
|
||||
|
||||
21
vendor/github.com/99designs/gqlgen/graphql/executor/executor.go
generated
vendored
21
vendor/github.com/99designs/gqlgen/graphql/executor/executor.go
generated
vendored
@ -72,11 +72,12 @@ func (e *Executor) CreateOperationContext(ctx context.Context, params *graphql.R
|
||||
return rc, gqlerror.List{err}
|
||||
}
|
||||
|
||||
var err *gqlerror.Error
|
||||
var err error
|
||||
rc.Variables, err = validator.VariableValues(e.es.Schema(), rc.Operation, params.Variables)
|
||||
if err != nil {
|
||||
errcode.Set(err, errcode.ValidationFailed)
|
||||
return rc, gqlerror.List{err}
|
||||
gqlErr, _ := err.(*gqlerror.Error)
|
||||
if gqlErr != nil {
|
||||
errcode.Set(gqlErr, errcode.ValidationFailed)
|
||||
return rc, gqlerror.List{gqlErr}
|
||||
}
|
||||
rc.Stats.Validation.End = graphql.Now()
|
||||
|
||||
@ -141,7 +142,7 @@ func (e *Executor) DispatchError(ctx context.Context, list gqlerror.List) *graph
|
||||
return resp
|
||||
}
|
||||
|
||||
func (e *Executor) PresentRecoveredError(ctx context.Context, err interface{}) *gqlerror.Error {
|
||||
func (e *Executor) PresentRecoveredError(ctx context.Context, err interface{}) error {
|
||||
return e.errorPresenter(ctx, e.recoverFunc(ctx, err))
|
||||
}
|
||||
|
||||
@ -173,9 +174,10 @@ func (e *Executor) parseQuery(ctx context.Context, stats *graphql.Stats, query s
|
||||
}
|
||||
|
||||
doc, err := parser.ParseQuery(&ast.Source{Input: query})
|
||||
if err != nil {
|
||||
errcode.Set(err, errcode.ParseFailed)
|
||||
return nil, gqlerror.List{err}
|
||||
gqlErr, _ := err.(*gqlerror.Error)
|
||||
if gqlErr != nil {
|
||||
errcode.Set(gqlErr, errcode.ParseFailed)
|
||||
return nil, gqlerror.List{gqlErr}
|
||||
}
|
||||
stats.Parsing.End = graphql.Now()
|
||||
|
||||
@ -183,8 +185,9 @@ func (e *Executor) parseQuery(ctx context.Context, stats *graphql.Stats, query s
|
||||
|
||||
if len(doc.Operations) == 0 {
|
||||
err = gqlerror.Errorf("no operation provided")
|
||||
gqlErr, _ := err.(*gqlerror.Error)
|
||||
errcode.Set(err, errcode.ValidationFailed)
|
||||
return nil, gqlerror.List{err}
|
||||
return nil, gqlerror.List{gqlErr}
|
||||
}
|
||||
|
||||
listErr := validator.Validate(e.es.Schema(), doc)
|
||||
|
||||
3
vendor/github.com/99designs/gqlgen/graphql/handler/server.go
generated
vendored
3
vendor/github.com/99designs/gqlgen/graphql/handler/server.go
generated
vendored
@ -102,7 +102,8 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
err := s.exec.PresentRecoveredError(r.Context(), err)
|
||||
resp := &graphql.Response{Errors: []*gqlerror.Error{err}}
|
||||
gqlErr, _ := err.(*gqlerror.Error)
|
||||
resp := &graphql.Response{Errors: []*gqlerror.Error{gqlErr}}
|
||||
b, _ := json.Marshal(resp)
|
||||
w.WriteHeader(http.StatusUnprocessableEntity)
|
||||
w.Write(b)
|
||||
|
||||
41
vendor/github.com/99designs/gqlgen/graphql/playground/playground.go
generated
vendored
41
vendor/github.com/99designs/gqlgen/graphql/playground/playground.go
generated
vendored
@ -9,17 +9,19 @@ import (
|
||||
var page = template.Must(template.New("graphiql").Parse(`<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>{{.title}}</title>
|
||||
<link
|
||||
rel="stylesheet"
|
||||
href="https://cdn.jsdelivr.net/npm/graphiql@{{.version}}/graphiql.min.css"
|
||||
integrity="{{.cssSRI}}"
|
||||
crossorigin="anonymous"
|
||||
/>
|
||||
</head>
|
||||
<body style="margin: 0;">
|
||||
<div id="graphiql" style="height: 100vh;"></div>
|
||||
<title>{{.title}}</title>
|
||||
<style>
|
||||
body {
|
||||
height: 100%;
|
||||
margin: 0;
|
||||
width: 100%;
|
||||
overflow: hidden;
|
||||
}
|
||||
|
||||
#graphiql {
|
||||
height: 100vh;
|
||||
}
|
||||
</style>
|
||||
<script
|
||||
src="https://cdn.jsdelivr.net/npm/react@17.0.2/umd/react.production.min.js"
|
||||
integrity="{{.reactSRI}}"
|
||||
@ -30,6 +32,16 @@ var page = template.Must(template.New("graphiql").Parse(`<!DOCTYPE html>
|
||||
integrity="{{.reactDOMSRI}}"
|
||||
crossorigin="anonymous"
|
||||
></script>
|
||||
<link
|
||||
rel="stylesheet"
|
||||
href="https://cdn.jsdelivr.net/npm/graphiql@{{.version}}/graphiql.min.css"
|
||||
integrity="{{.cssSRI}}"
|
||||
crossorigin="anonymous"
|
||||
/>
|
||||
</head>
|
||||
<body>
|
||||
<div id="graphiql">Loading...</div>
|
||||
|
||||
<script
|
||||
src="https://cdn.jsdelivr.net/npm/graphiql@{{.version}}/graphiql.min.js"
|
||||
integrity="{{.jsSRI}}"
|
||||
@ -50,8 +62,7 @@ var page = template.Must(template.New("graphiql").Parse(`<!DOCTYPE html>
|
||||
ReactDOM.render(
|
||||
React.createElement(GraphiQL, {
|
||||
fetcher: fetcher,
|
||||
tabs: true,
|
||||
headerEditorEnabled: true,
|
||||
isHeadersEditorEnabled: true,
|
||||
shouldPersistHeaders: true
|
||||
}),
|
||||
document.getElementById('graphiql'),
|
||||
@ -70,9 +81,9 @@ func Handler(title string, endpoint string) http.HandlerFunc {
|
||||
"endpoint": endpoint,
|
||||
"endpointIsAbsolute": endpointHasScheme(endpoint),
|
||||
"subscriptionEndpoint": getSubscriptionEndpoint(endpoint),
|
||||
"version": "1.8.2",
|
||||
"cssSRI": "sha256-CDHiHbYkDSUc3+DS2TU89I9e2W3sJRUOqSmp7JC+LBw=",
|
||||
"jsSRI": "sha256-X8vqrqZ6Rvvoq4tvRVM3LoMZCQH8jwW92tnX0iPiHPc=",
|
||||
"version": "2.0.1",
|
||||
"cssSRI": "sha256-hYUgpHapGug0ucdB5kG0zSipubcQOJcGjclIZke2rl8=",
|
||||
"jsSRI": "sha256-jMXGO5+Y4OhcHPSR34jpzpzlz4OZTlxcvaDXSWmUMRo=",
|
||||
"reactSRI": "sha256-Ipu/TQ50iCCVZBUsZyNJfxrDk0E2yhaEIz0vqI+kFG8=",
|
||||
"reactDOMSRI": "sha256-nbMykgB6tsOFJ7OdVmPpdqMFVk4ZsqWocT6issAPUF0=",
|
||||
})
|
||||
|
||||
2
vendor/github.com/99designs/gqlgen/graphql/version.go
generated
vendored
2
vendor/github.com/99designs/gqlgen/graphql/version.go
generated
vendored
@ -1,3 +1,3 @@
|
||||
package graphql
|
||||
|
||||
const Version = "v0.17.15"
|
||||
const Version = "v0.17.16"
|
||||
|
||||
1
vendor/github.com/caddyserver/certmagic/.gitignore
generated
vendored
Normal file
1
vendor/github.com/caddyserver/certmagic/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
_gitignore/
|
||||
526
vendor/github.com/caddyserver/certmagic/README.md
generated
vendored
Normal file
526
vendor/github.com/caddyserver/certmagic/README.md
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
417
vendor/github.com/caddyserver/certmagic/account.go
generated
vendored
Normal file
417
vendor/github.com/caddyserver/certmagic/account.go
generated
vendored
Normal file
@ -0,0 +1,417 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/mholt/acmez/acme"
|
||||
)
|
||||
|
||||
// getAccount either loads or creates a new account, depending on if
|
||||
// an account can be found in storage for the given CA + email combo.
|
||||
func (am *ACMEIssuer) getAccount(ctx context.Context, ca, email string) (acme.Account, error) {
|
||||
acct, err := am.loadAccount(ctx, ca, email)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return am.newAccount(email)
|
||||
}
|
||||
return acct, err
|
||||
}
|
||||
|
||||
// loadAccount loads an account from storage, but does not create a new one.
|
||||
func (am *ACMEIssuer) loadAccount(ctx context.Context, ca, email string) (acme.Account, error) {
|
||||
regBytes, err := am.config.Storage.Load(ctx, am.storageKeyUserReg(ca, email))
|
||||
if err != nil {
|
||||
return acme.Account{}, err
|
||||
}
|
||||
keyBytes, err := am.config.Storage.Load(ctx, am.storageKeyUserPrivateKey(ca, email))
|
||||
if err != nil {
|
||||
return acme.Account{}, err
|
||||
}
|
||||
|
||||
var acct acme.Account
|
||||
err = json.Unmarshal(regBytes, &acct)
|
||||
if err != nil {
|
||||
return acct, err
|
||||
}
|
||||
acct.PrivateKey, err = PEMDecodePrivateKey(keyBytes)
|
||||
if err != nil {
|
||||
return acct, fmt.Errorf("could not decode account's private key: %v", err)
|
||||
}
|
||||
|
||||
return acct, nil
|
||||
}
|
||||
|
||||
// newAccount generates a new private key for a new ACME account, but
|
||||
// it does not register or save the account.
|
||||
func (*ACMEIssuer) newAccount(email string) (acme.Account, error) {
|
||||
var acct acme.Account
|
||||
if email != "" {
|
||||
acct.Contact = []string{"mailto:" + email} // TODO: should we abstract the contact scheme?
|
||||
}
|
||||
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return acct, fmt.Errorf("generating private key: %v", err)
|
||||
}
|
||||
acct.PrivateKey = privateKey
|
||||
return acct, nil
|
||||
}
|
||||
|
||||
// GetAccount first tries loading the account with the associated private key from storage.
|
||||
// If it does not exist in storage, it will be retrieved from the ACME server and added to storage.
|
||||
// The account must already exist; it does not create a new account.
|
||||
func (am *ACMEIssuer) GetAccount(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) {
|
||||
account, err := am.loadAccountByKey(ctx, privateKeyPEM)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
account, err = am.lookUpAccount(ctx, privateKeyPEM)
|
||||
}
|
||||
return account, err
|
||||
}
|
||||
|
||||
// loadAccountByKey loads the account with the given private key from storage, if it exists.
|
||||
// If it does not exist, an error of type fs.ErrNotExist is returned. This is not very efficient
|
||||
// for lots of accounts.
|
||||
func (am *ACMEIssuer) loadAccountByKey(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) {
|
||||
accountList, err := am.config.Storage.List(ctx, am.storageKeyUsersPrefix(am.CA), false)
|
||||
if err != nil {
|
||||
return acme.Account{}, err
|
||||
}
|
||||
for _, accountFolderKey := range accountList {
|
||||
email := path.Base(accountFolderKey)
|
||||
keyBytes, err := am.config.Storage.Load(ctx, am.storageKeyUserPrivateKey(am.CA, email))
|
||||
if err != nil {
|
||||
return acme.Account{}, err
|
||||
}
|
||||
if bytes.Equal(bytes.TrimSpace(keyBytes), bytes.TrimSpace(privateKeyPEM)) {
|
||||
return am.loadAccount(ctx, am.CA, email)
|
||||
}
|
||||
}
|
||||
return acme.Account{}, fs.ErrNotExist
|
||||
}
|
||||
|
||||
// lookUpAccount looks up the account associated with privateKeyPEM from the ACME server.
|
||||
// If the account is found by the server, it will be saved to storage and returned.
|
||||
func (am *ACMEIssuer) lookUpAccount(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) {
|
||||
client, err := am.newACMEClient(false)
|
||||
if err != nil {
|
||||
return acme.Account{}, fmt.Errorf("creating ACME client: %v", err)
|
||||
}
|
||||
|
||||
privateKey, err := PEMDecodePrivateKey([]byte(privateKeyPEM))
|
||||
if err != nil {
|
||||
return acme.Account{}, fmt.Errorf("decoding private key: %v", err)
|
||||
}
|
||||
|
||||
// look up the account
|
||||
account := acme.Account{PrivateKey: privateKey}
|
||||
account, err = client.GetAccount(ctx, account)
|
||||
if err != nil {
|
||||
return acme.Account{}, fmt.Errorf("looking up account with server: %v", err)
|
||||
}
|
||||
|
||||
// save the account details to storage
|
||||
err = am.saveAccount(ctx, client.Directory, account)
|
||||
if err != nil {
|
||||
return account, fmt.Errorf("could not save account to storage: %v", err)
|
||||
}
|
||||
|
||||
return account, nil
|
||||
}
|
||||
|
||||
// saveAccount persists an ACME account's info and private key to storage.
|
||||
// It does NOT register the account via ACME or prompt the user.
|
||||
func (am *ACMEIssuer) saveAccount(ctx context.Context, ca string, account acme.Account) error {
|
||||
regBytes, err := json.MarshalIndent(account, "", "\t")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
keyBytes, err := PEMEncodePrivateKey(account.PrivateKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// extract primary contact (email), without scheme (e.g. "mailto:")
|
||||
primaryContact := getPrimaryContact(account)
|
||||
all := []keyValue{
|
||||
{
|
||||
key: am.storageKeyUserReg(ca, primaryContact),
|
||||
value: regBytes,
|
||||
},
|
||||
{
|
||||
key: am.storageKeyUserPrivateKey(ca, primaryContact),
|
||||
value: keyBytes,
|
||||
},
|
||||
}
|
||||
return storeTx(ctx, am.config.Storage, all)
|
||||
}
|
||||
|
||||
// setEmail does everything it can to obtain an email address
|
||||
// from the user within the scope of memory and storage to use
|
||||
// for ACME TLS. If it cannot get an email address, it does nothing
|
||||
// (If user is prompted, it will warn the user of
|
||||
// the consequences of an empty email.) This function MAY prompt
|
||||
// the user for input. If allowPrompts is false, the user
|
||||
// will NOT be prompted and an empty email may be returned.
|
||||
func (am *ACMEIssuer) setEmail(ctx context.Context, allowPrompts bool) error {
|
||||
leEmail := am.Email
|
||||
|
||||
// First try package default email, or a discovered email address
|
||||
if leEmail == "" {
|
||||
leEmail = DefaultACME.Email
|
||||
}
|
||||
if leEmail == "" {
|
||||
discoveredEmailMu.Lock()
|
||||
leEmail = discoveredEmail
|
||||
discoveredEmailMu.Unlock()
|
||||
}
|
||||
|
||||
// Then try to get most recent user email from storage
|
||||
var gotRecentEmail bool
|
||||
if leEmail == "" {
|
||||
leEmail, gotRecentEmail = am.mostRecentAccountEmail(ctx, am.CA)
|
||||
}
|
||||
if !gotRecentEmail && leEmail == "" && allowPrompts {
|
||||
// Looks like there is no email address readily available,
|
||||
// so we will have to ask the user if we can.
|
||||
var err error
|
||||
leEmail, err = am.promptUserForEmail()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// User might have just signified their agreement
|
||||
am.mu.Lock()
|
||||
am.agreed = DefaultACME.Agreed
|
||||
am.mu.Unlock()
|
||||
}
|
||||
|
||||
// Save the email for later and ensure it is consistent
|
||||
// for repeated use; then update cfg with the email
|
||||
leEmail = strings.TrimSpace(strings.ToLower(leEmail))
|
||||
discoveredEmailMu.Lock()
|
||||
if discoveredEmail == "" {
|
||||
discoveredEmail = leEmail
|
||||
}
|
||||
discoveredEmailMu.Unlock()
|
||||
|
||||
// The unexported email field is the one we use
|
||||
// because we have thread-safe control over it
|
||||
am.mu.Lock()
|
||||
am.email = leEmail
|
||||
am.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// promptUserForEmail prompts the user for an email address
|
||||
// and returns the email address they entered (which could
|
||||
// be the empty string). If no error is returned, then Agreed
|
||||
// will also be set to true, since continuing through the
|
||||
// prompt signifies agreement.
|
||||
func (am *ACMEIssuer) promptUserForEmail() (string, error) {
|
||||
// prompt the user for an email address and terms agreement
|
||||
reader := bufio.NewReader(stdin)
|
||||
am.promptUserAgreement("")
|
||||
fmt.Println("Please enter your email address to signify agreement and to be notified")
|
||||
fmt.Println("in case of issues. You can leave it blank, but we don't recommend it.")
|
||||
fmt.Print(" Email address: ")
|
||||
leEmail, err := reader.ReadString('\n')
|
||||
if err != nil && err != io.EOF {
|
||||
return "", fmt.Errorf("reading email address: %v", err)
|
||||
}
|
||||
leEmail = strings.TrimSpace(leEmail)
|
||||
DefaultACME.Agreed = true
|
||||
return leEmail, nil
|
||||
}
|
||||
|
||||
// promptUserAgreement simply outputs the standard user
|
||||
// agreement prompt with the given agreement URL.
|
||||
// It outputs a newline after the message.
|
||||
func (am *ACMEIssuer) promptUserAgreement(agreementURL string) {
|
||||
userAgreementPrompt := `Your sites will be served over HTTPS automatically using an automated CA.
|
||||
By continuing, you agree to the CA's terms of service`
|
||||
if agreementURL == "" {
|
||||
fmt.Printf("\n\n%s.\n", userAgreementPrompt)
|
||||
return
|
||||
}
|
||||
fmt.Printf("\n\n%s at:\n %s\n", userAgreementPrompt, agreementURL)
|
||||
}
|
||||
|
||||
// askUserAgreement prompts the user to agree to the agreement
|
||||
// at the given agreement URL via stdin. It returns whether the
|
||||
// user agreed or not.
|
||||
func (am *ACMEIssuer) askUserAgreement(agreementURL string) bool {
|
||||
am.promptUserAgreement(agreementURL)
|
||||
fmt.Print("Do you agree to the terms? (y/n): ")
|
||||
|
||||
reader := bufio.NewReader(stdin)
|
||||
answer, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
answer = strings.ToLower(strings.TrimSpace(answer))
|
||||
|
||||
return answer == "y" || answer == "yes"
|
||||
}
|
||||
|
||||
func storageKeyACMECAPrefix(issuerKey string) string {
|
||||
return path.Join(prefixACME, StorageKeys.Safe(issuerKey))
|
||||
}
|
||||
|
||||
func (am *ACMEIssuer) storageKeyCAPrefix(caURL string) string {
|
||||
return storageKeyACMECAPrefix(am.issuerKey(caURL))
|
||||
}
|
||||
|
||||
func (am *ACMEIssuer) storageKeyUsersPrefix(caURL string) string {
|
||||
return path.Join(am.storageKeyCAPrefix(caURL), "users")
|
||||
}
|
||||
|
||||
func (am *ACMEIssuer) storageKeyUserPrefix(caURL, email string) string {
|
||||
if email == "" {
|
||||
email = emptyEmail
|
||||
}
|
||||
return path.Join(am.storageKeyUsersPrefix(caURL), StorageKeys.Safe(email))
|
||||
}
|
||||
|
||||
func (am *ACMEIssuer) storageKeyUserReg(caURL, email string) string {
|
||||
return am.storageSafeUserKey(caURL, email, "registration", ".json")
|
||||
}
|
||||
|
||||
func (am *ACMEIssuer) storageKeyUserPrivateKey(caURL, email string) string {
|
||||
return am.storageSafeUserKey(caURL, email, "private", ".key")
|
||||
}
|
||||
|
||||
// storageSafeUserKey returns a key for the given email, with the default
|
||||
// filename, and the filename ending in the given extension.
|
||||
func (am *ACMEIssuer) storageSafeUserKey(ca, email, defaultFilename, extension string) string {
|
||||
if email == "" {
|
||||
email = emptyEmail
|
||||
}
|
||||
email = strings.ToLower(email)
|
||||
filename := am.emailUsername(email)
|
||||
if filename == "" {
|
||||
filename = defaultFilename
|
||||
}
|
||||
filename = StorageKeys.Safe(filename)
|
||||
return path.Join(am.storageKeyUserPrefix(ca, email), filename+extension)
|
||||
}
|
||||
|
||||
// emailUsername returns the username portion of an email address (part before
|
||||
// '@') or the original input if it can't find the "@" symbol.
|
||||
func (*ACMEIssuer) emailUsername(email string) string {
|
||||
at := strings.Index(email, "@")
|
||||
if at == -1 {
|
||||
return email
|
||||
} else if at == 0 {
|
||||
return email[1:]
|
||||
}
|
||||
return email[:at]
|
||||
}
|
||||
|
||||
// mostRecentAccountEmail finds the most recently-written account file
|
||||
// in storage. Since this is part of a complex sequence to get a user
|
||||
// account, errors here are discarded to simplify code flow in
|
||||
// the caller, and errors are not important here anyway.
|
||||
func (am *ACMEIssuer) mostRecentAccountEmail(ctx context.Context, caURL string) (string, bool) {
|
||||
accountList, err := am.config.Storage.List(ctx, am.storageKeyUsersPrefix(caURL), false)
|
||||
if err != nil || len(accountList) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
// get all the key infos ahead of sorting, because
|
||||
// we might filter some out
|
||||
stats := make(map[string]KeyInfo)
|
||||
for i := 0; i < len(accountList); i++ {
|
||||
u := accountList[i]
|
||||
keyInfo, err := am.config.Storage.Stat(ctx, u)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if keyInfo.IsTerminal {
|
||||
// I found a bug when macOS created a .DS_Store file in
|
||||
// the users folder, and CertMagic tried to use that as
|
||||
// the user email because it was newer than the other one
|
||||
// which existed... sure, this isn't a perfect fix but
|
||||
// frankly one's OS shouldn't mess with the data folder
|
||||
// in the first place.
|
||||
accountList = append(accountList[:i], accountList[i+1:]...)
|
||||
i--
|
||||
continue
|
||||
}
|
||||
stats[u] = keyInfo
|
||||
}
|
||||
|
||||
sort.Slice(accountList, func(i, j int) bool {
|
||||
iInfo := stats[accountList[i]]
|
||||
jInfo := stats[accountList[j]]
|
||||
return jInfo.Modified.Before(iInfo.Modified)
|
||||
})
|
||||
|
||||
if len(accountList) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
account, err := am.getAccount(ctx, caURL, path.Base(accountList[0]))
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
|
||||
return getPrimaryContact(account), true
|
||||
}
|
||||
|
||||
// getPrimaryContact returns the first contact on the account (if any)
|
||||
// without the scheme. (I guess we assume an email address.)
|
||||
func getPrimaryContact(account acme.Account) string {
|
||||
// TODO: should this be abstracted with some lower-level helper?
|
||||
var primaryContact string
|
||||
if len(account.Contact) > 0 {
|
||||
primaryContact = account.Contact[0]
|
||||
if idx := strings.Index(primaryContact, ":"); idx >= 0 {
|
||||
primaryContact = primaryContact[idx+1:]
|
||||
}
|
||||
}
|
||||
return primaryContact
|
||||
}
|
||||
|
||||
// When an email address is not explicitly specified, we can remember
|
||||
// the last one we discovered to avoid having to ask again later.
|
||||
// (We used to store this in DefaultACME.Email but it was racey; see #127)
|
||||
var (
|
||||
discoveredEmail string
|
||||
discoveredEmailMu sync.Mutex
|
||||
)
|
||||
|
||||
// stdin is used to read the user's input if prompted;
|
||||
// this is changed by tests during tests.
|
||||
var stdin = io.ReadWriter(os.Stdin)
|
||||
|
||||
// The name of the folder for accounts where the email
|
||||
// address was not provided; default 'username' if you will,
|
||||
// but only for local/storage use, not with the CA.
|
||||
const emptyEmail = "default"
|
||||
347
vendor/github.com/caddyserver/certmagic/acmeclient.go
generated
vendored
Normal file
347
vendor/github.com/caddyserver/certmagic/acmeclient.go
generated
vendored
Normal file
@ -0,0 +1,347 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
weakrand "math/rand"
|
||||
"net"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/mholt/acmez"
|
||||
"github.com/mholt/acmez/acme"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func init() {
|
||||
weakrand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
// acmeClient holds state necessary to perform ACME operations
|
||||
// for certificate management with an ACME account. Call
|
||||
// ACMEIssuer.newACMEClientWithAccount() to get a valid one.
|
||||
type acmeClient struct {
|
||||
iss *ACMEIssuer
|
||||
acmeClient *acmez.Client
|
||||
account acme.Account
|
||||
}
|
||||
|
||||
// newACMEClientWithAccount creates an ACME client ready to use with an account, including
|
||||
// loading one from storage or registering a new account with the CA if necessary. If
|
||||
// useTestCA is true, am.TestCA will be used if set; otherwise, the primary CA will be used.
|
||||
func (iss *ACMEIssuer) newACMEClientWithAccount(ctx context.Context, useTestCA, interactive bool) (*acmeClient, error) {
|
||||
// first, get underlying ACME client
|
||||
client, err := iss.newACMEClient(useTestCA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// look up or create the ACME account
|
||||
var account acme.Account
|
||||
if iss.AccountKeyPEM != "" {
|
||||
account, err = iss.GetAccount(ctx, []byte(iss.AccountKeyPEM))
|
||||
} else {
|
||||
account, err = iss.getAccount(ctx, client.Directory, iss.getEmail())
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting ACME account: %v", err)
|
||||
}
|
||||
|
||||
// register account if it is new
|
||||
if account.Status == "" {
|
||||
if iss.NewAccountFunc != nil {
|
||||
account, err = iss.NewAccountFunc(ctx, iss, account)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("account pre-registration callback: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// agree to terms
|
||||
if interactive {
|
||||
if !iss.isAgreed() {
|
||||
var termsURL string
|
||||
dir, err := client.GetDirectory(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting directory: %w", err)
|
||||
}
|
||||
if dir.Meta != nil {
|
||||
termsURL = dir.Meta.TermsOfService
|
||||
}
|
||||
if termsURL != "" {
|
||||
agreed := iss.askUserAgreement(termsURL)
|
||||
if !agreed {
|
||||
return nil, fmt.Errorf("user must agree to CA terms")
|
||||
}
|
||||
iss.mu.Lock()
|
||||
iss.agreed = agreed
|
||||
iss.mu.Unlock()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// can't prompt a user who isn't there; they should
|
||||
// have reviewed the terms beforehand
|
||||
iss.mu.Lock()
|
||||
iss.agreed = true
|
||||
iss.mu.Unlock()
|
||||
}
|
||||
account.TermsOfServiceAgreed = iss.isAgreed()
|
||||
|
||||
// associate account with external binding, if configured
|
||||
if iss.ExternalAccount != nil {
|
||||
err := account.SetExternalAccountBinding(ctx, client.Client, *iss.ExternalAccount)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// create account
|
||||
account, err = client.NewAccount(ctx, account)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("registering account %v with server: %w", account.Contact, err)
|
||||
}
|
||||
|
||||
// persist the account to storage
|
||||
err = iss.saveAccount(ctx, client.Directory, account)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not save account %v: %v", account.Contact, err)
|
||||
}
|
||||
}
|
||||
|
||||
c := &acmeClient{
|
||||
iss: iss,
|
||||
acmeClient: client,
|
||||
account: account,
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// newACMEClient creates a new underlying ACME client using the settings in am,
|
||||
// independent of any particular ACME account. If useTestCA is true, am.TestCA
|
||||
// will be used if it is set; otherwise, the primary CA will be used.
|
||||
func (iss *ACMEIssuer) newACMEClient(useTestCA bool) (*acmez.Client, error) {
|
||||
// ensure defaults are filled in
|
||||
var caURL string
|
||||
if useTestCA {
|
||||
caURL = iss.TestCA
|
||||
}
|
||||
if caURL == "" {
|
||||
caURL = iss.CA
|
||||
}
|
||||
if caURL == "" {
|
||||
caURL = DefaultACME.CA
|
||||
}
|
||||
certObtainTimeout := iss.CertObtainTimeout
|
||||
if certObtainTimeout == 0 {
|
||||
certObtainTimeout = DefaultACME.CertObtainTimeout
|
||||
}
|
||||
|
||||
// ensure endpoint is secure (assume HTTPS if scheme is missing)
|
||||
if !strings.Contains(caURL, "://") {
|
||||
caURL = "https://" + caURL
|
||||
}
|
||||
u, err := url.Parse(caURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if u.Scheme != "https" && !isLoopback(u.Host) && !isInternal(u.Host) {
|
||||
return nil, fmt.Errorf("%s: insecure CA URL (HTTPS required)", caURL)
|
||||
}
|
||||
|
||||
client := &acmez.Client{
|
||||
Client: &acme.Client{
|
||||
Directory: caURL,
|
||||
PollTimeout: certObtainTimeout,
|
||||
UserAgent: buildUAString(),
|
||||
HTTPClient: iss.httpClient,
|
||||
},
|
||||
ChallengeSolvers: make(map[string]acmez.Solver),
|
||||
}
|
||||
if iss.Logger != nil {
|
||||
client.Logger = iss.Logger.Named("acme_client")
|
||||
}
|
||||
|
||||
// configure challenges (most of the time, DNS challenge is
|
||||
// exclusive of other ones because it is usually only used
|
||||
// in situations where the default challenges would fail)
|
||||
if iss.DNS01Solver == nil {
|
||||
// enable HTTP-01 challenge
|
||||
if !iss.DisableHTTPChallenge {
|
||||
useHTTPPort := HTTPChallengePort
|
||||
if HTTPPort > 0 && HTTPPort != HTTPChallengePort {
|
||||
useHTTPPort = HTTPPort
|
||||
}
|
||||
if iss.AltHTTPPort > 0 {
|
||||
useHTTPPort = iss.AltHTTPPort
|
||||
}
|
||||
client.ChallengeSolvers[acme.ChallengeTypeHTTP01] = distributedSolver{
|
||||
storage: iss.config.Storage,
|
||||
storageKeyIssuerPrefix: iss.storageKeyCAPrefix(client.Directory),
|
||||
solver: &httpSolver{
|
||||
acmeIssuer: iss,
|
||||
address: net.JoinHostPort(iss.ListenHost, strconv.Itoa(useHTTPPort)),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// enable TLS-ALPN-01 challenge
|
||||
if !iss.DisableTLSALPNChallenge {
|
||||
useTLSALPNPort := TLSALPNChallengePort
|
||||
if HTTPSPort > 0 && HTTPSPort != TLSALPNChallengePort {
|
||||
useTLSALPNPort = HTTPSPort
|
||||
}
|
||||
if iss.AltTLSALPNPort > 0 {
|
||||
useTLSALPNPort = iss.AltTLSALPNPort
|
||||
}
|
||||
client.ChallengeSolvers[acme.ChallengeTypeTLSALPN01] = distributedSolver{
|
||||
storage: iss.config.Storage,
|
||||
storageKeyIssuerPrefix: iss.storageKeyCAPrefix(client.Directory),
|
||||
solver: &tlsALPNSolver{
|
||||
config: iss.config,
|
||||
address: net.JoinHostPort(iss.ListenHost, strconv.Itoa(useTLSALPNPort)),
|
||||
},
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// use DNS challenge exclusively
|
||||
client.ChallengeSolvers[acme.ChallengeTypeDNS01] = iss.DNS01Solver
|
||||
}
|
||||
|
||||
// wrap solvers in our wrapper so that we can keep track of challenge
|
||||
// info: this is useful for solving challenges globally as a process;
|
||||
// for example, usually there is only one process that can solve the
|
||||
// HTTP and TLS-ALPN challenges, and only one server in that process
|
||||
// that can bind the necessary port(s), so if a server listening on
|
||||
// a different port needed a certificate, it would have to know about
|
||||
// the other server listening on that port, and somehow convey its
|
||||
// challenge info or share its config, but this isn't always feasible;
|
||||
// what the wrapper does is it accesses a global challenge memory so
|
||||
// that unrelated servers in this process can all solve each others'
|
||||
// challenges without having to know about each other - Caddy's admin
|
||||
// endpoint uses this functionality since it and the HTTP/TLS modules
|
||||
// do not know about each other
|
||||
// (doing this here in a separate loop ensures that even if we expose
|
||||
// solver config to users later, we will even wrap their own solvers)
|
||||
for name, solver := range client.ChallengeSolvers {
|
||||
client.ChallengeSolvers[name] = solverWrapper{solver}
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (c *acmeClient) throttle(ctx context.Context, names []string) error {
|
||||
email := c.iss.getEmail()
|
||||
|
||||
// throttling is scoped to CA + account email
|
||||
rateLimiterKey := c.acmeClient.Directory + "," + email
|
||||
rateLimitersMu.Lock()
|
||||
rl, ok := rateLimiters[rateLimiterKey]
|
||||
if !ok {
|
||||
rl = NewRateLimiter(RateLimitEvents, RateLimitEventsWindow)
|
||||
rateLimiters[rateLimiterKey] = rl
|
||||
// TODO: stop rate limiter when it is garbage-collected...
|
||||
}
|
||||
rateLimitersMu.Unlock()
|
||||
if c.iss.Logger != nil {
|
||||
c.iss.Logger.Info("waiting on internal rate limiter",
|
||||
zap.Strings("identifiers", names),
|
||||
zap.String("ca", c.acmeClient.Directory),
|
||||
zap.String("account", email),
|
||||
)
|
||||
}
|
||||
err := rl.Wait(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c.iss.Logger != nil {
|
||||
c.iss.Logger.Info("done waiting on internal rate limiter",
|
||||
zap.Strings("identifiers", names),
|
||||
zap.String("ca", c.acmeClient.Directory),
|
||||
zap.String("account", email),
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// usingTestCA reports whether this client is pointed at the issuer's
// configured test/staging CA directory. It returns false when no
// TestCA is configured at all.
func (c *acmeClient) usingTestCA() bool {
	return c.iss.TestCA != "" && c.acmeClient.Directory == c.iss.TestCA
}
|
||||
|
||||
// revoke asks the CA to revoke cert, authorizing the request with the
// account's private key. reason is a revocation reason code (see
// RFC 5280 §5.3.1); its validity is enforced by the underlying client.
func (c *acmeClient) revoke(ctx context.Context, cert *x509.Certificate, reason int) error {
	return c.acmeClient.RevokeCertificate(ctx, c.account,
		cert, c.account.PrivateKey, reason)
}
|
||||
|
||||
func buildUAString() string {
|
||||
ua := "CertMagic"
|
||||
if UserAgent != "" {
|
||||
ua = UserAgent + " " + ua
|
||||
}
|
||||
return ua
|
||||
}
|
||||
|
||||
// These internal rate limits are designed to prevent accidentally
// firehosing a CA's ACME endpoints. They are not intended to
// replace or replicate the CA's actual rate limits.
//
// Let's Encrypt's rate limits can be found here:
// https://letsencrypt.org/docs/rate-limits/
//
// Currently (as of December 2019), Let's Encrypt's most relevant
// rate limit for large deployments is 300 new orders per account
// per 3 hours (on average, or best case, that's about 1 every 36
// seconds, or 2 every 72 seconds, etc.); but it's not reasonable
// to try to assume that our internal state is the same as the CA's
// (due to process restarts, config changes, failed validations,
// etc.) and ultimately, only the CA's actual rate limiter is the
// authority. Thus, our own rate limiters do not attempt to enforce
// external rate limits. Doing so causes problems when the domains
// are not in our control (i.e. serving customer sites) and/or lots
// of domains fail validation: they clog our internal rate limiter
// and nearly starve out (or at least slow down) the other domains
// that need certificates. Failed transactions are already retried
// with exponential backoff, so adding in rate limiting can slow
// things down even more.
//
// Instead, the point of our internal rate limiter is to avoid
// hammering the CA's endpoint when there are thousands or even
// millions of certificates under management. Our goal is to
// allow small bursts in a relatively short timeframe so as to
// not block any one domain for too long, without unleashing
// thousands of requests to the CA at once.
var (
	// rateLimiters is keyed by "CA directory,account email"; entries
	// are created on demand (see acmeClient.throttle) and guarded by
	// rateLimitersMu.
	rateLimiters   = make(map[string]*RingBufferRateLimiter)
	rateLimitersMu sync.RWMutex

	// RateLimitEvents is how many new events can be allowed
	// in RateLimitEventsWindow.
	RateLimitEvents = 10

	// RateLimitEventsWindow is the size of the sliding
	// window that throttles events.
	RateLimitEventsWindow = 10 * time.Second
)
|
||||
|
||||
// Some default values passed down to the underlying ACME client.
var (
	// UserAgent, if set, is prepended to the product token in the
	// User-Agent header sent to the CA; see buildUAString.
	UserAgent string

	// HTTPTimeout bounds each HTTP request made to the CA.
	HTTPTimeout = 30 * time.Second
)
|
||||
529
vendor/github.com/caddyserver/certmagic/acmeissuer.go
generated
vendored
Normal file
529
vendor/github.com/caddyserver/certmagic/acmeissuer.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
187
vendor/github.com/caddyserver/certmagic/async.go
generated
vendored
Normal file
187
vendor/github.com/caddyserver/certmagic/async.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"log"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// jm is the package-wide job manager, capping how many maintenance
// jobs may run concurrently.
var jm = &jobManager{maxConcurrentJobs: 1000}

// jobManager runs queued jobs with bounded concurrency and optional
// per-name de-duplication.
type jobManager struct {
	mu                sync.Mutex // guards all fields below
	maxConcurrentJobs int        // upper bound on concurrent workers
	activeWorkers     int        // number of worker goroutines running
	queue             []namedJob // FIFO of jobs waiting to run
	names             map[string]struct{} // names currently queued or running
}

// namedJob pairs a job with its (possibly empty) de-duplication name
// and the logger used to report its failure.
type namedJob struct {
	name   string
	job    func() error
	logger *zap.Logger
}
|
||||
|
||||
// Submit enqueues the given job with the given name. If name is non-empty
|
||||
// and a job with the same name is already enqueued or running, this is a
|
||||
// no-op. If name is empty, no duplicate prevention will occur. The job
|
||||
// manager will then run this job as soon as it is able.
|
||||
func (jm *jobManager) Submit(logger *zap.Logger, name string, job func() error) {
|
||||
jm.mu.Lock()
|
||||
defer jm.mu.Unlock()
|
||||
if jm.names == nil {
|
||||
jm.names = make(map[string]struct{})
|
||||
}
|
||||
if name != "" {
|
||||
// prevent duplicate jobs
|
||||
if _, ok := jm.names[name]; ok {
|
||||
return
|
||||
}
|
||||
jm.names[name] = struct{}{}
|
||||
}
|
||||
jm.queue = append(jm.queue, namedJob{name, job, logger})
|
||||
if jm.activeWorkers < jm.maxConcurrentJobs {
|
||||
jm.activeWorkers++
|
||||
go jm.worker()
|
||||
}
|
||||
}
|
||||
|
||||
func (jm *jobManager) worker() {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
buf := make([]byte, stackTraceBufferSize)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
log.Printf("panic: certificate worker: %v\n%s", err, buf)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
jm.mu.Lock()
|
||||
if len(jm.queue) == 0 {
|
||||
jm.activeWorkers--
|
||||
jm.mu.Unlock()
|
||||
return
|
||||
}
|
||||
next := jm.queue[0]
|
||||
jm.queue = jm.queue[1:]
|
||||
jm.mu.Unlock()
|
||||
if err := next.job(); err != nil {
|
||||
if next.logger != nil {
|
||||
next.logger.Error("job failed", zap.Error(err))
|
||||
}
|
||||
}
|
||||
if next.name != "" {
|
||||
jm.mu.Lock()
|
||||
delete(jm.names, next.name)
|
||||
jm.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// doWithRetry calls f repeatedly until it succeeds, returns an error
// wrapped in ErrNoRetry, the context is done, or maxRetryDuration has
// elapsed. Waits between attempts follow retryIntervals. The running
// attempt count is exposed to f via AttemptsCtxKey on ctx.
func doWithRetry(ctx context.Context, log *zap.Logger, f func(context.Context) error) error {
	var attempts int
	ctx = context.WithValue(ctx, AttemptsCtxKey, &attempts)

	// the initial intervalIndex is -1, signaling
	// that we should not wait for the first attempt
	start, intervalIndex := time.Now(), -1
	var err error

	for time.Since(start) < maxRetryDuration {
		var wait time.Duration
		if intervalIndex >= 0 {
			wait = retryIntervals[intervalIndex]
		}
		timer := time.NewTimer(wait)
		select {
		case <-ctx.Done():
			timer.Stop()
			// NOTE(review): this reports context.Canceled even when the
			// context expired via deadline; ctx.Err() would be more
			// precise — confirm callers only test for Canceled.
			return context.Canceled
		case <-timer.C:
			err = f(ctx)
			attempts++
			if err == nil || errors.Is(err, context.Canceled) {
				return err
			}
			// ErrNoRetry short-circuits the retry loop entirely
			var errNoRetry ErrNoRetry
			if errors.As(err, &errNoRetry) {
				return err
			}
			// advance the backoff schedule, clamping at the last interval
			if intervalIndex < len(retryIntervals)-1 {
				intervalIndex++
			}
			if time.Since(start) < maxRetryDuration {
				if log != nil {
					log.Error("will retry",
						zap.Error(err),
						zap.Int("attempt", attempts),
						zap.Duration("retrying_in", retryIntervals[intervalIndex]),
						zap.Duration("elapsed", time.Since(start)),
						zap.Duration("max_duration", maxRetryDuration))
				}
			} else {
				if log != nil {
					log.Error("final attempt; giving up",
						zap.Error(err),
						zap.Int("attempt", attempts),
						zap.Duration("elapsed", time.Since(start)),
						zap.Duration("max_duration", maxRetryDuration))
				}
				// NOTE(review): this give-up path returns nil while the
				// loop's natural exit below returns the last error —
				// confirm the asymmetry is intended.
				return nil
			}
		}
	}
	return err
}
|
||||
|
||||
// ErrNoRetry is an error type which signals
// to stop retries early.
type ErrNoRetry struct{ Err error }

// Unwrap makes it so that e wraps e.Err.
func (e ErrNoRetry) Unwrap() error { return e.Err }

// Error returns the wrapped error's message.
// NOTE(review): panics if Err is nil — constructors must always set Err.
func (e ErrNoRetry) Error() string { return e.Err.Error() }

// retryStateCtxKey is the unexported type backing AttemptsCtxKey,
// preventing collisions with context keys from other packages.
type retryStateCtxKey struct{}

// AttemptsCtxKey is the context key for the value
// that holds the attempt counter. The value counts
// how many times the operation has been attempted.
// A value of 0 means first attempt.
var AttemptsCtxKey retryStateCtxKey
|
||||
|
||||
// retryIntervals are based on the idea of exponential
// backoff, but weighed a little more heavily to the
// front. We figure that intermittent errors would be
// resolved after the first retry, but any errors after
// that would probably require at least a few minutes
// to clear up: either for DNS to propagate, for the
// administrator to fix their DNS or network properties,
// or some other external factor needs to change. We
// chose intervals that we think will be most useful
// without introducing unnecessary delay. The last
// interval in this list will be used until the time
// of maxRetryDuration has elapsed.
var retryIntervals = []time.Duration{
	1 * time.Minute,
	2 * time.Minute,
	2 * time.Minute,
	5 * time.Minute, // elapsed: 10 min
	10 * time.Minute,
	20 * time.Minute,
	20 * time.Minute, // elapsed: 1 hr
	30 * time.Minute,
	30 * time.Minute, // elapsed: 2 hr
	1 * time.Hour,
	3 * time.Hour, // elapsed: 6 hr
	6 * time.Hour, // for up to maxRetryDuration
}

// maxRetryDuration is the maximum duration to try
// doing retries using the above intervals (30 days).
const maxRetryDuration = 24 * time.Hour * 30
|
||||
364
vendor/github.com/caddyserver/certmagic/cache.go
generated
vendored
Normal file
364
vendor/github.com/caddyserver/certmagic/cache.go
generated
vendored
Normal file
@ -0,0 +1,364 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
weakrand "math/rand" // seeded elsewhere
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Cache is a structure that stores certificates in memory.
// A Cache indexes certificates by name for quick access
// during TLS handshakes, and avoids duplicating certificates
// in memory. Generally, there should only be one per process.
// However, that is not a strict requirement; but using more
// than one is a code smell, and may indicate an
// over-engineered design.
//
// An empty cache is INVALID and must not be used. Be sure
// to call NewCache to get a valid value.
//
// These should be very long-lived values and must not be
// copied. Before all references leave scope to be garbage
// collected, ensure you call Stop() to stop maintenance on
// the certificates stored in this cache and release locks.
//
// Caches are not usually manipulated directly; create a
// Config value with a pointer to a Cache, and then use
// the Config to interact with the cache. Caches are
// agnostic of any particular storage or ACME config,
// since each certificate may be managed and stored
// differently.
type Cache struct {
	// User configuration of the cache
	options CacheOptions

	// The cache is keyed by certificate hash
	cache map[string]Certificate

	// cacheIndex is a map of SAN to cache key (cert hash);
	// one name can map to several certificates
	cacheIndex map[string][]string

	// Protects the cache and index maps
	mu sync.RWMutex

	// Close this channel to cancel asset maintenance
	stopChan chan struct{}

	// Used to signal when stopping is completed
	doneChan chan struct{}

	// Optional logger; nil disables logging
	logger *zap.Logger
}
|
||||
|
||||
// NewCache returns a new, valid Cache for efficiently
|
||||
// accessing certificates in memory. It also begins a
|
||||
// maintenance goroutine to tend to the certificates
|
||||
// in the cache. Call Stop() when you are done with the
|
||||
// cache so it can clean up locks and stuff.
|
||||
//
|
||||
// Most users of this package will not need to call this
|
||||
// because a default certificate cache is created for you.
|
||||
// Only advanced use cases require creating a new cache.
|
||||
//
|
||||
// This function panics if opts.GetConfigForCert is not
|
||||
// set. The reason is that a cache absolutely needs to
|
||||
// be able to get a Config with which to manage TLS
|
||||
// assets, and it is not safe to assume that the Default
|
||||
// config is always the correct one, since you have
|
||||
// created the cache yourself.
|
||||
//
|
||||
// See the godoc for Cache to use it properly. When
|
||||
// no longer needed, caches should be stopped with
|
||||
// Stop() to clean up resources even if the process
|
||||
// is being terminated, so that it can clean up
|
||||
// any locks for other processes to unblock!
|
||||
func NewCache(opts CacheOptions) *Cache {
|
||||
// assume default options if necessary
|
||||
if opts.OCSPCheckInterval <= 0 {
|
||||
opts.OCSPCheckInterval = DefaultOCSPCheckInterval
|
||||
}
|
||||
if opts.RenewCheckInterval <= 0 {
|
||||
opts.RenewCheckInterval = DefaultRenewCheckInterval
|
||||
}
|
||||
if opts.Capacity < 0 {
|
||||
opts.Capacity = 0
|
||||
}
|
||||
|
||||
// this must be set, because we cannot not
|
||||
// safely assume that the Default Config
|
||||
// is always the correct one to use
|
||||
if opts.GetConfigForCert == nil {
|
||||
panic("cache must be initialized with a GetConfigForCert callback")
|
||||
}
|
||||
|
||||
c := &Cache{
|
||||
options: opts,
|
||||
cache: make(map[string]Certificate),
|
||||
cacheIndex: make(map[string][]string),
|
||||
stopChan: make(chan struct{}),
|
||||
doneChan: make(chan struct{}),
|
||||
logger: opts.Logger,
|
||||
}
|
||||
|
||||
go c.maintainAssets(0)
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Stop stops the maintenance goroutine for
// certificates in certCache. It blocks until
// stopping is complete. Once a cache is
// stopped, it cannot be reused.
func (certCache *Cache) Stop() {
	close(certCache.stopChan) // signal to stop
	<-certCache.doneChan      // wait for stop to complete
}

// CacheOptions is used to configure certificate caches.
// Once a cache has been created with certain options,
// those settings cannot be changed.
type CacheOptions struct {
	// REQUIRED. A function that returns a configuration
	// used for managing a certificate, or for accessing
	// that certificate's asset storage (e.g. for
	// OCSP staples, etc). The returned Config MUST
	// be associated with the same Cache as the caller.
	//
	// The reason this is a callback function, dynamically
	// returning a Config (instead of attaching a static
	// pointer to a Config on each certificate) is because
	// the config for how to manage a domain's certificate
	// might change from maintenance to maintenance. The
	// cache is so long-lived, we cannot assume that the
	// host's situation will always be the same; e.g. the
	// certificate might switch DNS providers, so the DNS
	// challenge (if used) would need to be adjusted from
	// the last time it was run ~8 weeks ago.
	GetConfigForCert ConfigGetter

	// How often to check and update OCSP staples;
	// if unset, DefaultOCSPCheckInterval will be used.
	OCSPCheckInterval time.Duration

	// How often to check certificates for renewal;
	// if unset, DefaultRenewCheckInterval will be used.
	RenewCheckInterval time.Duration

	// Maximum number of certificates to allow in the cache.
	// If reached, certificates will be randomly evicted to
	// make room for new ones. 0 means unlimited.
	Capacity int

	// Set a logger to enable logging
	Logger *zap.Logger
}

// ConfigGetter is a function that returns a prepared,
// valid config that should be used when managing the
// given certificate or its assets.
type ConfigGetter func(Certificate) (*Config, error)
|
||||
|
||||
// cacheCertificate calls unsyncedCacheCertificate with a write lock.
|
||||
//
|
||||
// This function is safe for concurrent use.
|
||||
func (certCache *Cache) cacheCertificate(cert Certificate) {
|
||||
certCache.mu.Lock()
|
||||
certCache.unsyncedCacheCertificate(cert)
|
||||
certCache.mu.Unlock()
|
||||
}
|
||||
|
||||
// unsyncedCacheCertificate adds cert to the in-memory cache unless
// it already exists in the cache (according to cert.Hash). It
// updates the name index.
//
// This function is NOT safe for concurrent use. Callers MUST acquire
// a write lock on certCache.mu first.
func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) {
	// no-op if this certificate already exists in the cache
	if _, ok := certCache.cache[cert.hash]; ok {
		if certCache.logger != nil {
			certCache.logger.Debug("certificate already cached",
				zap.Strings("subjects", cert.Names),
				zap.Time("expiration", cert.Leaf.NotAfter),
				zap.Bool("managed", cert.managed),
				zap.String("issuer_key", cert.issuerKey),
				zap.String("hash", cert.hash))
		}
		return
	}

	// if the cache is at capacity, make room for new cert by
	// evicting a single randomly-chosen certificate
	cacheSize := len(certCache.cache)
	if certCache.options.Capacity > 0 && cacheSize >= certCache.options.Capacity {
		// Go maps are "nondeterministic" but not actually random,
		// so although we could just chop off the "front" of the
		// map with less code, that is a heavily skewed eviction
		// strategy; generating random numbers is cheap and
		// ensures a much better distribution.
		rnd := weakrand.Intn(cacheSize)
		i := 0
		for _, randomCert := range certCache.cache {
			if i == rnd {
				if certCache.logger != nil {
					certCache.logger.Debug("cache full; evicting random certificate",
						zap.Strings("removing_subjects", randomCert.Names),
						zap.String("removing_hash", randomCert.hash),
						zap.Strings("inserting_subjects", cert.Names),
						zap.String("inserting_hash", cert.hash))
				}
				certCache.removeCertificate(randomCert)
				break
			}
			i++
		}
	}

	// store the certificate
	certCache.cache[cert.hash] = cert

	// update the index so we can access it by name
	for _, name := range cert.Names {
		certCache.cacheIndex[name] = append(certCache.cacheIndex[name], cert.hash)
	}

	if certCache.logger != nil {
		certCache.logger.Debug("added certificate to cache",
			zap.Strings("subjects", cert.Names),
			zap.Time("expiration", cert.Leaf.NotAfter),
			zap.Bool("managed", cert.managed),
			zap.String("issuer_key", cert.issuerKey),
			zap.String("hash", cert.hash),
			zap.Int("cache_size", len(certCache.cache)),
			zap.Int("cache_capacity", certCache.options.Capacity))
	}
}
|
||||
|
||||
// removeCertificate removes cert from the cache.
|
||||
//
|
||||
// This function is NOT safe for concurrent use; callers
|
||||
// MUST first acquire a write lock on certCache.mu.
|
||||
func (certCache *Cache) removeCertificate(cert Certificate) {
|
||||
// delete all mentions of this cert from the name index
|
||||
for _, name := range cert.Names {
|
||||
keyList := certCache.cacheIndex[name]
|
||||
for i := 0; i < len(keyList); i++ {
|
||||
if keyList[i] == cert.hash {
|
||||
keyList = append(keyList[:i], keyList[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
if len(keyList) == 0 {
|
||||
delete(certCache.cacheIndex, name)
|
||||
} else {
|
||||
certCache.cacheIndex[name] = keyList
|
||||
}
|
||||
}
|
||||
|
||||
// delete the actual cert from the cache
|
||||
delete(certCache.cache, cert.hash)
|
||||
|
||||
if certCache.logger != nil {
|
||||
certCache.logger.Debug("removed certificate from cache",
|
||||
zap.Strings("subjects", cert.Names),
|
||||
zap.Time("expiration", cert.Leaf.NotAfter),
|
||||
zap.Bool("managed", cert.managed),
|
||||
zap.String("issuer_key", cert.issuerKey),
|
||||
zap.String("hash", cert.hash),
|
||||
zap.Int("cache_size", len(certCache.cache)),
|
||||
zap.Int("cache_capacity", certCache.options.Capacity))
|
||||
}
|
||||
}
|
||||
|
||||
// replaceCertificate atomically replaces oldCert with newCert in
// the cache.
//
// This method is safe for concurrent use.
func (certCache *Cache) replaceCertificate(oldCert, newCert Certificate) {
	certCache.mu.Lock()
	certCache.removeCertificate(oldCert)
	certCache.unsyncedCacheCertificate(newCert)
	certCache.mu.Unlock()
	// logging happens after the lock is released
	if certCache.logger != nil {
		certCache.logger.Info("replaced certificate in cache",
			zap.Strings("subjects", newCert.Names),
			zap.Time("new_expiration", newCert.Leaf.NotAfter))
	}
}
|
||||
|
||||
func (certCache *Cache) getAllMatchingCerts(name string) []Certificate {
|
||||
certCache.mu.RLock()
|
||||
defer certCache.mu.RUnlock()
|
||||
|
||||
allCertKeys := certCache.cacheIndex[name]
|
||||
|
||||
certs := make([]Certificate, len(allCertKeys))
|
||||
for i := range allCertKeys {
|
||||
certs[i] = certCache.cache[allCertKeys[i]]
|
||||
}
|
||||
|
||||
return certs
|
||||
}
|
||||
|
||||
func (certCache *Cache) getAllCerts() []Certificate {
|
||||
certCache.mu.RLock()
|
||||
defer certCache.mu.RUnlock()
|
||||
certs := make([]Certificate, 0, len(certCache.cache))
|
||||
for _, cert := range certCache.cache {
|
||||
certs = append(certs, cert)
|
||||
}
|
||||
return certs
|
||||
}
|
||||
|
||||
// getConfig returns the config associated with cert via the
// user-supplied GetConfigForCert callback, enforcing the invariant
// that a non-nil cache pointer on the returned Config refers back
// to this cache.
func (certCache *Cache) getConfig(cert Certificate) (*Config, error) {
	cfg, err := certCache.options.GetConfigForCert(cert)
	if err != nil {
		return nil, err
	}
	if cfg.certCache != nil && cfg.certCache != certCache {
		return nil, fmt.Errorf("config returned for certificate %v is not nil and points to different cache; got %p, expected %p (this one)",
			cert.Names, cfg.certCache, certCache)
	}
	return cfg, nil
}
|
||||
|
||||
// AllMatchingCertificates returns a list of all certificates that could
// be used to serve the given SNI name, including exact SAN matches and
// wildcard matches.
func (certCache *Cache) AllMatchingCertificates(name string) []Certificate {
	// get exact matches first
	certs := certCache.getAllMatchingCerts(name)

	// then look for wildcard matches by replacing each
	// label of the domain name with wildcards
	labels := strings.Split(name, ".")
	for i := range labels {
		// labels are replaced cumulatively, so for "a.b.c" the
		// candidates are "*.b.c", then "*.*.c", then "*.*.*"
		labels[i] = "*"
		candidate := strings.Join(labels, ".")
		certs = append(certs, certCache.getAllMatchingCerts(candidate)...)
	}

	return certs
}
|
||||
|
||||
var (
	// defaultCache is the package's shared certificate cache;
	// defaultCacheMu presumably guards its initialization and
	// access — confirm at the usage sites.
	defaultCache   *Cache
	defaultCacheMu sync.Mutex
)
|
||||
429
vendor/github.com/caddyserver/certmagic/certificates.go
generated
vendored
Normal file
429
vendor/github.com/caddyserver/certmagic/certificates.go
generated
vendored
Normal file
@ -0,0 +1,429 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/crypto/ocsp"
|
||||
)
|
||||
|
||||
// Certificate is a tls.Certificate with associated metadata tacked on.
// Even if the metadata can be obtained by parsing the certificate,
// we are more efficient by extracting the metadata onto this struct,
// but at the cost of slightly higher memory use.
type Certificate struct {
	tls.Certificate

	// Names is the list of subject names this
	// certificate is signed for.
	Names []string

	// Optional; user-provided, and arbitrary.
	Tags []string

	// OCSP contains the certificate's parsed OCSP response.
	// It is not necessarily the response that is stapled
	// (e.g. if the status is not Good), it is simply the
	// most recent OCSP response we have for this certificate.
	ocsp *ocsp.Response

	// The hex-encoded hash of this cert's chain's bytes;
	// used as the key into the certificate cache.
	hash string

	// Whether this certificate is under our management.
	managed bool

	// The unique string identifying the issuer of this certificate.
	issuerKey string
}
|
||||
|
||||
// Empty returns true if the certificate struct is not filled out; at
// least the tls.Certificate.Certificate field is expected to be set.
func (cert Certificate) Empty() bool {
	return len(cert.Certificate.Certificate) == 0
}

// NeedsRenewal returns true if the certificate is
// expiring soon (according to cfg) or has expired.
// NOTE(review): assumes cert.Leaf is non-nil, unlike Expired
// below which guards — confirm callers only pass parsed certs.
func (cert Certificate) NeedsRenewal(cfg *Config) bool {
	return currentlyInRenewalWindow(cert.Leaf.NotBefore, cert.Leaf.NotAfter, cfg.RenewalWindowRatio)
}

// Expired returns true if the certificate has expired.
func (cert Certificate) Expired() bool {
	if cert.Leaf == nil {
		// ideally cert.Leaf would never be nil, but this can happen for
		// "synthetic" certs like those made to solve the TLS-ALPN challenge
		// which adds a special cert directly to the cache, since
		// tls.X509KeyPair() discards the leaf; oh well
		return false
	}
	return time.Now().After(cert.Leaf.NotAfter)
}
|
||||
|
||||
// currentlyInRenewalWindow returns true if the current time is
|
||||
// within the renewal window, according to the given start/end
|
||||
// dates and the ratio of the renewal window. If true is returned,
|
||||
// the certificate being considered is due for renewal.
|
||||
func currentlyInRenewalWindow(notBefore, notAfter time.Time, renewalWindowRatio float64) bool {
|
||||
if notAfter.IsZero() {
|
||||
return false
|
||||
}
|
||||
lifetime := notAfter.Sub(notBefore)
|
||||
if renewalWindowRatio == 0 {
|
||||
renewalWindowRatio = DefaultRenewalWindowRatio
|
||||
}
|
||||
renewalWindow := time.Duration(float64(lifetime) * renewalWindowRatio)
|
||||
renewalWindowStart := notAfter.Add(-renewalWindow)
|
||||
return time.Now().After(renewalWindowStart)
|
||||
}
|
||||
|
||||
// HasTag returns true if cert.Tags has tag.
|
||||
func (cert Certificate) HasTag(tag string) bool {
|
||||
for _, t := range cert.Tags {
|
||||
if t == tag {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CacheManagedCertificate loads the certificate for domain into the
// cache, from the TLS storage for managed certificates. It returns a
// copy of the Certificate that was put into the cache.
//
// This is a lower-level method; normally you'll call Manage() instead.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheManagedCertificate(ctx context.Context, domain string) (Certificate, error) {
	cert, err := cfg.loadManagedCertificate(ctx, domain)
	if err != nil {
		return cert, err
	}
	cfg.certCache.cacheCertificate(cert)
	cfg.emit("cached_managed_cert", cert.Names)
	return cert, nil
}
|
||||
|
||||
// loadManagedCertificate loads the managed certificate for domain from any
// of the configured issuers' storage locations, but it does not add it to
// the cache. It just loads from storage and returns it.
func (cfg *Config) loadManagedCertificate(ctx context.Context, domain string) (Certificate, error) {
	certRes, err := cfg.loadCertResourceAnyIssuer(ctx, domain)
	if err != nil {
		return Certificate{}, err
	}
	cert, err := cfg.makeCertificateWithOCSP(ctx, certRes.CertificatePEM, certRes.PrivateKeyPEM)
	if err != nil {
		return cert, err
	}
	// mark provenance: this cert came from managed storage
	cert.managed = true
	cert.issuerKey = certRes.issuerKey
	return cert, nil
}
|
||||
|
||||
// CacheUnmanagedCertificatePEMFile loads a certificate for host using certFile
// and keyFile, which must be in PEM format. It stores the certificate in
// the in-memory cache.
//
// This method is safe for concurrent use.
func (cfg *Config) CacheUnmanagedCertificatePEMFile(ctx context.Context, certFile, keyFile string, tags []string) error {
	cert, err := cfg.makeCertificateFromDiskWithOCSP(ctx, cfg.Storage, certFile, keyFile)
	if err != nil {
		return err
	}
	cert.Tags = tags
	cfg.certCache.cacheCertificate(cert)
	cfg.emit("cached_unmanaged_cert", cert.Names)
	return nil
}
|
||||
|
||||
// CacheUnmanagedTLSCertificate adds tlsCert to the certificate cache.
|
||||
// It staples OCSP if possible.
|
||||
//
|
||||
// This method is safe for concurrent use.
|
||||
func (cfg *Config) CacheUnmanagedTLSCertificate(ctx context.Context, tlsCert tls.Certificate, tags []string) error {
|
||||
var cert Certificate
|
||||
err := fillCertFromLeaf(&cert, tlsCert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = stapleOCSP(ctx, cfg.OCSP, cfg.Storage, &cert, nil)
|
||||
if err != nil && cfg.Logger != nil {
|
||||
cfg.Logger.Warn("stapling OCSP", zap.Error(err))
|
||||
}
|
||||
cfg.emit("cached_unmanaged_cert", cert.Names)
|
||||
cert.Tags = tags
|
||||
cfg.certCache.cacheCertificate(cert)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CacheUnmanagedCertificatePEMBytes makes a certificate out of the PEM bytes
|
||||
// of the certificate and key, then caches it in memory.
|
||||
//
|
||||
// This method is safe for concurrent use.
|
||||
func (cfg *Config) CacheUnmanagedCertificatePEMBytes(ctx context.Context, certBytes, keyBytes []byte, tags []string) error {
|
||||
cert, err := cfg.makeCertificateWithOCSP(ctx, certBytes, keyBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cert.Tags = tags
|
||||
cfg.certCache.cacheCertificate(cert)
|
||||
cfg.emit("cached_unmanaged_cert", cert.Names)
|
||||
return nil
|
||||
}
|
||||
|
||||
// makeCertificateFromDiskWithOCSP makes a Certificate by loading the
|
||||
// certificate and key files. It fills out all the fields in
|
||||
// the certificate except for the Managed and OnDemand flags.
|
||||
// (It is up to the caller to set those.) It staples OCSP.
|
||||
func (cfg Config) makeCertificateFromDiskWithOCSP(ctx context.Context, storage Storage, certFile, keyFile string) (Certificate, error) {
|
||||
certPEMBlock, err := os.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return Certificate{}, err
|
||||
}
|
||||
keyPEMBlock, err := os.ReadFile(keyFile)
|
||||
if err != nil {
|
||||
return Certificate{}, err
|
||||
}
|
||||
return cfg.makeCertificateWithOCSP(ctx, certPEMBlock, keyPEMBlock)
|
||||
}
|
||||
|
||||
// makeCertificateWithOCSP is the same as makeCertificate except that it also
|
||||
// staples OCSP to the certificate.
|
||||
func (cfg Config) makeCertificateWithOCSP(ctx context.Context, certPEMBlock, keyPEMBlock []byte) (Certificate, error) {
|
||||
cert, err := makeCertificate(certPEMBlock, keyPEMBlock)
|
||||
if err != nil {
|
||||
return cert, err
|
||||
}
|
||||
err = stapleOCSP(ctx, cfg.OCSP, cfg.Storage, &cert, certPEMBlock)
|
||||
if err != nil && cfg.Logger != nil {
|
||||
cfg.Logger.Warn("stapling OCSP", zap.Error(err), zap.Strings("identifiers", cert.Names))
|
||||
}
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// makeCertificate turns a certificate PEM bundle and a key PEM block into
|
||||
// a Certificate with necessary metadata from parsing its bytes filled into
|
||||
// its struct fields for convenience (except for the OnDemand and Managed
|
||||
// flags; it is up to the caller to set those properties!). This function
|
||||
// does NOT staple OCSP.
|
||||
func makeCertificate(certPEMBlock, keyPEMBlock []byte) (Certificate, error) {
|
||||
var cert Certificate
|
||||
|
||||
// Convert to a tls.Certificate
|
||||
tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
|
||||
if err != nil {
|
||||
return cert, err
|
||||
}
|
||||
|
||||
// Extract necessary metadata
|
||||
err = fillCertFromLeaf(&cert, tlsCert)
|
||||
if err != nil {
|
||||
return cert, err
|
||||
}
|
||||
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// fillCertFromLeaf populates cert from tlsCert. If it succeeds, it
|
||||
// guarantees that cert.Leaf is non-nil.
|
||||
func fillCertFromLeaf(cert *Certificate, tlsCert tls.Certificate) error {
|
||||
if len(tlsCert.Certificate) == 0 {
|
||||
return fmt.Errorf("certificate is empty")
|
||||
}
|
||||
cert.Certificate = tlsCert
|
||||
|
||||
// the leaf cert should be the one for the site; we must set
|
||||
// the tls.Certificate.Leaf field so that TLS handshakes are
|
||||
// more efficient
|
||||
leaf := cert.Certificate.Leaf
|
||||
if leaf == nil {
|
||||
var err error
|
||||
leaf, err = x509.ParseCertificate(tlsCert.Certificate[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cert.Certificate.Leaf = leaf
|
||||
}
|
||||
|
||||
// for convenience, we do want to assemble all the
|
||||
// subjects on the certificate into one list
|
||||
if leaf.Subject.CommonName != "" { // TODO: CommonName is deprecated
|
||||
cert.Names = []string{strings.ToLower(leaf.Subject.CommonName)}
|
||||
}
|
||||
for _, name := range leaf.DNSNames {
|
||||
if name != leaf.Subject.CommonName { // TODO: CommonName is deprecated
|
||||
cert.Names = append(cert.Names, strings.ToLower(name))
|
||||
}
|
||||
}
|
||||
for _, ip := range leaf.IPAddresses {
|
||||
if ipStr := ip.String(); ipStr != leaf.Subject.CommonName { // TODO: CommonName is deprecated
|
||||
cert.Names = append(cert.Names, strings.ToLower(ipStr))
|
||||
}
|
||||
}
|
||||
for _, email := range leaf.EmailAddresses {
|
||||
if email != leaf.Subject.CommonName { // TODO: CommonName is deprecated
|
||||
cert.Names = append(cert.Names, strings.ToLower(email))
|
||||
}
|
||||
}
|
||||
for _, u := range leaf.URIs {
|
||||
if u.String() != leaf.Subject.CommonName { // TODO: CommonName is deprecated
|
||||
cert.Names = append(cert.Names, u.String())
|
||||
}
|
||||
}
|
||||
if len(cert.Names) == 0 {
|
||||
return fmt.Errorf("certificate has no names")
|
||||
}
|
||||
|
||||
cert.hash = hashCertificateChain(cert.Certificate.Certificate)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// managedCertInStorageExpiresSoon returns true if cert (being a
|
||||
// managed certificate) is expiring within RenewDurationBefore.
|
||||
// It returns false if there was an error checking the expiration
|
||||
// of the certificate as found in storage, or if the certificate
|
||||
// in storage is NOT expiring soon. A certificate that is expiring
|
||||
// soon in our cache but is not expiring soon in storage probably
|
||||
// means that another instance renewed the certificate in the
|
||||
// meantime, and it would be a good idea to simply load the cert
|
||||
// into our cache rather than repeating the renewal process again.
|
||||
func (cfg *Config) managedCertInStorageExpiresSoon(ctx context.Context, cert Certificate) (bool, error) {
|
||||
certRes, err := cfg.loadCertResourceAnyIssuer(ctx, cert.Names[0])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
_, needsRenew := cfg.managedCertNeedsRenewal(certRes)
|
||||
return needsRenew, nil
|
||||
}
|
||||
|
||||
// reloadManagedCertificate reloads the certificate corresponding to the name(s)
|
||||
// on oldCert into the cache, from storage. This also replaces the old certificate
|
||||
// with the new one, so that all configurations that used the old cert now point
|
||||
// to the new cert. It assumes that the new certificate for oldCert.Names[0] is
|
||||
// already in storage. It returns the newly-loaded certificate if successful.
|
||||
func (cfg *Config) reloadManagedCertificate(ctx context.Context, oldCert Certificate) (Certificate, error) {
|
||||
if cfg.Logger != nil {
|
||||
cfg.Logger.Info("reloading managed certificate", zap.Strings("identifiers", oldCert.Names))
|
||||
}
|
||||
newCert, err := cfg.loadManagedCertificate(ctx, oldCert.Names[0])
|
||||
if err != nil {
|
||||
return Certificate{}, fmt.Errorf("loading managed certificate for %v from storage: %v", oldCert.Names, err)
|
||||
}
|
||||
cfg.certCache.replaceCertificate(oldCert, newCert)
|
||||
return newCert, nil
|
||||
}
|
||||
|
||||
// SubjectQualifiesForCert returns true if subj is a name which,
// as a quick sanity check, looks like it could be the subject
// of a certificate. Requirements are:
// - must not be empty (or whitespace only)
// - must not start or end with a dot (RFC 1034; RFC 6066 section 3)
// - a wildcard, if present, must be the left-most label (or subj is exactly "*",
//   which won't be trusted by browsers but still technically works)
// - must not contain common accidental special characters
func SubjectQualifiesForCert(subj string) bool {
	if strings.TrimSpace(subj) == "" {
		return false
	}
	if strings.HasPrefix(subj, ".") || strings.HasSuffix(subj, ".") {
		return false
	}
	if strings.Contains(subj, "*") && !strings.HasPrefix(subj, "*.") && subj != "*" {
		return false
	}
	return !strings.ContainsAny(subj, "()[]{}<> \t\n\"\\!@#$%^&|;'+=")
}
|
||||
|
||||
// SubjectQualifiesForPublicCert returns true if the subject
|
||||
// name appears eligible for automagic TLS with a public
|
||||
// CA such as Let's Encrypt. For example: localhost and IP
|
||||
// addresses are not eligible because we cannot obtain certs
|
||||
// for those names with a public CA. Wildcard names are
|
||||
// allowed, as long as they conform to CABF requirements (only
|
||||
// one wildcard label, and it must be the left-most label).
|
||||
func SubjectQualifiesForPublicCert(subj string) bool {
|
||||
// must at least qualify for a certificate
|
||||
return SubjectQualifiesForCert(subj) &&
|
||||
|
||||
// localhost, .localhost TLD, and .local TLD are ineligible
|
||||
!SubjectIsInternal(subj) &&
|
||||
|
||||
// cannot be an IP address (as of yet), see
|
||||
// https://community.letsencrypt.org/t/certificate-for-static-ip/84/2?u=mholt
|
||||
!SubjectIsIP(subj) &&
|
||||
|
||||
// only one wildcard label allowed, and it must be left-most, with 3+ labels
|
||||
(!strings.Contains(subj, "*") ||
|
||||
(strings.Count(subj, "*") == 1 &&
|
||||
strings.Count(subj, ".") > 1 &&
|
||||
len(subj) > 2 &&
|
||||
strings.HasPrefix(subj, "*.")))
|
||||
}
|
||||
|
||||
// SubjectIsIP returns true if subj parses as an IP address (v4 or v6).
func SubjectIsIP(subj string) bool {
	parsed := net.ParseIP(subj)
	return parsed != nil
}
|
||||
|
||||
// SubjectIsInternal returns true if subj is an internal-facing hostname:
// "localhost" itself, or anything under the .localhost or .local TLDs.
func SubjectIsInternal(subj string) bool {
	if subj == "localhost" {
		return true
	}
	return strings.HasSuffix(subj, ".localhost") || strings.HasSuffix(subj, ".local")
}
|
||||
|
||||
// MatchWildcard returns true if subject (a candidate DNS name)
// matches wildcard (a reference DNS name), mostly according to
// RFC 6125-compliant wildcard rules. See also RFC 2818 which
// states that IP addresses must match exactly, but this function
// does not attempt to distinguish IP addresses from internal or
// external DNS names that happen to look like IP addresses.
// It uses DNS wildcard matching logic and is case-insensitive.
// https://tools.ietf.org/html/rfc2818#section-3.1
func MatchWildcard(subject, wildcard string) bool {
	subject = strings.ToLower(subject)
	wildcard = strings.ToLower(wildcard)
	if subject == wildcard {
		return true
	}
	if !strings.Contains(wildcard, "*") {
		return false
	}

	// successively replace labels of subject with "*" (left to right,
	// cumulatively) and compare each resulting candidate to wildcard
	labels := strings.Split(subject, ".")
	for i, label := range labels {
		if label == "" {
			continue // invalid label
		}
		labels[i] = "*"
		if strings.Join(labels, ".") == wildcard {
			return true
		}
	}
	return false
}
|
||||
506
vendor/github.com/caddyserver/certmagic/certmagic.go
generated
vendored
Normal file
506
vendor/github.com/caddyserver/certmagic/certmagic.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1136
vendor/github.com/caddyserver/certmagic/config.go
generated
vendored
Normal file
1136
vendor/github.com/caddyserver/certmagic/config.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
368
vendor/github.com/caddyserver/certmagic/crypto.go
generated
vendored
Normal file
368
vendor/github.com/caddyserver/certmagic/crypto.go
generated
vendored
Normal file
@ -0,0 +1,368 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"io/fs"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/klauspost/cpuid/v2"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/net/idna"
|
||||
)
|
||||
|
||||
// PEMEncodePrivateKey marshals a private key into a PEM-encoded block.
|
||||
// The private key must be one of *ecdsa.PrivateKey, *rsa.PrivateKey, or
|
||||
// *ed25519.PrivateKey.
|
||||
func PEMEncodePrivateKey(key crypto.PrivateKey) ([]byte, error) {
|
||||
var pemType string
|
||||
var keyBytes []byte
|
||||
switch key := key.(type) {
|
||||
case *ecdsa.PrivateKey:
|
||||
var err error
|
||||
pemType = "EC"
|
||||
keyBytes, err = x509.MarshalECPrivateKey(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *rsa.PrivateKey:
|
||||
pemType = "RSA"
|
||||
keyBytes = x509.MarshalPKCS1PrivateKey(key)
|
||||
case ed25519.PrivateKey:
|
||||
var err error
|
||||
pemType = "ED25519"
|
||||
keyBytes, err = x509.MarshalPKCS8PrivateKey(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported key type: %T", key)
|
||||
}
|
||||
pemKey := pem.Block{Type: pemType + " PRIVATE KEY", Bytes: keyBytes}
|
||||
return pem.EncodeToMemory(&pemKey), nil
|
||||
}
|
||||
|
||||
// PEMDecodePrivateKey loads a PEM-encoded ECC/RSA private key from an array of bytes.
|
||||
// Borrowed from Go standard library, to handle various private key and PEM block types.
|
||||
func PEMDecodePrivateKey(keyPEMBytes []byte) (crypto.Signer, error) {
|
||||
// Modified from original:
|
||||
// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L291-L308
|
||||
// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L238
|
||||
|
||||
keyBlockDER, _ := pem.Decode(keyPEMBytes)
|
||||
|
||||
if keyBlockDER == nil {
|
||||
return nil, fmt.Errorf("failed to decode PEM block containing private key")
|
||||
}
|
||||
|
||||
if keyBlockDER.Type != "PRIVATE KEY" && !strings.HasSuffix(keyBlockDER.Type, " PRIVATE KEY") {
|
||||
return nil, fmt.Errorf("unknown PEM header %q", keyBlockDER.Type)
|
||||
}
|
||||
|
||||
if key, err := x509.ParsePKCS1PrivateKey(keyBlockDER.Bytes); err == nil {
|
||||
return key, nil
|
||||
}
|
||||
|
||||
if key, err := x509.ParsePKCS8PrivateKey(keyBlockDER.Bytes); err == nil {
|
||||
switch key := key.(type) {
|
||||
case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey:
|
||||
return key.(crypto.Signer), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("found unknown private key type in PKCS#8 wrapping: %T", key)
|
||||
}
|
||||
}
|
||||
|
||||
if key, err := x509.ParseECPrivateKey(keyBlockDER.Bytes); err == nil {
|
||||
return key, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unknown private key type")
|
||||
}
|
||||
|
||||
// parseCertsFromPEMBundle parses a certificate bundle from top to bottom and returns
|
||||
// a slice of x509 certificates. This function will error if no certificates are found.
|
||||
func parseCertsFromPEMBundle(bundle []byte) ([]*x509.Certificate, error) {
|
||||
var certificates []*x509.Certificate
|
||||
var certDERBlock *pem.Block
|
||||
for {
|
||||
certDERBlock, bundle = pem.Decode(bundle)
|
||||
if certDERBlock == nil {
|
||||
break
|
||||
}
|
||||
if certDERBlock.Type == "CERTIFICATE" {
|
||||
cert, err := x509.ParseCertificate(certDERBlock.Bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
certificates = append(certificates, cert)
|
||||
}
|
||||
}
|
||||
if len(certificates) == 0 {
|
||||
return nil, fmt.Errorf("no certificates found in bundle")
|
||||
}
|
||||
return certificates, nil
|
||||
}
|
||||
|
||||
// fastHash hashes input using 32-bit FNV-1a and returns the digest as a
// hex-encoded string. It is fast, but do not use it for cryptographic
// purposes.
func fastHash(input []byte) string {
	hasher := fnv.New32a()
	hasher.Write(input)
	return fmt.Sprintf("%x", hasher.Sum32())
}
|
||||
|
||||
// saveCertResource saves the certificate resource to disk. This
|
||||
// includes the certificate file itself, the private key, and the
|
||||
// metadata file.
|
||||
func (cfg *Config) saveCertResource(ctx context.Context, issuer Issuer, cert CertificateResource) error {
|
||||
metaBytes, err := json.MarshalIndent(cert, "", "\t")
|
||||
if err != nil {
|
||||
return fmt.Errorf("encoding certificate metadata: %v", err)
|
||||
}
|
||||
|
||||
issuerKey := issuer.IssuerKey()
|
||||
certKey := cert.NamesKey()
|
||||
|
||||
all := []keyValue{
|
||||
{
|
||||
key: StorageKeys.SitePrivateKey(issuerKey, certKey),
|
||||
value: cert.PrivateKeyPEM,
|
||||
},
|
||||
{
|
||||
key: StorageKeys.SiteCert(issuerKey, certKey),
|
||||
value: cert.CertificatePEM,
|
||||
},
|
||||
{
|
||||
key: StorageKeys.SiteMeta(issuerKey, certKey),
|
||||
value: metaBytes,
|
||||
},
|
||||
}
|
||||
|
||||
return storeTx(ctx, cfg.Storage, all)
|
||||
}
|
||||
|
||||
// loadCertResourceAnyIssuer loads and returns the certificate resource from any
// of the configured issuers. If multiple are found (e.g. if there are 3 issuers
// configured, and all 3 have a resource matching certNamesKey), then the newest
// (latest NotBefore date) resource will be chosen. Missing resources
// (fs.ErrNotExist) from individual issuers are tolerated; any other load or
// parse error aborts the whole operation.
func (cfg *Config) loadCertResourceAnyIssuer(ctx context.Context, certNamesKey string) (CertificateResource, error) {
	// we can save some extra decoding steps if there's only one issuer, since
	// we don't need to compare potentially multiple available resources to
	// select the best one, when there's only one choice anyway
	if len(cfg.Issuers) == 1 {
		return cfg.loadCertResource(ctx, cfg.Issuers[0], certNamesKey)
	}

	// pairs a loaded resource with the issuer it came from and its decoded
	// leaf certificate, so resources can be compared by NotBefore below
	type decodedCertResource struct {
		CertificateResource
		issuer  Issuer
		decoded *x509.Certificate
	}
	var certResources []decodedCertResource
	var lastErr error

	// load and decode all certificate resources found with the
	// configured issuers so we can sort by newest
	for _, issuer := range cfg.Issuers {
		certRes, err := cfg.loadCertResource(ctx, issuer, certNamesKey)
		if err != nil {
			if errors.Is(err, fs.ErrNotExist) {
				// not a problem, but we need to remember the error
				// in case we end up not finding any cert resources
				// since we'll need an error to return in that case
				lastErr = err
				continue
			}
			return CertificateResource{}, err
		}
		certs, err := parseCertsFromPEMBundle(certRes.CertificatePEM)
		if err != nil {
			return CertificateResource{}, err
		}
		// certs[0] is the leaf: parseCertsFromPEMBundle errors on empty bundles
		certResources = append(certResources, decodedCertResource{
			CertificateResource: certRes,
			issuer:              issuer,
			decoded:             certs[0],
		})
	}
	if len(certResources) == 0 {
		if lastErr == nil {
			lastErr = fmt.Errorf("no certificate resources found") // just in case; e.g. no Issuers configured
		}
		return CertificateResource{}, lastErr
	}

	// sort by date so the most recently issued comes first
	sort.Slice(certResources, func(i, j int) bool {
		return certResources[j].decoded.NotBefore.Before(certResources[i].decoded.NotBefore)
	})

	if cfg.Logger != nil {
		cfg.Logger.Debug("loading managed certificate",
			zap.String("domain", certNamesKey),
			zap.Time("expiration", certResources[0].decoded.NotAfter),
			zap.String("issuer_key", certResources[0].issuer.IssuerKey()),
			zap.Any("storage", cfg.Storage),
		)
	}

	return certResources[0].CertificateResource, nil
}
|
||||
|
||||
// loadCertResource loads a certificate resource from the given issuer's storage location.
|
||||
func (cfg *Config) loadCertResource(ctx context.Context, issuer Issuer, certNamesKey string) (CertificateResource, error) {
|
||||
certRes := CertificateResource{issuerKey: issuer.IssuerKey()}
|
||||
|
||||
normalizedName, err := idna.ToASCII(certNamesKey)
|
||||
if err != nil {
|
||||
return CertificateResource{}, fmt.Errorf("converting '%s' to ASCII: %v", certNamesKey, err)
|
||||
}
|
||||
|
||||
keyBytes, err := cfg.Storage.Load(ctx, StorageKeys.SitePrivateKey(certRes.issuerKey, normalizedName))
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
certRes.PrivateKeyPEM = keyBytes
|
||||
certBytes, err := cfg.Storage.Load(ctx, StorageKeys.SiteCert(certRes.issuerKey, normalizedName))
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
certRes.CertificatePEM = certBytes
|
||||
metaBytes, err := cfg.Storage.Load(ctx, StorageKeys.SiteMeta(certRes.issuerKey, normalizedName))
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
err = json.Unmarshal(metaBytes, &certRes)
|
||||
if err != nil {
|
||||
return CertificateResource{}, fmt.Errorf("decoding certificate metadata: %v", err)
|
||||
}
|
||||
|
||||
return certRes, nil
|
||||
}
|
||||
|
||||
// hashCertificateChain computes the unique hash of certChain, which is the
// chain of DER-encoded bytes. It returns the hex encoding of the SHA-256
// digest over all certificates concatenated in order.
func hashCertificateChain(certChain [][]byte) string {
	digest := sha256.New()
	for _, der := range certChain {
		digest.Write(der)
	}
	return fmt.Sprintf("%x", digest.Sum(nil))
}
|
||||
|
||||
func namesFromCSR(csr *x509.CertificateRequest) []string {
|
||||
var nameSet []string
|
||||
nameSet = append(nameSet, csr.DNSNames...)
|
||||
nameSet = append(nameSet, csr.EmailAddresses...)
|
||||
for _, v := range csr.IPAddresses {
|
||||
nameSet = append(nameSet, v.String())
|
||||
}
|
||||
for _, v := range csr.URIs {
|
||||
nameSet = append(nameSet, v.String())
|
||||
}
|
||||
return nameSet
|
||||
}
|
||||
|
||||
// preferredDefaultCipherSuites returns an appropriate
// cipher suite to use depending on hardware support
// for AES-NI.
//
// See https://github.com/mholt/caddy/issues/1674
func preferredDefaultCipherSuites() []uint16 {
	// with AES-NI available, the AES-GCM-first ordering is used;
	// otherwise the ChaCha20-Poly1305-first ordering is preferred
	if cpuid.CPU.Supports(cpuid.AESNI) {
		return defaultCiphersPreferAES
	}
	return defaultCiphersPreferChaCha
}
|
||||
|
||||
var (
	// defaultCiphersPreferAES orders the AES-GCM suites first; selected by
	// preferredDefaultCipherSuites when the CPU supports AES-NI.
	defaultCiphersPreferAES = []uint16{
		tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
		tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
		tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
		tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
	}
	// defaultCiphersPreferChaCha lists the same suites with the
	// ChaCha20-Poly1305 suites first; selected when AES-NI is absent.
	defaultCiphersPreferChaCha = []uint16{
		tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
		tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
		tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
		tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
		tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
	}
)
|
||||
|
||||
// StandardKeyGenerator is the standard, in-memory key source
|
||||
// that uses crypto/rand.
|
||||
type StandardKeyGenerator struct {
|
||||
// The type of keys to generate.
|
||||
KeyType KeyType
|
||||
}
|
||||
|
||||
// GenerateKey generates a new private key according to kg.KeyType.
|
||||
func (kg StandardKeyGenerator) GenerateKey() (crypto.PrivateKey, error) {
|
||||
switch kg.KeyType {
|
||||
case ED25519:
|
||||
_, priv, err := ed25519.GenerateKey(rand.Reader)
|
||||
return priv, err
|
||||
case "", P256:
|
||||
return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
case P384:
|
||||
return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
|
||||
case RSA2048:
|
||||
return rsa.GenerateKey(rand.Reader, 2048)
|
||||
case RSA4096:
|
||||
return rsa.GenerateKey(rand.Reader, 4096)
|
||||
case RSA8192:
|
||||
return rsa.GenerateKey(rand.Reader, 8192)
|
||||
}
|
||||
return nil, fmt.Errorf("unrecognized or unsupported key type: %s", kg.KeyType)
|
||||
}
|
||||
|
||||
// DefaultKeyGenerator is the default key source.
|
||||
var DefaultKeyGenerator = StandardKeyGenerator{KeyType: P256}
|
||||
|
||||
// KeyType enumerates the known/supported key types.
|
||||
type KeyType string
|
||||
|
||||
// Constants for all key types we support.
|
||||
const (
|
||||
ED25519 = KeyType("ed25519")
|
||||
P256 = KeyType("p256")
|
||||
P384 = KeyType("p384")
|
||||
RSA2048 = KeyType("rsa2048")
|
||||
RSA4096 = KeyType("rsa4096")
|
||||
RSA8192 = KeyType("rsa8192")
|
||||
)
|
||||
342
vendor/github.com/caddyserver/certmagic/dnsutil.go
generated
vendored
Normal file
342
vendor/github.com/caddyserver/certmagic/dnsutil.go
generated
vendored
Normal file
@ -0,0 +1,342 @@
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
// Code in this file adapted from go-acme/lego, July 2020:
|
||||
// https://github.com/go-acme/lego
|
||||
// by Ludovic Fernandez and Dominik Menke
|
||||
//
|
||||
// It has been modified.
|
||||
|
||||
// findZoneByFQDN determines the zone apex for the given fqdn by recursing
|
||||
// up the domain labels until the nameserver returns a SOA record in the
|
||||
// answer section.
|
||||
func findZoneByFQDN(fqdn string, nameservers []string) (string, error) {
|
||||
if !strings.HasSuffix(fqdn, ".") {
|
||||
fqdn += "."
|
||||
}
|
||||
soa, err := lookupSoaByFqdn(fqdn, nameservers)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return soa.zone, nil
|
||||
}
|
||||
|
||||
func lookupSoaByFqdn(fqdn string, nameservers []string) (*soaCacheEntry, error) {
|
||||
if !strings.HasSuffix(fqdn, ".") {
|
||||
fqdn += "."
|
||||
}
|
||||
|
||||
fqdnSOACacheMu.Lock()
|
||||
defer fqdnSOACacheMu.Unlock()
|
||||
|
||||
// prefer cached version if fresh
|
||||
if ent := fqdnSOACache[fqdn]; ent != nil && !ent.isExpired() {
|
||||
return ent, nil
|
||||
}
|
||||
|
||||
ent, err := fetchSoaByFqdn(fqdn, nameservers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// save result to cache, but don't allow
|
||||
// the cache to grow out of control
|
||||
if len(fqdnSOACache) >= 1000 {
|
||||
for key := range fqdnSOACache {
|
||||
delete(fqdnSOACache, key)
|
||||
break
|
||||
}
|
||||
}
|
||||
fqdnSOACache[fqdn] = ent
|
||||
|
||||
return ent, nil
|
||||
}
|
||||
|
||||
// fetchSoaByFqdn walks up the labels of fqdn (most-specific suffix first),
// querying each suffix for a SOA record until one appears in the answer
// section. Suffixes whose answers contain a CNAME are skipped (CNAMEs
// cannot exist at a zone apex); NXDOMAIN moves on to the next shorter
// suffix; any other non-NOERROR rcode is returned as an error.
func fetchSoaByFqdn(fqdn string, nameservers []string) (*soaCacheEntry, error) {
	var err error
	var in *dns.Msg

	labelIndexes := dns.Split(fqdn)
	for _, index := range labelIndexes {
		domain := fqdn[index:]

		// query errors and nil responses are tolerated here; the final
		// error message below reports the last err/in observed
		in, err = dnsQuery(domain, dns.TypeSOA, nameservers, true)
		if err != nil {
			continue
		}
		if in == nil {
			continue
		}

		switch in.Rcode {
		case dns.RcodeSuccess:
			// Check if we got a SOA RR in the answer section
			if len(in.Answer) == 0 {
				continue
			}

			// CNAME records cannot/should not exist at the root of a zone.
			// So we skip a domain when a CNAME is found.
			if dnsMsgContainsCNAME(in) {
				continue
			}

			for _, ans := range in.Answer {
				if soa, ok := ans.(*dns.SOA); ok {
					return newSoaCacheEntry(soa), nil
				}
			}
		case dns.RcodeNameError:
			// NXDOMAIN: not a zone apex; try the next (shorter) suffix
		default:
			// Any response code other than NOERROR and NXDOMAIN is treated as error
			return nil, fmt.Errorf("unexpected response code '%s' for %s", dns.RcodeToString[in.Rcode], domain)
		}
	}

	return nil, fmt.Errorf("could not find the start of authority for %s%s", fqdn, formatDNSError(in, err))
}
|
||||
|
||||
// dnsMsgContainsCNAME checks for a CNAME answer in msg
|
||||
func dnsMsgContainsCNAME(msg *dns.Msg) bool {
|
||||
for _, ans := range msg.Answer {
|
||||
if _, ok := ans.(*dns.CNAME); ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) (*dns.Msg, error) {
|
||||
m := createDNSMsg(fqdn, rtype, recursive)
|
||||
var in *dns.Msg
|
||||
var err error
|
||||
for _, ns := range nameservers {
|
||||
in, err = sendDNSQuery(m, ns)
|
||||
if err == nil && len(in.Answer) > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return in, err
|
||||
}
|
||||
|
||||
func createDNSMsg(fqdn string, rtype uint16, recursive bool) *dns.Msg {
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion(fqdn, rtype)
|
||||
|
||||
// See: https://caddy.community/t/hard-time-getting-a-response-on-a-dns-01-challenge/15721/16
|
||||
m.SetEdns0(1232, false)
|
||||
if !recursive {
|
||||
m.RecursionDesired = false
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func sendDNSQuery(m *dns.Msg, ns string) (*dns.Msg, error) {
|
||||
udp := &dns.Client{Net: "udp", Timeout: dnsTimeout}
|
||||
in, _, err := udp.Exchange(m, ns)
|
||||
// two kinds of errors we can handle by retrying with TCP:
|
||||
// truncation and timeout; see https://github.com/caddyserver/caddy/issues/3639
|
||||
truncated := in != nil && in.Truncated
|
||||
timeoutErr := err != nil && strings.Contains(err.Error(), "timeout")
|
||||
if truncated || timeoutErr {
|
||||
tcp := &dns.Client{Net: "tcp", Timeout: dnsTimeout}
|
||||
in, _, err = tcp.Exchange(m, ns)
|
||||
}
|
||||
return in, err
|
||||
}
|
||||
|
||||
func formatDNSError(msg *dns.Msg, err error) string {
|
||||
var parts []string
|
||||
if msg != nil {
|
||||
parts = append(parts, dns.RcodeToString[msg.Rcode])
|
||||
}
|
||||
if err != nil {
|
||||
parts = append(parts, err.Error())
|
||||
}
|
||||
if len(parts) > 0 {
|
||||
return ": " + strings.Join(parts, " ")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// soaCacheEntry holds a cached SOA record (only selected fields).
type soaCacheEntry struct {
	zone      string    // zone apex (a domain name)
	primaryNs string    // primary nameserver for the zone apex
	expires   time.Time // time when this cache entry should be evicted
}
|
||||
|
||||
func newSoaCacheEntry(soa *dns.SOA) *soaCacheEntry {
|
||||
return &soaCacheEntry{
|
||||
zone: soa.Hdr.Name,
|
||||
primaryNs: soa.Ns,
|
||||
expires: time.Now().Add(time.Duration(soa.Refresh) * time.Second),
|
||||
}
|
||||
}
|
||||
|
||||
// isExpired checks whether a cache entry should be considered expired.
|
||||
func (cache *soaCacheEntry) isExpired() bool {
|
||||
return time.Now().After(cache.expires)
|
||||
}
|
||||
|
||||
// systemOrDefaultNameservers attempts to get system nameservers from the
|
||||
// resolv.conf file given by path before falling back to hard-coded defaults.
|
||||
func systemOrDefaultNameservers(path string, defaults []string) []string {
|
||||
config, err := dns.ClientConfigFromFile(path)
|
||||
if err != nil || len(config.Servers) == 0 {
|
||||
return defaults
|
||||
}
|
||||
return config.Servers
|
||||
}
|
||||
|
||||
// populateNameserverPorts ensures that every nameserver in servers carries
// a port, appending the default DNS port 53 where one is missing. The slice
// is modified in place.
func populateNameserverPorts(servers []string) {
	for i, server := range servers {
		if _, port, _ := net.SplitHostPort(server); port == "" {
			servers[i] = net.JoinHostPort(server, "53")
		}
	}
}
|
||||
|
||||
// checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers.
|
||||
func checkDNSPropagation(fqdn, value string, resolvers []string) (bool, error) {
|
||||
if !strings.HasSuffix(fqdn, ".") {
|
||||
fqdn += "."
|
||||
}
|
||||
|
||||
// Initial attempt to resolve at the recursive NS
|
||||
r, err := dnsQuery(fqdn, dns.TypeTXT, resolvers, true)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if r.Rcode == dns.RcodeSuccess {
|
||||
fqdn = updateDomainWithCName(r, fqdn)
|
||||
}
|
||||
|
||||
authoritativeNss, err := lookupNameservers(fqdn, resolvers)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return checkAuthoritativeNss(fqdn, value, authoritativeNss)
|
||||
}
|
||||
|
||||
// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record.
|
||||
func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) {
|
||||
for _, ns := range nameservers {
|
||||
r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if r.Rcode != dns.RcodeSuccess {
|
||||
if r.Rcode == dns.RcodeNameError {
|
||||
// if Present() succeeded, then it must show up eventually, or else
|
||||
// something is really broken in the DNS provider or their API;
|
||||
// no need for error here, simply have the caller try again
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("NS %s returned %s for %s", ns, dns.RcodeToString[r.Rcode], fqdn)
|
||||
}
|
||||
|
||||
var found bool
|
||||
for _, rr := range r.Answer {
|
||||
if txt, ok := rr.(*dns.TXT); ok {
|
||||
record := strings.Join(txt.Txt, "")
|
||||
if record == value {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// lookupNameservers returns the authoritative nameservers for the given fqdn.
|
||||
func lookupNameservers(fqdn string, resolvers []string) ([]string, error) {
|
||||
var authoritativeNss []string
|
||||
|
||||
zone, err := findZoneByFQDN(fqdn, resolvers)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not determine the zone: %w", err)
|
||||
}
|
||||
|
||||
r, err := dnsQuery(zone, dns.TypeNS, resolvers, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, rr := range r.Answer {
|
||||
if ns, ok := rr.(*dns.NS); ok {
|
||||
authoritativeNss = append(authoritativeNss, strings.ToLower(ns.Ns))
|
||||
}
|
||||
}
|
||||
|
||||
if len(authoritativeNss) > 0 {
|
||||
return authoritativeNss, nil
|
||||
}
|
||||
return nil, errors.New("could not determine authoritative nameservers")
|
||||
}
|
||||
|
||||
// Update FQDN with CNAME if any
|
||||
func updateDomainWithCName(r *dns.Msg, fqdn string) string {
|
||||
for _, rr := range r.Answer {
|
||||
if cn, ok := rr.(*dns.CNAME); ok {
|
||||
if cn.Hdr.Name == fqdn {
|
||||
return cn.Target
|
||||
}
|
||||
}
|
||||
}
|
||||
return fqdn
|
||||
}
|
||||
|
||||
// recursiveNameservers are used to pre-check DNS propagation. It
|
||||
// picks user-configured nameservers (custom) OR the defaults
|
||||
// obtained from resolv.conf and defaultNameservers if none is
|
||||
// configured and ensures that all server addresses have a port value.
|
||||
func recursiveNameservers(custom []string) []string {
|
||||
var servers []string
|
||||
if len(custom) == 0 {
|
||||
servers = systemOrDefaultNameservers(defaultResolvConf, defaultNameservers)
|
||||
} else {
|
||||
servers = make([]string, len(custom))
|
||||
copy(servers, custom)
|
||||
}
|
||||
populateNameserverPorts(servers)
|
||||
return servers
|
||||
}
|
||||
|
||||
// defaultNameservers are the hard-coded recursive resolvers used when none
// are configured and resolv.conf yields nothing usable (Google, Cloudflare).
var defaultNameservers = []string{
	"8.8.8.8:53",
	"8.8.4.4:53",
	"1.1.1.1:53",
	"1.0.0.1:53",
}

// dnsTimeout is the time budget for a single DNS exchange.
var dnsTimeout = 10 * time.Second

// fqdnSOACache caches SOA lookups by FQDN; all access must hold fqdnSOACacheMu.
var (
	fqdnSOACache   = map[string]*soaCacheEntry{}
	fqdnSOACacheMu sync.Mutex
)

// defaultResolvConf is the conventional system resolver configuration file.
const defaultResolvConf = "/etc/resolv.conf"
|
||||
404
vendor/github.com/caddyserver/certmagic/filestorage.go
generated
vendored
Normal file
404
vendor/github.com/caddyserver/certmagic/filestorage.go
generated
vendored
Normal file
@ -0,0 +1,404 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FileStorage facilitates forming file paths derived from a root
// directory. It is used to get file paths in a consistent,
// cross-platform way of persisting ACME assets on the file system.
// The presence of a lock file for a given key indicates a lock
// is held and is thus unavailable.
//
// Locks are created atomically by relying on the file system to
// enforce the O_EXCL flag. Acquirers that are forcefully terminated
// will not have a chance to clean up their locks before they exit,
// so locks may become stale. That is why, while a lock is actively
// held, the contents of the lockfile are updated with the current
// timestamp periodically. If another instance tries to acquire the
// lock but fails, it can see if the timestamp within is still fresh.
// If so, it patiently waits by polling occasionally. Otherwise,
// the stale lockfile is deleted, essentially forcing an unlock.
//
// While locking is atomic, unlocking is not perfectly atomic. File
// systems offer native atomic operations when creating files, but
// not necessarily when deleting them. It is theoretically possible
// for two instances to discover the same stale lock and both proceed
// to delete it, but if one instance is able to delete the lockfile
// and create a new one before the other one calls delete, then the
// new lock file created by the first instance will get deleted by
// mistake. This does mean that mutual exclusion is not guaranteed
// to be perfectly enforced in the presence of stale locks. One
// alternative is to lock the unlock operation by using ".unlock"
// files; and we did this for some time, but those files themselves
// may become stale, leading applications into infinite loops if
// they always expect the unlock file to be deleted by the instance
// that created it. We instead prefer the simpler solution that
// implies imperfect mutual exclusion if locks become stale, but
// that is probably less severe a consequence than infinite loops.
//
// See https://github.com/caddyserver/caddy/issues/4448 for discussion.
// See commit 468bfd25e452196b140148928cdd1f1a2285ae4b for where we
// switched away from using .unlock files.
type FileStorage struct {
	Path string // root directory on disk under which all assets are stored
}
|
||||
|
||||
// Exists returns true if key exists in s.
|
||||
func (s *FileStorage) Exists(_ context.Context, key string) bool {
|
||||
_, err := os.Stat(s.Filename(key))
|
||||
return !errors.Is(err, fs.ErrNotExist)
|
||||
}
|
||||
|
||||
// Store saves value at key.
|
||||
func (s *FileStorage) Store(_ context.Context, key string, value []byte) error {
|
||||
filename := s.Filename(key)
|
||||
err := os.MkdirAll(filepath.Dir(filename), 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(filename, value, 0600)
|
||||
}
|
||||
|
||||
// Load retrieves the value at key.
// The error is whatever os.ReadFile reports (e.g. fs.ErrNotExist
// when the key has never been stored).
func (s *FileStorage) Load(_ context.Context, key string) ([]byte, error) {
	return os.ReadFile(s.Filename(key))
}
|
||||
|
||||
// Delete deletes the value at key.
// NOTE(review): os.Remove fails for non-empty directories, so this
// deletes terminal (file) keys only — confirm callers expect that.
func (s *FileStorage) Delete(_ context.Context, key string) error {
	return os.Remove(s.Filename(key))
}
|
||||
|
||||
// List returns all keys that match prefix.
|
||||
func (s *FileStorage) List(ctx context.Context, prefix string, recursive bool) ([]string, error) {
|
||||
var keys []string
|
||||
walkPrefix := s.Filename(prefix)
|
||||
|
||||
err := filepath.Walk(walkPrefix, func(fpath string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info == nil {
|
||||
return fmt.Errorf("%s: file info is nil", fpath)
|
||||
}
|
||||
if fpath == walkPrefix {
|
||||
return nil
|
||||
}
|
||||
if ctxErr := ctx.Err(); ctxErr != nil {
|
||||
return ctxErr
|
||||
}
|
||||
|
||||
suffix, err := filepath.Rel(walkPrefix, fpath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: could not make path relative: %v", fpath, err)
|
||||
}
|
||||
keys = append(keys, path.Join(prefix, suffix))
|
||||
|
||||
if !recursive && info.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return keys, err
|
||||
}
|
||||
|
||||
// Stat returns information about key.
|
||||
func (s *FileStorage) Stat(_ context.Context, key string) (KeyInfo, error) {
|
||||
fi, err := os.Stat(s.Filename(key))
|
||||
if err != nil {
|
||||
return KeyInfo{}, err
|
||||
}
|
||||
return KeyInfo{
|
||||
Key: key,
|
||||
Modified: fi.ModTime(),
|
||||
Size: fi.Size(),
|
||||
IsTerminal: !fi.IsDir(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Filename returns the key as a path on the file
// system prefixed by s.Path. Slashes in the key are
// converted to the OS path separator.
func (s *FileStorage) Filename(key string) string {
	return filepath.Join(s.Path, filepath.FromSlash(key))
}
|
||||
|
||||
// Lock obtains a lock named by the given key. It blocks
// until the lock can be obtained or an error is returned.
// Acquisition relies on atomic O_EXCL file creation; on contention it
// inspects the existing lock file's timestamps and either waits (fresh
// lock), removes it (stale lock), or retries (file just vanished).
func (s *FileStorage) Lock(ctx context.Context, key string) error {
	filename := s.lockFilename(key)

	for {
		err := createLockfile(filename)
		if err == nil {
			// got the lock, yay
			return nil
		}
		if !os.IsExist(err) {
			// unexpected error
			return fmt.Errorf("creating lock file: %v", err)
		}

		// lock file already exists

		// Read the holder's metadata; if the file was removed between the
		// create attempt and this Open, meta stays zero and the switch below
		// retries via the os.IsNotExist case.
		var meta lockMeta
		f, err := os.Open(filename)
		if err == nil {
			err2 := json.NewDecoder(f).Decode(&meta)
			f.Close()
			if err2 != nil {
				return fmt.Errorf("decoding lockfile contents: %w", err2)
			}
		}

		switch {
		case os.IsNotExist(err):
			// must have just been removed; try again to create it
			continue

		case err != nil:
			// unexpected error
			return fmt.Errorf("accessing lock file: %v", err)

		case fileLockIsStale(meta):
			// lock file is stale - delete it and try again to obtain lock
			// (NOTE: locking becomes imperfect if lock files are stale; known solutions
			// either have potential to cause infinite loops, as in caddyserver/caddy#4448,
			// or must give up on perfect mutual exclusivity; however, these cases are rare,
			// so we prefer the simpler solution that avoids infinite loops)
			log.Printf("[INFO][%s] Lock for '%s' is stale (created: %s, last update: %s); removing then retrying: %s",
				s, key, meta.Created, meta.Updated, filename)
			if err = os.Remove(filename); err != nil { // hopefully we can replace the lock file quickly!
				if !errors.Is(err, fs.ErrNotExist) {
					return fmt.Errorf("unable to delete stale lock; deadlocked: %w", err)
				}
			}
			continue

		default:
			// lockfile exists and is not stale;
			// just wait a moment and try again,
			// or return if context cancelled
			select {
			case <-time.After(fileLockPollInterval):
			case <-ctx.Done():
				return ctx.Err()
			}
		}
	}
}
|
||||
|
||||
// Unlock releases the lock for name.
// It simply removes the lock file; the freshness goroutine started by
// createLockfile notices the file is gone and terminates on its own.
func (s *FileStorage) Unlock(_ context.Context, key string) error {
	return os.Remove(s.lockFilename(key))
}
|
||||
|
||||
// String returns a short human-readable description of this storage,
// including its root path (used in log messages).
func (s *FileStorage) String() string {
	return "FileStorage:" + s.Path
}
|
||||
|
||||
// lockFilename returns the path of the lock file for key,
// using a filesystem-safe encoding of the key.
func (s *FileStorage) lockFilename(key string) string {
	return filepath.Join(s.lockDir(), StorageKeys.Safe(key)+".lock")
}
|
||||
|
||||
// lockDir returns the directory in which lock files are kept.
func (s *FileStorage) lockDir() string {
	return filepath.Join(s.Path, "locks")
}
|
||||
|
||||
func fileLockIsStale(meta lockMeta) bool {
|
||||
ref := meta.Updated
|
||||
if ref.IsZero() {
|
||||
ref = meta.Created
|
||||
}
|
||||
// since updates are exactly every lockFreshnessInterval,
|
||||
// add a grace period for the actual file read+write to
|
||||
// take place
|
||||
return time.Since(ref) > lockFreshnessInterval*2
|
||||
}
|
||||
|
||||
// createLockfile atomically creates the lockfile
|
||||
// identified by filename. A successfully created
|
||||
// lockfile should be removed with removeLockfile.
|
||||
func createLockfile(filename string) error {
|
||||
err := atomicallyCreateFile(filename, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go keepLockfileFresh(filename)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// keepLockfileFresh continuously updates the lock file
|
||||
// at filename with the current timestamp. It stops
|
||||
// when the file disappears (happy path = lock released),
|
||||
// or when there is an error at any point. Since it polls
|
||||
// every lockFreshnessInterval, this function might
|
||||
// not terminate until up to lockFreshnessInterval after
|
||||
// the lock is released.
|
||||
func keepLockfileFresh(filename string) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
buf := make([]byte, stackTraceBufferSize)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
log.Printf("panic: active locking: %v\n%s", err, buf)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
time.Sleep(lockFreshnessInterval)
|
||||
done, err := updateLockfileFreshness(filename)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] Keeping lock file fresh: %v - terminating lock maintenance (lockfile: %s)", err, filename)
|
||||
return
|
||||
}
|
||||
if done {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateLockfileFreshness updates the lock file at filename
|
||||
// with the current timestamp. It returns true if the parent
|
||||
// loop can terminate (i.e. no more need to update the lock).
|
||||
func updateLockfileFreshness(filename string) (bool, error) {
|
||||
f, err := os.OpenFile(filename, os.O_RDWR, 0644)
|
||||
if os.IsNotExist(err) {
|
||||
return true, nil // lock released
|
||||
}
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// read contents
|
||||
metaBytes, err := io.ReadAll(io.LimitReader(f, 2048))
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
var meta lockMeta
|
||||
if err := json.Unmarshal(metaBytes, &meta); err != nil {
|
||||
return true, err
|
||||
}
|
||||
|
||||
// truncate file and reset I/O offset to beginning
|
||||
if err := f.Truncate(0); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if _, err := f.Seek(0, io.SeekStart); err != nil {
|
||||
return true, err
|
||||
}
|
||||
|
||||
// write updated timestamp
|
||||
meta.Updated = time.Now()
|
||||
if err = json.NewEncoder(f).Encode(meta); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// sync to device; we suspect that sometimes file systems
|
||||
// (particularly AWS EFS) don't do this on their own,
|
||||
// leaving the file empty when we close it; see
|
||||
// https://github.com/caddyserver/caddy/issues/3954
|
||||
return false, f.Sync()
|
||||
}
|
||||
|
||||
// atomicallyCreateFile atomically creates the file
|
||||
// identified by filename if it doesn't already exist.
|
||||
func atomicallyCreateFile(filename string, writeLockInfo bool) error {
|
||||
// no need to check this error, we only really care about the file creation error
|
||||
_ = os.MkdirAll(filepath.Dir(filename), 0700)
|
||||
f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
if writeLockInfo {
|
||||
now := time.Now()
|
||||
meta := lockMeta{
|
||||
Created: now,
|
||||
Updated: now,
|
||||
}
|
||||
if err := json.NewEncoder(f).Encode(meta); err != nil {
|
||||
return err
|
||||
}
|
||||
// see https://github.com/caddyserver/caddy/issues/3954
|
||||
if err := f.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// homeDir returns the best guess of the current user's home
// directory from environment variables. If unknown, "." (the
// current directory) is returned instead.
func homeDir() string {
	home := os.Getenv("HOME")
	if home == "" && runtime.GOOS == "windows" {
		drive, hpath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH")
		home = drive + hpath
		if drive == "" || hpath == "" {
			home = os.Getenv("USERPROFILE")
		}
	}
	if home == "" {
		// last resort: the current directory
		home = "."
	}
	return home
}
|
||||
|
||||
func dataDir() string {
|
||||
baseDir := filepath.Join(homeDir(), ".local", "share")
|
||||
if xdgData := os.Getenv("XDG_DATA_HOME"); xdgData != "" {
|
||||
baseDir = xdgData
|
||||
}
|
||||
return filepath.Join(baseDir, "certmagic")
|
||||
}
|
||||
|
||||
// lockMeta is written into a lock file.
// Both timestamps are JSON-encoded; Updated is refreshed periodically
// by keepLockfileFresh while the lock is held.
type lockMeta struct {
	Created time.Time `json:"created,omitempty"`
	Updated time.Time `json:"updated,omitempty"`
}

// lockFreshnessInterval is how often to update
// a lock's timestamp. Locks with a timestamp
// more than this duration in the past (plus a
// grace period for latency) can be considered
// stale.
const lockFreshnessInterval = 5 * time.Second

// fileLockPollInterval is how frequently
// to check the existence of a lock file
const fileLockPollInterval = 1 * time.Second

// Interface guard: FileStorage must implement Storage.
var _ Storage = (*FileStorage)(nil)
|
||||
817
vendor/github.com/caddyserver/certmagic/handshake.go
generated
vendored
Normal file
817
vendor/github.com/caddyserver/certmagic/handshake.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
124
vendor/github.com/caddyserver/certmagic/httphandler.go
generated
vendored
Normal file
124
vendor/github.com/caddyserver/certmagic/httphandler.go
generated
vendored
Normal file
@ -0,0 +1,124 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/mholt/acmez/acme"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// HTTPChallengeHandler wraps h in a handler that can solve the ACME
|
||||
// HTTP challenge. cfg is required, and it must have a certificate
|
||||
// cache backed by a functional storage facility, since that is where
|
||||
// the challenge state is stored between initiation and solution.
|
||||
//
|
||||
// If a request is not an ACME HTTP challenge, h will be invoked.
|
||||
func (am *ACMEIssuer) HTTPChallengeHandler(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if am.HandleHTTPChallenge(w, r) {
|
||||
return
|
||||
}
|
||||
h.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// HandleHTTPChallenge uses am to solve challenge requests from an ACME
|
||||
// server that were initiated by this instance or any other instance in
|
||||
// this cluster (being, any instances using the same storage am does).
|
||||
//
|
||||
// If the HTTP challenge is disabled, this function is a no-op.
|
||||
//
|
||||
// If am is nil or if am does not have a certificate cache backed by
|
||||
// usable storage, solving the HTTP challenge will fail.
|
||||
//
|
||||
// It returns true if it handled the request; if so, the response has
|
||||
// already been written. If false is returned, this call was a no-op and
|
||||
// the request has not been handled.
|
||||
func (am *ACMEIssuer) HandleHTTPChallenge(w http.ResponseWriter, r *http.Request) bool {
|
||||
if am == nil {
|
||||
return false
|
||||
}
|
||||
if am.DisableHTTPChallenge {
|
||||
return false
|
||||
}
|
||||
if !LooksLikeHTTPChallenge(r) {
|
||||
return false
|
||||
}
|
||||
return am.distributedHTTPChallengeSolver(w, r)
|
||||
}
|
||||
|
||||
// distributedHTTPChallengeSolver checks to see if this challenge
|
||||
// request was initiated by this or another instance which uses the
|
||||
// same storage as am does, and attempts to complete the challenge for
|
||||
// it. It returns true if the request was handled; false otherwise.
|
||||
func (am *ACMEIssuer) distributedHTTPChallengeSolver(w http.ResponseWriter, r *http.Request) bool {
|
||||
if am == nil {
|
||||
return false
|
||||
}
|
||||
host := hostOnly(r.Host)
|
||||
chalInfo, distributed, err := am.config.getChallengeInfo(r.Context(), host)
|
||||
if err != nil {
|
||||
if am.Logger != nil {
|
||||
am.Logger.Error("looking up info for HTTP challenge",
|
||||
zap.String("host", host),
|
||||
zap.Error(err))
|
||||
}
|
||||
return false
|
||||
}
|
||||
return solveHTTPChallenge(am.Logger, w, r, chalInfo.Challenge, distributed)
|
||||
}
|
||||
|
||||
// solveHTTPChallenge solves the HTTP challenge using the given challenge information.
|
||||
// If the challenge is being solved in a distributed fahsion, set distributed to true for logging purposes.
|
||||
// It returns true the properties of the request check out in relation to the HTTP challenge.
|
||||
// Most of this code borrowed from xenolf's built-in HTTP-01 challenge solver in March 2018.
|
||||
func solveHTTPChallenge(logger *zap.Logger, w http.ResponseWriter, r *http.Request, challenge acme.Challenge, distributed bool) bool {
|
||||
challengeReqPath := challenge.HTTP01ResourcePath()
|
||||
if r.URL.Path == challengeReqPath &&
|
||||
strings.EqualFold(hostOnly(r.Host), challenge.Identifier.Value) && // mitigate DNS rebinding attacks
|
||||
r.Method == "GET" {
|
||||
w.Header().Add("Content-Type", "text/plain")
|
||||
w.Write([]byte(challenge.KeyAuthorization))
|
||||
r.Close = true
|
||||
if logger != nil {
|
||||
logger.Info("served key authentication",
|
||||
zap.String("identifier", challenge.Identifier.Value),
|
||||
zap.String("challenge", "http-01"),
|
||||
zap.String("remote", r.RemoteAddr),
|
||||
zap.Bool("distributed", distributed))
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SolveHTTPChallenge solves the HTTP challenge. It should be used only on HTTP requests that are
// from ACME servers trying to validate an identifier (i.e. LooksLikeHTTPChallenge() == true). It
// returns true if the request criteria check out and it answered with key authentication, in which
// case no further handling of the request is necessary.
// It is the exported, non-distributed variant of solveHTTPChallenge.
func SolveHTTPChallenge(logger *zap.Logger, w http.ResponseWriter, r *http.Request, challenge acme.Challenge) bool {
	return solveHTTPChallenge(logger, w, r, challenge, false)
}
|
||||
|
||||
// LooksLikeHTTPChallenge returns true if r looks like an ACME
|
||||
// HTTP challenge request from an ACME server.
|
||||
func LooksLikeHTTPChallenge(r *http.Request) bool {
|
||||
return r.Method == "GET" && strings.HasPrefix(r.URL.Path, challengeBasePath)
|
||||
}
|
||||
|
||||
const challengeBasePath = "/.well-known/acme-challenge"
|
||||
678
vendor/github.com/caddyserver/certmagic/maintain.go
generated
vendored
Normal file
678
vendor/github.com/caddyserver/certmagic/maintain.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
233
vendor/github.com/caddyserver/certmagic/ocsp.go
generated
vendored
Normal file
233
vendor/github.com/caddyserver/certmagic/ocsp.go
generated
vendored
Normal file
@ -0,0 +1,233 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/ocsp"
|
||||
)
|
||||
|
||||
// stapleOCSP staples OCSP information to cert for hostname name.
// If you have it handy, you should pass in the PEM-encoded certificate
// bundle; otherwise the DER-encoded cert will have to be PEM-encoded.
// If you don't have the PEM blocks already, just pass in nil.
//
// If successful, the OCSP response will be set to cert's ocsp field,
// regardless of the OCSP status. It is only stapled, however, if the
// status is Good.
//
// Errors here are not necessarily fatal, it could just be that the
// certificate doesn't have an issuer URL.
func stapleOCSP(ctx context.Context, ocspConfig OCSPConfig, storage Storage, cert *Certificate, pemBundle []byte) error {
	if ocspConfig.DisableStapling {
		return nil
	}

	if pemBundle == nil {
		// we need a PEM encoding only for some function calls below
		bundle := new(bytes.Buffer)
		for _, derBytes := range cert.Certificate.Certificate {
			pem.Encode(bundle, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
		}
		pemBundle = bundle.Bytes()
	}

	var ocspBytes []byte
	var ocspResp *ocsp.Response
	var ocspErr error
	var gotNewOCSP bool

	// First try to load OCSP staple from storage and see if
	// we can still use it.
	ocspStapleKey := StorageKeys.OCSPStaple(cert, pemBundle)
	cachedOCSP, err := storage.Load(ctx, ocspStapleKey)
	if err == nil {
		resp, err := ocsp.ParseResponse(cachedOCSP, nil)
		if err == nil {
			if freshOCSP(resp) {
				// staple is still fresh; use it
				ocspBytes = cachedOCSP
				ocspResp = resp
			}
		} else {
			// invalid contents; delete the file
			// (we do this independently of the maintenance routine because
			// in this case we know for sure this should be a staple file
			// because we loaded it by name, whereas the maintenance routine
			// just iterates the list of files, even if somehow a non-staple
			// file gets in the folder. in this case we are sure it is corrupt.)
			err := storage.Delete(ctx, ocspStapleKey)
			if err != nil {
				log.Printf("[WARNING] Unable to delete invalid OCSP staple file: %v", err)
			}
		}
	}

	// If we couldn't get a fresh staple by reading the cache,
	// then we need to request it from the OCSP responder
	if ocspResp == nil || len(ocspBytes) == 0 {
		ocspBytes, ocspResp, ocspErr = getOCSPForCert(ocspConfig, pemBundle)
		if ocspErr != nil {
			// An error here is not a problem because a certificate may simply
			// not contain a link to an OCSP server. But we should log it anyway.
			// There's nothing else we can do to get OCSP for this certificate,
			// so we can return here with the error.
			return fmt.Errorf("no OCSP stapling for %v: %v", cert.Names, ocspErr)
		}
		gotNewOCSP = true
	}

	if ocspResp.NextUpdate.After(cert.Leaf.NotAfter) {
		// uh oh, this OCSP response expires AFTER the certificate does, that's kinda bogus.
		// it was the reason a lot of Symantec-validated sites (not Caddy) went down
		// in October 2017. https://twitter.com/mattiasgeniar/status/919432824708648961
		return fmt.Errorf("invalid: OCSP response for %v valid after certificate expiration (%s)",
			cert.Names, cert.Leaf.NotAfter.Sub(ocspResp.NextUpdate))
	}

	// Attach the latest OCSP response to the certificate; this is NOT the same
	// as stapling it, which we do below only if the status is Good, but it is
	// useful to keep with the cert in order to act on it later (like if Revoked).
	cert.ocsp = ocspResp

	// If the response is good, staple it to the certificate. If the OCSP
	// response was not loaded from storage, we persist it for next time.
	if ocspResp.Status == ocsp.Good {
		cert.Certificate.OCSPStaple = ocspBytes
		if gotNewOCSP {
			err := storage.Store(ctx, ocspStapleKey, ocspBytes)
			if err != nil {
				return fmt.Errorf("unable to write OCSP staple file for %v: %v", cert.Names, err)
			}
		}
	}

	return nil
}
|
||||
|
||||
// getOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response,
|
||||
// the parsed response, and an error, if any. The returned []byte can be passed directly
|
||||
// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the
|
||||
// issued certificate, this function will try to get the issuer certificate from the
|
||||
// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return
|
||||
// values are nil, the OCSP status may be assumed OCSPUnknown.
|
||||
//
|
||||
// Borrowed from xenolf.
|
||||
func getOCSPForCert(ocspConfig OCSPConfig, bundle []byte) ([]byte, *ocsp.Response, error) {
|
||||
// TODO: Perhaps this should be synchronized too, with a Locker?
|
||||
|
||||
certificates, err := parseCertsFromPEMBundle(bundle)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// We expect the certificate slice to be ordered downwards the chain.
|
||||
// SRV CRT -> CA. We need to pull the leaf and issuer certs out of it,
|
||||
// which should always be the first two certificates. If there's no
|
||||
// OCSP server listed in the leaf cert, there's nothing to do. And if
|
||||
// we have only one certificate so far, we need to get the issuer cert.
|
||||
issuedCert := certificates[0]
|
||||
if len(issuedCert.OCSPServer) == 0 {
|
||||
return nil, nil, fmt.Errorf("no OCSP server specified in certificate")
|
||||
}
|
||||
|
||||
// apply override for responder URL
|
||||
respURL := issuedCert.OCSPServer[0]
|
||||
if len(ocspConfig.ResponderOverrides) > 0 {
|
||||
if override, ok := ocspConfig.ResponderOverrides[respURL]; ok {
|
||||
respURL = override
|
||||
}
|
||||
}
|
||||
if respURL == "" {
|
||||
return nil, nil, fmt.Errorf("override disables querying OCSP responder: %v", issuedCert.OCSPServer[0])
|
||||
}
|
||||
|
||||
if len(certificates) == 1 {
|
||||
if len(issuedCert.IssuingCertificateURL) == 0 {
|
||||
return nil, nil, fmt.Errorf("no URL to issuing certificate")
|
||||
}
|
||||
|
||||
resp, err := http.Get(issuedCert.IssuingCertificateURL[0])
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("getting issuer certificate: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
issuerBytes, err := io.ReadAll(io.LimitReader(resp.Body, 1024*1024))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("reading issuer certificate: %v", err)
|
||||
}
|
||||
|
||||
issuerCert, err := x509.ParseCertificate(issuerBytes)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("parsing issuer certificate: %v", err)
|
||||
}
|
||||
|
||||
// insert it into the slice on position 0;
|
||||
// we want it ordered right SRV CRT -> CA
|
||||
certificates = append(certificates, issuerCert)
|
||||
}
|
||||
|
||||
issuerCert := certificates[1]
|
||||
|
||||
ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("creating OCSP request: %v", err)
|
||||
}
|
||||
|
||||
reader := bytes.NewReader(ocspReq)
|
||||
req, err := http.Post(respURL, "application/ocsp-request", reader)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("making OCSP request: %v", err)
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
ocspResBytes, err := io.ReadAll(io.LimitReader(req.Body, 1024*1024))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("reading OCSP response: %v", err)
|
||||
}
|
||||
|
||||
ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("parsing OCSP response: %v", err)
|
||||
}
|
||||
|
||||
return ocspResBytes, ocspRes, nil
|
||||
}
|
||||
|
||||
// freshOCSP returns true if resp is still fresh,
|
||||
// meaning that it is not expedient to get an
|
||||
// updated response from the OCSP server.
|
||||
func freshOCSP(resp *ocsp.Response) bool {
|
||||
nextUpdate := resp.NextUpdate
|
||||
// If there is an OCSP responder certificate, and it expires before the
|
||||
// OCSP response, use its expiration date as the end of the OCSP
|
||||
// response's validity period.
|
||||
if resp.Certificate != nil && resp.Certificate.NotAfter.Before(nextUpdate) {
|
||||
nextUpdate = resp.Certificate.NotAfter
|
||||
}
|
||||
// start checking OCSP staple about halfway through validity period for good measure
|
||||
refreshTime := resp.ThisUpdate.Add(nextUpdate.Sub(resp.ThisUpdate) / 2)
|
||||
return time.Now().Before(refreshTime)
|
||||
}
|
||||
243
vendor/github.com/caddyserver/certmagic/ratelimiter.go
generated
vendored
Normal file
243
vendor/github.com/caddyserver/certmagic/ratelimiter.go
generated
vendored
Normal file
@ -0,0 +1,243 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewRateLimiter returns a rate limiter that allows up to maxEvents
// in a sliding window of size window. If maxEvents and window are
// both 0, or if maxEvents is non-zero and window is 0, rate limiting
// is disabled. This function panics if maxEvents is less than 0 or
// if maxEvents is 0 and window is non-zero, which is considered to be
// an invalid configuration, as it would never allow events.
func NewRateLimiter(maxEvents int, window time.Duration) *RingBufferRateLimiter {
	if maxEvents < 0 {
		panic("maxEvents cannot be less than zero")
	}
	if maxEvents == 0 && window != 0 {
		panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
	}
	rbrl := &RingBufferRateLimiter{
		window:  window,
		ring:    make([]time.Time, maxEvents),
		started: make(chan struct{}),
		stopped: make(chan struct{}),
		ticket:  make(chan struct{}),
	}
	// The loop goroutine is the sole producer of tickets; Allow and
	// Wait only ever receive from r.ticket.
	go rbrl.loop()
	<-rbrl.started // make sure loop is ready to receive before we return
	return rbrl
}

// RingBufferRateLimiter uses a ring to enforce rate limits
// consisting of a maximum number of events within a single
// sliding window of a given duration. An empty value is
// not valid; use NewRateLimiter to get one.
type RingBufferRateLimiter struct {
	window  time.Duration
	ring    []time.Time // maxEvents == len(ring)
	cursor  int         // always points to the oldest timestamp
	mu      sync.Mutex  // protects ring, cursor, and window
	started chan struct{} // receives exactly one send when loop() is running
	stopped chan struct{} // closed by Stop() to terminate loop()
	ticket  chan struct{} // loop() sends one value per permitted event
}

// Stop cleans up r's scheduling goroutine.
// NOTE(review): calling Stop more than once panics (double close of
// r.stopped) — callers must invoke it at most once.
func (r *RingBufferRateLimiter) Stop() {
	close(r.stopped)
}

// loop is the scheduling goroutine: it repeatedly waits until the
// oldest recorded event falls outside the sliding window, then hands
// out one ticket via permit(). It runs until Stop() is called.
func (r *RingBufferRateLimiter) loop() {
	defer func() {
		if err := recover(); err != nil {
			buf := make([]byte, stackTraceBufferSize)
			buf = buf[:runtime.Stack(buf, false)]
			log.Printf("panic: ring buffer rate limiter: %v\n%s", err, buf)
		}
	}()

	for {
		// if we've been stopped, return
		select {
		case <-r.stopped:
			return
		default:
		}

		if len(r.ring) == 0 {
			if r.window == 0 {
				// rate limiting is disabled; always allow immediately
				r.permit()
				continue
			}
			panic("invalid configuration: maxEvents = 0 and window != 0 does not allow any events")
		}

		// wait until next slot is available or until we've been stopped;
		// r.ring[r.cursor] is the oldest event, so it expires first
		r.mu.Lock()
		then := r.ring[r.cursor].Add(r.window)
		r.mu.Unlock()
		waitDuration := time.Until(then)
		waitTimer := time.NewTimer(waitDuration)
		select {
		case <-waitTimer.C:
			r.permit()
		case <-r.stopped:
			waitTimer.Stop()
			return
		}
	}
}

// Allow returns true if the event is allowed to
// happen right now. It does not wait. If the event
// is allowed, a ticket is claimed.
func (r *RingBufferRateLimiter) Allow() bool {
	select {
	case <-r.ticket:
		return true
	default:
		// no ticket available right now; do not block
		return false
	}
}
|
||||
|
||||
// Wait blocks until the event is allowed to occur. It returns an
|
||||
// error if the context is cancelled.
|
||||
func (r *RingBufferRateLimiter) Wait(ctx context.Context) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return context.Canceled
|
||||
case <-r.ticket:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// MaxEvents returns the maximum number of events that
// are allowed within the sliding window.
func (r *RingBufferRateLimiter) MaxEvents() int {
	r.mu.Lock()
	defer r.mu.Unlock()
	return len(r.ring)
}

// SetMaxEvents changes the maximum number of events that are
// allowed in the sliding window. If the new limit is lower,
// the oldest events will be forgotten. If the new limit is
// higher, the window will suddenly have capacity for new
// reservations. It panics if maxEvents is 0 and window size
// is not zero.
func (r *RingBufferRateLimiter) SetMaxEvents(maxEvents int) {
	newRing := make([]time.Time, maxEvents)
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.window != 0 && maxEvents == 0 {
		panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
	}

	// only make the change if the new limit is different
	if maxEvents == len(r.ring) {
		return
	}

	// the new ring may be smaller; fast-forward to the
	// oldest timestamp that will be kept in the new
	// ring so the oldest ones are forgotten and the
	// newest ones will be remembered
	sizeDiff := len(r.ring) - maxEvents
	for i := 0; i < sizeDiff; i++ {
		r.advance()
	}

	if len(r.ring) > 0 {
		// copy timestamps into the new ring until we
		// have either copied all of them or have reached
		// the capacity of the new ring
		startCursor := r.cursor
		for i := 0; i < len(newRing); i++ {
			newRing[i] = r.ring[r.cursor]
			r.advance()
			if r.cursor == startCursor {
				// new ring is larger than old one;
				// "we've come full circle"
				break
			}
		}
	}

	// cursor resets to 0 because newRing was filled starting at index 0,
	// so index 0 again holds the oldest timestamp
	r.ring = newRing
	r.cursor = 0
}

// Window returns the size of the sliding window.
func (r *RingBufferRateLimiter) Window() time.Duration {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.window
}

// SetWindow changes r's sliding window duration to window.
// Goroutines that are already blocked on a call to Wait()
// will not be affected. It panics if window is non-zero
// but the max event limit is 0.
func (r *RingBufferRateLimiter) SetWindow(window time.Duration) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if window != 0 && len(r.ring) == 0 {
		panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
	}
	r.window = window
}

// permit allows one event through the throttle. This method
// blocks until a goroutine is waiting for a ticket or until
// the rate limiter is stopped.
func (r *RingBufferRateLimiter) permit() {
	for {
		select {
		case r.started <- struct{}{}:
			// notify parent goroutine that we've started; should
			// only happen once, before constructor returns
			continue
		case <-r.stopped:
			return
		case r.ticket <- struct{}{}:
			// a consumer took the ticket; record the event time so
			// the window math in loop() accounts for it
			r.mu.Lock()
			defer r.mu.Unlock()
			if len(r.ring) > 0 {
				r.ring[r.cursor] = time.Now()
				r.advance()
			}
			return
		}
	}
}

// advance moves the cursor to the next position.
// It is NOT safe for concurrent use, so it must
// be called inside a lock on r.mu.
func (r *RingBufferRateLimiter) advance() {
	r.cursor++
	if r.cursor >= len(r.ring) {
		// wrap around to the start of the ring
		r.cursor = 0
	}
}
|
||||
730
vendor/github.com/caddyserver/certmagic/solvers.go
generated
vendored
Normal file
730
vendor/github.com/caddyserver/certmagic/solvers.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
280
vendor/github.com/caddyserver/certmagic/storage.go
generated
vendored
Normal file
280
vendor/github.com/caddyserver/certmagic/storage.go
generated
vendored
Normal file
@ -0,0 +1,280 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Storage is a type that implements a key-value store.
// Keys are prefix-based, with forward slash '/' as separators
// and without a leading slash.
//
// Processes running in a cluster will wish to use the
// same Storage value (its implementation and configuration)
// in order to share certificates and other TLS resources
// with the cluster.
//
// The Load, Delete, List, and Stat methods should return
// fs.ErrNotExist if the key does not exist.
//
// Implementations of Storage must be safe for concurrent use
// and honor context cancellations.
type Storage interface {
	// Locker provides atomic synchronization
	// operations, making Storage safe to share.
	Locker

	// Store puts value at key.
	Store(ctx context.Context, key string, value []byte) error

	// Load retrieves the value at key.
	Load(ctx context.Context, key string) ([]byte, error)

	// Delete deletes key. An error should be
	// returned only if the key still exists
	// when the method returns.
	Delete(ctx context.Context, key string) error

	// Exists returns true if the key exists
	// and there was no error checking.
	Exists(ctx context.Context, key string) bool

	// List returns all keys that match prefix.
	// If recursive is true, non-terminal keys
	// will be enumerated (i.e. "directories"
	// should be walked); otherwise, only keys
	// prefixed exactly by prefix will be listed.
	List(ctx context.Context, prefix string, recursive bool) ([]string, error)

	// Stat returns information about key.
	Stat(ctx context.Context, key string) (KeyInfo, error)
}

// Locker facilitates synchronization of certificate tasks across
// machines and networks.
type Locker interface {
	// Lock acquires the lock for key, blocking until the lock
	// can be obtained or an error is returned. Note that, even
	// after acquiring a lock, an idempotent operation may have
	// already been performed by another process that acquired
	// the lock before - so always check to make sure idempotent
	// operations still need to be performed after acquiring the
	// lock.
	//
	// The actual implementation of obtaining of a lock must be
	// an atomic operation so that multiple Lock calls at the
	// same time always results in only one caller receiving the
	// lock at any given time.
	//
	// To prevent deadlocks, all implementations (where this concern
	// is relevant) should put a reasonable expiration on the lock in
	// case Unlock is unable to be called due to some sort of network
	// failure or system crash. Additionally, implementations should
	// honor context cancellation as much as possible (in case the
	// caller wishes to give up and free resources before the lock
	// can be obtained).
	Lock(ctx context.Context, key string) error

	// Unlock releases the lock for key. This method must ONLY be
	// called after a successful call to Lock, and only after the
	// critical section is finished, even if it errored or timed
	// out. Unlock cleans up any resources allocated during Lock.
	Unlock(ctx context.Context, key string) error
}

// KeyInfo holds information about a key in storage.
// Key and IsTerminal are required; Modified and Size
// are optional if the storage implementation is not
// able to get that information. Setting them will
// make certain operations more consistent or
// predictable, but it is not crucial to basic
// functionality.
type KeyInfo struct {
	Key        string
	Modified   time.Time
	Size       int64
	IsTerminal bool // false for keys that only contain other keys (like directories)
}

// storeTx stores all the values or none at all.
// On the first Store failure it best-effort deletes the keys already
// written (rollback errors are deliberately ignored) and returns the
// original error.
func storeTx(ctx context.Context, s Storage, all []keyValue) error {
	for i, kv := range all {
		err := s.Store(ctx, kv.key, kv.value)
		if err != nil {
			// roll back: remove the keys stored before the failure
			for j := i - 1; j >= 0; j-- {
				s.Delete(ctx, all[j].key)
			}
			return err
		}
	}
	return nil
}

// keyValue pairs a key and a value.
type keyValue struct {
	key   string
	value []byte
}
|
||||
|
||||
// KeyBuilder provides a namespace for methods that
// build keys and key prefixes, for addressing items
// in a Storage implementation.
type KeyBuilder struct{}

// CertsPrefix returns the storage key prefix for
// the given certificate issuer.
func (keys KeyBuilder) CertsPrefix(issuerKey string) string {
	return path.Join(prefixCerts, keys.Safe(issuerKey))
}

// CertsSitePrefix returns a key prefix for items associated with
// the site given by domain using the given issuer key.
func (keys KeyBuilder) CertsSitePrefix(issuerKey, domain string) string {
	return path.Join(keys.CertsPrefix(issuerKey), keys.Safe(domain))
}

// SiteCert returns the path to the certificate file for domain
// that is associated with the issuer with the given issuerKey.
func (keys KeyBuilder) SiteCert(issuerKey, domain string) string {
	safeDomain := keys.Safe(domain)
	return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".crt")
}

// SitePrivateKey returns the path to the private key file for domain
// that is associated with the certificate from the given issuer with
// the given issuerKey.
func (keys KeyBuilder) SitePrivateKey(issuerKey, domain string) string {
	safeDomain := keys.Safe(domain)
	return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".key")
}

// SiteMeta returns the path to the metadata file for domain that
// is associated with the certificate from the given issuer with
// the given issuerKey.
func (keys KeyBuilder) SiteMeta(issuerKey, domain string) string {
	safeDomain := keys.Safe(domain)
	return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".json")
}

// OCSPStaple returns a key for the OCSP staple associated
// with the given certificate. If you have the PEM bundle
// handy, pass that in to save an extra encoding step.
func (keys KeyBuilder) OCSPStaple(cert *Certificate, pemBundle []byte) string {
	var ocspFileName string
	if len(cert.Names) > 0 {
		// prefix the file name with the certificate's first name
		// for human readability; uniqueness comes from the hash
		firstName := keys.Safe(cert.Names[0])
		ocspFileName = firstName + "-"
	}
	ocspFileName += fastHash(pemBundle)
	return path.Join(prefixOCSP, ocspFileName)
}

// Safe standardizes and sanitizes str for use as
// a single component of a storage key. This method
// is idempotent.
func (keys KeyBuilder) Safe(str string) string {
	str = strings.ToLower(str)
	str = strings.TrimSpace(str)

	// replace a few specific characters
	repl := strings.NewReplacer(
		" ", "_",
		"+", "_plus_",
		"*", "wildcard_",
		":", "-",
		"..", "", // prevent directory traversal (regex allows single dots)
	)
	str = repl.Replace(str)

	// finally remove all non-word characters
	return safeKeyRE.ReplaceAllLiteralString(str, "")
}
|
||||
|
||||
// CleanUpOwnLocks immediately cleans up all
// current locks obtained by this process. Since
// this does not cancel the operations that
// the locks are synchronizing, this should be
// called only immediately before process exit.
// Errors are only reported if a logger is given.
func CleanUpOwnLocks(ctx context.Context, logger *zap.Logger) {
	locksMu.Lock()
	defer locksMu.Unlock()
	for lockKey, storage := range locks {
		err := storage.Unlock(ctx, lockKey)
		if err == nil {
			delete(locks, lockKey)
		} else if logger != nil {
			// the lock stays in the map; there is no retry here, so a
			// failed unlock is only surfaced to the operator via the log
			logger.Error("unable to clean up lock in storage backend",
				zap.Any("storage", storage),
				zap.String("lock_key", lockKey),
				zap.Error(err),
			)
		}
	}
}
|
||||
|
||||
func acquireLock(ctx context.Context, storage Storage, lockKey string) error {
|
||||
err := storage.Lock(ctx, lockKey)
|
||||
if err == nil {
|
||||
locksMu.Lock()
|
||||
locks[lockKey] = storage
|
||||
locksMu.Unlock()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func releaseLock(ctx context.Context, storage Storage, lockKey string) error {
|
||||
err := storage.Unlock(ctx, lockKey)
|
||||
if err == nil {
|
||||
locksMu.Lock()
|
||||
delete(locks, lockKey)
|
||||
locksMu.Unlock()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// locks stores a reference to all the current
// locks obtained by this process, keyed by lock key,
// so they can be released on shutdown (CleanUpOwnLocks).
var locks = make(map[string]Storage)

// locksMu guards concurrent access to the locks map.
var locksMu sync.Mutex

// StorageKeys provides methods for accessing
// keys and key prefixes for items in a Storage.
// Typically, you will not need to use this
// because accessing storage is abstracted away
// for most cases. Only use this if you need to
// directly access TLS assets in your application.
var StorageKeys KeyBuilder

// Storage key prefixes for the two classes of assets kept in storage.
const (
	prefixCerts = "certificates"
	prefixOCSP  = "ocsp"
)

// safeKeyRE matches any undesirable characters in storage keys.
// Note that this allows dots, so you'll have to strip ".." manually.
var safeKeyRE = regexp.MustCompile(`[^\w@.-]`)

// defaultFileStorage is a convenient, default storage
// implementation using the local file system.
var defaultFileStorage = &FileStorage{Path: dataDir()}
|
||||
8
vendor/github.com/datarhei/joy4/format/rtmp/rtmp.go
generated
vendored
8
vendor/github.com/datarhei/joy4/format/rtmp/rtmp.go
generated
vendored
@ -426,7 +426,7 @@ var CodecTypes = flv.CodecTypes
|
||||
|
||||
func (self *Conn) writeBasicConf() (err error) {
|
||||
// > SetChunkSize
|
||||
if err = self.writeSetChunkSize(1024 * 1024 * 128); err != nil {
|
||||
if err = self.writeSetChunkSize(1024 * 1024 * 1); err != nil {
|
||||
return
|
||||
}
|
||||
// > WindowAckSize
|
||||
@ -471,6 +471,7 @@ func (self *Conn) readConnect() (err error) {
|
||||
if ok {
|
||||
tcurl, _ = _tcurl.(string)
|
||||
}
|
||||
|
||||
connectparams := self.commandobj
|
||||
|
||||
if err = self.writeBasicConf(); err != nil {
|
||||
@ -1180,6 +1181,7 @@ func (self *Conn) fillChunkHeader(b []byte, csid uint32, timestamp int32, msgtyp
|
||||
|
||||
if Debug {
|
||||
fmt.Printf("rtmp: write chunk msgdatalen=%d msgsid=%d\n", msgdatalen, msgsid)
|
||||
fmt.Print(hex.Dump(b[:msgdatalen]))
|
||||
}
|
||||
|
||||
return
|
||||
@ -1568,6 +1570,10 @@ func (self *Conn) handleMsg(timestamp uint32, msgsid uint32, msgtypeid uint8, ms
|
||||
}
|
||||
self.readAckSize = pio.U32BE(self.msgdata)
|
||||
return
|
||||
default:
|
||||
if Debug {
|
||||
fmt.Printf("rtmp: unhandled msg: %d\n", msgtypeid)
|
||||
}
|
||||
}
|
||||
|
||||
self.gotmsg = true
|
||||
|
||||
12
vendor/github.com/labstack/echo/v4/CHANGELOG.md
generated
vendored
12
vendor/github.com/labstack/echo/v4/CHANGELOG.md
generated
vendored
@ -1,5 +1,17 @@
|
||||
# Changelog
|
||||
|
||||
## v4.9.0 - 2022-09-04
|
||||
|
||||
**Security**
|
||||
|
||||
* Fix open redirect vulnerability in handlers serving static directories (e.Static, e.StaticFs, echo.StaticDirectoryHandler) [#2260](https://github.com/labstack/echo/pull/2260)
|
||||
|
||||
**Enhancements**
|
||||
|
||||
* Allow configuring ErrorHandler in CSRF middleware [#2257](https://github.com/labstack/echo/pull/2257)
|
||||
* Replace HTTP method constants in tests with stdlib constants [#2247](https://github.com/labstack/echo/pull/2247)
|
||||
|
||||
|
||||
## v4.8.0 - 2022-08-10
|
||||
|
||||
**Most notable things**
|
||||
|
||||
42
vendor/github.com/labstack/echo/v4/context_fs.go
generated
vendored
42
vendor/github.com/labstack/echo/v4/context_fs.go
generated
vendored
@ -1,33 +1,49 @@
|
||||
//go:build !go1.16
|
||||
// +build !go1.16
|
||||
|
||||
package echo
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func (c *context) File(file string) (err error) {
|
||||
f, err := os.Open(file)
|
||||
func (c *context) File(file string) error {
|
||||
return fsFile(c, file, c.echo.Filesystem)
|
||||
}
|
||||
|
||||
// FileFS serves file from given file system.
|
||||
//
|
||||
// When dealing with `embed.FS` use `fs := echo.MustSubFS(fs, "rootDirectory") to create sub fs which uses necessary
|
||||
// prefix for directory path. This is necessary as `//go:embed assets/images` embeds files with paths
|
||||
// including `assets/images` as their prefix.
|
||||
func (c *context) FileFS(file string, filesystem fs.FS) error {
|
||||
return fsFile(c, file, filesystem)
|
||||
}
|
||||
|
||||
func fsFile(c Context, file string, filesystem fs.FS) error {
|
||||
f, err := filesystem.Open(file)
|
||||
if err != nil {
|
||||
return NotFoundHandler(c)
|
||||
return ErrNotFound
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
fi, _ := f.Stat()
|
||||
if fi.IsDir() {
|
||||
file = filepath.Join(file, indexPage)
|
||||
f, err = os.Open(file)
|
||||
file = filepath.ToSlash(filepath.Join(file, indexPage)) // ToSlash is necessary for Windows. fs.Open and os.Open are different in that aspect.
|
||||
f, err = filesystem.Open(file)
|
||||
if err != nil {
|
||||
return NotFoundHandler(c)
|
||||
return ErrNotFound
|
||||
}
|
||||
defer f.Close()
|
||||
if fi, err = f.Stat(); err != nil {
|
||||
return
|
||||
return err
|
||||
}
|
||||
}
|
||||
http.ServeContent(c.Response(), c.Request(), fi.Name(), fi.ModTime(), f)
|
||||
return
|
||||
ff, ok := f.(io.ReadSeeker)
|
||||
if !ok {
|
||||
return errors.New("file does not implement io.ReadSeeker")
|
||||
}
|
||||
http.ServeContent(c.Response(), c.Request(), fi.Name(), fi.ModTime(), ff)
|
||||
return nil
|
||||
}
|
||||
|
||||
52
vendor/github.com/labstack/echo/v4/context_fs_go1.16.go
generated
vendored
52
vendor/github.com/labstack/echo/v4/context_fs_go1.16.go
generated
vendored
@ -1,52 +0,0 @@
|
||||
//go:build go1.16
|
||||
// +build go1.16
|
||||
|
||||
package echo
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// File serves the named file using the Echo instance's configured
// Filesystem.
func (c *context) File(file string) error {
	return fsFile(c, file, c.echo.Filesystem)
}

// FileFS serves file from given file system.
//
// When dealing with `embed.FS` use `fs := echo.MustSubFS(fs, "rootDirectory") to create sub fs which uses necessary
// prefix for directory path. This is necessary as `//go:embed assets/images` embeds files with paths
// including `assets/images` as their prefix.
func (c *context) FileFS(file string, filesystem fs.FS) error {
	return fsFile(c, file, filesystem)
}
|
||||
|
||||
func fsFile(c Context, file string, filesystem fs.FS) error {
|
||||
f, err := filesystem.Open(file)
|
||||
if err != nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
fi, _ := f.Stat()
|
||||
if fi.IsDir() {
|
||||
file = filepath.ToSlash(filepath.Join(file, indexPage)) // ToSlash is necessary for Windows. fs.Open and os.Open are different in that aspect.
|
||||
f, err = filesystem.Open(file)
|
||||
if err != nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
defer f.Close()
|
||||
if fi, err = f.Stat(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
ff, ok := f.(io.ReadSeeker)
|
||||
if !ok {
|
||||
return errors.New("file does not implement io.ReadSeeker")
|
||||
}
|
||||
http.ServeContent(c.Response(), c.Request(), fi.Name(), fi.ModTime(), ff)
|
||||
return nil
|
||||
}
|
||||
2
vendor/github.com/labstack/echo/v4/echo.go
generated
vendored
2
vendor/github.com/labstack/echo/v4/echo.go
generated
vendored
@ -248,7 +248,7 @@ const (
|
||||
|
||||
const (
|
||||
// Version of Echo
|
||||
Version = "4.8.0"
|
||||
Version = "4.9.0"
|
||||
website = "https://echo.labstack.com"
|
||||
// http://patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Echo
|
||||
banner = `
|
||||
|
||||
183
vendor/github.com/labstack/echo/v4/echo_fs.go
generated
vendored
183
vendor/github.com/labstack/echo/v4/echo_fs.go
generated
vendored
@ -1,62 +1,175 @@
|
||||
//go:build !go1.16
|
||||
// +build !go1.16
|
||||
|
||||
package echo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type filesystem struct {
|
||||
// Filesystem is file system used by Static and File handlers to access files.
|
||||
// Defaults to os.DirFS(".")
|
||||
//
|
||||
// When dealing with `embed.FS` use `fs := echo.MustSubFS(fs, "rootDirectory") to create sub fs which uses necessary
|
||||
// prefix for directory path. This is necessary as `//go:embed assets/images` embeds files with paths
|
||||
// including `assets/images` as their prefix.
|
||||
Filesystem fs.FS
|
||||
}
|
||||
|
||||
func createFilesystem() filesystem {
|
||||
return filesystem{}
|
||||
}
|
||||
|
||||
// Static registers a new route with path prefix to serve static files from the
|
||||
// provided root directory.
|
||||
func (e *Echo) Static(prefix, root string) *Route {
|
||||
if root == "" {
|
||||
root = "." // For security we want to restrict to CWD.
|
||||
return filesystem{
|
||||
Filesystem: newDefaultFS(),
|
||||
}
|
||||
return e.static(prefix, root, e.GET)
|
||||
}
|
||||
|
||||
func (common) static(prefix, root string, get func(string, HandlerFunc, ...MiddlewareFunc) *Route) *Route {
|
||||
h := func(c Context) error {
|
||||
p, err := url.PathUnescape(c.Param("*"))
|
||||
if err != nil {
|
||||
return err
|
||||
// Static registers a new route with path prefix to serve static files from the provided root directory.
|
||||
func (e *Echo) Static(pathPrefix, fsRoot string) *Route {
|
||||
subFs := MustSubFS(e.Filesystem, fsRoot)
|
||||
return e.Add(
|
||||
http.MethodGet,
|
||||
pathPrefix+"*",
|
||||
StaticDirectoryHandler(subFs, false),
|
||||
)
|
||||
}
|
||||
|
||||
// StaticFS registers a new route with path prefix to serve static files from the provided file system.
|
||||
//
|
||||
// When dealing with `embed.FS` use `fs := echo.MustSubFS(fs, "rootDirectory") to create sub fs which uses necessary
|
||||
// prefix for directory path. This is necessary as `//go:embed assets/images` embeds files with paths
|
||||
// including `assets/images` as their prefix.
|
||||
func (e *Echo) StaticFS(pathPrefix string, filesystem fs.FS) *Route {
|
||||
return e.Add(
|
||||
http.MethodGet,
|
||||
pathPrefix+"*",
|
||||
StaticDirectoryHandler(filesystem, false),
|
||||
)
|
||||
}
|
||||
|
||||
// StaticDirectoryHandler creates handler function to serve files from provided file system
|
||||
// When disablePathUnescaping is set then file name from path is not unescaped and is served as is.
|
||||
func StaticDirectoryHandler(fileSystem fs.FS, disablePathUnescaping bool) HandlerFunc {
|
||||
return func(c Context) error {
|
||||
p := c.Param("*")
|
||||
if !disablePathUnescaping { // when router is already unescaping we do not want to do is twice
|
||||
tmpPath, err := url.PathUnescape(p)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unescape path variable: %w", err)
|
||||
}
|
||||
p = tmpPath
|
||||
}
|
||||
|
||||
name := filepath.Join(root, filepath.Clean("/"+p)) // "/"+ for security
|
||||
fi, err := os.Stat(name)
|
||||
// fs.FS.Open() already assumes that file names are relative to FS root path and considers name with prefix `/` as invalid
|
||||
name := filepath.ToSlash(filepath.Clean(strings.TrimPrefix(p, "/")))
|
||||
fi, err := fs.Stat(fileSystem, name)
|
||||
if err != nil {
|
||||
// The access path does not exist
|
||||
return NotFoundHandler(c)
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
// If the request is for a directory and does not end with "/"
|
||||
p = c.Request().URL.Path // path must not be empty.
|
||||
if fi.IsDir() && p[len(p)-1] != '/' {
|
||||
if fi.IsDir() && len(p) > 0 && p[len(p)-1] != '/' {
|
||||
// Redirect to ends with "/"
|
||||
return c.Redirect(http.StatusMovedPermanently, p+"/")
|
||||
return c.Redirect(http.StatusMovedPermanently, sanitizeURI(p+"/"))
|
||||
}
|
||||
return c.File(name)
|
||||
return fsFile(c, name, fileSystem)
|
||||
}
|
||||
// Handle added routes based on trailing slash:
|
||||
// /prefix => exact route "/prefix" + any route "/prefix/*"
|
||||
// /prefix/ => only any route "/prefix/*"
|
||||
if prefix != "" {
|
||||
if prefix[len(prefix)-1] == '/' {
|
||||
// Only add any route for intentional trailing slash
|
||||
return get(prefix+"*", h)
|
||||
}
|
||||
get(prefix, h)
|
||||
}
|
||||
return get(prefix+"/*", h)
|
||||
}
|
||||
|
||||
// FileFS registers a new route with path to serve file from the provided file system.
|
||||
func (e *Echo) FileFS(path, file string, filesystem fs.FS, m ...MiddlewareFunc) *Route {
|
||||
return e.GET(path, StaticFileHandler(file, filesystem), m...)
|
||||
}
|
||||
|
||||
// StaticFileHandler creates handler function to serve file from provided file system
|
||||
func StaticFileHandler(file string, filesystem fs.FS) HandlerFunc {
|
||||
return func(c Context) error {
|
||||
return fsFile(c, file, filesystem)
|
||||
}
|
||||
}
|
||||
|
||||
// defaultFS exists to preserve pre v4.7.0 behaviour where files were open by `os.Open`.
|
||||
// v4.7 introduced `echo.Filesystem` field which is Go1.16+ `fs.Fs` interface.
|
||||
// Difference between `os.Open` and `fs.Open` is that FS does not allow opening path that start with `.`, `..` or `/`
|
||||
// etc. For example previously you could have `../images` in your application but `fs := os.DirFS("./")` would not
|
||||
// allow you to use `fs.Open("../images")` and this would break all old applications that rely on being able to
|
||||
// traverse up from current executable run path.
|
||||
// NB: private because you really should use fs.FS implementation instances
|
||||
type defaultFS struct {
|
||||
prefix string
|
||||
fs fs.FS
|
||||
}
|
||||
|
||||
func newDefaultFS() *defaultFS {
|
||||
dir, _ := os.Getwd()
|
||||
return &defaultFS{
|
||||
prefix: dir,
|
||||
fs: nil,
|
||||
}
|
||||
}
|
||||
|
||||
func (fs defaultFS) Open(name string) (fs.File, error) {
|
||||
if fs.fs == nil {
|
||||
return os.Open(name)
|
||||
}
|
||||
return fs.fs.Open(name)
|
||||
}
|
||||
|
||||
func subFS(currentFs fs.FS, root string) (fs.FS, error) {
|
||||
root = filepath.ToSlash(filepath.Clean(root)) // note: fs.FS operates only with slashes. `ToSlash` is necessary for Windows
|
||||
if dFS, ok := currentFs.(*defaultFS); ok {
|
||||
// we need to make exception for `defaultFS` instances as it interprets root prefix differently from fs.FS.
|
||||
// fs.Fs.Open does not like relative paths ("./", "../") and absolute paths at all but prior echo.Filesystem we
|
||||
// were able to use paths like `./myfile.log`, `/etc/hosts` and these would work fine with `os.Open` but not with fs.Fs
|
||||
if isRelativePath(root) {
|
||||
root = filepath.Join(dFS.prefix, root)
|
||||
}
|
||||
return &defaultFS{
|
||||
prefix: root,
|
||||
fs: os.DirFS(root),
|
||||
}, nil
|
||||
}
|
||||
return fs.Sub(currentFs, root)
|
||||
}
|
||||
|
||||
func isRelativePath(path string) bool {
|
||||
if path == "" {
|
||||
return true
|
||||
}
|
||||
if path[0] == '/' {
|
||||
return false
|
||||
}
|
||||
if runtime.GOOS == "windows" && strings.IndexByte(path, ':') != -1 {
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#file_and_directory_names
|
||||
// https://docs.microsoft.com/en-us/dotnet/standard/io/file-path-formats
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// MustSubFS creates sub FS from current filesystem or panic on failure.
|
||||
// Panic happens when `fsRoot` contains invalid path according to `fs.ValidPath` rules.
|
||||
//
|
||||
// MustSubFS is helpful when dealing with `embed.FS` because for example `//go:embed assets/images` embeds files with
|
||||
// paths including `assets/images` as their prefix. In that case use `fs := echo.MustSubFS(fs, "rootDirectory") to
|
||||
// create sub fs which uses necessary prefix for directory path.
|
||||
func MustSubFS(currentFs fs.FS, fsRoot string) fs.FS {
|
||||
subFs, err := subFS(currentFs, fsRoot)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("can not create sub FS, invalid root given, err: %w", err))
|
||||
}
|
||||
return subFs
|
||||
}
|
||||
|
||||
func sanitizeURI(uri string) string {
|
||||
// double slash `\\`, `//` or even `\/` is absolute uri for browsers and by redirecting request to that uri
|
||||
// we are vulnerable to open redirect attack. so replace all slashes from the beginning with single slash
|
||||
if len(uri) > 1 && (uri[0] == '\\' || uri[0] == '/') && (uri[1] == '\\' || uri[1] == '/') {
|
||||
uri = "/" + strings.TrimLeft(uri, `/\`)
|
||||
}
|
||||
return uri
|
||||
}
|
||||
|
||||
169
vendor/github.com/labstack/echo/v4/echo_fs_go1.16.go
generated
vendored
169
vendor/github.com/labstack/echo/v4/echo_fs_go1.16.go
generated
vendored
@ -1,169 +0,0 @@
|
||||
//go:build go1.16
|
||||
// +build go1.16
|
||||
|
||||
package echo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type filesystem struct {
|
||||
// Filesystem is file system used by Static and File handlers to access files.
|
||||
// Defaults to os.DirFS(".")
|
||||
//
|
||||
// When dealing with `embed.FS` use `fs := echo.MustSubFS(fs, "rootDirectory") to create sub fs which uses necessary
|
||||
// prefix for directory path. This is necessary as `//go:embed assets/images` embeds files with paths
|
||||
// including `assets/images` as their prefix.
|
||||
Filesystem fs.FS
|
||||
}
|
||||
|
||||
func createFilesystem() filesystem {
|
||||
return filesystem{
|
||||
Filesystem: newDefaultFS(),
|
||||
}
|
||||
}
|
||||
|
||||
// Static registers a new route with path prefix to serve static files from the provided root directory.
|
||||
func (e *Echo) Static(pathPrefix, fsRoot string) *Route {
|
||||
subFs := MustSubFS(e.Filesystem, fsRoot)
|
||||
return e.Add(
|
||||
http.MethodGet,
|
||||
pathPrefix+"*",
|
||||
StaticDirectoryHandler(subFs, false),
|
||||
)
|
||||
}
|
||||
|
||||
// StaticFS registers a new route with path prefix to serve static files from the provided file system.
|
||||
//
|
||||
// When dealing with `embed.FS` use `fs := echo.MustSubFS(fs, "rootDirectory") to create sub fs which uses necessary
|
||||
// prefix for directory path. This is necessary as `//go:embed assets/images` embeds files with paths
|
||||
// including `assets/images` as their prefix.
|
||||
func (e *Echo) StaticFS(pathPrefix string, filesystem fs.FS) *Route {
|
||||
return e.Add(
|
||||
http.MethodGet,
|
||||
pathPrefix+"*",
|
||||
StaticDirectoryHandler(filesystem, false),
|
||||
)
|
||||
}
|
||||
|
||||
// StaticDirectoryHandler creates handler function to serve files from provided file system
|
||||
// When disablePathUnescaping is set then file name from path is not unescaped and is served as is.
|
||||
func StaticDirectoryHandler(fileSystem fs.FS, disablePathUnescaping bool) HandlerFunc {
|
||||
return func(c Context) error {
|
||||
p := c.Param("*")
|
||||
if !disablePathUnescaping { // when router is already unescaping we do not want to do is twice
|
||||
tmpPath, err := url.PathUnescape(p)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unescape path variable: %w", err)
|
||||
}
|
||||
p = tmpPath
|
||||
}
|
||||
|
||||
// fs.FS.Open() already assumes that file names are relative to FS root path and considers name with prefix `/` as invalid
|
||||
name := filepath.ToSlash(filepath.Clean(strings.TrimPrefix(p, "/")))
|
||||
fi, err := fs.Stat(fileSystem, name)
|
||||
if err != nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
// If the request is for a directory and does not end with "/"
|
||||
p = c.Request().URL.Path // path must not be empty.
|
||||
if fi.IsDir() && len(p) > 0 && p[len(p)-1] != '/' {
|
||||
// Redirect to ends with "/"
|
||||
return c.Redirect(http.StatusMovedPermanently, p+"/")
|
||||
}
|
||||
return fsFile(c, name, fileSystem)
|
||||
}
|
||||
}
|
||||
|
||||
// FileFS registers a new route with path to serve file from the provided file system.
|
||||
func (e *Echo) FileFS(path, file string, filesystem fs.FS, m ...MiddlewareFunc) *Route {
|
||||
return e.GET(path, StaticFileHandler(file, filesystem), m...)
|
||||
}
|
||||
|
||||
// StaticFileHandler creates handler function to serve file from provided file system
|
||||
func StaticFileHandler(file string, filesystem fs.FS) HandlerFunc {
|
||||
return func(c Context) error {
|
||||
return fsFile(c, file, filesystem)
|
||||
}
|
||||
}
|
||||
|
||||
// defaultFS exists to preserve pre v4.7.0 behaviour where files were open by `os.Open`.
|
||||
// v4.7 introduced `echo.Filesystem` field which is Go1.16+ `fs.Fs` interface.
|
||||
// Difference between `os.Open` and `fs.Open` is that FS does not allow opening path that start with `.`, `..` or `/`
|
||||
// etc. For example previously you could have `../images` in your application but `fs := os.DirFS("./")` would not
|
||||
// allow you to use `fs.Open("../images")` and this would break all old applications that rely on being able to
|
||||
// traverse up from current executable run path.
|
||||
// NB: private because you really should use fs.FS implementation instances
|
||||
type defaultFS struct {
|
||||
prefix string
|
||||
fs fs.FS
|
||||
}
|
||||
|
||||
func newDefaultFS() *defaultFS {
|
||||
dir, _ := os.Getwd()
|
||||
return &defaultFS{
|
||||
prefix: dir,
|
||||
fs: nil,
|
||||
}
|
||||
}
|
||||
|
||||
func (fs defaultFS) Open(name string) (fs.File, error) {
|
||||
if fs.fs == nil {
|
||||
return os.Open(name)
|
||||
}
|
||||
return fs.fs.Open(name)
|
||||
}
|
||||
|
||||
func subFS(currentFs fs.FS, root string) (fs.FS, error) {
|
||||
root = filepath.ToSlash(filepath.Clean(root)) // note: fs.FS operates only with slashes. `ToSlash` is necessary for Windows
|
||||
if dFS, ok := currentFs.(*defaultFS); ok {
|
||||
// we need to make exception for `defaultFS` instances as it interprets root prefix differently from fs.FS.
|
||||
// fs.Fs.Open does not like relative paths ("./", "../") and absolute paths at all but prior echo.Filesystem we
|
||||
// were able to use paths like `./myfile.log`, `/etc/hosts` and these would work fine with `os.Open` but not with fs.Fs
|
||||
if isRelativePath(root) {
|
||||
root = filepath.Join(dFS.prefix, root)
|
||||
}
|
||||
return &defaultFS{
|
||||
prefix: root,
|
||||
fs: os.DirFS(root),
|
||||
}, nil
|
||||
}
|
||||
return fs.Sub(currentFs, root)
|
||||
}
|
||||
|
||||
func isRelativePath(path string) bool {
|
||||
if path == "" {
|
||||
return true
|
||||
}
|
||||
if path[0] == '/' {
|
||||
return false
|
||||
}
|
||||
if runtime.GOOS == "windows" && strings.IndexByte(path, ':') != -1 {
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#file_and_directory_names
|
||||
// https://docs.microsoft.com/en-us/dotnet/standard/io/file-path-formats
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// MustSubFS creates sub FS from current filesystem or panic on failure.
|
||||
// Panic happens when `fsRoot` contains invalid path according to `fs.ValidPath` rules.
|
||||
//
|
||||
// MustSubFS is helpful when dealing with `embed.FS` because for example `//go:embed assets/images` embeds files with
|
||||
// paths including `assets/images` as their prefix. In that case use `fs := echo.MustSubFS(fs, "rootDirectory") to
|
||||
// create sub fs which uses necessary prefix for directory path.
|
||||
func MustSubFS(currentFs fs.FS, fsRoot string) fs.FS {
|
||||
subFs, err := subFS(currentFs, fsRoot)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("can not create sub FS, invalid root given, err: %w", err))
|
||||
}
|
||||
return subFs
|
||||
}
|
||||
31
vendor/github.com/labstack/echo/v4/group_fs.go
generated
vendored
31
vendor/github.com/labstack/echo/v4/group_fs.go
generated
vendored
@ -1,9 +1,30 @@
|
||||
//go:build !go1.16
|
||||
// +build !go1.16
|
||||
|
||||
package echo
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// Static implements `Echo#Static()` for sub-routes within the Group.
|
||||
func (g *Group) Static(prefix, root string) {
|
||||
g.static(prefix, root, g.GET)
|
||||
func (g *Group) Static(pathPrefix, fsRoot string) {
|
||||
subFs := MustSubFS(g.echo.Filesystem, fsRoot)
|
||||
g.StaticFS(pathPrefix, subFs)
|
||||
}
|
||||
|
||||
// StaticFS implements `Echo#StaticFS()` for sub-routes within the Group.
|
||||
//
|
||||
// When dealing with `embed.FS` use `fs := echo.MustSubFS(fs, "rootDirectory") to create sub fs which uses necessary
|
||||
// prefix for directory path. This is necessary as `//go:embed assets/images` embeds files with paths
|
||||
// including `assets/images` as their prefix.
|
||||
func (g *Group) StaticFS(pathPrefix string, filesystem fs.FS) {
|
||||
g.Add(
|
||||
http.MethodGet,
|
||||
pathPrefix+"*",
|
||||
StaticDirectoryHandler(filesystem, false),
|
||||
)
|
||||
}
|
||||
|
||||
// FileFS implements `Echo#FileFS()` for sub-routes within the Group.
|
||||
func (g *Group) FileFS(path, file string, filesystem fs.FS, m ...MiddlewareFunc) *Route {
|
||||
return g.GET(path, StaticFileHandler(file, filesystem), m...)
|
||||
}
|
||||
|
||||
33
vendor/github.com/labstack/echo/v4/group_fs_go1.16.go
generated
vendored
33
vendor/github.com/labstack/echo/v4/group_fs_go1.16.go
generated
vendored
@ -1,33 +0,0 @@
|
||||
//go:build go1.16
|
||||
// +build go1.16
|
||||
|
||||
package echo
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// Static implements `Echo#Static()` for sub-routes within the Group.
|
||||
func (g *Group) Static(pathPrefix, fsRoot string) {
|
||||
subFs := MustSubFS(g.echo.Filesystem, fsRoot)
|
||||
g.StaticFS(pathPrefix, subFs)
|
||||
}
|
||||
|
||||
// StaticFS implements `Echo#StaticFS()` for sub-routes within the Group.
|
||||
//
|
||||
// When dealing with `embed.FS` use `fs := echo.MustSubFS(fs, "rootDirectory") to create sub fs which uses necessary
|
||||
// prefix for directory path. This is necessary as `//go:embed assets/images` embeds files with paths
|
||||
// including `assets/images` as their prefix.
|
||||
func (g *Group) StaticFS(pathPrefix string, filesystem fs.FS) {
|
||||
g.Add(
|
||||
http.MethodGet,
|
||||
pathPrefix+"*",
|
||||
StaticDirectoryHandler(filesystem, false),
|
||||
)
|
||||
}
|
||||
|
||||
// FileFS implements `Echo#FileFS()` for sub-routes within the Group.
|
||||
func (g *Group) FileFS(path, file string, filesystem fs.FS, m ...MiddlewareFunc) *Route {
|
||||
return g.GET(path, StaticFileHandler(file, filesystem), m...)
|
||||
}
|
||||
18
vendor/github.com/labstack/echo/v4/middleware/csrf.go
generated
vendored
18
vendor/github.com/labstack/echo/v4/middleware/csrf.go
generated
vendored
@ -61,7 +61,13 @@ type (
|
||||
// Indicates SameSite mode of the CSRF cookie.
|
||||
// Optional. Default value SameSiteDefaultMode.
|
||||
CookieSameSite http.SameSite `yaml:"cookie_same_site"`
|
||||
|
||||
// ErrorHandler defines a function which is executed for returning custom errors.
|
||||
ErrorHandler CSRFErrorHandler
|
||||
}
|
||||
|
||||
// CSRFErrorHandler is a function which is executed for creating custom errors.
|
||||
CSRFErrorHandler func(err error, c echo.Context) error
|
||||
)
|
||||
|
||||
// ErrCSRFInvalid is returned when CSRF check fails
|
||||
@ -154,8 +160,9 @@ func CSRFWithConfig(config CSRFConfig) echo.MiddlewareFunc {
|
||||
lastTokenErr = ErrCSRFInvalid
|
||||
}
|
||||
}
|
||||
var finalErr error
|
||||
if lastTokenErr != nil {
|
||||
return lastTokenErr
|
||||
finalErr = lastTokenErr
|
||||
} else if lastExtractorErr != nil {
|
||||
// ugly part to preserve backwards compatible errors. someone could rely on them
|
||||
if lastExtractorErr == errQueryExtractorValueMissing {
|
||||
@ -167,7 +174,14 @@ func CSRFWithConfig(config CSRFConfig) echo.MiddlewareFunc {
|
||||
} else {
|
||||
lastExtractorErr = echo.NewHTTPError(http.StatusBadRequest, lastExtractorErr.Error())
|
||||
}
|
||||
return lastExtractorErr
|
||||
finalErr = lastExtractorErr
|
||||
}
|
||||
|
||||
if finalErr != nil {
|
||||
if config.ErrorHandler != nil {
|
||||
return config.ErrorHandler(finalErr, c)
|
||||
}
|
||||
return finalErr
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
1
vendor/github.com/libdns/libdns/.gitignore
generated
vendored
Normal file
1
vendor/github.com/libdns/libdns/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
_gitignore/
|
||||
21
vendor/github.com/libdns/libdns/LICENSE
generated
vendored
Normal file
21
vendor/github.com/libdns/libdns/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2020 Matthew Holt
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
87
vendor/github.com/libdns/libdns/README.md
generated
vendored
Normal file
87
vendor/github.com/libdns/libdns/README.md
generated
vendored
Normal file
@ -0,0 +1,87 @@
|
||||
libdns - Universal DNS provider APIs for Go
|
||||
===========================================
|
||||
|
||||
<a href="https://pkg.go.dev/github.com/libdns/libdns"><img src="https://img.shields.io/badge/godoc-reference-blue.svg"></a>
|
||||
|
||||
**⚠️ Work-in-progress. Exported APIs are subject to change.**
|
||||
|
||||
`libdns` is a collection of free-range DNS provider client implementations written in Go! With libdns packages, your Go program can manage DNS records across any supported providers. A "provider" is a service or program that manages a DNS zone.
|
||||
|
||||
This repository defines the core interfaces that provider packages should implement. They are small and idiomatic Go interfaces with well-defined semantics.
|
||||
|
||||
The interfaces include:
|
||||
|
||||
- [`RecordGetter`](https://pkg.go.dev/github.com/libdns/libdns#RecordGetter) to list records.
|
||||
- [`RecordAppender`](https://pkg.go.dev/github.com/libdns/libdns#RecordAppender) to append new records.
|
||||
- [`RecordSetter`](https://pkg.go.dev/github.com/libdns/libdns#RecordSetter) to set (create or change existing) records.
|
||||
- [`RecordDeleter`](https://pkg.go.dev/github.com/libdns/libdns#RecordDeleter) to delete records.
|
||||
|
||||
[See full godoc for detailed documentation.](https://pkg.go.dev/github.com/libdns/libdns)
|
||||
|
||||
|
||||
## Example
|
||||
|
||||
To work with DNS records managed by Cloudflare, for example, we can use [libdns/cloudflare](https://pkg.go.dev/github.com/libdns/cloudflare):
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/libdns/cloudflare"
|
||||
"github.com/libdns/libdns"
|
||||
)
|
||||
|
||||
ctx := context.TODO()
|
||||
|
||||
zone := "example.com."
|
||||
|
||||
// configure the DNS provider (choose any from github.com/libdns)
|
||||
provider := cloudflare.Provider{APIToken: "topsecret"}
|
||||
|
||||
// list records
|
||||
recs, err := provider.GetRecords(ctx, zone)
|
||||
|
||||
// create records (AppendRecords is similar)
|
||||
newRecs, err := provider.SetRecords(ctx, zone, []libdns.Record{
|
||||
Type: "A",
|
||||
Name: "sub",
|
||||
Value: "1.2.3.4",
|
||||
})
|
||||
|
||||
// delete records (this example uses provider-assigned ID)
|
||||
deletedRecs, err := provider.DeleteRecords(ctx, zone, []libdns.Record{
|
||||
ID: "foobar",
|
||||
})
|
||||
|
||||
// no matter which provider you use, the code stays the same!
|
||||
// (some providers have caveats; see their package documentation)
|
||||
```
|
||||
|
||||
|
||||
## Implementing new providers
|
||||
|
||||
Providers are 100% written and maintained by the community! We all maintain just the packages for providers we use.
|
||||
|
||||
**[Instructions for adding new providers](https://github.com/libdns/libdns/wiki/Implementing-providers)** are on this repo's wiki. Please feel free to contribute.
|
||||
|
||||
|
||||
## Similar projects
|
||||
|
||||
**[OctoDNS](https://github.com/github/octodns)** is a suite of tools written in Python for managing DNS. However, its approach is a bit heavy-handed when all you need are small, incremental changes to a zone:
|
||||
|
||||
> WARNING: OctoDNS assumes ownership of any domain you point it to. When you tell it to act it will do whatever is necessary to try and match up states including deleting any unexpected records. Be careful when playing around with OctoDNS.
|
||||
|
||||
This is incredibly useful when you are maintaining your own zone file, but risky when you just need incremental changes.
|
||||
|
||||
**[StackExchange/dnscontrol](https://github.com/StackExchange/dnscontrol)** is written in Go, but is similar to OctoDNS in that it tends to obliterate your entire zone and replace it with your input. Again, this is very useful if you are maintaining your own master list of records, but doesn't do well for simply adding or removing records.
|
||||
|
||||
**[go-acme/lego](https://github.com/go-acme/lego)** has support for a huge number of DNS providers (75+!), but their APIs are only capable of setting and deleting TXT records for ACME challenges.
|
||||
|
||||
**`libdns`** takes inspiration from the above projects but aims for a more generally-useful set of APIs that homogenize pretty well across providers. In contrast to the above projects, libdns can add, set, delete, and get arbitrary records from a zone without obliterating it (although syncing up an entire zone is also possible!). Its APIs also include context so long-running calls can be cancelled early, for example to accommodate on-line config changes downstream. libdns interfaces are also smaller and more composable. Additionally, libdns can grow to support a nearly infinite number of DNS providers without added bloat, because each provider implementation is a separate Go module, which keeps your builds lean and fast.
|
||||
|
||||
In summary, the goal is that libdns providers can do what the above libraries/tools can do, but with more flexibility: they can create and delete TXT records for ACME challenges, they can replace entire zones, but they can also do incremental changes or simply read records.
|
||||
|
||||
|
||||
## Record abstraction
|
||||
|
||||
How records are represented across providers varies widely, and each kind of record has different fields and semantics. In time, our goal is for the `libdns.Record` type to be able to represent most of them as concisely and simply as possible, with the interface methods able to deliver on most of the possible zone operations.
|
||||
|
||||
Realistically, libdns should enable most common record manipulations, but may not be able to fit absolutely 100% of all possibilities with DNS in a provider-agnostic way. That is probably OK; and given the wide varieties in DNS record types and provider APIs, it would be unreasonable to expect otherwise. We are not aiming for 100% fulfillment of 100% of users' requirements; more like 100% fulfillment of ~90% of users' requirements.
|
||||
129
vendor/github.com/libdns/libdns/libdns.go
generated
vendored
Normal file
129
vendor/github.com/libdns/libdns/libdns.go
generated
vendored
Normal file
@ -0,0 +1,129 @@
|
||||
// Package libdns defines core interfaces that should be implemented by DNS
|
||||
// provider clients. They are small and idiomatic Go interfaces with
|
||||
// well-defined semantics.
|
||||
//
|
||||
// Records are described independently of any particular zone, a convention
|
||||
// that grants Record structs portability across zones. As such, record names
|
||||
// are partially qualified, i.e. relative to the zone. For example, an A
|
||||
// record called "sub" in zone "example.com." represents a fully-qualified
|
||||
// domain name (FQDN) of "sub.example.com.". Implementations should expect
|
||||
// that input records conform to this standard, while also ensuring that
|
||||
// output records do; adjustments to record names may need to be made before
|
||||
// or after provider API calls, for example, to maintain consistency with
|
||||
// all other libdns provider implementations. Helper functions are available
|
||||
// in this package to convert between relative and absolute names.
|
||||
//
|
||||
// Although zone names are a required input, libdns does not coerce any
|
||||
// particular representation of DNS zones; only records. Since zone name and
|
||||
// records are separate inputs in libdns interfaces, it is up to the caller
|
||||
// to pair a zone's name with its records in a way that works for them.
|
||||
//
|
||||
// All interface implementations must be safe for concurrent/parallel use.
|
||||
// For example, if AppendRecords() is called at the same time and two API
|
||||
// requests are made to the provider at the same time, the result of both
|
||||
// requests must be visible after they both complete; if the provider does
|
||||
// not synchronize the writing of the zone file and one request overwrites
|
||||
// the other, then the client implementation must take care to synchronize
|
||||
// on behalf of the incompetent provider. This synchronization need not be
|
||||
// global; for example: the scope of synchronization might only need to be
|
||||
// within the same zone, allowing multiple requests at once as long as all
|
||||
// of them are for different zones. (Exact logic depends on the provider.)
|
||||
package libdns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// RecordGetter can get records from a DNS zone.
|
||||
type RecordGetter interface {
|
||||
// GetRecords returns all the records in the DNS zone.
|
||||
//
|
||||
// Implementations must honor context cancellation and be safe for
|
||||
// concurrent use.
|
||||
GetRecords(ctx context.Context, zone string) ([]Record, error)
|
||||
}
|
||||
|
||||
// RecordAppender can non-destructively add new records to a DNS zone.
|
||||
type RecordAppender interface {
|
||||
// AppendRecords creates the requested records in the given zone
|
||||
// and returns the populated records that were created. It never
|
||||
// changes existing records.
|
||||
//
|
||||
// Implementations must honor context cancellation and be safe for
|
||||
// concurrent use.
|
||||
AppendRecords(ctx context.Context, zone string, recs []Record) ([]Record, error)
|
||||
}
|
||||
|
||||
// RecordSetter can set new or update existing records in a DNS zone.
|
||||
type RecordSetter interface {
|
||||
// SetRecords updates the zone so that the records described in the
|
||||
// input are reflected in the output. It may create or overwrite
|
||||
// records or -- depending on the record type -- delete records to
|
||||
// maintain parity with the input. No other records are affected.
|
||||
// It returns the records which were set.
|
||||
//
|
||||
// Records that have an ID associating it with a particular resource
|
||||
// on the provider will be directly replaced. If no ID is given, this
|
||||
// method may use what information is given to do lookups and will
|
||||
// ensure that only necessary changes are made to the zone.
|
||||
//
|
||||
// Implementations must honor context cancellation and be safe for
|
||||
// concurrent use.
|
||||
SetRecords(ctx context.Context, zone string, recs []Record) ([]Record, error)
|
||||
}
|
||||
|
||||
// RecordDeleter can delete records from a DNS zone.
|
||||
type RecordDeleter interface {
|
||||
// DeleteRecords deletes the given records from the zone if they exist.
|
||||
// It returns the records that were deleted.
|
||||
//
|
||||
// Records that have an ID to associate it with a particular resource on
|
||||
// the provider will be directly deleted. If no ID is given, this method
|
||||
// may use what information is given to do lookups and delete only
|
||||
// matching records.
|
||||
//
|
||||
// Implementations must honor context cancellation and be safe for
|
||||
// concurrent use.
|
||||
DeleteRecords(ctx context.Context, zone string, recs []Record) ([]Record, error)
|
||||
}
|
||||
|
||||
// Record is a generalized representation of a DNS record.
|
||||
type Record struct {
|
||||
// provider-specific metadata
|
||||
ID string
|
||||
|
||||
// general record fields
|
||||
Type string
|
||||
Name string // partially-qualified (relative to zone)
|
||||
Value string
|
||||
TTL time.Duration
|
||||
|
||||
// type-dependent record fields
|
||||
Priority int // used by MX, SRV, and URI records
|
||||
}
|
||||
|
||||
// RelativeName makes fqdn relative to zone. For example, for a FQDN of
|
||||
// "sub.example.com" and a zone of "example.com", it outputs "sub".
|
||||
//
|
||||
// If fqdn cannot be expressed relative to zone, the input fqdn is returned.
|
||||
func RelativeName(fqdn, zone string) string {
|
||||
return strings.TrimSuffix(strings.TrimSuffix(fqdn, zone), ".")
|
||||
}
|
||||
|
||||
// AbsoluteName makes name into a fully-qualified domain name (FQDN) by
|
||||
// prepending it to zone and tidying up the dots. For example, an input
|
||||
// of name "sub" and zone "example.com." will return "sub.example.com.".
|
||||
func AbsoluteName(name, zone string) string {
|
||||
if zone == "" {
|
||||
return strings.Trim(name, ".")
|
||||
}
|
||||
if name == "" || name == "@" {
|
||||
return zone
|
||||
}
|
||||
if !strings.HasSuffix(name, ".") {
|
||||
name += "."
|
||||
}
|
||||
return name + zone
|
||||
}
|
||||
1
vendor/github.com/mholt/acmez/.gitignore
generated
vendored
Normal file
1
vendor/github.com/mholt/acmez/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
_gitignore/
|
||||
201
vendor/github.com/mholt/acmez/LICENSE
generated
vendored
Normal file
201
vendor/github.com/mholt/acmez/LICENSE
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
78
vendor/github.com/mholt/acmez/README.md
generated
vendored
Normal file
78
vendor/github.com/mholt/acmez/README.md
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
acmez - ACME client library for Go
|
||||
==================================
|
||||
|
||||
[](https://pkg.go.dev/github.com/mholt/acmez)
|
||||
|
||||
ACMEz ("ack-measy" or "acme-zee", whichever you prefer) is a fully-compliant [RFC 8555](https://tools.ietf.org/html/rfc8555) (ACME) implementation in pure Go. It is lightweight, has an elegant Go API, and its retry logic is highly robust against external errors. ACMEz is suitable for large-scale enterprise deployments.
|
||||
|
||||
**NOTE:** This module is for _getting_ certificates, not _managing_ certificates. Most users probably want certificate _management_ (keeping certificates renewed) rather than to interface directly with ACME. Developers who want to use certificates in their long-running Go programs should use [CertMagic](https://github.com/caddyserver/certmagic) instead; or, if their program is not written in Go, [Caddy](https://caddyserver.com/) can be used to manage certificates (even without running an HTTP or TLS server).
|
||||
|
||||
This module has two primary packages:
|
||||
|
||||
- **`acmez`** is a high-level wrapper for getting certificates. It implements the ACME order flow described in RFC 8555 including challenge solving using pluggable solvers.
|
||||
- **`acme`** is a low-level RFC 8555 implementation that provides the fundamental ACME operations, mainly useful if you have advanced or niche requirements.
|
||||
|
||||
In other words, the `acmez` package is **porcelain** while the `acme` package is **plumbing** (to use git's terminology).
|
||||
|
||||
|
||||
## Features
|
||||
|
||||
- Simple, elegant Go API
|
||||
- Thoroughly documented with spec citations
|
||||
- Robust to external errors
|
||||
- Structured error values ("problems" as defined in RFC 7807)
|
||||
- Smart retries (resilient against network and server hiccups)
|
||||
- Challenge plasticity (randomized challenges, and will retry others if one fails)
|
||||
- Context cancellation (suitable for high-frequency config changes or reloads)
|
||||
- Highly flexible and customizable
|
||||
- External Account Binding (EAB) support
|
||||
- Tested with multiple ACME CAs (more than just Let's Encrypt)
|
||||
- Supports niche aspects of RFC 8555 (such as alt cert chains and account key rollover)
|
||||
- Efficient solving of large SAN lists (e.g. for slow DNS record propagation)
|
||||
- Utility functions for solving challenges
|
||||
- Helpers for RFC 8737 (tls-alpn-01 challenge)
|
||||
|
||||
|
||||
## Examples
|
||||
|
||||
See the [`examples` folder](https://github.com/mholt/acmez/tree/master/examples) for tutorials on how to use either package. **Most users should follow the [porcelain guide](https://github.com/mholt/acmez/blob/master/examples/porcelain/main.go).**
|
||||
|
||||
|
||||
## Challenge solvers
|
||||
|
||||
The `acmez` package is "bring-your-own-solver." It provides helper utilities for http-01, dns-01, and tls-alpn-01 challenges, but does not actually solve them for you. You must write or use an implementation of [`acmez.Solver`](https://pkg.go.dev/github.com/mholt/acmez#Solver) in order to get certificates. How this is done depends on your environment/situation.
|
||||
|
||||
However, you can find [a general-purpose dns-01 solver in CertMagic](https://pkg.go.dev/github.com/caddyserver/certmagic#DNS01Solver), which uses [libdns](https://github.com/libdns) packages to integrate with numerous DNS providers. You can use it like this:
|
||||
|
||||
```go
|
||||
// minimal example using Cloudflare
|
||||
solver := &certmagic.DNS01Solver{
|
||||
DNSProvider: &cloudflare.Provider{APIToken: "topsecret"},
|
||||
}
|
||||
client := acmez.Client{
|
||||
ChallengeSolvers: map[string]acmez.Solver{
|
||||
acme.ChallengeTypeDNS01: solver,
|
||||
},
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
If you're implementing a tls-alpn-01 solver, the `acmez` package can help. It has the constant [`ACMETLS1Protocol`](https://pkg.go.dev/github.com/mholt/acmez#pkg-constants) which you can use to identify challenge handshakes by inspecting the ClientHello's ALPN extension. Simply complete the handshake using a certificate from the [`acmez.TLSALPN01ChallengeCert()`](https://pkg.go.dev/github.com/mholt/acmez#TLSALPN01ChallengeCert) function to solve the challenge.
|
||||
|
||||
|
||||
|
||||
## History
|
||||
|
||||
In 2014, the ISRG was finishing the development of its automated CA infrastructure: the first of its kind to become publicly-trusted, under the name Let's Encrypt, which used a young protocol called ACME to automate domain validation and certificate issuance.
|
||||
|
||||
Meanwhile, a project called [Caddy](https://caddyserver.com) was being developed which would be the first and only web server to use HTTPS _automatically and by default_. To make that possible, another project called lego was commissioned by the Caddy project to become one of the first-ever ACME client libraries, and the first client written in Go. It was made by Sebastian Erhart (xenolf), and on day 1 of Let's Encrypt's public beta, Caddy used lego to obtain its first certificate automatically at startup, making Caddy and lego the first-ever integrated ACME client.
|
||||
|
||||
Since then, Caddy has seen use in production longer than any other ACME client integration, and is well-known for being one of the most robust and reliable HTTPS implementations available today.
|
||||
|
||||
A few years later, Caddy's novel auto-HTTPS logic was extracted into a library called [CertMagic](https://github.com/caddyserver/certmagic) to be usable by any Go program. Caddy would continue to use CertMagic, which implemented the certificate _automation and management_ logic on top of the low-level certificate _obtain_ logic that lego provided.
|
||||
|
||||
Soon thereafter, the lego project shifted maintainership and the goals and vision of the project diverged from those of Caddy's use case of managing tens of thousands of certificates per instance. Eventually, [the original Caddy author announced work on a new ACME client library in Go](https://github.com/caddyserver/certmagic/issues/71) that exceeded Caddy's harsh requirements for large-scale enterprise deployments, lean builds, and simple API. This work finally came to fruition in 2020 as ACMEz.
|
||||
|
||||
---
|
||||
|
||||
(c) 2020 Matthew Holt
|
||||
37
vendor/github.com/mholt/acmez/THIRD-PARTY
generated
vendored
Normal file
37
vendor/github.com/mholt/acmez/THIRD-PARTY
generated
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
This document contains Third Party Software Notices and/or Additional
|
||||
Terms and Conditions for licensed third party software components
|
||||
included within this product.
|
||||
|
||||
==
|
||||
|
||||
https://github.com/golang/crypto/blob/master/acme/jws.go
|
||||
https://github.com/golang/crypto/blob/master/acme/jws_test.go
|
||||
(with modifications)
|
||||
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
249
vendor/github.com/mholt/acmez/acme/account.go
generated
vendored
Normal file
249
vendor/github.com/mholt/acmez/acme/account.go
generated
vendored
Normal file
@ -0,0 +1,249 @@
|
||||
// Copyright 2020 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Account represents a set of metadata associated with an account
// as defined by the ACME spec §7.1.2:
// https://tools.ietf.org/html/rfc8555#section-7.1.2
type Account struct {
	// status (required, string): The status of this account. Possible
	// values are "valid", "deactivated", and "revoked". The value
	// "deactivated" should be used to indicate client-initiated
	// deactivation whereas "revoked" should be used to indicate server-
	// initiated deactivation. See Section 7.1.6.
	Status string `json:"status"`

	// contact (optional, array of string): An array of URLs that the
	// server can use to contact the client for issues related to this
	// account. For example, the server may wish to notify the client
	// about server-initiated revocation or certificate expiration. For
	// information on supported URL schemes, see Section 7.3.
	Contact []string `json:"contact,omitempty"`

	// termsOfServiceAgreed (optional, boolean): Including this field in a
	// newAccount request, with a value of true, indicates the client's
	// agreement with the terms of service. This field cannot be updated
	// by the client.
	TermsOfServiceAgreed bool `json:"termsOfServiceAgreed,omitempty"`

	// externalAccountBinding (optional, object): Including this field in a
	// newAccount request indicates approval by the holder of an existing
	// non-ACME account to bind that account to this ACME account. This
	// field is not updateable by the client (see Section 7.3.4).
	//
	// Use SetExternalAccountBinding() to set this field's value properly.
	ExternalAccountBinding json.RawMessage `json:"externalAccountBinding,omitempty"`

	// orders (required, string): A URL from which a list of orders
	// submitted by this account can be fetched via a POST-as-GET
	// request, as described in Section 7.1.2.1.
	Orders string `json:"orders"`

	// In response to new-account, "the server returns this account
	// object in a 201 (Created) response, with the account URL
	// in a Location header field." §7.3
	//
	// We transfer the value from the header to this field for
	// storage and recall purposes (serialized here under a
	// "location" key for local persistence convenience).
	Location string `json:"location,omitempty"`

	// The private key to the account. Because it is secret, it is
	// not serialized as JSON (note the "-" tag) and must be stored
	// separately (usually a PEM-encoded file).
	PrivateKey crypto.Signer `json:"-"`
}
|
||||
|
||||
// SetExternalAccountBinding sets the ExternalAccountBinding field of the account.
// It only sets the field value; it does not register the account with the CA. (The
// client parameter is necessary because the EAB encoding depends on the directory.)
//
// The eab.MACKey must be base64url-encoded (RFC 8555 §7.3.4); it is decoded
// here before being used to sign the EAB payload containing the account's
// public key.
func (a *Account) SetExternalAccountBinding(ctx context.Context, client *Client, eab EAB) error {
	// Make sure the client's directory (client.dir) is ready, since the
	// EAB JWS below is bound to the directory's newAccount URL.
	if err := client.provision(ctx); err != nil {
		return err
	}

	macKey, err := base64.RawURLEncoding.DecodeString(eab.MACKey)
	if err != nil {
		return fmt.Errorf("base64-decoding MAC key: %w", err)
	}

	// Produce the EAB JWS: our account public key, MAC'd with the
	// CA-provided key, identified by the CA-provided key ID.
	eabJWS, err := jwsEncodeEAB(a.PrivateKey.Public(), macKey, keyID(eab.KeyID), client.dir.NewAccount)
	if err != nil {
		return fmt.Errorf("signing EAB content: %w", err)
	}

	a.ExternalAccountBinding = eabJWS

	return nil
}
|
||||
|
||||
// NewAccount creates a new account on the ACME server.
|
||||
//
|
||||
// "A client creates a new account with the server by sending a POST
|
||||
// request to the server's newAccount URL." §7.3
|
||||
func (c *Client) NewAccount(ctx context.Context, account Account) (Account, error) {
|
||||
if err := c.provision(ctx); err != nil {
|
||||
return account, err
|
||||
}
|
||||
return c.postAccount(ctx, c.dir.NewAccount, accountObject{Account: account})
|
||||
}
|
||||
|
||||
// GetAccount looks up an account on the ACME server.
|
||||
//
|
||||
// "If a client wishes to find the URL for an existing account and does
|
||||
// not want an account to be created if one does not already exist, then
|
||||
// it SHOULD do so by sending a POST request to the newAccount URL with
|
||||
// a JWS whose payload has an 'onlyReturnExisting' field set to 'true'."
|
||||
// §7.3.1
|
||||
func (c *Client) GetAccount(ctx context.Context, account Account) (Account, error) {
|
||||
if err := c.provision(ctx); err != nil {
|
||||
return account, err
|
||||
}
|
||||
return c.postAccount(ctx, c.dir.NewAccount, accountObject{
|
||||
Account: account,
|
||||
OnlyReturnExisting: true,
|
||||
})
|
||||
}
|
||||
|
||||
// UpdateAccount updates account information on the ACME server.
|
||||
//
|
||||
// "If the client wishes to update this information in the future, it
|
||||
// sends a POST request with updated information to the account URL.
|
||||
// The server MUST ignore any updates to the 'orders' field,
|
||||
// 'termsOfServiceAgreed' field (see Section 7.3.3), the 'status' field
|
||||
// (except as allowed by Section 7.3.6), or any other fields it does not
|
||||
// recognize." §7.3.2
|
||||
//
|
||||
// This method uses the account.Location value as the account URL.
|
||||
func (c *Client) UpdateAccount(ctx context.Context, account Account) (Account, error) {
|
||||
return c.postAccount(ctx, account.Location, accountObject{Account: account})
|
||||
}
|
||||
|
||||
// keyChangeRequest is the inner JWS payload for account key rollover
// (RFC 8555 §7.3.5): the account URL plus the old public key in JWK form.
type keyChangeRequest struct {
	Account string          `json:"account"`
	OldKey  json.RawMessage `json:"oldKey"`
}
|
||||
|
||||
// AccountKeyRollover changes an account's associated key.
|
||||
//
|
||||
// "To change the key associated with an account, the client sends a
|
||||
// request to the server containing signatures by both the old and new
|
||||
// keys." §7.3.5
|
||||
func (c *Client) AccountKeyRollover(ctx context.Context, account Account, newPrivateKey crypto.Signer) (Account, error) {
|
||||
if err := c.provision(ctx); err != nil {
|
||||
return account, err
|
||||
}
|
||||
|
||||
oldPublicKeyJWK, err := jwkEncode(account.PrivateKey.Public())
|
||||
if err != nil {
|
||||
return account, fmt.Errorf("encoding old private key: %v", err)
|
||||
}
|
||||
|
||||
keyChangeReq := keyChangeRequest{
|
||||
Account: account.Location,
|
||||
OldKey: []byte(oldPublicKeyJWK),
|
||||
}
|
||||
|
||||
innerJWS, err := jwsEncodeJSON(keyChangeReq, newPrivateKey, "", "", c.dir.KeyChange)
|
||||
if err != nil {
|
||||
return account, fmt.Errorf("encoding inner JWS: %v", err)
|
||||
}
|
||||
|
||||
_, err = c.httpPostJWS(ctx, account.PrivateKey, account.Location, c.dir.KeyChange, json.RawMessage(innerJWS), nil)
|
||||
if err != nil {
|
||||
return account, fmt.Errorf("rolling key on server: %w", err)
|
||||
}
|
||||
|
||||
account.PrivateKey = newPrivateKey
|
||||
|
||||
return account, nil
|
||||
|
||||
}
|
||||
|
||||
// postAccount POSTs the account object to the given endpoint as a JWS
// (signed with the account's private key), decodes the server's account
// object into the result, and records the account URL from the response's
// Location header. It is the shared backend for NewAccount, GetAccount,
// and UpdateAccount.
func (c *Client) postAccount(ctx context.Context, endpoint string, account accountObject) (Account, error) {
	// Normally, the account URL is the key ID ("kid")... except when the user
	// is trying to get the correct account URL. In that case, we must ignore
	// any existing URL we may have and not set the kid field on the request.
	// Arguably, this is a user error (spec says "If client wishes to find the
	// URL for an existing account", so why would the URL already be filled
	// out?) but it's easy enough to infer their intent and make it work.
	kid := account.Location
	if account.OnlyReturnExisting {
		kid = ""
	}

	// The response body is decoded into account.Account in place.
	resp, err := c.httpPostJWS(ctx, account.PrivateKey, kid, endpoint, account, &account.Account)
	if err != nil {
		return account.Account, err
	}

	// "the server returns this account object ... with the account URL
	// in a Location header field." §7.3
	account.Location = resp.Header.Get("Location")

	return account.Account, nil
}
|
||||
|
||||
// accountObject is the request payload wrapper for account endpoints:
// the embedded Account plus request-only fields that are not part of
// the account resource itself.
type accountObject struct {
	Account

	// If true, newAccount will be read-only, and Account.Location
	// (which holds the account URL) must be empty. See §7.3.1.
	OnlyReturnExisting bool `json:"onlyReturnExisting,omitempty"`
}
|
||||
|
||||
// EAB (External Account Binding) contains information
// necessary to bind or map an ACME account to some
// other account known by the CA.
//
// External account bindings are "used to associate an
// ACME account with an existing account in a non-ACME
// system, such as a CA customer database."
//
// "To enable ACME account binding, the CA operating the
// ACME server needs to provide the ACME client with a
// MAC key and a key identifier, using some mechanism
// outside of ACME." §7.3.4
//
// Pass an EAB value to Account.SetExternalAccountBinding
// to attach it to an account before registration.
type EAB struct {
	// "The key identifier MUST be an ASCII string." §7.3.4
	KeyID string `json:"key_id"`

	// "The MAC key SHOULD be provided in base64url-encoded
	// form, to maximize compatibility between non-ACME
	// provisioning systems and ACME clients." §7.3.4
	MACKey string `json:"mac_key"`
}
|
||||
|
||||
// Possible status values. From several spec sections:
// - Account §7.1.2 (valid, deactivated, revoked)
// - Order §7.1.3 (pending, ready, processing, valid, invalid)
// - Authorization §7.1.4 (pending, valid, invalid, deactivated, expired, revoked)
// - Challenge §7.1.5 (pending, processing, valid, invalid)
// - Status changes §7.1.6
//
// The constants are shared across resource types; as the list above
// shows, not every value applies to every resource.
const (
	StatusPending     = "pending"
	StatusProcessing  = "processing"
	StatusValid       = "valid"
	StatusInvalid     = "invalid"
	StatusDeactivated = "deactivated"
	StatusExpired     = "expired"
	StatusRevoked     = "revoked"
	StatusReady       = "ready"
)
|
||||
283
vendor/github.com/mholt/acmez/acme/authorization.go
generated
vendored
Normal file
283
vendor/github.com/mholt/acmez/acme/authorization.go
generated
vendored
Normal file
@ -0,0 +1,283 @@
|
||||
// Copyright 2020 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Authorization "represents a server's authorization for
|
||||
// an account to represent an identifier. In addition to the
|
||||
// identifier, an authorization includes several metadata fields, such
|
||||
// as the status of the authorization (e.g., 'pending', 'valid', or
|
||||
// 'revoked') and which challenges were used to validate possession of
|
||||
// the identifier." §7.1.4
|
||||
type Authorization struct {
|
||||
// identifier (required, object): The identifier that the account is
|
||||
// authorized to represent.
|
||||
Identifier Identifier `json:"identifier"`
|
||||
|
||||
// status (required, string): The status of this authorization.
|
||||
// Possible values are "pending", "valid", "invalid", "deactivated",
|
||||
// "expired", and "revoked". See Section 7.1.6.
|
||||
Status string `json:"status"`
|
||||
|
||||
// expires (optional, string): The timestamp after which the server
|
||||
// will consider this authorization invalid, encoded in the format
|
||||
// specified in [RFC3339]. This field is REQUIRED for objects with
|
||||
// "valid" in the "status" field.
|
||||
Expires time.Time `json:"expires,omitempty"`
|
||||
|
||||
// challenges (required, array of objects): For pending authorizations,
|
||||
// the challenges that the client can fulfill in order to prove
|
||||
// possession of the identifier. For valid authorizations, the
|
||||
// challenge that was validated. For invalid authorizations, the
|
||||
// challenge that was attempted and failed. Each array entry is an
|
||||
// object with parameters required to validate the challenge. A
|
||||
// client should attempt to fulfill one of these challenges, and a
|
||||
// server should consider any one of the challenges sufficient to
|
||||
// make the authorization valid.
|
||||
Challenges []Challenge `json:"challenges"`
|
||||
|
||||
// wildcard (optional, boolean): This field MUST be present and true
|
||||
// for authorizations created as a result of a newOrder request
|
||||
// containing a DNS identifier with a value that was a wildcard
|
||||
// domain name. For other authorizations, it MUST be absent.
|
||||
// Wildcard domain names are described in Section 7.1.3.
|
||||
Wildcard bool `json:"wildcard,omitempty"`
|
||||
|
||||
// "The server allocates a new URL for this authorization and returns a
|
||||
// 201 (Created) response with the authorization URL in the Location
|
||||
// header field" §7.4.1
|
||||
//
|
||||
// We transfer the value from the header to this field for storage and
|
||||
// recall purposes.
|
||||
Location string `json:"-"`
|
||||
}
|
||||
|
||||
// IdentifierValue returns the Identifier.Value field, adjusted
|
||||
// according to the Wildcard field.
|
||||
func (authz Authorization) IdentifierValue() string {
|
||||
if authz.Wildcard {
|
||||
return "*." + authz.Identifier.Value
|
||||
}
|
||||
return authz.Identifier.Value
|
||||
}
|
||||
|
||||
// fillChallengeFields populates extra fields in the challenge structs so that
|
||||
// challenges can be solved without needing a bunch of unnecessary extra state.
|
||||
func (authz *Authorization) fillChallengeFields(account Account) error {
|
||||
accountThumbprint, err := jwkThumbprint(account.PrivateKey.Public())
|
||||
if err != nil {
|
||||
return fmt.Errorf("computing account JWK thumbprint: %v", err)
|
||||
}
|
||||
for i := 0; i < len(authz.Challenges); i++ {
|
||||
authz.Challenges[i].Identifier = authz.Identifier
|
||||
if authz.Challenges[i].KeyAuthorization == "" {
|
||||
authz.Challenges[i].KeyAuthorization = authz.Challenges[i].Token + "." + accountThumbprint
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewAuthorization creates a new authorization for an identifier using
|
||||
// the newAuthz endpoint of the directory, if available. This function
|
||||
// creates authzs out of the regular order flow.
|
||||
//
|
||||
// "Note that because the identifier in a pre-authorization request is
|
||||
// the exact identifier to be included in the authorization object, pre-
|
||||
// authorization cannot be used to authorize issuance of certificates
|
||||
// containing wildcard domain names." §7.4.1
|
||||
func (c *Client) NewAuthorization(ctx context.Context, account Account, id Identifier) (Authorization, error) {
|
||||
if err := c.provision(ctx); err != nil {
|
||||
return Authorization{}, err
|
||||
}
|
||||
if c.dir.NewAuthz == "" {
|
||||
return Authorization{}, fmt.Errorf("server does not support newAuthz endpoint")
|
||||
}
|
||||
|
||||
var authz Authorization
|
||||
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, c.dir.NewAuthz, id, &authz)
|
||||
if err != nil {
|
||||
return authz, err
|
||||
}
|
||||
|
||||
authz.Location = resp.Header.Get("Location")
|
||||
|
||||
err = authz.fillChallengeFields(account)
|
||||
if err != nil {
|
||||
return authz, err
|
||||
}
|
||||
|
||||
return authz, nil
|
||||
}
|
||||
|
||||
// GetAuthorization fetches an authorization object from the server.
|
||||
//
|
||||
// "Authorization resources are created by the server in response to
|
||||
// newOrder or newAuthz requests submitted by an account key holder;
|
||||
// their URLs are provided to the client in the responses to these
|
||||
// requests."
|
||||
//
|
||||
// "When a client receives an order from the server in reply to a
|
||||
// newOrder request, it downloads the authorization resources by sending
|
||||
// POST-as-GET requests to the indicated URLs. If the client initiates
|
||||
// authorization using a request to the newAuthz resource, it will have
|
||||
// already received the pending authorization object in the response to
|
||||
// that request." §7.5
|
||||
func (c *Client) GetAuthorization(ctx context.Context, account Account, authzURL string) (Authorization, error) {
|
||||
if err := c.provision(ctx); err != nil {
|
||||
return Authorization{}, err
|
||||
}
|
||||
|
||||
var authz Authorization
|
||||
_, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, authzURL, nil, &authz)
|
||||
if err != nil {
|
||||
return authz, err
|
||||
}
|
||||
|
||||
authz.Location = authzURL
|
||||
|
||||
err = authz.fillChallengeFields(account)
|
||||
if err != nil {
|
||||
return authz, err
|
||||
}
|
||||
|
||||
return authz, nil
|
||||
}
|
||||
|
||||
// PollAuthorization polls the authorization resource endpoint until the authorization is
|
||||
// considered "finalized" which means that it either succeeded, failed, or was abandoned.
|
||||
// It blocks until that happens or until the configured timeout.
|
||||
//
|
||||
// "Usually, the validation process will take some time, so the client
|
||||
// will need to poll the authorization resource to see when it is
|
||||
// finalized."
|
||||
//
|
||||
// "For challenges where the client can tell when the server
|
||||
// has validated the challenge (e.g., by seeing an HTTP or DNS request
|
||||
// from the server), the client SHOULD NOT begin polling until it has
|
||||
// seen the validation request from the server." §7.5.1
|
||||
func (c *Client) PollAuthorization(ctx context.Context, account Account, authz Authorization) (Authorization, error) {
|
||||
start, interval, maxDuration := time.Now(), c.pollInterval(), c.pollTimeout()
|
||||
|
||||
if authz.Status != "" {
|
||||
if finalized, err := authzIsFinalized(authz); finalized {
|
||||
return authz, err
|
||||
}
|
||||
}
|
||||
|
||||
for time.Since(start) < maxDuration {
|
||||
select {
|
||||
case <-time.After(interval):
|
||||
case <-ctx.Done():
|
||||
return authz, ctx.Err()
|
||||
}
|
||||
|
||||
// get the latest authz object
|
||||
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, authz.Location, nil, &authz)
|
||||
if err != nil {
|
||||
return authz, fmt.Errorf("checking authorization status: %w", err)
|
||||
}
|
||||
if finalized, err := authzIsFinalized(authz); finalized {
|
||||
return authz, err
|
||||
}
|
||||
|
||||
// "The server MUST provide information about its retry state to the
|
||||
// client via the 'error' field in the challenge and the Retry-After
|
||||
// HTTP header field in response to requests to the challenge resource."
|
||||
// §8.2
|
||||
interval, err = retryAfter(resp, interval)
|
||||
if err != nil {
|
||||
return authz, err
|
||||
}
|
||||
}
|
||||
|
||||
return authz, fmt.Errorf("authorization took too long")
|
||||
}
|
||||
|
||||
// DeactivateAuthorization deactivates an authorization on the server, which is
|
||||
// a good idea if the authorization is not going to be utilized by the client.
|
||||
//
|
||||
// "If a client wishes to relinquish its authorization to issue
|
||||
// certificates for an identifier, then it may request that the server
|
||||
// deactivate each authorization associated with it by sending POST
|
||||
// requests with the static object {"status": "deactivated"} to each
|
||||
// authorization URL." §7.5.2
|
||||
func (c *Client) DeactivateAuthorization(ctx context.Context, account Account, authzURL string) (Authorization, error) {
|
||||
if err := c.provision(ctx); err != nil {
|
||||
return Authorization{}, err
|
||||
}
|
||||
|
||||
if authzURL == "" {
|
||||
return Authorization{}, fmt.Errorf("empty authz url")
|
||||
}
|
||||
|
||||
deactivate := struct {
|
||||
Status string `json:"status"`
|
||||
}{Status: "deactivated"}
|
||||
|
||||
var authz Authorization
|
||||
_, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, authzURL, deactivate, &authz)
|
||||
authz.Location = authzURL
|
||||
|
||||
return authz, err
|
||||
}
|
||||
|
||||
// authzIsFinalized returns true if the authorization is finished,
|
||||
// whether successfully or not. If not, an error will be returned.
|
||||
// Post-valid statuses that make an authz unusable are treated as
|
||||
// errors.
|
||||
func authzIsFinalized(authz Authorization) (bool, error) {
|
||||
switch authz.Status {
|
||||
case StatusPending:
|
||||
// "Authorization objects are created in the 'pending' state." §7.1.6
|
||||
return false, nil
|
||||
|
||||
case StatusValid:
|
||||
// "If one of the challenges listed in the authorization transitions
|
||||
// to the 'valid' state, then the authorization also changes to the
|
||||
// 'valid' state." §7.1.6
|
||||
return true, nil
|
||||
|
||||
case StatusInvalid:
|
||||
// "If the client attempts to fulfill a challenge and fails, or if
|
||||
// there is an error while the authorization is still pending, then
|
||||
// the authorization transitions to the 'invalid' state." §7.1.6
|
||||
var firstProblem Problem
|
||||
for _, chal := range authz.Challenges {
|
||||
if chal.Error != nil {
|
||||
firstProblem = *chal.Error
|
||||
break
|
||||
}
|
||||
}
|
||||
firstProblem.Resource = authz
|
||||
return true, fmt.Errorf("authorization failed: %w", firstProblem)
|
||||
|
||||
case StatusExpired, StatusDeactivated, StatusRevoked:
|
||||
// Once the authorization is in the 'valid' state, it can expire
|
||||
// ('expired'), be deactivated by the client ('deactivated', see
|
||||
// Section 7.5.2), or revoked by the server ('revoked')." §7.1.6
|
||||
return true, fmt.Errorf("authorization %s", authz.Status)
|
||||
|
||||
case "":
|
||||
return false, fmt.Errorf("status unknown")
|
||||
|
||||
default:
|
||||
return true, fmt.Errorf("server set unrecognized authorization status: %s", authz.Status)
|
||||
}
|
||||
}
|
||||
165
vendor/github.com/mholt/acmez/acme/certificate.go
generated
vendored
Normal file
165
vendor/github.com/mholt/acmez/acme/certificate.go
generated
vendored
Normal file
@ -0,0 +1,165 @@
|
||||
// Copyright 2020 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// Certificate represents a certificate chain, which we usually refer
|
||||
// to as "a certificate" because in practice an end-entity certificate
|
||||
// is seldom useful/practical without a chain.
|
||||
type Certificate struct {
|
||||
// The certificate resource URL as provisioned by
|
||||
// the ACME server. Some ACME servers may split
|
||||
// the chain into multiple URLs that are Linked
|
||||
// together, in which case this URL represents the
|
||||
// starting point.
|
||||
URL string `json:"url"`
|
||||
|
||||
// The PEM-encoded certificate chain, end-entity first.
|
||||
ChainPEM []byte `json:"-"`
|
||||
}
|
||||
|
||||
// GetCertificateChain downloads all available certificate chains originating from
|
||||
// the given certURL. This is to be done after an order is finalized.
|
||||
//
|
||||
// "To download the issued certificate, the client simply sends a POST-
|
||||
// as-GET request to the certificate URL."
|
||||
//
|
||||
// "The server MAY provide one or more link relation header fields
|
||||
// [RFC8288] with relation 'alternate'. Each such field SHOULD express
|
||||
// an alternative certificate chain starting with the same end-entity
|
||||
// certificate. This can be used to express paths to various trust
|
||||
// anchors. Clients can fetch these alternates and use their own
|
||||
// heuristics to decide which is optimal." §7.4.2
|
||||
func (c *Client) GetCertificateChain(ctx context.Context, account Account, certURL string) ([]Certificate, error) {
|
||||
if err := c.provision(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var chains []Certificate
|
||||
|
||||
addChain := func(certURL string) (*http.Response, error) {
|
||||
// can't pool this buffer; bytes escape scope
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
// TODO: set the Accept header? ("application/pem-certificate-chain") See end of §7.4.2
|
||||
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, certURL, nil, buf)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
contentType := parseMediaType(resp)
|
||||
|
||||
switch contentType {
|
||||
case "application/pem-certificate-chain":
|
||||
chains = append(chains, Certificate{
|
||||
URL: certURL,
|
||||
ChainPEM: buf.Bytes(),
|
||||
})
|
||||
default:
|
||||
return resp, fmt.Errorf("unrecognized Content-Type from server: %s", contentType)
|
||||
}
|
||||
|
||||
// "For formats that can only express a single certificate, the server SHOULD
|
||||
// provide one or more "Link: rel="up"" header fields pointing to an
|
||||
// issuer or issuers so that ACME clients can build a certificate chain
|
||||
// as defined in TLS (see Section 4.4.2 of [RFC8446])." (end of §7.4.2)
|
||||
allUp := extractLinks(resp, "up")
|
||||
for _, upURL := range allUp {
|
||||
upCerts, err := c.GetCertificateChain(ctx, account, upURL)
|
||||
if err != nil {
|
||||
return resp, fmt.Errorf("retrieving next certificate in chain: %s: %w", upURL, err)
|
||||
}
|
||||
for _, upCert := range upCerts {
|
||||
chains[len(chains)-1].ChainPEM = append(chains[len(chains)-1].ChainPEM, upCert.ChainPEM...)
|
||||
}
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// always add preferred/first certificate chain
|
||||
resp, err := addChain(certURL)
|
||||
if err != nil {
|
||||
return chains, err
|
||||
}
|
||||
|
||||
// "The server MAY provide one or more link relation header fields
|
||||
// [RFC8288] with relation 'alternate'. Each such field SHOULD express
|
||||
// an alternative certificate chain starting with the same end-entity
|
||||
// certificate. This can be used to express paths to various trust
|
||||
// anchors. Clients can fetch these alternates and use their own
|
||||
// heuristics to decide which is optimal." §7.4.2
|
||||
alternates := extractLinks(resp, "alternate")
|
||||
for _, altURL := range alternates {
|
||||
_, err = addChain(altURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("retrieving alternate certificate chain at %s: %w", altURL, err)
|
||||
}
|
||||
}
|
||||
|
||||
return chains, nil
|
||||
}
|
||||
|
||||
// RevokeCertificate revokes the given certificate. If the certificate key is not
|
||||
// provided, then the account key is used instead. See §7.6.
|
||||
func (c *Client) RevokeCertificate(ctx context.Context, account Account, cert *x509.Certificate, certKey crypto.Signer, reason int) error {
|
||||
if err := c.provision(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
body := struct {
|
||||
Certificate string `json:"certificate"`
|
||||
Reason int `json:"reason"`
|
||||
}{
|
||||
Certificate: base64.RawURLEncoding.EncodeToString(cert.Raw),
|
||||
Reason: reason,
|
||||
}
|
||||
|
||||
// "Revocation requests are different from other ACME requests in that
|
||||
// they can be signed with either an account key pair or the key pair in
|
||||
// the certificate." §7.6
|
||||
kid := ""
|
||||
if certKey == account.PrivateKey {
|
||||
kid = account.Location
|
||||
}
|
||||
|
||||
_, err := c.httpPostJWS(ctx, certKey, kid, c.dir.RevokeCert, body, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// Reasons for revoking a certificate, as defined
|
||||
// by RFC 5280 §5.3.1.
|
||||
// https://tools.ietf.org/html/rfc5280#section-5.3.1
|
||||
const (
|
||||
ReasonUnspecified = iota // 0
|
||||
ReasonKeyCompromise // 1
|
||||
ReasonCACompromise // 2
|
||||
ReasonAffiliationChanged // 3
|
||||
ReasonSuperseded // 4
|
||||
ReasonCessationOfOperation // 5
|
||||
ReasonCertificateHold // 6
|
||||
_ // 7 (unused)
|
||||
ReasonRemoveFromCRL // 8
|
||||
ReasonPrivilegeWithdrawn // 9
|
||||
ReasonAACompromise // 10
|
||||
)
|
||||
133
vendor/github.com/mholt/acmez/acme/challenge.go
generated
vendored
Normal file
133
vendor/github.com/mholt/acmez/acme/challenge.go
generated
vendored
Normal file
@ -0,0 +1,133 @@
|
||||
// Copyright 2020 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
)
|
||||
|
||||
// Challenge holds information about an ACME challenge.
|
||||
//
|
||||
// "An ACME challenge object represents a server's offer to validate a
|
||||
// client's possession of an identifier in a specific way. Unlike the
|
||||
// other objects listed above, there is not a single standard structure
|
||||
// for a challenge object. The contents of a challenge object depend on
|
||||
// the validation method being used. The general structure of challenge
|
||||
// objects and an initial set of validation methods are described in
|
||||
// Section 8." §7.1.5
|
||||
type Challenge struct {
|
||||
// "Challenge objects all contain the following basic fields..." §8
|
||||
|
||||
// type (required, string): The type of challenge encoded in the
|
||||
// object.
|
||||
Type string `json:"type"`
|
||||
|
||||
// url (required, string): The URL to which a response can be posted.
|
||||
URL string `json:"url"`
|
||||
|
||||
// status (required, string): The status of this challenge. Possible
|
||||
// values are "pending", "processing", "valid", and "invalid" (see
|
||||
// Section 7.1.6).
|
||||
Status string `json:"status"`
|
||||
|
||||
// validated (optional, string): The time at which the server validated
|
||||
// this challenge, encoded in the format specified in [RFC3339].
|
||||
// This field is REQUIRED if the "status" field is "valid".
|
||||
Validated string `json:"validated,omitempty"`
|
||||
|
||||
// error (optional, object): Error that occurred while the server was
|
||||
// validating the challenge, if any, structured as a problem document
|
||||
// [RFC7807]. Multiple errors can be indicated by using subproblems
|
||||
// Section 6.7.1. A challenge object with an error MUST have status
|
||||
// equal to "invalid".
|
||||
Error *Problem `json:"error,omitempty"`
|
||||
|
||||
// "All additional fields are specified by the challenge type." §8
|
||||
// (We also add our own for convenience.)
|
||||
|
||||
// "The token for a challenge is a string comprised entirely of
|
||||
// characters in the URL-safe base64 alphabet." §8.1
|
||||
//
|
||||
// Used by the http-01, tls-alpn-01, and dns-01 challenges.
|
||||
Token string `json:"token,omitempty"`
|
||||
|
||||
// A key authorization is a string that concatenates the token for the
|
||||
// challenge with a key fingerprint, separated by a "." character (§8.1):
|
||||
//
|
||||
// keyAuthorization = token || '.' || base64url(Thumbprint(accountKey))
|
||||
//
|
||||
// This client package automatically assembles and sets this value for you.
|
||||
KeyAuthorization string `json:"keyAuthorization,omitempty"`
|
||||
|
||||
// We attach the identifier that this challenge is associated with, which
|
||||
// may be useful information for solving a challenge. It is not part of the
|
||||
// structure as defined by the spec but is added by us to provide enough
|
||||
// information to solve the DNS-01 challenge.
|
||||
Identifier Identifier `json:"identifier,omitempty"`
|
||||
}
|
||||
|
||||
// HTTP01ResourcePath returns the URI path for solving the http-01 challenge.
|
||||
//
|
||||
// "The path at which the resource is provisioned is comprised of the
|
||||
// fixed prefix '/.well-known/acme-challenge/', followed by the 'token'
|
||||
// value in the challenge." §8.3
|
||||
func (c Challenge) HTTP01ResourcePath() string {
|
||||
return "/.well-known/acme-challenge/" + c.Token
|
||||
}
|
||||
|
||||
// DNS01TXTRecordName returns the name of the TXT record to create for
|
||||
// solving the dns-01 challenge.
|
||||
//
|
||||
// "The client constructs the validation domain name by prepending the
|
||||
// label '_acme-challenge' to the domain name being validated, then
|
||||
// provisions a TXT record with the digest value under that name." §8.4
|
||||
func (c Challenge) DNS01TXTRecordName() string {
|
||||
return "_acme-challenge." + c.Identifier.Value
|
||||
}
|
||||
|
||||
// DNS01KeyAuthorization encodes a key authorization value to be used
|
||||
// in a TXT record for the _acme-challenge DNS record.
|
||||
//
|
||||
// "A client fulfills this challenge by constructing a key authorization
|
||||
// from the 'token' value provided in the challenge and the client's
|
||||
// account key. The client then computes the SHA-256 digest [FIPS180-4]
|
||||
// of the key authorization.
|
||||
//
|
||||
// The record provisioned to the DNS contains the base64url encoding of
|
||||
// this digest." §8.4
|
||||
func (c Challenge) DNS01KeyAuthorization() string {
|
||||
h := sha256.Sum256([]byte(c.KeyAuthorization))
|
||||
return base64.RawURLEncoding.EncodeToString(h[:])
|
||||
}
|
||||
|
||||
// InitiateChallenge "indicates to the server that it is ready for the challenge
|
||||
// validation by sending an empty JSON body ('{}') carried in a POST request to
|
||||
// the challenge URL (not the authorization URL)." §7.5.1
|
||||
func (c *Client) InitiateChallenge(ctx context.Context, account Account, challenge Challenge) (Challenge, error) {
|
||||
if err := c.provision(ctx); err != nil {
|
||||
return Challenge{}, err
|
||||
}
|
||||
_, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, challenge.URL, struct{}{}, &challenge)
|
||||
return challenge, err
|
||||
}
|
||||
|
||||
// The standard or well-known ACME challenge types.
|
||||
const (
|
||||
ChallengeTypeHTTP01 = "http-01" // RFC 8555 §8.3
|
||||
ChallengeTypeDNS01 = "dns-01" // RFC 8555 §8.4
|
||||
ChallengeTypeTLSALPN01 = "tls-alpn-01" // RFC 8737 §3
|
||||
)
|
||||
240
vendor/github.com/mholt/acmez/acme/client.go
generated
vendored
Normal file
240
vendor/github.com/mholt/acmez/acme/client.go
generated
vendored
Normal file
@ -0,0 +1,240 @@
|
||||
// Copyright 2020 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package acme fully implements the ACME protocol specification as
|
||||
// described in RFC 8555: https://tools.ietf.org/html/rfc8555.
|
||||
//
|
||||
// It is designed to work smoothly in large-scale deployments with
|
||||
// high resilience to errors and intermittent network or server issues,
|
||||
// with retries built-in at every layer of the HTTP request stack.
|
||||
//
|
||||
// NOTE: This is a low-level API. Most users will want the mholt/acmez
|
||||
// package which is more concerned with configuring challenges and
|
||||
// implementing the order flow. However, using this package directly
|
||||
// is recommended for advanced use cases having niche requirements.
|
||||
// See the examples in the examples/plumbing folder for a tutorial.
|
||||
package acme
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Client facilitates ACME client operations as defined by the spec.
|
||||
//
|
||||
// Because the client is synchronized for concurrent use, it should
|
||||
// not be copied.
|
||||
//
|
||||
// Many errors that are returned by a Client are likely to be of type
|
||||
// Problem as long as the ACME server returns a structured error
|
||||
// response. This package wraps errors that may be of type Problem,
|
||||
// so you can access the details using the conventional Go pattern:
|
||||
//
|
||||
// var problem Problem
|
||||
// if errors.As(err, &problem) {
|
||||
// log.Printf("Houston, we have a problem: %+v", problem)
|
||||
// }
|
||||
//
|
||||
// All Problem errors originate from the ACME server.
|
||||
type Client struct {
|
||||
// The ACME server's directory endpoint.
|
||||
Directory string
|
||||
|
||||
// Custom HTTP client.
|
||||
HTTPClient *http.Client
|
||||
|
||||
// Augmentation of the User-Agent header. Please set
|
||||
// this so that CAs can troubleshoot bugs more easily.
|
||||
UserAgent string
|
||||
|
||||
// Delay between poll attempts. Only used if server
|
||||
// does not supply a Retry-Afer header. Default: 250ms
|
||||
PollInterval time.Duration
|
||||
|
||||
// Maximum duration for polling. Default: 5m
|
||||
PollTimeout time.Duration
|
||||
|
||||
// An optional logger. Default: no logs
|
||||
Logger *zap.Logger
|
||||
|
||||
mu sync.Mutex // protects all unexported fields
|
||||
dir Directory
|
||||
nonces *stack
|
||||
}
|
||||
|
||||
// GetDirectory retrieves the directory configured at c.Directory. It is
|
||||
// NOT necessary to call this to provision the client. It is only useful
|
||||
// if you want to access a copy of the directory yourself.
|
||||
func (c *Client) GetDirectory(ctx context.Context) (Directory, error) {
|
||||
if err := c.provision(ctx); err != nil {
|
||||
return Directory{}, err
|
||||
}
|
||||
return c.dir, nil
|
||||
}
|
||||
|
||||
func (c *Client) provision(ctx context.Context) error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if c.nonces == nil {
|
||||
c.nonces = new(stack)
|
||||
}
|
||||
|
||||
err := c.provisionDirectory(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("provisioning client: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) provisionDirectory(ctx context.Context) error {
|
||||
// don't get directory again if we already have it;
|
||||
// checking any one of the required fields will do
|
||||
if c.dir.NewNonce != "" {
|
||||
return nil
|
||||
}
|
||||
if c.Directory == "" {
|
||||
return fmt.Errorf("missing directory URL")
|
||||
}
|
||||
// prefer cached version if it's recent enough
|
||||
directoriesMu.Lock()
|
||||
defer directoriesMu.Unlock()
|
||||
if dir, ok := directories[c.Directory]; ok {
|
||||
if time.Since(dir.retrieved) < 12*time.Hour {
|
||||
c.dir = dir.Directory
|
||||
return nil
|
||||
}
|
||||
}
|
||||
_, err := c.httpReq(ctx, http.MethodGet, c.Directory, nil, &c.dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
directories[c.Directory] = cachedDirectory{c.dir, time.Now()}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) nonce(ctx context.Context) (string, error) {
|
||||
nonce := c.nonces.pop()
|
||||
if nonce != "" {
|
||||
return nonce, nil
|
||||
}
|
||||
|
||||
if c.dir.NewNonce == "" {
|
||||
return "", fmt.Errorf("directory missing newNonce endpoint")
|
||||
}
|
||||
|
||||
resp, err := c.httpReq(ctx, http.MethodHead, c.dir.NewNonce, nil, nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("fetching new nonce from server: %w", err)
|
||||
}
|
||||
|
||||
return resp.Header.Get(replayNonce), nil
|
||||
}
|
||||
|
||||
func (c *Client) pollInterval() time.Duration {
|
||||
if c.PollInterval == 0 {
|
||||
return defaultPollInterval
|
||||
}
|
||||
return c.PollInterval
|
||||
}
|
||||
|
||||
func (c *Client) pollTimeout() time.Duration {
|
||||
if c.PollTimeout == 0 {
|
||||
return defaultPollTimeout
|
||||
}
|
||||
return c.PollTimeout
|
||||
}
|
||||
|
||||
// Directory acts as an index for the ACME server as
|
||||
// specified in the spec: "In order to help clients
|
||||
// configure themselves with the right URLs for each
|
||||
// ACME operation, ACME servers provide a directory
|
||||
// object." §7.1.1
|
||||
type Directory struct {
|
||||
NewNonce string `json:"newNonce"`
|
||||
NewAccount string `json:"newAccount"`
|
||||
NewOrder string `json:"newOrder"`
|
||||
NewAuthz string `json:"newAuthz,omitempty"`
|
||||
RevokeCert string `json:"revokeCert"`
|
||||
KeyChange string `json:"keyChange"`
|
||||
Meta *DirectoryMeta `json:"meta,omitempty"`
|
||||
}
|
||||
|
||||
// DirectoryMeta is optional extra data that may be
|
||||
// included in an ACME server directory. §7.1.1
|
||||
type DirectoryMeta struct {
|
||||
TermsOfService string `json:"termsOfService,omitempty"`
|
||||
Website string `json:"website,omitempty"`
|
||||
CAAIdentities []string `json:"caaIdentities,omitempty"`
|
||||
ExternalAccountRequired bool `json:"externalAccountRequired,omitempty"`
|
||||
}
|
||||
|
||||
// stack is a simple thread-safe stack.
|
||||
type stack struct {
|
||||
stack []string
|
||||
stackMu sync.Mutex
|
||||
}
|
||||
|
||||
func (s *stack) push(v string) {
|
||||
if v == "" {
|
||||
return
|
||||
}
|
||||
s.stackMu.Lock()
|
||||
defer s.stackMu.Unlock()
|
||||
if len(s.stack) >= 64 {
|
||||
return
|
||||
}
|
||||
s.stack = append(s.stack, v)
|
||||
}
|
||||
|
||||
func (s *stack) pop() string {
|
||||
s.stackMu.Lock()
|
||||
defer s.stackMu.Unlock()
|
||||
n := len(s.stack)
|
||||
if n == 0 {
|
||||
return ""
|
||||
}
|
||||
v := s.stack[n-1]
|
||||
s.stack = s.stack[:n-1]
|
||||
return v
|
||||
}
|
||||
|
||||
// Directories seldom (if ever) change in practice, and
|
||||
// client structs are often ephemeral, so we can cache
|
||||
// directories to speed things up a bit for the user.
|
||||
// Keyed by directory URL.
|
||||
var (
|
||||
directories = make(map[string]cachedDirectory)
|
||||
directoriesMu sync.Mutex
|
||||
)
|
||||
|
||||
type cachedDirectory struct {
|
||||
Directory
|
||||
retrieved time.Time
|
||||
}
|
||||
|
||||
// replayNonce is the header field that contains a new
|
||||
// anti-replay nonce from the server.
|
||||
const replayNonce = "Replay-Nonce"
|
||||
|
||||
const (
|
||||
defaultPollInterval = 250 * time.Millisecond
|
||||
defaultPollTimeout = 5 * time.Minute
|
||||
)
|
||||
393
vendor/github.com/mholt/acmez/acme/http.go
generated
vendored
Normal file
393
vendor/github.com/mholt/acmez/acme/http.go
generated
vendored
Normal file
@ -0,0 +1,393 @@
|
||||
// Copyright 2020 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// httpPostJWS performs robust HTTP requests by JWS-encoding the JSON of input.
|
||||
// If output is specified, the response body is written into it: if the response
|
||||
// Content-Type is JSON, it will be JSON-decoded into output (which must be a
|
||||
// pointer); otherwise, if output is an io.Writer, the response body will be
|
||||
// written to it uninterpreted. In all cases, the returned response value's
|
||||
// body will have been drained and closed, so there is no need to close it again.
|
||||
// It automatically retries in the case of network, I/O, or badNonce errors.
|
||||
func (c *Client) httpPostJWS(ctx context.Context, privateKey crypto.Signer,
|
||||
kid, endpoint string, input, output interface{}) (*http.Response, error) {
|
||||
|
||||
if err := c.provision(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var resp *http.Response
|
||||
var err error
|
||||
|
||||
// we can retry on internal server errors just in case it was a hiccup,
|
||||
// but we probably don't need to retry so many times in that case
|
||||
internalServerErrors, maxInternalServerErrors := 0, 3
|
||||
|
||||
// set a hard cap on the number of retries for any other reason
|
||||
const maxAttempts = 10
|
||||
var attempts int
|
||||
for attempts = 1; attempts <= maxAttempts; attempts++ {
|
||||
if attempts > 1 {
|
||||
select {
|
||||
case <-time.After(250 * time.Millisecond):
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
var nonce string // avoid shadowing err
|
||||
nonce, err = c.nonce(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var encodedPayload []byte // avoid shadowing err
|
||||
encodedPayload, err = jwsEncodeJSON(input, privateKey, keyID(kid), nonce, endpoint)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("encoding payload: %v", err)
|
||||
}
|
||||
|
||||
resp, err = c.httpReq(ctx, http.MethodPost, endpoint, encodedPayload, output)
|
||||
if err == nil {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// "When a server rejects a request because its nonce value was
|
||||
// unacceptable (or not present), it MUST provide HTTP status code 400
|
||||
// (Bad Request), and indicate the ACME error type
|
||||
// 'urn:ietf:params:acme:error:badNonce'. An error response with the
|
||||
// 'badNonce' error type MUST include a Replay-Nonce header field with a
|
||||
// fresh nonce that the server will accept in a retry of the original
|
||||
// query (and possibly in other requests, according to the server's
|
||||
// nonce scoping policy). On receiving such a response, a client SHOULD
|
||||
// retry the request using the new nonce." §6.5
|
||||
var problem Problem
|
||||
if errors.As(err, &problem) {
|
||||
if problem.Type == ProblemTypeBadNonce {
|
||||
if c.Logger != nil {
|
||||
c.Logger.Debug("server rejected our nonce; retrying",
|
||||
zap.String("detail", problem.Detail),
|
||||
zap.Error(err))
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// internal server errors *could* just be a hiccup and it may be worth
|
||||
// trying again, but not nearly so many times as for other reasons
|
||||
if resp != nil && resp.StatusCode >= 500 {
|
||||
internalServerErrors++
|
||||
if internalServerErrors < maxInternalServerErrors {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// for any other error, there's not much we can do automatically
|
||||
break
|
||||
}
|
||||
|
||||
return resp, fmt.Errorf("attempt %d: %s: %w", attempts, endpoint, err)
|
||||
}
|
||||
|
||||
// httpReq robustly performs an HTTP request using the given method to the given endpoint, honoring
|
||||
// the given context's cancellation. The joseJSONPayload is optional; if not nil, it is expected to
|
||||
// be a JOSE+JSON encoding. The output is also optional; if not nil, the response body will be read
|
||||
// into output. If the response Content-Type is JSON, it will be JSON-decoded into output, which
|
||||
// must be a pointer type. If the response is any other Content-Type and if output is a io.Writer,
|
||||
// it will be written (without interpretation or decoding) to output. In all cases, the returned
|
||||
// response value will have the body drained and closed, so there is no need to close it again.
|
||||
//
|
||||
// If there are any network or I/O errors, the request will be retried as safely and resiliently as
|
||||
// possible.
|
||||
func (c *Client) httpReq(ctx context.Context, method, endpoint string, joseJSONPayload []byte, output interface{}) (*http.Response, error) {
|
||||
// even if the caller doesn't specify an output, we still use a
|
||||
// buffer to store possible error response (we reset it later)
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
defer bufPool.Put(buf)
|
||||
|
||||
var resp *http.Response
|
||||
var err error
|
||||
|
||||
// potentially retry the request if there's network, I/O, or server internal errors
|
||||
const maxAttempts = 3
|
||||
for attempt := 0; attempt < maxAttempts; attempt++ {
|
||||
if attempt > 0 {
|
||||
// traffic calming ahead
|
||||
select {
|
||||
case <-time.After(250 * time.Millisecond):
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
var body io.Reader
|
||||
if joseJSONPayload != nil {
|
||||
body = bytes.NewReader(joseJSONPayload)
|
||||
}
|
||||
|
||||
var req *http.Request
|
||||
req, err = http.NewRequestWithContext(ctx, method, endpoint, body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating request: %w", err)
|
||||
}
|
||||
if len(joseJSONPayload) > 0 {
|
||||
req.Header.Set("Content-Type", "application/jose+json")
|
||||
}
|
||||
|
||||
// on first attempt, we need to reset buf since it
|
||||
// came from the pool; after first attempt, we should
|
||||
// still reset it because we might be retrying after
|
||||
// a partial download
|
||||
buf.Reset()
|
||||
|
||||
var retry bool
|
||||
resp, retry, err = c.doHTTPRequest(req, buf)
|
||||
if err != nil {
|
||||
if retry {
|
||||
if c.Logger != nil {
|
||||
c.Logger.Warn("HTTP request failed; retrying",
|
||||
zap.String("url", req.URL.String()),
|
||||
zap.Error(err))
|
||||
}
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// check for HTTP errors
|
||||
switch {
|
||||
case resp.StatusCode >= 200 && resp.StatusCode < 300: // OK
|
||||
case resp.StatusCode >= 400 && resp.StatusCode < 600: // error
|
||||
if parseMediaType(resp) == "application/problem+json" {
|
||||
// "When the server responds with an error status, it SHOULD provide
|
||||
// additional information using a problem document [RFC7807]." (§6.7)
|
||||
var problem Problem
|
||||
err = json.Unmarshal(buf.Bytes(), &problem)
|
||||
if err != nil {
|
||||
return resp, fmt.Errorf("HTTP %d: JSON-decoding problem details: %w (raw='%s')",
|
||||
resp.StatusCode, err, buf.String())
|
||||
}
|
||||
if resp.StatusCode >= 500 && joseJSONPayload == nil {
|
||||
// a 5xx status is probably safe to retry on even after a
|
||||
// request that had no I/O errors; it could be that the
|
||||
// server just had a hiccup... so try again, but only if
|
||||
// there is no request body, because we can't replay a
|
||||
// request that has an anti-replay nonce, obviously
|
||||
err = problem
|
||||
continue
|
||||
}
|
||||
return resp, problem
|
||||
}
|
||||
return resp, fmt.Errorf("HTTP %d: %s", resp.StatusCode, buf.String())
|
||||
default: // what even is this
|
||||
return resp, fmt.Errorf("unexpected status code: HTTP %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// do not retry if we got this far (success)
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// if expecting a body, finally decode it
|
||||
if output != nil {
|
||||
contentType := parseMediaType(resp)
|
||||
switch contentType {
|
||||
case "application/json":
|
||||
// unmarshal JSON
|
||||
err = json.Unmarshal(buf.Bytes(), output)
|
||||
if err != nil {
|
||||
return resp, fmt.Errorf("JSON-decoding response body: %w", err)
|
||||
}
|
||||
|
||||
default:
|
||||
// don't interpret anything else here; just hope
|
||||
// it's a Writer and copy the bytes
|
||||
w, ok := output.(io.Writer)
|
||||
if !ok {
|
||||
return resp, fmt.Errorf("response Content-Type is %s but target container is not io.Writer: %T", contentType, output)
|
||||
}
|
||||
_, err = io.Copy(w, buf)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// doHTTPRequest performs an HTTP request at most one time. It returns the response
|
||||
// (with drained and closed body), having drained any request body into buf. If
|
||||
// retry == true is returned, then the request should be safe to retry in the case
|
||||
// of an error. However, in some cases a retry may be recommended even if part of
|
||||
// the response body has been read and written into buf. Thus, the buffer may have
|
||||
// been partially written to and should be reset before being reused.
|
||||
//
|
||||
// This method remembers any nonce returned by the server.
|
||||
func (c *Client) doHTTPRequest(req *http.Request, buf *bytes.Buffer) (resp *http.Response, retry bool, err error) {
|
||||
req.Header.Set("User-Agent", c.userAgent())
|
||||
|
||||
resp, err = c.httpClient().Do(req)
|
||||
if err != nil {
|
||||
return resp, true, fmt.Errorf("performing request: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if c.Logger != nil {
|
||||
c.Logger.Debug("http request",
|
||||
zap.String("method", req.Method),
|
||||
zap.String("url", req.URL.String()),
|
||||
zap.Reflect("headers", req.Header),
|
||||
zap.Reflect("response_headers", resp.Header),
|
||||
zap.Int("status_code", resp.StatusCode))
|
||||
}
|
||||
|
||||
// "The server MUST include a Replay-Nonce header field
|
||||
// in every successful response to a POST request and
|
||||
// SHOULD provide it in error responses as well." §6.5
|
||||
//
|
||||
// "Before sending a POST request to the server, an ACME
|
||||
// client needs to have a fresh anti-replay nonce to put
|
||||
// in the 'nonce' header of the JWS. In most cases, the
|
||||
// client will have gotten a nonce from a previous
|
||||
// request." §7.2
|
||||
//
|
||||
// So basically, we need to remember the nonces we get
|
||||
// and use them at the next opportunity.
|
||||
c.nonces.push(resp.Header.Get(replayNonce))
|
||||
|
||||
// drain the response body, even if we aren't keeping it
|
||||
// (this allows us to reuse the connection and also read
|
||||
// any error information)
|
||||
_, err = io.Copy(buf, resp.Body)
|
||||
if err != nil {
|
||||
// this is likely a network or I/O error, but is it worth retrying?
|
||||
// technically the request has already completed, it was just our
|
||||
// download of the response that failed; so we probably should not
|
||||
// retry if the request succeeded... however, if there was an HTTP
|
||||
// error, it likely didn't count against any server-enforced rate
|
||||
// limits, and we DO want to know the error information, so it should
|
||||
// be safe to retry the request in those cases AS LONG AS there is
|
||||
// no request body, which in the context of ACME likely includes an
|
||||
// anti-replay nonce, which obviously we can't reuse
|
||||
retry = resp.StatusCode >= 400 && req.Body == nil
|
||||
return resp, retry, fmt.Errorf("reading response body: %w", err)
|
||||
}
|
||||
|
||||
return resp, false, nil
|
||||
}
|
||||
|
||||
func (c *Client) httpClient() *http.Client {
|
||||
if c.HTTPClient == nil {
|
||||
return http.DefaultClient
|
||||
}
|
||||
return c.HTTPClient
|
||||
}
|
||||
|
||||
func (c *Client) userAgent() string {
|
||||
ua := fmt.Sprintf("acmez (%s; %s)", runtime.GOOS, runtime.GOARCH)
|
||||
if c.UserAgent != "" {
|
||||
ua = c.UserAgent + " " + ua
|
||||
}
|
||||
return ua
|
||||
}
|
||||
|
||||
// extractLinks extracts the URL from the Link header with the
|
||||
// designated relation rel. It may return more than value
|
||||
// if there are multiple matching Link values.
|
||||
//
|
||||
// Originally by Isaac: https://github.com/eggsampler/acme
|
||||
// and has been modified to support multiple matching Links.
|
||||
func extractLinks(resp *http.Response, rel string) []string {
|
||||
if resp == nil {
|
||||
return nil
|
||||
}
|
||||
var links []string
|
||||
for _, l := range resp.Header["Link"] {
|
||||
matches := linkRegex.FindAllStringSubmatch(l, -1)
|
||||
for _, m := range matches {
|
||||
if len(m) != 3 {
|
||||
continue
|
||||
}
|
||||
if m[2] == rel {
|
||||
links = append(links, m[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
return links
|
||||
}
|
||||
|
||||
// parseMediaType returns only the media type from the
|
||||
// Content-Type header of resp.
|
||||
func parseMediaType(resp *http.Response) string {
|
||||
if resp == nil {
|
||||
return ""
|
||||
}
|
||||
ct := resp.Header.Get("Content-Type")
|
||||
sep := strings.Index(ct, ";")
|
||||
if sep < 0 {
|
||||
return ct
|
||||
}
|
||||
return strings.TrimSpace(ct[:sep])
|
||||
}
|
||||
|
||||
// retryAfter returns a duration from the response's Retry-After
|
||||
// header field, if it exists. It can return an error if the
|
||||
// header contains an invalid value. If there is no error but
|
||||
// there is no Retry-After header provided, then the fallback
|
||||
// duration is returned instead.
|
||||
func retryAfter(resp *http.Response, fallback time.Duration) (time.Duration, error) {
|
||||
if resp == nil {
|
||||
return fallback, nil
|
||||
}
|
||||
raSeconds := resp.Header.Get("Retry-After")
|
||||
if raSeconds == "" {
|
||||
return fallback, nil
|
||||
}
|
||||
ra, err := strconv.Atoi(raSeconds)
|
||||
if err != nil || ra < 0 {
|
||||
return 0, fmt.Errorf("response had invalid Retry-After header: %s", raSeconds)
|
||||
}
|
||||
return time.Duration(ra) * time.Second, nil
|
||||
}
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
}
|
||||
|
||||
var linkRegex = regexp.MustCompile(`<(.+?)>;\s*rel="(.+?)"`)
|
||||
263
vendor/github.com/mholt/acmez/acme/jws.go
generated
vendored
Normal file
263
vendor/github.com/mholt/acmez/acme/jws.go
generated
vendored
Normal file
@ -0,0 +1,263 @@
|
||||
// Copyright 2020 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// --- ORIGINAL LICENSE ---
|
||||
//
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the THIRD-PARTY file.
|
||||
//
|
||||
// (This file has been modified from its original contents.)
|
||||
// (And it has dragons. Don't wake the dragons.)
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
_ "crypto/sha512" // need for EC keys
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
var errUnsupportedKey = fmt.Errorf("unknown key type; only RSA and ECDSA are supported")
|
||||
|
||||
// keyID is the account identity provided by a CA during registration.
|
||||
type keyID string
|
||||
|
||||
// noKeyID indicates that jwsEncodeJSON should compute and use JWK instead of a KID.
|
||||
// See jwsEncodeJSON for details.
|
||||
const noKeyID = keyID("")
|
||||
|
||||
// // noPayload indicates jwsEncodeJSON will encode zero-length octet string
|
||||
// // in a JWS request. This is called POST-as-GET in RFC 8555 and is used to make
|
||||
// // authenticated GET requests via POSTing with an empty payload.
|
||||
// // See https://tools.ietf.org/html/rfc8555#section-6.3 for more details.
|
||||
// const noPayload = ""
|
||||
|
||||
// jwsEncodeEAB creates a JWS payload for External Account Binding according to RFC 8555 §7.3.4.
|
||||
func jwsEncodeEAB(accountKey crypto.PublicKey, hmacKey []byte, kid keyID, url string) ([]byte, error) {
|
||||
// §7.3.4: "The 'alg' field MUST indicate a MAC-based algorithm"
|
||||
alg, sha := "HS256", crypto.SHA256
|
||||
|
||||
// §7.3.4: "The 'nonce' field MUST NOT be present"
|
||||
phead, err := jwsHead(alg, "", url, kid, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
encodedKey, err := jwkEncode(accountKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload := base64.RawURLEncoding.EncodeToString([]byte(encodedKey))
|
||||
|
||||
payloadToSign := []byte(phead + "." + payload)
|
||||
|
||||
h := hmac.New(sha256.New, hmacKey)
|
||||
h.Write(payloadToSign)
|
||||
sig := h.Sum(nil)
|
||||
|
||||
return jwsFinal(sha, sig, phead, payload)
|
||||
}
|
||||
|
||||
// jwsEncodeJSON signs claimset using provided key and a nonce.
|
||||
// The result is serialized in JSON format containing either kid or jwk
|
||||
// fields based on the provided keyID value.
|
||||
//
|
||||
// If kid is non-empty, its quoted value is inserted in the protected head
|
||||
// as "kid" field value. Otherwise, JWK is computed using jwkEncode and inserted
|
||||
// as "jwk" field value. The "jwk" and "kid" fields are mutually exclusive.
|
||||
//
|
||||
// See https://tools.ietf.org/html/rfc7515#section-7.
|
||||
//
|
||||
// If nonce is empty, it will not be encoded into the header.
|
||||
func jwsEncodeJSON(claimset interface{}, key crypto.Signer, kid keyID, nonce, url string) ([]byte, error) {
|
||||
alg, sha := jwsHasher(key.Public())
|
||||
if alg == "" || !sha.Available() {
|
||||
return nil, errUnsupportedKey
|
||||
}
|
||||
|
||||
phead, err := jwsHead(alg, nonce, url, kid, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var payload string
|
||||
if claimset != nil {
|
||||
cs, err := json.Marshal(claimset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload = base64.RawURLEncoding.EncodeToString(cs)
|
||||
}
|
||||
|
||||
payloadToSign := []byte(phead + "." + payload)
|
||||
hash := sha.New()
|
||||
_, _ = hash.Write(payloadToSign)
|
||||
digest := hash.Sum(nil)
|
||||
|
||||
sig, err := jwsSign(key, sha, digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return jwsFinal(sha, sig, phead, payload)
|
||||
}
|
||||
|
||||
// jwkEncode encodes public part of an RSA or ECDSA key into a JWK.
|
||||
// The result is also suitable for creating a JWK thumbprint.
|
||||
// https://tools.ietf.org/html/rfc7517
|
||||
func jwkEncode(pub crypto.PublicKey) (string, error) {
|
||||
switch pub := pub.(type) {
|
||||
case *rsa.PublicKey:
|
||||
// https://tools.ietf.org/html/rfc7518#section-6.3.1
|
||||
n := pub.N
|
||||
e := big.NewInt(int64(pub.E))
|
||||
// Field order is important.
|
||||
// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
|
||||
return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`,
|
||||
base64.RawURLEncoding.EncodeToString(e.Bytes()),
|
||||
base64.RawURLEncoding.EncodeToString(n.Bytes()),
|
||||
), nil
|
||||
case *ecdsa.PublicKey:
|
||||
// https://tools.ietf.org/html/rfc7518#section-6.2.1
|
||||
p := pub.Curve.Params()
|
||||
n := p.BitSize / 8
|
||||
if p.BitSize%8 != 0 {
|
||||
n++
|
||||
}
|
||||
x := pub.X.Bytes()
|
||||
if n > len(x) {
|
||||
x = append(make([]byte, n-len(x)), x...)
|
||||
}
|
||||
y := pub.Y.Bytes()
|
||||
if n > len(y) {
|
||||
y = append(make([]byte, n-len(y)), y...)
|
||||
}
|
||||
// Field order is important.
|
||||
// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
|
||||
return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`,
|
||||
p.Name,
|
||||
base64.RawURLEncoding.EncodeToString(x),
|
||||
base64.RawURLEncoding.EncodeToString(y),
|
||||
), nil
|
||||
}
|
||||
return "", errUnsupportedKey
|
||||
}
|
||||
|
||||
// jwsHead constructs the protected JWS header for the given fields.
|
||||
// Since jwk and kid are mutually-exclusive, the jwk will be encoded
|
||||
// only if kid is empty. If nonce is empty, it will not be encoded.
|
||||
func jwsHead(alg, nonce, url string, kid keyID, key crypto.Signer) (string, error) {
|
||||
phead := fmt.Sprintf(`{"alg":%q`, alg)
|
||||
if kid == noKeyID {
|
||||
jwk, err := jwkEncode(key.Public())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
phead += fmt.Sprintf(`,"jwk":%s`, jwk)
|
||||
} else {
|
||||
phead += fmt.Sprintf(`,"kid":%q`, kid)
|
||||
}
|
||||
if nonce != "" {
|
||||
phead += fmt.Sprintf(`,"nonce":%q`, nonce)
|
||||
}
|
||||
phead += fmt.Sprintf(`,"url":%q}`, url)
|
||||
phead = base64.RawURLEncoding.EncodeToString([]byte(phead))
|
||||
return phead, nil
|
||||
}
|
||||
|
||||
// jwsFinal constructs the final JWS object.
|
||||
func jwsFinal(sha crypto.Hash, sig []byte, phead, payload string) ([]byte, error) {
|
||||
enc := struct {
|
||||
Protected string `json:"protected"`
|
||||
Payload string `json:"payload"`
|
||||
Sig string `json:"signature"`
|
||||
}{
|
||||
Protected: phead,
|
||||
Payload: payload,
|
||||
Sig: base64.RawURLEncoding.EncodeToString(sig),
|
||||
}
|
||||
result, err := json.Marshal(&enc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// jwsSign signs the digest using the given key.
|
||||
// The hash is unused for ECDSA keys.
|
||||
//
|
||||
// Note: non-stdlib crypto.Signer implementations are expected to return
|
||||
// the signature in the format as specified in RFC7518.
|
||||
// See https://tools.ietf.org/html/rfc7518 for more details.
|
||||
func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) {
|
||||
if key, ok := key.(*ecdsa.PrivateKey); ok {
|
||||
// The key.Sign method of ecdsa returns ASN1-encoded signature.
|
||||
// So, we use the package Sign function instead
|
||||
// to get R and S values directly and format the result accordingly.
|
||||
r, s, err := ecdsa.Sign(rand.Reader, key, digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rb, sb := r.Bytes(), s.Bytes()
|
||||
size := key.Params().BitSize / 8
|
||||
if size%8 > 0 {
|
||||
size++
|
||||
}
|
||||
sig := make([]byte, size*2)
|
||||
copy(sig[size-len(rb):], rb)
|
||||
copy(sig[size*2-len(sb):], sb)
|
||||
return sig, nil
|
||||
}
|
||||
return key.Sign(rand.Reader, digest, hash)
|
||||
}
|
||||
|
||||
// jwsHasher indicates suitable JWS algorithm name and a hash function
|
||||
// to use for signing a digest with the provided key.
|
||||
// It returns ("", 0) if the key is not supported.
|
||||
func jwsHasher(pub crypto.PublicKey) (string, crypto.Hash) {
|
||||
switch pub := pub.(type) {
|
||||
case *rsa.PublicKey:
|
||||
return "RS256", crypto.SHA256
|
||||
case *ecdsa.PublicKey:
|
||||
switch pub.Params().Name {
|
||||
case "P-256":
|
||||
return "ES256", crypto.SHA256
|
||||
case "P-384":
|
||||
return "ES384", crypto.SHA384
|
||||
case "P-521":
|
||||
return "ES512", crypto.SHA512
|
||||
}
|
||||
}
|
||||
return "", 0
|
||||
}
|
||||
|
||||
// jwkThumbprint creates a JWK thumbprint out of pub
|
||||
// as specified in https://tools.ietf.org/html/rfc7638.
|
||||
func jwkThumbprint(pub crypto.PublicKey) (string, error) {
|
||||
jwk, err := jwkEncode(pub)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
b := sha256.Sum256([]byte(jwk))
|
||||
return base64.RawURLEncoding.EncodeToString(b[:]), nil
|
||||
}
|
||||
256
vendor/github.com/mholt/acmez/acme/order.go
generated
vendored
Normal file
256
vendor/github.com/mholt/acmez/acme/order.go
generated
vendored
Normal file
@ -0,0 +1,256 @@
|
||||
// Copyright 2020 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Order is an object that "represents a client's request for a certificate
|
||||
// and is used to track the progress of that order through to issuance.
|
||||
// Thus, the object contains information about the requested
|
||||
// certificate, the authorizations that the server requires the client
|
||||
// to complete, and any certificates that have resulted from this order."
|
||||
// §7.1.3
|
||||
type Order struct {
|
||||
// status (required, string): The status of this order. Possible
|
||||
// values are "pending", "ready", "processing", "valid", and
|
||||
// "invalid". See Section 7.1.6.
|
||||
Status string `json:"status"`
|
||||
|
||||
// expires (optional, string): The timestamp after which the server
|
||||
// will consider this order invalid, encoded in the format specified
|
||||
// in [RFC3339]. This field is REQUIRED for objects with "pending"
|
||||
// or "valid" in the status field.
|
||||
Expires time.Time `json:"expires,omitempty"`
|
||||
|
||||
// identifiers (required, array of object): An array of identifier
|
||||
// objects that the order pertains to.
|
||||
Identifiers []Identifier `json:"identifiers"`
|
||||
|
||||
// notBefore (optional, string): The requested value of the notBefore
|
||||
// field in the certificate, in the date format defined in [RFC3339].
|
||||
NotBefore *time.Time `json:"notBefore,omitempty"`
|
||||
|
||||
// notAfter (optional, string): The requested value of the notAfter
|
||||
// field in the certificate, in the date format defined in [RFC3339].
|
||||
NotAfter *time.Time `json:"notAfter,omitempty"`
|
||||
|
||||
// error (optional, object): The error that occurred while processing
|
||||
// the order, if any. This field is structured as a problem document
|
||||
// [RFC7807].
|
||||
Error *Problem `json:"error,omitempty"`
|
||||
|
||||
// authorizations (required, array of string): For pending orders, the
|
||||
// authorizations that the client needs to complete before the
|
||||
// requested certificate can be issued (see Section 7.5), including
|
||||
// unexpired authorizations that the client has completed in the past
|
||||
// for identifiers specified in the order. The authorizations
|
||||
// required are dictated by server policy; there may not be a 1:1
|
||||
// relationship between the order identifiers and the authorizations
|
||||
// required. For final orders (in the "valid" or "invalid" state),
|
||||
// the authorizations that were completed. Each entry is a URL from
|
||||
// which an authorization can be fetched with a POST-as-GET request.
|
||||
Authorizations []string `json:"authorizations"`
|
||||
|
||||
// finalize (required, string): A URL that a CSR must be POSTed to once
|
||||
// all of the order's authorizations are satisfied to finalize the
|
||||
// order. The result of a successful finalization will be the
|
||||
// population of the certificate URL for the order.
|
||||
Finalize string `json:"finalize"`
|
||||
|
||||
// certificate (optional, string): A URL for the certificate that has
|
||||
// been issued in response to this order.
|
||||
Certificate string `json:"certificate"`
|
||||
|
||||
// Similar to new-account, the server returns a 201 response with
|
||||
// the URL to the order object in the Location header.
|
||||
//
|
||||
// We transfer the value from the header to this field for
|
||||
// storage and recall purposes.
|
||||
Location string `json:"-"`
|
||||
}
|
||||
|
||||
// Identifier is used in order and authorization (authz) objects.
|
||||
type Identifier struct {
|
||||
// type (required, string): The type of identifier. This document
|
||||
// defines the "dns" identifier type. See the registry defined in
|
||||
// Section 9.7.7 for any others.
|
||||
Type string `json:"type"`
|
||||
|
||||
// value (required, string): The identifier itself.
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
// NewOrder creates a new order with the server.
|
||||
//
|
||||
// "The client begins the certificate issuance process by sending a POST
|
||||
// request to the server's newOrder resource." §7.4
|
||||
func (c *Client) NewOrder(ctx context.Context, account Account, order Order) (Order, error) {
|
||||
if err := c.provision(ctx); err != nil {
|
||||
return order, err
|
||||
}
|
||||
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, c.dir.NewOrder, order, &order)
|
||||
if err != nil {
|
||||
return order, err
|
||||
}
|
||||
order.Location = resp.Header.Get("Location")
|
||||
return order, nil
|
||||
}
|
||||
|
||||
// GetOrder retrieves an order from the server. The Order's Location field must be populated.
|
||||
func (c *Client) GetOrder(ctx context.Context, account Account, order Order) (Order, error) {
|
||||
if err := c.provision(ctx); err != nil {
|
||||
return order, err
|
||||
}
|
||||
_, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, order.Location, nil, &order)
|
||||
return order, err
|
||||
}
|
||||
|
||||
// FinalizeOrder finalizes the order with the server and polls under the server has
|
||||
// updated the order status. The CSR must be in ASN.1 DER-encoded format. If this
|
||||
// succeeds, the certificate is ready to download once this returns.
|
||||
//
|
||||
// "Once the client believes it has fulfilled the server's requirements,
|
||||
// it should send a POST request to the order resource's finalize URL." §7.4
|
||||
func (c *Client) FinalizeOrder(ctx context.Context, account Account, order Order, csrASN1DER []byte) (Order, error) {
|
||||
if err := c.provision(ctx); err != nil {
|
||||
return order, err
|
||||
}
|
||||
|
||||
body := struct {
|
||||
// csr (required, string): A CSR encoding the parameters for the
|
||||
// certificate being requested [RFC2986]. The CSR is sent in the
|
||||
// base64url-encoded version of the DER format. (Note: Because this
|
||||
// field uses base64url, and does not include headers, it is
|
||||
// different from PEM.) §7.4
|
||||
CSR string `json:"csr"`
|
||||
}{
|
||||
CSR: base64.RawURLEncoding.EncodeToString(csrASN1DER),
|
||||
}
|
||||
|
||||
resp, err := c.httpPostJWS(ctx, account.PrivateKey, account.Location, order.Finalize, body, &order)
|
||||
if err != nil {
|
||||
// "A request to finalize an order will result in error if the order is
|
||||
// not in the 'ready' state. In such cases, the server MUST return a
|
||||
// 403 (Forbidden) error with a problem document of type
|
||||
// 'orderNotReady'. The client should then send a POST-as-GET request
|
||||
// to the order resource to obtain its current state. The status of the
|
||||
// order will indicate what action the client should take (see below)."
|
||||
// §7.4
|
||||
var problem Problem
|
||||
if errors.As(err, &problem) {
|
||||
if problem.Type != ProblemTypeOrderNotReady {
|
||||
return order, err
|
||||
}
|
||||
} else {
|
||||
return order, err
|
||||
}
|
||||
}
|
||||
|
||||
// unlike with accounts and authorizations, the spec isn't clear on whether
|
||||
// the server MUST set this on finalizing the order, but their example shows a
|
||||
// Location header, so I guess if it's set in the response, we should keep it
|
||||
if newLocation := resp.Header.Get("Location"); newLocation != "" {
|
||||
order.Location = newLocation
|
||||
}
|
||||
|
||||
if finished, err := orderIsFinished(order); finished {
|
||||
return order, err
|
||||
}
|
||||
|
||||
// TODO: "The elements of the "authorizations" and "identifiers" arrays are
|
||||
// immutable once set. If a client observes a change
|
||||
// in the contents of either array, then it SHOULD consider the order
|
||||
// invalid."
|
||||
|
||||
maxDuration := c.pollTimeout()
|
||||
start := time.Now()
|
||||
for time.Since(start) < maxDuration {
|
||||
// querying an order is expensive on the server-side, so we
|
||||
// shouldn't do it too frequently; honor server preference
|
||||
interval, err := retryAfter(resp, c.pollInterval())
|
||||
if err != nil {
|
||||
return order, err
|
||||
}
|
||||
select {
|
||||
case <-time.After(interval):
|
||||
case <-ctx.Done():
|
||||
return order, ctx.Err()
|
||||
}
|
||||
|
||||
resp, err = c.httpPostJWS(ctx, account.PrivateKey, account.Location, order.Location, nil, &order)
|
||||
if err != nil {
|
||||
return order, fmt.Errorf("polling order status: %w", err)
|
||||
}
|
||||
|
||||
// (same reasoning as above)
|
||||
if newLocation := resp.Header.Get("Location"); newLocation != "" {
|
||||
order.Location = newLocation
|
||||
}
|
||||
|
||||
if finished, err := orderIsFinished(order); finished {
|
||||
return order, err
|
||||
}
|
||||
}
|
||||
|
||||
return order, fmt.Errorf("order took too long")
|
||||
}
|
||||
|
||||
// orderIsFinished returns true if the order processing is complete,
|
||||
// regardless of success or failure. If this function returns true,
|
||||
// polling an order status should stop. If there is an error with the
|
||||
// order, an error will be returned. This function should be called
|
||||
// only after a request to finalize an order. See §7.4.
|
||||
func orderIsFinished(order Order) (bool, error) {
|
||||
switch order.Status {
|
||||
case StatusInvalid:
|
||||
// "invalid": The certificate will not be issued. Consider this
|
||||
// order process abandoned.
|
||||
return true, fmt.Errorf("final order is invalid: %w", order.Error)
|
||||
|
||||
case StatusPending:
|
||||
// "pending": The server does not believe that the client has
|
||||
// fulfilled the requirements. Check the "authorizations" array for
|
||||
// entries that are still pending.
|
||||
return true, fmt.Errorf("order pending, authorizations remaining: %v", order.Authorizations)
|
||||
|
||||
case StatusReady:
|
||||
// "ready": The server agrees that the requirements have been
|
||||
// fulfilled, and is awaiting finalization. Submit a finalization
|
||||
// request.
|
||||
// (we did just submit a finalization request, so this is an error)
|
||||
return true, fmt.Errorf("unexpected state: %s - order already finalized", order.Status)
|
||||
|
||||
case StatusProcessing:
|
||||
// "processing": The certificate is being issued. Send a GET request
|
||||
// after the time given in the "Retry-After" header field of the
|
||||
// response, if any.
|
||||
return false, nil
|
||||
|
||||
case StatusValid:
|
||||
// "valid": The server has issued the certificate and provisioned its
|
||||
// URL to the "certificate" field of the order. Download the
|
||||
// certificate.
|
||||
return true, nil
|
||||
|
||||
default:
|
||||
return true, fmt.Errorf("unrecognized order status: %s", order.Status)
|
||||
}
|
||||
}
|
||||
174
vendor/github.com/mholt/acmez/acme/problem.go
generated
vendored
Normal file
174
vendor/github.com/mholt/acmez/acme/problem.go
generated
vendored
Normal file
@ -0,0 +1,174 @@
|
||||
// Copyright 2020 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
// Problem carries the details of an error from HTTP APIs as
// defined in RFC 7807: https://tools.ietf.org/html/rfc7807
// and as extended by RFC 8555 §6.7:
// https://tools.ietf.org/html/rfc8555#section-6.7
//
// Problem implements the error interface (see its Error method).
type Problem struct {
	// "type" (string) - A URI reference [RFC3986] that identifies the
	// problem type. This specification encourages that, when
	// dereferenced, it provide human-readable documentation for the
	// problem type (e.g., using HTML [W3C.REC-html5-20141028]). When
	// this member is not present, its value is assumed to be
	// "about:blank". §3.1
	Type string `json:"type"`

	// "title" (string) - A short, human-readable summary of the problem
	// type. It SHOULD NOT change from occurrence to occurrence of the
	// problem, except for purposes of localization (e.g., using
	// proactive content negotiation; see [RFC7231], Section 3.4). §3.1
	Title string `json:"title,omitempty"`

	// "status" (number) - The HTTP status code ([RFC7231], Section 6)
	// generated by the origin server for this occurrence of the problem.
	// §3.1
	Status int `json:"status,omitempty"`

	// "detail" (string) - A human-readable explanation specific to this
	// occurrence of the problem. §3.1
	//
	// RFC 8555 §6.7: "Clients SHOULD display the 'detail' field of all
	// errors."
	Detail string `json:"detail,omitempty"`

	// "instance" (string) - A URI reference that identifies the specific
	// occurrence of the problem. It may or may not yield further
	// information if dereferenced. §3.1
	Instance string `json:"instance,omitempty"`

	// "Sometimes a CA may need to return multiple errors in response to a
	// request. Additionally, the CA may need to attribute errors to
	// specific identifiers. For instance, a newOrder request may contain
	// multiple identifiers for which the CA cannot issue certificates. In
	// this situation, an ACME problem document MAY contain the
	// 'subproblems' field, containing a JSON array of problem documents."
	// RFC 8555 §6.7.1
	Subproblems []Subproblem `json:"subproblems,omitempty"`

	// For convenience, we've added this field to associate with a value
	// that is related to or caused the problem. It is not part of the
	// spec, but, if a challenge fails for example, we can associate the
	// error with the problematic authz object by setting this field.
	// Challenge failures will have this set to an Authorization type.
	// (Excluded from JSON via the "-" tag.)
	Resource interface{} `json:"-"`
}
|
||||
|
||||
func (p Problem) Error() string {
|
||||
// TODO: 7.3.3: Handle changes to Terms of Service (notice it uses the Instance field and Link header)
|
||||
|
||||
// RFC 8555 §6.7: "Clients SHOULD display the 'detail' field of all errors."
|
||||
s := fmt.Sprintf("HTTP %d %s - %s", p.Status, p.Type, p.Detail)
|
||||
if len(p.Subproblems) > 0 {
|
||||
for _, v := range p.Subproblems {
|
||||
s += fmt.Sprintf(", problem %q: %s", v.Type, v.Detail)
|
||||
if v.Identifier.Type != "" || v.Identifier.Value != "" {
|
||||
s += fmt.Sprintf(" (%s_identifier=%s)", v.Identifier.Type, v.Identifier.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
if p.Instance != "" {
|
||||
s += ", url: " + p.Instance
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
|
||||
// This allows problems to be serialized by the zap logger.
|
||||
func (p Problem) MarshalLogObject(enc zapcore.ObjectEncoder) error {
|
||||
enc.AddString("type", p.Type)
|
||||
enc.AddString("title", p.Title)
|
||||
enc.AddString("detail", p.Detail)
|
||||
enc.AddString("instance", p.Instance)
|
||||
enc.AddArray("subproblems", loggableSubproblems(p.Subproblems))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Subproblem describes a more specific error in a problem according to
// RFC 8555 §6.7.1: "An ACME problem document MAY contain the
// 'subproblems' field, containing a JSON array of problem documents,
// each of which MAY contain an 'identifier' field."
type Subproblem struct {
	// The embedded Problem carries the subproblem's type/title/detail.
	Problem

	// "If present, the 'identifier' field MUST contain an ACME
	// identifier (Section 9.7.7)." §6.7.1
	//
	// NOTE(review): "omitempty" has no effect on a struct value in
	// encoding/json; an empty Identifier is still emitted as "{}".
	Identifier Identifier `json:"identifier,omitempty"`
}
|
||||
|
||||
// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
|
||||
// This allows subproblems to be serialized by the zap logger.
|
||||
func (sp Subproblem) MarshalLogObject(enc zapcore.ObjectEncoder) error {
|
||||
enc.AddString("identifier_type", sp.Identifier.Type)
|
||||
enc.AddString("identifier", sp.Identifier.Value)
|
||||
enc.AddObject("subproblem", sp.Problem)
|
||||
return nil
|
||||
}
|
||||
|
||||
type loggableSubproblems []Subproblem
|
||||
|
||||
// MarshalLogArray satisfies the zapcore.ArrayMarshaler interface.
|
||||
// This allows a list of subproblems to be serialized by the zap logger.
|
||||
func (ls loggableSubproblems) MarshalLogArray(enc zapcore.ArrayEncoder) error {
|
||||
for _, sp := range ls {
|
||||
enc.AppendObject(sp)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Standard token values for the "type" field of problems, as defined
// in RFC 8555 §6.7: https://tools.ietf.org/html/rfc8555#section-6.7
//
// "To facilitate automatic response to errors, this document defines the
// following standard tokens for use in the 'type' field (within the
// ACME URN namespace 'urn:ietf:params:acme:error:') ... This list is not
// exhaustive. The server MAY return errors whose 'type' field is set to
// a URI other than those defined above."
const (
	// ProblemTypeNamespace is the ACME error URN prefix; every standard
	// problem type below is formed by appending a token to it.
	ProblemTypeNamespace = "urn:ietf:params:acme:error:"

	ProblemTypeAccountDoesNotExist     = ProblemTypeNamespace + "accountDoesNotExist"
	ProblemTypeAlreadyRevoked          = ProblemTypeNamespace + "alreadyRevoked"
	ProblemTypeBadCSR                  = ProblemTypeNamespace + "badCSR"
	ProblemTypeBadNonce                = ProblemTypeNamespace + "badNonce"
	ProblemTypeBadPublicKey            = ProblemTypeNamespace + "badPublicKey"
	ProblemTypeBadRevocationReason     = ProblemTypeNamespace + "badRevocationReason"
	ProblemTypeBadSignatureAlgorithm   = ProblemTypeNamespace + "badSignatureAlgorithm"
	ProblemTypeCAA                     = ProblemTypeNamespace + "caa"
	ProblemTypeCompound                = ProblemTypeNamespace + "compound"
	ProblemTypeConnection              = ProblemTypeNamespace + "connection"
	ProblemTypeDNS                     = ProblemTypeNamespace + "dns"
	ProblemTypeExternalAccountRequired = ProblemTypeNamespace + "externalAccountRequired"
	ProblemTypeIncorrectResponse       = ProblemTypeNamespace + "incorrectResponse"
	ProblemTypeInvalidContact          = ProblemTypeNamespace + "invalidContact"
	ProblemTypeMalformed               = ProblemTypeNamespace + "malformed"
	ProblemTypeOrderNotReady           = ProblemTypeNamespace + "orderNotReady"
	ProblemTypeRateLimited             = ProblemTypeNamespace + "rateLimited"
	ProblemTypeRejectedIdentifier      = ProblemTypeNamespace + "rejectedIdentifier"
	ProblemTypeServerInternal          = ProblemTypeNamespace + "serverInternal"
	ProblemTypeTLS                     = ProblemTypeNamespace + "tls"
	ProblemTypeUnauthorized            = ProblemTypeNamespace + "unauthorized"
	ProblemTypeUnsupportedContact      = ProblemTypeNamespace + "unsupportedContact"
	ProblemTypeUnsupportedIdentifier   = ProblemTypeNamespace + "unsupportedIdentifier"
	ProblemTypeUserActionRequired      = ProblemTypeNamespace + "userActionRequired"
)
|
||||
697
vendor/github.com/mholt/acmez/client.go
generated
vendored
Normal file
697
vendor/github.com/mholt/acmez/client.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
72
vendor/github.com/mholt/acmez/solver.go
generated
vendored
Normal file
72
vendor/github.com/mholt/acmez/solver.go
generated
vendored
Normal file
@ -0,0 +1,72 @@
|
||||
// Copyright 2020 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package acmez
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/mholt/acmez/acme"
|
||||
)
|
||||
|
||||
// Solver is a type that can solve ACME challenges. All
// implementations MUST honor context cancellation.
// Both methods receive the challenge object being solved.
type Solver interface {
	// Present is called just before a challenge is initiated.
	// The implementation MUST prepare anything that is necessary
	// for completing the challenge; for example, provisioning
	// an HTTP resource, TLS certificate, or a DNS record.
	//
	// It MUST return quickly. If presenting the challenge token
	// will take time, then the implementation MUST do the
	// minimum amount of work required in this method, and
	// SHOULD additionally implement the Waiter interface.
	// For example, a DNS challenge solver might make a quick
	// HTTP request to a provider's API to create a new DNS
	// record, but it might be several minutes or hours before
	// the DNS record propagates. The API request should be
	// done in Present(), and waiting for propagation should
	// be done in Wait().
	Present(context.Context, acme.Challenge) error

	// CleanUp is called after a challenge is finished, whether
	// successful or not. It MUST free/remove any resources it
	// allocated/created during Present. It SHOULD NOT require
	// that Present ran successfully. It MUST return quickly.
	CleanUp(context.Context, acme.Challenge) error
}
|
||||
|
||||
// Waiter is an optional interface for Solvers to implement. Its
// primary purpose is to help ensure the challenge can be solved
// before the server gives up trying to verify the challenge.
//
// If implemented, it will be called after Present() but just
// before the challenge is initiated with the server. It blocks
// until the challenge is ready to be solved. (For example,
// waiting on a DNS record to propagate.) This allows challenges
// to succeed that would normally fail because they take too long
// to set up (i.e. the ACME server would give up polling DNS or
// the client would timeout its polling). By separating Present()
// from Wait(), it allows the slow part of all solvers to begin
// up front, rather than waiting on each solver one at a time.
//
// It MUST NOT do anything exclusive of Present() that is required
// for the challenge to succeed. In other words, if Present() is
// called but Wait() is not, then the challenge should still be able
// to succeed assuming infinite time.
//
// Implementations MUST honor context cancellation.
type Waiter interface {
	// Wait blocks until the challenge presented by Present is
	// ready to be verified, or until the context is canceled.
	Wait(context.Context, acme.Challenge) error
}
|
||||
98
vendor/github.com/mholt/acmez/tlsalpn01.go
generated
vendored
Normal file
98
vendor/github.com/mholt/acmez/tlsalpn01.go
generated
vendored
Normal file
@ -0,0 +1,98 @@
|
||||
// Copyright 2020 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package acmez
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"encoding/pem"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/mholt/acmez/acme"
|
||||
)
|
||||
|
||||
// TLSALPN01ChallengeCert creates a certificate that can be used for
|
||||
// handshakes while solving the tls-alpn-01 challenge. See RFC 8737 §3.
|
||||
func TLSALPN01ChallengeCert(challenge acme.Challenge) (*tls.Certificate, error) {
|
||||
keyAuthSum := sha256.Sum256([]byte(challenge.KeyAuthorization))
|
||||
keyAuthSumASN1, err := asn1.Marshal(keyAuthSum[:sha256.Size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
certKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
challengeKeyASN1, err := x509.MarshalECPrivateKey(certKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
|
||||
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
template := x509.Certificate{
|
||||
SerialNumber: serialNumber,
|
||||
Subject: pkix.Name{CommonName: "ACME challenge"},
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(24 * time.Hour * 365),
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
BasicConstraintsValid: true,
|
||||
DNSNames: []string{challenge.Identifier.Value},
|
||||
|
||||
// add key authentication digest as the acmeValidation-v1 extension
|
||||
// (marked as critical such that it won't be used by non-ACME software).
|
||||
// Reference: https://www.rfc-editor.org/rfc/rfc8737.html#section-3
|
||||
ExtraExtensions: []pkix.Extension{
|
||||
{
|
||||
Id: idPEACMEIdentifierV1,
|
||||
Critical: true,
|
||||
Value: keyAuthSumASN1,
|
||||
},
|
||||
},
|
||||
}
|
||||
challengeCertDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &certKey.PublicKey, certKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
challengeCertPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: challengeCertDER})
|
||||
challengeKeyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: challengeKeyASN1})
|
||||
|
||||
cert, err := tls.X509KeyPair(challengeCertPEM, challengeKeyPEM)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cert, nil
|
||||
}
|
||||
|
||||
// ACMETLS1Protocol is the ALPN value for the TLS-ALPN challenge
// handshake. See RFC 8737 §6.2.
const ACMETLS1Protocol = "acme-tls/1"

// idPEACMEIdentifierV1 is the SMI Security for PKIX Certification Extension OID referencing the ACME extension.
// (id-pe-acmeIdentifier, OID 1.3.6.1.5.5.7.1.31.)
// See RFC 8737 §6.1. https://www.rfc-editor.org/rfc/rfc8737.html#section-6.1
var idPEACMEIdentifierV1 = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 31}
|
||||
8
vendor/github.com/miekg/dns/.codecov.yml
generated
vendored
Normal file
8
vendor/github.com/miekg/dns/.codecov.yml
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
coverage:
|
||||
status:
|
||||
project:
|
||||
default:
|
||||
target: 40%
|
||||
threshold: null
|
||||
patch: false
|
||||
changes: false
|
||||
4
vendor/github.com/miekg/dns/.gitignore
generated
vendored
Normal file
4
vendor/github.com/miekg/dns/.gitignore
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
*.6
|
||||
tags
|
||||
test.out
|
||||
a.out
|
||||
1
vendor/github.com/miekg/dns/AUTHORS
generated
vendored
Normal file
1
vendor/github.com/miekg/dns/AUTHORS
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
Miek Gieben <miek@miek.nl>
|
||||
1
vendor/github.com/miekg/dns/CODEOWNERS
generated
vendored
Normal file
1
vendor/github.com/miekg/dns/CODEOWNERS
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
* @miekg @tmthrgd
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user