Merge branch 'dev'

This commit is contained in:
Ingo Oppermann 2023-02-23 11:47:20 +01:00
commit 431d013e3e
No known key found for this signature in database
GPG Key ID: 2AB32426E9DD229E
672 changed files with 104560 additions and 7968 deletions

View File

@ -3,20 +3,20 @@ name: tests
on: [push, pull_request]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 2
- uses: actions/setup-go@v2
with:
go-version: '1.18'
- name: Run coverage
run: go test -coverprofile=coverage.out -covermode=atomic -v ./...
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v2
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: coverage.out
flags: unit-linux
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 2
- uses: actions/setup-go@v2
with:
go-version: "1.19"
- name: Run coverage
run: go test -coverprofile=coverage.out -covermode=atomic -v ./...
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v2
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: coverage.out
flags: unit-linux

View File

@ -1,5 +1,5 @@
# CORE ALPINE BASE IMAGE
OS_NAME=alpine
OS_VERSION=3.16
GOLANG_IMAGE=golang:1.19.3-alpine3.16
CORE_VERSION=16.11.0
GOLANG_IMAGE=golang:1.20-alpine3.16
CORE_VERSION=16.12.0

View File

@ -1,5 +1,5 @@
# CORE UBUNTU BASE IMAGE
OS_NAME=ubuntu
OS_VERSION=20.04
GOLANG_IMAGE=golang:1.19.3-alpine3.16
CORE_VERSION=16.11.0
GOLANG_IMAGE=golang:1.20-alpine3.16
CORE_VERSION=16.12.0

View File

@ -1,5 +1,18 @@
# Core
### Core v16.11.0 > v16.12.0
- Add S3 storage support
- Add support for variables in the placeholder parameter
- Add support for passing the RTMP token as the stream key, i.e. as the last element of the path
- Add support for a soft memory limit via debug.memory_limit_mbytes in the config
- Add support for partial process config updates
- Add support for an alternative auth0 tenant syntax as an environment variable
- Fix config timestamps created_at and loaded_at
- Fix /config/reload return type
- Fix modifying DTS in RTMP packets ([restreamer/#487](https://github.com/datarhei/restreamer/issues/487), [restreamer/#367](https://github.com/datarhei/restreamer/issues/367))
- Fix default internal SRT latency to 20ms
### Core v16.10.1 > v16.11.0
- Add FFmpeg 4.4 to FFmpeg 5.1 migration tool
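The alternative auth0 tenant syntax from v16.12.0 is easiest to see through the value parser this commit adds (see the config/value diffs further down). A minimal sketch, assuming the value package API shown in this diff:

package main

import (
	"fmt"

	"github.com/datarhei/core/v16/config/value"
)

func main() {
	// Both representations parse into the same Auth0Tenant list: the
	// existing base64-encoded JSON objects and the new auth0:// URL form.
	tenants := []value.Auth0Tenant{}
	v := value.NewTenantList(&tenants, nil, " ")
	if err := v.Set("auth0://clientid@domain?aud=audience&user=user1&user=user2"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(tenants[0].Domain, tenants[0].Audience) // domain audience
}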

View File

@ -1,4 +1,4 @@
ARG GOLANG_IMAGE=golang:1.19.3-alpine3.16
ARG GOLANG_IMAGE=golang:1.20-alpine3.16
ARG BUILD_IMAGE=alpine:3.16

View File

@ -1,4 +1,4 @@
FROM golang:1.19.3-alpine3.16
FROM golang:1.20-alpine3.16
RUN apk add alpine-sdk

View File

@ -16,47 +16,47 @@ The datarhei Core is a process management solution for FFmpeg that offers a rang
The objectives of development are:
* Unhindered use of FFmpeg processes
* Portability of FFmpeg, including management across development and production environments
* Scalability of FFmpeg-based applications through the ability to offload processes to additional instances
* Streamlining of media product development by focusing on features and design.
- Unhindered use of FFmpeg processes
- Portability of FFmpeg, including management across development and production environments
- Scalability of FFmpeg-based applications through the ability to offload processes to additional instances
- Streamlining of media product development by focusing on features and design.
## What issues have been resolved thus far?
### Process management
* Run multiple processes via API
* Unrestricted FFmpeg commands in process configuration.
* Error detection and recovery (e.g., FFmpeg stalls, dumps)
* Referencing for process chaining (pipelines)
* Placeholders for storage, RTMP, and SRT usage (automatic credentials management and URL resolution)
* Logs (access to current stdout/stderr)
* Log history (configurable log history, e.g., for error analysis)
* Resource limitation (max. CPU and MEMORY usage per process)
* Statistics (like FFmpeg progress per input and output, CPU and MEMORY, state, uptime)
* Input verification (like FFprobe)
* Metadata (option to store additional information like a title)
- Run multiple processes via API
- Unrestricted FFmpeg commands in process configuration.
- Error detection and recovery (e.g., FFmpeg stalls, dumps)
- Referencing for process chaining (pipelines)
- Placeholders for storage, RTMP, and SRT usage (automatic credentials management and URL resolution)
- Logs (access to current stdout/stderr)
- Log history (configurable log history, e.g., for error analysis)
- Resource limitation (max. CPU and MEMORY usage per process)
- Statistics (like FFmpeg progress per input and output, CPU and MEMORY, state, uptime)
- Input verification (like FFprobe)
- Metadata (option to store additional information like a title)
### Media delivery
* Configurable file systems (in-memory, disk-mount, S3)
* HTTP/S, RTMP/S, and SRT services, including Let's Encrypt
* Bandwidth and session limiting for HLS/MPEG DASH sessions (protects restreams from congestion)
* Viewer session API and logging
- Configurable file systems (in-memory, disk-mount, S3)
- HTTP/S, RTMP/S, and SRT services, including Let's Encrypt
- Bandwidth and session limiting for HLS/MPEG DASH sessions (protects restreams from congestion)
- Viewer session API and logging
### Misc
* HTTP REST and GraphQL API
* Swagger documentation
* Metrics incl. Prometheus support (also detects POSIX and cgroups resources)
* Docker images for fast setup of development environments up to the integration of cloud resources
- HTTP REST and GraphQL API
- Swagger documentation
- Metrics incl. Prometheus support (also detects POSIX and cgroups resources)
- Docker images for fast setup of development environments up to the integration of cloud resources
## Docker images
- datarhei/core:latest (AMD64, ARM64, ARMv7)
- datarhei/core:cuda-latest (Nvidia CUDA 11.7.1, AMD64)
- datarhei/core:rpi-latest (Raspberry Pi / OMX/V4L2-M2M, AMD64/ARMv7)
- datarhei/core:vaapi-latest (Intel VAAPI, AMD64)
- datarhei/core:latest (AMD64, ARM64, ARMv7)
- datarhei/core:cuda-latest (Nvidia CUDA 11.7.1, AMD64)
- datarhei/core:rpi-latest (Raspberry Pi / OMX/V4L2-M2M, AMD64/ARMv7)
- datarhei/core:vaapi-latest (Intel VAAPI, AMD64)
## Quick start
@ -80,12 +80,12 @@ docker run --name core -d \
## Documentation
Documentation is available on [docs.datarhei.com/core](https://docs.datarhei.com/core).
Documentation is available on [docs.datarhei.com/core](https://docs.datarhei.com/core).
- [Quick start](https://docs.datarhei.com/core/guides/beginner)
- [Installation](https://docs.datarhei.com/core/installation)
- [Configuration](https://docs.datarhei.com/core/configuration)
- [Coding](https://docs.datarhei.com/core/development/coding)
- [Quick start](https://docs.datarhei.com/core/guides/beginner)
- [Installation](https://docs.datarhei.com/core/installation)
- [Configuration](https://docs.datarhei.com/core/configuration)
- [Coding](https://docs.datarhei.com/core/development/coding)
## License

File diff suppressed because it is too large

View File

@ -9,6 +9,7 @@ import (
cfgvars "github.com/datarhei/core/v16/config/vars"
"github.com/datarhei/core/v16/ffmpeg"
"github.com/datarhei/core/v16/io/file"
"github.com/datarhei/core/v16/io/fs"
"github.com/datarhei/core/v16/log"
"github.com/datarhei/core/v16/restream/store"
@ -22,7 +23,11 @@ func main() {
"to": "ffmpeg5",
})
configstore, err := cfgstore.NewJSON(os.Getenv("CORE_CONFIGFILE"), nil)
configfile := cfgstore.Location(os.Getenv("CORE_CONFIGFILE"))
diskfs, _ := fs.NewDiskFilesystem(fs.DiskConfig{})
configstore, err := cfgstore.NewJSON(diskfs, configfile, nil)
if err != nil {
logger.Error().WithError(err).Log("Loading configuration failed")
os.Exit(1)
@ -115,9 +120,12 @@ func doMigration(logger log.Logger, configstore cfgstore.Store) error {
logger.Info().WithField("backup", backupFilepath).Log("Backup created")
// Load the existing DB
datastore := store.NewJSONStore(store.JSONConfig{
datastore, err := store.NewJSON(store.JSONConfig{
Filepath: cfg.DB.Dir + "/db.json",
})
if err != nil {
return err
}
data, err := datastore.Load()
if err != nil {
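The hunks above boil down to a pattern that recurs throughout this commit: construct an fs.Filesystem first, then hand it to both the config store and the JSON datastore. A condensed sketch of that wiring, assembled from the calls shown in this diff (error handling and logging trimmed):

package main

import (
	"os"

	cfgstore "github.com/datarhei/core/v16/config/store"
	"github.com/datarhei/core/v16/io/fs"
	"github.com/datarhei/core/v16/restream/store"
)

func main() {
	// All file access now goes through an fs.Filesystem implementation.
	diskfs, err := fs.NewDiskFilesystem(fs.DiskConfig{})
	if err != nil {
		os.Exit(1)
	}

	configfile := cfgstore.Location(os.Getenv("CORE_CONFIGFILE"))
	configstore, err := cfgstore.NewJSON(diskfs, configfile, nil)
	if err != nil {
		os.Exit(1)
	}

	cfg := configstore.Get()

	// The process database is opened on the same filesystem.
	datastore, err := store.NewJSON(store.JSONConfig{
		Filesystem: diskfs,
		Filepath:   cfg.DB.Dir + "/db.json",
	})
	if err != nil {
		os.Exit(1)
	}
	_ = datastore
}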

View File

@ -17,6 +17,7 @@ import (
"github.com/datarhei/core/v16/encoding/json"
"github.com/datarhei/core/v16/ffmpeg"
"github.com/datarhei/core/v16/ffmpeg/skills"
"github.com/datarhei/core/v16/io/fs"
"github.com/datarhei/core/v16/restream"
"github.com/datarhei/core/v16/restream/app"
"github.com/datarhei/core/v16/restream/store"
@ -495,14 +496,14 @@ type importConfigAudio struct {
sampling string
}
func importV1(path string, cfg importConfig) (store.StoreData, error) {
func importV1(fs fs.Filesystem, path string, cfg importConfig) (store.StoreData, error) {
if len(cfg.id) == 0 {
cfg.id = uuid.New().String()
}
r := store.NewStoreData()
jsondata, err := os.ReadFile(path)
jsondata, err := fs.ReadFile(path)
if err != nil {
return r, fmt.Errorf("failed to read data from %s: %w", path, err)
}
@ -1417,9 +1418,19 @@ func probeInput(binary string, config app.Config) app.Probe {
return app.Probe{}
}
dummyfs, _ := fs.NewMemFilesystem(fs.MemConfig{})
store, err := store.NewJSON(store.JSONConfig{
Filesystem: dummyfs,
Filepath: "/",
Logger: nil,
})
if err != nil {
return app.Probe{}
}
rs, err := restream.New(restream.Config{
FFmpeg: ffmpeg,
Store: store.NewDummyStore(store.DummyConfig{}),
Store: store,
})
if err != nil {
return app.Probe{}

View File

@ -6,6 +6,7 @@ import (
"testing"
"github.com/datarhei/core/v16/encoding/json"
"github.com/datarhei/core/v16/io/fs"
"github.com/datarhei/core/v16/restream/store"
"github.com/stretchr/testify/require"
@ -36,8 +37,13 @@ import (
var id string = "4186b095-7f0a-4e94-8c3d-f17459ab252f"
func testV1Import(t *testing.T, v1Fixture, v4Fixture string, config importConfig) {
diskfs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{
Root: ".",
})
require.NoError(t, err)
// Import v1 database
v4, err := importV1(v1Fixture, config)
v4, err := importV1(diskfs, v1Fixture, config)
require.Equal(t, nil, err)
// Reset variants
@ -50,7 +56,7 @@ func testV1Import(t *testing.T, v1Fixture, v4Fixture string, config importConfig
require.Equal(t, nil, err)
// Read the wanted result
wantdatav4, err := os.ReadFile(v4Fixture)
wantdatav4, err := diskfs.ReadFile(v4Fixture)
require.Equal(t, nil, err)
var wantv4 store.StoreData

View File

@ -6,6 +6,7 @@ import (
cfgstore "github.com/datarhei/core/v16/config/store"
cfgvars "github.com/datarhei/core/v16/config/vars"
"github.com/datarhei/core/v16/io/fs"
"github.com/datarhei/core/v16/log"
"github.com/datarhei/core/v16/restream/store"
@ -15,18 +16,26 @@ import (
func main() {
logger := log.New("Import").WithOutput(log.NewConsoleWriter(os.Stderr, log.Linfo, true)).WithField("version", "v1")
configstore, err := cfgstore.NewJSON(os.Getenv("CORE_CONFIGFILE"), nil)
configfile := cfgstore.Location(os.Getenv("CORE_CONFIGFILE"))
diskfs, err := fs.NewDiskFilesystem(fs.DiskConfig{})
if err != nil {
logger.Error().WithError(err).Log("Access disk filesystem failed")
os.Exit(1)
}
configstore, err := cfgstore.NewJSON(diskfs, configfile, nil)
if err != nil {
logger.Error().WithError(err).Log("Loading configuration failed")
os.Exit(1)
}
if err := doImport(logger, configstore); err != nil {
if err := doImport(logger, diskfs, configstore); err != nil {
os.Exit(1)
}
}
func doImport(logger log.Logger, configstore cfgstore.Store) error {
func doImport(logger log.Logger, fs fs.Filesystem, configstore cfgstore.Store) error {
if logger == nil {
logger = log.New("")
}
@ -65,23 +74,27 @@ func doImport(logger log.Logger, configstore cfgstore.Store) error {
logger = logger.WithField("database", v1filename)
if _, err := os.Stat(v1filename); err != nil {
if _, err := fs.Stat(v1filename); err != nil {
if os.IsNotExist(err) {
logger.Info().Log("Database doesn't exist and nothing will be imported")
return nil
}
logger.Error().WithError(err).Log("Checking for v1 database")
return fmt.Errorf("checking for v1 database: %w", err)
}
logger.Info().Log("Found database")
// Load an existing DB
datastore := store.NewJSONStore(store.JSONConfig{
Filepath: cfg.DB.Dir + "/db.json",
datastore, err := store.NewJSON(store.JSONConfig{
Filesystem: fs,
Filepath: cfg.DB.Dir + "/db.json",
})
if err != nil {
logger.Error().WithError(err).Log("Creating datastore for new database failed")
return fmt.Errorf("creating datastore for new database failed: %w", err)
}
data, err := datastore.Load()
if err != nil {
@ -103,7 +116,7 @@ func doImport(logger log.Logger, configstore cfgstore.Store) error {
importConfig.binary = cfg.FFmpeg.Binary
// Rewrite the old database to the new database
r, err := importV1(v1filename, importConfig)
r, err := importV1(fs, v1filename, importConfig)
if err != nil {
logger.Error().WithError(err).Log("Importing database failed")
return fmt.Errorf("importing database failed: %w", err)

View File

@ -1,20 +1,30 @@
package main
import (
"strings"
"testing"
"github.com/datarhei/core/v16/config/store"
"github.com/datarhei/core/v16/io/fs"
"github.com/stretchr/testify/require"
)
func TestImport(t *testing.T) {
configstore := store.NewDummy()
memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
require.NoError(t, err)
memfs.WriteFileReader("/mime.types", strings.NewReader("foobar"))
memfs.WriteFileReader("/bin/ffmpeg", strings.NewReader("foobar"))
configstore, err := store.NewJSON(memfs, "/config.json", nil)
require.NoError(t, err)
cfg := configstore.Get()
err := configstore.Set(cfg)
err = configstore.Set(cfg)
require.NoError(t, err)
err = doImport(nil, configstore)
err = doImport(nil, memfs, configstore)
require.NoError(t, err)
}

View File

@ -29,7 +29,7 @@ func (v versionInfo) MinorString() string {
// Version of the app
var Version = versionInfo{
Major: 16,
Minor: 11,
Minor: 12,
Patch: 0,
}

View File

@ -6,11 +6,13 @@ import (
"net"
"time"
haikunator "github.com/atrox/haikunatorgo/v2"
"github.com/datarhei/core/v16/config/copy"
"github.com/datarhei/core/v16/config/value"
"github.com/datarhei/core/v16/config/vars"
"github.com/datarhei/core/v16/io/fs"
"github.com/datarhei/core/v16/math/rand"
haikunator "github.com/atrox/haikunatorgo/v2"
"github.com/google/uuid"
)
@ -45,14 +47,21 @@ const version int64 = 3
// Config is a wrapper for Data
type Config struct {
fs fs.Filesystem
vars vars.Variables
Data
}
// New returns a Config which is initialized with its default values
func New() *Config {
config := &Config{}
func New(f fs.Filesystem) *Config {
config := &Config{
fs: f,
}
if config.fs == nil {
config.fs, _ = fs.NewMemFilesystem(fs.MemConfig{})
}
config.init()
@ -69,7 +78,7 @@ func (d *Config) Set(name, val string) error {
// NewConfigFrom returns a clone of a Config
func (d *Config) Clone() *Config {
data := New()
data := New(d.fs)
data.CreatedAt = d.CreatedAt
data.LoadedAt = d.LoadedAt
@ -111,6 +120,7 @@ func (d *Config) Clone() *Config {
data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
data.Storage.Disk.Cache.Types.Allow = copy.Slice(d.Storage.Disk.Cache.Types.Allow)
data.Storage.Disk.Cache.Types.Block = copy.Slice(d.Storage.Disk.Cache.Types.Block)
data.Storage.S3 = copy.Slice(d.Storage.S3)
data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
@ -143,7 +153,7 @@ func (d *Config) init() {
d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)
// DB
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config", d.fs), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
// Host
d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
@ -172,14 +182,14 @@ func (d *Config) init() {
d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
d.vars.Register(value.NewEmail(&d.TLS.Email, "cert@datarhei.com"), "tls.email", "CORE_TLS_EMAIL", nil, "Email for Let's Encrypt registration", false, false)
d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
d.vars.Register(value.NewFile(&d.TLS.CertFile, "", d.fs), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
// Storage
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
// Storage (Disk)
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false)
d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false)
d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false)
@ -195,6 +205,9 @@ func (d *Config) init() {
d.vars.Register(value.NewInt64(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. allowed megabytes for /memfs, 0 for unlimited", false, false)
d.vars.Register(value.NewBool(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false)
// Storage (S3)
d.vars.Register(value.NewS3StorageListValue(&d.Storage.S3, []value.S3Storage{}, "|"), "storage.s3", "CORE_STORAGE_S3", nil, "List of S3 storage URLs", false, false)
// Storage (CORS)
d.vars.Register(value.NewCORSOrigins(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false)
@ -215,7 +228,7 @@ func (d *Config) init() {
d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false)
// FFmpeg
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg", d.fs), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false)
@ -232,6 +245,7 @@ func (d *Config) init() {
// Debug
d.vars.Register(value.NewBool(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false)
d.vars.Register(value.NewInt(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false)
d.vars.Register(value.NewInt64(&d.Debug.MemoryLimit, 0), "debug.memory_limit_mbytes", "CORE_DEBUG_MEMORY_LIMIT_MBYTES", nil, "Impose a soft memory limit for the core, in megabytes", false, false)
// Metrics
d.vars.Register(value.NewBool(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false)
@ -256,7 +270,7 @@ func (d *Config) init() {
// Router
d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
d.vars.Register(value.NewDir(&d.Router.UIPath, "", d.fs), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
}
// Validate validates the current state of the Config for completeness and sanity. Errors are
@ -374,6 +388,21 @@ func (d *Config) Validate(resetLogs bool) {
}
}
if len(d.Storage.S3) != 0 {
names := map[string]struct{}{
"disk": {},
"mem": {},
}
for _, s3 := range d.Storage.S3 {
if _, ok := names[s3.Name]; ok {
d.vars.Log("error", "storage.s3", "the name %s is already in use or reserved", s3.Name)
}
names[s3.Name] = struct{}{}
}
}
// If playout is enabled, check that the port range is sane
if d.Playout.Enable {
if d.Playout.MinPort >= d.Playout.MaxPort {
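Since config.New now takes the filesystem its validators check paths against, validation can run entirely in memory. A minimal sketch, mirroring the new TestValidateDefault below (file names and seeding taken from that test):

package main

import (
	"fmt"
	"strings"

	"github.com/datarhei/core/v16/config"
	"github.com/datarhei/core/v16/io/fs"
)

func main() {
	memfs, _ := fs.NewMemFilesystem(fs.MemConfig{})

	// The "file must exist" rules (mime.types, the ffmpeg binary) can be
	// satisfied on the in-memory filesystem, as TestValidateDefault does.
	memfs.WriteFileReader("/mime.types", strings.NewReader("x"))
	memfs.WriteFileReader("/bin/ffmpeg", strings.NewReader("x"))

	cfg := config.New(memfs) // passing nil falls back to an in-memory filesystem
	cfg.Validate(true)

	// S3 storage names must not collide with the reserved "disk" and "mem".
	fmt.Println(cfg.HasErrors()) // false for the defaults
}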

View File

@ -1,13 +1,18 @@
package config
import (
"strings"
"testing"
"github.com/datarhei/core/v16/config/vars"
"github.com/datarhei/core/v16/io/fs"
"github.com/stretchr/testify/require"
)
func TestConfigCopy(t *testing.T) {
config1 := New()
fs, _ := fs.NewMemFilesystem(fs.MemConfig{})
config1 := New(fs)
config1.Version = 42
config1.DB.Dir = "foo"
@ -50,3 +55,30 @@ func TestConfigCopy(t *testing.T) {
require.Equal(t, []string{"bar.com"}, config1.Host.Name)
require.Equal(t, []string{"foo.com"}, config2.Host.Name)
}
func TestValidateDefault(t *testing.T) {
fs, err := fs.NewMemFilesystem(fs.MemConfig{})
require.NoError(t, err)
size, fresh, err := fs.WriteFileReader("./mime.types", strings.NewReader("xxxxx"))
require.Equal(t, int64(5), size)
require.Equal(t, true, fresh)
require.NoError(t, err)
_, _, err = fs.WriteFileReader("/bin/ffmpeg", strings.NewReader("xxxxx"))
require.NoError(t, err)
cfg := New(fs)
cfg.Validate(true)
errors := []string{}
cfg.Messages(func(level string, v vars.Variable, message string) {
if level == "error" {
errors = append(errors, message)
}
})
require.Equal(t, 0, len(cfg.Overrides()))
require.Equal(t, false, cfg.HasErrors(), errors)
}

View File

@ -6,14 +6,15 @@ import (
"github.com/datarhei/core/v16/config/copy"
v2 "github.com/datarhei/core/v16/config/v2"
"github.com/datarhei/core/v16/config/value"
"github.com/datarhei/core/v16/io/fs"
)
// Data is the actual configuration data for the app
type Data struct {
CreatedAt time.Time `json:"created_at"`
LoadedAt time.Time `json:"-"`
UpdatedAt time.Time `json:"-"`
Version int64 `json:"version" jsonschema:"minimum=3,maximum=3"`
CreatedAt time.Time `json:"created_at"` // When this config has been persisted
LoadedAt time.Time `json:"-"` // When this config has been actually used
UpdatedAt time.Time `json:"-"` // Irrelevant
Version int64 `json:"version" jsonschema:"minimum=3,maximum=3" format:"int64"`
ID string `json:"id"`
Name string `json:"name"`
Address string `json:"address"`
@ -21,7 +22,7 @@ type Data struct {
Log struct {
Level string `json:"level" enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"`
Topics []string `json:"topics"`
MaxLines int `json:"max_lines"`
MaxLines int `json:"max_lines" format:"int"`
} `json:"log"`
DB struct {
Dir string `json:"dir"`
@ -67,12 +68,12 @@ type Data struct {
Storage struct {
Disk struct {
Dir string `json:"dir"`
Size int64 `json:"max_size_mbytes"`
Size int64 `json:"max_size_mbytes" format:"int64"`
Cache struct {
Enable bool `json:"enable"`
Size uint64 `json:"max_size_mbytes"`
TTL int64 `json:"ttl_seconds"`
FileSize uint64 `json:"max_file_size_mbytes"`
Size uint64 `json:"max_size_mbytes" format:"uint64"`
TTL int64 `json:"ttl_seconds" format:"int64"`
FileSize uint64 `json:"max_file_size_mbytes" format:"uint64"`
Types struct {
Allow []string `json:"allow"`
Block []string `json:"block"`
@ -85,9 +86,10 @@ type Data struct {
Username string `json:"username"`
Password string `json:"password"`
} `json:"auth"`
Size int64 `json:"max_size_mbytes"`
Size int64 `json:"max_size_mbytes" format:"int64"`
Purge bool `json:"purge"`
} `json:"memory"`
S3 []value.S3Storage `json:"s3"`
CORS struct {
Origins []string `json:"origins"`
} `json:"cors"`
@ -113,7 +115,7 @@ type Data struct {
} `json:"srt"`
FFmpeg struct {
Binary string `json:"binary"`
MaxProcesses int64 `json:"max_processes"`
MaxProcesses int64 `json:"max_processes" format:"int64"`
Access struct {
Input struct {
Allow []string `json:"allow"`
@ -125,33 +127,34 @@ type Data struct {
} `json:"output"`
} `json:"access"`
Log struct {
MaxLines int `json:"max_lines"`
MaxHistory int `json:"max_history"`
MaxLines int `json:"max_lines" format:"int"`
MaxHistory int `json:"max_history" format:"int"`
} `json:"log"`
} `json:"ffmpeg"`
Playout struct {
Enable bool `json:"enable"`
MinPort int `json:"min_port"`
MaxPort int `json:"max_port"`
MinPort int `json:"min_port" format:"int"`
MaxPort int `json:"max_port" format:"int"`
} `json:"playout"`
Debug struct {
Profiling bool `json:"profiling"`
ForceGC int `json:"force_gc"`
Profiling bool `json:"profiling"`
ForceGC int `json:"force_gc" format:"int"`
MemoryLimit int64 `json:"memory_limit_mbytes" format:"int64"`
} `json:"debug"`
Metrics struct {
Enable bool `json:"enable"`
EnablePrometheus bool `json:"enable_prometheus"`
Range int64 `json:"range_sec"` // seconds
Interval int64 `json:"interval_sec"` // seconds
Range int64 `json:"range_sec" format:"int64"` // seconds
Interval int64 `json:"interval_sec" format:"int64"` // seconds
} `json:"metrics"`
Sessions struct {
Enable bool `json:"enable"`
IPIgnoreList []string `json:"ip_ignorelist"`
SessionTimeout int `json:"session_timeout_sec"`
SessionTimeout int `json:"session_timeout_sec" format:"int"`
Persist bool `json:"persist"`
PersistInterval int `json:"persist_interval_sec"`
MaxBitrate uint64 `json:"max_bitrate_mbit"`
MaxSessions uint64 `json:"max_sessions"`
PersistInterval int `json:"persist_interval_sec" format:"int"`
MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"`
MaxSessions uint64 `json:"max_sessions" format:"uint64"`
} `json:"sessions"`
Service struct {
Enable bool `json:"enable"`
@ -165,8 +168,8 @@ type Data struct {
} `json:"router"`
}
func UpgradeV2ToV3(d *v2.Data) (*Data, error) {
cfg := New()
func UpgradeV2ToV3(d *v2.Data, fs fs.Filesystem) (*Data, error) {
cfg := New(fs)
return MergeV2toV3(&cfg.Data, d)
}
@ -189,7 +192,6 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) {
data.SRT = d.SRT
data.FFmpeg = d.FFmpeg
data.Playout = d.Playout
data.Debug = d.Debug
data.Metrics = d.Metrics
data.Sessions = d.Sessions
data.Service = d.Service
@ -228,6 +230,10 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) {
data.Storage.Memory = d.Storage.Memory
// Actual changes
data.Debug.Profiling = d.Debug.Profiling
data.Debug.ForceGC = d.Debug.ForceGC
data.Debug.MemoryLimit = 0
data.TLS.Enable = d.TLS.Enable
data.TLS.Address = d.TLS.Address
data.TLS.Auto = d.TLS.Auto
@ -242,6 +248,8 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) {
data.Storage.Disk.Cache.TTL = d.Storage.Disk.Cache.TTL
data.Storage.Disk.Cache.Types.Allow = copy.Slice(d.Storage.Disk.Cache.Types)
data.Storage.S3 = []value.S3Storage{}
data.Version = 3
return data, nil
@ -267,7 +275,6 @@ func DowngradeV3toV2(d *Data) (*v2.Data, error) {
data.SRT = d.SRT
data.FFmpeg = d.FFmpeg
data.Playout = d.Playout
data.Debug = d.Debug
data.Metrics = d.Metrics
data.Sessions = d.Sessions
data.Service = d.Service
@ -299,6 +306,9 @@ func DowngradeV3toV2(d *Data) (*v2.Data, error) {
data.Router.Routes = copy.StringMap(d.Router.Routes)
// Actual changes
data.Debug.Profiling = d.Debug.Profiling
data.Debug.ForceGC = d.Debug.ForceGC
data.TLS.Enable = d.TLS.Enable
data.TLS.Address = d.TLS.Address
data.TLS.Auto = d.TLS.Auto

config/data_test.go (Normal file, 36 additions)
View File

@ -0,0 +1,36 @@
package config
import (
"testing"
v2 "github.com/datarhei/core/v16/config/v2"
"github.com/datarhei/core/v16/io/fs"
"github.com/stretchr/testify/require"
)
func TestUpgrade(t *testing.T) {
fs, _ := fs.NewMemFilesystem(fs.MemConfig{})
v2cfg := v2.New(fs)
v2cfg.Storage.Disk.Cache.Types = []string{".foo", ".bar"}
v3cfg, err := UpgradeV2ToV3(&v2cfg.Data, fs)
require.NoError(t, err)
require.Equal(t, int64(3), v3cfg.Version)
require.ElementsMatch(t, []string{".foo", ".bar"}, v3cfg.Storage.Disk.Cache.Types.Allow)
require.ElementsMatch(t, []string{".m3u8", ".mpd"}, v3cfg.Storage.Disk.Cache.Types.Block)
}
func TestDowngrade(t *testing.T) {
fs, _ := fs.NewMemFilesystem(fs.MemConfig{})
v3cfg := New(fs)
v3cfg.Storage.Disk.Cache.Types.Allow = []string{".foo", ".bar"}
v2cfg, err := DowngradeV3toV2(&v3cfg.Data)
require.NoError(t, err)
require.Equal(t, int64(2), v2cfg.Version)
require.ElementsMatch(t, []string{".foo", ".bar"}, v2cfg.Storage.Disk.Cache.Types)
}

View File

@ -1,73 +0,0 @@
package store
import (
"fmt"
"github.com/datarhei/core/v16/config"
)
type dummyStore struct {
current *config.Config
active *config.Config
}
// NewDummyStore returns a store that returns the default config
func NewDummy() Store {
s := &dummyStore{}
cfg := config.New()
cfg.DB.Dir = "."
cfg.FFmpeg.Binary = "true"
cfg.Storage.Disk.Dir = "."
cfg.Storage.MimeTypes = ""
s.current = cfg
cfg = config.New()
cfg.DB.Dir = "."
cfg.FFmpeg.Binary = "true"
cfg.Storage.Disk.Dir = "."
cfg.Storage.MimeTypes = ""
s.active = cfg
return s
}
func (c *dummyStore) Get() *config.Config {
return c.current.Clone()
}
func (c *dummyStore) Set(d *config.Config) error {
d.Validate(true)
if d.HasErrors() {
return fmt.Errorf("configuration data has errors after validation")
}
c.current = d.Clone()
return nil
}
func (c *dummyStore) GetActive() *config.Config {
return c.active.Clone()
}
func (c *dummyStore) SetActive(d *config.Config) error {
d.Validate(true)
if d.HasErrors() {
return fmt.Errorf("configuration data has errors after validation")
}
c.active = d.Clone()
return nil
}
func (c *dummyStore) Reload() error {
return nil
}

View File

@ -5,16 +5,16 @@ import (
"fmt"
"os"
"path/filepath"
"time"
"github.com/datarhei/core/v16/config"
v1 "github.com/datarhei/core/v16/config/v1"
v2 "github.com/datarhei/core/v16/config/v2"
"github.com/datarhei/core/v16/encoding/json"
"github.com/datarhei/core/v16/io/file"
"github.com/datarhei/core/v16/io/fs"
)
type jsonStore struct {
fs fs.Filesystem
path string
data map[string]*config.Config
@ -22,18 +22,32 @@ type jsonStore struct {
reloadFn func()
}
// NewJSONStore will read a JSON config file from the given path. After successfully reading it in, it will be written
// back to the path. The returned error will be nil if everything went fine.
// If the path doesn't exist, a default JSON config file will be written to that path.
// The returned ConfigStore can be used to retrieve or write the config.
func NewJSON(path string, reloadFn func()) (Store, error) {
// NewJSONStore will read the JSON config file from the given path. After successfully reading it in, it will be written
// back to the path. The returned error will be nil if everything went fine. If the path doesn't exist, a default JSON
// config file will be written to that path. The returned ConfigStore can be used to retrieve or write the config.
func NewJSON(f fs.Filesystem, path string, reloadFn func()) (Store, error) {
c := &jsonStore{
path: path,
fs: f,
data: make(map[string]*config.Config),
reloadFn: reloadFn,
}
c.data["base"] = config.New()
path, err := filepath.Abs(path)
if err != nil {
return nil, fmt.Errorf("failed to determine absolute path of '%s': %w", path, err)
}
c.path = path
if len(c.path) == 0 {
c.path = "/config.json"
}
if c.fs == nil {
return nil, fmt.Errorf("no valid filesystem provided")
}
c.data["base"] = config.New(f)
if err := c.load(c.data["base"]); err != nil {
return nil, fmt.Errorf("failed to read JSON from '%s': %w", path, err)
@ -57,14 +71,10 @@ func (c *jsonStore) Set(d *config.Config) error {
data := d.Clone()
data.CreatedAt = time.Now()
if err := c.store(data); err != nil {
return fmt.Errorf("failed to write JSON to '%s': %w", c.path, err)
}
data.UpdatedAt = time.Now()
c.data["base"] = data
return nil
@ -89,7 +99,9 @@ func (c *jsonStore) SetActive(d *config.Config) error {
return fmt.Errorf("configuration data has errors after validation")
}
c.data["merged"] = d.Clone()
data := d.Clone()
c.data["merged"] = data
return nil
}
@ -109,15 +121,19 @@ func (c *jsonStore) load(cfg *config.Config) error {
return nil
}
if _, err := os.Stat(c.path); os.IsNotExist(err) {
if _, err := c.fs.Stat(c.path); os.IsNotExist(err) {
return nil
}
jsondata, err := os.ReadFile(c.path)
jsondata, err := c.fs.ReadFile(c.path)
if err != nil {
return err
}
if len(jsondata) == 0 {
return nil
}
data, err := migrate(jsondata)
if err != nil {
return err
@ -125,15 +141,12 @@ func (c *jsonStore) load(cfg *config.Config) error {
cfg.Data = *data
cfg.LoadedAt = time.Now()
cfg.UpdatedAt = cfg.LoadedAt
cfg.UpdatedAt = cfg.CreatedAt
return nil
}
func (c *jsonStore) store(data *config.Config) error {
data.CreatedAt = time.Now()
if len(c.path) == 0 {
return nil
}
@ -143,28 +156,9 @@ func (c *jsonStore) store(data *config.Config) error {
return err
}
dir, filename := filepath.Split(c.path)
_, _, err = c.fs.WriteFileSafe(c.path, jsondata)
tmpfile, err := os.CreateTemp(dir, filename)
if err != nil {
return err
}
defer os.Remove(tmpfile.Name())
if _, err := tmpfile.Write(jsondata); err != nil {
return err
}
if err := tmpfile.Close(); err != nil {
return err
}
if err := file.Rename(tmpfile.Name(), c.path); err != nil {
return err
}
return nil
return err
}
func migrate(jsondata []byte) (*config.Data, error) {
@ -176,38 +170,38 @@ func migrate(jsondata []byte) (*config.Data, error) {
}
if version.Version == 1 {
dataV1 := &v1.New().Data
dataV1 := &v1.New(nil).Data
if err := gojson.Unmarshal(jsondata, dataV1); err != nil {
return nil, json.FormatError(jsondata, err)
}
dataV2, err := v2.UpgradeV1ToV2(dataV1)
dataV2, err := v2.UpgradeV1ToV2(dataV1, nil)
if err != nil {
return nil, err
}
dataV3, err := config.UpgradeV2ToV3(dataV2)
dataV3, err := config.UpgradeV2ToV3(dataV2, nil)
if err != nil {
return nil, err
}
data = dataV3
} else if version.Version == 2 {
dataV2 := &v2.New().Data
dataV2 := &v2.New(nil).Data
if err := gojson.Unmarshal(jsondata, dataV2); err != nil {
return nil, json.FormatError(jsondata, err)
}
dataV3, err := config.UpgradeV2ToV3(dataV2)
dataV3, err := config.UpgradeV2ToV3(dataV2, nil)
if err != nil {
return nil, err
}
data = dataV3
} else if version.Version == 3 {
dataV3 := &config.New().Data
dataV3 := &config.New(nil).Data
if err := gojson.Unmarshal(jsondata, dataV3); err != nil {
return nil, json.FormatError(jsondata, err)

View File

@ -18,7 +18,7 @@ func TestMigrationV1ToV3(t *testing.T) {
jsondatav3, err := os.ReadFile("./fixtures/config_v1_v3.json")
require.NoError(t, err)
datav3 := config.New()
datav3 := config.New(nil)
json.Unmarshal(jsondatav3, datav3)
data, err := migrate(jsondatav1)
@ -37,7 +37,7 @@ func TestMigrationV2ToV3(t *testing.T) {
jsondatav3, err := os.ReadFile("./fixtures/config_v2_v3.json")
require.NoError(t, err)
datav3 := config.New()
datav3 := config.New(nil)
json.Unmarshal(jsondatav3, datav3)
data, err := migrate(jsondatav2)

config/store/location.go (Normal file, 53 additions)
View File

@ -0,0 +1,53 @@
package store
import (
"os"
"path"
)
// Location returns the path to the config file. If no path is provided,
// several standard locations will be probed:
// - os.UserConfigDir() + /datarhei-core/config.js
// - os.UserHomeDir() + /.config/datarhei-core/config.js
// - ./config/config.js
// If the config doesn't exist in any of these locations, it is assumed
// to be at ./config/config.js
func Location(filepath string) string {
configfile := filepath
if len(configfile) != 0 {
return configfile
}
locations := []string{}
if dir, err := os.UserConfigDir(); err == nil {
locations = append(locations, dir+"/datarhei-core/config.js")
}
if dir, err := os.UserHomeDir(); err == nil {
locations = append(locations, dir+"/.config/datarhei-core/config.js")
}
locations = append(locations, "./config/config.js")
for _, path := range locations {
info, err := os.Stat(path)
if err != nil {
continue
}
if info.IsDir() {
continue
}
configfile = path
}
if len(configfile) == 0 {
configfile = "./config/config.js"
}
os.MkdirAll(path.Dir(configfile), 0740)
return configfile
}
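A typical call site, as used by the cmd tools earlier in this commit (cfgstore is the import alias for config/store there): resolve the path first, then open the store on a disk filesystem.

configfile := cfgstore.Location(os.Getenv("CORE_CONFIGFILE"))
configstore, err := cfgstore.NewJSON(diskfs, configfile, nil)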

View File

@ -8,6 +8,7 @@ import (
"github.com/datarhei/core/v16/config/copy"
"github.com/datarhei/core/v16/config/value"
"github.com/datarhei/core/v16/config/vars"
"github.com/datarhei/core/v16/io/fs"
"github.com/datarhei/core/v16/math/rand"
haikunator "github.com/atrox/haikunatorgo/v2"
@ -21,14 +22,21 @@ const version int64 = 1
// Config is a wrapper for Data
type Config struct {
fs fs.Filesystem
vars vars.Variables
Data
}
// New returns a Config which is initialized with its default values
func New() *Config {
cfg := &Config{}
func New(f fs.Filesystem) *Config {
cfg := &Config{
fs: f,
}
if cfg.fs == nil {
cfg.fs, _ = fs.NewMemFilesystem(fs.MemConfig{})
}
cfg.init()
@ -45,7 +53,7 @@ func (d *Config) Set(name, val string) error {
// NewConfigFrom returns a clone of a Config
func (d *Config) Clone() *Config {
data := New()
data := New(d.fs)
data.CreatedAt = d.CreatedAt
data.LoadedAt = d.LoadedAt
@ -118,7 +126,7 @@ func (d *Config) init() {
d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)
// DB
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config", d.fs), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
// Host
d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
@ -146,14 +154,14 @@ func (d *Config) init() {
d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false)
d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
d.vars.Register(value.NewFile(&d.TLS.CertFile, "", d.fs), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
// Storage
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
// Storage (Disk)
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false)
d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false)
d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false)
@ -187,7 +195,7 @@ func (d *Config) init() {
d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false)
// FFmpeg
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg", d.fs), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false)
@ -228,7 +236,7 @@ func (d *Config) init() {
// Router
d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
d.vars.Register(value.NewDir(&d.Router.UIPath, "", d.fs), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
}
// Validate validates the current state of the Config for completeness and sanity. Errors are

View File

@ -10,7 +10,7 @@ type Data struct {
CreatedAt time.Time `json:"created_at"`
LoadedAt time.Time `json:"-"`
UpdatedAt time.Time `json:"-"`
Version int64 `json:"version" jsonschema:"minimum=1,maximum=1"`
Version int64 `json:"version" jsonschema:"minimum=1,maximum=1" format:"int64"`
ID string `json:"id"`
Name string `json:"name"`
Address string `json:"address"`
@ -18,7 +18,7 @@ type Data struct {
Log struct {
Level string `json:"level" enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"`
Topics []string `json:"topics"`
MaxLines int `json:"max_lines"`
MaxLines int `json:"max_lines" format:"int"`
} `json:"log"`
DB struct {
Dir string `json:"dir"`
@ -63,12 +63,12 @@ type Data struct {
Storage struct {
Disk struct {
Dir string `json:"dir"`
Size int64 `json:"max_size_mbytes"`
Size int64 `json:"max_size_mbytes" format:"int64"`
Cache struct {
Enable bool `json:"enable"`
Size uint64 `json:"max_size_mbytes"`
TTL int64 `json:"ttl_seconds"`
FileSize uint64 `json:"max_file_size_mbytes"`
Size uint64 `json:"max_size_mbytes" format:"uint64"`
TTL int64 `json:"ttl_seconds" format:"int64"`
FileSize uint64 `json:"max_file_size_mbytes" format:"uint64"`
Types []string `json:"types"`
} `json:"cache"`
} `json:"disk"`
@ -78,7 +78,7 @@ type Data struct {
Username string `json:"username"`
Password string `json:"password"`
} `json:"auth"`
Size int64 `json:"max_size_mbytes"`
Size int64 `json:"max_size_mbytes" format:"int64"`
Purge bool `json:"purge"`
} `json:"memory"`
CORS struct {
@ -105,7 +105,7 @@ type Data struct {
} `json:"srt"`
FFmpeg struct {
Binary string `json:"binary"`
MaxProcesses int64 `json:"max_processes"`
MaxProcesses int64 `json:"max_processes" format:"int64"`
Access struct {
Input struct {
Allow []string `json:"allow"`
@ -117,33 +117,33 @@ type Data struct {
} `json:"output"`
} `json:"access"`
Log struct {
MaxLines int `json:"max_lines"`
MaxHistory int `json:"max_history"`
MaxLines int `json:"max_lines" format:"int"`
MaxHistory int `json:"max_history" format:"int"`
} `json:"log"`
} `json:"ffmpeg"`
Playout struct {
Enable bool `json:"enable"`
MinPort int `json:"min_port"`
MaxPort int `json:"max_port"`
MinPort int `json:"min_port" format:"int"`
MaxPort int `json:"max_port" format:"int"`
} `json:"playout"`
Debug struct {
Profiling bool `json:"profiling"`
ForceGC int `json:"force_gc"`
ForceGC int `json:"force_gc" format:"int"`
} `json:"debug"`
Metrics struct {
Enable bool `json:"enable"`
EnablePrometheus bool `json:"enable_prometheus"`
Range int64 `json:"range_sec"` // seconds
Interval int64 `json:"interval_sec"` // seconds
Range int64 `json:"range_sec" format:"int64"` // seconds
Interval int64 `json:"interval_sec" format:"int64"` // seconds
} `json:"metrics"`
Sessions struct {
Enable bool `json:"enable"`
IPIgnoreList []string `json:"ip_ignorelist"`
SessionTimeout int `json:"session_timeout_sec"`
SessionTimeout int `json:"session_timeout_sec" format:"int"`
Persist bool `json:"persist"`
PersistInterval int `json:"persist_interval_sec"`
MaxBitrate uint64 `json:"max_bitrate_mbit"`
MaxSessions uint64 `json:"max_sessions"`
PersistInterval int `json:"persist_interval_sec" format:"int"`
MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"`
MaxSessions uint64 `json:"max_sessions" format:"uint64"`
} `json:"sessions"`
Service struct {
Enable bool `json:"enable"`

View File

@ -8,6 +8,7 @@ import (
"github.com/datarhei/core/v16/config/copy"
"github.com/datarhei/core/v16/config/value"
"github.com/datarhei/core/v16/config/vars"
"github.com/datarhei/core/v16/io/fs"
"github.com/datarhei/core/v16/math/rand"
haikunator "github.com/atrox/haikunatorgo/v2"
@ -21,14 +22,21 @@ const version int64 = 2
// Config is a wrapper for Data
type Config struct {
fs fs.Filesystem
vars vars.Variables
Data
}
// New returns a Config which is initialized with its default values
func New() *Config {
cfg := &Config{}
func New(f fs.Filesystem) *Config {
cfg := &Config{
fs: f,
}
if cfg.fs == nil {
cfg.fs, _ = fs.NewMemFilesystem(fs.MemConfig{})
}
cfg.init()
@ -45,7 +53,7 @@ func (d *Config) Set(name, val string) error {
// NewConfigFrom returns a clone of a Config
func (d *Config) Clone() *Config {
data := New()
data := New(d.fs)
data.CreatedAt = d.CreatedAt
data.LoadedAt = d.LoadedAt
@ -118,7 +126,7 @@ func (d *Config) init() {
d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)
// DB
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config", d.fs), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
// Host
d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
@ -146,14 +154,14 @@ func (d *Config) init() {
d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false)
d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
d.vars.Register(value.NewFile(&d.TLS.CertFile, "", d.fs), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
// Storage
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
// Storage (Disk)
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false)
d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false)
d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false)
@ -188,7 +196,7 @@ func (d *Config) init() {
d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false)
// FFmpeg
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg", d.fs), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false)
@ -229,7 +237,7 @@ func (d *Config) init() {
// Router
d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
d.vars.Register(value.NewDir(&d.Router.UIPath, "", d.fs), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
}
// Validate validates the current state of the Config for completeness and sanity. Errors are

View File

@ -10,13 +10,14 @@ import (
"github.com/datarhei/core/v16/config/copy"
v1 "github.com/datarhei/core/v16/config/v1"
"github.com/datarhei/core/v16/config/value"
"github.com/datarhei/core/v16/io/fs"
)
type Data struct {
CreatedAt time.Time `json:"created_at"`
LoadedAt time.Time `json:"-"`
UpdatedAt time.Time `json:"-"`
Version int64 `json:"version" jsonschema:"minimum=2,maximum=2"`
Version int64 `json:"version" jsonschema:"minimum=2,maximum=2" format:"int64"`
ID string `json:"id"`
Name string `json:"name"`
Address string `json:"address"`
@ -24,7 +25,7 @@ type Data struct {
Log struct {
Level string `json:"level" enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"`
Topics []string `json:"topics"`
MaxLines int `json:"max_lines"`
MaxLines int `json:"max_lines" format:"int"`
} `json:"log"`
DB struct {
Dir string `json:"dir"`
@ -69,12 +70,12 @@ type Data struct {
Storage struct {
Disk struct {
Dir string `json:"dir"`
Size int64 `json:"max_size_mbytes"`
Size int64 `json:"max_size_mbytes" format:"int64"`
Cache struct {
Enable bool `json:"enable"`
Size uint64 `json:"max_size_mbytes"`
TTL int64 `json:"ttl_seconds"`
FileSize uint64 `json:"max_file_size_mbytes"`
Size uint64 `json:"max_size_mbytes" format:"uint64"`
TTL int64 `json:"ttl_seconds" format:"int64"`
FileSize uint64 `json:"max_file_size_mbytes" format:"uint64"`
Types []string `json:"types"`
} `json:"cache"`
} `json:"disk"`
@ -84,7 +85,7 @@ type Data struct {
Username string `json:"username"`
Password string `json:"password"`
} `json:"auth"`
Size int64 `json:"max_size_mbytes"`
Size int64 `json:"max_size_mbytes" format:"int64"`
Purge bool `json:"purge"`
} `json:"memory"`
CORS struct {
@ -112,7 +113,7 @@ type Data struct {
} `json:"srt"`
FFmpeg struct {
Binary string `json:"binary"`
MaxProcesses int64 `json:"max_processes"`
MaxProcesses int64 `json:"max_processes" format:"int64"`
Access struct {
Input struct {
Allow []string `json:"allow"`
@ -124,33 +125,33 @@ type Data struct {
} `json:"output"`
} `json:"access"`
Log struct {
MaxLines int `json:"max_lines"`
MaxHistory int `json:"max_history"`
MaxLines int `json:"max_lines" format:"int"`
MaxHistory int `json:"max_history" format:"int"`
} `json:"log"`
} `json:"ffmpeg"`
Playout struct {
Enable bool `json:"enable"`
MinPort int `json:"min_port"`
MaxPort int `json:"max_port"`
MinPort int `json:"min_port" format:"int"`
MaxPort int `json:"max_port" format:"int"`
} `json:"playout"`
Debug struct {
Profiling bool `json:"profiling"`
ForceGC int `json:"force_gc"`
ForceGC int `json:"force_gc" format:"int"`
} `json:"debug"`
Metrics struct {
Enable bool `json:"enable"`
EnablePrometheus bool `json:"enable_prometheus"`
Range int64 `json:"range_sec"` // seconds
Interval int64 `json:"interval_sec"` // seconds
Range int64 `json:"range_sec" format:"int64"` // seconds
Interval int64 `json:"interval_sec" format:"int64"` // seconds
} `json:"metrics"`
Sessions struct {
Enable bool `json:"enable"`
IPIgnoreList []string `json:"ip_ignorelist"`
SessionTimeout int `json:"session_timeout_sec"`
SessionTimeout int `json:"session_timeout_sec" format:"int"`
Persist bool `json:"persist"`
PersistInterval int `json:"persist_interval_sec"`
MaxBitrate uint64 `json:"max_bitrate_mbit"`
MaxSessions uint64 `json:"max_sessions"`
PersistInterval int `json:"persist_interval_sec" format:"int"`
MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"`
MaxSessions uint64 `json:"max_sessions" format:"uint64"`
} `json:"sessions"`
Service struct {
Enable bool `json:"enable"`
@ -164,8 +165,8 @@ type Data struct {
} `json:"router"`
}
func UpgradeV1ToV2(d *v1.Data) (*Data, error) {
cfg := New()
func UpgradeV1ToV2(d *v1.Data, fs fs.Filesystem) (*Data, error) {
cfg := New(fs)
return MergeV1ToV2(&cfg.Data, d)
}

View File

@ -4,6 +4,7 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"net/url"
"strings"
)
@ -16,6 +17,28 @@ type Auth0Tenant struct {
Users []string `json:"users"`
}
func (a *Auth0Tenant) String() string {
u := url.URL{
Scheme: "auth0",
Host: a.Domain,
}
if len(a.ClientID) != 0 {
u.User = url.User(a.ClientID)
}
q := url.Values{}
q.Set("aud", a.Audience)
for _, user := range a.Users {
q.Add("user", user)
}
u.RawQuery = q.Encode()
return u.String()
}
type TenantList struct {
p *[]Auth0Tenant
separator string
@ -32,18 +55,34 @@ func NewTenantList(p *[]Auth0Tenant, val []Auth0Tenant, separator string) *Tenan
return v
}
// Set allows setting a tenant list in one of two formats:
// - a separator-separated list of base64-encoded Auth0Tenant JSON objects
// - a separator-separated list of Auth0Tenant in URL representation: auth0://[clientid]@[domain]?aud=[audience]&user=...&user=...
func (s *TenantList) Set(val string) error {
list := []Auth0Tenant{}
for i, elm := range strings.Split(val, s.separator) {
data, err := base64.StdEncoding.DecodeString(elm)
if err != nil {
return fmt.Errorf("invalid base64 encoding of tenant %d: %w", i, err)
}
t := Auth0Tenant{}
if err := json.Unmarshal(data, &t); err != nil {
return fmt.Errorf("invalid JSON in tenant %d: %w", i, err)
if strings.HasPrefix(elm, "auth0://") {
data, err := url.Parse(elm)
if err != nil {
return fmt.Errorf("invalid url encoding of tenant %d: %w", i, err)
}
t.Domain = data.Host
t.ClientID = data.User.Username()
t.Audience = data.Query().Get("aud")
t.Users = data.Query()["user"]
} else {
data, err := base64.StdEncoding.DecodeString(elm)
if err != nil {
return fmt.Errorf("invalid base64 encoding of tenant %d: %w", i, err)
}
if err := json.Unmarshal(data, &t); err != nil {
return fmt.Errorf("invalid JSON in tenant %d: %w", i, err)
}
}
list = append(list, t)
@ -62,10 +101,10 @@ func (s *TenantList) String() string {
list := []string{}
for _, t := range *s.p {
list = append(list, fmt.Sprintf("%s (%d users)", t.Domain, len(t.Users)))
list = append(list, t.String())
}
return strings.Join(list, ",")
return strings.Join(list, s.separator)
}
func (s *TenantList) Validate() error {

View File

@ -0,0 +1,43 @@
package value
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestAuth0Value(t *testing.T) {
tenants := []Auth0Tenant{}
v := NewTenantList(&tenants, nil, " ")
require.Equal(t, "(empty)", v.String())
v.Set("auth0://clientid@domain?aud=audience&user=user1&user=user2 auth0://domain2?aud=audience2&user=user3")
require.Equal(t, []Auth0Tenant{
{
Domain: "domain",
ClientID: "clientid",
Audience: "audience",
Users: []string{"user1", "user2"},
},
{
Domain: "domain2",
Audience: "audience2",
Users: []string{"user3"},
},
}, tenants)
require.Equal(t, "auth0://clientid@domain?aud=audience&user=user1&user=user2 auth0://domain2?aud=audience2&user=user3", v.String())
require.NoError(t, v.Validate())
v.Set("eyJkb21haW4iOiJkYXRhcmhlaS5ldS5hdXRoMC5jb20iLCJhdWRpZW5jZSI6Imh0dHBzOi8vZGF0YXJoZWkuY29tL2NvcmUiLCJ1c2VycyI6WyJhdXRoMHx4eHgiXX0=")
require.Equal(t, []Auth0Tenant{
{
Domain: "datarhei.eu.auth0.com",
ClientID: "",
Audience: "https://datarhei.com/core",
Users: []string{"auth0|xxx"},
},
}, tenants)
require.Equal(t, "auth0://datarhei.eu.auth0.com?aud=https%3A%2F%2Fdatarhei.com%2Fcore&user=auth0%7Cxxx", v.String())
require.NoError(t, v.Validate())
}
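
The URL representation exercised above is what makes the alternative environment-variable syntax practical. A hypothetical end-to-end sketch; the variable name CORE_API_AUTH_AUTH0_TENANTS is an assumption based on the config key naming and is not confirmed by this diff:

package main

import (
	"fmt"
	"os"

	"github.com/datarhei/core/v16/config/value"
)

func main() {
	// One tenant in auth0:// form; reserved characters in the query
	// are percent-encoded, matching the String() output above.
	os.Setenv("CORE_API_AUTH_AUTH0_TENANTS",
		"auth0://clientid@example.eu.auth0.com?aud=https%3A%2F%2Fexample.com%2Fcore&user=auth0%7Cabc")

	tenants := []value.Auth0Tenant{}
	v := value.NewTenantList(&tenants, nil, ",")

	if err := v.Set(os.Getenv("CORE_API_AUTH_AUTH0_TENANTS")); err != nil {
		fmt.Println("parse error:", err)
		return
	}

	fmt.Println(tenants[0].Domain, tenants[0].Audience)
	// example.eu.auth0.com https://example.com/core
}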

View File

@ -0,0 +1,127 @@
package value
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestAddressValue(t *testing.T) {
var x string
val := NewAddress(&x, ":8080")
require.Equal(t, ":8080", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
x = "foobaz:9090"
require.Equal(t, "foobaz:9090", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("fooboz:7070")
require.Equal(t, "fooboz:7070", x)
}
func TestCIDRListValue(t *testing.T) {
var x []string
val := NewCIDRList(&x, []string{}, " ")
require.Equal(t, "(empty)", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, true, val.IsEmpty())
x = []string{"127.0.0.1/32", "127.0.0.2/32"}
require.Equal(t, "127.0.0.1/32 127.0.0.2/32", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("129.0.0.1/32 129.0.0.2/32")
require.Equal(t, []string{"129.0.0.1/32", "129.0.0.2/32"}, x)
}
func TestCORSOriginsValue(t *testing.T) {
var x []string
val := NewCORSOrigins(&x, []string{}, " ")
require.Equal(t, "(empty)", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, true, val.IsEmpty())
x = []string{"*"}
require.Equal(t, "*", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("http://localhost")
require.Equal(t, []string{"http://localhost"}, x)
}
func TestPortValue(t *testing.T) {
var x int
val := NewPort(&x, 11)
require.Equal(t, "11", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
x = 42
require.Equal(t, "42", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("77")
require.Equal(t, int(77), x)
}
func TestURLValue(t *testing.T) {
var x string
val := NewURL(&x, "http://localhost/foobar")
require.Equal(t, "http://localhost/foobar", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
x = "http://localhost:8080/foobar"
require.Equal(t, "http://localhost:8080/foobar", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("http://localhost:8080/fooboz/foobaz")
require.Equal(t, "http://localhost:8080/fooboz/foobaz", x)
}
func TestEmailValue(t *testing.T) {
var x string
val := NewEmail(&x, "foobar@example.com")
require.Equal(t, "foobar@example.com", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
x = "foobar+baz@example.com"
require.Equal(t, "foobar+baz@example.com", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("foobar@sub.example.com")
require.Equal(t, "foobar@sub.example.com", x)
}

View File

@ -2,39 +2,51 @@ package value
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/datarhei/core/v16/io/fs"
)
// must directory
type MustDir string
type MustDir struct {
p *string
fs fs.Filesystem
}
func NewMustDir(p *string, val string, fs fs.Filesystem) *MustDir {
v := &MustDir{
p: p,
fs: fs,
}
func NewMustDir(p *string, val string) *MustDir {
*p = val
return (*MustDir)(p)
return v
}
func (u *MustDir) Set(val string) error {
*u = MustDir(val)
*u.p = val
return nil
}
func (u *MustDir) String() string {
return string(*u)
return *u.p
}
func (u *MustDir) Validate() error {
val := string(*u)
val := *u.p
if len(strings.TrimSpace(val)) == 0 {
return fmt.Errorf("path name must not be empty")
}
finfo, err := os.Stat(val)
if err := u.fs.MkdirAll(val, 0750); err != nil {
return fmt.Errorf("%s can't be created (%w)", val, err)
}
finfo, err := u.fs.Stat(val)
if err != nil {
return fmt.Errorf("%s does not exist", val)
}
@ -47,36 +59,44 @@ func (u *MustDir) Validate() error {
}
func (u *MustDir) IsEmpty() bool {
return len(string(*u)) == 0
return len(*u.p) == 0
}
// directory
type Dir string
type Dir struct {
p *string
fs fs.Filesystem
}
func NewDir(p *string, val string, fs fs.Filesystem) *Dir {
v := &Dir{
p: p,
fs: fs,
}
func NewDir(p *string, val string) *Dir {
*p = val
return (*Dir)(p)
return v
}
func (u *Dir) Set(val string) error {
*u = Dir(val)
*u.p = val
return nil
}
func (u *Dir) String() string {
return string(*u)
return *u.p
}
func (u *Dir) Validate() error {
val := string(*u)
val := *u.p
if len(strings.TrimSpace(val)) == 0 {
return nil
}
finfo, err := os.Stat(val)
finfo, err := u.fs.Stat(val)
if err != nil {
return fmt.Errorf("%s does not exist", val)
}
@ -89,32 +109,40 @@ func (u *Dir) Validate() error {
}
func (u *Dir) IsEmpty() bool {
return len(string(*u)) == 0
return len(*u.p) == 0
}
// executable
type Exec string
type Exec struct {
p *string
fs fs.Filesystem
}
func NewExec(p *string, val string, fs fs.Filesystem) *Exec {
v := &Exec{
p: p,
fs: fs,
}
func NewExec(p *string, val string) *Exec {
*p = val
return (*Exec)(p)
return v
}
func (u *Exec) Set(val string) error {
*u = Exec(val)
*u.p = val
return nil
}
func (u *Exec) String() string {
return string(*u)
return *u.p
}
func (u *Exec) Validate() error {
val := string(*u)
val := *u.p
_, err := exec.LookPath(val)
_, err := u.fs.LookPath(val)
if err != nil {
return fmt.Errorf("%s not found or is not executable", val)
}
@ -123,36 +151,44 @@ func (u *Exec) Validate() error {
}
func (u *Exec) IsEmpty() bool {
return len(string(*u)) == 0
return len(*u.p) == 0
}
// regular file
type File string
type File struct {
p *string
fs fs.Filesystem
}
func NewFile(p *string, val string, fs fs.Filesystem) *File {
v := &File{
p: p,
fs: fs,
}
func NewFile(p *string, val string) *File {
*p = val
return (*File)(p)
return v
}
func (u *File) Set(val string) error {
*u = File(val)
*u.p = val
return nil
}
func (u *File) String() string {
return string(*u)
return *u.p
}
func (u *File) Validate() error {
val := string(*u)
val := *u.p
if len(val) == 0 {
return nil
}
finfo, err := os.Stat(val)
finfo, err := u.fs.Stat(val)
if err != nil {
return fmt.Errorf("%s does not exist", val)
}
@ -165,7 +201,7 @@ func (u *File) Validate() error {
}
func (u *File) IsEmpty() bool {
return len(string(*u)) == 0
return len(*u.p) == 0
}
// absolute path

142
config/value/os_test.go Normal file
View File

@ -0,0 +1,142 @@
package value
import (
"testing"
"github.com/datarhei/core/v16/io/fs"
"github.com/stretchr/testify/require"
)
func TestMustDirValue(t *testing.T) {
memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
require.NoError(t, err)
_, err = memfs.Stat("/foobar")
require.Error(t, err)
var x string
val := NewMustDir(&x, "./foobar", memfs)
require.Equal(t, "./foobar", val.String())
require.NoError(t, val.Validate())
require.Equal(t, false, val.IsEmpty())
info, err := memfs.Stat("/foobar")
require.NoError(t, err)
require.True(t, info.IsDir())
x = "/bar/foo"
require.Equal(t, "/bar/foo", val.String())
_, err = memfs.Stat("/bar/foo")
require.Error(t, err)
require.NoError(t, val.Validate())
info, err = memfs.Stat("/bar/foo")
require.NoError(t, err)
require.True(t, info.IsDir())
memfs.WriteFile("/foo/bar", []byte("hello"))
val.Set("/foo/bar")
require.Error(t, val.Validate())
}
func TestDirValue(t *testing.T) {
memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
require.NoError(t, err)
var x string
val := NewDir(&x, "/foobar", memfs)
require.Equal(t, "/foobar", val.String())
require.Error(t, val.Validate())
require.Equal(t, false, val.IsEmpty())
err = memfs.MkdirAll("/foobar", 0755)
require.NoError(t, err)
require.NoError(t, val.Validate())
_, _, err = memfs.WriteFile("/foo/bar", []byte("hello"))
require.NoError(t, err)
val.Set("/foo/bar")
require.Error(t, val.Validate())
}
func TestFileValue(t *testing.T) {
memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
require.NoError(t, err)
var x string
val := NewFile(&x, "/foobar", memfs)
require.Equal(t, "/foobar", val.String())
require.Error(t, val.Validate())
require.Equal(t, false, val.IsEmpty())
_, _, err = memfs.WriteFile("/foobar", []byte("hello"))
require.NoError(t, err)
require.NoError(t, val.Validate())
err = memfs.MkdirAll("/foo/bar", 0755)
require.NoError(t, err)
val.Set("/foo/bar")
require.Error(t, val.Validate())
}
func TestExecValue(t *testing.T) {
memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
require.NoError(t, err)
var x string
val := NewExec(&x, "/foobar", memfs)
require.Equal(t, "/foobar", val.String())
require.Error(t, val.Validate())
require.Equal(t, false, val.IsEmpty())
_, _, err = memfs.WriteFile("/foobar", []byte("hello"))
require.NoError(t, err)
require.NoError(t, val.Validate())
err = memfs.MkdirAll("/foo/bar", 0755)
require.NoError(t, err)
val.Set("/foo/bar")
require.Error(t, val.Validate())
}
func TestAbsolutePathValue(t *testing.T) {
var x string
val := NewAbsolutePath(&x, "foobar")
require.Equal(t, "foobar", val.String())
require.Error(t, val.Validate())
require.Equal(t, false, val.IsEmpty())
x = "/foobaz"
require.Equal(t, "/foobaz", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("/fooboz")
require.Equal(t, "/fooboz", x)
}

View File

@ -1,6 +1,7 @@
package value
import (
"sort"
"strconv"
"strings"
)
@ -127,11 +128,20 @@ func (s *StringMapString) String() string {
return "(empty)"
}
sms := *s.p
keys := []string{}
for k := range sms {
keys = append(keys, k)
}
sort.Strings(keys)
mappings := make([]string, len(*s.p))
i := 0
for k, v := range *s.p {
mappings[i] = k + ":" + v
for _, k := range keys {
mappings[i] = k + ":" + sms[k]
i++
}

View File

@ -0,0 +1,147 @@
package value
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestStringValue(t *testing.T) {
var x string
val := NewString(&x, "foobar")
require.Equal(t, "foobar", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
x = "foobaz"
require.Equal(t, "foobaz", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("fooboz")
require.Equal(t, "fooboz", x)
}
func TestStringListValue(t *testing.T) {
var x []string
val := NewStringList(&x, []string{"foobar"}, " ")
require.Equal(t, "foobar", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
x = []string{"foobar", "foobaz"}
require.Equal(t, "foobar foobaz", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("fooboz foobar")
require.Equal(t, []string{"fooboz", "foobar"}, x)
}
func TestStringMapStringValue(t *testing.T) {
var x map[string]string
val := NewStringMapString(&x, map[string]string{"a": "foobar"})
require.Equal(t, "a:foobar", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
x = map[string]string{"a": "foobar", "b": "foobaz"}
require.Equal(t, "a:foobar b:foobaz", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("x:fooboz y:foobar")
require.Equal(t, map[string]string{"x": "fooboz", "y": "foobar"}, x)
}
func TestBoolValue(t *testing.T) {
var x bool
val := NewBool(&x, false)
require.Equal(t, "false", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, true, val.IsEmpty())
x = true
require.Equal(t, "true", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("false")
require.Equal(t, false, x)
}
func TestIntValue(t *testing.T) {
var x int
val := NewInt(&x, 11)
require.Equal(t, "11", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
x = 42
require.Equal(t, "42", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("77")
require.Equal(t, int(77), x)
}
func TestInt64Value(t *testing.T) {
var x int64
val := NewInt64(&x, 11)
require.Equal(t, "11", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
x = 42
require.Equal(t, "42", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("77")
require.Equal(t, int64(77), x)
}
func TestUint64Value(t *testing.T) {
var x uint64
val := NewUint64(&x, 11)
require.Equal(t, "11", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
x = 42
require.Equal(t, "42", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("77")
require.Equal(t, uint64(77), x)
}

179
config/value/s3.go Normal file
View File

@ -0,0 +1,179 @@
package value
import (
"fmt"
"net/url"
"strings"
"golang.org/x/net/publicsuffix"
)
// array of s3 storages
// https://access_key_id:secret_access_key@region.endpoint/bucket?name=aaa&mountpoint=/abc&username=xxx&password=yyy
type S3Storage struct {
Name string `json:"name"`
Mountpoint string `json:"mountpoint"`
Auth struct {
Enable bool `json:"enable"`
Username string `json:"username"`
Password string `json:"password"`
} `json:"auth"`
Endpoint string `json:"endpoint"`
AccessKeyID string `json:"access_key_id"`
SecretAccessKey string `json:"secret_access_key"`
Bucket string `json:"bucket"`
Region string `json:"region"`
UseSSL bool `json:"use_ssl"`
}
func (t *S3Storage) String() string {
u := url.URL{}
if t.UseSSL {
u.Scheme = "https"
} else {
u.Scheme = "http"
}
u.User = url.UserPassword(t.AccessKeyID, "---")
u.Host = t.Endpoint
if len(t.Region) != 0 {
u.Host = t.Region + "." + u.Host
}
if len(t.Bucket) != 0 {
u.Path = "/" + t.Bucket
}
v := url.Values{}
v.Set("name", t.Name)
v.Set("mountpoint", t.Mountpoint)
if t.Auth.Enable {
if len(t.Auth.Username) != 0 {
v.Set("username", t.Auth.Username)
}
if len(t.Auth.Password) != 0 {
v.Set("password", "---")
}
}
u.RawQuery = v.Encode()
return u.String()
}
type s3StorageListValue struct {
p *[]S3Storage
separator string
}
func NewS3StorageListValue(p *[]S3Storage, val []S3Storage, separator string) *s3StorageListValue {
v := &s3StorageListValue{
p: p,
separator: separator,
}
*p = val
return v
}
func (s *s3StorageListValue) Set(val string) error {
list := []S3Storage{}
for _, elm := range strings.Split(val, s.separator) {
u, err := url.Parse(elm)
if err != nil {
return fmt.Errorf("invalid S3 storage URL (%s): %w", elm, err)
}
t := S3Storage{
Name: u.Query().Get("name"),
Mountpoint: u.Query().Get("mountpoint"),
AccessKeyID: u.User.Username(),
}
hostname := u.Hostname()
port := u.Port()
domain, err := publicsuffix.EffectiveTLDPlusOne(hostname)
if err != nil {
return fmt.Errorf("invalid eTLD (%s): %w", hostname, err)
}
t.Endpoint = domain
if len(port) != 0 {
t.Endpoint += ":" + port
}
region := strings.TrimSuffix(hostname, domain)
if len(region) != 0 {
t.Region = strings.TrimSuffix(region, ".")
}
secret, ok := u.User.Password()
if ok {
t.SecretAccessKey = secret
}
t.Bucket = strings.TrimPrefix(u.Path, "/")
if u.Scheme == "https" {
t.UseSSL = true
}
if u.Query().Has("username") || u.Query().Has("password") {
t.Auth.Enable = true
t.Auth.Username = u.Query().Get("username")
t.Auth.Username = u.Query().Get("password")
}
list = append(list, t)
}
*s.p = list
return nil
}
func (s *s3StorageListValue) String() string {
if s.IsEmpty() {
return "(empty)"
}
list := []string{}
for _, t := range *s.p {
list = append(list, t.String())
}
return strings.Join(list, s.separator)
}
func (s *s3StorageListValue) Validate() error {
for i, t := range *s.p {
if len(t.Name) == 0 {
return fmt.Errorf("the name for s3 storage %d is missing", i)
}
if len(t.Mountpoint) == 0 {
return fmt.Errorf("the mountpoint for s3 storage %d is missing", i)
}
if t.Auth.Enable {
if len(t.Auth.Username) == 0 && len(t.Auth.Password) == 0 {
return fmt.Errorf("auth is enabled, but no username and password are set for s3 storage %d", i)
}
}
}
return nil
}
func (s *s3StorageListValue) IsEmpty() bool {
return len(*s.p) == 0
}

30
config/value/time_test.go Normal file
View File

@ -0,0 +1,30 @@
package value
import (
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestTimeValue(t *testing.T) {
var x time.Time
tm := time.Unix(1257894000, 0).UTC()
val := NewTime(&x, tm)
require.Equal(t, "2009-11-10T23:00:00Z", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
x = time.Unix(1257894001, 0).UTC()
require.Equal(t, "2009-11-10T23:00:01Z", val.String())
require.Equal(t, nil, val.Validate())
require.Equal(t, false, val.IsEmpty())
val.Set("2009-11-11T23:00:00Z")
require.Equal(t, time.Time(time.Date(2009, time.November, 11, 23, 0, 0, 0, time.UTC)), x)
}

View File

@ -3,29 +3,9 @@ package value
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestIntValue(t *testing.T) {
var i int
ivar := NewInt(&i, 11)
assert.Equal(t, "11", ivar.String())
assert.Equal(t, nil, ivar.Validate())
assert.Equal(t, false, ivar.IsEmpty())
i = 42
assert.Equal(t, "42", ivar.String())
assert.Equal(t, nil, ivar.Validate())
assert.Equal(t, false, ivar.IsEmpty())
ivar.Set("77")
assert.Equal(t, int(77), i)
}
type testdata struct {
value1 int
value2 int
@ -37,22 +17,22 @@ func TestCopyStruct(t *testing.T) {
NewInt(&data1.value1, 1)
NewInt(&data1.value2, 2)
assert.Equal(t, int(1), data1.value1)
assert.Equal(t, int(2), data1.value2)
require.Equal(t, int(1), data1.value1)
require.Equal(t, int(2), data1.value2)
data2 := testdata{}
val21 := NewInt(&data2.value1, 3)
val22 := NewInt(&data2.value2, 4)
assert.Equal(t, int(3), data2.value1)
assert.Equal(t, int(4), data2.value2)
require.Equal(t, int(3), data2.value1)
require.Equal(t, int(4), data2.value2)
data2 = data1
assert.Equal(t, int(1), data2.value1)
assert.Equal(t, int(2), data2.value2)
require.Equal(t, int(1), data2.value1)
require.Equal(t, int(2), data2.value2)
assert.Equal(t, "1", val21.String())
assert.Equal(t, "2", val22.String())
require.Equal(t, "1", val21.String())
require.Equal(t, "2", val22.String())
}

View File

@ -1,6 +1,7 @@
package vars
import (
"os"
"testing"
"github.com/datarhei/core/v16/config/value"
@ -38,3 +39,210 @@ func TestVars(t *testing.T) {
x, _ = v1.Get("string")
require.Equal(t, "foobar", x)
}
func TestSetDefault(t *testing.T) {
v := Variables{}
s := ""
v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false)
require.Equal(t, "foobar", s)
v.Set("string", "foobaz")
require.Equal(t, "foobaz", s)
v.SetDefault("strong")
require.Equal(t, "foobaz", s)
v.SetDefault("string")
require.Equal(t, "foobar", s)
}
func TestGet(t *testing.T) {
v := Variables{}
s := ""
v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false)
value, err := v.Get("string")
require.NoError(t, err)
require.Equal(t, "foobar", value)
value, err = v.Get("strong")
require.Error(t, err)
require.Equal(t, "", value)
}
func TestSet(t *testing.T) {
v := Variables{}
s := ""
v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false)
err := v.Set("string", "foobaz")
require.NoError(t, err)
require.Equal(t, "foobaz", s)
err = v.Set("strong", "fooboz")
require.Error(t, err)
require.Equal(t, "foobaz", s)
}
func TestLog(t *testing.T) {
v := Variables{}
s := ""
v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false)
v.Log("info", "string", "hello %s", "world")
require.Equal(t, 1, len(v.logs))
v.Log("info", "strong", "hello %s", "world")
require.Equal(t, 1, len(v.logs))
require.Equal(t, "hello world", v.logs[0].message)
require.Equal(t, "info", v.logs[0].level)
require.Equal(t, Variable{
Value: "foobar",
Name: "string",
EnvName: "",
Description: "a string",
Merged: false,
}, v.logs[0].variable)
v.ResetLogs()
require.Equal(t, 0, len(v.logs))
}
func TestMerge(t *testing.T) {
v := Variables{}
s := ""
os.Setenv("CORE_TEST_STRING", "foobaz")
v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false)
require.Equal(t, s, "foobar")
v.Merge()
require.Equal(t, s, "foobaz")
require.Equal(t, true, v.IsMerged("string"))
require.Equal(t, 0, len(v.logs))
os.Unsetenv("CORE_TEST_STRING")
}
func TestMergeAlt(t *testing.T) {
v := Variables{}
s := ""
os.Setenv("CORE_TEST_STRING", "foobaz")
v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRUNG", []string{"CORE_TEST_STRING"}, "a string", false, false)
require.Equal(t, s, "foobar")
v.Merge()
require.Equal(t, s, "foobaz")
require.Equal(t, true, v.IsMerged("string"))
require.Equal(t, 1, len(v.logs))
require.Contains(t, v.logs[0].message, "CORE_TEST_STRUNG")
require.Equal(t, "warn", v.logs[0].level)
os.Unsetenv("CORE_TEST_STRING")
}
func TestNoMerge(t *testing.T) {
v := Variables{}
s := ""
os.Setenv("CORE_TEST_STRONG", "foobaz")
v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false)
require.Equal(t, s, "foobar")
v.Merge()
require.Equal(t, s, "foobar")
require.Equal(t, false, v.IsMerged("string"))
os.Unsetenv("CORE_TEST_STRONG")
}
func TestValidate(t *testing.T) {
v := Variables{}
s1 := ""
s2 := ""
v.Register(value.NewString(&s1, ""), "string", "", nil, "a string", false, false)
v.Register(value.NewString(&s2, ""), "string", "", nil, "a string", true, false)
require.Equal(t, s1, "")
require.Equal(t, s2, "")
require.Equal(t, false, v.HasErrors())
v.Validate()
require.Equal(t, true, v.HasErrors())
ninfo := 0
nerror := 0
v.Messages(func(level string, v Variable, message string) {
if level == "info" {
ninfo++
} else if level == "error" {
nerror++
}
})
require.Equal(t, 2, ninfo)
require.Equal(t, 1, nerror)
}
func TestOverrides(t *testing.T) {
v := Variables{}
s := ""
os.Setenv("CORE_TEST_STRING", "foobaz")
v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false)
v.Merge()
overrides := v.Overrides()
require.ElementsMatch(t, []string{"string"}, overrides)
}
func TestDisguise(t *testing.T) {
v := Variables{}
s := ""
v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, true)
v.Log("info", "string", "hello %s", "world")
require.Equal(t, 1, len(v.logs))
require.Equal(t, "hello world", v.logs[0].message)
require.Equal(t, "info", v.logs[0].level)
require.Equal(t, Variable{
Value: "***",
Name: "string",
EnvName: "",
Description: "a string",
Merged: false,
}, v.logs[0].variable)
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -4,6 +4,9 @@ import (
"github.com/gobwas/glob"
)
// Match returns whether the name matches the glob pattern, optionally
// honoring one or more separator runes. An error is only returned if the
// pattern is invalid.
func Match(pattern, name string, separators ...rune) (bool, error) {
g, err := glob.Compile(pattern, separators...)
if err != nil {

58
go.mod
View File

@ -11,22 +11,25 @@ require (
github.com/datarhei/joy4 v0.0.0-20220914170649-23c70d207759
github.com/go-playground/validator/v10 v10.11.1
github.com/gobwas/glob v0.2.3
github.com/golang-jwt/jwt/v4 v4.4.2
github.com/golang-jwt/jwt/v4 v4.4.3
github.com/google/uuid v1.3.0
github.com/invopop/jsonschema v0.4.0
github.com/joho/godotenv v1.4.0
github.com/labstack/echo/v4 v4.9.1
github.com/lithammer/shortuuid/v4 v4.0.0
github.com/mattn/go-isatty v0.0.16
github.com/mattn/go-isatty v0.0.17
github.com/minio/minio-go/v7 v7.0.47
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
github.com/prometheus/client_golang v1.13.1
github.com/shirou/gopsutil/v3 v3.22.10
github.com/prometheus/client_golang v1.14.0
github.com/shirou/gopsutil/v3 v3.22.11
github.com/stretchr/testify v1.8.1
github.com/swaggo/echo-swagger v1.3.5
github.com/swaggo/swag v1.8.7
github.com/vektah/gqlparser/v2 v2.5.1
github.com/xeipuuv/gojsonschema v1.2.0
golang.org/x/mod v0.6.0
go.uber.org/zap v1.24.0
golang.org/x/mod v0.7.0
golang.org/x/net v0.7.0
)
require (
@ -34,13 +37,14 @@ require (
github.com/agnivade/levenshtein v1.1.1 // indirect
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/spec v0.20.7 // indirect
github.com/go-openapi/spec v0.20.8 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-playground/locales v0.14.0 // indirect
github.com/go-playground/universal-translator v0.18.0 // indirect
@ -50,7 +54,9 @@ require (
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/iancoleman/orderedmap v0.2.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/cpuid/v2 v2.1.2 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.15.15 // indirect
github.com/klauspost/cpuid/v2 v2.2.3 // indirect
github.com/labstack/gommon v0.4.0 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/libdns/libdns v0.2.1 // indirect
@ -60,16 +66,24 @@ require (
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mholt/acmez v1.0.4 // indirect
github.com/miekg/dns v1.1.50 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/sha256-simd v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/rs/xid v1.4.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.5.0 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
github.com/urfave/cli/v2 v2.8.1 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.2 // indirect
@ -78,14 +92,14 @@ require (
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.23.0 // indirect
golang.org/x/crypto v0.1.0 // indirect
golang.org/x/net v0.1.0 // indirect
golang.org/x/sys v0.1.0 // indirect
golang.org/x/text v0.4.0 // indirect
golang.org/x/time v0.1.0 // indirect
golang.org/x/tools v0.2.0 // indirect
go.uber.org/goleak v1.1.12 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.4.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

516
go.sum

File diff suppressed because it is too large Load Diff

View File

@ -6,7 +6,7 @@ import (
type AVstreamIO struct {
State string `json:"state" enums:"running,idle" jsonschema:"enum=running,enum=idle"`
Packet uint64 `json:"packet"`
Packet uint64 `json:"packet" format:"uint64"`
Time uint64 `json:"time"`
Size uint64 `json:"size_kb"`
}
@ -25,11 +25,11 @@ func (i *AVstreamIO) Unmarshal(io *app.AVstreamIO) {
type AVstream struct {
Input AVstreamIO `json:"input"`
Output AVstreamIO `json:"output"`
Aqueue uint64 `json:"aqueue"`
Queue uint64 `json:"queue"`
Dup uint64 `json:"dup"`
Drop uint64 `json:"drop"`
Enc uint64 `json:"enc"`
Aqueue uint64 `json:"aqueue" format:"uint64"`
Queue uint64 `json:"queue" format:"uint64"`
Dup uint64 `json:"dup" format:"uint64"`
Drop uint64 `json:"drop" format:"uint64"`
Enc uint64 `json:"enc" format:"uint64"`
Looping bool `json:"looping"`
Duplicating bool `json:"duplicating"`
GOP string `json:"gop"`

View File

@ -8,7 +8,7 @@ import (
// Error represents an error response of the API
type Error struct {
Code int `json:"code" jsonschema:"required"`
Code int `json:"code" jsonschema:"required" format:"int"`
Message string `json:"message" jsonschema:""`
Details []string `json:"details" jsonschema:""`
}

View File

@ -3,6 +3,13 @@ package api
// FileInfo represents information about a file on a filesystem
type FileInfo struct {
Name string `json:"name" jsonschema:"minLength=1"`
Size int64 `json:"size_bytes" jsonschema:"minimum=0"`
LastMod int64 `json:"last_modified" jsonschema:"minimum=0"`
Size int64 `json:"size_bytes" jsonschema:"minimum=0" format:"int64"`
LastMod int64 `json:"last_modified" jsonschema:"minimum=0" format:"int64"`
}
// FilesystemInfo represents information about a filesystem
type FilesystemInfo struct {
Name string `json:"name"`
Type string `json:"type"`
Mount string `json:"mount"`
}

View File

@ -19,8 +19,8 @@ type MetricsQueryMetric struct {
}
type MetricsQuery struct {
Timerange int64 `json:"timerange_sec"`
Interval int64 `json:"interval_sec"`
Timerange int64 `json:"timerange_sec" format:"int64"`
Interval int64 `json:"interval_sec" format:"int64"`
Metrics []MetricsQueryMetric `json:"metrics"`
}
@ -51,8 +51,8 @@ func (v MetricsResponseValue) MarshalJSON() ([]byte, error) {
}
type MetricsResponse struct {
Timerange int64 `json:"timerange_sec"`
Interval int64 `json:"interval_sec"`
Timerange int64 `json:"timerange_sec" format:"int64"`
Interval int64 `json:"interval_sec" format:"int64"`
Metrics []MetricsResponseMetric `json:"metrics"`
}

View File

@ -4,9 +4,9 @@ import "github.com/datarhei/core/v16/playout"
type PlayoutStatusIO struct {
State string `json:"state" enums:"running,idle" jsonschema:"enum=running,enum=idle"`
Packet uint64 `json:"packet"`
Time uint64 `json:"time"`
Size uint64 `json:"size_kb"`
Packet uint64 `json:"packet" format:"uint64"`
Time uint64 `json:"time" format:"uint64"`
Size uint64 `json:"size_kb" format:"uint64"`
}
func (i *PlayoutStatusIO) Unmarshal(io playout.StatusIO) {
@ -33,12 +33,12 @@ func (s *PlayoutStatusSwap) Unmarshal(swap playout.StatusSwap) {
type PlayoutStatus struct {
ID string `json:"id"`
Address string `json:"url"`
Stream uint64 `json:"stream"`
Queue uint64 `json:"queue"`
AQueue uint64 `json:"aqueue"`
Dup uint64 `json:"dup"`
Drop uint64 `json:"drop"`
Enc uint64 `json:"enc"`
Stream uint64 `json:"stream" format:"uint64"`
Queue uint64 `json:"queue" format:"uint64"`
AQueue uint64 `json:"aqueue" format:"uint64"`
Dup uint64 `json:"dup" format:"uint64"`
Drop uint64 `json:"drop" format:"uint64"`
Enc uint64 `json:"enc" format:"uint64"`
Looping bool `json:"looping"`
Duplicating bool `json:"duplicating"`
GOP string `json:"gop"`

View File

@ -11,8 +11,8 @@ type ProbeIO struct {
// common
Address string `json:"url"`
Format string `json:"format"`
Index uint64 `json:"index"`
Stream uint64 `json:"stream"`
Index uint64 `json:"index" format:"uint64"`
Stream uint64 `json:"stream" format:"uint64"`
Language string `json:"language"`
Type string `json:"type"`
Codec string `json:"codec"`
@ -23,13 +23,13 @@ type ProbeIO struct {
// video
FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"`
Pixfmt string `json:"pix_fmt"`
Width uint64 `json:"width"`
Height uint64 `json:"height"`
Width uint64 `json:"width" format:"uint64"`
Height uint64 `json:"height" format:"uint64"`
// audio
Sampling uint64 `json:"sampling_hz"`
Sampling uint64 `json:"sampling_hz" format:"uint64"`
Layout string `json:"layout"`
Channels uint64 `json:"channels"`
Channels uint64 `json:"channels" format:"uint64"`
}
func (i *ProbeIO) Unmarshal(io *app.ProbeIO) {

View File

@ -13,7 +13,7 @@ type Process struct {
ID string `json:"id" jsonschema:"minLength=1"`
Type string `json:"type" jsonschema:"enum=ffmpeg"`
Reference string `json:"reference"`
CreatedAt int64 `json:"created_at" jsonschema:"minimum=0"`
CreatedAt int64 `json:"created_at" jsonschema:"minimum=0" format:"int64"`
Config *ProcessConfig `json:"config,omitempty"`
State *ProcessState `json:"state,omitempty"`
Report *ProcessReport `json:"report,omitempty"`
@ -30,15 +30,15 @@ type ProcessConfigIO struct {
type ProcessConfigIOCleanup struct {
Pattern string `json:"pattern" validate:"required"`
MaxFiles uint `json:"max_files"`
MaxFileAge uint `json:"max_file_age_seconds"`
MaxFiles uint `json:"max_files" format:"uint"`
MaxFileAge uint `json:"max_file_age_seconds" format:"uint"`
PurgeOnDelete bool `json:"purge_on_delete"`
}
type ProcessConfigLimits struct {
CPU float64 `json:"cpu_usage" jsonschema:"minimum=0,maximum=100"`
Memory uint64 `json:"memory_mbytes" jsonschema:"minimum=0"`
WaitFor uint64 `json:"waitfor_seconds" jsonschema:"minimum=0"`
Memory uint64 `json:"memory_mbytes" jsonschema:"minimum=0" format:"uint64"`
WaitFor uint64 `json:"waitfor_seconds" jsonschema:"minimum=0" format:"uint64"`
}
// ProcessConfig represents the configuration of an ffmpeg process
@ -50,9 +50,9 @@ type ProcessConfig struct {
Output []ProcessConfigIO `json:"output" validate:"required"`
Options []string `json:"options"`
Reconnect bool `json:"reconnect"`
ReconnectDelay uint64 `json:"reconnect_delay_seconds"`
ReconnectDelay uint64 `json:"reconnect_delay_seconds" format:"uint64"`
Autostart bool `json:"autostart"`
StaleTimeout uint64 `json:"stale_timeout_seconds"`
StaleTimeout uint64 `json:"stale_timeout_seconds" format:"uint64"`
Limits ProcessConfigLimits `json:"limits"`
}
@ -188,7 +188,7 @@ func (cfg *ProcessConfig) Unmarshal(c *app.Config) {
// ProcessReportHistoryEntry represents the logs of a run of a restream process
type ProcessReportHistoryEntry struct {
CreatedAt int64 `json:"created_at"`
CreatedAt int64 `json:"created_at" format:"int64"`
Prelude []string `json:"prelude"`
Log [][2]string `json:"log"`
}
@ -235,11 +235,11 @@ func (report *ProcessReport) Unmarshal(l *app.Log) {
type ProcessState struct {
Order string `json:"order" jsonschema:"enum=start,enum=stop"`
State string `json:"exec" jsonschema:"enum=finished,enum=starting,enum=running,enum=finishing,enum=killed,enum=failed"`
Runtime int64 `json:"runtime_seconds" jsonschema:"minimum=0"`
Reconnect int64 `json:"reconnect_seconds"`
Runtime int64 `json:"runtime_seconds" jsonschema:"minimum=0" format:"int64"`
Reconnect int64 `json:"reconnect_seconds" format:"int64"`
LastLog string `json:"last_logline"`
Progress *Progress `json:"progress"`
Memory uint64 `json:"memory_bytes"`
Memory uint64 `json:"memory_bytes" format:"uint64"`
CPU json.Number `json:"cpu_usage" swaggertype:"number" jsonschema:"type=number"`
Command []string `json:"command"`
}

View File

@ -13,29 +13,29 @@ type ProgressIO struct {
Address string `json:"address" jsonschema:"minLength=1"`
// General
Index uint64 `json:"index"`
Stream uint64 `json:"stream"`
Index uint64 `json:"index" format:"uint64"`
Stream uint64 `json:"stream" format:"uint64"`
Format string `json:"format"`
Type string `json:"type"`
Codec string `json:"codec"`
Coder string `json:"coder"`
Frame uint64 `json:"frame"`
Frame uint64 `json:"frame" format:"uint64"`
FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"`
Packet uint64 `json:"packet"`
Packet uint64 `json:"packet" format:"uint64"`
PPS json.Number `json:"pps" swaggertype:"number" jsonschema:"type=number"`
Size uint64 `json:"size_kb"` // kbytes
Size uint64 `json:"size_kb" format:"uint64"` // kbytes
Bitrate json.Number `json:"bitrate_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s
// Video
Pixfmt string `json:"pix_fmt,omitempty"`
Quantizer json.Number `json:"q,omitempty" swaggertype:"number" jsonschema:"type=number"`
Width uint64 `json:"width,omitempty"`
Height uint64 `json:"height,omitempty"`
Width uint64 `json:"width,omitempty" format:"uint64"`
Height uint64 `json:"height,omitempty" format:"uint64"`
// Audio
Sampling uint64 `json:"sampling_hz,omitempty"`
Sampling uint64 `json:"sampling_hz,omitempty" format:"uint64"`
Layout string `json:"layout,omitempty"`
Channels uint64 `json:"channels,omitempty"`
Channels uint64 `json:"channels,omitempty" format:"uint64"`
// avstream
AVstream *AVstream `json:"avstream"`
@ -79,16 +79,16 @@ func (i *ProgressIO) Unmarshal(io *app.ProgressIO) {
type Progress struct {
Input []ProgressIO `json:"inputs"`
Output []ProgressIO `json:"outputs"`
Frame uint64 `json:"frame"`
Packet uint64 `json:"packet"`
Frame uint64 `json:"frame" format:"uint64"`
Packet uint64 `json:"packet" format:"uint64"`
FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"`
Quantizer json.Number `json:"q" swaggertype:"number" jsonschema:"type=number"`
Size uint64 `json:"size_kb"` // kbytes
Size uint64 `json:"size_kb" format:"uint64"` // kbytes
Time json.Number `json:"time" swaggertype:"number" jsonschema:"type=number"`
Bitrate json.Number `json:"bitrate_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s
Speed json.Number `json:"speed" swaggertype:"number" jsonschema:"type=number"`
Drop uint64 `json:"drop"`
Dup uint64 `json:"dup"`
Drop uint64 `json:"drop" format:"uint64"`
Dup uint64 `json:"dup" format:"uint64"`
}
// Unmarshal converts a restreamer Progress to a Progress in API representation

View File

@ -8,9 +8,9 @@ import (
// SessionStats are the accumulated numbers for the session summary
type SessionStats struct {
TotalSessions uint64 `json:"sessions"`
TotalRxBytes uint64 `json:"traffic_rx_mb"`
TotalTxBytes uint64 `json:"traffic_tx_mb"`
TotalSessions uint64 `json:"sessions" format:"uint64"`
TotalRxBytes uint64 `json:"traffic_rx_mb" format:"uint64"`
TotalTxBytes uint64 `json:"traffic_tx_mb" format:"uint64"`
}
// SessionPeers is for the grouping by peers in the summary
@ -24,12 +24,12 @@ type SessionPeers struct {
type Session struct {
ID string `json:"id"`
Reference string `json:"reference"`
CreatedAt int64 `json:"created_at"`
CreatedAt int64 `json:"created_at" format:"int64"`
Location string `json:"local"`
Peer string `json:"remote"`
Extra string `json:"extra"`
RxBytes uint64 `json:"bytes_rx"`
TxBytes uint64 `json:"bytes_tx"`
RxBytes uint64 `json:"bytes_rx" format:"uint64"`
TxBytes uint64 `json:"bytes_tx" format:"uint64"`
RxBitrate json.Number `json:"bandwidth_rx_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s
TxBitrate json.Number `json:"bandwidth_tx_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s
}
@ -50,10 +50,10 @@ func (s *Session) Unmarshal(sess session.Session) {
// SessionSummaryActive represents the currently active sessions
type SessionSummaryActive struct {
SessionList []Session `json:"list"`
Sessions uint64 `json:"sessions"`
Sessions uint64 `json:"sessions" format:"uint64"`
RxBitrate json.Number `json:"bandwidth_rx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s
TxBitrate json.Number `json:"bandwidth_tx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s
MaxSessions uint64 `json:"max_sessions"`
MaxSessions uint64 `json:"max_sessions" format:"uint64"`
MaxRxBitrate json.Number `json:"max_bandwidth_rx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s
MaxTxBitrate json.Number `json:"max_bandwidth_tx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s
}

View File

@ -8,60 +8,60 @@ import (
// SRTStatistics represents the statistics of a SRT connection
type SRTStatistics struct {
MsTimeStamp uint64 `json:"timestamp_ms"` // The time elapsed, in milliseconds, since the SRT socket has been created
MsTimeStamp uint64 `json:"timestamp_ms" format:"uint64"` // The time elapsed, in milliseconds, since the SRT socket has been created
// Accumulated
PktSent uint64 `json:"sent_pkt"` // The total number of sent DATA packets, including retransmitted packets
PktRecv uint64 `json:"recv_pkt"` // The total number of received DATA packets, including retransmitted packets
PktSentUnique uint64 `json:"sent_unique_pkt"` // The total number of unique DATA packets sent by the SRT sender
PktRecvUnique uint64 `json:"recv_unique_pkt"` // The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver.
PktSndLoss uint64 `json:"send_loss_pkt"` // The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side.
PktRcvLoss uint64 `json:"recv_loss_pkt"` // The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side
PktRetrans uint64 `json:"sent_retrans_pkt"` // The total number of retransmitted packets sent by the SRT sender
PktRcvRetrans uint64 `json:"recv_retran_pkts"` // The total number of retransmitted packets registered at the receiver side
PktSentACK uint64 `json:"sent_ack_pkt"` // The total number of sent ACK (Acknowledgement) control packets
PktRecvACK uint64 `json:"recv_ack_pkt"` // The total number of received ACK (Acknowledgement) control packets
PktSentNAK uint64 `json:"sent_nak_pkt"` // The total number of sent NAK (Negative Acknowledgement) control packets
PktRecvNAK uint64 `json:"recv_nak_pkt"` // The total number of received NAK (Negative Acknowledgement) control packets
PktSentKM uint64 `json:"send_km_pkt"` // The total number of sent KM (Key Material) control packets
PktRecvKM uint64 `json:"recv_km_pkt"` // The total number of received KM (Key Material) control packets
UsSndDuration uint64 `json:"send_duration_us"` // The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged
PktSndDrop uint64 `json:"send_drop_pkt"` // The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time
PktRcvDrop uint64 `json:"recv_drop_pkt"` // The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets
PktRcvUndecrypt uint64 `json:"recv_undecrypt_pkt"` // The total number of packets that failed to be decrypted at the receiver side
PktSent uint64 `json:"sent_pkt" format:"uint64"` // The total number of sent DATA packets, including retransmitted packets
PktRecv uint64 `json:"recv_pkt" format:"uint64"` // The total number of received DATA packets, including retransmitted packets
PktSentUnique uint64 `json:"sent_unique_pkt" format:"uint64"` // The total number of unique DATA packets sent by the SRT sender
PktRecvUnique uint64 `json:"recv_unique_pkt" format:"uint64"` // The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver.
PktSndLoss uint64 `json:"send_loss_pkt" format:"uint64"` // The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side.
PktRcvLoss uint64 `json:"recv_loss_pkt" format:"uint64"` // The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side
PktRetrans uint64 `json:"sent_retrans_pkt" format:"uint64"` // The total number of retransmitted packets sent by the SRT sender
PktRcvRetrans uint64 `json:"recv_retran_pkts" format:"uint64"` // The total number of retransmitted packets registered at the receiver side
PktSentACK uint64 `json:"sent_ack_pkt" format:"uint64"` // The total number of sent ACK (Acknowledgement) control packets
PktRecvACK uint64 `json:"recv_ack_pkt" format:"uint64"` // The total number of received ACK (Acknowledgement) control packets
PktSentNAK uint64 `json:"sent_nak_pkt" format:"uint64"` // The total number of sent NAK (Negative Acknowledgement) control packets
PktRecvNAK uint64 `json:"recv_nak_pkt" format:"uint64"` // The total number of received NAK (Negative Acknowledgement) control packets
PktSentKM uint64 `json:"send_km_pkt" format:"uint64"` // The total number of sent KM (Key Material) control packets
PktRecvKM uint64 `json:"recv_km_pkt" format:"uint64"` // The total number of received KM (Key Material) control packets
UsSndDuration uint64 `json:"send_duration_us" format:"uint64"` // The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged
PktSndDrop uint64 `json:"send_drop_pkt" format:"uint64"` // The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time
PktRcvDrop uint64 `json:"recv_drop_pkt" format:"uint64"` // The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets
PktRcvUndecrypt uint64 `json:"recv_undecrypt_pkt" format:"uint64"` // The total number of packets that failed to be decrypted at the receiver side
ByteSent uint64 `json:"sent_bytes"` // Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteRecv uint64 `json:"recv_bytes"` // Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteSentUnique uint64 `json:"sent_unique_bytes"` // Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteRecvUnique uint64 `json:"recv_unique_bytes"` // Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteRcvLoss uint64 `json:"recv_loss_bytes"` // Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size
ByteRetrans uint64 `json:"sent_retrans_bytes"` // Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteSndDrop uint64 `json:"send_drop_bytes"` // Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteRcvDrop uint64 `json:"recv_drop_bytes"` // Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteRcvUndecrypt uint64 `json:"recv_undecrypt_bytes"` // Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteSent uint64 `json:"sent_bytes" format:"uint64"` // Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteRecv uint64 `json:"recv_bytes" format:"uint64"` // Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteSentUnique uint64 `json:"sent_unique_bytes" format:"uint64"` // Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteRecvUnique uint64 `json:"recv_unique_bytes" format:"uint64"` // Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteRcvLoss uint64 `json:"recv_loss_bytes" format:"uint64"` // Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size
ByteRetrans uint64 `json:"sent_retrans_bytes" format:"uint64"` // Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteSndDrop uint64 `json:"send_drop_bytes" format:"uint64"` // Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteRcvDrop uint64 `json:"recv_drop_bytes" format:"uint64"` // Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
ByteRcvUndecrypt uint64 `json:"recv_undecrypt_bytes" format:"uint64"` // Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
// Instantaneous
UsPktSndPeriod float64 `json:"pkt_send_period_us"` // Current minimum time interval between which consecutive packets are sent, in microseconds
PktFlowWindow uint64 `json:"flow_window_pkt"` // The maximum number of packets that can be "in flight"
PktFlightSize uint64 `json:"flight_size_pkt"` // The number of packets in flight
MsRTT float64 `json:"rtt_ms"` // Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an endpoint's RTT samples, in milliseconds
MbpsBandwidth float64 `json:"bandwidth_mbit"` // Estimated bandwidth of the network link, in Mbps
ByteAvailSndBuf uint64 `json:"avail_send_buf_bytes"` // The available space in the sender's buffer, in bytes
ByteAvailRcvBuf uint64 `json:"avail_recv_buf_bytes"` // The available space in the receiver's buffer, in bytes
MbpsMaxBW float64 `json:"max_bandwidth_mbit"` // Transmission bandwidth limit, in Mbps
ByteMSS uint64 `json:"mss_bytes"` // Maximum Segment Size (MSS), in bytes
PktSndBuf uint64 `json:"send_buf_pkt"` // The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged
ByteSndBuf uint64 `json:"send_buf_bytes"` // Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT)
MsSndBuf uint64 `json:"send_buf_ms"` // The timespan (msec) of packets in the sender's buffer (unacknowledged packets)
MsSndTsbPdDelay uint64 `json:"send_tsbpd_delay_ms"` // Timestamp-based Packet Delivery Delay value of the peer
PktRcvBuf uint64 `json:"recv_buf_pkt"` // The number of acknowledged packets in receiver's buffer
ByteRcvBuf uint64 `json:"recv_buf_bytes"` // Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT)
MsRcvBuf uint64 `json:"recv_buf_ms"` // The timespan (msec) of acknowledged packets in the receiver's buffer
MsRcvTsbPdDelay uint64 `json:"recv_tsbpd_delay_ms"` // Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY
PktReorderTolerance uint64 `json:"reorder_tolerance_pkt"` // Instant value of the packet reorder tolerance
PktRcvAvgBelatedTime uint64 `json:"pkt_recv_avg_belated_time_ms"` // Accumulated difference between the current time and the time-to-play of a packet that is received late
UsPktSndPeriod float64 `json:"pkt_send_period_us"` // Current minimum time interval between which consecutive packets are sent, in microseconds
PktFlowWindow uint64 `json:"flow_window_pkt" format:"uint64"` // The maximum number of packets that can be "in flight"
PktFlightSize uint64 `json:"flight_size_pkt" format:"uint64"` // The number of packets in flight
MsRTT float64 `json:"rtt_ms"` // Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an endpoint's RTT samples, in milliseconds
MbpsBandwidth float64 `json:"bandwidth_mbit"` // Estimated bandwidth of the network link, in Mbps
ByteAvailSndBuf uint64 `json:"avail_send_buf_bytes" format:"uint64"` // The available space in the sender's buffer, in bytes
ByteAvailRcvBuf uint64 `json:"avail_recv_buf_bytes" format:"uint64"` // The available space in the receiver's buffer, in bytes
MbpsMaxBW float64 `json:"max_bandwidth_mbit"` // Transmission bandwidth limit, in Mbps
ByteMSS uint64 `json:"mss_bytes" format:"uint64"` // Maximum Segment Size (MSS), in bytes
PktSndBuf uint64 `json:"send_buf_pkt" format:"uint64"` // The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged
ByteSndBuf uint64 `json:"send_buf_bytes" format:"uint64"` // Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT)
MsSndBuf uint64 `json:"send_buf_ms" format:"uint64"` // The timespan (msec) of packets in the sender's buffer (unacknowledged packets)
MsSndTsbPdDelay uint64 `json:"send_tsbpd_delay_ms" format:"uint64"` // Timestamp-based Packet Delivery Delay value of the peer
PktRcvBuf uint64 `json:"recv_buf_pkt" format:"uint64"` // The number of acknowledged packets in receiver's buffer
ByteRcvBuf uint64 `json:"recv_buf_bytes" format:"uint64"` // Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT)
MsRcvBuf uint64 `json:"recv_buf_ms" format:"uint64"` // The timespan (msec) of acknowledged packets in the receiver's buffer
MsRcvTsbPdDelay uint64 `json:"recv_tsbpd_delay_ms" format:"uint64"` // Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY
PktReorderTolerance uint64 `json:"reorder_tolerance_pkt" format:"uint64"` // Instant value of the packet reorder tolerance
PktRcvAvgBelatedTime uint64 `json:"pkt_recv_avg_belated_time_ms" format:"uint64"` // Accumulated difference between the current time and the time-to-play of a packet that is received late
}
// Unmarshal converts the SRT statistics into API representation
@ -119,7 +119,7 @@ func (s *SRTStatistics) Unmarshal(ss *gosrt.Statistics) {
}
type SRTLog struct {
Timestamp int64 `json:"ts"`
Timestamp int64 `json:"ts" format:"int64"`
Message []string `json:"msg"`
}

View File

@ -1,7 +1,7 @@
package api
type WidgetProcess struct {
CurrentSessions uint64 `json:"current_sessions"`
TotalSessions uint64 `json:"total_sessions"`
CurrentSessions uint64 `json:"current_sessions" format:"uint64"`
TotalSessions uint64 `json:"total_sessions" format:"uint64"`
Uptime int64 `json:"uptime"`
}

25
http/fs/fs.go Normal file
View File

@ -0,0 +1,25 @@
package fs
import (
"github.com/datarhei/core/v16/http/cache"
"github.com/datarhei/core/v16/io/fs"
)
type FS struct {
Name string
Mountpoint string
AllowWrite bool
EnableAuth bool
Username string
Password string
DefaultFile string
DefaultContentType string
Gzip bool
Filesystem fs.Filesystem
Cache cache.Cacher
}
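
For orientation, a minimal sketch of how these fields might be filled when registering filesystems with the HTTP server further below. Names, credentials, and mount points are illustrative; the server requires exactly one filesystem mounted at "/" and rejects duplicate names and mount points.

package main

import (
	httpfs "github.com/datarhei/core/v16/http/fs"
	iofs "github.com/datarhei/core/v16/io/fs"
)

func exampleFilesystems() ([]httpfs.FS, error) {
	memfs, err := iofs.NewMemFilesystem(iofs.MemConfig{})
	if err != nil {
		return nil, err
	}

	return []httpfs.FS{
		{
			// One filesystem must be mounted at "/"; the server enforces this.
			Name:               "root",
			Mountpoint:         "/",
			DefaultFile:        "index.html",
			DefaultContentType: "text/html",
			Gzip:               true,
			Filesystem:         memfs,
		},
		{
			// A second, writable mount protected by basic auth (illustrative credentials).
			Name:               "mem",
			Mountpoint:         "/memfs",
			AllowWrite:         true,
			EnableAuth:         true,
			Username:           "admin",
			Password:           "secret",
			DefaultContentType: "application/data",
			Gzip:               true,
			Filesystem:         memfs,
		},
	}, nil
}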

View File

@ -3,6 +3,7 @@ package api
import (
"io"
"net/http"
"time"
cfgstore "github.com/datarhei/core/v16/config/store"
cfgvars "github.com/datarhei/core/v16/config/vars"
@ -71,6 +72,10 @@ func (p *ConfigHandler) Set(c echo.Context) error {
}
cfg := p.store.Get()
cfgActive := p.store.GetActive()
// Copy the timestamp of when this config has been used
cfg.LoadedAt = cfgActive.LoadedAt
// For each version, set the current config as default config value. This will
// allow to set a partial config without destroying the other values.
@ -119,6 +124,9 @@ func (p *ConfigHandler) Set(c echo.Context) error {
return api.Err(http.StatusBadRequest, "Invalid config version", "version %d", version.Version)
}
cfg.CreatedAt = time.Now()
cfg.UpdatedAt = cfg.CreatedAt
// Now we make a copy from the config and merge it with the environment
// variables. If this configuration is valid, we will store the un-merged
// one to disk.
@ -157,15 +165,15 @@ func (p *ConfigHandler) Set(c echo.Context) error {
// Reload will reload the currently active configuration
// @Summary Reload the currently active configuration
// @Description Reload the currently active configuration. This will trigger a restart of the Restreamer.
// @Description Reload the currently active configuration. This will trigger a restart of the Core.
// @Tags v16.7.2
// @ID config-3-reload
// @Produce plain
// @Success 200 {string} string "OK"
// @Produce json
// @Success 200 {string} string
// @Security ApiKeyAuth
// @Router /api/v3/config/reload [get]
func (p *ConfigHandler) Reload(c echo.Context) error {
p.store.Reload()
return c.String(http.StatusOK, "OK")
return c.JSON(http.StatusOK, "OK")
}

View File

@ -4,20 +4,32 @@ import (
"bytes"
"encoding/json"
"net/http"
"strings"
"testing"
"github.com/datarhei/core/v16/config"
"github.com/datarhei/core/v16/config/store"
v1 "github.com/datarhei/core/v16/config/v1"
"github.com/datarhei/core/v16/http/mock"
"github.com/datarhei/core/v16/io/fs"
"github.com/labstack/echo/v4"
"github.com/stretchr/testify/require"
)
func getDummyConfigRouter() (*echo.Echo, store.Store) {
func getDummyConfigRouter(t *testing.T) (*echo.Echo, store.Store) {
router := mock.DummyEcho()
config := store.NewDummy()
memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
require.NoError(t, err)
_, _, err = memfs.WriteFileReader("./mime.types", strings.NewReader("xxxxx"))
require.NoError(t, err)
_, _, err = memfs.WriteFileReader("/bin/ffmpeg", strings.NewReader("xxxxx"))
require.NoError(t, err)
config, err := store.NewJSON(memfs, "/config.json", nil)
require.NoError(t, err)
handler := NewConfig(config)
@ -28,7 +40,7 @@ func getDummyConfigRouter() (*echo.Echo, store.Store) {
}
func TestConfigGet(t *testing.T) {
router, _ := getDummyConfigRouter()
router, _ := getDummyConfigRouter(t)
mock.Request(t, http.StatusOK, router, "GET", "/", nil)
@ -36,18 +48,21 @@ func TestConfigGet(t *testing.T) {
}
func TestConfigSetConflict(t *testing.T) {
router, _ := getDummyConfigRouter()
router, _ := getDummyConfigRouter(t)
cfg := config.New(nil)
cfg.Storage.MimeTypes = "/path/to/mime.types"
var data bytes.Buffer
encoder := json.NewEncoder(&data)
encoder.Encode(config.New())
encoder.Encode(cfg)
mock.Request(t, http.StatusConflict, router, "PUT", "/", &data)
}
func TestConfigSet(t *testing.T) {
router, store := getDummyConfigRouter()
router, store := getDummyConfigRouter(t)
storedcfg := store.Get()
@ -57,11 +72,9 @@ func TestConfigSet(t *testing.T) {
encoder := json.NewEncoder(&data)
// Setting a new v3 config
cfg := config.New()
cfg.FFmpeg.Binary = "true"
cfg := config.New(nil)
cfg.DB.Dir = "."
cfg.Storage.Disk.Dir = "."
cfg.Storage.MimeTypes = ""
cfg.Storage.Disk.Cache.Types.Allow = []string{".aaa"}
cfg.Storage.Disk.Cache.Types.Block = []string{".zzz"}
cfg.Host.Name = []string{"foobar.com"}
@ -78,11 +91,9 @@ func TestConfigSet(t *testing.T) {
require.Equal(t, "cert@datarhei.com", cfg.TLS.Email)
// Setting a complete v1 config
cfgv1 := v1.New()
cfgv1.FFmpeg.Binary = "true"
cfgv1 := v1.New(nil)
cfgv1.DB.Dir = "."
cfgv1.Storage.Disk.Dir = "."
cfgv1.Storage.MimeTypes = ""
cfgv1.Storage.Disk.Cache.Types = []string{".bbb"}
cfgv1.Host.Name = []string{"foobar.com"}

View File

@ -1,215 +0,0 @@
package api
import (
"net/http"
"path/filepath"
"sort"
"github.com/datarhei/core/v16/http/api"
"github.com/datarhei/core/v16/http/cache"
"github.com/datarhei/core/v16/http/handler"
"github.com/datarhei/core/v16/http/handler/util"
"github.com/datarhei/core/v16/io/fs"
"github.com/labstack/echo/v4"
)
// The DiskFSHandler type provides handlers for manipulating a filesystem
type DiskFSHandler struct {
cache cache.Cacher
filesystem fs.Filesystem
handler *handler.DiskFSHandler
}
// NewDiskFS returns a new DiskFS type. You have to provide a filesystem to act on and optionally
// a Cacher from which files will be purged if the Cacher is related to the filesystem.
func NewDiskFS(fs fs.Filesystem, cache cache.Cacher) *DiskFSHandler {
return &DiskFSHandler{
cache: cache,
filesystem: fs,
handler: handler.NewDiskFS(fs, cache),
}
}
// GetFile returns the file at the given path
// @Summary Fetch a file from the filesystem
// @Description Fetch a file from the filesystem. The contents of that file are returned.
// @Tags v16.7.2
// @ID diskfs-3-get-file
// @Produce application/data
// @Produce json
// @Param path path string true "Path to file"
// @Success 200 {file} byte
// @Success 301 {string} string
// @Failure 404 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/disk/{path} [get]
func (h *DiskFSHandler) GetFile(c echo.Context) error {
path := util.PathWildcardParam(c)
mimeType := c.Response().Header().Get(echo.HeaderContentType)
c.Response().Header().Del(echo.HeaderContentType)
file := h.filesystem.Open(path)
if file == nil {
return api.Err(http.StatusNotFound, "File not found", path)
}
stat, _ := file.Stat()
if stat.IsDir() {
return api.Err(http.StatusNotFound, "File not found", path)
}
defer file.Close()
c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT"))
if path, ok := stat.IsLink(); ok {
path = filepath.Clean("/" + path)
if path[0] == '/' {
path = path[1:]
}
return c.Redirect(http.StatusMovedPermanently, path)
}
c.Response().Header().Set(echo.HeaderContentType, mimeType)
if c.Request().Method == "HEAD" {
return c.Blob(http.StatusOK, "application/data", nil)
}
return c.Stream(http.StatusOK, "application/data", file)
}
// PutFile adds or overwrites a file at the given path
// @Summary Add a file to the filesystem
// @Description Writes or overwrites a file on the filesystem
// @Tags v16.7.2
// @ID diskfs-3-put-file
// @Accept application/data
// @Produce text/plain
// @Produce json
// @Param path path string true "Path to file"
// @Param data body []byte true "File data"
// @Success 201 {string} string
// @Success 204 {string} string
// @Failure 507 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/disk/{path} [put]
func (h *DiskFSHandler) PutFile(c echo.Context) error {
path := util.PathWildcardParam(c)
c.Response().Header().Del(echo.HeaderContentType)
req := c.Request()
_, created, err := h.filesystem.Store(path, req.Body)
if err != nil {
return api.Err(http.StatusBadRequest, "%s", err)
}
if h.cache != nil {
h.cache.Delete(path)
}
c.Response().Header().Set("Content-Location", req.URL.RequestURI())
if created {
return c.String(http.StatusCreated, path)
}
return c.NoContent(http.StatusNoContent)
}
// DeleteFile removes a file from the filesystem
// @Summary Remove a file from the filesystem
// @Description Remove a file from the filesystem
// @Tags v16.7.2
// @ID diskfs-3-delete-file
// @Produce text/plain
// @Param path path string true "Path to file"
// @Success 200 {string} string
// @Failure 404 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/disk/{path} [delete]
func (h *DiskFSHandler) DeleteFile(c echo.Context) error {
path := util.PathWildcardParam(c)
c.Response().Header().Del(echo.HeaderContentType)
size := h.filesystem.Delete(path)
if size < 0 {
return api.Err(http.StatusNotFound, "File not found", path)
}
if h.cache != nil {
h.cache.Delete(path)
}
return c.String(http.StatusOK, "OK")
}
// ListFiles lists all files on the filesystem
// @Summary List all files on the filesystem
// @Description List all files on the filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.
// @Tags v16.7.2
// @ID diskfs-3-list-files
// @Produce json
// @Param glob query string false "glob pattern for file names"
// @Param sort query string false "none, name, size, lastmod"
// @Param order query string false "asc, desc"
// @Success 200 {array} api.FileInfo
// @Security ApiKeyAuth
// @Router /api/v3/fs/disk [get]
func (h *DiskFSHandler) ListFiles(c echo.Context) error {
pattern := util.DefaultQuery(c, "glob", "")
sortby := util.DefaultQuery(c, "sort", "none")
order := util.DefaultQuery(c, "order", "asc")
files := h.filesystem.List(pattern)
var sortFunc func(i, j int) bool
switch sortby {
case "name":
if order == "desc" {
sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() }
} else {
sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() }
}
case "size":
if order == "desc" {
sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() }
} else {
sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() }
}
default:
if order == "asc" {
sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) }
} else {
sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) }
}
}
sort.Slice(files, sortFunc)
fileinfos := []api.FileInfo{}
for _, f := range files {
if f.IsDir() {
continue
}
fileinfos = append(fileinfos, api.FileInfo{
Name: f.Name(),
Size: f.Size(),
LastMod: f.ModTime().Unix(),
})
}
return c.JSON(http.StatusOK, fileinfos)
}

View File

@ -0,0 +1,146 @@
package api
import (
"net/http"
"github.com/datarhei/core/v16/http/api"
"github.com/datarhei/core/v16/http/handler"
"github.com/datarhei/core/v16/http/handler/util"
"github.com/labstack/echo/v4"
)
type FSConfig struct {
Type string
Mountpoint string
Handler *handler.FSHandler
}
// The FSHandler type provides handlers for manipulating the registered filesystems
type FSHandler struct {
filesystems map[string]FSConfig
}
// NewFS returns a new FSHandler type. You have to provide the filesystems to act on.
func NewFS(filesystems map[string]FSConfig) *FSHandler {
return &FSHandler{
filesystems: filesystems,
}
}
// GetFileAPI returns the file at the given path
// @Summary Fetch a file from a filesystem
// @Description Fetch a file from a filesystem
// @ID filesystem-3-get-file
// @Produce application/data
// @Produce json
// @Param name path string true "Name of the filesystem"
// @Param path path string true "Path to file"
// @Success 200 {file} byte
// @Success 301 {string} string
// @Failure 404 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/{name}/{path} [get]
func (h *FSHandler) GetFile(c echo.Context) error {
name := util.PathParam(c, "name")
config, ok := h.filesystems[name]
if !ok {
return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name)
}
return config.Handler.GetFile(c)
}
// PutFileAPI adds or overwrites a file at the given path
// @Summary Add a file to a filesystem
// @Description Writes or overwrites a file on a filesystem
// @ID filesystem-3-put-file
// @Accept application/data
// @Produce text/plain
// @Produce json
// @Param name path string true "Name of the filesystem"
// @Param path path string true "Path to file"
// @Param data body []byte true "File data"
// @Success 201 {string} string
// @Success 204 {string} string
// @Failure 507 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/{name}/{path} [put]
func (h *FSHandler) PutFile(c echo.Context) error {
name := util.PathParam(c, "name")
config, ok := h.filesystems[name]
if !ok {
return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name)
}
return config.Handler.PutFile(c)
}
// DeleteFileAPI removes a file from a filesystem
// @Summary Remove a file from a filesystem
// @Description Remove a file from a filesystem
// @ID filesystem-3-delete-file
// @Produce text/plain
// @Param name path string true "Name of the filesystem"
// @Param path path string true "Path to file"
// @Success 200 {string} string
// @Failure 404 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/{name}/{path} [delete]
func (h *FSHandler) DeleteFile(c echo.Context) error {
name := util.PathParam(c, "name")
config, ok := h.filesystems[name]
if !ok {
return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name)
}
return config.Handler.DeleteFile(c)
}
// ListFiles lists all files on a filesystem
// @Summary List all files on a filesystem
// @Description List all files on a filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.
// @ID filesystem-3-list-files
// @Produce json
// @Param name path string true "Name of the filesystem"
// @Param glob query string false "glob pattern for file names"
// @Param sort query string false "none, name, size, lastmod"
// @Param order query string false "asc, desc"
// @Success 200 {array} api.FileInfo
// @Security ApiKeyAuth
// @Router /api/v3/fs/{name} [get]
func (h *FSHandler) ListFiles(c echo.Context) error {
name := util.PathParam(c, "name")
config, ok := h.filesystems[name]
if !ok {
return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name)
}
return config.Handler.ListFiles(c)
}
// List lists all registered filesystems
// @Summary List all registered filesystems
// @Description List all registered filesystems
// @ID filesystem-3-list
// @Produce json
// @Success 200 {array} api.FilesystemInfo
// @Security ApiKeyAuth
// @Router /api/v3/fs [get]
func (h *FSHandler) List(c echo.Context) error {
fss := []api.FilesystemInfo{}
for name, config := range h.filesystems {
fss = append(fss, api.FilesystemInfo{
Name: name,
Type: config.Type,
Mount: config.Mountpoint,
})
}
return c.JSON(http.StatusOK, fss)
}

View File

@ -1,177 +0,0 @@
package api
import (
"io"
"net/http"
"net/url"
"sort"
"github.com/datarhei/core/v16/http/api"
"github.com/datarhei/core/v16/http/handler"
"github.com/datarhei/core/v16/http/handler/util"
"github.com/datarhei/core/v16/io/fs"
"github.com/labstack/echo/v4"
)
// The MemFSHandler type provides handlers for manipulating a filesystem
type MemFSHandler struct {
filesystem fs.Filesystem
handler *handler.MemFSHandler
}
// NewMemFS returns a new MemFS type. You have to provide a filesystem to act on.
func NewMemFS(fs fs.Filesystem) *MemFSHandler {
return &MemFSHandler{
filesystem: fs,
handler: handler.NewMemFS(fs),
}
}
// GetFileAPI returns the file at the given path
// @Summary Fetch a file from the memory filesystem
// @Description Fetch a file from the memory filesystem
// @Tags v16.7.2
// @ID memfs-3-get-file
// @Produce application/data
// @Produce json
// @Param path path string true "Path to file"
// @Success 200 {file} byte
// @Success 301 {string} string
// @Failure 404 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/mem/{path} [get]
func (h *MemFSHandler) GetFile(c echo.Context) error {
return h.handler.GetFile(c)
}
// PutFileAPI adds or overwrites a file at the given path
// @Summary Add a file to the memory filesystem
// @Description Writes or overwrites a file on the memory filesystem
// @Tags v16.7.2
// @ID memfs-3-put-file
// @Accept application/data
// @Produce text/plain
// @Produce json
// @Param path path string true "Path to file"
// @Param data body []byte true "File data"
// @Success 201 {string} string
// @Success 204 {string} string
// @Failure 507 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/mem/{path} [put]
func (h *MemFSHandler) PutFile(c echo.Context) error {
return h.handler.PutFile(c)
}
// DeleteFileAPI removes a file from the filesystem
// @Summary Remove a file from the memory filesystem
// @Description Remove a file from the memory filesystem
// @Tags v16.7.2
// @ID memfs-3-delete-file
// @Produce text/plain
// @Param path path string true "Path to file"
// @Success 200 {string} string
// @Failure 404 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/mem/{path} [delete]
func (h *MemFSHandler) DeleteFile(c echo.Context) error {
return h.handler.DeleteFile(c)
}
// PatchFile creates a symbolic link to a file in the filesystem
// @Summary Create a link to a file in the memory filesystem
// @Description Create a link to a file in the memory filesystem. The file linked to has to exist.
// @Tags v16.7.2
// @ID memfs-3-patch
// @Accept application/data
// @Produce text/plain
// @Produce json
// @Param path path string true "Path to file"
// @Param url body string true "Path to the file to link to"
// @Success 201 {string} string
// @Failure 400 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/mem/{path} [patch]
func (h *MemFSHandler) PatchFile(c echo.Context) error {
path := util.PathWildcardParam(c)
c.Response().Header().Del(echo.HeaderContentType)
req := c.Request()
body, err := io.ReadAll(req.Body)
if err != nil {
return api.Err(http.StatusBadRequest, "Failed reading request body", "%s", err)
}
u, err := url.Parse(string(body))
if err != nil {
return api.Err(http.StatusBadRequest, "Body doesn't contain a valid path", "%s", err)
}
if err := h.filesystem.Symlink(u.Path, path); err != nil {
return api.Err(http.StatusBadRequest, "Failed to create symlink", "%s", err)
}
c.Response().Header().Set("Content-Location", req.URL.RequestURI())
return c.String(http.StatusCreated, "")
}
// ListFiles lists all files on the filesystem
// @Summary List all files on the memory filesystem
// @Description List all files on the memory filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.
// @Tags v16.7.2
// @ID memfs-3-list-files
// @Produce json
// @Param glob query string false "glob pattern for file names"
// @Param sort query string false "none, name, size, lastmod"
// @Param order query string false "asc, desc"
// @Success 200 {array} api.FileInfo
// @Security ApiKeyAuth
// @Router /api/v3/fs/mem [get]
func (h *MemFSHandler) ListFiles(c echo.Context) error {
pattern := util.DefaultQuery(c, "glob", "")
sortby := util.DefaultQuery(c, "sort", "none")
order := util.DefaultQuery(c, "order", "asc")
files := h.filesystem.List(pattern)
var sortFunc func(i, j int) bool
switch sortby {
case "name":
if order == "desc" {
sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() }
} else {
sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() }
}
case "size":
if order == "desc" {
sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() }
} else {
sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() }
}
default:
if order == "asc" {
sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) }
} else {
sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) }
}
}
sort.Slice(files, sortFunc)
var fileinfos []api.FileInfo = make([]api.FileInfo, len(files))
for i, f := range files {
fileinfos[i] = api.FileInfo{
Name: f.Name(),
Size: f.Size(),
LastMod: f.ModTime().Unix(),
}
}
return c.JSON(http.StatusOK, fileinfos)
}

View File

@ -51,7 +51,7 @@ func (h *RestreamHandler) Add(c echo.Context) error {
return api.Err(http.StatusBadRequest, "Unsupported process type", "Supported process types are: ffmpeg")
}
if len(process.Input) == 0 && len(process.Output) == 0 {
if len(process.Input) == 0 || len(process.Output) == 0 {
return api.Err(http.StatusBadRequest, "At least one input and one output need to be defined")
}
@ -189,6 +189,14 @@ func (h *RestreamHandler) Update(c echo.Context) error {
Autostart: true,
}
current, err := h.restream.GetProcess(id)
if err != nil {
return api.Err(http.StatusNotFound, "Process not found", "%s", id)
}
// Prefill the config with the current values
process.Unmarshal(current.Config)
if err := util.ShouldBindJSON(c, &process); err != nil {
return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err)
}
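
The prefill-then-bind sequence above is what enables partial process config updates: fields absent from the request body keep the values of the currently stored config. A minimal, self-contained sketch of the pattern with a hypothetical config type:

package main

import (
	"encoding/json"
	"fmt"
)

// processConfig is a hypothetical stand-in for the real process config type.
type processConfig struct {
	ID        string `json:"id"`
	Reference string `json:"reference"`
	Autostart bool   `json:"autostart"`
}

func main() {
	// Prefill with the currently stored values ...
	cfg := processConfig{ID: "abc", Reference: "main", Autostart: true}

	// ... then unmarshal the request body over it. Fields missing from the
	// JSON keep their prefilled values, which makes the update partial.
	body := []byte(`{"reference":"backup"}`)
	if err := json.Unmarshal(body, &cfg); err != nil {
		panic(err)
	}

	fmt.Printf("%+v\n", cfg) // {ID:abc Reference:backup Autostart:true}
}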

View File

@ -1,88 +0,0 @@
package handler
import (
"net/http"
"path/filepath"
"github.com/datarhei/core/v16/http/api"
"github.com/datarhei/core/v16/http/cache"
"github.com/datarhei/core/v16/http/handler/util"
"github.com/datarhei/core/v16/io/fs"
"github.com/labstack/echo/v4"
)
// The DiskFSHandler type provides handlers for manipulating a filesystem
type DiskFSHandler struct {
cache cache.Cacher
filesystem fs.Filesystem
}
// NewDiskFS returns a new DiskFS type. You have to provide a filesystem to act on and optionally
// a Cacher from which files will be purged if the Cacher is related to the filesystem.
func NewDiskFS(fs fs.Filesystem, cache cache.Cacher) *DiskFSHandler {
return &DiskFSHandler{
cache: cache,
filesystem: fs,
}
}
// GetFile returns the file at the given path
// @Summary Fetch a file from the filesystem
// @Description Fetch a file from the filesystem. If the file is a directory, an index.html is returned, if it exists.
// @ID diskfs-get-file
// @Produce application/data
// @Produce json
// @Param path path string true "Path to file"
// @Success 200 {file} byte
// @Success 301 {string} string
// @Failure 404 {object} api.Error
// @Router /{path} [get]
func (h *DiskFSHandler) GetFile(c echo.Context) error {
path := util.PathWildcardParam(c)
mimeType := c.Response().Header().Get(echo.HeaderContentType)
c.Response().Header().Del(echo.HeaderContentType)
file := h.filesystem.Open(path)
if file == nil {
return api.Err(http.StatusNotFound, "File not found", path)
}
stat, _ := file.Stat()
if stat.IsDir() {
path = filepath.Join(path, "index.html")
file.Close()
file = h.filesystem.Open(path)
if file == nil {
return api.Err(http.StatusNotFound, "File not found", path)
}
stat, _ = file.Stat()
}
defer file.Close()
c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT"))
if path, ok := stat.IsLink(); ok {
path = filepath.Clean("/" + path)
if path[0] == '/' {
path = path[1:]
}
return c.Redirect(http.StatusMovedPermanently, path)
}
c.Response().Header().Set(echo.HeaderContentType, mimeType)
if c.Request().Method == "HEAD" {
return c.Blob(http.StatusOK, "application/data", nil)
}
return c.Stream(http.StatusOK, "application/data", file)
}

164
http/handler/filesystem.go Normal file
View File

@ -0,0 +1,164 @@
package handler
import (
"net/http"
"path/filepath"
"sort"
"github.com/datarhei/core/v16/http/api"
"github.com/datarhei/core/v16/http/fs"
"github.com/datarhei/core/v16/http/handler/util"
"github.com/labstack/echo/v4"
)
// The FSHandler type provides handlers for manipulating a filesystem
type FSHandler struct {
fs fs.FS
}
// NewFS returns a new FSHandler type. You have to provide a filesystem to act on.
func NewFS(fs fs.FS) *FSHandler {
return &FSHandler{
fs: fs,
}
}
func (h *FSHandler) GetFile(c echo.Context) error {
path := util.PathWildcardParam(c)
mimeType := c.Response().Header().Get(echo.HeaderContentType)
c.Response().Header().Del(echo.HeaderContentType)
file := h.fs.Filesystem.Open(path)
if file == nil {
return api.Err(http.StatusNotFound, "File not found", path)
}
stat, _ := file.Stat()
if len(h.fs.DefaultFile) != 0 {
if stat.IsDir() {
path = filepath.Join(path, h.fs.DefaultFile)
file.Close()
file = h.fs.Filesystem.Open(path)
if file == nil {
return api.Err(http.StatusNotFound, "File not found", path)
}
stat, _ = file.Stat()
}
}
defer file.Close()
c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT"))
if path, ok := stat.IsLink(); ok {
path = filepath.Clean("/" + path)
if path[0] == '/' {
path = path[1:]
}
return c.Redirect(http.StatusMovedPermanently, path)
}
c.Response().Header().Set(echo.HeaderContentType, mimeType)
if c.Request().Method == "HEAD" {
return c.Blob(http.StatusOK, "application/data", nil)
}
return c.Stream(http.StatusOK, "application/data", file)
}
func (h *FSHandler) PutFile(c echo.Context) error {
path := util.PathWildcardParam(c)
c.Response().Header().Del(echo.HeaderContentType)
req := c.Request()
_, created, err := h.fs.Filesystem.WriteFileReader(path, req.Body)
if err != nil {
return api.Err(http.StatusBadRequest, "Bad request", "%s", err)
}
if h.fs.Cache != nil {
h.fs.Cache.Delete(path)
}
c.Response().Header().Set("Content-Location", req.URL.RequestURI())
if created {
return c.String(http.StatusCreated, "")
}
return c.NoContent(http.StatusNoContent)
}
func (h *FSHandler) DeleteFile(c echo.Context) error {
path := util.PathWildcardParam(c)
c.Response().Header().Del(echo.HeaderContentType)
size := h.fs.Filesystem.Remove(path)
if size < 0 {
return api.Err(http.StatusNotFound, "File not found", path)
}
if h.fs.Cache != nil {
h.fs.Cache.Delete(path)
}
return c.String(http.StatusOK, "Deleted: "+path)
}
func (h *FSHandler) ListFiles(c echo.Context) error {
pattern := util.DefaultQuery(c, "glob", "")
sortby := util.DefaultQuery(c, "sort", "none")
order := util.DefaultQuery(c, "order", "asc")
files := h.fs.Filesystem.List("/", pattern)
var sortFunc func(i, j int) bool
switch sortby {
case "name":
if order == "desc" {
sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() }
} else {
sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() }
}
case "size":
if order == "desc" {
sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() }
} else {
sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() }
}
default:
if order == "asc" {
sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) }
} else {
sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) }
}
}
sort.Slice(files, sortFunc)
var fileinfos []api.FileInfo = make([]api.FileInfo, len(files))
for i, f := range files {
fileinfos[i] = api.FileInfo{
Name: f.Name(),
Size: f.Size(),
LastMod: f.ModTime().Unix(),
}
}
return c.JSON(http.StatusOK, fileinfos)
}

View File

@ -1,130 +0,0 @@
package handler
import (
"net/http"
"path/filepath"
"github.com/datarhei/core/v16/http/api"
"github.com/datarhei/core/v16/http/handler/util"
"github.com/datarhei/core/v16/io/fs"
"github.com/labstack/echo/v4"
)
// The MemFSHandler type provides handlers for manipulating a filesystem
type MemFSHandler struct {
filesystem fs.Filesystem
}
// NewMemFS returns a new MemFS type. You have to provide a filesystem to act on.
func NewMemFS(fs fs.Filesystem) *MemFSHandler {
return &MemFSHandler{
filesystem: fs,
}
}
// GetFile returns the file at the given path
// @Summary Fetch a file from the memory filesystem
// @Description Fetch a file from the memory filesystem
// @ID memfs-get-file
// @Produce application/data
// @Produce json
// @Param path path string true "Path to file"
// @Success 200 {file} byte
// @Success 301 {string} string
// @Failure 404 {object} api.Error
// @Router /memfs/{path} [get]
func (h *MemFSHandler) GetFile(c echo.Context) error {
path := util.PathWildcardParam(c)
mimeType := c.Response().Header().Get(echo.HeaderContentType)
c.Response().Header().Del(echo.HeaderContentType)
file := h.filesystem.Open(path)
if file == nil {
return api.Err(http.StatusNotFound, "File not found", path)
}
defer file.Close()
stat, _ := file.Stat()
c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT"))
if path, ok := stat.IsLink(); ok {
path = filepath.Clean("/" + path)
if path[0] == '/' {
path = path[1:]
}
return c.Redirect(http.StatusMovedPermanently, path)
}
c.Response().Header().Set(echo.HeaderContentType, mimeType)
if c.Request().Method == "HEAD" {
return c.Blob(http.StatusOK, "application/data", nil)
}
return c.Stream(http.StatusOK, "application/data", file)
}
// PutFile adds or overwrites a file at the given path
// @Summary Add a file to the memory filesystem
// @Description Writes or overwrites a file on the memory filesystem
// @ID memfs-put-file
// @Accept application/data
// @Produce text/plain
// @Produce json
// @Param path path string true "Path to file"
// @Param data body []byte true "File data"
// @Success 201 {string} string
// @Success 204 {string} string
// @Failure 507 {object} api.Error
// @Security BasicAuth
// @Router /memfs/{path} [put]
func (h *MemFSHandler) PutFile(c echo.Context) error {
path := util.PathWildcardParam(c)
c.Response().Header().Del(echo.HeaderContentType)
req := c.Request()
_, created, err := h.filesystem.Store(path, req.Body)
if err != nil {
return api.Err(http.StatusBadRequest, "%s", err)
}
c.Response().Header().Set("Content-Location", req.URL.RequestURI())
if created {
return c.String(http.StatusCreated, "")
}
return c.NoContent(http.StatusNoContent)
}
// DeleteFile removes a file from the filesystem
// @Summary Remove a file from the memory filesystem
// @Description Remove a file from the memory filesystem
// @ID memfs-delete-file
// @Produce text/plain
// @Param path path string true "Path to file"
// @Success 200 {string} string
// @Failure 404 {object} api.Error
// @Security BasicAuth
// @Router /memfs/{path} [delete]
func (h *MemFSHandler) DeleteFile(c echo.Context) error {
path := util.PathWildcardParam(c)
c.Response().Header().Del(echo.HeaderContentType)
size := h.filesystem.Delete(path)
if size < 0 {
return api.Err(http.StatusNotFound, "File not found", path)
}
return c.String(http.StatusOK, "Deleted: "+path)
}

View File

@ -17,6 +17,7 @@ import (
"github.com/datarhei/core/v16/http/errorhandler"
"github.com/datarhei/core/v16/http/validator"
"github.com/datarhei/core/v16/internal/testhelper"
"github.com/datarhei/core/v16/io/fs"
"github.com/datarhei/core/v16/restream"
"github.com/datarhei/core/v16/restream/store"
@ -32,7 +33,17 @@ func DummyRestreamer(pathPrefix string) (restream.Restreamer, error) {
return nil, fmt.Errorf("failed to build helper program: %w", err)
}
store := store.NewDummyStore(store.DummyConfig{})
memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
if err != nil {
return nil, fmt.Errorf("failed to create memory filesystem: %w", err)
}
store, err := store.NewJSON(store.JSONConfig{
Filesystem: memfs,
})
if err != nil {
return nil, err
}
ffmpeg, err := ffmpeg.New(ffmpeg.Config{
Binary: binary,

View File

@ -29,19 +29,20 @@
package http
import (
"fmt"
"net/http"
"strings"
cfgstore "github.com/datarhei/core/v16/config/store"
"github.com/datarhei/core/v16/http/cache"
"github.com/datarhei/core/v16/http/errorhandler"
"github.com/datarhei/core/v16/http/fs"
"github.com/datarhei/core/v16/http/graph/resolver"
"github.com/datarhei/core/v16/http/handler"
api "github.com/datarhei/core/v16/http/handler/api"
"github.com/datarhei/core/v16/http/jwt"
"github.com/datarhei/core/v16/http/router"
"github.com/datarhei/core/v16/http/validator"
"github.com/datarhei/core/v16/io/fs"
"github.com/datarhei/core/v16/log"
"github.com/datarhei/core/v16/monitor"
"github.com/datarhei/core/v16/net"
@ -79,8 +80,7 @@ type Config struct {
Metrics monitor.HistoryReader
Prometheus prometheus.Reader
MimeTypesFile string
DiskFS fs.Filesystem
MemFS MemFSConfig
Filesystems []fs.FS
IPLimiter net.IPLimiter
Profiling bool
Cors CorsConfig
@ -94,13 +94,6 @@ type Config struct {
ReadOnly bool
}
type MemFSConfig struct {
EnableAuth bool
Username string
Password string
Filesystem fs.Filesystem
}
type CorsConfig struct {
Origins []string
}
@ -114,8 +107,6 @@ type server struct {
handler struct {
about *api.AboutHandler
memfs *handler.MemFSHandler
diskfs *handler.DiskFSHandler
prometheus *handler.PrometheusHandler
profiling *handler.ProfilingHandler
ping *handler.PingHandler
@ -127,8 +118,6 @@ type server struct {
log *api.LogHandler
restream *api.RestreamHandler
playout *api.PlayoutHandler
memfs *api.MemFSHandler
diskfs *api.DiskFSHandler
rtmp *api.RTMPHandler
srt *api.SRTHandler
config *api.ConfigHandler
@ -148,18 +137,12 @@ type server struct {
hlsrewrite echo.MiddlewareFunc
}
memfs struct {
enableAuth bool
username string
password string
}
diskfs fs.Filesystem
gzip struct {
mimetypes []string
}
filesystems map[string]*filesystem
router *echo.Echo
mimeTypesFile string
profiling bool
@ -167,32 +150,63 @@ type server struct {
readOnly bool
}
type filesystem struct {
fs.FS
handler *handler.FSHandler
middleware echo.MiddlewareFunc
}
func NewServer(config Config) (Server, error) {
s := &server{
logger: config.Logger,
mimeTypesFile: config.MimeTypesFile,
profiling: config.Profiling,
diskfs: config.DiskFS,
readOnly: config.ReadOnly,
}
s.v3handler.diskfs = api.NewDiskFS(
config.DiskFS,
config.Cache,
)
s.filesystems = map[string]*filesystem{}
s.handler.diskfs = handler.NewDiskFS(
config.DiskFS,
config.Cache,
)
corsPrefixes := map[string][]string{
"/api": {"*"},
}
s.middleware.hlsrewrite = mwhlsrewrite.NewHLSRewriteWithConfig(mwhlsrewrite.HLSRewriteConfig{
PathPrefix: config.DiskFS.Base(),
})
for _, fs := range config.Filesystems {
if _, ok := s.filesystems[fs.Name]; ok {
return nil, fmt.Errorf("the filesystem name '%s' is already in use", fs.Name)
}
s.memfs.enableAuth = config.MemFS.EnableAuth
s.memfs.username = config.MemFS.Username
s.memfs.password = config.MemFS.Password
if !strings.HasPrefix(fs.Mountpoint, "/") {
fs.Mountpoint = "/" + fs.Mountpoint
}
if fs.Mountpoint != "/" {
fs.Mountpoint = strings.TrimSuffix(fs.Mountpoint, "/")
}
if _, ok := corsPrefixes[fs.Mountpoint]; ok {
return nil, fmt.Errorf("the mount point '%s' is already in use (%s)", fs.Mountpoint, fs.Name)
}
corsPrefixes[fs.Mountpoint] = config.Cors.Origins
filesystem := &filesystem{
FS: fs,
handler: handler.NewFS(fs),
}
if fs.Filesystem.Type() == "disk" {
filesystem.middleware = mwhlsrewrite.NewHLSRewriteWithConfig(mwhlsrewrite.HLSRewriteConfig{
PathPrefix: fs.Filesystem.Metadata("base"),
})
}
s.filesystems[filesystem.Name] = filesystem
}
if _, ok := corsPrefixes["/"]; !ok {
return nil, fmt.Errorf("one filesystem must be mounted at /")
}
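
To make the mount point normalization concrete, a small standalone sketch (the helper is hypothetical; the server inlines this logic):

package main

import (
	"fmt"
	"strings"
)

// normalizeMountpoint mirrors the normalization above: enforce a leading
// slash, strip a trailing slash, and leave the root mount "/" untouched.
func normalizeMountpoint(m string) string {
	if !strings.HasPrefix(m, "/") {
		m = "/" + m
	}
	if m != "/" {
		m = strings.TrimSuffix(m, "/")
	}
	return m
}

func main() {
	for _, m := range []string{"memfs", "/memfs/", "/"} {
		fmt.Printf("%q -> %q\n", m, normalizeMountpoint(m))
	}
	// Output: "memfs" -> "/memfs", "/memfs/" -> "/memfs", "/" -> "/"
}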
if config.Logger == nil {
s.logger = log.New("HTTP")
@ -224,16 +238,6 @@ func NewServer(config Config) (Server, error) {
)
}
if config.MemFS.Filesystem != nil {
s.v3handler.memfs = api.NewMemFS(
config.MemFS.Filesystem,
)
s.handler.memfs = handler.NewMemFS(
config.MemFS.Filesystem,
)
}
if config.Prometheus != nil {
s.handler.prometheus = handler.NewPrometheus(
config.Prometheus.HTTPHandler(),
@ -292,12 +296,6 @@ func NewServer(config Config) (Server, error) {
Logger: s.logger,
})
if config.Cache != nil {
s.middleware.cache = mwcache.NewWithConfig(mwcache.Config{
Cache: config.Cache,
})
}
s.v3handler.widget = api.NewWidget(api.WidgetConfig{
Restream: config.Restream,
Registry: config.Sessions,
@ -308,11 +306,7 @@ func NewServer(config Config) (Server, error) {
})
if middleware, err := mwcors.NewWithConfig(mwcors.Config{
Prefixes: map[string][]string{
"/": config.Cors.Origins,
"/api": {"*"},
"/memfs": config.Cors.Origins,
},
Prefixes: corsPrefixes,
}); err != nil {
return nil, err
} else {
@ -437,65 +431,66 @@ func (s *server) setRoutes() {
doc.Use(gzipMiddleware)
doc.GET("", echoSwagger.WrapHandler)
// Serve static data
fs := s.router.Group("/*")
fs.Use(mwmime.NewWithConfig(mwmime.Config{
MimeTypesFile: s.mimeTypesFile,
DefaultContentType: "text/html",
}))
fs.Use(mwgzip.NewWithConfig(mwgzip.Config{
Level: mwgzip.BestSpeed,
MinLength: 1000,
Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes),
}))
if s.middleware.cache != nil {
fs.Use(s.middleware.cache)
}
fs.Use(s.middleware.hlsrewrite)
if s.middleware.session != nil {
fs.Use(s.middleware.session)
}
// Mount filesystems
for _, filesystem := range s.filesystems {
// Define a local variable because it is captured by closures later in the loop
filesystem := filesystem
fs.GET("", s.handler.diskfs.GetFile)
fs.HEAD("", s.handler.diskfs.GetFile)
// Memory FS
if s.handler.memfs != nil {
memfs := s.router.Group("/memfs/*")
memfs.Use(mwmime.NewWithConfig(mwmime.Config{
MimeTypesFile: s.mimeTypesFile,
DefaultContentType: "application/data",
}))
memfs.Use(mwgzip.NewWithConfig(mwgzip.Config{
Level: mwgzip.BestSpeed,
MinLength: 1000,
Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes),
}))
if s.middleware.session != nil {
memfs.Use(s.middleware.session)
mountpoint := filesystem.Mountpoint + "/*"
if filesystem.Mountpoint == "/" {
mountpoint = "/*"
}
memfs.HEAD("", s.handler.memfs.GetFile)
memfs.GET("", s.handler.memfs.GetFile)
fs := s.router.Group(mountpoint)
fs.Use(mwmime.NewWithConfig(mwmime.Config{
MimeTypesFile: s.mimeTypesFile,
DefaultContentType: filesystem.DefaultContentType,
}))
var authmw echo.MiddlewareFunc
if filesystem.Gzip {
fs.Use(mwgzip.NewWithConfig(mwgzip.Config{
Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes),
Level: mwgzip.BestSpeed,
MinLength: 1000,
}))
}
if s.memfs.enableAuth {
authmw = middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) {
if username == s.memfs.username && password == s.memfs.password {
return true, nil
}
return false, nil
if filesystem.Cache != nil {
mwcache := mwcache.NewWithConfig(mwcache.Config{
Cache: filesystem.Cache,
})
fs.Use(mwcache)
}
memfs.POST("", s.handler.memfs.PutFile, authmw)
memfs.PUT("", s.handler.memfs.PutFile, authmw)
memfs.DELETE("", s.handler.memfs.DeleteFile, authmw)
} else {
memfs.POST("", s.handler.memfs.PutFile)
memfs.PUT("", s.handler.memfs.PutFile)
memfs.DELETE("", s.handler.memfs.DeleteFile)
if filesystem.middleware != nil {
fs.Use(filesystem.middleware)
}
if s.middleware.session != nil {
fs.Use(s.middleware.session)
}
fs.GET("", filesystem.handler.GetFile)
fs.HEAD("", filesystem.handler.GetFile)
if filesystem.AllowWrite {
if filesystem.EnableAuth {
authmw := middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) {
if username == filesystem.Username && password == filesystem.Password {
return true, nil
}
return false, nil
})
fs.POST("", filesystem.handler.PutFile, authmw)
fs.PUT("", filesystem.handler.PutFile, authmw)
fs.DELETE("", filesystem.handler.DeleteFile, authmw)
} else {
fs.POST("", filesystem.handler.PutFile)
fs.PUT("", filesystem.handler.PutFile)
fs.DELETE("", filesystem.handler.DeleteFile)
}
}
}
@ -593,32 +588,33 @@ func (s *server) setRoutesV3(v3 *echo.Group) {
}
}
// v3 Memory FS
if s.v3handler.memfs != nil {
v3.GET("/fs/mem", s.v3handler.memfs.ListFiles)
v3.GET("/fs/mem/*", s.v3handler.memfs.GetFile)
if !s.readOnly {
v3.DELETE("/fs/mem/*", s.v3handler.memfs.DeleteFile)
v3.PUT("/fs/mem/*", s.v3handler.memfs.PutFile)
v3.PATCH("/fs/mem/*", s.v3handler.memfs.PatchFile)
// v3 Filesystems
fshandlers := map[string]api.FSConfig{}
for _, fs := range s.filesystems {
fshandlers[fs.Name] = api.FSConfig{
Type: fs.Filesystem.Type(),
Mountpoint: fs.Mountpoint,
Handler: fs.handler,
}
}
// v3 Disk FS
v3.GET("/fs/disk", s.v3handler.diskfs.ListFiles)
v3.GET("/fs/disk/*", s.v3handler.diskfs.GetFile, mwmime.NewWithConfig(mwmime.Config{
handler := api.NewFS(fshandlers)
v3.GET("/fs", handler.List)
v3.GET("/fs/:name", handler.ListFiles)
v3.GET("/fs/:name/*", handler.GetFile, mwmime.NewWithConfig(mwmime.Config{
MimeTypesFile: s.mimeTypesFile,
DefaultContentType: "application/data",
}))
v3.HEAD("/fs/disk/*", s.v3handler.diskfs.GetFile, mwmime.NewWithConfig(mwmime.Config{
v3.HEAD("/fs/:name/*", handler.GetFile, mwmime.NewWithConfig(mwmime.Config{
MimeTypesFile: s.mimeTypesFile,
DefaultContentType: "application/data",
}))
if !s.readOnly {
v3.PUT("/fs/disk/*", s.v3handler.diskfs.PutFile)
v3.DELETE("/fs/disk/*", s.v3handler.diskfs.DeleteFile)
v3.PUT("/fs/:name/*", handler.PutFile)
v3.DELETE("/fs/:name/*", handler.DeleteFile)
}
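
The net effect on the v3 API is that the former /fs/mem and /fs/disk route families collapse into a single /fs/{name} family, plus a listing of the registered filesystems at /fs. A minimal client sketch, assuming a local instance and the JWT bearer auth configured elsewhere (host, token, and filesystem name are illustrative):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	const host = "http://localhost:8080" // illustrative host
	const token = "<access token>"       // illustrative credential

	// GET /api/v3/fs lists the registered filesystems;
	// GET /api/v3/fs/{name} lists the files on one of them.
	req, err := http.NewRequest("GET", host+"/api/v3/fs/disk?sort=lastmod&order=desc", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)

	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	body, _ := io.ReadAll(res.Body)
	fmt.Println(res.Status, string(body))
}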
// v3 RTMP

File diff suppressed because it is too large Load Diff

View File

@ -1,40 +0,0 @@
package fs
import (
"io"
"time"
)
type dummyFileInfo struct{}
func (d *dummyFileInfo) Name() string { return "" }
func (d *dummyFileInfo) Size() int64 { return 0 }
func (d *dummyFileInfo) ModTime() time.Time { return time.Date(2000, 1, 1, 0, 0, 0, 0, nil) }
func (d *dummyFileInfo) IsLink() (string, bool) { return "", false }
func (d *dummyFileInfo) IsDir() bool { return false }
type dummyFile struct{}
func (d *dummyFile) Read(p []byte) (int, error) { return 0, io.EOF }
func (d *dummyFile) Close() error { return nil }
func (d *dummyFile) Name() string { return "" }
func (d *dummyFile) Stat() (FileInfo, error) { return &dummyFileInfo{}, nil }
type dummyFilesystem struct{}
func (d *dummyFilesystem) Base() string { return "/" }
func (d *dummyFilesystem) Rebase(string) error { return nil }
func (d *dummyFilesystem) Size() (int64, int64) { return 0, -1 }
func (d *dummyFilesystem) Resize(int64) {}
func (d *dummyFilesystem) Files() int64 { return 0 }
func (d *dummyFilesystem) Symlink(string, string) error { return nil }
func (d *dummyFilesystem) Open(string) File { return &dummyFile{} }
func (d *dummyFilesystem) Store(string, io.Reader) (int64, bool, error) { return 0, true, nil }
func (d *dummyFilesystem) Delete(string) int64 { return 0 }
func (d *dummyFilesystem) DeleteAll() int64 { return 0 }
func (d *dummyFilesystem) List(string) []FileInfo { return []FileInfo{} }
// NewDummyFilesystem return a dummy filesystem
func NewDummyFilesystem() Filesystem {
return &dummyFilesystem{}
}

View File

@ -3,24 +3,29 @@ package fs
import (
"io"
"io/fs"
"os"
"time"
)
// FileInfo describes a file and is returned by Stat.
type FileInfo interface {
// Name returns the full name of the file
// Name returns the full name of the file.
Name() string
// Size reports the size of the file in bytes
// Size reports the size of the file in bytes.
Size() int64
// ModTime returns the time of last modification
// Mode returns the file mode.
Mode() fs.FileMode
// ModTime returns the time of last modification.
ModTime() time.Time
// IsLink returns the path this file is linking to and true. Otherwise an empty string and false.
IsLink() (string, bool)
// IsDir returns whether the file represents a directory
// IsDir returns whether the file represents a directory.
IsDir() bool
}
@ -28,52 +33,101 @@ type FileInfo interface {
type File interface {
io.ReadCloser
// Name returns the Name of the file
// Name returns the Name of the file.
Name() string
// Stat returns the FileInfo to this file. In case of an error
// FileInfo is nil and the error is non-nil.
// Stat returns the FileInfo to this file. In case of an error FileInfo is nil
// and the error is non-nil. If the file is a symlink, the info reports the name and mode
// of the link itself, but the modification time and size of the linked file.
Stat() (FileInfo, error)
}
// Filesystem is an interface that provides access to a filesystem.
type Filesystem interface {
// Base returns the base path of this filesystem
Base() string
// Rebase sets a new base path for this filesystem
Rebase(string) error
type ReadFilesystem interface {
// Size returns the consumed size and capacity of the filesystem in bytes. The
// capacity is negative if the filesystem can consume as much space as it can.
// capacity is zero or negative if the filesystem can consume as much space as it wants.
Size() (int64, int64)
// Resize resizes the filesystem to the new size. Files may need to be deleted.
Resize(size int64)
// Files returns the current number of files in the filesystem.
Files() int64
// Open returns the file stored at the given path. It returns nil if the
// file doesn't exist. If the file is a symlink, the name is the name of
// the link, but it will read the contents of the linked file.
Open(path string) File
// ReadFile reads the content of the file at the given path and returns it
// as a byte slice, or an error.
ReadFile(path string) ([]byte, error)
// Stat returns info about the file at path. If the file doesn't exist, an error
// will be returned. If the file is a symlink, the info reports the name and mode
// of the link itself, but the modification time and size are of the linked file.
Stat(path string) (FileInfo, error)
// List lists all files that are currently on the filesystem.
List(path, pattern string) []FileInfo
// LookPath searches for an executable named file in the directories named by the PATH environment
// variable. If file contains a slash, it is tried directly and the PATH is not consulted. Otherwise,
// on success, the result is an absolute path. On non-disk filesystems, only the mere existence
// of that file is verified.
LookPath(file string) (string, error)
}
type WriteFilesystem interface {
// Symlink creates newname as a symbolic link to oldname.
Symlink(oldname, newname string) error
// Open returns the file stored at the given path. It returns nil if the
// file doesn't exist.
Open(path string) File
// Store adds a file to the filesystem. Returns the size of the data that has been
// WriteFileReader adds a file to the filesystem. Returns the size of the data that has been
// stored in bytes and whether the file is new. The size is negative if there was
// an error adding the file and error is not nil.
Store(path string, r io.Reader) (int64, bool, error)
WriteFileReader(path string, r io.Reader) (int64, bool, error)
// Delete removes a file at the given path from the filesystem. Returns the size of
// WriteFile adds a file to the filesystem. Returns the size of the data that has been
// stored in bytes and whether the file is new. The size is negative if there was
// an error adding the file and error is not nil.
WriteFile(path string, data []byte) (int64, bool, error)
// WriteFileSafe adds a file to the filesystem by first writing it to a tempfile and then
// renaming it to the actual path. Returns the size of the data that has been
// stored in bytes and whether the file is new. The size is negative if there was
// an error adding the file and error is not nil.
WriteFileSafe(path string, data []byte) (int64, bool, error)
// MkdirAll creates a directory named path, along with any necessary parents, and returns nil,
// or else returns an error. The permission bits perm (before umask) are used for all directories
// that MkdirAll creates. If path is already a directory, MkdirAll does nothing and returns nil.
MkdirAll(path string, perm os.FileMode) error
// Rename renames the file from src to dst. If src and dst can't be renamed
// regularly, the data is copied from src to dst. dst will be overwritten
// if it already exists. src will be removed after all data has been copied
// successfully. Both files exist during copying.
Rename(src, dst string) error
// Copy copies a file from src to dst.
Copy(src, dst string) error
// Remove removes a file at the given path from the filesystem. Returns the size of
// the removed file in bytes. The size is negative if the file doesn't exist.
Delete(path string) int64
Remove(path string) int64
// DeleteAll removes all files from the filesystem. Returns the size of the
// RemoveAll removes all files from the filesystem. Returns the size of the
// removed files in bytes.
DeleteAll() int64
// List lists all files that are currently on the filesystem.
List(pattern string) []FileInfo
RemoveAll() int64
}
// Filesystem is an interface that provides access to a filesystem.
type Filesystem interface {
ReadFilesystem
WriteFilesystem
// Name returns the name of the filesystem.
Name() string
// Type returns the type of the filesystem, e.g. disk, mem, s3
Type() string
Metadata(key string) string
SetMetadata(key string, data string)
}
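
A short usage sketch of the reworked interface, using the memory filesystem and the method names introduced in this diff (construction mirrors the tests above):

package main

import (
	"fmt"
	"strings"

	"github.com/datarhei/core/v16/io/fs"
)

func main() {
	memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
	if err != nil {
		panic(err)
	}

	// WriteFileReader reports the stored size and whether the file is new.
	size, created, err := memfs.WriteFileReader("/hello.txt", strings.NewReader("hello"))
	fmt.Println(size, created, err) // 5 true <nil>

	// ReadFile returns the file's content.
	data, err := memfs.ReadFile("/hello.txt")
	fmt.Println(string(data), err) // hello <nil>

	// Remove reports the size of the removed file; negative if it doesn't exist.
	fmt.Println(memfs.Remove("/hello.txt")) // 5
	fmt.Println(memfs.Remove("/hello.txt")) // -1
}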

742
io/fs/fs_test.go Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,406 +1,30 @@
package fs
import (
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNew(t *testing.T) {
mem := NewMemFilesystem(MemConfig{
Size: 10,
Purge: false,
})
func TestMemFromDir(t *testing.T) {
mem, err := NewMemFilesystemFromDir(".", MemConfig{})
require.NoError(t, err)
cur, max := mem.Size()
names := []string{}
for _, f := range mem.List("/", "/*.go") {
names = append(names, f.Name())
}
assert.Equal(t, int64(0), cur)
assert.Equal(t, int64(10), max)
cur = mem.Files()
assert.Equal(t, int64(0), cur)
}
func TestSimplePutNoPurge(t *testing.T) {
mem := NewMemFilesystem(MemConfig{
Size: 10,
Purge: false,
})
data := strings.NewReader("xxxxx")
size, created, err := mem.Store("/foobar", data)
assert.Nil(t, err)
assert.Equal(t, int64(5), size)
assert.Equal(t, true, created)
cur, max := mem.Size()
assert.Equal(t, int64(5), cur)
assert.Equal(t, int64(10), max)
cur = mem.Files()
assert.Equal(t, int64(1), cur)
}
func TestSimpleDelete(t *testing.T) {
mem := NewMemFilesystem(MemConfig{
Size: 10,
Purge: false,
})
size := mem.Delete("/foobar")
assert.Equal(t, int64(-1), size)
data := strings.NewReader("xxxxx")
mem.Store("/foobar", data)
size = mem.Delete("/foobar")
assert.Equal(t, int64(5), size)
cur, max := mem.Size()
assert.Equal(t, int64(0), cur)
assert.Equal(t, int64(10), max)
cur = mem.Files()
assert.Equal(t, int64(0), cur)
}
func TestReplaceNoPurge(t *testing.T) {
mem := NewMemFilesystem(MemConfig{
Size: 10,
Purge: false,
})
data := strings.NewReader("xxxxx")
size, created, err := mem.Store("/foobar", data)
assert.Nil(t, err)
assert.Equal(t, int64(5), size)
assert.Equal(t, true, created)
cur, max := mem.Size()
assert.Equal(t, int64(5), cur)
assert.Equal(t, int64(10), max)
cur = mem.Files()
assert.Equal(t, int64(1), cur)
data = strings.NewReader("yyy")
size, created, err = mem.Store("/foobar", data)
assert.Nil(t, err)
assert.Equal(t, int64(3), size)
assert.Equal(t, false, created)
cur, max = mem.Size()
assert.Equal(t, int64(3), cur)
assert.Equal(t, int64(10), max)
cur = mem.Files()
assert.Equal(t, int64(1), cur)
}
func TestReplacePurge(t *testing.T) {
mem := NewMemFilesystem(MemConfig{
Size: 10,
Purge: true,
})
data1 := strings.NewReader("xxx")
data2 := strings.NewReader("yyy")
data3 := strings.NewReader("zzz")
mem.Store("/foobar1", data1)
mem.Store("/foobar2", data2)
mem.Store("/foobar3", data3)
cur, max := mem.Size()
assert.Equal(t, int64(9), cur)
assert.Equal(t, int64(10), max)
cur = mem.Files()
assert.Equal(t, int64(3), cur)
data4 := strings.NewReader("zzzzz")
size, _, _ := mem.Store("/foobar1", data4)
assert.Equal(t, int64(5), size)
cur, max = mem.Size()
assert.Equal(t, int64(8), cur)
assert.Equal(t, int64(10), max)
cur = mem.Files()
assert.Equal(t, int64(2), cur)
}
func TestReplaceUnlimited(t *testing.T) {
mem := NewMemFilesystem(MemConfig{
Size: 0,
Purge: false,
})
data := strings.NewReader("xxxxx")
size, created, err := mem.Store("/foobar", data)
assert.Nil(t, err)
assert.Equal(t, int64(5), size)
assert.Equal(t, true, created)
cur, max := mem.Size()
assert.Equal(t, int64(5), cur)
assert.Equal(t, int64(0), max)
cur = mem.Files()
assert.Equal(t, int64(1), cur)
data = strings.NewReader("yyy")
size, created, err = mem.Store("/foobar", data)
assert.Nil(t, err)
assert.Equal(t, int64(3), size)
assert.Equal(t, false, created)
cur, max = mem.Size()
assert.Equal(t, int64(3), cur)
assert.Equal(t, int64(0), max)
cur = mem.Files()
assert.Equal(t, int64(1), cur)
}
func TestTooBigNoPurge(t *testing.T) {
mem := NewMemFilesystem(MemConfig{
Size: 10,
Purge: false,
})
data := strings.NewReader("xxxxxyyyyyz")
size, _, _ := mem.Store("/foobar", data)
assert.Equal(t, int64(-1), size)
}
func TestTooBigPurge(t *testing.T) {
mem := NewMemFilesystem(MemConfig{
Size: 10,
Purge: true,
})
data1 := strings.NewReader("xxxxx")
data2 := strings.NewReader("yyyyy")
mem.Store("/foobar1", data1)
mem.Store("/foobar2", data2)
data := strings.NewReader("xxxxxyyyyyz")
size, _, _ := mem.Store("/foobar", data)
assert.Equal(t, int64(-1), size)
}
func TestFullSpaceNoPurge(t *testing.T) {
mem := NewMemFilesystem(MemConfig{
Size: 10,
Purge: false,
})
data1 := strings.NewReader("xxxxx")
data2 := strings.NewReader("yyyyy")
mem.Store("/foobar1", data1)
mem.Store("/foobar2", data2)
cur, max := mem.Size()
assert.Equal(t, int64(10), cur)
assert.Equal(t, int64(10), max)
cur = mem.Files()
assert.Equal(t, int64(2), cur)
data3 := strings.NewReader("zzzzz")
size, _, _ := mem.Store("/foobar3", data3)
assert.Equal(t, int64(-1), size)
}
func TestFullSpacePurge(t *testing.T) {
mem := NewMemFilesystem(MemConfig{
Size: 10,
Purge: true,
})
data1 := strings.NewReader("xxxxx")
data2 := strings.NewReader("yyyyy")
mem.Store("/foobar1", data1)
mem.Store("/foobar2", data2)
cur, max := mem.Size()
assert.Equal(t, int64(10), cur)
assert.Equal(t, int64(10), max)
cur = mem.Files()
assert.Equal(t, int64(2), cur)
data3 := strings.NewReader("zzzzz")
size, _, _ := mem.Store("/foobar3", data3)
assert.Equal(t, int64(5), size)
cur, max = mem.Size()
assert.Equal(t, int64(10), cur)
assert.Equal(t, int64(10), max)
cur = mem.Files()
assert.Equal(t, int64(2), cur)
}
func TestFullSpacePurgeMulti(t *testing.T) {
mem := NewMemFilesystem(MemConfig{
Size: 10,
Purge: true,
})
data1 := strings.NewReader("xxx")
data2 := strings.NewReader("yyy")
data3 := strings.NewReader("zzz")
mem.Store("/foobar1", data1)
mem.Store("/foobar2", data2)
mem.Store("/foobar3", data3)
cur, max := mem.Size()
assert.Equal(t, int64(9), cur)
assert.Equal(t, int64(10), max)
cur = mem.Files()
assert.Equal(t, int64(3), cur)
data4 := strings.NewReader("zzzzz")
size, _, _ := mem.Store("/foobar4", data4)
assert.Equal(t, int64(5), size)
cur, max = mem.Size()
assert.Equal(t, int64(8), cur)
assert.Equal(t, int64(10), max)
cur = mem.Files()
assert.Equal(t, int64(2), cur)
}
func TestPurgeOrder(t *testing.T) {
mem := NewMemFilesystem(MemConfig{
Size: 10,
Purge: true,
})
data1 := strings.NewReader("xxxxx")
data2 := strings.NewReader("yyyyy")
data3 := strings.NewReader("zzzzz")
mem.Store("/foobar1", data1)
time.Sleep(1 * time.Second)
mem.Store("/foobar2", data2)
time.Sleep(1 * time.Second)
mem.Store("/foobar3", data3)
file := mem.Open("/foobar1")
assert.Nil(t, file)
}
func TestList(t *testing.T) {
mem := NewMemFilesystem(MemConfig{
Size: 10,
Purge: false,
})
data1 := strings.NewReader("a")
data2 := strings.NewReader("bb")
data3 := strings.NewReader("ccc")
data4 := strings.NewReader("dddd")
mem.Store("/foobar1", data1)
mem.Store("/foobar2", data2)
mem.Store("/foobar3", data3)
mem.Store("/foobar4", data4)
cur, max := mem.Size()
assert.Equal(t, int64(10), cur)
assert.Equal(t, int64(10), max)
cur = mem.Files()
assert.Equal(t, int64(4), cur)
files := mem.List("")
assert.Equal(t, 4, len(files))
}
func TestData(t *testing.T) {
mem := NewMemFilesystem(MemConfig{
Size: 10,
Purge: false,
})
data := "gduwotoxqb"
data1 := strings.NewReader(data)
mem.Store("/foobar", data1)
file := mem.Open("/foobar")
data2 := make([]byte, len(data)+1)
n, _ := file.Read(data2)
assert.Equal(t, len(data), n)
assert.Equal(t, []byte(data), data2[:n])
require.ElementsMatch(t, []string{
"/disk.go",
"/fs_test.go",
"/fs.go",
"/mem_test.go",
"/mem.go",
"/readonly_test.go",
"/readonly.go",
"/s3.go",
"/sized_test.go",
"/sized.go",
}, names)
}

54
io/fs/readonly.go Normal file
View File

@ -0,0 +1,54 @@
package fs
import (
"io"
"os"
)
type readOnlyFilesystem struct {
Filesystem
}
func NewReadOnlyFilesystem(fs Filesystem) (Filesystem, error) {
r := &readOnlyFilesystem{
Filesystem: fs,
}
return r, nil
}
func (r *readOnlyFilesystem) Symlink(oldname, newname string) error {
return os.ErrPermission
}
func (r *readOnlyFilesystem) WriteFileReader(path string, rd io.Reader) (int64, bool, error) {
return -1, false, os.ErrPermission
}
func (r *readOnlyFilesystem) WriteFile(path string, data []byte) (int64, bool, error) {
return -1, false, os.ErrPermission
}
func (r *readOnlyFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) {
return -1, false, os.ErrPermission
}
func (r *readOnlyFilesystem) MkdirAll(path string, perm os.FileMode) error {
return os.ErrPermission
}
func (r *readOnlyFilesystem) Remove(path string) int64 {
return -1
}
func (r *readOnlyFilesystem) RemoveAll() int64 {
return 0
}
func (r *readOnlyFilesystem) Purge(size int64) int64 {
return 0
}
func (r *readOnlyFilesystem) Resize(size int64) error {
return os.ErrPermission
}

50
io/fs/readonly_test.go Normal file
View File

@ -0,0 +1,50 @@
package fs
import (
"strings"
"testing"
"github.com/stretchr/testify/require"
)
func TestReadOnly(t *testing.T) {
mem, err := NewMemFilesystemFromDir(".", MemConfig{})
require.NoError(t, err)
ro, err := NewReadOnlyFilesystem(mem)
require.NoError(t, err)
err = ro.Symlink("/readonly.go", "/foobar.go")
require.Error(t, err)
_, _, err = ro.WriteFile("/readonly.go", []byte("foobar"))
require.Error(t, err)
_, _, err = ro.WriteFileReader("/readonly.go", strings.NewReader("foobar"))
require.Error(t, err)
_, _, err = ro.WriteFileSafe("/readonly.go", []byte("foobar"))
require.Error(t, err)
err = ro.MkdirAll("/foobar/baz", 0700)
require.Error(t, err)
res := ro.Remove("/readonly.go")
require.Equal(t, int64(-1), res)
res = ro.RemoveAll()
require.Equal(t, int64(0), res)
rop, ok := ro.(PurgeFilesystem)
require.True(t, ok, "must implement PurgeFilesystem")
size, _ := ro.Size()
res = rop.Purge(size)
require.Equal(t, int64(0), res)
ros, ok := ro.(SizedFilesystem)
require.True(t, ok, "must implement SizedFilesystem")
err = ros.Resize(100)
require.Error(t, err)
}

649
io/fs/s3.go Normal file

File diff suppressed because it is too large

168
io/fs/sized.go Normal file
View File

@ -0,0 +1,168 @@
package fs
import (
"bytes"
"fmt"
"io"
)
type SizedFilesystem interface {
Filesystem
// Resize resizes the filesystem to the new size. Files may need to be deleted.
Resize(size int64) error
}
type PurgeFilesystem interface {
// Purge will free up at least size number of bytes and returns the actual
// freed space in bytes.
Purge(size int64) int64
}
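// sizedFilesystem enforces a maximum total size on the wrapped filesystem.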
type sizedFilesystem struct {
Filesystem
// maxSize is the capacity of the filesystem in bytes
maxSize int64
// Set true to automatically delete the oldest files until there's
// enough space to store a new file
purge bool
}
var _ PurgeFilesystem = &sizedFilesystem{}
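// NewSizedFilesystem wraps the given filesystem such that its total size is
// capped at maxSize bytes. If purge is true, the oldest files are removed
// automatically to make room for new writes.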
func NewSizedFilesystem(fs Filesystem, maxSize int64, purge bool) (SizedFilesystem, error) {
r := &sizedFilesystem{
Filesystem: fs,
maxSize: maxSize,
purge: purge,
}
return r, nil
}
func (r *sizedFilesystem) Size() (int64, int64) {
currentSize, _ := r.Filesystem.Size()
return currentSize, r.maxSize
}
func (r *sizedFilesystem) Resize(size int64) error {
currentSize, _ := r.Size()
if size >= currentSize {
// If the new size is the same or larger than the current size,
// no files need to be purged.
r.maxSize = size
return nil
}
// If the new size is less than the current size, purge some files.
r.Purge(currentSize - size)
r.maxSize = size
return nil
}
func (r *sizedFilesystem) WriteFileReader(path string, rd io.Reader) (int64, bool, error) {
currentSize, maxSize := r.Size()
if maxSize <= 0 {
return r.Filesystem.WriteFileReader(path, rd)
}
data := bytes.Buffer{}
size, err := data.ReadFrom(rd)
if err != nil {
return -1, false, err
}
// Reject if the new file alone is larger than the filesystem's capacity
if size > maxSize {
return -1, false, fmt.Errorf("File is too big")
}
// Calculate the new size of the filesystem
newSize := currentSize + size
// If the new size is larger than the allowed size, we have to free
// some space.
if newSize > maxSize {
if !r.purge {
return -1, false, fmt.Errorf("not enough space on device")
}
if r.Purge(size) < size {
return -1, false, fmt.Errorf("not enough space on device")
}
}
return r.Filesystem.WriteFileReader(path, &data)
}
func (r *sizedFilesystem) WriteFile(path string, data []byte) (int64, bool, error) {
return r.WriteFileReader(path, bytes.NewBuffer(data))
}
func (r *sizedFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) {
currentSize, maxSize := r.Size()
if maxSize <= 0 {
return r.Filesystem.WriteFileSafe(path, data)
}
size := int64(len(data))
// Reject if the new file alone is larger than the filesystem's capacity
if size > maxSize {
return -1, false, fmt.Errorf("File is too big")
}
// Calculate the new size of the filesystem
newSize := currentSize + size
// If the new size is larger than the allowed size, we have to free
// some space.
if newSize > maxSize {
if !r.purge {
return -1, false, fmt.Errorf("not enough space on device")
}
if r.Purge(size) < size {
return -1, false, fmt.Errorf("not enough space on device")
}
}
return r.Filesystem.WriteFileSafe(path, data)
}
func (r *sizedFilesystem) Purge(size int64) int64 {
if purger, ok := r.Filesystem.(PurgeFilesystem); ok {
return purger.Purge(size)
}
return 0
/*
files := r.Filesystem.List("/", "")
sort.Slice(files, func(i, j int) bool {
return files[i].ModTime().Before(files[j].ModTime())
})
var freed int64 = 0
for _, f := range files {
r.Filesystem.Remove(f.Name())
size -= f.Size()
freed += f.Size()
r.currentSize -= f.Size()
if size <= 0 {
break
}
}
files = nil
return freed
*/
}

350
io/fs/sized_test.go Normal file
View File

@ -0,0 +1,350 @@
package fs
import (
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func newMemFS() Filesystem {
mem, _ := NewMemFilesystem(MemConfig{})
return mem
}
func TestNewSized(t *testing.T) {
fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
cur, max := fs.Size()
require.Equal(t, int64(0), cur)
require.Equal(t, int64(10), max)
cur = fs.Files()
require.Equal(t, int64(0), cur)
}
func TestSizedResize(t *testing.T) {
fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
cur, max := fs.Size()
require.Equal(t, int64(0), cur)
require.Equal(t, int64(10), max)
err := fs.Resize(20)
require.NoError(t, err)
cur, max = fs.Size()
require.Equal(t, int64(0), cur)
require.Equal(t, int64(20), max)
}
func TestSizedResizePurge(t *testing.T) {
fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
cur, max := fs.Size()
require.Equal(t, int64(0), cur)
require.Equal(t, int64(10), max)
fs.WriteFileReader("/foobar", strings.NewReader("xxxxxxxxxx"))
cur, max = fs.Size()
require.Equal(t, int64(10), cur)
require.Equal(t, int64(10), max)
err := fs.Resize(5)
require.NoError(t, err)
cur, max = fs.Size()
require.Equal(t, int64(0), cur)
require.Equal(t, int64(5), max)
}
func TestSizedWrite(t *testing.T) {
fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
cur, max := fs.Size()
require.Equal(t, int64(0), cur)
require.Equal(t, int64(10), max)
size, created, err := fs.WriteFileReader("/foobar", strings.NewReader("xxxxx"))
require.NoError(t, err)
require.Equal(t, int64(5), size)
require.Equal(t, true, created)
cur, max = fs.Size()
require.Equal(t, int64(5), cur)
require.Equal(t, int64(10), max)
_, _, err = fs.WriteFile("/foobaz", []byte("xxxxxx"))
require.Error(t, err)
_, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("xxxxxx"))
require.Error(t, err)
_, _, err = fs.WriteFileSafe("/foobaz", []byte("xxxxxx"))
require.Error(t, err)
}
func TestSizedReplaceNoPurge(t *testing.T) {
fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
data := strings.NewReader("xxxxx")
size, created, err := fs.WriteFileReader("/foobar", data)
require.Nil(t, err)
require.Equal(t, int64(5), size)
require.Equal(t, true, created)
cur, max := fs.Size()
require.Equal(t, int64(5), cur)
require.Equal(t, int64(10), max)
cur = fs.Files()
require.Equal(t, int64(1), cur)
data = strings.NewReader("yyy")
size, created, err = fs.WriteFileReader("/foobar", data)
require.Nil(t, err)
require.Equal(t, int64(3), size)
require.Equal(t, false, created)
cur, max = fs.Size()
require.Equal(t, int64(3), cur)
require.Equal(t, int64(10), max)
cur = fs.Files()
require.Equal(t, int64(1), cur)
}
func TestSizedReplacePurge(t *testing.T) {
fs, _ := NewSizedFilesystem(newMemFS(), 10, true)
data1 := strings.NewReader("xxx")
data2 := strings.NewReader("yyy")
data3 := strings.NewReader("zzz")
fs.WriteFileReader("/foobar1", data1)
fs.WriteFileReader("/foobar2", data2)
fs.WriteFileReader("/foobar3", data3)
cur, max := fs.Size()
require.Equal(t, int64(9), cur)
require.Equal(t, int64(10), max)
cur = fs.Files()
require.Equal(t, int64(3), cur)
data4 := strings.NewReader("zzzzz")
size, _, _ := fs.WriteFileReader("/foobar1", data4)
require.Equal(t, int64(5), size)
cur, max = fs.Size()
require.Equal(t, int64(8), cur)
require.Equal(t, int64(10), max)
cur = fs.Files()
require.Equal(t, int64(2), cur)
}
func TestSizedReplaceUnlimited(t *testing.T) {
fs, _ := NewSizedFilesystem(newMemFS(), -1, false)
data := strings.NewReader("xxxxx")
size, created, err := fs.WriteFileReader("/foobar", data)
require.Nil(t, err)
require.Equal(t, int64(5), size)
require.Equal(t, true, created)
cur, max := fs.Size()
require.Equal(t, int64(5), cur)
require.Equal(t, int64(-1), max)
cur = fs.Files()
require.Equal(t, int64(1), cur)
data = strings.NewReader("yyy")
size, created, err = fs.WriteFileReader("/foobar", data)
require.Nil(t, err)
require.Equal(t, int64(3), size)
require.Equal(t, false, created)
cur, max = fs.Size()
require.Equal(t, int64(3), cur)
require.Equal(t, int64(-1), max)
cur = fs.Files()
require.Equal(t, int64(1), cur)
}
func TestSizedTooBigNoPurge(t *testing.T) {
fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
data := strings.NewReader("xxxxxyyyyyz")
size, _, err := fs.WriteFileReader("/foobar", data)
require.Error(t, err)
require.Equal(t, int64(-1), size)
}
func TestSizedTooBigPurge(t *testing.T) {
fs, _ := NewSizedFilesystem(newMemFS(), 10, true)
data1 := strings.NewReader("xxxxx")
data2 := strings.NewReader("yyyyy")
fs.WriteFileReader("/foobar1", data1)
fs.WriteFileReader("/foobar2", data2)
data := strings.NewReader("xxxxxyyyyyz")
size, _, err := fs.WriteFileReader("/foobar", data)
require.Error(t, err)
require.Equal(t, int64(-1), size)
require.Equal(t, int64(2), fs.Files())
}
func TestSizedFullSpaceNoPurge(t *testing.T) {
fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
data1 := strings.NewReader("xxxxx")
data2 := strings.NewReader("yyyyy")
fs.WriteFileReader("/foobar1", data1)
fs.WriteFileReader("/foobar2", data2)
cur, max := fs.Size()
require.Equal(t, int64(10), cur)
require.Equal(t, int64(10), max)
cur = fs.Files()
require.Equal(t, int64(2), cur)
data3 := strings.NewReader("zzzzz")
size, _, err := fs.WriteFileReader("/foobar3", data3)
require.Error(t, err)
require.Equal(t, int64(-1), size)
}
func TestSizedFullSpacePurge(t *testing.T) {
fs, _ := NewSizedFilesystem(newMemFS(), 10, true)
data1 := strings.NewReader("xxxxx")
data2 := strings.NewReader("yyyyy")
fs.WriteFileReader("/foobar1", data1)
fs.WriteFileReader("/foobar2", data2)
cur, max := fs.Size()
require.Equal(t, int64(10), cur)
require.Equal(t, int64(10), max)
cur = fs.Files()
require.Equal(t, int64(2), cur)
data3 := strings.NewReader("zzzzz")
size, _, _ := fs.WriteFileReader("/foobar3", data3)
require.Equal(t, int64(5), size)
cur, max = fs.Size()
require.Equal(t, int64(10), cur)
require.Equal(t, int64(10), max)
cur = fs.Files()
require.Equal(t, int64(2), cur)
}
func TestSizedFullSpacePurgeMulti(t *testing.T) {
fs, _ := NewSizedFilesystem(newMemFS(), 10, true)
data1 := strings.NewReader("xxx")
data2 := strings.NewReader("yyy")
data3 := strings.NewReader("zzz")
fs.WriteFileReader("/foobar1", data1)
fs.WriteFileReader("/foobar2", data2)
fs.WriteFileReader("/foobar3", data3)
cur, max := fs.Size()
require.Equal(t, int64(9), cur)
require.Equal(t, int64(10), max)
cur = fs.Files()
require.Equal(t, int64(3), cur)
data4 := strings.NewReader("zzzzz")
size, _, _ := fs.WriteFileReader("/foobar4", data4)
require.Equal(t, int64(5), size)
cur, max = fs.Size()
require.Equal(t, int64(8), cur)
require.Equal(t, int64(10), max)
cur = fs.Files()
require.Equal(t, int64(2), cur)
}
func TestSizedPurgeOrder(t *testing.T) {
fs, _ := NewSizedFilesystem(newMemFS(), 10, true)
data1 := strings.NewReader("xxxxx")
data2 := strings.NewReader("yyyyy")
data3 := strings.NewReader("zzzzz")
fs.WriteFileReader("/foobar1", data1)
time.Sleep(1 * time.Second)
fs.WriteFileReader("/foobar2", data2)
time.Sleep(1 * time.Second)
fs.WriteFileReader("/foobar3", data3)
file := fs.Open("/foobar1")
require.Nil(t, file)
}

View File

@ -103,7 +103,6 @@ type Logger interface {
type logger struct {
output Writer
component string
topics map[string]struct{}
}
// New returns an implementation of the Logger interface.
@ -121,14 +120,6 @@ func (l *logger) clone() *logger {
component: l.component,
}
if len(l.topics) != 0 {
clone.topics = make(map[string]struct{})
for topic := range l.topics {
clone.topics[topic] = struct{}{}
}
}
return clone
}

View File

@ -5,15 +5,15 @@ import (
"bytes"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestLoglevelNames(t *testing.T) {
assert.Equal(t, "DEBUG", Ldebug.String())
assert.Equal(t, "ERROR", Lerror.String())
assert.Equal(t, "WARN", Lwarn.String())
assert.Equal(t, "INFO", Linfo.String())
assert.Equal(t, `SILENT`, Lsilent.String())
require.Equal(t, "DEBUG", Ldebug.String())
require.Equal(t, "ERROR", Lerror.String())
require.Equal(t, "WARN", Lwarn.String())
require.Equal(t, "INFO", Linfo.String())
require.Equal(t, `SILENT`, Lsilent.String())
}
func TestLogColorToNotTTY(t *testing.T) {
@ -23,7 +23,7 @@ func TestLogColorToNotTTY(t *testing.T) {
w := NewConsoleWriter(writer, Linfo, true).(*syncWriter)
formatter := w.writer.(*consoleWriter).formatter.(*consoleFormatter)
assert.NotEqual(t, true, formatter.color, "Color should not be used on a buffer logger")
require.NotEqual(t, true, formatter.color, "Color should not be used on a buffer logger")
}
func TestLogContext(t *testing.T) {
@ -53,7 +53,7 @@ func TestLogContext(t *testing.T) {
lenWithoutCtx := buffer.Len()
buffer.Reset()
assert.Greater(t, lenWithCtx, lenWithoutCtx, "Log line length without context is not shorter than with context")
require.Greater(t, lenWithCtx, lenWithoutCtx, "Log line length without context is not shorter than with context")
}
func TestLogClone(t *testing.T) {
@ -65,7 +65,7 @@ func TestLogClone(t *testing.T) {
logger.Info().Log("info")
writer.Flush()
assert.Contains(t, buffer.String(), `component="test"`)
require.Contains(t, buffer.String(), `component="test"`)
buffer.Reset()
@ -74,7 +74,7 @@ func TestLogClone(t *testing.T) {
logger2.Info().Log("info")
writer.Flush()
assert.Contains(t, buffer.String(), `component="tset"`)
require.Contains(t, buffer.String(), `component="tset"`)
}
func TestLogSilent(t *testing.T) {
@ -85,22 +85,22 @@ func TestLogSilent(t *testing.T) {
logger.Debug().Log("debug")
writer.Flush()
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
buffer.Reset()
logger.Info().Log("info")
writer.Flush()
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
buffer.Reset()
logger.Warn().Log("warn")
writer.Flush()
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
buffer.Reset()
logger.Error().Log("error")
writer.Flush()
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
buffer.Reset()
}
@ -112,22 +112,22 @@ func TestLogDebug(t *testing.T) {
logger.Debug().Log("debug")
writer.Flush()
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
buffer.Reset()
logger.Info().Log("info")
writer.Flush()
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
buffer.Reset()
logger.Warn().Log("warn")
writer.Flush()
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
buffer.Reset()
logger.Error().Log("error")
writer.Flush()
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
buffer.Reset()
}
@ -139,22 +139,22 @@ func TestLogInfo(t *testing.T) {
logger.Debug().Log("debug")
writer.Flush()
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
buffer.Reset()
logger.Info().Log("info")
writer.Flush()
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
buffer.Reset()
logger.Warn().Log("warn")
writer.Flush()
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
buffer.Reset()
logger.Error().Log("error")
writer.Flush()
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
buffer.Reset()
}
@ -166,22 +166,22 @@ func TestLogWarn(t *testing.T) {
logger.Debug().Log("debug")
writer.Flush()
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
buffer.Reset()
logger.Info().Log("info")
writer.Flush()
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
buffer.Reset()
logger.Warn().Log("warn")
writer.Flush()
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
buffer.Reset()
logger.Error().Log("error")
writer.Flush()
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
buffer.Reset()
}
@ -193,21 +193,43 @@ func TestLogError(t *testing.T) {
logger.Debug().Log("debug")
writer.Flush()
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
buffer.Reset()
logger.Info().Log("info")
writer.Flush()
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
buffer.Reset()
logger.Warn().Log("warn")
writer.Flush()
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
buffer.Reset()
logger.Error().Log("error")
writer.Flush()
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
buffer.Reset()
}
func TestLogWithField(t *testing.T) {
bufwriter := NewBufferWriter(Linfo, 10)
logger := New("test").WithOutput(bufwriter)
logger = logger.WithField("foo", "bar")
logger.Info().Log("hello")
events := bufwriter.Events()
require.Equal(t, 1, len(events))
require.Empty(t, events[0].err)
require.Equal(t, "bar", events[0].Data["foo"])
logger = logger.WithField("func", func() bool { return true })
logger.Info().Log("hello")
events = bufwriter.Events()
require.Equal(t, 2, len(events))
require.NotEmpty(t, events[1].err)
require.Equal(t, "bar", events[0].Data["foo"])
}

181
log/writer_test.go Normal file
View File

@ -0,0 +1,181 @@
package log
import (
"bytes"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestJSONWriter(t *testing.T) {
buffer := bytes.Buffer{}
writer := NewJSONWriter(&buffer, Linfo)
writer.Write(&Event{
logger: &logger{},
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
Level: Linfo,
Component: "test",
Caller: "me",
Message: "hello world",
err: "",
Data: map[string]interface{}{"foo": "bar"},
})
require.Equal(t, `{"Time":"2009-11-10T23:00:00Z","Level":"INFO","Component":"test","Caller":"me","Message":"hello world","Data":{"caller":"me","component":"test","foo":"bar","message":"hello world","ts":"2009-11-10T23:00:00Z"}}`, buffer.String())
}
func TestConsoleWriter(t *testing.T) {
buffer := bytes.Buffer{}
writer := NewConsoleWriter(&buffer, Linfo, false)
writer.Write(&Event{
logger: &logger{},
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
Level: Linfo,
Component: "test",
Caller: "me",
Message: "hello world",
err: "",
Data: map[string]interface{}{"foo": "bar"},
})
require.Equal(t, `ts=2009-11-10T23:00:00Z level=INFO component="test" msg="hello world" foo="bar"`+"\n", buffer.String())
}
func TestTopicWriter(t *testing.T) {
bufwriter := NewBufferWriter(Linfo, 10)
writer1 := NewTopicWriter(bufwriter, []string{})
writer2 := NewTopicWriter(bufwriter, []string{"foobar"})
writer1.Write(&Event{
logger: &logger{},
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
Level: Linfo,
Component: "test",
Caller: "me",
Message: "hello world",
err: "",
Data: map[string]interface{}{"foo": "bar"},
})
writer2.Write(&Event{
logger: &logger{},
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
Level: Linfo,
Component: "test",
Caller: "me",
Message: "hello world",
err: "",
Data: map[string]interface{}{"foo": "bar"},
})
require.Equal(t, 1, len(bufwriter.Events()))
writer1.Write(&Event{
logger: &logger{},
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
Level: Linfo,
Component: "foobar",
Caller: "me",
Message: "hello world",
err: "",
Data: map[string]interface{}{"foo": "bar"},
})
writer2.Write(&Event{
logger: &logger{},
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
Level: Linfo,
Component: "foobar",
Caller: "me",
Message: "hello world",
err: "",
Data: map[string]interface{}{"foo": "bar"},
})
require.Equal(t, 3, len(bufwriter.Events()))
}
func TestMultiwriter(t *testing.T) {
bufwriter1 := NewBufferWriter(Linfo, 10)
bufwriter2 := NewBufferWriter(Linfo, 10)
writer := NewMultiWriter(bufwriter1, bufwriter2)
writer.Write(&Event{
logger: &logger{},
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
Level: Linfo,
Component: "foobar",
Caller: "me",
Message: "hello world",
err: "",
Data: map[string]interface{}{"foo": "bar"},
})
require.Equal(t, 1, len(bufwriter1.Events()))
require.Equal(t, 1, len(bufwriter2.Events()))
}
func TestLevelRewriter(t *testing.T) {
bufwriter := NewBufferWriter(Linfo, 10)
rule := LevelRewriteRule{
Level: Lwarn,
Component: "foobar",
Match: map[string]string{
"foo": "bar",
},
}
writer := NewLevelRewriter(bufwriter, []LevelRewriteRule{rule})
writer.Write(&Event{
logger: &logger{},
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
Level: Linfo,
Component: "foobar",
Caller: "me",
Message: "hello world",
err: "",
Data: map[string]interface{}{"foo": "bar"},
})
events := bufwriter.Events()
require.Equal(t, 1, len(events))
require.Equal(t, Lwarn, events[0].Level)
writer.Write(&Event{
logger: &logger{},
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
Level: Linfo,
Component: "foobar",
Caller: "me",
Message: "hello world",
err: "",
Data: map[string]interface{}{"bar": "foo"},
})
events = bufwriter.Events()
require.Equal(t, 2, len(events))
require.Equal(t, Linfo, events[1].Level)
writer.Write(&Event{
logger: &logger{},
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
Level: Linfo,
Component: "test",
Caller: "me",
Message: "hello world",
err: "",
Data: map[string]interface{}{"foo": "bar"},
})
events = bufwriter.Events()
require.Equal(t, 3, len(events))
require.Equal(t, Linfo, events[2].Level)
}

View File

@ -5,6 +5,7 @@ import (
"os/signal"
"github.com/datarhei/core/v16/app/api"
"github.com/datarhei/core/v16/config/store"
"github.com/datarhei/core/v16/log"
_ "github.com/joho/godotenv/autoload"
@ -13,7 +14,9 @@ import (
func main() {
logger := log.New("Core").WithOutput(log.NewConsoleWriter(os.Stderr, log.Lwarn, true))
app, err := api.New(os.Getenv("CORE_CONFIGFILE"), os.Stderr)
configfile := store.Location(os.Getenv("CORE_CONFIGFILE"))
app, err := api.New(configfile, os.Stderr)
if err != nil {
logger.Error().WithError(err).Log("Failed to create new API")
os.Exit(1)

View File

@ -12,7 +12,7 @@ type Pattern interface {
Name() string
// Match returns whether a map of labels with their label values
// match this pattern.
// matches this pattern. All labels have to be present and need to match.
Match(labels map[string]string) bool
// IsValid returns whether the pattern is valid.
@ -26,7 +26,7 @@ type pattern struct {
}
// NewPattern creates a new pattern with the given prefix and group name. There
// has to be an even number of parameter, which is ("label", "labelvalue", "label",
// has to be an even number of labels, which is ("label", "labelvalue", "label",
// "labelvalue" ...). The label value will be interpreted as regular expression.
func NewPattern(name string, labels ...string) Pattern {
p := &pattern{
@ -38,7 +38,6 @@ func NewPattern(name string, labels ...string) Pattern {
for i := 0; i < len(labels); i += 2 {
exp, err := regexp.Compile(labels[i+1])
if err != nil {
fmt.Printf("error: %s\n", err)
continue
}
@ -84,19 +83,35 @@ func (p *pattern) IsValid() bool {
return p.valid
}
// Metrics is a collection of values
type Metrics interface {
// Value returns the first value that matches the name and the labels. The labels
// are used to create a pattern and therefore must obey the rules of NewPattern.
Value(name string, labels ...string) Value
// Values returns all values that match the name and the labels. The labels
// are used to create a pattern and therefore must obey the rules of NewPattern.
Values(name string, labels ...string) []Value
// Labels returns a list of all values for a label.
Labels(name string, label string) []string
// All returns all values currently stored in the collection.
All() []Value
// Add adds a value to the collection.
Add(v Value)
// String returns a string representation of all collected values.
String() string
}
// metrics is an implementation of the Metrics interface.
type metrics struct {
values []Value
}
// NewMetrics returns a new metrics instance.
func NewMetrics() *metrics {
return &metrics{}
}
@ -231,8 +246,15 @@ func (v *value) Hash() string {
func (v *value) String() string {
s := fmt.Sprintf("%s: %f {", v.name, v.value)
for k, v := range v.labels {
s += k + "=" + v + " "
keys := []string{}
for k := range v.labels {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
s += k + "=" + v.labels[k] + " "
}
s += "}"

View File

@ -2,25 +2,154 @@ package metric
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestValue(t *testing.T) {
d := NewDesc("group", "", []string{"name"})
v := NewValue(d, 42, "foobar")
func TestPattern(t *testing.T) {
p := NewPattern("bla", "label1", "value1", "label2")
require.Equal(t, false, p.IsValid())
if v.L("name") != "foobar" {
t.Fatalf("label name doesn't have the expected value")
}
p = NewPattern("bla", "label1", "value1", "label2", "valu(e2")
require.Equal(t, false, p.IsValid())
p = NewPattern("bla")
require.Equal(t, true, p.IsValid())
require.Equal(t, "bla", p.Name())
p = NewPattern("bla", "label1", "value1", "label2", "value2")
require.Equal(t, true, p.IsValid())
}
func TestPatternMatch(t *testing.T) {
p := NewPattern("bla", "label1", "value1", "label2")
require.Equal(t, false, p.IsValid())
require.Equal(t, false, p.Match(map[string]string{"label1": "value1"}))
p0 := NewPattern("bla")
require.Equal(t, true, p0.IsValid())
require.Equal(t, true, p0.Match(map[string]string{}))
require.Equal(t, true, p0.Match(map[string]string{"labelX": "foobar"}))
p = NewPattern("bla", "label1", "value.", "label2", "val?ue2")
require.Equal(t, true, p.IsValid())
require.Equal(t, false, p.Match(map[string]string{}))
require.Equal(t, false, p.Match(map[string]string{"label1": "value1"}))
require.Equal(t, true, p.Match(map[string]string{"label1": "value1", "label2": "value2"}))
require.Equal(t, true, p.Match(map[string]string{"label1": "value5", "label2": "vaue2"}))
}
func TestValue(t *testing.T) {
d := NewDesc("group", "", []string{"label1", "label2"})
v := NewValue(d, 42, "foobar")
require.Nil(t, v)
v = NewValue(d, 42, "foobar", "foobaz")
require.NotNil(t, v)
require.Equal(t, float64(42), v.Val())
require.Equal(t, "", v.L("labelX"))
require.Equal(t, "foobar", v.L("label1"))
require.Equal(t, "foobaz", v.L("label2"))
require.Equal(t, "group", v.Name())
require.Equal(t, "group:label1=foobar label2=foobaz ", v.Hash())
require.Equal(t, "group: 42.000000 {label1=foobar label2=foobaz }", v.String())
require.Equal(t, map[string]string{"label1": "foobar", "label2": "foobaz"}, v.Labels())
}
func TestValuePattern(t *testing.T) {
d := NewDesc("group", "", []string{"label1", "label2"})
v := NewValue(d, 42, "foobar", "foobaz")
p1 := NewPattern("group")
p2 := NewPattern("group", "label1", "foobar")
p3 := NewPattern("group", "label2", "foobaz")
p4 := NewPattern("group", "label2", "foobaz", "label1", "foobar")
if v.Match([]Pattern{p1}) == false {
t.Fatalf("pattern p1 should have matched")
}
require.Equal(t, true, v.Match(nil))
require.Equal(t, true, v.Match([]Pattern{p1}))
require.Equal(t, true, v.Match([]Pattern{p2}))
require.Equal(t, true, v.Match([]Pattern{p3}))
require.Equal(t, true, v.Match([]Pattern{p4}))
require.Equal(t, true, v.Match([]Pattern{p1, p2, p3, p4}))
p2 := NewPattern("group", "name", "foobar")
p5 := NewPattern("group", "label1", "foobaz")
if v.Match([]Pattern{p2}) == false {
t.Fatalf("pattern p2 should have matched")
}
require.Equal(t, false, v.Match([]Pattern{p5}))
require.Equal(t, true, v.Match([]Pattern{p4, p5}))
require.Equal(t, true, v.Match([]Pattern{p5, p4}))
}
func TestDescription(t *testing.T) {
d := NewDesc("name", "blabla", []string{"label"})
require.Equal(t, "name", d.Name())
require.Equal(t, "blabla", d.Description())
require.ElementsMatch(t, []string{"label"}, d.Labels())
require.Equal(t, "name: blabla (label)", d.String())
}
func TestMetrics(t *testing.T) {
m := NewMetrics()
require.Equal(t, "", m.String())
require.Equal(t, 0, len(m.All()))
d := NewDesc("group", "", []string{"label1", "label2"})
v1 := NewValue(d, 42, "foobar", "foobaz")
require.NotNil(t, v1)
m.Add(v1)
require.Equal(t, v1.String(), m.String())
require.Equal(t, 1, len(m.All()))
l := m.Labels("group", "label2")
require.ElementsMatch(t, []string{"foobaz"}, l)
v2 := NewValue(d, 77, "barfoo", "bazfoo")
m.Add(v2)
require.Equal(t, v1.String()+v2.String(), m.String())
require.Equal(t, 2, len(m.All()))
l = m.Labels("group", "label2")
require.ElementsMatch(t, []string{"foobaz", "bazfoo"}, l)
v := m.Value("bla", "label1", "foo*")
require.Equal(t, nullValue, v)
v = m.Value("group")
require.NotEqual(t, nullValue, v)
v = m.Value("group", "label1", "foo*")
require.NotEqual(t, nullValue, v)
v = m.Value("group", "label2", "baz")
require.NotEqual(t, nullValue, v)
vs := m.Values("group")
require.Equal(t, 2, len(vs))
vs = m.Values("group", "label1", "foo*")
require.Equal(t, 2, len(vs))
vs = m.Values("group", "label2", "*baz*")
require.NotEqual(t, 2, len(vs))
vs = m.Values("group", "label1")
require.Equal(t, 0, len(vs))
}

View File

@ -3,18 +3,27 @@ package net
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAnonymizeIPString(t *testing.T) {
_, err := AnonymizeIPString("127.987.475.21")
require.Error(t, err)
_, err = AnonymizeIPString("bbd1:xxxx")
require.Error(t, err)
_, err = AnonymizeIPString("hello-world")
require.Error(t, err)
ipv4 := "192.168.1.42"
ipv6 := "bbd1:e95a:adbb:b29a:e38b:577f:6f9a:1fa7"
anonymizedIPv4, err := AnonymizeIPString(ipv4)
assert.Nil(t, err)
assert.Equal(t, "192.168.1.0", anonymizedIPv4)
require.NoError(t, err)
require.Equal(t, "192.168.1.0", anonymizedIPv4)
anonymizedIPv6, err := AnonymizeIPString(ipv6)
assert.Nil(t, err)
assert.Equal(t, "bbd1:e95a:adbb:b29a::", anonymizedIPv6)
require.NoError(t, err)
require.Equal(t, "bbd1:e95a:adbb:b29a::", anonymizedIPv6)
}

View File

@ -3,57 +3,63 @@ package net
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestIPLimiterNew(t *testing.T) {
var err error
_, err = NewIPLimiter([]string{}, []string{})
assert.Nil(t, err)
require.Nil(t, err)
_, err = NewIPLimiter([]string{"::1/128", "127.0.0.1/32", ""}, []string{})
assert.Nil(t, err)
require.Nil(t, err)
_, err = NewIPLimiter([]string{}, []string{"::1/128", "127.0.0.1/32", ""})
assert.Nil(t, err)
require.Nil(t, err)
}
func TestIPLimiterError(t *testing.T) {
var err error
_, err = NewIPLimiter([]string{}, []string{})
assert.Nil(t, err)
require.Nil(t, err)
_, err = NewIPLimiter([]string{"::1"}, []string{})
assert.NotNil(t, err, "Should not accept invalid IP")
require.NotNil(t, err, "Should not accept invalid IP")
_, err = NewIPLimiter([]string{}, []string{"::1"})
assert.NotNil(t, err, "Should not accept invalid IP")
require.NotNil(t, err, "Should not accept invalid IP")
}
func TestIPLimiterInvalidIPs(t *testing.T) {
limiter, _ := NewIPLimiter([]string{}, []string{})
assert.False(t, limiter.IsAllowed(""), "Invalid IP shouldn't be allowed")
require.False(t, limiter.IsAllowed(""), "Invalid IP shouldn't be allowed")
}
func TestIPLimiterNoIPs(t *testing.T) {
limiter, _ := NewIPLimiter([]string{}, []string{})
assert.True(t, limiter.IsAllowed("127.0.0.1"), "IP should be allowed")
require.True(t, limiter.IsAllowed("127.0.0.1"), "IP should be allowed")
}
func TestIPLimiterAllowlist(t *testing.T) {
limiter, _ := NewIPLimiter([]string{}, []string{"::1/128"})
assert.False(t, limiter.IsAllowed("127.0.0.1"), "Unallowed IP shouldn't be allowed")
assert.True(t, limiter.IsAllowed("::1"), "Allowed IP should be allowed")
require.False(t, limiter.IsAllowed("127.0.0.1"), "Unallowed IP shouldn't be allowed")
require.True(t, limiter.IsAllowed("::1"), "Allowed IP should be allowed")
}
func TestIPLimiterBlocklist(t *testing.T) {
limiter, _ := NewIPLimiter([]string{"::1/128"}, []string{})
assert.True(t, limiter.IsAllowed("127.0.0.1"), "Allowed IP should be allowed")
assert.False(t, limiter.IsAllowed("::1"), "Unallowed IP shouldn't be allowed")
require.True(t, limiter.IsAllowed("127.0.0.1"), "Allowed IP should be allowed")
require.False(t, limiter.IsAllowed("::1"), "Unallowed IP shouldn't be allowed")
}
func TestNullIPLimiter(t *testing.T) {
limiter := NewNullIPLimiter()
require.True(t, limiter.IsAllowed("foobar"))
}

View File

@ -3,19 +3,30 @@ package net
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNewPortrange(t *testing.T) {
_, err := NewPortrange(1000, 1999)
assert.Nil(t, err, "Valid port range not accepted: %s", err)
require.Nil(t, err, "Valid port range not accepted: %s", err)
}
func TestInvalidPortrange(t *testing.T) {
_, err := NewPortrange(1999, 1000)
assert.NotNil(t, err, "Invalid port range accepted")
require.NotNil(t, err, "Invalid port range accepted")
}
func TestOutOfRangePortrange(t *testing.T) {
p, err := NewPortrange(-1, 70000)
require.NoError(t, err)
portrange := p.(*portrange)
require.Equal(t, 1, portrange.min)
require.Equal(t, 65535, len(portrange.ports))
}
func TestGetPort(t *testing.T) {
@ -23,26 +34,26 @@ func TestGetPort(t *testing.T) {
port, err := portrange.Get()
assert.Nil(t, err)
assert.Equal(t, 1000, port)
require.Nil(t, err)
require.Equal(t, 1000, port)
}
func TestGetPutPort(t *testing.T) {
portrange, _ := NewPortrange(1000, 1999)
port, err := portrange.Get()
assert.Nil(t, err)
assert.Equal(t, 1000, port)
require.Nil(t, err)
require.Equal(t, 1000, port)
port, err = portrange.Get()
assert.Nil(t, err)
assert.Equal(t, 1001, port)
require.Nil(t, err)
require.Equal(t, 1001, port)
portrange.Put(1000)
port, err = portrange.Get()
assert.Nil(t, err)
assert.Equal(t, 1000, port)
require.Nil(t, err)
require.Equal(t, 1000, port)
}
func TestPortUnavailable(t *testing.T) {
@ -50,12 +61,12 @@ func TestPortUnavailable(t *testing.T) {
for i := 0; i < 1000; i++ {
port, _ := portrange.Get()
assert.Equal(t, 1000+i, port, "at index %d", i)
require.Equal(t, 1000+i, port, "at index %d", i)
}
port, err := portrange.Get()
assert.NotNil(t, err)
assert.Less(t, port, 0)
require.NotNil(t, err)
require.Less(t, port, 0)
}
func TestPutPort(t *testing.T) {
@ -73,16 +84,27 @@ func TestClampRange(t *testing.T) {
port, _ := portrange.Get()
assert.Equal(t, 65000, port)
require.Equal(t, 65000, port)
portrange.Put(65000)
for i := 65000; i <= 65535; i++ {
port, _ := portrange.Get()
assert.Equal(t, i, port, "at index %d", i)
require.Equal(t, i, port, "at index %d", i)
}
port, _ = portrange.Get()
assert.Less(t, port, 0)
require.Less(t, port, 0)
}
func TestDummyPortranger(t *testing.T) {
portrange := NewDummyPortrange()
port, err := portrange.Get()
require.Error(t, err)
require.Equal(t, 0, port)
portrange.Put(42)
}

View File

@ -7,9 +7,20 @@ import (
)
func TestLookup(t *testing.T) {
_, err := Lookup("https://www.google.com")
ip, err := Lookup("/localhost:8080/foobar")
require.NoError(t, err)
require.Equal(t, "", ip)
ip, err = Lookup("http://")
require.NoError(t, err)
require.Equal(t, "", ip)
ip, err = Lookup("https://www.google.com")
require.NoError(t, err)
require.NotEmpty(t, ip)
}
func TestLocalhost(t *testing.T) {
@ -18,3 +29,22 @@ func TestLocalhost(t *testing.T) {
require.NoError(t, err)
require.Subset(t, []string{"127.0.0.1", "::1"}, []string{ip})
}
func TestValidate(t *testing.T) {
err := Validate("http://localhost/foobar")
require.NoError(t, err)
err = Validate("foobar")
require.NoError(t, err)
}
func TestScheme(t *testing.T) {
r := HasScheme("http://localhost/foobar")
require.True(t, r)
r = HasScheme("iueriherfd://localhost/foobar")
require.True(t, r)
r = HasScheme("//localhost/foobar")
require.False(t, r)
}

View File

@ -192,6 +192,7 @@ type process struct {
onStart func()
onExit func()
onStateChange func(from, to string)
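// lock guards the callback fields: stop() swaps onExit while waiter() invokes it.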
lock sync.Mutex
}
limits Limiter
}
@ -588,6 +589,7 @@ func (p *process) stop(wait bool) error {
if wait {
wg.Add(1)
p.callbacks.lock.Lock()
if p.callbacks.onExit == nil {
p.callbacks.onExit = func() {
wg.Done()
@ -601,6 +603,7 @@ func (p *process) stop(wait bool) error {
p.callbacks.onExit = cb
}
}
p.callbacks.lock.Unlock()
}
var err error
@ -829,10 +832,12 @@ func (p *process) waiter() {
// Reset the parser stats
p.parser.ResetStats()
// Call the onStop callback
// Call the onExit callback
p.callbacks.lock.Lock()
if p.callbacks.onExit != nil {
go p.callbacks.onExit()
}
p.callbacks.lock.Unlock()
p.order.lock.Lock()
defer p.order.lock.Unlock()

View File

@ -98,7 +98,7 @@ func (p *process) cpuTimes() (*cpuTimesStat, error) {
}
s := &cpuTimesStat{
total: times.Total(),
total: cpuTotal(times),
system: times.System,
user: times.User,
}

View File

@ -285,7 +285,7 @@ func (u *util) cpuTimes() (*cpuTimesStat, error) {
}
s := &cpuTimesStat{
total: times[0].Total(),
total: cpuTotal(&times[0]),
system: times[0].System,
user: times[0].User,
idle: times[0].Idle,
@ -496,3 +496,8 @@ func (u *util) readFile(path string) ([]string, error) {
return lines, nil
}
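// cpuTotal sums all CPU time buckets reported by gopsutil into a single
// total, making explicit which fields are counted instead of relying on
// cpu.TimesStat.Total().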
func cpuTotal(c *cpu.TimesStat) float64 {
return c.User + c.System + c.Idle + c.Nice + c.Iowait + c.Irq +
c.Softirq + c.Steal + c.Guest + c.GuestNice
}

View File

@ -2,7 +2,6 @@ package app
import (
"github.com/datarhei/core/v16/process"
"github.com/datarhei/core/v16/restream/replace"
)
type ConfigIOCleanup struct {
@ -80,79 +79,6 @@ func (config *Config) Clone() *Config {
return clone
}
// ReplacePlaceholders replaces all placeholders in the config. The config
// will be modified in place.
func (config *Config) ResolvePlaceholders(r replace.Replacer) {
for i, option := range config.Options {
// Replace any known placeholders
option = r.Replace(option, "diskfs", "")
config.Options[i] = option
}
// Resolving the given inputs
for i, input := range config.Input {
// Replace any known placeholders
input.ID = r.Replace(input.ID, "processid", config.ID)
input.ID = r.Replace(input.ID, "reference", config.Reference)
input.Address = r.Replace(input.Address, "inputid", input.ID)
input.Address = r.Replace(input.Address, "processid", config.ID)
input.Address = r.Replace(input.Address, "reference", config.Reference)
input.Address = r.Replace(input.Address, "diskfs", "")
input.Address = r.Replace(input.Address, "memfs", "")
input.Address = r.Replace(input.Address, "rtmp", "")
input.Address = r.Replace(input.Address, "srt", "")
for j, option := range input.Options {
// Replace any known placeholders
option = r.Replace(option, "inputid", input.ID)
option = r.Replace(option, "processid", config.ID)
option = r.Replace(option, "reference", config.Reference)
option = r.Replace(option, "diskfs", "")
option = r.Replace(option, "memfs", "")
input.Options[j] = option
}
config.Input[i] = input
}
// Resolving the given outputs
for i, output := range config.Output {
// Replace any known placeholders
output.ID = r.Replace(output.ID, "processid", config.ID)
output.Address = r.Replace(output.Address, "outputid", output.ID)
output.Address = r.Replace(output.Address, "processid", config.ID)
output.Address = r.Replace(output.Address, "reference", config.Reference)
output.Address = r.Replace(output.Address, "diskfs", "")
output.Address = r.Replace(output.Address, "memfs", "")
output.Address = r.Replace(output.Address, "rtmp", "")
output.Address = r.Replace(output.Address, "srt", "")
for j, option := range output.Options {
// Replace any known placeholders
option = r.Replace(option, "outputid", output.ID)
option = r.Replace(option, "processid", config.ID)
option = r.Replace(option, "reference", config.Reference)
option = r.Replace(option, "diskfs", "")
option = r.Replace(option, "memfs", "")
output.Options[j] = option
}
for j, cleanup := range output.Cleanup {
// Replace any known placeholders
cleanup.Pattern = r.Replace(cleanup.Pattern, "outputid", output.ID)
cleanup.Pattern = r.Replace(cleanup.Pattern, "processid", config.ID)
cleanup.Pattern = r.Replace(cleanup.Pattern, "reference", config.Reference)
output.Cleanup[j] = cleanup
}
config.Output[i] = output
}
}
// CreateCommand creates the FFmpeg command from this config.
func (config *Config) CreateCommand() []string {
var command []string

View File

@ -62,6 +62,11 @@ func New(config Config) Filesystem {
rfs.logger = log.New("")
}
rfs.logger = rfs.logger.WithFields(log.Fields{
"name": config.FS.Name(),
"type": config.FS.Type(),
})
rfs.cleanupPatterns = make(map[string][]Pattern)
// already drain the stop
@ -130,7 +135,7 @@ func (rfs *filesystem) cleanup() {
for _, patterns := range rfs.cleanupPatterns {
for _, pattern := range patterns {
filesAndDirs := rfs.Filesystem.List(pattern.Pattern)
filesAndDirs := rfs.Filesystem.List("/", pattern.Pattern)
files := []fs.FileInfo{}
for _, f := range filesAndDirs {
@ -146,7 +151,7 @@ func (rfs *filesystem) cleanup() {
if pattern.MaxFiles > 0 && uint(len(files)) > pattern.MaxFiles {
for i := uint(0); i < uint(len(files))-pattern.MaxFiles; i++ {
rfs.logger.Debug().WithField("path", files[i].Name()).Log("Remove file because MaxFiles is exceeded")
rfs.Filesystem.Delete(files[i].Name())
rfs.Filesystem.Remove(files[i].Name())
}
}
@ -156,7 +161,7 @@ func (rfs *filesystem) cleanup() {
for _, f := range files {
if f.ModTime().Before(bestBefore) {
rfs.logger.Debug().WithField("path", f.Name()).Log("Remove file because MaxFileAge is exceeded")
rfs.Filesystem.Delete(f.Name())
rfs.Filesystem.Remove(f.Name())
}
}
}
@ -170,11 +175,11 @@ func (rfs *filesystem) purge(patterns []Pattern) (nfiles uint64) {
continue
}
files := rfs.Filesystem.List(pattern.Pattern)
files := rfs.Filesystem.List("/", pattern.Pattern)
sort.Slice(files, func(i, j int) bool { return len(files[i].Name()) > len(files[j].Name()) })
for _, f := range files {
rfs.logger.Debug().WithField("path", f.Name()).Log("Purging file")
rfs.Filesystem.Delete(f.Name())
rfs.Filesystem.Remove(f.Name())
nfiles++
}
}

View File

@ -10,11 +10,7 @@ import (
)
func TestMaxFiles(t *testing.T) {
memfs := fs.NewMemFilesystem(fs.MemConfig{
Base: "/",
Size: 1024,
Purge: false,
})
memfs, _ := fs.NewMemFilesystem(fs.MemConfig{})
cleanfs := New(Config{
FS: memfs,
@ -30,15 +26,15 @@ func TestMaxFiles(t *testing.T) {
},
})
cleanfs.Store("/chunk_0.ts", strings.NewReader("chunk_0"))
cleanfs.Store("/chunk_1.ts", strings.NewReader("chunk_1"))
cleanfs.Store("/chunk_2.ts", strings.NewReader("chunk_2"))
cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0"))
cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1"))
cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2"))
require.Eventually(t, func() bool {
return cleanfs.Files() == 3
}, 3*time.Second, time.Second)
cleanfs.Store("/chunk_3.ts", strings.NewReader("chunk_3"))
cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3"))
require.Eventually(t, func() bool {
if cleanfs.Files() != 3 {
@ -47,7 +43,7 @@ func TestMaxFiles(t *testing.T) {
names := []string{}
for _, f := range cleanfs.List("/*.ts") {
for _, f := range cleanfs.List("/", "/*.ts") {
names = append(names, f.Name())
}
@ -60,11 +56,7 @@ func TestMaxFiles(t *testing.T) {
}
func TestMaxAge(t *testing.T) {
memfs := fs.NewMemFilesystem(fs.MemConfig{
Base: "/",
Size: 1024,
Purge: false,
})
memfs, _ := fs.NewMemFilesystem(fs.MemConfig{})
cleanfs := New(Config{
FS: memfs,
@ -80,15 +72,15 @@ func TestMaxAge(t *testing.T) {
},
})
cleanfs.Store("/chunk_0.ts", strings.NewReader("chunk_0"))
cleanfs.Store("/chunk_1.ts", strings.NewReader("chunk_1"))
cleanfs.Store("/chunk_2.ts", strings.NewReader("chunk_2"))
cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0"))
cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1"))
cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2"))
require.Eventually(t, func() bool {
return cleanfs.Files() == 0
}, 5*time.Second, time.Second)
cleanfs.Store("/chunk_3.ts", strings.NewReader("chunk_3"))
cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3"))
require.Eventually(t, func() bool {
if cleanfs.Files() != 1 {
@ -97,7 +89,7 @@ func TestMaxAge(t *testing.T) {
names := []string{}
for _, f := range cleanfs.List("/*.ts") {
for _, f := range cleanfs.List("/", "/*.ts") {
names = append(names, f.Name())
}
@ -110,11 +102,7 @@ func TestMaxAge(t *testing.T) {
}
func TestUnsetCleanup(t *testing.T) {
memfs := fs.NewMemFilesystem(fs.MemConfig{
Base: "/",
Size: 1024,
Purge: false,
})
memfs, _ := fs.NewMemFilesystem(fs.MemConfig{})
cleanfs := New(Config{
FS: memfs,
@ -130,15 +118,15 @@ func TestUnsetCleanup(t *testing.T) {
},
})
cleanfs.Store("/chunk_0.ts", strings.NewReader("chunk_0"))
cleanfs.Store("/chunk_1.ts", strings.NewReader("chunk_1"))
cleanfs.Store("/chunk_2.ts", strings.NewReader("chunk_2"))
cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0"))
cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1"))
cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2"))
require.Eventually(t, func() bool {
return cleanfs.Files() == 3
}, 3*time.Second, time.Second)
cleanfs.Store("/chunk_3.ts", strings.NewReader("chunk_3"))
cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3"))
require.Eventually(t, func() bool {
if cleanfs.Files() != 3 {
@ -147,7 +135,7 @@ func TestUnsetCleanup(t *testing.T) {
names := []string{}
for _, f := range cleanfs.List("/*.ts") {
for _, f := range cleanfs.List("/", "/*.ts") {
names = append(names, f.Name())
}
@ -158,7 +146,7 @@ func TestUnsetCleanup(t *testing.T) {
cleanfs.UnsetCleanup("foobar")
cleanfs.Store("/chunk_4.ts", strings.NewReader("chunk_4"))
cleanfs.WriteFileReader("/chunk_4.ts", strings.NewReader("chunk_4"))
require.Eventually(t, func() bool {
if cleanfs.Files() != 4 {
@ -167,7 +155,7 @@ func TestUnsetCleanup(t *testing.T) {
names := []string{}
for _, f := range cleanfs.List("/*.ts") {
for _, f := range cleanfs.List("/", "/*.ts") {
names = append(names, f.Name())
}

View File

@ -4,17 +4,23 @@ import (
"net/url"
"regexp"
"strings"
"github.com/datarhei/core/v16/glob"
"github.com/datarhei/core/v16/restream/app"
)
type TemplateFn func(config *app.Config, section string) string
type Replacer interface {
// RegisterTemplate registers a template for a specific placeholder. Template
// may contain placeholders as well of the form {name}. They will be replaced
// by the parameters of the placeholder (see Replace).
RegisterTemplate(placeholder, template string)
// by the parameters of the placeholder (see Replace). If a parameter of a
// template is not present, the provided default values are used.
RegisterTemplate(placeholder, template string, defaults map[string]string)
// RegisterTemplateFunc does the same as RegisterTemplate, but the template
// is returned by the template function.
RegisterTemplateFunc(placeholder string, template func() string)
RegisterTemplateFunc(placeholder string, template TemplateFn, defaults map[string]string)
// Replace replaces all occurrences of placeholder in str with value. The placeholder is of the
// form {placeholder}. It is possible to escape characters in value with \\ by appending a ^
@ -24,12 +30,18 @@ type Replacer interface {
// the value of the corresponding key in the parameters.
// If the value is an empty string, the registered templates will be searched for that
// placeholder. If no template is found, the placeholder will be replaced by the empty string.
// A placeholder name may consist on of the letters a-z.
Replace(str, placeholder, value string) string
// A placeholder name may consist of the letters a-z and ':'. The placeholder may contain
// a glob pattern to find the appropriate template.
Replace(str, placeholder, value string, vars map[string]string, config *app.Config, section string) string
}
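// Illustrative examples of the syntax described above, using the template
// registered in the tests below ("foo:bar" -> "Hello {who}! {what}?"):
//
//	{foo:bar}                  resolved via the registered template
//	{foo:bar,who=World}        template parameter "who" is set to "World"
//	{foo:bar^:,what=E%3dmc:2}  ':' in the resolved value is escaped with \\
//
// The placeholder argument passed to Replace may itself be a glob, e.g.
// "foo:*" matches both {foo:bar} and {foo:baz}.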
type template struct {
fn TemplateFn
defaults map[string]string
}
type replacer struct {
templates map[string]func() string
templates map[string]template
re *regexp.Regexp
templateRe *regexp.Regexp
@ -38,41 +50,51 @@ type replacer struct {
// New returns a Replacer
func New() Replacer {
r := &replacer{
templates: make(map[string]func() string),
re: regexp.MustCompile(`{([a-z]+)(?:\^(.))?(?:,(.*?))?}`),
templateRe: regexp.MustCompile(`{([a-z]+)}`),
templates: make(map[string]template),
re: regexp.MustCompile(`{([a-z:]+)(?:\^(.))?(?:,(.*?))?}`),
templateRe: regexp.MustCompile(`{([a-z:]+)}`),
}
return r
}
func (r *replacer) RegisterTemplate(placeholder, template string) {
r.templates[placeholder] = func() string { return template }
func (r *replacer) RegisterTemplate(placeholder, tmpl string, defaults map[string]string) {
r.RegisterTemplateFunc(placeholder, func(*app.Config, string) string { return tmpl }, defaults)
}
func (r *replacer) RegisterTemplateFunc(placeholder string, template func() string) {
r.templates[placeholder] = template
func (r *replacer) RegisterTemplateFunc(placeholder string, templateFn TemplateFn, defaults map[string]string) {
r.templates[placeholder] = template{
fn: templateFn,
defaults: defaults,
}
}
func (r *replacer) Replace(str, placeholder, value string) string {
func (r *replacer) Replace(str, placeholder, value string, vars map[string]string, config *app.Config, section string) string {
str = r.re.ReplaceAllStringFunc(str, func(match string) string {
matches := r.re.FindStringSubmatch(match)
if matches[1] != placeholder {
if ok, _ := glob.Match(placeholder, matches[1], ':'); !ok {
return match
}
placeholder := matches[1]
// We need a copy of the value
v := value
var tmpl template = template{
fn: func(*app.Config, string) string { return v },
}
// Check for a registered template
if len(v) == 0 {
tmplFunc, ok := r.templates[placeholder]
t, ok := r.templates[placeholder]
if ok {
v = tmplFunc()
tmpl = t
}
}
v = r.compileTemplate(v, matches[3])
v = tmpl.fn(config, section)
v = r.compileTemplate(v, matches[3], vars, tmpl.defaults)
if len(matches[2]) != 0 {
// If there's a character to escape, we also have to escape the
@ -97,13 +119,18 @@ func (r *replacer) Replace(str, placeholder, value string) string {
// placeholder name and will be replaced with the value. The resulting string is "Hello World!".
// If a placeholder name is not present in the params string, it will not be replaced. The key
// and values can be escaped as in net/url.QueryEscape.
func (r *replacer) compileTemplate(str, params string) string {
if len(params) == 0 {
func (r *replacer) compileTemplate(str, params string, vars map[string]string, defaults map[string]string) string {
if len(params) == 0 && len(defaults) == 0 {
return str
}
p := make(map[string]string)
// Copy the defaults
for key, value := range defaults {
p[key] = value
}
// taken from net/url.ParseQuery
for params != "" {
var key string
@ -111,15 +138,22 @@ func (r *replacer) compileTemplate(str, params string) string {
if key == "" {
continue
}
key, value, _ := strings.Cut(key, "=")
key, err := url.QueryUnescape(key)
if err != nil {
continue
}
value, err = url.QueryUnescape(value)
if err != nil {
continue
}
for name, v := range vars {
value = strings.ReplaceAll(value, "$"+name, v)
}
p[key] = value
}

View File

@ -3,6 +3,7 @@ package replace
import (
"testing"
"github.com/datarhei/core/v16/restream/app"
"github.com/stretchr/testify/require"
)
@ -24,28 +25,56 @@ func TestReplace(t *testing.T) {
r := New()
for _, e := range samples {
replaced := r.Replace(e[0], "foobar", foobar)
replaced := r.Replace(e[0], "foobar", foobar, nil, nil, "")
require.Equal(t, e[1], replaced, e[0])
}
replaced := r.Replace("{foobar}", "foobar", "")
replaced := r.Replace("{foobar}", "foobar", "", nil, nil, "")
require.Equal(t, "", replaced)
}
func TestReplaceTemplate(t *testing.T) {
r := New()
r.RegisterTemplate("foobar", "Hello {who}! {what}?")
r.RegisterTemplate("foo:bar", "Hello {who}! {what}?", nil)
replaced := r.Replace("{foobar,who=World}", "foobar", "")
replaced := r.Replace("{foo:bar,who=World}", "foo:bar", "", nil, nil, "")
require.Equal(t, "Hello World! {what}?", replaced)
replaced = r.Replace("{foobar,who=World,what=E%3dmc^2}", "foobar", "")
replaced = r.Replace("{foo:bar,who=World,what=E%3dmc^2}", "foo:bar", "", nil, nil, "")
require.Equal(t, "Hello World! E=mc^2?", replaced)
replaced = r.Replace("{foobar^:,who=World,what=E%3dmc:2}", "foobar", "")
replaced = r.Replace("{foo:bar^:,who=World,what=E%3dmc:2}", "foo:bar", "", nil, nil, "")
require.Equal(t, "Hello World! E=mc\\\\:2?", replaced)
}
func TestReplaceTemplateFunc(t *testing.T) {
r := New()
r.RegisterTemplateFunc("foo:bar", func(config *app.Config, kind string) string { return "Hello {who}! {what}?" }, nil)
replaced := r.Replace("{foo:bar,who=World}", "foo:bar", "", nil, nil, "")
require.Equal(t, "Hello World! {what}?", replaced)
replaced = r.Replace("{foo:bar,who=World,what=E%3dmc^2}", "foo:bar", "", nil, nil, "")
require.Equal(t, "Hello World! E=mc^2?", replaced)
replaced = r.Replace("{foo:bar^:,who=World,what=E%3dmc:2}", "foo:bar", "", nil, nil, "")
require.Equal(t, "Hello World! E=mc\\\\:2?", replaced)
}
func TestReplaceTemplateDefaults(t *testing.T) {
r := New()
r.RegisterTemplate("foobar", "Hello {who}! {what}?", map[string]string{
"who": "someone",
"what": "something",
})
replaced := r.Replace("{foobar}", "foobar", "", nil, nil, "")
require.Equal(t, "Hello someone! something?", replaced)
replaced = r.Replace("{foobar,who=World}", "foobar", "", nil, nil, "")
require.Equal(t, "Hello World! something?", replaced)
}
func TestReplaceCompileTemplate(t *testing.T) {
samples := [][3]string{
{"Hello {who}!", "who=World", "Hello World!"},
@ -58,7 +87,58 @@ func TestReplaceCompileTemplate(t *testing.T) {
r := New().(*replacer)
for _, e := range samples {
replaced := r.compileTemplate(e[0], e[1])
replaced := r.compileTemplate(e[0], e[1], nil, nil)
require.Equal(t, e[2], replaced, e[0])
}
}
func TestReplaceCompileTemplateDefaults(t *testing.T) {
samples := [][3]string{
{"Hello {who}!", "", "Hello someone!"},
{"Hello {who}!", "who=World", "Hello World!"},
{"Hello {who}! {what}?", "who=World", "Hello World! something?"},
{"Hello {who}! {what}?", "who=World,what=Yeah", "Hello World! Yeah?"},
{"Hello {who}! {what}?", "who=World,what=", "Hello World! ?"},
}
r := New().(*replacer)
for _, e := range samples {
replaced := r.compileTemplate(e[0], e[1], nil, map[string]string{
"who": "someone",
"what": "something",
})
require.Equal(t, e[2], replaced, e[0])
}
}
func TestReplaceCompileTemplateWithVars(t *testing.T) {
samples := [][3]string{
{"Hello {who}!", "who=$processid", "Hello 123456789!"},
{"Hello {who}! {what}?", "who=$location", "Hello World! {what}?"},
{"Hello {who}! {what}?", "who=$location,what=Yeah", "Hello World! Yeah?"},
{"Hello {who}! {what}?", "who=$location,what=$processid", "Hello World! 123456789?"},
{"Hello {who}!", "who=$processidxxx", "Hello 123456789xxx!"},
}
vars := map[string]string{
"processid": "123456789",
"location": "World",
}
r := New().(*replacer)
for _, e := range samples {
replaced := r.compileTemplate(e[0], e[1], vars, nil)
require.Equal(t, e[2], replaced, e[0])
}
}
func TestReplaceGlob(t *testing.T) {
r := New()
r.RegisterTemplate("foo:bar", "Hello foobar", nil)
r.RegisterTemplate("foo:baz", "Hello foobaz", nil)
replaced := r.Replace("{foo:baz}, {foo:bar}", "foo:*", "", nil, nil, "")
require.Equal(t, "Hello foobaz, Hello foobar", replaced)
}

View File

@ -30,30 +30,31 @@ import (
// The Restreamer interface
type Restreamer interface {
ID() string // ID of this instance
Name() string // Arbitrary name of this instance
CreatedAt() time.Time // Time of when this instance has been created
Start() // Start all processes that have a "start" order
Stop() // Stop all running process but keep their "start" order
AddProcess(config *app.Config) error // Add a new process
GetProcessIDs(idpattern, refpattern string) []string // Get a list of process IDs based on patterns for ID and reference
DeleteProcess(id string) error // Delete a process
UpdateProcess(id string, config *app.Config) error // Update a process
StartProcess(id string) error // Start a process
StopProcess(id string) error // Stop a process
RestartProcess(id string) error // Restart a process
ReloadProcess(id string) error // Reload a process
GetProcess(id string) (*app.Process, error) // Get a process
GetProcessState(id string) (*app.State, error) // Get the state of a process
GetProcessLog(id string) (*app.Log, error) // Get the logs of a process
GetPlayout(id, inputid string) (string, error) // Get the URL of the playout API for a process
Probe(id string) app.Probe // Probe a process
Skills() skills.Skills // Get the ffmpeg skills
ReloadSkills() error // Reload the ffmpeg skills
SetProcessMetadata(id, key string, data interface{}) error // Set metatdata to a process
GetProcessMetadata(id, key string) (interface{}, error) // Get previously set metadata from a process
SetMetadata(key string, data interface{}) error // Set general metadata
GetMetadata(key string) (interface{}, error) // Get previously set general metadata
ID() string // ID of this instance
Name() string // Arbitrary name of this instance
CreatedAt() time.Time // Time when this instance was created
Start() // Start all processes that have a "start" order
Stop() // Stop all running processes but keep their "start" order
AddProcess(config *app.Config) error // Add a new process
GetProcessIDs(idpattern, refpattern string) []string // Get a list of process IDs based on patterns for ID and reference
DeleteProcess(id string) error // Delete a process
UpdateProcess(id string, config *app.Config) error // Update a process
StartProcess(id string) error // Start a process
StopProcess(id string) error // Stop a process
RestartProcess(id string) error // Restart a process
ReloadProcess(id string) error // Reload a process
GetProcess(id string) (*app.Process, error) // Get a process
GetProcessState(id string) (*app.State, error) // Get the state of a process
GetProcessLog(id string) (*app.Log, error) // Get the logs of a process
GetPlayout(id, inputid string) (string, error) // Get the URL of the playout API for a process
Probe(id string) app.Probe // Probe a process
ProbeWithTimeout(id string, timeout time.Duration) app.Probe // Probe a process with specific timeout
Skills() skills.Skills // Get the ffmpeg skills
ReloadSkills() error // Reload the ffmpeg skills
SetProcessMetadata(id, key string, data interface{}) error // Set metadata on a process
GetProcessMetadata(id, key string) (interface{}, error) // Get previously set metadata from a process
SetMetadata(key string, data interface{}) error // Set general metadata
GetMetadata(key string) (interface{}, error) // Get previously set general metadata
}
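
For orientation, a caller-side sketch of this interface. It assumes the dependencies (the ffmpeg wrapper, a replacer, a process config) were built elsewhere; the identifiers and config values here are illustrative, and only methods and fields shown in this diff are used.

// Illustrative only; ffm, replacer, and config are assumed to be
// prepared by the caller.
func runExample(ffm ffmpeg.FFmpeg, replacer replace.Replacer, config *app.Config) error {
	rs, err := restream.New(restream.Config{
		ID:      "core-1",
		Name:    "example",
		FFmpeg:  ffm,
		Replace: replacer,
	})
	if err != nil {
		return err
	}

	rs.Start()      // start everything with a "start" order
	defer rs.Stop() // stop, but keep the order for the next Start

	if err := rs.AddProcess(config); err != nil {
		return err
	}

	// Probe with a caller-chosen timeout instead of the default
	// 20 seconds used by Probe.
	probe := rs.ProbeWithTimeout(config.ID, 5*time.Second)
	_ = probe

	return nil
}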
// Config is the required configuration for a new restreamer instance.
@ -61,8 +62,7 @@ type Config struct {
ID string
Name string
Store store.Store
DiskFS fs.Filesystem
MemFS fs.Filesystem
Filesystems []fs.Filesystem
Replace replace.Replacer
FFmpeg ffmpeg.FFmpeg
MaxProcesses int64
@ -93,8 +93,8 @@ type restream struct {
maxProc int64
nProc int64
fs struct {
diskfs rfs.Filesystem
memfs rfs.Filesystem
list []rfs.Filesystem
diskfs []rfs.Filesystem
stopObserver context.CancelFunc
}
replace replace.Replacer
@ -124,29 +124,28 @@ func New(config Config) (Restreamer, error) {
}
if r.store == nil {
r.store = store.NewDummyStore(store.DummyConfig{})
dummyfs, _ := fs.NewMemFilesystem(fs.MemConfig{})
s, err := store.NewJSON(store.JSONConfig{
Filesystem: dummyfs,
})
if err != nil {
return nil, err
}
r.store = s
}
if config.DiskFS != nil {
r.fs.diskfs = rfs.New(rfs.Config{
FS: config.DiskFS,
Logger: r.logger.WithComponent("Cleanup").WithField("type", "diskfs"),
for _, fs := range config.Filesystems {
fs := rfs.New(rfs.Config{
FS: fs,
Logger: r.logger.WithComponent("Cleanup"),
})
} else {
r.fs.diskfs = rfs.New(rfs.Config{
FS: fs.NewDummyFilesystem(),
})
}
if config.MemFS != nil {
r.fs.memfs = rfs.New(rfs.Config{
FS: config.MemFS,
Logger: r.logger.WithComponent("Cleanup").WithField("type", "memfs"),
})
} else {
r.fs.memfs = rfs.New(rfs.Config{
FS: fs.NewDummyFilesystem(),
})
r.fs.list = append(r.fs.list, fs)
// Also add the disk filesystems to a separate list; we need them later for input and output validation
if fs.Type() == "disk" {
r.fs.diskfs = append(r.fs.diskfs, fs)
}
}
if r.replace == nil {
@ -185,12 +184,16 @@ func (r *restream) Start() {
r.setCleanup(id, t.config)
}
r.fs.diskfs.Start()
r.fs.memfs.Start()
ctx, cancel := context.WithCancel(context.Background())
r.fs.stopObserver = cancel
go r.observe(ctx, 10*time.Second)
for _, fs := range r.fs.list {
fs.Start()
if fs.Type() == "disk" {
go r.observe(ctx, fs, 10*time.Second)
}
}
r.stopOnce = sync.Once{}
})
@ -214,14 +217,16 @@ func (r *restream) Stop() {
r.fs.stopObserver()
r.fs.diskfs.Stop()
r.fs.memfs.Stop()
// Stop the cleanup jobs
for _, fs := range r.fs.list {
fs.Stop()
}
r.startOnce = sync.Once{}
})
}
func (r *restream) observe(ctx context.Context, interval time.Duration) {
func (r *restream) observe(ctx context.Context, fs fs.Filesystem, interval time.Duration) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
@ -230,14 +235,14 @@ func (r *restream) observe(ctx context.Context, interval time.Duration) {
case <-ctx.Done():
return
case <-ticker.C:
size, limit := r.fs.diskfs.Size()
size, limit := fs.Size()
isFull := false
if limit > 0 && size >= limit {
isFull = true
}
if isFull {
// Stop all tasks that write to disk
// Stop all tasks that write to this filesystem
r.lock.Lock()
for id, t := range r.tasks {
if !t.valid {
@ -252,7 +257,7 @@ func (r *restream) observe(ctx context.Context, interval time.Duration) {
continue
}
r.logger.Warn().Log("Shutting down because disk is full")
r.logger.Warn().Log("Shutting down because filesystem is full")
r.stopProcess(id)
}
r.lock.Unlock()
@ -290,7 +295,7 @@ func (r *restream) load() error {
}
// Replace all placeholders in the config
t.config.ResolvePlaceholders(r.replace)
resolvePlaceholders(t.config, r.replace)
tasks[id] = t
}
@ -463,7 +468,7 @@ func (r *restream) createTask(config *app.Config) (*task, error) {
logger: r.logger.WithField("id", process.ID),
}
t.config.ResolvePlaceholders(r.replace)
resolvePlaceholders(t.config, r.replace)
err := r.resolveAddresses(r.tasks, t.config)
if err != nil {
@ -502,34 +507,50 @@ func (r *restream) createTask(config *app.Config) (*task, error) {
}
func (r *restream) setCleanup(id string, config *app.Config) {
rePrefix := regexp.MustCompile(`^([a-z]+):`)
for _, output := range config.Output {
for _, c := range output.Cleanup {
if strings.HasPrefix(c.Pattern, "memfs:") {
r.fs.memfs.SetCleanup(id, []rfs.Pattern{
{
Pattern: strings.TrimPrefix(c.Pattern, "memfs:"),
MaxFiles: c.MaxFiles,
MaxFileAge: time.Duration(c.MaxFileAge) * time.Second,
PurgeOnDelete: c.PurgeOnDelete,
},
})
} else if strings.HasPrefix(c.Pattern, "diskfs:") {
r.fs.diskfs.SetCleanup(id, []rfs.Pattern{
{
Pattern: strings.TrimPrefix(c.Pattern, "diskfs:"),
MaxFiles: c.MaxFiles,
MaxFileAge: time.Duration(c.MaxFileAge) * time.Second,
PurgeOnDelete: c.PurgeOnDelete,
},
matches := rePrefix.FindStringSubmatch(c.Pattern)
if matches == nil {
continue
}
name := matches[1]
// Support legacy names
if name == "diskfs" {
name = "disk"
} else if name == "memfs" {
name = "mem"
}
for _, fs := range r.fs.list {
if fs.Name() != name {
continue
}
pattern := rfs.Pattern{
Pattern: rePrefix.ReplaceAllString(c.Pattern, ""),
MaxFiles: c.MaxFiles,
MaxFileAge: time.Duration(c.MaxFileAge) * time.Second,
PurgeOnDelete: c.PurgeOnDelete,
}
fs.SetCleanup(id, []rfs.Pattern{
pattern,
})
break
}
}
}
}
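
The routing above, in isolation: the filesystem name comes from the pattern prefix, the two legacy names are mapped onto the new ones, and the prefix is stripped before the pattern reaches the matching filesystem. A runnable distillation of exactly that logic:

package main

import (
	"fmt"
	"regexp"
)

var rePrefix = regexp.MustCompile(`^([a-z]+):`)

// route extracts the target filesystem name from a cleanup pattern
// and returns the pattern with the prefix stripped.
func route(pattern string) (name, rest string, ok bool) {
	matches := rePrefix.FindStringSubmatch(pattern)
	if matches == nil {
		return "", "", false
	}
	name = matches[1]
	switch name { // legacy names from before the multi-filesystem refactor
	case "diskfs":
		name = "disk"
	case "memfs":
		name = "mem"
	}
	return name, rePrefix.ReplaceAllString(pattern, ""), true
}

func main() {
	fmt.Println(route("memfs:/recordings/*.ts")) // mem /recordings/*.ts true
	fmt.Println(route("disk:/hls/*.m3u8"))       // disk /hls/*.m3u8 true
}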
func (r *restream) unsetCleanup(id string) {
r.fs.diskfs.UnsetCleanup(id)
r.fs.memfs.UnsetCleanup(id)
for _, fs := range r.fs.list {
fs.UnsetCleanup(id)
}
}
func (r *restream) setPlayoutPorts(t *task) error {
@ -618,9 +639,23 @@ func (r *restream) validateConfig(config *app.Config) (bool, error) {
return false, fmt.Errorf("the address for input '#%s:%s' must not be empty", config.ID, io.ID)
}
io.Address, err = r.validateInputAddress(io.Address, r.fs.diskfs.Base())
if err != nil {
return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err)
if len(r.fs.diskfs) != 0 {
maxFails := 0
for _, fs := range r.fs.diskfs {
io.Address, err = r.validateInputAddress(io.Address, fs.Metadata("base"))
if err != nil {
maxFails++
}
}
if maxFails == len(r.fs.diskfs) {
return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err)
}
} else {
io.Address, err = r.validateInputAddress(io.Address, "/")
if err != nil {
return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err)
}
}
}
@ -650,15 +685,33 @@ func (r *restream) validateConfig(config *app.Config) (bool, error) {
return false, fmt.Errorf("the address for output '#%s:%s' must not be empty", config.ID, io.ID)
}
isFile := false
if len(r.fs.diskfs) != 0 {
maxFails := 0
for _, fs := range r.fs.diskfs {
isFile := false
io.Address, isFile, err = r.validateOutputAddress(io.Address, fs.Metadata("base"))
if err != nil {
maxFails++
}
io.Address, isFile, err = r.validateOutputAddress(io.Address, r.fs.diskfs.Base())
if err != nil {
return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err)
}
if isFile {
hasFiles = true
}
}
if isFile {
hasFiles = true
if maxFails == len(r.fs.diskfs) {
return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err)
}
} else {
isFile := false
io.Address, isFile, err = r.validateOutputAddress(io.Address, "/")
if err != nil {
return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err)
}
if isFile {
hasFiles = true
}
}
}
@ -1089,7 +1142,7 @@ func (r *restream) reloadProcess(id string) error {
t.config = t.process.Config.Clone()
t.config.ResolvePlaceholders(r.replace)
resolvePlaceholders(t.config, r.replace)
err := r.resolveAddresses(r.tasks, t.config)
if err != nil {
@ -1251,6 +1304,10 @@ func (r *restream) GetProcessLog(id string) (*app.Log, error) {
}
func (r *restream) Probe(id string) app.Probe {
return r.ProbeWithTimeout(id, 20*time.Second)
}
func (r *restream) ProbeWithTimeout(id string, timeout time.Duration) app.Probe {
r.lock.RLock()
appprobe := app.Probe{}
@ -1288,7 +1345,7 @@ func (r *restream) Probe(id string) app.Probe {
ffmpeg, err := r.ffmpeg.New(ffmpeg.ProcessConfig{
Reconnect: false,
ReconnectDelay: 0,
StaleTimeout: 20 * time.Second,
StaleTimeout: timeout,
Command: command,
Parser: prober,
Logger: task.logger,
@ -1437,3 +1494,97 @@ func (r *restream) GetMetadata(key string) (interface{}, error) {
return data, nil
}
// resolvePlaceholders replaces all placeholders in the config. The config
// will be modified in place.
func resolvePlaceholders(config *app.Config, r replace.Replacer) {
vars := map[string]string{
"processid": config.ID,
"reference": config.Reference,
}
for i, option := range config.Options {
// Replace any known placeholders
option = r.Replace(option, "diskfs", "", vars, config, "global")
option = r.Replace(option, "fs:*", "", vars, config, "global")
config.Options[i] = option
}
// Resolve the given inputs
for i, input := range config.Input {
// Replace any known placeholders
input.ID = r.Replace(input.ID, "processid", config.ID, nil, nil, "input")
input.ID = r.Replace(input.ID, "reference", config.Reference, nil, nil, "input")
vars["inputid"] = input.ID
input.Address = r.Replace(input.Address, "inputid", input.ID, nil, nil, "input")
input.Address = r.Replace(input.Address, "processid", config.ID, nil, nil, "input")
input.Address = r.Replace(input.Address, "reference", config.Reference, nil, nil, "input")
input.Address = r.Replace(input.Address, "diskfs", "", vars, config, "input")
input.Address = r.Replace(input.Address, "memfs", "", vars, config, "input")
input.Address = r.Replace(input.Address, "fs:*", "", vars, config, "input")
input.Address = r.Replace(input.Address, "rtmp", "", vars, config, "input")
input.Address = r.Replace(input.Address, "srt", "", vars, config, "input")
for j, option := range input.Options {
// Replace any known placeholders
option = r.Replace(option, "inputid", input.ID, nil, nil, "input")
option = r.Replace(option, "processid", config.ID, nil, nil, "input")
option = r.Replace(option, "reference", config.Reference, nil, nil, "input")
option = r.Replace(option, "diskfs", "", vars, config, "input")
option = r.Replace(option, "memfs", "", vars, config, "input")
option = r.Replace(option, "fs:*", "", vars, config, "input")
input.Options[j] = option
}
delete(vars, "inputid")
config.Input[i] = input
}
// Resolve the given outputs
for i, output := range config.Output {
// Replace any known placeholders
output.ID = r.Replace(output.ID, "processid", config.ID, nil, nil, "output")
output.ID = r.Replace(output.ID, "reference", config.Reference, nil, nil, "output")
vars["outputid"] = output.ID
output.Address = r.Replace(output.Address, "outputid", output.ID, nil, nil, "output")
output.Address = r.Replace(output.Address, "processid", config.ID, nil, nil, "output")
output.Address = r.Replace(output.Address, "reference", config.Reference, nil, nil, "output")
output.Address = r.Replace(output.Address, "diskfs", "", vars, config, "output")
output.Address = r.Replace(output.Address, "memfs", "", vars, config, "output")
output.Address = r.Replace(output.Address, "fs:*", "", vars, config, "output")
output.Address = r.Replace(output.Address, "rtmp", "", vars, config, "output")
output.Address = r.Replace(output.Address, "srt", "", vars, config, "output")
for j, option := range output.Options {
// Replace any known placeholders
option = r.Replace(option, "outputid", output.ID, nil, nil, "output")
option = r.Replace(option, "processid", config.ID, nil, nil, "output")
option = r.Replace(option, "reference", config.Reference, nil, nil, "output")
option = r.Replace(option, "diskfs", "", vars, config, "output")
option = r.Replace(option, "memfs", "", vars, config, "output")
option = r.Replace(option, "fs:*", "", vars, config, "output")
output.Options[j] = option
}
for j, cleanup := range output.Cleanup {
// Replace any known placeholders
cleanup.Pattern = r.Replace(cleanup.Pattern, "outputid", output.ID, nil, nil, "output")
cleanup.Pattern = r.Replace(cleanup.Pattern, "processid", config.ID, nil, nil, "output")
cleanup.Pattern = r.Replace(cleanup.Pattern, "reference", config.Reference, nil, nil, "output")
output.Cleanup[j] = cleanup
}
delete(vars, "outputid")
config.Output[i] = output
}
}

View File

@ -9,11 +9,12 @@ import (
"github.com/datarhei/core/v16/internal/testhelper"
"github.com/datarhei/core/v16/net"
"github.com/datarhei/core/v16/restream/app"
"github.com/datarhei/core/v16/restream/replace"
"github.com/stretchr/testify/require"
)
func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmpeg.Validator) (Restreamer, error) {
func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmpeg.Validator, replacer replace.Replacer) (Restreamer, error) {
binary, err := testhelper.BuildBinary("ffmpeg", "../internal/testhelper")
if err != nil {
return nil, fmt.Errorf("failed to build helper program: %w", err)
@ -30,7 +31,8 @@ func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmp
}
rs, err := New(Config{
FFmpeg: ffmpeg,
FFmpeg: ffmpeg,
Replace: replacer,
})
if err != nil {
return nil, err
@ -77,7 +79,7 @@ func getDummyProcess() *app.Config {
}
func TestAddProcess(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil)
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process := getDummyProcess()
@ -97,7 +99,7 @@ func TestAddProcess(t *testing.T) {
}
func TestAutostartProcess(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil)
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process := getDummyProcess()
@ -112,7 +114,7 @@ func TestAutostartProcess(t *testing.T) {
}
func TestAddInvalidProcess(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil)
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
// Invalid process ID
@ -180,7 +182,7 @@ func TestAddInvalidProcess(t *testing.T) {
}
func TestRemoveProcess(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil)
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process := getDummyProcess()
@ -195,24 +197,98 @@ func TestRemoveProcess(t *testing.T) {
require.NotEqual(t, nil, err, "Unset process found (%s)", process.ID)
}
func TestGetProcess(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil)
func TestUpdateProcess(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process := getDummyProcess()
process1 := getDummyProcess()
require.NotNil(t, process1)
process1.ID = "process1"
rs.AddProcess(process)
process2 := getDummyProcess()
require.NotNil(t, process2)
process2.ID = "process2"
_, err = rs.GetProcess(process.ID)
require.Equal(t, nil, err, "Process not found (%s)", process.ID)
err = rs.AddProcess(process1)
require.Equal(t, nil, err)
err = rs.AddProcess(process2)
require.Equal(t, nil, err)
process3 := getDummyProcess()
require.NotNil(t, process3)
process3.ID = "process2"
err = rs.UpdateProcess("process1", process3)
require.Error(t, err)
process3.ID = "process3"
err = rs.UpdateProcess("process1", process3)
require.NoError(t, err)
_, err = rs.GetProcess(process1.ID)
require.Error(t, err)
_, err = rs.GetProcess(process3.ID)
require.NoError(t, err)
}
func TestGetProcess(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process1 := getDummyProcess()
process1.ID = "foo_aaa_1"
process1.Reference = "foo_aaa_1"
process2 := getDummyProcess()
process2.ID = "bar_bbb_2"
process2.Reference = "bar_bbb_2"
process3 := getDummyProcess()
process3.ID = "foo_ccc_3"
process3.Reference = "foo_ccc_3"
process4 := getDummyProcess()
process4.ID = "bar_ddd_4"
process4.Reference = "bar_ddd_4"
rs.AddProcess(process1)
rs.AddProcess(process2)
rs.AddProcess(process3)
rs.AddProcess(process4)
_, err = rs.GetProcess(process1.ID)
require.Equal(t, nil, err)
list := rs.GetProcessIDs("", "")
require.Len(t, list, 1, "expected 1 process")
require.Equal(t, process.ID, list[0], "expected same process ID")
require.Len(t, list, 4)
require.ElementsMatch(t, []string{"foo_aaa_1", "bar_bbb_2", "foo_ccc_3", "bar_ddd_4"}, list)
list = rs.GetProcessIDs("foo_*", "")
require.Len(t, list, 2)
require.ElementsMatch(t, []string{"foo_aaa_1", "foo_ccc_3"}, list)
list = rs.GetProcessIDs("bar_*", "")
require.Len(t, list, 2)
require.ElementsMatch(t, []string{"bar_bbb_2", "bar_ddd_4"}, list)
list = rs.GetProcessIDs("*_bbb_*", "")
require.Len(t, list, 1)
require.ElementsMatch(t, []string{"bar_bbb_2"}, list)
list = rs.GetProcessIDs("", "foo_*")
require.Len(t, list, 2)
require.ElementsMatch(t, []string{"foo_aaa_1", "foo_ccc_3"}, list)
list = rs.GetProcessIDs("", "bar_*")
require.Len(t, list, 2)
require.ElementsMatch(t, []string{"bar_bbb_2", "bar_ddd_4"}, list)
list = rs.GetProcessIDs("", "*_bbb_*")
require.Len(t, list, 1)
require.ElementsMatch(t, []string{"bar_bbb_2"}, list)
}
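
The matcher behind GetProcessIDs is not part of this diff; as a stand-in, the standard library's path.Match reproduces the shell-style wildcard behavior these assertions require:

package main

import (
	"fmt"
	"path"
)

// filterIDs keeps the IDs matching a shell-style pattern; an empty
// pattern matches everything, mirroring the test above.
func filterIDs(ids []string, pattern string) []string {
	if pattern == "" {
		return ids
	}
	out := []string{}
	for _, id := range ids {
		if ok, _ := path.Match(pattern, id); ok {
			out = append(out, id)
		}
	}
	return out
}

func main() {
	ids := []string{"foo_aaa_1", "bar_bbb_2", "foo_ccc_3", "bar_ddd_4"}
	fmt.Println(filterIDs(ids, "foo_*"))   // [foo_aaa_1 foo_ccc_3]
	fmt.Println(filterIDs(ids, "*_bbb_*")) // [bar_bbb_2]
}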
func TestStartProcess(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil)
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process := getDummyProcess()
@ -238,7 +314,7 @@ func TestStartProcess(t *testing.T) {
}
func TestStopProcess(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil)
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process := getDummyProcess()
@ -263,7 +339,7 @@ func TestStopProcess(t *testing.T) {
}
func TestRestartProcess(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil)
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process := getDummyProcess()
@ -288,7 +364,7 @@ func TestRestartProcess(t *testing.T) {
}
func TestReloadProcess(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil)
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process := getDummyProcess()
@ -318,8 +394,21 @@ func TestReloadProcess(t *testing.T) {
rs.StopProcess(process.ID)
}
func TestProcessData(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil)
func TestProbeProcess(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process := getDummyProcess()
rs.AddProcess(process)
probe := rs.ProbeWithTimeout(process.ID, 5*time.Second)
require.Equal(t, 3, len(probe.Streams))
}
func TestProcessMetadata(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process := getDummyProcess()
@ -340,7 +429,7 @@ func TestProcessData(t *testing.T) {
}
func TestLog(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil)
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process := getDummyProcess()
@ -373,7 +462,7 @@ func TestLog(t *testing.T) {
}
func TestPlayoutNoRange(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil)
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process := getDummyProcess()
@ -396,7 +485,7 @@ func TestPlayoutRange(t *testing.T) {
portrange, err := net.NewPortrange(3000, 3001)
require.NoError(t, err)
rs, err := getDummyRestreamer(portrange, nil, nil)
rs, err := getDummyRestreamer(portrange, nil, nil, nil)
require.NoError(t, err)
process := getDummyProcess()
@ -417,7 +506,7 @@ func TestPlayoutRange(t *testing.T) {
}
func TestAddressReference(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil)
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process1 := getDummyProcess()
@ -449,7 +538,7 @@ func TestAddressReference(t *testing.T) {
}
func TestConfigValidation(t *testing.T) {
rsi, err := getDummyRestreamer(nil, nil, nil)
rsi, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
rs := rsi.(*restream)
@ -496,7 +585,7 @@ func TestConfigValidationFFmpeg(t *testing.T) {
valOut, err := ffmpeg.NewValidator([]string{"^https?://", "^rtmp://"}, nil)
require.NoError(t, err)
rsi, err := getDummyRestreamer(nil, valIn, valOut)
rsi, err := getDummyRestreamer(nil, valIn, valOut, nil)
require.NoError(t, err)
rs := rsi.(*restream)
@ -522,7 +611,7 @@ func TestConfigValidationFFmpeg(t *testing.T) {
}
func TestOutputAddressValidation(t *testing.T) {
rsi, err := getDummyRestreamer(nil, nil, nil)
rsi, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
rs := rsi.(*restream)
@ -561,3 +650,196 @@ func TestOutputAddressValidation(t *testing.T) {
require.Equal(t, r.path, path)
}
}
func TestMetadata(t *testing.T) {
rs, err := getDummyRestreamer(nil, nil, nil, nil)
require.NoError(t, err)
process := getDummyProcess()
data, _ := rs.GetMetadata("foobar")
require.Equal(t, nil, data, "nothing should be stored under the key")
rs.SetMetadata("foobar", process)
data, _ = rs.GetMetadata("foobar")
require.NotEqual(t, nil, data, "there should be something stored under the key")
p := data.(*app.Config)
require.Equal(t, process.ID, p.ID, "failed to retrieve stored data")
}
func TestReplacer(t *testing.T) {
replacer := replace.New()
replacer.RegisterTemplateFunc("diskfs", func(config *app.Config, section string) string {
return "/mnt/diskfs"
}, nil)
replacer.RegisterTemplateFunc("fs:disk", func(config *app.Config, section string) string {
return "/mnt/diskfs"
}, nil)
replacer.RegisterTemplateFunc("memfs", func(config *app.Config, section string) string {
return "http://localhost/mnt/memfs"
}, nil)
replacer.RegisterTemplateFunc("fs:mem", func(config *app.Config, section string) string {
return "http://localhost/mnt/memfs"
}, nil)
replacer.RegisterTemplateFunc("rtmp", func(config *app.Config, section string) string {
return "rtmp://localhost/app/{name}?token=foobar"
}, nil)
replacer.RegisterTemplateFunc("srt", func(config *app.Config, section string) string {
template := "srt://localhost:6000?mode=caller&transtype=live&latency={latency}&streamid={name}"
if section == "output" {
template += ",mode:publish"
} else {
template += ",mode:request"
}
template += ",token:abcfoobar&passphrase=secret"
return template
}, map[string]string{
"latency": "20000", // 20 milliseconds, FFmpeg requires microseconds
})
rsi, err := getDummyRestreamer(nil, nil, nil, replacer)
require.NoError(t, err)
process := &app.Config{
ID: "314159265359",
Reference: "refref",
Input: []app.ConfigIO{
{
ID: "in_{processid}_{reference}",
Address: "input:{inputid}_process:{processid}_reference:{reference}_diskfs:{diskfs}/disk.txt_memfs:{memfs}/mem.txt_fsdisk:{fs:disk}/fsdisk.txt_fsmem:{fs:mem}/fsmem.txt_rtmp:{rtmp,name=pmtr}_srt:{srt,name=trs}_rtmp:{rtmp,name=$inputid}",
Options: []string{
"-f",
"lavfi",
"-re",
"input:{inputid}",
"process:{processid}",
"reference:{reference}",
"diskfs:{diskfs}/disk.txt",
"memfs:{memfs}/mem.txt",
"fsdisk:{fs:disk}/fsdisk.txt",
"fsmem:{fs:mem}/$inputid.txt",
},
},
},
Output: []app.ConfigIO{
{
ID: "out_{processid}_{reference}",
Address: "output:{outputid}_process:{processid}_reference:{reference}_diskfs:{diskfs}/disk.txt_memfs:{memfs}/mem.txt_fsdisk:{fs:disk}/fsdisk.txt_fsmem:{fs:mem}/fsmem.txt_rtmp:{rtmp,name=$processid}_srt:{srt,name=$reference,latency=42}_rtmp:{rtmp,name=$outputid}",
Options: []string{
"-codec",
"copy",
"-f",
"null",
"output:{outputid}",
"process:{processid}",
"reference:{reference}",
"diskfs:{diskfs}/disk.txt",
"memfs:{memfs}/mem.txt",
"fsdisk:{fs:disk}/fsdisk.txt",
"fsmem:{fs:mem}/$outputid.txt",
},
Cleanup: []app.ConfigIOCleanup{
{
Pattern: "pattern_{outputid}_{processid}_{reference}_{rtmp,name=$outputid}",
MaxFiles: 0,
MaxFileAge: 0,
PurgeOnDelete: false,
},
},
},
},
Options: []string{
"-loglevel",
"info",
"{diskfs}/foobar_on_disk.txt",
"{memfs}/foobar_in_mem.txt",
"{fs:disk}/foobar_on_disk_aswell.txt",
"{fs:mem}/foobar_in_mem_aswell.txt",
},
Reconnect: true,
ReconnectDelay: 10,
Autostart: false,
StaleTimeout: 0,
}
err = rsi.AddProcess(process)
require.NoError(t, err)
rs := rsi.(*restream)
process = &app.Config{
ID: "314159265359",
Reference: "refref",
FFVersion: "^4.0.2",
Input: []app.ConfigIO{
{
ID: "in_314159265359_refref",
Address: "input:in_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/pmtr?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=20000&streamid=trs,mode:request,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/in_314159265359_refref?token=foobar",
Options: []string{
"-f",
"lavfi",
"-re",
"input:in_314159265359_refref",
"process:314159265359",
"reference:refref",
"diskfs:/mnt/diskfs/disk.txt",
"memfs:http://localhost/mnt/memfs/mem.txt",
"fsdisk:/mnt/diskfs/fsdisk.txt",
"fsmem:http://localhost/mnt/memfs/$inputid.txt",
},
Cleanup: []app.ConfigIOCleanup{},
},
},
Output: []app.ConfigIO{
{
ID: "out_314159265359_refref",
Address: "output:out_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/314159265359?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=42&streamid=refref,mode:publish,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/out_314159265359_refref?token=foobar",
Options: []string{
"-codec",
"copy",
"-f",
"null",
"output:out_314159265359_refref",
"process:314159265359",
"reference:refref",
"diskfs:/mnt/diskfs/disk.txt",
"memfs:http://localhost/mnt/memfs/mem.txt",
"fsdisk:/mnt/diskfs/fsdisk.txt",
"fsmem:http://localhost/mnt/memfs/$outputid.txt",
},
Cleanup: []app.ConfigIOCleanup{
{
Pattern: "pattern_out_314159265359_refref_314159265359_refref_{rtmp,name=$outputid}",
MaxFiles: 0,
MaxFileAge: 0,
PurgeOnDelete: false,
},
},
},
},
Options: []string{
"-loglevel",
"info",
"/mnt/diskfs/foobar_on_disk.txt",
"{memfs}/foobar_in_mem.txt",
"/mnt/diskfs/foobar_on_disk_aswell.txt",
"http://localhost/mnt/memfs/foobar_in_mem_aswell.txt",
},
Reconnect: true,
ReconnectDelay: 10,
Autostart: false,
StaleTimeout: 0,
}
require.Equal(t, process, rs.tasks["314159265359"].config)
}
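
Worth noting in the expected config above: global options only run the "diskfs" and "fs:*" replacements (see resolvePlaceholders earlier in this diff), so "{memfs}/foobar_in_mem.txt" deliberately survives unresolved there, while the same placeholders in input and output addresses and options are fully expanded.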

View File

@ -1,37 +0,0 @@
package store
import (
"github.com/datarhei/core/v16/log"
)
type DummyConfig struct {
Logger log.Logger
}
type dummyStore struct {
logger log.Logger
}
func NewDummyStore(config DummyConfig) Store {
s := &dummyStore{
logger: config.Logger,
}
if s.logger == nil {
s.logger = log.New("")
}
return s
}
func (sb *dummyStore) Store(data StoreData) error {
sb.logger.Debug().Log("Data stored")
return nil
}
func (sb *dummyStore) Load() (StoreData, error) {
sb.logger.Debug().Log("Data loaded")
return NewStoreData(), nil
}

View File

@ -4,24 +4,23 @@ import (
gojson "encoding/json"
"fmt"
"os"
"path"
"sync"
"github.com/datarhei/core/v16/encoding/json"
"github.com/datarhei/core/v16/io/file"
"github.com/datarhei/core/v16/io/fs"
"github.com/datarhei/core/v16/log"
)
type JSONConfig struct {
Filepath string
FFVersion string
Logger log.Logger
Filesystem fs.Filesystem
Filepath string // Full path to the database file
Logger log.Logger
}
type jsonStore struct {
filepath string
ffversion string
logger log.Logger
fs fs.Filesystem
filepath string
logger log.Logger
// Mutex to serialize access to the backend
lock sync.RWMutex
@ -29,18 +28,26 @@ type jsonStore struct {
var version uint64 = 4
func NewJSONStore(config JSONConfig) Store {
func NewJSON(config JSONConfig) (Store, error) {
s := &jsonStore{
filepath: config.Filepath,
ffversion: config.FFVersion,
logger: config.Logger,
fs: config.Filesystem,
filepath: config.Filepath,
logger: config.Logger,
}
if len(s.filepath) == 0 {
s.filepath = "/db.json"
}
if s.fs == nil {
return nil, fmt.Errorf("no valid filesystem provided")
}
if s.logger == nil {
s.logger = log.New("")
}
return s
return s, nil
}
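
A minimal usage sketch, reusing the in-memory filesystem constructor that appears elsewhere in this diff; error handling is abbreviated and the snippet is assumed to sit inside a caller's function:

memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
if err != nil {
	return err
}

s, err := store.NewJSON(store.JSONConfig{
	Filesystem: memfs,
	Filepath:   "/db.json", // also the default when left empty
})
if err != nil {
	return err
}

data, err := s.Load() // a missing file yields empty StoreData, not an error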
func (s *jsonStore) Load() (StoreData, error) {
@ -79,28 +86,11 @@ func (s *jsonStore) store(filepath string, data StoreData) error {
return err
}
dir := path.Dir(filepath)
name := path.Base(filepath)
tmpfile, err := os.CreateTemp(dir, name)
_, _, err = s.fs.WriteFileSafe(filepath, jsondata)
if err != nil {
return err
}
defer os.Remove(tmpfile.Name())
if _, err := tmpfile.Write(jsondata); err != nil {
return err
}
if err := tmpfile.Close(); err != nil {
return err
}
if err := file.Rename(tmpfile.Name(), filepath); err != nil {
return err
}
s.logger.WithField("file", filepath).Debug().Log("Stored data")
return nil
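
The removed lines above are the classic safe-write dance, now delegated to the filesystem's WriteFileSafe. For reference, the same pattern as a standalone helper on plain os/path primitives; this is a sketch of what WriteFileSafe is expected to do, mirroring the deleted code, not its actual implementation:

// writeFileSafe writes to a temp file in the target directory, then
// renames it over the destination so readers never see a partial file.
func writeFileSafe(filepath string, data []byte) error {
	dir := path.Dir(filepath)
	name := path.Base(filepath)

	tmpfile, err := os.CreateTemp(dir, name)
	if err != nil {
		return err
	}
	defer os.Remove(tmpfile.Name()) // a no-op once the rename succeeded

	if _, err := tmpfile.Write(data); err != nil {
		return err
	}
	if err := tmpfile.Close(); err != nil {
		return err
	}

	return os.Rename(tmpfile.Name(), filepath)
}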
@ -113,7 +103,7 @@ type storeVersion struct {
func (s *jsonStore) load(filepath string, version uint64) (StoreData, error) {
r := NewStoreData()
_, err := os.Stat(filepath)
_, err := s.fs.Stat(filepath)
if err != nil {
if os.IsNotExist(err) {
return r, nil
@ -122,7 +112,7 @@ func (s *jsonStore) load(filepath string, version uint64) (StoreData, error) {
return r, err
}
jsondata, err := os.ReadFile(filepath)
jsondata, err := s.fs.ReadFile(filepath)
if err != nil {
return r, err
}

View File

@ -1,40 +1,61 @@
package store
import (
"os"
"testing"
"github.com/datarhei/core/v16/io/fs"
"github.com/stretchr/testify/require"
)
func TestNew(t *testing.T) {
store := NewJSONStore(JSONConfig{})
func getFS(t *testing.T) fs.Filesystem {
fs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{
Root: ".",
})
require.NoError(t, err)
info, err := fs.Stat("./fixtures/v4_empty.json")
require.NoError(t, err)
require.Equal(t, "/fixtures/v4_empty.json", info.Name())
return fs
}
func TestNew(t *testing.T) {
store, err := NewJSON(JSONConfig{
Filesystem: getFS(t),
})
require.NoError(t, err)
require.NotEmpty(t, store)
}
func TestLoad(t *testing.T) {
store := NewJSONStore(JSONConfig{
Filepath: "./fixtures/v4_empty.json",
store, err := NewJSON(JSONConfig{
Filesystem: getFS(t),
Filepath: "./fixtures/v4_empty.json",
})
require.NoError(t, err)
_, err := store.Load()
require.Equal(t, nil, err)
_, err = store.Load()
require.NoError(t, err)
}
func TestLoadFailed(t *testing.T) {
store := NewJSONStore(JSONConfig{
Filepath: "./fixtures/v4_invalid.json",
store, err := NewJSON(JSONConfig{
Filesystem: getFS(t),
Filepath: "./fixtures/v4_invalid.json",
})
require.NoError(t, err)
_, err := store.Load()
require.NotEqual(t, nil, err)
_, err = store.Load()
require.Error(t, err)
}
func TestIsEmpty(t *testing.T) {
store := NewJSONStore(JSONConfig{
Filepath: "./fixtures/v4_empty.json",
store, err := NewJSON(JSONConfig{
Filesystem: getFS(t),
Filepath: "./fixtures/v4_empty.json",
})
require.NoError(t, err)
data, err := store.Load()
require.NoError(t, err)
@ -42,9 +63,11 @@ func TestIsEmpty(t *testing.T) {
}
func TestNotExists(t *testing.T) {
store := NewJSONStore(JSONConfig{
Filepath: "./fixtures/v4_notexist.json",
store, err := NewJSON(JSONConfig{
Filesystem: getFS(t),
Filepath: "./fixtures/v4_notexist.json",
})
require.NoError(t, err)
data, err := store.Load()
require.NoError(t, err)
@ -52,11 +75,14 @@ func TestNotExists(t *testing.T) {
}
func TestStore(t *testing.T) {
os.Remove("./fixtures/v4_store.json")
fs := getFS(t)
fs.Remove("./fixtures/v4_store.json")
store := NewJSONStore(JSONConfig{
Filepath: "./fixtures/v4_store.json",
store, err := NewJSON(JSONConfig{
Filesystem: fs,
Filepath: "./fixtures/v4_store.json",
})
require.NoError(t, err)
data, err := store.Load()
require.NoError(t, err)
@ -70,13 +96,15 @@ func TestStore(t *testing.T) {
require.NoError(t, err)
require.Equal(t, data, data2)
os.Remove("./fixtures/v4_store.json")
fs.Remove("./fixtures/v4_store.json")
}
func TestInvalidVersion(t *testing.T) {
store := NewJSONStore(JSONConfig{
Filepath: "./fixtures/v3_empty.json",
store, err := NewJSON(JSONConfig{
Filesystem: getFS(t),
Filepath: "./fixtures/v3_empty.json",
})
require.NoError(t, err)
data, err := store.Load()
require.Error(t, err)

Some files were not shown because too many files have changed in this diff.