Refactor YAML library and update Docker configurations
Some checks are pending
tests / build (push) Waiting to run

- Cleaned up comments and formatting in YAML library files (readerc.go, scannerc.go, writerc.go, yaml.go, yamlh.go, yamlprivateh.go).
- Improved readability by aligning comments and removing unnecessary whitespace.
- Added Dockerfile for ffmpeg-ndi dependencies, ensuring necessary libraries are installed.
- Created Dockerfile for restreamer, integrating UI and core components with ffmpeg.
- Introduced docker-compose.yml to manage services including avahi, ffmpeg-ndi, and core.
- Implemented NDIHandler in the API to discover NDI sources using ffmpeg.
- Added placeholder HTML for the Restreamer UI so the Docker build does not fail when the real UI build output is absent from the build context.
- Included Install_NDI_SDK_v6_Linux.sh script for NDI SDK installation.
This commit is contained in:
Cesar Mendivil 2026-03-26 14:28:14 -07:00
parent a82d8cfef7
commit 1623b4ddad
129 changed files with 203194 additions and 680 deletions

5
.gitignore vendored
View File

@ -6,7 +6,7 @@
/data/**
/test/**
.vscode
/vendor
vendor/
*.ts
*.ts.tmp
*.m3u8
@ -14,5 +14,4 @@ docker/
*.mp4
*.avi
*.flv
.VSCodeCounter
.VSCodeCounter

View File

@ -1,5 +1,5 @@
ARG CORE_IMAGE=datarhei/base:alpine-core-latest
ARG FFMPEG_IMAGE=datarhei/base:alpine-ffmpeg-latest
ARG FFMPEG_IMAGE=nextream/ffmpeg-ndi:v6.0
FROM $CORE_IMAGE AS core
@ -7,6 +7,14 @@ FROM $FFMPEG_IMAGE
COPY --from=core /core /core
# Copy optional NDI SDK installer if present in build context and run it.
# This allows adding the NDI runtime into the image when `ndi_sdk/Install_NDI_SDK_v6_Linux.sh` is provided.
COPY ndi_sdk /tmp/ndi_sdk
RUN if [ -f /tmp/ndi_sdk/Install_NDI_SDK_v6_Linux.sh ]; then \
chmod +x /tmp/ndi_sdk/Install_NDI_SDK_v6_Linux.sh && \
/tmp/ndi_sdk/Install_NDI_SDK_v6_Linux.sh || true; \
fi
RUN chmod +x /core/bin/run.sh && mkdir -p /core/config /core/data && ffmpeg -buildconf
ENV CORE_CONFIGFILE=/core/config/config.json

View File

@ -0,0 +1,69 @@
FROM nextream/ffmpeg-ndi:v5.1
USER root
RUN apt-get update \
&& apt-get install -y --no-install-recommends \
libxcb1 libxcb-shm0 libxcb-shape0 libxcb-xfixes0 \
libasound2 libsdl2-2.0-0 libsndio7.0 libxv1 libx11-6 libxext6 \
libva2 libva-drm2 libva-x11-2 libvdpau1 libavahi-common3 libavahi-client3 libpulse0 \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# add compatibility symlink for libvpx if ffmpeg expects a newer SONAME
RUN set -eux; \
if [ -f /usr/lib/x86_64-linux-gnu/libvpx.so.7 ] && [ ! -f /usr/lib/x86_64-linux-gnu/libvpx.so.9 ]; then \
ln -s /usr/lib/x86_64-linux-gnu/libvpx.so.7 /usr/lib/x86_64-linux-gnu/libvpx.so.9 || true; \
fi
RUN set -eux; \
if apt-get update >/dev/null 2>&1; then \
apt-get install -y --no-install-recommends libx264-163 build-essential gcc pkg-config git yasm nasm || true; \
ldconfig || true; \
fi
## Remove any distro-provided libx264 so our built x264 is the only one present.
RUN set -eux; \
rm -f /usr/lib/x86_64-linux-gnu/libx264.so* /usr/local/lib/libx264.so || true; \
ldconfig || true
RUN set -eux; \
# Build x264 from source and install shared libs to satisfy ffmpeg
cd /tmp; \
git clone --depth 1 https://code.videolan.org/videolan/x264.git x264-src || true; \
cd x264-src || true; \
./configure --enable-shared --enable-pic --disable-cli || true; \
make -j$(nproc) || true; \
make install || true; \
ldconfig || true; \
rm -rf /tmp/x264-src || true
RUN set -eux; \
    # Build a small compatibility shim that provides x264_encoder_open_164 by
    # aliasing it to the real symbol exported by the x264 built above
    # (x264_encoder_open_165), so binaries linked against SONAME .164 load.
    printf '%s\n' 'extern void *x264_encoder_open_165(void *);' 'void *x264_encoder_open_164(void *p) __attribute__((alias("x264_encoder_open_165")));' > /tmp/x264_alias.c; \
    gcc -shared -fPIC -o /usr/lib/x86_64-linux-gnu/libx264.so.164 /tmp/x264_alias.c || true; \
    # Clean up the temp source. The original removed the wrong filename
    # (/tmp/x264_compat.c), leaving /tmp/x264_alias.c behind in the layer.
    rm -f /tmp/x264_alias.c || true; \
    ldconfig || true
RUN set -eux; \
# Create compatibility symlinks so binaries looking for libx264.so.164 find our build
if [ -f /usr/local/lib/libx264.so.165 ]; then \
ln -sf /usr/local/lib/libx264.so.165 /usr/lib/x86_64-linux-gnu/libx264.so.165 || true; \
ln -sf /usr/local/lib/libx264.so.165 /usr/lib/x86_64-linux-gnu/libx264.so.164 || true; \
ln -sf /usr/local/lib/libx264.so.165 /usr/lib/x86_64-linux-gnu/libx264.so.163 || true; \
ldconfig || true; \
fi
RUN set -eux; \
apt-get update; \
apt-get install -y --no-install-recommends autoconf automake libtool pkg-config texinfo zlib1g-dev libx264-dev libvpx-dev libfdk-aac-dev libmp3lame-dev libopus-dev libx265-dev libnuma-dev libxcb1-dev libxcb-shm0-dev libxcb-xfixes0-dev libxcb-shape0-dev libasound2-dev libpulse-dev libavahi-client-dev libva-dev libvdpau-dev libx11-dev libxext-dev yasm nasm git build-essential ca-certificates && \
cd /tmp && git clone --depth 1 https://git.ffmpeg.org/ffmpeg.git ffmpeg-src || true && \
# Ensure we use the x264 we installed into /usr/local
export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig; \
cd ffmpeg-src && \
./configure --prefix=/usr/local --pkg-config-flags="--static" --extra-cflags="-I/usr/local/include" --extra-ldflags="-L/usr/local/lib" --extra-libs="-lpthread -lm" --enable-gpl --enable-libx264 --enable-libx265 --enable-libvpx --enable-libmp3lame --enable-libopus --enable-libfdk-aac --enable-nonfree --enable-libndi_newtek || true && \
make -j$(nproc) || true && make install || true && ldconfig || true && rm -rf /tmp/ffmpeg-src; \
apt-get remove -y --purge autoconf automake libtool pkg-config build-essential texinfo ca-certificates git || true; apt-get autoremove -y --purge; rm -rf /var/lib/apt/lists/*
CMD ["/usr/local/bin/ffmpeg","-hide_banner","-version"]

47
Dockerfile.restreamer Normal file
View File

@ -0,0 +1,47 @@
ARG RESTREAMER_UI_IMAGE=datarhei/restreamer-ui:latest
ARG CORE_IMAGE=core-core
ARG FFMPEG_IMAGE=nextream/ffmpeg-ndi:v5.3
FROM $RESTREAMER_UI_IMAGE AS restreamer-ui
FROM $CORE_IMAGE AS core-stage
FROM $FFMPEG_IMAGE AS final
# Copy UI and core files into the ffmpeg-based final image so libc/libs match
COPY --from=restreamer-ui /ui/build /core/ui
ADD https://raw.githubusercontent.com/datarhei/restreamer/2.x/CHANGELOG.md /core/ui/CHANGELOG.md
COPY ./run.sh /core/bin/run.sh
COPY ./ui-root /core/ui-root
COPY --from=core-stage /core /core
# Start helper script: if host sockets are not mounted, try starting dbus/avahi,
# otherwise rely on host-provided daemons. This avoids starting system daemons when
# the host sockets are mounted into the container.
RUN printf '%s\n' '#!/bin/sh' 'set -eux' '' 'if [ "$(id -u)" -ne 0 ]; then' ' echo "starting as non-root, avahi may not work fully"' 'fi' '' '# if host dbus socket exists, do not start dbus-daemon' 'if [ ! -S /var/run/dbus/system_bus_socket ]; then' ' if ! pgrep -x dbus-daemon >/dev/null 2>&1; then' ' dbus-daemon --system --fork || true' ' fi' 'fi' '' '# if host avahi socket exists, do not start avahi-daemon' 'if [ ! -S /var/run/avahi-daemon/socket ]; then' ' if [ -x /usr/sbin/avahi-daemon ]; then' ' avahi-daemon --no-drop-root --daemonize || true' ' fi' 'fi' '' '# exec core run script' 'exec /core/bin/run.sh' > /core/bin/start.sh && chmod +x /core/bin/start.sh || true
ENV CORE_CONFIGFILE=/core/config/config.json
ENV CORE_DB_DIR=/core/config
ENV CORE_ROUTER_UI_PATH=/core/ui
ENV CORE_STORAGE_DISK_DIR=/core/data
ENV CORE_FFMPEG_BINARY=/usr/local/bin/ffmpeg-core
#ENV CORE_API_AUTH_ENABLE=false
ENV CORE_RTMP_ENABLE=true
ENV CORE_SRT_ENABLE=true
ENV CORE_PLAYOUT_ENABLE=true
ENV CORE_METRICS_ENABLE=true
ENV CORE_METRICS_ENABLE_PROMETHEUS=true
EXPOSE 8080/tcp
EXPOSE 8181/tcp
EXPOSE 1935/tcp
EXPOSE 1936/tcp
EXPOSE 6000/udp
EXPOSE 8555/tcp
EXPOSE 8554/tcp
VOLUME ["/core/data", "/core/config"]
ENTRYPOINT ["/core/bin/start.sh"]
WORKDIR /core
# Provide an ffmpeg-core wrapper that normalizes `-version` output for Core.
# NOTE: the script must contain `"$@"`, not `"$$@"`. `$$` is docker-compose
# interpolation escaping; inside a plain Dockerfile RUN it is written to the
# script verbatim and expands to the shell PID at runtime, so argument
# forwarding (and the -version detection) silently breaks.
RUN printf '%s\n' '#!/bin/sh' 'if echo "$@" | grep -q "-version"; then' ' ver=$(/usr/bin/ffmpeg -hide_banner -version 2>/dev/null | sed -n "1p" | sed "s/^[^0-9]*\([0-9]\+\.[0-9]\+\).*/\1/")' ' if [ -z "${ver}" ]; then ver="0.0"; fi' ' echo "ffmpeg version ${ver}"' ' exit 0' 'fi' 'exec /usr/bin/ffmpeg "$@"' > /usr/local/bin/ffmpeg-core && chmod +x /usr/local/bin/ffmpeg-core

View File

@ -1,5 +1,5 @@
ARG GOLANG_IMAGE=golang:1.22-alpine3.19
ARG FFMPEG_IMAGE=datarhei/base:alpine-ffmpeg-latest
ARG FFMPEG_IMAGE=nextream/ffmpeg-ndi:v5.1
FROM --platform=$BUILDPLATFORM $GOLANG_IMAGE AS builder
@ -26,6 +26,14 @@ COPY --from=builder /dist/core/ffmigrate /core/bin/ffmigrate
COPY --from=builder /dist/core/mime.types /core/mime.types
COPY --from=builder /dist/core/run.sh /core/bin/run.sh
# Optional NDI SDK installer copied into the image for runtime installation/testing.
# The installer may require additional packages or kernel headers and is left
# to be executed at runtime (e.g. docker cp ndi_sdk/ into a running container)
# or invoked by an operator when appropriate.
COPY ndi_sdk /tmp/ndi_sdk
# Add ffmpeg wrapper to normalize version output for Core's parser.
# NOTE: the script must contain `"$@"`, not `"$$@"`. `$$` is docker-compose
# interpolation escaping; in a Dockerfile RUN it is written verbatim into the
# script and expands to the shell PID at runtime, breaking argument forwarding.
RUN printf '%s\n' '#!/bin/sh' 'if echo "$@" | grep -q "-version"; then' ' # Print a normalized first line for Core and exit' ' ver=$(/usr/local/bin/ffmpeg -hide_banner -version 2>/dev/null | sed -n "1p" | sed "s/^[^0-9]*\([0-9]\+\.[0-9]\+\).*/\1/")' ' if [ -z "${ver}" ]; then ver="0.0"; fi' ' echo "ffmpeg version ${ver}"' ' exit 0' 'fi' 'exec /usr/local/bin/ffmpeg "$@"' > /usr/local/bin/ffmpeg-core && chmod +x /usr/local/bin/ffmpeg-core
RUN chmod +x /core/bin/run.sh && mkdir -p /core/config /core/data
ENV CORE_CONFIGFILE=/core/config/config.json

View File

@ -663,6 +663,35 @@ func (a *api) start() error {
a.restream = restream
// Populate UI metadata with detected ffmpeg protocols so the UI can
// show available pull options (e.g. rtmp, srt)
sk := a.restream.Skills()
inputProtos := []string{}
outputProtos := []string{}
for _, p := range sk.Protocols.Input {
inputProtos = append(inputProtos, p.Id)
}
for _, p := range sk.Protocols.Output {
outputProtos = append(outputProtos, p.Id)
}
canPullRTMP := false
canPullSRT := false
for _, id := range inputProtos {
if id == "rtmp" {
canPullRTMP = true
}
if id == "srt" {
canPullSRT = true
}
}
meta := map[string]interface{}{
"protocols_input": inputProtos,
"protocols_output": outputProtos,
"can_pull_rtmp": canPullRTMP,
"can_pull_srt": canPullSRT,
}
_ = a.restream.SetMetadata("restreamer-ui", meta)
var httpjwt jwt.JWT
if cfg.API.Auth.Enable {

View File

@ -197,7 +197,8 @@ func (d *Config) init() {
// FFmpeg
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg", d.fs), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
// Add default allow patterns for NDI network sources (e.g. "ndi:My Source", "ndi://My Source")
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{"^ndi:.*", "^ndi://.*", "^libndi_newtek:.*"}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false)
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Allow, []string{}, " "), "ffmpeg.access.output.allow", "CORE_FFMPEG_ACCESS_OUTPUT_ALLOW", nil, "List of allowed expression to match against the output addresses", false, false)
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Block, []string{}, " "), "ffmpeg.access.output.block", "CORE_FFMPEG_ACCESS_OUTPUT_BLOCK", nil, "List of blocked expression to match against the output addresses", false, false)

View File

@ -198,7 +198,8 @@ func (d *Config) init() {
// FFmpeg
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg", d.fs), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
// Add default allow patterns for NDI network sources (e.g. "ndi:My Source", "ndi://My Source")
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{"^ndi:.*", "^ndi://.*", "^libndi_newtek:.*"}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false)
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Allow, []string{}, " "), "ffmpeg.access.output.allow", "CORE_FFMPEG_ACCESS_OUTPUT_ALLOW", nil, "List of allowed expression to match against the output addresses", false, false)
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Block, []string{}, " "), "ffmpeg.access.output.block", "CORE_FFMPEG_ACCESS_OUTPUT_BLOCK", nil, "List of blocked expression to match against the output addresses", false, false)

44
docker-compose.yml Normal file
View File

@ -0,0 +1,44 @@
version: '3.8'

services:
  # mDNS responder shared with the other containers via the host network and
  # the mounted dbus socket; NDI discovery relies on it.
  avahi:
    image: ydkn/avahi
    network_mode: "host"
    container_name: avahi-service
    volumes:
      - /var/run/dbus:/var/run/dbus
    restart: unless-stopped

  # Worker that periodically re-runs ffmpeg's NDI source discovery so that
  # sources on the network stay registered/visible.
  ffmpeg-ndi:
    image: nextream/ffmpeg-ndi:v5.2
    network_mode: "host"
    container_name: ffmpeg-ndi-worker
    depends_on:
      - avahi
    privileged: true
    volumes:
      - /var/run/dbus:/var/run/dbus
      - /var/run/avahi-daemon/socket:/var/run/avahi-daemon/socket
    restart: unless-stopped
    entrypoint: ["sh","-c","while true; do ffmpeg -hide_banner -nostdin -loglevel info -f libndi_newtek -find_sources 1 -i dummy -t 5 -f null -; sleep 10; done"]

  # datarhei Core built locally; shares host networking and the avahi/dbus
  # sockets so its ffmpeg processes can resolve NDI sources.
  core:
    build:
      context: .
      dockerfile: Dockerfile.whip-test
    network_mode: "host"
    container_name: core-ndi
    depends_on:
      - avahi
    volumes:
      - ./config:/core/config
      - ./data:/core/data
      - /var/run/dbus:/var/run/dbus
      - /var/run/avahi-daemon/socket:/var/run/avahi-daemon/socket
    environment:
      - CORE_WHIP_ENABLE=true
      - CORE_WHIP_ADDRESS=:8555
      - CORE_WHIP_RTSP_ADDRESS=:8554
      - CORE_FFMPEG_BINARY=/usr/local/bin/ffmpeg-core
      - CORE_PLAYOUT_MIN_PORT=10000
      - CORE_PLAYOUT_MAX_PORT=10100
    restart: unless-stopped

74
http/handler/api/ndi.go Normal file
View File

@ -0,0 +1,74 @@
package api

import (
	"bufio"
	"context"
	"net/http"
	"os/exec"
	"time"

	ffprobe "github.com/datarhei/core/v16/ffmpeg/probe"
	"github.com/datarhei/core/v16/log"
	"github.com/labstack/echo/v4"
)

// NDIHandler provides an endpoint to discover NDI sources via ffmpeg.
type NDIHandler struct {
	logger log.Logger
}

// NewNDI returns a new NDIHandler.
func NewNDI() *NDIHandler {
	return &NDIHandler{
		logger: log.New("NDI"),
	}
}

// SourcesResponse represents a discovered NDI source.
type SourcesResponse struct {
	Address string `json:"address"`
	Format  string `json:"format"`
}

// Sources runs ffmpeg in NDI discovery mode and returns the parsed inputs.
//
// The probe is bound to the incoming request's context (with a 4s cap) so a
// cancelled request also cancels the ffmpeg child. stderr is parsed line by
// line because ffmpeg prints its discovery diagnostics there.
func (h *NDIHandler) Sources(c echo.Context) error {
	ctx, cancel := context.WithTimeout(c.Request().Context(), 4*time.Second)
	defer cancel()

	// ffmpeg prints diagnostics on stderr.
	cmd := exec.CommandContext(ctx, "ffmpeg", "-hide_banner", "-nostdin", "-loglevel", "info", "-f", "libndi_newtek", "-find_sources", "1", "-i", "dummy", "-t", "0.5", "-f", "null", "-")

	sd, err := cmd.StderrPipe()
	if err != nil {
		return c.JSON(http.StatusInternalServerError, map[string]string{"error": err.Error()})
	}

	p := ffprobe.New(ffprobe.Config{Logger: h.logger})

	if err := cmd.Start(); err != nil {
		return c.JSON(http.StatusInternalServerError, map[string]string{"error": err.Error()})
	}

	scanner := bufio.NewScanner(sd)
	for scanner.Scan() {
		p.Parse(scanner.Text())
	}
	scanErr := scanner.Err()

	// Always reap the child. A discovery run is expected to exit non-zero
	// (the "dummy" input never opens), so the Wait error is not meaningful.
	_ = cmd.Wait()

	if scanErr != nil {
		// Reading stderr failed; the parsed data would be incomplete.
		return c.JSON(http.StatusInternalServerError, map[string]string{"error": scanErr.Error()})
	}

	// Parse collected data.
	p.ResetStats()
	probe := p.Probe()

	out := make([]SourcesResponse, 0, len(probe.Streams))
	for _, s := range probe.Streams {
		out = append(out, SourcesResponse{
			Address: s.Address,
			Format:  s.Format,
		})
	}

	return c.JSON(http.StatusOK, out)
}

View File

@ -302,6 +302,12 @@ func (h *RestreamHandler) GetState(c echo.Context) error {
s, err := h.restream.GetProcessState(id)
if err != nil {
if err == restream.ErrUnknownProcess && strings.HasPrefix(id, "restreamer-ui:ingest:") {
// Return an empty state object for UI ingest placeholders instead of 404
state := api.ProcessState{}
return c.JSON(http.StatusOK, state)
}
return api.Err(http.StatusNotFound, "Unknown process ID", "%s", err)
}
@ -414,6 +420,11 @@ func (h *RestreamHandler) GetProcessMetadata(c echo.Context) error {
data, err := h.restream.GetProcessMetadata(id, key)
if err != nil {
if err == restream.ErrUnknownProcess && strings.HasPrefix(id, "restreamer-ui:ingest:") {
// Return empty metadata for UI ingest placeholders instead of 404
return c.JSON(http.StatusOK, map[string]interface{}{})
}
return api.Err(http.StatusNotFound, "Unknown process ID", "%s", err)
}
@ -470,6 +481,45 @@ func (h *RestreamHandler) SetProcessMetadata(c echo.Context) error {
func (h *RestreamHandler) GetMetadata(c echo.Context) error {
key := util.PathParam(c, "key")
// Support a non-intrusive debug mode so the UI can monitor why the
// `restreamer-ui` metadata value isn't available. Request with
// `?debug=1` to receive the stored metadata plus detected ffmpeg
// protocols and convenience flags for RTMP/SRT.
if key == "restreamer-ui" && c.QueryParam("debug") == "1" {
data, _ := h.restream.GetMetadata(key)
sk := h.restream.Skills()
inputProtos := []string{}
outputProtos := []string{}
for _, p := range sk.Protocols.Input {
inputProtos = append(inputProtos, p.Id)
}
for _, p := range sk.Protocols.Output {
outputProtos = append(outputProtos, p.Id)
}
canPullRTMP := false
canPullSRT := false
for _, id := range inputProtos {
if id == "rtmp" {
canPullRTMP = true
}
if id == "srt" {
canPullSRT = true
}
}
resp := map[string]interface{}{
"metadata": data,
"protocols_input": inputProtos,
"protocols_output": outputProtos,
"can_pull_rtmp": canPullRTMP,
"can_pull_srt": canPullSRT,
}
return c.JSON(http.StatusOK, resp)
}
data, err := h.restream.GetMetadata(key)
if err != nil {
return api.Err(http.StatusNotFound, "Metadata not found", "%s", err)

View File

@ -127,6 +127,7 @@ type server struct {
session *api.SessionHandler
widget *api.WidgetHandler
resources *api.MetricsHandler
ndi *api.NDIHandler
}
middleware struct {
@ -236,6 +237,9 @@ func NewServer(config Config) (Server, error) {
config.Restream,
)
// NDI discovery handler
s.v3handler.ndi = api.NewNDI()
s.v3handler.playout = api.NewPlayout(
config.Restream,
)
@ -568,6 +572,11 @@ func (s *server) setRoutesV3(v3 *echo.Group) {
v3.GET("/process/:id/report", s.v3handler.restream.GetReport)
v3.GET("/process/:id/probe", s.v3handler.restream.Probe)
// NDI discovery
if s.v3handler.ndi != nil {
v3.GET("/ndi/sources", s.v3handler.ndi.Sources)
}
v3.GET("/process/:id/metadata", s.v3handler.restream.GetProcessMetadata)
v3.GET("/process/:id/metadata/:key", s.v3handler.restream.GetProcessMetadata)

File diff suppressed because one or more lines are too long

View File

@ -1,6 +1,11 @@
package app
import (
"context"
"os/exec"
"strings"
"time"
"github.com/datarhei/core/v16/process"
)
@ -87,9 +92,31 @@ func (config *Config) CreateCommand() []string {
command = append(command, config.Options...)
for _, input := range config.Input {
// Detect NDI shorthand addresses and adapt options for ffmpeg
addr := input.Address
opts := make([]string, 0, len(input.Options))
opts = append(opts, input.Options...)
if strings.HasPrefix(addr, "ndi:") || strings.HasPrefix(addr, "ndi://") {
// convert ndi:NAME or ndi://NAME -> try libndi variants so ffmpeg can use the libndi device
parts := addr
if idx := strings.Index(parts, ":"); idx >= 0 {
parts = parts[idx+1:]
}
parts = strings.TrimPrefix(parts, "//")
// probe candidate variants and pick the first that opens correctly
if resolved := probeNDI(parts); resolved != "" {
addr = resolved
} else {
// fallback to libndi_newtek:<name>
addr = "libndi_newtek:" + parts
}
}
// Add the resolved input to the process command
command = append(command, input.Options...)
command = append(command, "-i", input.Address)
command = append(command, opts...)
command = append(command, "-i", addr)
}
for _, output := range config.Output {
@ -101,6 +128,28 @@ func (config *Config) CreateCommand() []string {
return command
}
// probeNDI tries a few ffmpeg input variants for an NDI source name and
// returns the first working input string, or "" if none succeeded.
func probeNDI(name string) string {
	candidates := []string{
		"libndi_newtek:" + name,
		"ndi:" + name,
	}

	for _, candidate := range candidates {
		if tryNDIProbe(candidate) {
			return candidate
		}
	}

	return ""
}

// tryNDIProbe runs a short, time-bounded ffmpeg probe against a single
// candidate input and reports whether it opened successfully.
//
// Extracted from the loop in probeNDI: the original used `defer cancel()`
// inside the loop body, which leaked every iteration's context (and its
// timer) until the whole function returned. With a helper, each cancel fires
// as soon as its probe finishes.
func tryNDIProbe(input string) bool {
	ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, "ffmpeg", "-hide_banner", "-loglevel", "error", "-i", input, "-t", "0.5", "-f", "null", "-")
	return cmd.Run() == nil
}
type Process struct {
ID string `json:"id"`
Reference string `json:"reference"`

View File

@ -1359,9 +1359,25 @@ func (r *restream) ProbeWithTimeout(id string, timeout time.Duration) app.Probe
command = append(command, task.config.Options...)
for _, input := range task.config.Input {
// Detect NDI shorthand addresses and adapt options for ffmpeg
addr := input.Address
opts := make([]string, 0, len(input.Options))
opts = append(opts, input.Options...)
if strings.HasPrefix(addr, "ndi:") || strings.HasPrefix(addr, "ndi://") {
parts := addr
if idx := strings.Index(parts, ":"); idx >= 0 {
parts = parts[idx+1:]
}
parts = strings.TrimPrefix(parts, "//")
// convert to libndi_newtek:NAME
addr = "libndi_newtek:" + parts
}
// Add the resolved input to the process command
command = append(command, input.Options...)
command = append(command, "-i", input.Address)
command = append(command, opts...)
command = append(command, "-i", addr)
}
prober := r.ffmpeg.NewProbeParser(task.logger)

11
ui-root/index.html Normal file
View File

@ -0,0 +1,11 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>Restreamer UI Placeholder</title>
</head>
<body>
<!-- Placeholder page: it keeps the `COPY ./ui-root` step in
     Dockerfile.restreamer from failing when the real UI build output
     is not present in the build context. Replaced at build time when
     the actual restreamer-ui assets are copied in. -->
<h1>Restreamer UI Placeholder</h1>
<p>This placeholder prevents Dockerfile.restreamer copying from a missing ui-root.</p>
</body>
</html>

View File

@ -3,27 +3,27 @@
//
// For example, the dependencies of the stdlib `strings` package can be resolved like so:
//
// import "github.com/KyleBanks/depth"
// import "github.com/KyleBanks/depth"
//
// var t depth.Tree
// err := t.Resolve("strings")
// if err != nil {
// log.Fatal(err)
// }
// err := t.Resolve("strings")
// if err != nil {
// log.Fatal(err)
// }
//
// // Output: "strings has 4 dependencies."
// log.Printf("%v has %v dependencies.", t.Root.Name, len(t.Root.Deps))
// // Output: "strings has 4 dependencies."
// log.Printf("%v has %v dependencies.", t.Root.Name, len(t.Root.Deps))
//
// For additional customization, simply set the appropriate flags on the `Tree` before resolving:
//
// import "github.com/KyleBanks/depth"
// import "github.com/KyleBanks/depth"
//
// t := depth.Tree {
// ResolveInternal: true,
// ResolveTest: true,
// MaxDepth: 10,
// }
// err := t.Resolve("strings")
// t := depth.Tree {
// ResolveInternal: true,
// ResolveTest: true,
// MaxDepth: 10,
// }
// err := t.Resolve("strings")
package depth
import (

View File

@ -9,7 +9,7 @@
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
// # Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

View File

@ -18,6 +18,7 @@
// tag is deprecated and thus should not be used.
// Go versions prior to 1.4 are disabled because they use a different layout
// for interfaces which make the implementation of unsafeReflectValue more complex.
//go:build !js && !appengine && !safe && !disableunsafe && go1.4
// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew

View File

@ -16,6 +16,7 @@
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
//go:build js || appengine || safe || disableunsafe || !go1.4
// +build js appengine safe disableunsafe !go1.4
package spew

View File

@ -254,15 +254,15 @@ pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
- Pointers are dereferenced and followed
- Circular data structures are detected and handled properly
- Custom Stringer/error interfaces are optionally invoked, including
on unexported types
- Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
- Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.
@ -295,12 +295,12 @@ func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{})
// NewDefaultConfig returns a ConfigState with the following default settings.
//
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
func NewDefaultConfig() *ConfigState {
return &ConfigState{Indent: " "}
}

View File

@ -21,35 +21,36 @@ debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types are as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
- Pointers are dereferenced and followed
- Circular data structures are detected and handled properly
- Custom Stringer/error interfaces are optionally invoked, including
on unexported types
- Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
- Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
- Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
- A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
# Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
@ -58,12 +59,13 @@ Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
# Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
@ -74,51 +76,52 @@ equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
- Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
- MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
- DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
- DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
- DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
- DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
- ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
- SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
Dump Usage
- SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
# Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
@ -133,7 +136,7 @@ A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
# Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
@ -150,13 +153,14 @@ shown here.
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
# Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
@ -170,7 +174,7 @@ standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Custom Formatter Usage
# Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Printf. The
@ -184,15 +188,17 @@ functions have syntax you are most likely already familiar with:
See the Index for the full list convenience functions.
Sample Formatter Output
# Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
@ -201,7 +207,7 @@ Pointer to circular struct with a uint8 field and a pointer to itself:
See the Printf example for details on the setup of variables being shown
here.
Errors
# Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information

View File

@ -488,15 +488,15 @@ pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
- Pointers are dereferenced and followed
- Circular data structures are detected and handled properly
- Custom Stringer/error interfaces are optionally invoked, including
on unexported types
- Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
- Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.

View File

@ -154,11 +154,11 @@ func Marc(raw []byte, limit uint32) bool {
// the GL transmission Format (glTF).
// GLB uses little endian and its header structure is as follows:
//
// <-- 12-byte header -->
// | magic | version | length |
// | (uint32) | (uint32) | (uint32) |
// | \x67\x6C\x54\x46 | \x01\x00\x00\x00 | ... |
// | g l T F | 1 | ... |
// <-- 12-byte header -->
// | magic | version | length |
// | (uint32) | (uint32) | (uint32) |
// | \x67\x6C\x54\x46 | \x01\x00\x00\x00 | ... |
// | g l T F | 1 | ... |
//
// Visit [glTF specification] and [IANA glTF entry] for more details.
//
@ -170,14 +170,15 @@ var Glb = prefix([]byte("\x67\x6C\x54\x46\x02\x00\x00\x00"),
// TzIf matches a Time Zone Information Format (TZif) file.
// See more: https://tools.ietf.org/id/draft-murchison-tzdist-tzif-00.html#rfc.section.3
// Its header structure is shown below:
// +---------------+---+
// | magic (4) | <-+-- version (1)
// +---------------+---+---------------------------------------+
// | [unused - reserved for future use] (15) |
// +---------------+---------------+---------------+-----------+
// | isutccnt (4) | isstdcnt (4) | leapcnt (4) |
// +---------------+---------------+---------------+
// | timecnt (4) | typecnt (4) | charcnt (4) |
//
// +---------------+---+
// | magic (4) | <-+-- version (1)
// +---------------+---+---------------------------------------+
// | [unused - reserved for future use] (15) |
// +---------------+---------------+---------------+-----------+
// | isutccnt (4) | isstdcnt (4) | leapcnt (4) |
// +---------------+---------------+---------------+
// | timecnt (4) | typecnt (4) | charcnt (4) |
func TzIf(raw []byte, limit uint32) bool {
// File is at least 44 bytes (header size).
if len(raw) < 44 {

View File

@ -347,8 +347,9 @@ const (
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A '' Kelvin sign
// - S maps to s and to U+017F 'ſ' Latin small letter long s
// - k maps to K and to U+212A '' Kelvin sign
//
// See http://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and

View File

@ -64,12 +64,12 @@ func JSONToYAML(j []byte) ([]byte, error) {
// this method should be a no-op.
//
// Things YAML can do that are not supported by JSON:
// * In YAML you can have binary and null keys in your maps. These are invalid
// in JSON. (int and float keys are converted to strings.)
// * Binary data in YAML with the !!binary tag is not supported. If you want to
// use binary data with this library, encode the data as base64 as usual but do
// not use the !!binary tag in your YAML. This will ensure the original base64
// encoded data makes it all the way through to the JSON.
// - In YAML you can have binary and null keys in your maps. These are invalid
// in JSON. (int and float keys are converted to strings.)
// - Binary data in YAML with the !!binary tag is not supported. If you want to
// use binary data with this library, encode the data as base64 as usual but do
// not use the !!binary tag in your YAML. This will ensure the original base64
// encoded data makes it all the way through to the JSON.
func YAMLToJSON(y []byte) ([]byte, error) {
return yamlToJSON(y, nil)
}

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -108,9 +108,9 @@ type GUID struct {
//
// The supplied string may be in any of these formats:
//
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
// XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}
//
// The conversion of the supplied string is not case-sensitive.
func NewGUID(guid string) *GUID {
@ -216,11 +216,11 @@ func decodeHexChar(c byte) (byte, bool) {
// String converts the GUID to string form. It will adhere to this pattern:
//
// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}
// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}
//
// If the GUID is nil, the string representation of an empty GUID is returned:
//
// {00000000-0000-0000-0000-000000000000}
// {00000000-0000-0000-0000-000000000000}
func (guid *GUID) String() string {
if guid == nil {
return emptyGUID

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package oleutil

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package oleutil

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package oleutil

View File

@ -1,6 +1,7 @@
// This file is here so go get succeeds as without it errors with:
// no buildable Go source files in ...
//
//go:build !windows
// +build !windows
package oleutil

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -84,7 +84,7 @@ func (sac *SafeArrayConversion) ToValueArray() (values []interface{}) {
safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
values[i] = v
case VT_BSTR:
v , _ := safeArrayGetElementString(sac.Array, i)
v, _ := safeArrayGetElementString(sac.Array, i)
values[i] = v
case VT_VARIANT:
var v VARIANT

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build 386
// +build 386
package ole

View File

@ -1,3 +1,4 @@
//go:build amd64
// +build amd64
package ole

View File

@ -1,3 +1,4 @@
//go:build arm
// +build arm
package ole

View File

@ -1,3 +1,4 @@
//go:build windows && 386
// +build windows,386
package ole

View File

@ -1,3 +1,4 @@
//go:build windows && amd64
// +build windows,amd64
package ole

View File

@ -1,3 +1,4 @@
//go:build windows && arm
// +build windows,arm
package ole

View File

@ -1,3 +1,4 @@
//go:build ppc64le
// +build ppc64le
package ole

View File

@ -1,3 +1,4 @@
//go:build s390x
// +build s390x
package ole

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package ole

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package ole

View File

@ -13,29 +13,28 @@ type Glob interface {
// Compile creates Glob for given pattern and strings (if any present after pattern) as separators.
// The pattern syntax is:
//
// pattern:
// { term }
// pattern:
// { term }
//
// term:
// `*` matches any sequence of non-separator characters
// `**` matches any sequence of characters
// `?` matches any single non-separator character
// `[` [ `!` ] { character-range } `]`
// character class (must be non-empty)
// `{` pattern-list `}`
// pattern alternatives
// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`)
// `\` c matches character c
// term:
// `*` matches any sequence of non-separator characters
// `**` matches any sequence of characters
// `?` matches any single non-separator character
// `[` [ `!` ] { character-range } `]`
// character class (must be non-empty)
// `{` pattern-list `}`
// pattern alternatives
// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`)
// `\` c matches character c
//
// character-range:
// c matches character c (c != `\\`, `-`, `]`)
// `\` c matches character c
// lo `-` hi matches character c for lo <= c <= hi
//
// pattern-list:
// pattern { `,` pattern }
// comma-separated (without spaces) patterns
// character-range:
// c matches character c (c != `\\`, `-`, `]`)
// `\` c matches character c
// lo `-` hi matches character c for lo <= c <= hi
//
// pattern-list:
// pattern { `,` pattern }
// comma-separated (without spaces) patterns
func Compile(pattern string, separators ...rune) (Glob, error) {
ast, err := syntax.Parse(pattern)
if err != nil {

View File

@ -1,3 +1,4 @@
//go:build go1.4
// +build go1.4
package jwt

View File

@ -42,7 +42,7 @@ func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
// NewDCESecurity(Person, uint32(os.Getuid()))
// NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
return NewDCESecurity(Person, uint32(os.Getuid()))
}
@ -50,7 +50,7 @@ func NewDCEPerson() (UUID, error) {
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
// NewDCESecurity(Group, uint32(os.Getgid()))
// NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
return NewDCESecurity(Group, uint32(os.Getgid()))
}

View File

@ -45,7 +45,7 @@ func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(md5.New(), space, data, 3)
// NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
return NewHash(md5.New(), space, data, 3)
}
@ -53,7 +53,7 @@ func NewMD5(space UUID, data []byte) UUID {
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
// NewHash(sha1.New(), space, data, 5)
// NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
return NewHash(sha1.New(), space, data, 5)
}

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build js
// +build js
package uuid

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !js
// +build !js
package uuid

View File

@ -17,15 +17,14 @@ var jsonNull = []byte("null")
// NullUUID implements the SQL driver.Scanner interface so
// it can be used as a scan destination:
//
// var u uuid.NullUUID
// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
// ...
// if u.Valid {
// // use u.UUID
// } else {
// // NULL value
// }
//
// var u uuid.NullUUID
// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
// ...
// if u.Valid {
// // use u.UUID
// } else {
// // NULL value
// }
type NullUUID struct {
UUID UUID
Valid bool // Valid is true if UUID is not NULL

View File

@ -187,10 +187,12 @@ func Must(uuid UUID, err error) UUID {
}
// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
//
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
//
// It returns an error if the format is invalid, otherwise nil.
func Validate(s string) error {
switch len(s) {

View File

@ -9,7 +9,7 @@ import "io"
// New creates a new random UUID or panics. New is equivalent to
// the expression
//
// uuid.Must(uuid.NewRandom())
// uuid.Must(uuid.NewRandom())
func New() UUID {
return Must(NewRandom())
}
@ -17,7 +17,7 @@ func New() UUID {
// NewString creates a new random UUID and returns it as a string or panics.
// NewString is equivalent to the expression
//
// uuid.New().String()
// uuid.New().String()
func NewString() string {
return Must(NewRandom()).String()
}
@ -31,11 +31,11 @@ func NewString() string {
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
// hit by a meteorite is estimated to be one chance in 17 billion, that
// means the probability is about 0.00000000006 (6 × 1011),
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
// hit by a meteorite is estimated to be one chance in 17 billion, that
// means the probability is about 0.00000000006 (6 × 1011),
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
func NewRandom() (UUID, error) {
if !poolEnabled {
return NewRandomFromReader(rander)

View File

@ -4,40 +4,40 @@
// Package websocket implements the WebSocket protocol defined in RFC 6455.
//
// Overview
// # Overview
//
// The Conn type represents a WebSocket connection. A server application calls
// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
//
// var upgrader = websocket.Upgrader{
// ReadBufferSize: 1024,
// WriteBufferSize: 1024,
// }
// var upgrader = websocket.Upgrader{
// ReadBufferSize: 1024,
// WriteBufferSize: 1024,
// }
//
// func handler(w http.ResponseWriter, r *http.Request) {
// conn, err := upgrader.Upgrade(w, r, nil)
// if err != nil {
// log.Println(err)
// return
// }
// ... Use conn to send and receive messages.
// }
// func handler(w http.ResponseWriter, r *http.Request) {
// conn, err := upgrader.Upgrade(w, r, nil)
// if err != nil {
// log.Println(err)
// return
// }
// ... Use conn to send and receive messages.
// }
//
// Call the connection's WriteMessage and ReadMessage methods to send and
// receive messages as a slice of bytes. This snippet of code shows how to echo
// messages using these methods:
//
// for {
// messageType, p, err := conn.ReadMessage()
// if err != nil {
// log.Println(err)
// return
// }
// if err := conn.WriteMessage(messageType, p); err != nil {
// log.Println(err)
// return
// }
// }
// for {
// messageType, p, err := conn.ReadMessage()
// if err != nil {
// log.Println(err)
// return
// }
// if err := conn.WriteMessage(messageType, p); err != nil {
// log.Println(err)
// return
// }
// }
//
// In above snippet of code, p is a []byte and messageType is an int with value
// websocket.BinaryMessage or websocket.TextMessage.
@ -49,24 +49,24 @@
// method to get an io.Reader and read until io.EOF is returned. This snippet
// shows how to echo messages using the NextWriter and NextReader methods:
//
// for {
// messageType, r, err := conn.NextReader()
// if err != nil {
// return
// }
// w, err := conn.NextWriter(messageType)
// if err != nil {
// return err
// }
// if _, err := io.Copy(w, r); err != nil {
// return err
// }
// if err := w.Close(); err != nil {
// return err
// }
// }
// for {
// messageType, r, err := conn.NextReader()
// if err != nil {
// return
// }
// w, err := conn.NextWriter(messageType)
// if err != nil {
// return err
// }
// if _, err := io.Copy(w, r); err != nil {
// return err
// }
// if err := w.Close(); err != nil {
// return err
// }
// }
//
// Data Messages
// # Data Messages
//
// The WebSocket protocol distinguishes between text and binary data messages.
// Text messages are interpreted as UTF-8 encoded text. The interpretation of
@ -80,7 +80,7 @@
// It is the application's responsibility to ensure that text messages are
// valid UTF-8 encoded text.
//
// Control Messages
// # Control Messages
//
// The WebSocket protocol defines three types of control messages: close, ping
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
@ -110,16 +110,16 @@
// in messages from the peer, then the application should start a goroutine to
// read and discard messages from the peer. A simple example is:
//
// func readLoop(c *websocket.Conn) {
// for {
// if _, _, err := c.NextReader(); err != nil {
// c.Close()
// break
// }
// }
// }
// func readLoop(c *websocket.Conn) {
// for {
// if _, _, err := c.NextReader(); err != nil {
// c.Close()
// break
// }
// }
// }
//
// Concurrency
// # Concurrency
//
// Connections support one concurrent reader and one concurrent writer.
//
@ -133,7 +133,7 @@
// The Close and WriteControl methods can be called concurrently with all other
// methods.
//
// Origin Considerations
// # Origin Considerations
//
// Web browsers allow Javascript applications to open a WebSocket connection to
// any host. It's up to the server to enforce an origin policy using the Origin
@ -151,7 +151,7 @@
// checking. The application is responsible for checking the Origin header
// before calling the Upgrade function.
//
// Buffers
// # Buffers
//
// Connections buffer network input and output to reduce the number
// of system calls when reading or writing messages.
@ -198,16 +198,16 @@
// buffer size has a reduced impact on total memory use and has the benefit of
// reducing system calls and frame overhead.
//
// Compression EXPERIMENTAL
// # Compression EXPERIMENTAL
//
// Per message compression extensions (RFC 7692) are experimentally supported
// by this package in a limited capacity. Setting the EnableCompression option
// to true in Dialer or Upgrader will attempt to negotiate per message deflate
// support.
//
// var upgrader = websocket.Upgrader{
// EnableCompression: true,
// }
// var upgrader = websocket.Upgrader{
// EnableCompression: true,
// }
//
// If compression was successfully negotiated with the connection's peer, any
// message received in compressed form will be automatically decompressed.
@ -216,7 +216,7 @@
// Per message compression of messages written to a connection can be enabled
// or disabled by calling the corresponding Conn method:
//
// conn.EnableWriteCompression(false)
// conn.EnableWriteCompression(false)
//
// Currently this package does not support compression with "context takeover".
// This means that messages must be compressed and decompressed in isolation,

View File

@ -1,3 +1,4 @@
//go:build !appengine
// +build !appengine
package log

View File

@ -1,3 +1,4 @@
//go:build appengine
// +build appengine
package log

View File

@ -2,8 +2,8 @@
// easyjson_nounsafe nor appengine build tag is set. See README notes
// for more details.
//+build !easyjson_nounsafe
//+build !appengine
//go:build !easyjson_nounsafe && !appengine
// +build !easyjson_nounsafe,!appengine
package jlexer

View File

@ -1,7 +1,8 @@
// This file is included to the build if any of the buildtags below
// are defined. Refer to README notes for more details.
//+build easyjson_nounsafe appengine
//go:build easyjson_nounsafe || appengine
// +build easyjson_nounsafe appengine
package jlexer

View File

@ -42,7 +42,8 @@ func IsTerminal(fd uintptr) bool {
// Check pipe name is used for cygwin/msys2 pty.
// Cygwin/MSYS2 PTY has a name like:
// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
//
// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
func isCygwinPipeName(name string) bool {
token := strings.Split(name, "-")
if len(token) < 5 {

View File

@ -1,4 +1,5 @@
//+build !noasm,!appengine,gc
//go:build !noasm && !appengine && gc
// +build !noasm,!appengine,gc
// Copyright (c) 2020 MinIO Inc. All rights reserved.
// Use of this source code is governed by a license that can be

View File

@ -1,4 +1,5 @@
//+build !noasm,!appengine,gc
//go:build !noasm && !appengine && gc
// +build !noasm,!appengine,gc
// Copyright (c) 2020 MinIO Inc. All rights reserved.
// Use of this source code is governed by a license that can be

View File

@ -1,4 +1,5 @@
//+build !noasm,!appengine,gc
//go:build !noasm && !appengine && gc
// +build !noasm,!appengine,gc
// Copyright (c) 2020 MinIO Inc. All rights reserved.
// Use of this source code is governed by a license that can be

View File

@ -1,4 +1,5 @@
//+build !amd64 appengine !gc noasm
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
// Copyright (c) 2020 MinIO Inc. All rights reserved.
// Use of this source code is governed by a license that can be

View File

@ -1,4 +1,5 @@
//+build !noasm,!appengine,gc
//go:build !noasm && !appengine && gc
// +build !noasm,!appengine,gc
// Copyright (c) 2020 MinIO Inc. All rights reserved.
// Use of this source code is governed by a license that can be

View File

@ -1,11 +1,11 @@
// Code generated by command: go run gen.go -out ../md5block_amd64.s -stubs ../md5block_amd64.go -pkg=md5simd. DO NOT EDIT.
// +build !appengine
// +build !noasm
// +build gc
//go:build !appengine && !noasm && gc
// +build !appengine,!noasm,gc
package md5simd
// Encode p to digest
//
//go:noescape
func blockScalar(dig *[4]uint32, p []byte)

View File

@ -9,84 +9,84 @@
//
// The simplest function to start with is Decode.
//
// Field Tags
// # Field Tags
//
// When decoding to a struct, mapstructure will use the field name by
// default to perform the mapping. For example, if a struct has a field
// "Username" then mapstructure will look for a key in the source value
// of "username" (case insensitive).
//
// type User struct {
// Username string
// }
// type User struct {
// Username string
// }
//
// You can change the behavior of mapstructure by using struct tags.
// The default struct tag that mapstructure looks for is "mapstructure"
// but you can customize it using DecoderConfig.
//
// Renaming Fields
// # Renaming Fields
//
// To rename the key that mapstructure looks for, use the "mapstructure"
// tag and set a value directly. For example, to change the "username" example
// above to "user":
//
// type User struct {
// Username string `mapstructure:"user"`
// }
// type User struct {
// Username string `mapstructure:"user"`
// }
//
// Embedded Structs and Squashing
// # Embedded Structs and Squashing
//
// Embedded structs are treated as if they're another field with that name.
// By default, the two structs below are equivalent when decoding with
// mapstructure:
//
// type Person struct {
// Name string
// }
// type Person struct {
// Name string
// }
//
// type Friend struct {
// Person
// }
// type Friend struct {
// Person
// }
//
// type Friend struct {
// Person Person
// }
// type Friend struct {
// Person Person
// }
//
// This would require an input that looks like below:
//
// map[string]interface{}{
// "person": map[string]interface{}{"name": "alice"},
// }
// map[string]interface{}{
// "person": map[string]interface{}{"name": "alice"},
// }
//
// If your "person" value is NOT nested, then you can append ",squash" to
// your tag value and mapstructure will treat it as if the embedded struct
// were part of the struct directly. Example:
//
// type Friend struct {
// Person `mapstructure:",squash"`
// }
// type Friend struct {
// Person `mapstructure:",squash"`
// }
//
// Now the following input would be accepted:
//
// map[string]interface{}{
// "name": "alice",
// }
// map[string]interface{}{
// "name": "alice",
// }
//
// When decoding from a struct to a map, the squash tag squashes the struct
// fields into a single map. Using the example structs from above:
//
// Friend{Person: Person{Name: "alice"}}
// Friend{Person: Person{Name: "alice"}}
//
// Will be decoded into a map:
//
// map[string]interface{}{
// "name": "alice",
// }
// map[string]interface{}{
// "name": "alice",
// }
//
// DecoderConfig has a field that changes the behavior of mapstructure
// to always squash embedded structs.
//
// Remainder Values
// # Remainder Values
//
// If there are any unmapped keys in the source value, mapstructure by
// default will silently ignore them. You can error by setting ErrorUnused
@ -98,20 +98,20 @@
// probably be a "map[string]interface{}" or "map[interface{}]interface{}".
// See example below:
//
// type Friend struct {
// Name string
// Other map[string]interface{} `mapstructure:",remain"`
// }
// type Friend struct {
// Name string
// Other map[string]interface{} `mapstructure:",remain"`
// }
//
// Given the input below, Other would be populated with the other
// values that weren't used (everything but "name"):
//
// map[string]interface{}{
// "name": "bob",
// "address": "123 Maple St.",
// }
// map[string]interface{}{
// "name": "bob",
// "address": "123 Maple St.",
// }
//
// Omit Empty Values
// # Omit Empty Values
//
// When decoding from a struct to any other value, you may use the
// ",omitempty" suffix on your tag to omit that value if it equates to
@ -122,37 +122,37 @@
// field value is zero and a numeric type, the field is empty, and it won't
// be encoded into the destination type.
//
// type Source struct {
// Age int `mapstructure:",omitempty"`
// }
// type Source struct {
// Age int `mapstructure:",omitempty"`
// }
//
// Unexported fields
// # Unexported fields
//
// Since unexported (private) struct fields cannot be set outside the package
// where they are defined, the decoder will simply skip them.
//
// For this output type definition:
//
// type Exported struct {
// private string // this unexported field will be skipped
// Public string
// }
// type Exported struct {
// private string // this unexported field will be skipped
// Public string
// }
//
// Using this map as input:
//
// map[string]interface{}{
// "private": "I will be ignored",
// "Public": "I made it through!",
// }
// map[string]interface{}{
// "private": "I will be ignored",
// "Public": "I made it through!",
// }
//
// The following struct will be decoded:
//
// type Exported struct {
// private: "" // field is left with an empty string (zero value)
// Public: "I made it through!"
// }
// type Exported struct {
// private: "" // field is left with an empty string (zero value)
// Public: "I made it through!"
// }
//
// Other Configuration
// # Other Configuration
//
// mapstructure is highly configurable. See the DecoderConfig struct
// for other features and options that are supported.

View File

@ -199,12 +199,15 @@ func (m *SequenceMatcher) isBJunk(s string) bool {
// If IsJunk is not defined:
//
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
// alo <= i <= i+k <= ahi
// blo <= j <= j+k <= bhi
//
// alo <= i <= i+k <= ahi
// blo <= j <= j+k <= bhi
//
// and for all (i',j',k') meeting those conditions,
// k >= k'
// i <= i'
// and if i == i', j <= j'
//
// k >= k'
// i <= i'
// and if i == i', j <= j'
//
// In other words, of all maximal matching blocks, return one that
// starts earliest in a, and of all those maximal matching blocks that

View File

@ -1,3 +1,4 @@
//go:build darwin
// +build darwin
package xid

View File

@ -1,3 +1,4 @@
//go:build !darwin && !linux && !freebsd && !windows
// +build !darwin,!linux,!freebsd,!windows
package xid

View File

@ -1,3 +1,4 @@
//go:build freebsd
// +build freebsd
package xid

View File

@ -1,3 +1,4 @@
//go:build linux
// +build linux
package xid

View File

@ -1,3 +1,4 @@
//go:build windows
// +build windows
package xid

2
vendor/github.com/rs/xid/id.go generated vendored
View File

@ -43,8 +43,8 @@ package xid
import (
"bytes"
"crypto/sha256"
"crypto/rand"
"crypto/sha256"
"database/sql/driver"
"encoding/binary"
"fmt"

View File

@ -16,7 +16,7 @@
// If you're interested in calling Blackfriday from command line, see
// https://github.com/russross/blackfriday-tool.
//
// Sanitized Anchor Names
// # Sanitized Anchor Names
//
// Blackfriday includes an algorithm for creating sanitized anchor names
// corresponding to a given input text. This algorithm is used to create

View File

@ -735,7 +735,9 @@ func linkEndsWithEntity(data []byte, linkEnd int) bool {
}
// hasPrefixCaseInsensitive is a custom implementation of
// strings.HasPrefix(strings.ToLower(s), prefix)
//
// strings.HasPrefix(strings.ToLower(s), prefix)
//
// we rolled our own because ToLower pulls in a huge machinery of lowercasing
// anything from Unicode and that's very slow. Since this func will only be
// used on ASCII protocol prefixes, we can take shortcuts.

View File

@ -345,8 +345,8 @@ func WithNoExtensions() Option {
// In Markdown, the link reference syntax can be made to resolve a link to
// a reference instead of an inline URL, in one of the following ways:
//
// * [link text][refid]
// * [refid][]
// - [link text][refid]
// - [refid][]
//
// Usually, the refid is defined at the bottom of the Markdown document. If
// this override function is provided, the refid is passed to the override
@ -363,7 +363,9 @@ func WithRefOverride(o ReferenceOverrideFunc) Option {
// block of markdown-encoded text.
//
// The simplest invocation of Run takes one argument, input:
// output := Run(input)
//
// output := Run(input)
//
// This will parse the input with CommonExtensions enabled and render it with
// the default HTMLRenderer (with CommonHTMLFlags).
//
@ -371,13 +373,15 @@ func WithRefOverride(o ReferenceOverrideFunc) Option {
// type does not contain exported fields, you can not use it directly. Instead,
// use the With* functions. For example, this will call the most basic
// functionality, with no extensions:
// output := Run(input, WithNoExtensions())
//
// output := Run(input, WithNoExtensions())
//
// You can use any number of With* arguments, even contradicting ones. They
// will be applied in order of appearance and the latter will override the
// former:
// output := Run(input, WithNoExtensions(), WithExtensions(exts),
// WithRenderer(yourRenderer))
//
// output := Run(input, WithNoExtensions(), WithExtensions(exts),
// WithRenderer(yourRenderer))
func Run(input []byte, opts ...Option) []byte {
r := NewHTMLRenderer(HTMLRendererParameters{
Flags: CommonHTMLFlags,
@ -491,35 +495,35 @@ func (p *Markdown) parseRefsToAST() {
//
// Consider this markdown with reference-style links:
//
// [link][ref]
// [link][ref]
//
// [ref]: /url/ "tooltip title"
// [ref]: /url/ "tooltip title"
//
// It will be ultimately converted to this HTML:
//
// <p><a href=\"/url/\" title=\"title\">link</a></p>
// <p><a href=\"/url/\" title=\"title\">link</a></p>
//
// And a reference structure will be populated as follows:
//
// p.refs["ref"] = &reference{
// link: "/url/",
// title: "tooltip title",
// }
// p.refs["ref"] = &reference{
// link: "/url/",
// title: "tooltip title",
// }
//
// Alternatively, reference can contain information about a footnote. Consider
// this markdown:
//
// Text needing a footnote.[^a]
// Text needing a footnote.[^a]
//
// [^a]: This is the note
// [^a]: This is the note
//
// A reference structure will be populated as follows:
//
// p.refs["a"] = &reference{
// link: "a",
// title: "This is the note",
// noteID: <some positive int>,
// }
// p.refs["a"] = &reference{
// link: "a",
// title: "This is the note",
// noteID: <some positive int>,
// }
//
// TODO: As you can see, it begs for splitting into two dedicated structures
// for refs and for footnotes.

View File

@ -1,4 +1,4 @@
//go:build !darwin && !linux && !freebsd && !openbsd && !netbsd && !windows && !solaris && !aix
//go:build !darwin && !linux && !freebsd && !openbsd && !netbsd && !windows && !solaris && !aix
// +build !darwin,!linux,!freebsd,!openbsd,!netbsd,!windows,!solaris,!aix
package disk

View File

@ -96,7 +96,7 @@ func ParseNetstat(output string, mode string,
n.PacketsSent = parsed[2]
n.Dropout = parsed[3]
case "ine":
n.Errin = parsed[0]
n.Errin = parsed[0]
n.Errout = parsed[1]
}

View File

@ -65,9 +65,9 @@ func ExecuteFunc(template, startTag, endTag string, w io.Writer, f TagFunc) (int
// values from the map m and writes the result to the given writer w.
//
// Substitution map m may contain values with the following types:
// * []byte - the fastest value type
// * string - convenient value type
// * TagFunc - flexible value type
// - []byte - the fastest value type
// - string - convenient value type
// - TagFunc - flexible value type
//
// Returns the number of bytes written to w.
//
@ -81,9 +81,9 @@ func Execute(template, startTag, endTag string, w io.Writer, m map[string]interf
// This can be used as a drop-in replacement for strings.Replacer
//
// Substitution map m may contain values with the following types:
// * []byte - the fastest value type
// * string - convenient value type
// * TagFunc - flexible value type
// - []byte - the fastest value type
// - string - convenient value type
// - TagFunc - flexible value type
//
// Returns the number of bytes written to w.
//
@ -134,9 +134,9 @@ var byteBufferPool bytebufferpool.Pool
// values from the map m and returns the result.
//
// Substitution map m may contain values with the following types:
// * []byte - the fastest value type
// * string - convenient value type
// * TagFunc - flexible value type
// - []byte - the fastest value type
// - string - convenient value type
// - TagFunc - flexible value type
//
// This function is optimized for constantly changing templates.
// Use Template.ExecuteString for frozen templates.
@ -148,9 +148,9 @@ func ExecuteString(template, startTag, endTag string, m map[string]interface{})
// This can be used as a drop-in replacement for strings.Replacer
//
// Substitution map m may contain values with the following types:
// * []byte - the fastest value type
// * string - convenient value type
// * TagFunc - flexible value type
// - []byte - the fastest value type
// - string - convenient value type
// - TagFunc - flexible value type
//
// This function is optimized for constantly changing templates.
// Use Template.ExecuteStringStd for frozen templates.
@ -304,9 +304,9 @@ func (t *Template) ExecuteFunc(w io.Writer, f TagFunc) (int64, error) {
// values from the map m and writes the result to the given writer w.
//
// Substitution map m may contain values with the following types:
// * []byte - the fastest value type
// * string - convenient value type
// * TagFunc - flexible value type
// - []byte - the fastest value type
// - string - convenient value type
// - TagFunc - flexible value type
//
// Returns the number of bytes written to w.
func (t *Template) Execute(w io.Writer, m map[string]interface{}) (int64, error) {
@ -317,9 +317,9 @@ func (t *Template) Execute(w io.Writer, m map[string]interface{}) (int64, error)
// This can be used as a drop-in replacement for strings.Replacer
//
// Substitution map m may contain values with the following types:
// * []byte - the fastest value type
// * string - convenient value type
// * TagFunc - flexible value type
// - []byte - the fastest value type
// - string - convenient value type
// - TagFunc - flexible value type
//
// Returns the number of bytes written to w.
func (t *Template) ExecuteStd(w io.Writer, m map[string]interface{}) (int64, error) {
@ -365,9 +365,9 @@ func (t *Template) ExecuteFuncStringWithErr(f TagFunc) (string, error) {
// values from the map m and returns the result.
//
// Substitution map m may contain values with the following types:
// * []byte - the fastest value type
// * string - convenient value type
// * TagFunc - flexible value type
// - []byte - the fastest value type
// - string - convenient value type
// - TagFunc - flexible value type
//
// This function is optimized for frozen templates.
// Use ExecuteString for constantly changing templates.
@ -379,9 +379,9 @@ func (t *Template) ExecuteString(m map[string]interface{}) string {
// This can be used as a drop-in replacement for strings.Replacer
//
// Substitution map m may contain values with the following types:
// * []byte - the fastest value type
// * string - convenient value type
// * TagFunc - flexible value type
// - []byte - the fastest value type
// - string - convenient value type
// - TagFunc - flexible value type
//
// This function is optimized for frozen templates.
// Use ExecuteStringStd for constantly changing templates.

Some files were not shown because too many files have changed in this diff Show More