Add Range support for HTTP GET requests on a mounted filesystem
This commit is contained in:
parent
8e29cbdd12
commit
b76e6a9b2c
@ -1,9 +1,16 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/core/v16/http/api"
|
||||
"github.com/datarhei/core/v16/http/fs"
|
||||
@ -67,12 +74,56 @@ func (h *FSHandler) GetFile(c echo.Context) error {
|
||||
}
|
||||
|
||||
c.Response().Header().Set(echo.HeaderContentType, mimeType)
|
||||
c.Response().Header().Set("Accept-Ranges", "bytes")
|
||||
|
||||
if c.Request().Method == "HEAD" {
|
||||
c.Response().Header().Set(echo.HeaderContentLength, strconv.FormatInt(stat.Size(), 10))
|
||||
return c.Blob(http.StatusOK, "application/data", nil)
|
||||
}
|
||||
|
||||
return c.Stream(http.StatusOK, "application/data", file)
|
||||
var streamFile io.Reader = file
|
||||
status := http.StatusOK
|
||||
|
||||
ifRange := c.Request().Header.Get("If-Range")
|
||||
if len(ifRange) != 0 {
|
||||
ifTime, err := time.Parse("Mon, 02 Jan 2006 15:04:05 MST", ifRange)
|
||||
if err != nil {
|
||||
return api.Err(http.StatusBadRequest, "", "%s", err)
|
||||
}
|
||||
|
||||
if ifTime.Unix() != stat.ModTime().Unix() {
|
||||
c.Request().Header.Del("Range")
|
||||
}
|
||||
}
|
||||
|
||||
byteRange := c.Request().Header.Get("Range")
|
||||
if len(byteRange) != 0 {
|
||||
ranges, err := parseRange(byteRange, stat.Size())
|
||||
if err != nil {
|
||||
return api.Err(http.StatusRequestedRangeNotSatisfiable, "")
|
||||
}
|
||||
|
||||
if len(ranges) > 1 {
|
||||
return api.Err(http.StatusNotImplemented, "", "multipart range requests are not supported")
|
||||
}
|
||||
|
||||
if len(ranges) == 1 {
|
||||
_, err := file.Seek(ranges[0].start, io.SeekStart)
|
||||
if err != nil {
|
||||
return api.Err(http.StatusRequestedRangeNotSatisfiable, "")
|
||||
}
|
||||
|
||||
c.Response().Header().Set("Content-Range", ranges[0].contentRange(stat.Size()))
|
||||
streamFile = &limitReader{
|
||||
r: streamFile,
|
||||
size: int(ranges[0].length),
|
||||
}
|
||||
|
||||
status = http.StatusPartialContent
|
||||
}
|
||||
}
|
||||
|
||||
return c.Stream(status, "application/data", streamFile)
|
||||
}
|
||||
|
||||
func (h *FSHandler) PutFile(c echo.Context) error {
|
||||
@ -162,3 +213,129 @@ func (h *FSHandler) ListFiles(c echo.Context) error {
|
||||
|
||||
return c.JSON(http.StatusOK, fileinfos)
|
||||
}
|
||||
|
||||
type limitReader struct {
|
||||
r io.Reader
|
||||
size int
|
||||
}
|
||||
|
||||
func (l *limitReader) Read(p []byte) (int, error) {
|
||||
if l.size == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
len := len(p)
|
||||
|
||||
if len > l.size {
|
||||
p = p[:l.size]
|
||||
}
|
||||
|
||||
i, err := l.r.Read(p)
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
|
||||
l.size -= i
|
||||
|
||||
return i, nil
|
||||
}
|
||||
|
||||
// From: github.com/golang/go/net/http/fs.go@7dc9fcb

// errNoOverlap is returned by parseRange if first-byte-pos of
// all of the byte-range-spec values is greater than the content size.
var errNoOverlap = errors.New("invalid range: failed to overlap")

// httpRange specifies the byte range to be sent to the client.
type httpRange struct {
	start, length int64
}

// contentRange renders the range as a Content-Range header value
// (RFC 7233 Section 4.2), e.g. "bytes 0-499/1234".
func (r httpRange) contentRange(size int64) string {
	return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size)
}

/*
func (r httpRange) mimeHeader(contentType string, size int64) textproto.MIMEHeader {
	return textproto.MIMEHeader{
		"Content-Range": {r.contentRange(size)},
		"Content-Type":  {contentType},
	}
}
*/

// parseRange parses a Range header string as per RFC 7233.
// errNoOverlap is returned if none of the ranges overlap.
func parseRange(s string, size int64) ([]httpRange, error) {
	if s == "" {
		return nil, nil // header not present
	}

	const prefix = "bytes="
	if !strings.HasPrefix(s, prefix) {
		return nil, errors.New("invalid range")
	}

	var (
		ranges        []httpRange
		sawNonOverlap bool
	)

	for _, spec := range strings.Split(s[len(prefix):], ",") {
		spec = textproto.TrimString(spec)
		if spec == "" {
			continue
		}

		startStr, endStr, ok := strings.Cut(spec, "-")
		if !ok {
			return nil, errors.New("invalid range")
		}
		startStr = textproto.TrimString(startStr)
		endStr = textproto.TrimString(endStr)

		var rng httpRange

		if startStr == "" {
			// <suffix-length>: endStr gives the range start relative to
			// the end of the file and must be a non-negative integer as
			// per RFC 7233 Section 2.1 "Byte-Ranges".
			if endStr == "" || endStr[0] == '-' {
				return nil, errors.New("invalid range")
			}
			n, err := strconv.ParseInt(endStr, 10, 64)
			if n < 0 || err != nil {
				return nil, errors.New("invalid range")
			}
			if n > size {
				n = size
			}
			rng.start = size - n
			rng.length = size - rng.start
		} else {
			first, err := strconv.ParseInt(startStr, 10, 64)
			if err != nil || first < 0 {
				return nil, errors.New("invalid range")
			}
			if first >= size {
				// The range begins after the size of the content,
				// so it does not overlap.
				sawNonOverlap = true
				continue
			}
			rng.start = first

			if endStr == "" {
				// No end specified: the range extends to end of file.
				rng.length = size - rng.start
			} else {
				last, err := strconv.ParseInt(endStr, 10, 64)
				if err != nil || rng.start > last {
					return nil, errors.New("invalid range")
				}
				if last >= size {
					last = size - 1
				}
				rng.length = last - rng.start + 1
			}
		}

		ranges = append(ranges, rng)
	}

	if sawNonOverlap && len(ranges) == 0 {
		// The specified ranges did not overlap with the content.
		return nil, errNoOverlap
	}

	return ranges, nil
}
|
||||
|
||||
5
http/middleware/cache/cache.go
vendored
5
http/middleware/cache/cache.go
vendored
@ -67,6 +67,11 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {
|
||||
return next(c)
|
||||
}
|
||||
|
||||
if len(req.Header.Get("Range")) != 0 {
|
||||
res.Header().Set("X-Cache", "SKIP RANGEREQ")
|
||||
return next(c)
|
||||
}
|
||||
|
||||
if obj, expireIn, _ := config.Cache.Get(key); obj == nil {
|
||||
// cache miss
|
||||
writer := res.Writer
|
||||
|
||||
@ -121,6 +121,10 @@ func (f *diskFile) Read(p []byte) (int, error) {
|
||||
return f.file.Read(p)
|
||||
}
|
||||
|
||||
// Seek implements io.Seeker by delegating to the underlying *os.File,
// which enables HTTP Range requests on disk-backed files.
func (f *diskFile) Seek(offset int64, whence int) (int64, error) {
	return f.file.Seek(offset, whence)
}
|
||||
|
||||
// diskFilesystem implements the Filesystem interface
|
||||
type diskFilesystem struct {
|
||||
metadata map[string]string
|
||||
|
||||
@ -31,7 +31,7 @@ type FileInfo interface {
|
||||
|
||||
// File provides access to a single file.
|
||||
type File interface {
|
||||
io.ReadCloser
|
||||
io.ReadSeekCloser
|
||||
|
||||
// Name returns the Name of the file.
|
||||
Name() string
|
||||
|
||||
33
io/fs/mem.go
33
io/fs/mem.go
@ -64,11 +64,16 @@ func (f *memFileInfo) IsDir() bool {
|
||||
return f.dir
|
||||
}
|
||||
|
||||
type memFile struct {
|
||||
type internalMemFile struct {
|
||||
memFileInfo
|
||||
data *bytes.Buffer // Contents of the file
|
||||
}
|
||||
|
||||
type memFile struct {
|
||||
memFileInfo
|
||||
data *bytes.Reader
|
||||
}
|
||||
|
||||
func (f *memFile) Name() string {
|
||||
return f.name
|
||||
}
|
||||
@ -93,6 +98,14 @@ func (f *memFile) Read(p []byte) (int, error) {
|
||||
return f.data.Read(p)
|
||||
}
|
||||
|
||||
func (f *memFile) Seek(offset int64, whence int) (int64, error) {
|
||||
if f.data == nil {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
return f.data.Seek(offset, whence)
|
||||
}
|
||||
|
||||
func (f *memFile) Close() error {
|
||||
if f.data == nil {
|
||||
return io.EOF
|
||||
@ -108,7 +121,7 @@ type memFilesystem struct {
|
||||
metaLock sync.RWMutex
|
||||
|
||||
// Mapping of path to file
|
||||
files map[string]*memFile
|
||||
files map[string]*internalMemFile
|
||||
|
||||
// Mutex for the files map
|
||||
filesLock sync.RWMutex
|
||||
@ -137,7 +150,7 @@ func NewMemFilesystem(config MemConfig) (Filesystem, error) {
|
||||
|
||||
fs.logger = fs.logger.WithField("type", "mem")
|
||||
|
||||
fs.files = make(map[string]*memFile)
|
||||
fs.files = make(map[string]*internalMemFile)
|
||||
|
||||
fs.dataPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
@ -270,7 +283,7 @@ func (fs *memFilesystem) Open(path string) File {
|
||||
|
||||
if file.data != nil {
|
||||
newFile.lastMod = file.lastMod
|
||||
newFile.data = bytes.NewBuffer(file.data.Bytes())
|
||||
newFile.data = bytes.NewReader(file.data.Bytes())
|
||||
newFile.size = int64(newFile.data.Len())
|
||||
}
|
||||
|
||||
@ -323,7 +336,7 @@ func (fs *memFilesystem) Symlink(oldname, newname string) error {
|
||||
}
|
||||
}
|
||||
|
||||
newFile := &memFile{
|
||||
newFile := &internalMemFile{
|
||||
memFileInfo: memFileInfo{
|
||||
name: newname,
|
||||
dir: false,
|
||||
@ -346,7 +359,7 @@ func (fs *memFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool,
|
||||
return -1, false, fmt.Errorf("path not writeable")
|
||||
}
|
||||
|
||||
newFile := &memFile{
|
||||
newFile := &internalMemFile{
|
||||
memFileInfo: memFileInfo{
|
||||
name: path,
|
||||
dir: false,
|
||||
@ -414,7 +427,7 @@ func (fs *memFilesystem) Purge(size int64) int64 {
|
||||
fs.filesLock.Lock()
|
||||
defer fs.filesLock.Unlock()
|
||||
|
||||
files := []*memFile{}
|
||||
files := []*internalMemFile{}
|
||||
|
||||
for _, f := range fs.files {
|
||||
files = append(files, f)
|
||||
@ -466,7 +479,7 @@ func (fs *memFilesystem) MkdirAll(path string, perm os.FileMode) error {
|
||||
return os.ErrExist
|
||||
}
|
||||
|
||||
f := &memFile{
|
||||
f := &internalMemFile{
|
||||
memFileInfo: memFileInfo{
|
||||
name: path,
|
||||
size: 0,
|
||||
@ -539,7 +552,7 @@ func (fs *memFilesystem) Copy(src, dst string) error {
|
||||
if ok {
|
||||
fs.currentSize -= dstFile.size
|
||||
} else {
|
||||
dstFile = &memFile{
|
||||
dstFile = &internalMemFile{
|
||||
memFileInfo: memFileInfo{
|
||||
name: dst,
|
||||
dir: false,
|
||||
@ -664,7 +677,7 @@ func (fs *memFilesystem) RemoveAll() int64 {
|
||||
|
||||
size := fs.currentSize
|
||||
|
||||
fs.files = make(map[string]*memFile)
|
||||
fs.files = make(map[string]*internalMemFile)
|
||||
fs.currentSize = 0
|
||||
|
||||
return size
|
||||
|
||||
22
io/fs/s3.go
22
io/fs/s3.go
@ -622,20 +622,12 @@ func (f *s3FileInfo) IsDir() bool {
|
||||
}
|
||||
|
||||
type s3File struct {
|
||||
data io.ReadCloser
|
||||
data io.ReadSeekCloser
|
||||
name string
|
||||
size int64
|
||||
lastModified time.Time
|
||||
}
|
||||
|
||||
func (f *s3File) Read(p []byte) (int, error) {
|
||||
return f.data.Read(p)
|
||||
}
|
||||
|
||||
func (f *s3File) Close() error {
|
||||
return f.data.Close()
|
||||
}
|
||||
|
||||
func (f *s3File) Name() string {
|
||||
return f.name
|
||||
}
|
||||
@ -647,3 +639,15 @@ func (f *s3File) Stat() (FileInfo, error) {
|
||||
lastModified: f.lastModified,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Read implements io.Reader by delegating to the S3 object stream.
func (f *s3File) Read(p []byte) (int, error) {
	return f.data.Read(p)
}
|
||||
|
||||
// Seek implements io.Seeker by delegating to the S3 object stream,
// which enables HTTP Range requests on S3-backed files.
func (f *s3File) Seek(offset int64, whence int) (int64, error) {
	return f.data.Seek(offset, whence)
}
|
||||
|
||||
// Close releases the underlying S3 object stream.
func (f *s3File) Close() error {
	return f.data.Close()
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user