Add S3 config options

This commit is contained in:
Ingo Oppermann 2022-08-20 08:45:07 +03:00
parent 1183de560a
commit baa9a80015
No known key found for this signature in database
GPG Key ID: 2AB32426E9DD229E
5 changed files with 85 additions and 42 deletions

View File

@ -410,31 +410,39 @@ func (a *api) start() error {
a.memfs.Resize(cfg.Storage.Memory.Size * 1024 * 1024)
}
baseS3FS := url.URL{
Scheme: "http",
Path: "/s3",
}
if cfg.Storage.S3.Enable {
baseS3FS := url.URL{
Scheme: "http",
Path: "/s3",
}
host, port, _ = gonet.SplitHostPort(cfg.Address)
if len(host) == 0 {
baseS3FS.Host = "localhost:" + port
} else {
baseS3FS.Host = cfg.Address
}
host, port, _ := gonet.SplitHostPort(cfg.Address)
if len(host) == 0 {
baseS3FS.Host = "localhost:" + port
} else {
baseS3FS.Host = cfg.Address
}
if cfg.Storage.Memory.Auth.Enable {
baseS3FS.User = url.UserPassword(cfg.Storage.Memory.Auth.Username, cfg.Storage.Memory.Auth.Password)
}
if cfg.Storage.Memory.Auth.Enable {
baseS3FS.User = url.UserPassword(cfg.Storage.Memory.Auth.Username, cfg.Storage.Memory.Auth.Password)
}
s3fs, err := fs.NewS3Filesystem(fs.S3Config{
Base: baseS3FS.String(),
Logger: a.log.logger.core.WithComponent("S3FS"),
})
if err != nil {
return err
}
s3fs, err := fs.NewS3Filesystem(fs.S3Config{
Base: baseS3FS.String(),
Endpoint: cfg.Storage.S3.Endpoint,
AccessKeyID: cfg.Storage.S3.AccessKeyID,
SecretAccessKey: cfg.Storage.S3.SecretAccessKey,
Region: cfg.Storage.S3.Region,
Bucket: cfg.Storage.S3.Bucket,
UseSSL: cfg.Storage.S3.UseSSL,
Logger: a.log.logger.core.WithComponent("S3"),
})
if err != nil {
return err
}
a.s3fs = s3fs
a.s3fs = s3fs
}
var portrange net.Portranger
@ -580,7 +588,9 @@ func (a *api) start() error {
metrics.Register(monitor.NewDiskCollector(a.diskfs.Base()))
metrics.Register(monitor.NewFilesystemCollector("diskfs", diskfs))
metrics.Register(monitor.NewFilesystemCollector("memfs", a.memfs))
metrics.Register(monitor.NewFilesystemCollector("s3fs", a.s3fs))
if a.s3fs != nil {
metrics.Register(monitor.NewFilesystemCollector("s3fs", a.s3fs))
}
metrics.Register(monitor.NewRestreamCollector(a.restream))
metrics.Register(monitor.NewFFmpegCollector(a.ffmpeg))
metrics.Register(monitor.NewSessionCollector(a.sessions, []string{}))
@ -834,9 +844,9 @@ func (a *api) start() error {
Filesystem: a.memfs,
},
S3FS: http.MemFSConfig{
EnableAuth: cfg.Storage.Memory.Auth.Enable,
Username: cfg.Storage.Memory.Auth.Username,
Password: cfg.Storage.Memory.Auth.Password,
EnableAuth: cfg.Storage.S3.Auth.Enable,
Username: cfg.Storage.S3.Auth.Username,
Password: cfg.Storage.S3.Auth.Password,
Filesystem: a.s3fs,
},
IPLimiter: iplimiter,
@ -902,9 +912,9 @@ func (a *api) start() error {
Filesystem: a.memfs,
},
S3FS: http.MemFSConfig{
EnableAuth: cfg.Storage.Memory.Auth.Enable,
Username: cfg.Storage.Memory.Auth.Username,
Password: cfg.Storage.Memory.Auth.Password,
EnableAuth: cfg.Storage.S3.Auth.Enable,
Username: cfg.Storage.S3.Auth.Username,
Password: cfg.Storage.S3.Auth.Password,
Filesystem: a.s3fs,
},
IPLimiter: iplimiter,

View File

@ -199,6 +199,18 @@ func (d *Config) init() {
d.val(newInt64Value(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. allowed megabytes for /memfs, 0 for unlimited", false, false)
d.val(newBoolValue(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false)
// Storage (S3)
d.val(newBoolValue(&d.Storage.S3.Enable, true), "storage.s3.enable", "CORE_STORAGE_S3_ENABLE", nil, "Enable S3 storage", false, false)
d.val(newBoolValue(&d.Storage.S3.Auth.Enable, true), "storage.s3.auth.enable", "CORE_STORAGE_S3_AUTH_ENABLE", nil, "Enable basic auth for PUT, POST, and DELETE on /s3", false, false)
d.val(newStringValue(&d.Storage.S3.Auth.Username, "admin"), "storage.s3.auth.username", "CORE_STORAGE_S3_AUTH_USERNAME", nil, "Username for Basic-Auth of /s3", false, false)
d.val(newStringValue(&d.Storage.S3.Auth.Password, rand.StringAlphanumeric(18)), "storage.s3.auth.password", "CORE_STORAGE_S3_AUTH_PASSWORD", nil, "Password for Basic-Auth of /s3", false, true)
d.val(newStringValue(&d.Storage.S3.Endpoint, ""), "storage.s3.endpoint", "CORE_STORAGE_S3_ENDPOINT", nil, "S3 host", false, false)
d.val(newStringValue(&d.Storage.S3.AccessKeyID, ""), "storage.s3.access_key_id", "CORE_STORAGE_S3_ACCESS_KEY_ID", nil, "S3 access key ID", false, false)
d.val(newStringValue(&d.Storage.S3.SecretAccessKey, ""), "storage.s3.secret_access_key", "CORE_STORAGE_S3_SECRET_ACCESS_KEY", nil, "S3 secret access key", false, true)
d.val(newStringValue(&d.Storage.S3.Bucket, ""), "storage.s3.bucket", "CORE_STORAGE_S3_BUCKET", nil, "Bucket name, will be created if it doesn't exist", false, false)
d.val(newStringValue(&d.Storage.S3.Region, ""), "storage.s3.region", "CORE_STORAGE_S3_REGION", nil, "S3 region", false, false)
d.val(newBoolValue(&d.Storage.S3.UseSSL, true), "storage.s3.use_ssl", "CORE_STORAGE_S3_USE_SSL", nil, "Enable SSL for communication (recommended)", false, false)
// Storage (CORS)
d.val(newCORSOriginsValue(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false)

View File

@ -81,6 +81,20 @@ type Data struct {
Size int64 `json:"max_size_mbytes"`
Purge bool `json:"purge"`
} `json:"memory"`
S3 struct {
Enable bool `json:"enable"`
Auth struct {
Enable bool `json:"enable"`
Username string `json:"username"`
Password string `json:"password"`
} `json:"auth"`
Endpoint string `json:"endpoint"`
AccessKeyID string `json:"access_key_id"`
SecretAccessKey string `json:"secret_access_key"`
Bucket string `json:"bucket"`
Region string `json:"region"`
UseSSL bool `json:"use_ssl"`
} `json:"s3"`
CORS struct {
Origins []string `json:"origins"`
} `json:"cors"`

View File

@ -507,22 +507,22 @@ func (s *server) setRoutes() {
// S3 FS
if s.handler.s3fs != nil {
memfs := s.router.Group("/s3fs/*")
memfs.Use(mwmime.NewWithConfig(mwmime.Config{
s3fs := s.router.Group("/s3/*")
s3fs.Use(mwmime.NewWithConfig(mwmime.Config{
MimeTypesFile: s.mimeTypesFile,
DefaultContentType: "application/data",
}))
memfs.Use(mwgzip.NewWithConfig(mwgzip.Config{
s3fs.Use(mwgzip.NewWithConfig(mwgzip.Config{
Level: mwgzip.BestSpeed,
MinLength: 1000,
ContentTypes: s.gzip.mimetypes,
}))
if s.middleware.session != nil {
memfs.Use(s.middleware.session)
s3fs.Use(s.middleware.session)
}
memfs.HEAD("", s.handler.s3fs.GetFile)
memfs.GET("", s.handler.s3fs.GetFile)
s3fs.HEAD("", s.handler.s3fs.GetFile)
s3fs.GET("", s.handler.s3fs.GetFile)
var authmw echo.MiddlewareFunc
@ -535,13 +535,17 @@ func (s *server) setRoutes() {
return false, nil
})
memfs.POST("", s.handler.s3fs.PutFile, authmw)
memfs.PUT("", s.handler.s3fs.PutFile, authmw)
memfs.DELETE("", s.handler.s3fs.DeleteFile, authmw)
s3fs.POST("", s.handler.s3fs.PutFile, authmw)
s3fs.PUT("", s.handler.s3fs.PutFile, authmw)
s3fs.DELETE("", s.handler.s3fs.DeleteFile, authmw)
} else {
memfs.POST("", s.handler.s3fs.PutFile)
memfs.PUT("", s.handler.s3fs.PutFile)
memfs.DELETE("", s.handler.s3fs.DeleteFile)
s3fs.POST("", s.handler.s3fs.PutFile)
s3fs.PUT("", s.handler.s3fs.PutFile)
s3fs.DELETE("", s.handler.s3fs.DeleteFile)
}
if s.middleware.cache != nil {
s3fs.Use(s.middleware.cache)
}
}

View File

@ -71,6 +71,8 @@ func NewS3Filesystem(config S3Config) (Filesystem, error) {
"endpoint": fs.endpoint,
})
fs.logger.Debug().Log("Connected")
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(30*time.Second))
defer cancel()
@ -142,8 +144,9 @@ func (fs *s3fs) Symlink(oldname, newname string) error {
}
func (fs *s3fs) Open(path string) File {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
//ctx, cancel := context.WithCancel(context.Background())
//defer cancel()
ctx := context.Background()
object, err := fs.client.GetObject(ctx, fs.bucket, path, minio.GetObjectOptions{})
if err != nil {