Init V4 community edition (#2265)
* Init V4 community edition * Init V4 community edition
This commit is contained in:
124
pkg/filemanager/manager/archive.go
Normal file
124
pkg/filemanager/manager/archive.go
Normal file
@@ -0,0 +1,124 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
|
||||
"golang.org/x/tools/container/intsets"
|
||||
)
|
||||
|
||||
// CreateArchive streams a ZIP archive of the given URIs into writer.
//
// Top-level URIs are resolved first; a failure to resolve any of them aborts
// the whole operation. After that, individual file failures are tolerated:
// they are logged, counted, and skipped. The returned int is the number of
// entries that failed to compress; the archive itself is written to writer
// as entries succeed (it cannot be "un-written" on later failure).
//
// NOTE(review): ProgressFunc is invoked as (compressed, file.Size(), 0) —
// the second argument appears to be the current file's size, not a total;
// confirm against the fs.Option contract.
func (m *manager) CreateArchive(ctx context.Context, uris []*fs.URI, writer io.Writer, opts ...fs.Option) (int, error) {
	o := newOption()
	for _, opt := range opts {
		opt.Apply(o)
	}

	failed := 0

	// List all top level files. Any resolution error here is fatal, unlike
	// per-file compression errors below.
	files := make([]fs.File, 0, len(uris))
	for _, uri := range uris {
		file, err := m.Get(ctx, uri, dbfs.WithFileEntities(), dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityDownloadFile))
		if err != nil {
			return 0, fmt.Errorf("failed to get file %s: %w", uri, err)
		}

		files = append(files, file)
	}

	zipWriter := zip.NewWriter(writer)
	defer zipWriter.Close()

	// compressed accumulates the uncompressed size of all processed files,
	// including ones whose compression failed, and is checked against the
	// configured MaxArchiveSize cap.
	var compressed int64
	for _, file := range files {
		if file.Type() == types.FileTypeFile {
			if err := m.compressFileToArchive(ctx, "/", file, zipWriter, o.ArchiveCompression, o.DryRun); err != nil {
				failed++
				m.l.Warning("Failed to compress file %s: %s, skipping it...", file.Uri(false), err)
			}

			compressed += file.Size()
			if o.ProgressFunc != nil {
				o.ProgressFunc(compressed, file.Size(), 0)
			}

			if o.MaxArchiveSize > 0 && compressed > o.MaxArchiveSize {
				return 0, fs.ErrArchiveSrcSizeTooBig
			}

		} else {
			// Folders are walked recursively (unbounded depth). Sub-folders
			// and symbolic links are skipped; only regular files are added.
			if err := m.Walk(ctx, file.Uri(false), intsets.MaxInt, func(f fs.File, level int) error {
				if f.Type() == types.FileTypeFolder || f.IsSymbolic() {
					return nil
				}
				// Entry name is the walked file's path relative to the
				// top-level folder being archived.
				if err := m.compressFileToArchive(ctx, strings.TrimPrefix(f.Uri(false).Dir(),
					file.Uri(false).Dir()), f, zipWriter, o.ArchiveCompression, o.DryRun); err != nil {
					failed++
					m.l.Warning("Failed to compress file %s: %s, skipping it...", f.Uri(false), err)
				}

				compressed += f.Size()
				if o.ProgressFunc != nil {
					o.ProgressFunc(compressed, f.Size(), 0)
				}

				if o.MaxArchiveSize > 0 && compressed > o.MaxArchiveSize {
					// Returning a non-nil error aborts the walk; it surfaces
					// as a walk failure below rather than aborting CreateArchive.
					return fs.ErrArchiveSrcSizeTooBig
				}

				return nil
			}); err != nil {
				m.l.Warning("Failed to walk folder %s: %s, skipping it...", file.Uri(false), err)
				failed++
			}
		}
	}

	return failed, nil
}
|
||||
|
||||
func (m *manager) compressFileToArchive(ctx context.Context, parent string, file fs.File, zipWriter *zip.Writer,
|
||||
compression bool, dryrun fs.CreateArchiveDryRunFunc) error {
|
||||
es, err := m.GetEntitySource(ctx, file.PrimaryEntityID())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get entity source for file %s: %w", file.Uri(false), err)
|
||||
}
|
||||
|
||||
zipName := filepath.FromSlash(path.Join(parent, file.DisplayName()))
|
||||
if dryrun != nil {
|
||||
dryrun(zipName, es.Entity())
|
||||
return nil
|
||||
}
|
||||
|
||||
m.l.Debug("Compressing %s to archive...", file.Uri(false))
|
||||
header := &zip.FileHeader{
|
||||
Name: zipName,
|
||||
Modified: file.UpdatedAt(),
|
||||
UncompressedSize64: uint64(file.Size()),
|
||||
}
|
||||
|
||||
if !compression {
|
||||
header.Method = zip.Store
|
||||
} else {
|
||||
header.Method = zip.Deflate
|
||||
}
|
||||
|
||||
writer, err := zipWriter.CreateHeader(header)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create zip header for %s: %w", file.Uri(false), err)
|
||||
}
|
||||
|
||||
es.Apply(entitysource.WithContext(ctx))
|
||||
_, err = io.Copy(writer, es)
|
||||
return err
|
||||
|
||||
}
|
||||
365
pkg/filemanager/manager/entity.go
Normal file
365
pkg/filemanager/manager/entity.go
Normal file
@@ -0,0 +1,365 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/user"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
// EntityManagement groups the manager operations that deal with file
// entities (physical blob versions of a file): URL generation, direct
// links, thumbnails, version control and recycling.
type EntityManagement interface {
	// GetEntityUrls gets download urls of given entities, return URLs and the earliest expiry time
	GetEntityUrls(ctx context.Context, args []GetEntityUrlArgs, opts ...fs.Option) ([]string, *time.Time, error)
	// GetUrlForRedirectedDirectLink gets redirected direct download link of given direct link
	GetUrlForRedirectedDirectLink(ctx context.Context, dl *ent.DirectLink, opts ...fs.Option) (string, *time.Time, error)
	// GetDirectLink gets permanent direct download link of given files
	GetDirectLink(ctx context.Context, urls ...*fs.URI) ([]DirectLink, error)
	// GetEntitySource gets source of given entity
	GetEntitySource(ctx context.Context, entityID int, opts ...fs.Option) (entitysource.EntitySource, error)
	// Thumbnail gets thumbnail entity of given file
	Thumbnail(ctx context.Context, uri *fs.URI) (entitysource.EntitySource, error)
	// SubmitAndAwaitThumbnailTask submits a thumbnail task and waits for result
	SubmitAndAwaitThumbnailTask(ctx context.Context, uri *fs.URI, ext string, entity fs.Entity) (fs.Entity, error)
	// SetCurrentVersion sets current version of given file
	SetCurrentVersion(ctx context.Context, path *fs.URI, version int) error
	// DeleteVersion deletes a version of given file
	DeleteVersion(ctx context.Context, path *fs.URI, version int) error
	// ExtractAndSaveMediaMeta extracts and saves media meta into file metadata of given file.
	ExtractAndSaveMediaMeta(ctx context.Context, uri *fs.URI, entityID int) error
	// RecycleEntities recycles a group of entities
	RecycleEntities(ctx context.Context, force bool, entityIDs ...int) error
}

// DirectLink pairs a resolved file with its generated direct download URL.
type DirectLink struct {
	// File is the file the link points to.
	File fs.File
	// Url is the absolute download URL for the file's primary entity.
	Url string
}
|
||||
|
||||
// GetDirectLink generates permanent direct download links for the given file
// URIs. Per-URI failures are collected into an aggregate error while
// successful links are still returned, so callers may receive both a non-empty
// result slice and a non-nil error.
//
// Depending on the owner group's RedirectedSource setting, the link is either
// a server-side redirect link (persisted via fileClient.CreateDirectLink) or
// a direct storage-backend URL generated from the file's primary entity.
func (m *manager) GetDirectLink(ctx context.Context, urls ...*fs.URI) ([]DirectLink, error) {
	ae := serializer.NewAggregateError()
	res := make([]DirectLink, 0, len(urls))
	useRedirect := m.user.Edges.Group.Settings.RedirectedSource
	fileClient := m.dep.FileClient()
	siteUrl := m.settings.SiteURL(ctx)

	for _, url := range urls {
		file, err := m.fs.Get(
			ctx, url,
			dbfs.WithFileEntities(),
			dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityDownloadFile),
		)
		if err != nil {
			ae.Add(url.String(), err)
			continue
		}

		// Direct links may only be created by the file's owner.
		if file.OwnerID() != m.user.ID {
			ae.Add(url.String(), fs.ErrOwnerOnly)
			continue
		}

		// Only regular files (not folders) can be linked.
		if file.Type() != types.FileTypeFile {
			ae.Add(url.String(), fs.ErrEntityNotExist)
			continue
		}

		target := file.PrimaryEntity()
		if target == nil {
			ae.Add(url.String(), fs.ErrEntityNotExist)
			continue
		}

		// Hooks for entity download; hook failures are logged but do not
		// block link creation.
		if err := m.fs.ExecuteNavigatorHooks(ctx, fs.HookTypeBeforeDownload, file); err != nil {
			m.l.Warning("Failed to execute navigator hooks: %s", err)
		}

		if useRedirect {
			// Use redirect source: persist a direct-link record and build a
			// site-relative master redirect URL from its hashed ID.
			link, err := fileClient.CreateDirectLink(ctx, file.ID(), file.Name(), m.user.Edges.Group.SpeedLimit)
			if err != nil {
				ae.Add(url.String(), err)
				continue
			}

			linkHashID := hashid.EncodeSourceLinkID(m.hasher, link.ID)
			res = append(res, DirectLink{
				File: file,
				Url:  routes.MasterDirectLink(siteUrl, linkHashID, link.Name).String(),
			})
		} else {
			// Use direct source: generate a URL straight from the storage
			// policy driver backing the primary entity.
			policy, d, err := m.getEntityPolicyDriver(ctx, target, nil)
			if err != nil {
				ae.Add(url.String(), err)
				continue
			}

			source := entitysource.NewEntitySource(target, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
				m.l, m.config, m.dep.MimeDetector(ctx))
			sourceUrl, err := source.Url(ctx,
				entitysource.WithSpeedLimit(int64(m.user.Edges.Group.SpeedLimit)),
				entitysource.WithDisplayName(file.Name()),
			)
			if err != nil {
				ae.Add(url.String(), err)
				continue
			}

			res = append(res, DirectLink{
				File: file,
				Url:  sourceUrl.Url,
			})
		}

	}

	return res, ae.Aggregate()
}
|
||||
|
||||
// GetUrlForRedirectedDirectLink resolves a persisted direct-link record into a
// concrete download URL for its file's primary entity, returning the URL and
// its expiry time (nil expiry means the URL does not expire).
//
// The direct link is considered invalid when the file owner is not active or
// the primary entity is missing. Generated URLs are cached in the key-value
// store keyed by entity/speed/name/site, with a TTL shortened by the
// configured cache margin.
func (m *manager) GetUrlForRedirectedDirectLink(ctx context.Context, dl *ent.DirectLink, opts ...fs.Option) (string, *time.Time, error) {
	o := newOption()
	for _, opt := range opts {
		opt.Apply(o)
	}

	// dl must have been loaded with its File edge (and the file with Owner
	// and Entities edges); otherwise these accessors return an error.
	file, err := dl.Edges.FileOrErr()
	if err != nil {
		return "", nil, err
	}

	owner, err := file.Edges.OwnerOrErr()
	if err != nil {
		return "", nil, err
	}

	entities, err := file.Edges.EntitiesOrErr()
	if err != nil {
		return "", nil, err
	}

	// File owner must be active
	if owner.Status != user.StatusActive {
		return "", nil, fs.ErrDirectLinkInvalid.WithError(fmt.Errorf("file owner is not active"))
	}

	// Find primary entity
	target, found := lo.Find(entities, func(entity *ent.Entity) bool {
		return entity.ID == file.PrimaryEntity
	})
	if !found {
		return "", nil, fs.ErrDirectLinkInvalid.WithError(fmt.Errorf("primary entity not found"))
	}
	primaryEntity := fs.NewEntity(target)

	// Generate url
	var (
		res    string
		expire *time.Time
	)

	// Try to read from cache.
	cacheKey := entityUrlCacheKey(primaryEntity.ID(), int64(dl.Speed), dl.Name, false,
		m.settings.SiteURL(ctx).String())
	if cached, ok := m.kv.Get(cacheKey); ok {
		cachedItem := cached.(EntityUrlCache)
		res = cachedItem.Url
		expire = cachedItem.ExpireAt
	} else {
		// Cache miss, Generate new url
		policy, d, err := m.getEntityPolicyDriver(ctx, primaryEntity, nil)
		if err != nil {
			return "", nil, err
		}

		source := entitysource.NewEntitySource(primaryEntity, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
			m.l, m.config, m.dep.MimeDetector(ctx))
		downloadUrl, err := source.Url(ctx,
			entitysource.WithExpire(o.Expire),
			entitysource.WithDownload(false),
			entitysource.WithSpeedLimit(int64(dl.Speed)),
			entitysource.WithDisplayName(dl.Name),
		)
		if err != nil {
			return "", nil, err
		}

		// Save into kv; only cache when the remaining validity window is
		// positive after subtracting the safety margin.
		cacheValidDuration := expireTimeToTTL(o.Expire) - m.settings.EntityUrlCacheMargin(ctx)
		if cacheValidDuration > 0 {
			m.kv.Set(cacheKey, EntityUrlCache{
				Url:      downloadUrl.Url,
				ExpireAt: downloadUrl.ExpireAt,
			}, cacheValidDuration)
		}

		res = downloadUrl.Url
		expire = downloadUrl.ExpireAt
	}

	return res, expire, nil
}
|
||||
|
||||
// GetEntityUrls generates download URLs for a batch of files, returning the
// URLs (positionally aligned with args — a failed entry leaves an empty
// string at its index), the earliest expiry time among all generated URLs
// (nil if none expire), and an aggregate error of per-entry failures.
//
// For each entry the target entity is either the explicitly requested
// PreferredEntityID or the file's primary entity. Results are cached in the
// key-value store unless the caller opted out via NoCache.
func (m *manager) GetEntityUrls(ctx context.Context, args []GetEntityUrlArgs, opts ...fs.Option) ([]string, *time.Time, error) {
	o := newOption()
	for _, opt := range opts {
		opt.Apply(o)
	}

	var earliestExpireAt *time.Time
	res := make([]string, len(args))
	ae := serializer.NewAggregateError()
	for i, arg := range args {
		file, err := m.fs.Get(
			ctx, arg.URI,
			dbfs.WithFileEntities(),
			dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityDownloadFile),
		)
		if err != nil {
			ae.Add(arg.URI.String(), err)
			continue
		}

		// Only regular files have downloadable entities.
		if file.Type() != types.FileTypeFile {
			ae.Add(arg.URI.String(), fs.ErrEntityNotExist)
			continue
		}

		var (
			target fs.Entity
			found  bool
		)
		if arg.PreferredEntityID != "" {
			found, target = fs.FindDesiredEntity(file, arg.PreferredEntityID, m.hasher, nil)
			if !found {
				ae.Add(arg.URI.String(), fs.ErrEntityNotExist)
				continue
			}
		} else {
			// No preferred entity ID, use the primary version entity
			target = file.PrimaryEntity()
			if target == nil {
				ae.Add(arg.URI.String(), fs.ErrEntityNotExist)
				continue
			}
		}

		// Hooks for entity download; failures are logged, not fatal.
		if err := m.fs.ExecuteNavigatorHooks(ctx, fs.HookTypeBeforeDownload, file); err != nil {
			m.l.Warning("Failed to execute navigator hooks: %s", err)
		}

		// Try to read from cache.
		cacheKey := entityUrlCacheKey(target.ID(), o.DownloadSpeed, getEntityDisplayName(file, target), o.IsDownload,
			m.settings.SiteURL(ctx).String())
		if cached, ok := m.kv.Get(cacheKey); ok && !o.NoCache {
			cachedItem := cached.(EntityUrlCache)
			// Find the earliest expiry time
			if cachedItem.ExpireAt != nil && (earliestExpireAt == nil || cachedItem.ExpireAt.Before(*earliestExpireAt)) {
				earliestExpireAt = cachedItem.ExpireAt
			}
			res[i] = cachedItem.Url
			continue
		}

		// Cache miss, Generate new url
		policy, d, err := m.getEntityPolicyDriver(ctx, target, nil)
		if err != nil {
			ae.Add(arg.URI.String(), err)
			continue
		}

		source := entitysource.NewEntitySource(target, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
			m.l, m.config, m.dep.MimeDetector(ctx))
		downloadUrl, err := source.Url(ctx,
			entitysource.WithExpire(o.Expire),
			entitysource.WithDownload(o.IsDownload),
			entitysource.WithSpeedLimit(o.DownloadSpeed),
			entitysource.WithDisplayName(getEntityDisplayName(file, target)),
		)
		if err != nil {
			ae.Add(arg.URI.String(), err)
			continue
		}

		// Find the earliest expiry time
		if downloadUrl.ExpireAt != nil && (earliestExpireAt == nil || downloadUrl.ExpireAt.Before(*earliestExpireAt)) {
			earliestExpireAt = downloadUrl.ExpireAt
		}

		// Save into kv; skip caching when the remaining validity window
		// (minus the safety margin) is not positive.
		cacheValidDuration := expireTimeToTTL(o.Expire) - m.settings.EntityUrlCacheMargin(ctx)
		if cacheValidDuration > 0 {
			m.kv.Set(cacheKey, EntityUrlCache{
				Url:      downloadUrl.Url,
				ExpireAt: downloadUrl.ExpireAt,
			}, cacheValidDuration)
		}

		res[i] = downloadUrl.Url
	}

	return res, earliestExpireAt, ae.Aggregate()
}
|
||||
|
||||
// GetEntitySource constructs an entity source for the given entity ID.
// If an explicit entity is supplied via options it is used directly
// (entityID is then ignored); otherwise the entity is loaded from the
// filesystem and rejected if its reference count is zero (i.e. it has been
// recycled). The returned source carries ctx and the thumbnail flag.
func (m *manager) GetEntitySource(ctx context.Context, entityID int, opts ...fs.Option) (entitysource.EntitySource, error) {
	o := newOption()
	for _, opt := range opts {
		opt.Apply(o)
	}

	var (
		entity fs.Entity
		err    error
	)

	if o.Entity != nil {
		entity = o.Entity
	} else {
		entity, err = m.fs.GetEntity(ctx, entityID)
		if err != nil {
			return nil, err
		}

		// A zero reference count means no file references this entity
		// anymore; treat it as non-existent.
		if entity.ReferenceCount() == 0 {
			return nil, fs.ErrEntityNotExist
		}
	}

	policy, handler, err := m.getEntityPolicyDriver(ctx, entity, o.Policy)
	if err != nil {
		return nil, err
	}

	return entitysource.NewEntitySource(entity, handler, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(), m.l,
		m.config, m.dep.MimeDetector(ctx), entitysource.WithContext(ctx), entitysource.WithThumb(o.IsThumb)), nil
}
|
||||
|
||||
func (l *manager) SetCurrentVersion(ctx context.Context, path *fs.URI, version int) error {
|
||||
return l.fs.VersionControl(ctx, path, version, false)
|
||||
}
|
||||
|
||||
func (l *manager) DeleteVersion(ctx context.Context, path *fs.URI, version int) error {
|
||||
return l.fs.VersionControl(ctx, path, version, true)
|
||||
}
|
||||
|
||||
func entityUrlCacheKey(id int, speed int64, displayName string, download bool, siteUrl string) string {
|
||||
hash := sha1.New()
|
||||
hash.Write([]byte(fmt.Sprintf("%d_%d_%s_%t_%s", id,
|
||||
speed, displayName, download, siteUrl)))
|
||||
hashRes := hex.EncodeToString(hash.Sum(nil))
|
||||
|
||||
return fmt.Sprintf("%s_%s", EntityUrlCacheKeyPrefix, hashRes)
|
||||
}
|
||||
958
pkg/filemanager/manager/entitysource/entitysource.go
Normal file
958
pkg/filemanager/manager/entitysource/entitysource.go
Normal file
@@ -0,0 +1,958 @@
|
||||
package entitysource
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/local"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/juju/ratelimit"
|
||||
)
|
||||
|
||||
const (
	// shortSeekBytes is the maximum forward seek distance that Seek will
	// satisfy by reading and discarding bytes from the current stream
	// instead of closing and reopening it.
	shortSeekBytes = 1024
	// The algorithm uses at most sniffLen bytes to make its decision.
	sniffLen = 512
	// defaultUrlExpire is the validity window used for URLs generated when
	// reverse-proxying non-local entities in Serve.
	defaultUrlExpire = time.Hour * 1
)

var (
	// ErrNoContentLength is returned by Seek when the initial http response did not include a Content-Length header
	ErrNoContentLength = errors.New("Content-Length was not set")

	// errNoOverlap is returned by serveContent's parseRange if first-byte-pos of
	// all of the byte-range-spec values is greater than the content size.
	errNoOverlap = errors.New("invalid range: failed to overlap")
)
|
||||
|
||||
// EntitySource is a readable, seekable view over a file entity's content,
// regardless of which storage backend holds it. It can also generate
// download URLs for the entity and serve it directly over HTTP.
type EntitySource interface {
	io.ReadSeekCloser
	io.ReaderAt

	// Url generates a download url for the entity.
	Url(ctx context.Context, opts ...EntitySourceOption) (*EntityUrl, error)
	// Serve serves the entity to the client, with supports on Range header and If- cache control.
	Serve(w http.ResponseWriter, r *http.Request, opts ...EntitySourceOption)
	// Entity returns the entity of the source.
	Entity() fs.Entity
	// IsLocal returns true if the source is in local machine.
	IsLocal() bool
	// LocalPath returns the local path of the source file.
	LocalPath(ctx context.Context) string
	// Apply applies the options to the source.
	Apply(opts ...EntitySourceOption)
	// CloneToLocalSrc clones the source to a local file source.
	CloneToLocalSrc(t types.EntityType, src string) (EntitySource, error)
	// ShouldInternalProxy returns true if the source will/should be proxied by internal proxy.
	ShouldInternalProxy(opts ...EntitySourceOption) bool
}
|
||||
|
||||
// EntitySourceOption mutates an EntitySourceOptions value; implementations
// are created through the With* constructors below.
type EntitySourceOption interface {
	Apply(any)
}

// EntitySourceOptions holds all per-request settings for URL generation and
// serving of an entity source.
type EntitySourceOptions struct {
	// SpeedLimit is the transfer speed limit in bytes/s (0 = unlimited —
	// TODO confirm against driver implementations).
	SpeedLimit int64
	// Expire is the requested URL expiry time; nil means the default.
	Expire *time.Time
	// IsDownload marks the URL/response as an attachment download.
	IsDownload bool
	// NoInternalProxy overrides the policy's internal-proxy setting.
	NoInternalProxy bool
	// DisplayName is the filename presented to the client.
	DisplayName string
	// OneTimeDownloadKey — presumably a single-use download token; not used
	// in the visible code, verify against callers.
	OneTimeDownloadKey string
	// Ctx is the request context used for serving/proxying.
	Ctx context.Context
	// IsThumb requests thumbnail processing parameters in the URL.
	IsThumb bool
}

// EntityUrl is a generated download URL together with its expiry time
// (nil ExpireAt means the URL does not expire).
type EntityUrl struct {
	Url      string
	ExpireAt *time.Time
}

// EntitySourceOptionFunc adapts a plain function to EntitySourceOption.
type EntitySourceOptionFunc func(any)
||||
|
||||
// WithSpeedLimit set speed limit for file source (if supported)
|
||||
func WithSpeedLimit(limit int64) EntitySourceOption {
|
||||
return EntitySourceOptionFunc(func(option any) {
|
||||
option.(*EntitySourceOptions).SpeedLimit = limit
|
||||
})
|
||||
}
|
||||
|
||||
// WithExpire set expire time for file source
|
||||
func WithExpire(expire *time.Time) EntitySourceOption {
|
||||
return EntitySourceOptionFunc(func(option any) {
|
||||
option.(*EntitySourceOptions).Expire = expire
|
||||
})
|
||||
}
|
||||
|
||||
// WithDownload set file URL as download
|
||||
func WithDownload(isDownload bool) EntitySourceOption {
|
||||
return EntitySourceOptionFunc(func(option any) {
|
||||
option.(*EntitySourceOptions).IsDownload = isDownload
|
||||
})
|
||||
}
|
||||
|
||||
// WithNoInternalProxy overwrite policy's internal proxy setting
|
||||
func WithNoInternalProxy() EntitySourceOption {
|
||||
return EntitySourceOptionFunc(func(option any) {
|
||||
option.(*EntitySourceOptions).NoInternalProxy = true
|
||||
})
|
||||
}
|
||||
|
||||
// WithDisplayName set display name for file source
|
||||
func WithDisplayName(name string) EntitySourceOption {
|
||||
return EntitySourceOptionFunc(func(option any) {
|
||||
option.(*EntitySourceOptions).DisplayName = name
|
||||
})
|
||||
}
|
||||
|
||||
// WithContext set context for file source
|
||||
func WithContext(ctx context.Context) EntitySourceOption {
|
||||
return EntitySourceOptionFunc(func(option any) {
|
||||
option.(*EntitySourceOptions).Ctx = ctx
|
||||
})
|
||||
}
|
||||
|
||||
// WithThumb set entity source as thumb. This will result in entity source URL
|
||||
// generated with thumbnail processing parameters. For sidecar thumb files,
|
||||
// this option will be ignored.
|
||||
func WithThumb(isThumb bool) EntitySourceOption {
|
||||
return EntitySourceOptionFunc(func(option any) {
|
||||
option.(*EntitySourceOptions).IsThumb = isThumb
|
||||
})
|
||||
}
|
||||
|
||||
func (f EntitySourceOptionFunc) Apply(option any) {
|
||||
f(option)
|
||||
}
|
||||
|
||||
type (
	// entitySource is the default EntitySource implementation. It lazily
	// opens the underlying content via the storage driver on first Read and
	// tracks the current stream position to support Seek.
	entitySource struct {
		e           fs.Entity           // the entity being exposed
		handler     driver.Handler      // storage driver backing the entity
		policy      *ent.StoragePolicy  // storage policy of the entity
		generalAuth auth.Auth           // signer for generated URLs — TODO confirm exact use
		settings    setting.Provider    // site settings provider
		hasher      hashid.Encoder      // hash-ID encoder (ETag, link IDs)
		c           request.Client      // HTTP client for remote reads
		l           logging.Logger      // logger
		config      conf.ConfigProvider // static configuration
		mime        mime.MimeDetector   // content-type detection by filename

		rsc io.ReadCloser         // lazily opened content stream; nil until first read
		pos int64                 // current read offset within the entity
		o   *EntitySourceOptions  // accumulated per-request options
	}
)
|
||||
|
||||
// NewEntitySource creates a new EntitySource for entity e backed by the
// given storage driver handler and policy. The remaining dependencies are
// used for URL signing, settings lookup, logging, and MIME detection.
// Options are applied immediately to the source's option set.
func NewEntitySource(
	e fs.Entity,
	handler driver.Handler,
	policy *ent.StoragePolicy,
	generalAuth auth.Auth,
	settings setting.Provider,
	hasher hashid.Encoder,
	c request.Client,
	l logging.Logger,
	config conf.ConfigProvider,
	mime mime.MimeDetector,
	opts ...EntitySourceOption,
) EntitySource {
	s := &entitySource{
		e:           e,
		handler:     handler,
		policy:      policy,
		generalAuth: generalAuth,
		settings:    settings,
		hasher:      hasher,
		c:           c,
		config:      config,
		l:           l,
		mime:        mime,
		o:           &EntitySourceOptions{},
	}
	for _, opt := range opts {
		opt.Apply(s.o)
	}
	return s
}
|
||||
|
||||
func (f *entitySource) Apply(opts ...EntitySourceOption) {
|
||||
for _, opt := range opts {
|
||||
opt.Apply(f.o)
|
||||
}
|
||||
}
|
||||
|
||||
// CloneToLocalSrc builds a new EntitySource of type t over the local file at
// src, backed by a fresh local storage policy/driver. The clone shares this
// source's option set (the same *EntitySourceOptions pointer — later Apply
// calls on either source affect both).
func (f *entitySource) CloneToLocalSrc(t types.EntityType, src string) (EntitySource, error) {
	e, err := local.NewLocalFileEntity(t, src)
	if err != nil {
		return nil, err
	}

	policy := &ent.StoragePolicy{Type: types.PolicyTypeLocal}
	handler := local.New(policy, f.l, f.config)

	newSrc := NewEntitySource(e, handler, policy, f.generalAuth, f.settings, f.hasher, f.c, f.l, f.config, f.mime).(*entitySource)
	newSrc.o = f.o
	return newSrc, nil
}
|
||||
|
||||
// Entity returns the entity this source exposes.
func (f *entitySource) Entity() fs.Entity {
	return f.e
}
|
||||
|
||||
func (f *entitySource) IsLocal() bool {
|
||||
return f.handler.Capabilities().StaticFeatures.Enabled(int(driver.HandlerCapabilityInboundGet))
|
||||
}
|
||||
|
||||
// LocalPath returns the local filesystem path of the entity's source file,
// as resolved by the backing driver. Only meaningful when IsLocal is true.
func (f *entitySource) LocalPath(ctx context.Context) string {
	return f.handler.LocalPath(ctx, f.e.Source())
}
|
||||
|
||||
func (f *entitySource) Serve(w http.ResponseWriter, r *http.Request, opts ...EntitySourceOption) {
|
||||
for _, opt := range opts {
|
||||
opt.Apply(f.o)
|
||||
}
|
||||
|
||||
if f.IsLocal() {
|
||||
// For local files, validate file existence by resetting rsc
|
||||
if err := f.resetRequest(); err != nil {
|
||||
f.l.Warning("Failed to serve local entity %q: %s", err, f.e.Source())
|
||||
http.Error(w, "Entity data does not exist.", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
etag := "\"" + hashid.EncodeEntityID(f.hasher, f.e.ID()) + "\""
|
||||
w.Header().Set("Etag", "\""+hashid.EncodeEntityID(f.hasher, f.e.ID())+"\"")
|
||||
|
||||
if f.o.IsDownload {
|
||||
encodedFilename := url.PathEscape(f.o.DisplayName)
|
||||
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"; filename*=UTF-8''%s",
|
||||
f.o.DisplayName, encodedFilename))
|
||||
}
|
||||
|
||||
done, rangeReq := checkPreconditions(w, r, etag)
|
||||
if done {
|
||||
return
|
||||
}
|
||||
|
||||
if !f.IsLocal() {
|
||||
// for non-local file, reverse-proxy the request
|
||||
expire := time.Now().Add(defaultUrlExpire)
|
||||
u, err := f.Url(driver.WithForcePublicEndpoint(f.o.Ctx, false), WithNoInternalProxy(), WithExpire(&expire))
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
target, err := url.Parse(u.Url)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
proxy := &httputil.ReverseProxy{
|
||||
Director: func(request *http.Request) {
|
||||
request.URL.Scheme = target.Scheme
|
||||
request.URL.Host = target.Host
|
||||
request.URL.Path = target.Path
|
||||
request.URL.RawPath = target.RawPath
|
||||
request.URL.RawQuery = target.RawQuery
|
||||
request.Host = target.Host
|
||||
request.Header.Del("Authorization")
|
||||
},
|
||||
ModifyResponse: func(response *http.Response) error {
|
||||
response.Header.Del("ETag")
|
||||
response.Header.Del("Content-Disposition")
|
||||
response.Header.Del("Cache-Control")
|
||||
logging.Request(f.l,
|
||||
false,
|
||||
response.StatusCode,
|
||||
response.Request.Method,
|
||||
request.LocalIP,
|
||||
response.Request.URL.String(),
|
||||
"",
|
||||
start,
|
||||
)
|
||||
return nil
|
||||
},
|
||||
ErrorHandler: func(writer http.ResponseWriter, request *http.Request, err error) {
|
||||
f.l.Error("Reverse proxy error in %q: %s", request.URL.String(), err)
|
||||
writer.WriteHeader(http.StatusBadGateway)
|
||||
writer.Write([]byte("[Cloudreve] Bad Gateway"))
|
||||
},
|
||||
}
|
||||
|
||||
r = r.Clone(f.o.Ctx)
|
||||
defer func() {
|
||||
if err := recover(); err != nil && err != http.ErrAbortHandler {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
proxy.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
code := http.StatusOK
|
||||
// If Content-Type isn't set, use the file's extension to find it, but
|
||||
// if the Content-Type is unset explicitly, do not sniff the type.
|
||||
ctypes, haveType := w.Header()["Content-Type"]
|
||||
var ctype string
|
||||
if !haveType {
|
||||
ctype = f.mime.TypeByName(f.o.DisplayName)
|
||||
if ctype == "" {
|
||||
// read a chunk to decide between utf-8 text and binary
|
||||
var buf [sniffLen]byte
|
||||
n, _ := io.ReadFull(f, buf[:])
|
||||
ctype = http.DetectContentType(buf[:n])
|
||||
_, err := f.Seek(0, io.SeekStart) // rewind to output whole file
|
||||
if err != nil {
|
||||
http.Error(w, "seeker can't seek", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
w.Header().Set("Content-Type", ctype)
|
||||
} else if len(ctypes) > 0 {
|
||||
ctype = ctypes[0]
|
||||
}
|
||||
|
||||
size := f.e.Size()
|
||||
if size < 0 {
|
||||
// Should never happen but just to be sure
|
||||
http.Error(w, "negative content size computed", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// handle Content-Range header.
|
||||
sendSize := size
|
||||
var sendContent io.Reader = f
|
||||
ranges, err := parseRange(rangeReq, size)
|
||||
switch err {
|
||||
case nil:
|
||||
case errNoOverlap:
|
||||
if size == 0 {
|
||||
// Some clients add a Range header to all requests to
|
||||
// limit the size of the response. If the file is empty,
|
||||
// ignore the range header and respond with a 200 rather
|
||||
// than a 416.
|
||||
ranges = nil
|
||||
break
|
||||
}
|
||||
w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", size))
|
||||
fallthrough
|
||||
default:
|
||||
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
|
||||
return
|
||||
}
|
||||
|
||||
if sumRangesSize(ranges) > size {
|
||||
// The total number of bytes in all the ranges
|
||||
// is larger than the size of the file by
|
||||
// itself, so this is probably an attack, or a
|
||||
// dumb client. Ignore the range request.
|
||||
ranges = nil
|
||||
}
|
||||
switch {
|
||||
case len(ranges) == 1:
|
||||
// RFC 7233, Section 4.1:
|
||||
// "If a single part is being transferred, the server
|
||||
// generating the 206 response MUST generate a
|
||||
// Content-Range header field, describing what range
|
||||
// of the selected representation is enclosed, and a
|
||||
// payload consisting of the range.
|
||||
// ...
|
||||
// A server MUST NOT generate a multipart response to
|
||||
// a request for a single range, since a client that
|
||||
// does not request multiple parts might not support
|
||||
// multipart responses."
|
||||
ra := ranges[0]
|
||||
if _, err := f.Seek(ra.start, io.SeekStart); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
|
||||
return
|
||||
}
|
||||
sendSize = ra.length
|
||||
code = http.StatusPartialContent
|
||||
w.Header().Set("Content-Range", ra.contentRange(size))
|
||||
case len(ranges) > 1:
|
||||
sendSize = rangesMIMESize(ranges, ctype, size)
|
||||
code = http.StatusPartialContent
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
mw := multipart.NewWriter(pw)
|
||||
w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
|
||||
sendContent = pr
|
||||
defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
|
||||
go func() {
|
||||
for _, ra := range ranges {
|
||||
part, err := mw.CreatePart(ra.mimeHeader(ctype, size))
|
||||
if err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if _, err := f.Seek(ra.start, io.SeekStart); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if _, err := io.CopyN(part, f, ra.length); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
mw.Close()
|
||||
pw.Close()
|
||||
}()
|
||||
}
|
||||
|
||||
w.Header().Set("Accept-Ranges", "bytes")
|
||||
if w.Header().Get("Content-Encoding") == "" {
|
||||
w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
|
||||
}
|
||||
|
||||
w.WriteHeader(code)
|
||||
|
||||
if r.Method != "HEAD" {
|
||||
io.CopyN(w, sendContent, sendSize)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *entitySource) Read(p []byte) (n int, err error) {
|
||||
if f.rsc == nil {
|
||||
err = f.resetRequest()
|
||||
}
|
||||
if f.rsc != nil {
|
||||
n, err = f.rsc.Read(p)
|
||||
f.pos += int64(n)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *entitySource) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
if f.IsLocal() {
|
||||
if f.rsc == nil {
|
||||
err = f.resetRequest()
|
||||
}
|
||||
if readAt, ok := f.rsc.(io.ReaderAt); ok {
|
||||
return readAt.ReadAt(p, off)
|
||||
}
|
||||
}
|
||||
|
||||
return 0, errors.New("source does not support ReadAt")
|
||||
}
|
||||
|
||||
// Seek implements io.Seeker over the entity. A short forward seek (within
// shortSeekBytes of the current position) is serviced by discarding bytes
// from the already-open reader, which is cheaper than issuing a new request;
// any other reposition closes the current reader so the next Read re-opens
// at the new offset.
func (f *entitySource) Seek(offset int64, whence int) (int64, error) {
	var err error
	// Translate whence-relative offsets into an absolute position.
	switch whence {
	case io.SeekStart:
	case io.SeekCurrent:
		offset += f.pos
	case io.SeekEnd:
		offset = f.e.Size() + offset
	}
	if f.rsc != nil {
		// Try to read, which is cheaper than doing a request
		if f.pos < offset && offset-f.pos <= shortSeekBytes {
			// Reading through f advances f.pos as a side effect of Read.
			_, err := io.CopyN(io.Discard, f, offset-f.pos)
			if err != nil {
				return 0, err
			}
		}

		if f.pos != offset {
			// Cannot reuse the open stream; drop it and let the next Read
			// issue a fresh request starting at the new position.
			err = f.rsc.Close()
			f.rsc = nil
		}
	}
	f.pos = offset
	return f.pos, err
}
|
||||
|
||||
func (f *entitySource) Close() error {
|
||||
if f.rsc != nil {
|
||||
return f.rsc.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *entitySource) ShouldInternalProxy(opts ...EntitySourceOption) bool {
|
||||
for _, opt := range opts {
|
||||
opt.Apply(f.o)
|
||||
}
|
||||
handlerCapability := f.handler.Capabilities()
|
||||
return f.e.ID() == 0 || handlerCapability.StaticFeatures.Enabled(int(driver.HandlerCapabilityProxyRequired)) ||
|
||||
f.policy.Settings.InternalProxy && !f.o.NoInternalProxy
|
||||
}
|
||||
|
||||
// Url generates an accessible URL for the entity. Depending on the storage
// policy and options this is either a signed internal-proxy URL served by
// this application, or a source URL issued by the storage driver (with the
// policy's proxy applied if configured). The returned ExpireAt reflects any
// driver-imposed expiry caps.
func (f *entitySource) Url(ctx context.Context, opts ...EntitySourceOption) (*EntityUrl, error) {
	for _, opt := range opts {
		opt.Apply(f.o)
	}

	var (
		srcUrl    *url.URL
		err       error
		srcUrlStr string
	)

	expire := f.o.Expire
	displayName := f.o.DisplayName
	if displayName == "" {
		// Fall back to the base name of the entity's source path.
		displayName = path.Base(util.FormSlash(f.e.Source()))
	}

	// Use internal proxy URL if:
	// 1. Internal proxy is required by driver's definition
	// 2. Internal proxy is enabled in Policy setting and not disabled by option
	// 3. It's an empty entity.
	handlerCapability := f.handler.Capabilities()
	if f.ShouldInternalProxy() {
		siteUrl := f.settings.SiteURL(ctx)
		base := routes.MasterFileContentUrl(
			siteUrl,
			hashid.EncodeEntityID(f.hasher, f.e.ID()),
			displayName,
			f.o.IsDownload,
			f.o.IsThumb,
			f.o.SpeedLimit,
		)

		srcUrl, err = auth.SignURI(ctx, f.generalAuth, base.String(), expire)
		if err != nil {
			return nil, fmt.Errorf("failed to sign internal proxy URL: %w", err)
		}

		if f.IsLocal() {
			// For local file, we need to apply proxy if needed
			srcUrl, err = driver.ApplyProxyIfNeeded(f.policy, srcUrl)
			if err != nil {
				return nil, fmt.Errorf("failed to apply proxy: %w", err)
			}
		}
	} else {
		// Clamp the requested expiry into the driver's supported window.
		expire = capExpireTime(expire, handlerCapability.MinSourceExpire, handlerCapability.MaxSourceExpire)
		if f.o.IsThumb {
			srcUrlStr, err = f.handler.Thumb(ctx, expire, util.Ext(f.o.DisplayName), f.e)
		} else {
			srcUrlStr, err = f.handler.Source(ctx, f.e, &driver.GetSourceArgs{
				Expire:      expire,
				IsDownload:  f.o.IsDownload,
				Speed:       f.o.SpeedLimit,
				DisplayName: displayName,
			})
		}
		if err != nil {
			return nil, fmt.Errorf("failed to get source URL: %w", err)
		}

		srcUrl, err = url.Parse(srcUrlStr)
		if err != nil {
			return nil, fmt.Errorf("failed to parse origin URL: %w", err)
		}

		srcUrl, err = driver.ApplyProxyIfNeeded(f.policy, srcUrl)
		if err != nil {
			return nil, fmt.Errorf("failed to apply proxy: %w", err)
		}
	}

	return &EntityUrl{
		Url:      srcUrl.String(),
		ExpireAt: expire,
	}, nil
}
|
||||
|
||||
// resetRequest (re)opens the underlying reader positioned at f.pos. Local
// entities are opened directly through the storage handler; remote entities
// are fetched over HTTP via a short-lived signed URL with a Range header
// starting at f.pos.
func (f *entitySource) resetRequest() error {
	// For inbound files, we can use the handler to open the file directly
	if f.IsLocal() {
		if f.rsc == nil {
			file, err := f.handler.Open(f.o.Ctx, f.e.Source())
			if err != nil {
				return fmt.Errorf("failed to open inbound file: %w", err)
			}

			if f.pos > 0 {
				_, err = file.Seek(f.pos, io.SeekStart)
				if err != nil {
					return fmt.Errorf("failed to seek inbound file: %w", err)
				}
			}

			f.rsc = file

			if f.o.SpeedLimit > 0 {
				// Wrap reads with a token-bucket limiter; lrs keeps the
				// original closer so Close still releases the file handle.
				bucket := ratelimit.NewBucketWithRate(float64(f.o.SpeedLimit), f.o.SpeedLimit)
				f.rsc = lrs{f.rsc, ratelimit.Reader(f.rsc, bucket)}
			}
		}

		return nil
	}

	expire := time.Now().Add(defaultUrlExpire)
	u, err := f.Url(driver.WithForcePublicEndpoint(f.o.Ctx, false), WithNoInternalProxy(), WithExpire(&expire))
	if err != nil {
		return fmt.Errorf("failed to generate download url: %w", err)
	}

	h := http.Header{}
	// Resume from the current logical position.
	h.Set("Range", fmt.Sprintf("bytes=%d-", f.pos))
	resp := f.c.Request(http.MethodGet, u.Url, nil,
		request.WithContext(f.o.Ctx),
		request.WithLogger(f.l),
		request.WithHeader(h),
	).CheckHTTPResponse(http.StatusOK, http.StatusPartialContent)
	if resp.Err != nil {
		return fmt.Errorf("failed to request download url: %w", resp.Err)
	}

	f.rsc = resp.Response.Body
	return nil
}
|
||||
|
||||
// capExpireTime make sure expire time is not too long or too short (if min or max is set)
|
||||
func capExpireTime(expire *time.Time, min, max time.Duration) *time.Time {
|
||||
timeNow := time.Now()
|
||||
if expire == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cappedExpires := *expire
|
||||
// Make sure expire time is not too long or too short
|
||||
if min > 0 && expire.Before(timeNow.Add(min)) {
|
||||
cappedExpires = timeNow.Add(min)
|
||||
} else if max > 0 && expire.After(timeNow.Add(max)) {
|
||||
cappedExpires = timeNow.Add(max)
|
||||
}
|
||||
|
||||
return &cappedExpires
|
||||
}
|
||||
|
||||
// checkPreconditions evaluates request preconditions and reports whether a precondition
|
||||
// resulted in sending StatusNotModified or StatusPreconditionFailed.
|
||||
func checkPreconditions(w http.ResponseWriter, r *http.Request, etag string) (done bool, rangeHeader string) {
|
||||
// This function carefully follows RFC 7232 section 6.
|
||||
ch := checkIfMatch(r, etag)
|
||||
if ch == condFalse {
|
||||
w.WriteHeader(http.StatusPreconditionFailed)
|
||||
return true, ""
|
||||
}
|
||||
switch checkIfNoneMatch(r, etag) {
|
||||
case condFalse:
|
||||
if r.Method == "GET" || r.Method == "HEAD" {
|
||||
writeNotModified(w)
|
||||
return true, ""
|
||||
} else {
|
||||
w.WriteHeader(http.StatusPreconditionFailed)
|
||||
return true, ""
|
||||
}
|
||||
}
|
||||
|
||||
rangeHeader = r.Header.Get("Range")
|
||||
if rangeHeader != "" && checkIfRange(r, etag) == condFalse {
|
||||
rangeHeader = ""
|
||||
}
|
||||
return false, rangeHeader
|
||||
}
|
||||
|
||||
// condResult is the result of an HTTP request precondition check.
// See https://tools.ietf.org/html/rfc7232 section 3.
type condResult int

const (
	condNone  condResult = iota // header absent; no condition to evaluate
	condTrue                    // condition satisfied
	condFalse                   // condition failed
)
|
||||
|
||||
func checkIfMatch(r *http.Request, currentEtag string) condResult {
|
||||
im := r.Header.Get("If-Match")
|
||||
if im == "" {
|
||||
return condNone
|
||||
}
|
||||
for {
|
||||
im = textproto.TrimString(im)
|
||||
if len(im) == 0 {
|
||||
break
|
||||
}
|
||||
if im[0] == ',' {
|
||||
im = im[1:]
|
||||
continue
|
||||
}
|
||||
if im[0] == '*' {
|
||||
return condTrue
|
||||
}
|
||||
etag, remain := scanETag(im)
|
||||
if etag == "" {
|
||||
break
|
||||
}
|
||||
if etagStrongMatch(etag, currentEtag) {
|
||||
return condTrue
|
||||
}
|
||||
im = remain
|
||||
}
|
||||
|
||||
return condFalse
|
||||
}
|
||||
|
||||
// scanETag determines if a syntactically valid ETag is present at s. If so,
// the ETag (including any W/ prefix) and the remaining text after it are
// returned; otherwise it returns "", "". See RFC 7232 section 2.3 — an ETag
// is either W/"text" or "text".
func scanETag(s string) (etag string, remain string) {
	s = textproto.TrimString(s)
	start := 0
	if strings.HasPrefix(s, "W/") {
		start = 2
	}
	// Need at least an opening quote plus one more character.
	if len(s[start:]) < 2 || s[start] != '"' {
		return "", ""
	}
	for i := start + 1; i < len(s); i++ {
		c := s[i]
		if c == '"' {
			// Closing quote found: return the whole ETag and the rest.
			return s[:i+1], s[i+1:]
		}
		// etagc = %x21 / %x23-7E / obs-text; anything else is malformed.
		if !(c == 0x21 || (c >= 0x23 && c <= 0x7E) || c >= 0x80) {
			return "", ""
		}
	}
	// Unterminated quoted string.
	return "", ""
}
|
||||
|
||||
// etagStrongMatch reports whether a and b match using strong ETag comparison
// (RFC 7232 2.3.2): both must be identical, non-empty, non-weak validators.
// Assumes a and b are valid ETags.
func etagStrongMatch(a, b string) bool {
	if a == "" || a[0] != '"' {
		// Empty or weak (W/-prefixed) validators never strong-match.
		return false
	}
	return a == b
}
|
||||
|
||||
func checkIfNoneMatch(r *http.Request, currentEtag string) condResult {
|
||||
inm := r.Header.Get("If-None-Match")
|
||||
if inm == "" {
|
||||
return condNone
|
||||
}
|
||||
buf := inm
|
||||
for {
|
||||
buf = textproto.TrimString(buf)
|
||||
if len(buf) == 0 {
|
||||
break
|
||||
}
|
||||
if buf[0] == ',' {
|
||||
buf = buf[1:]
|
||||
continue
|
||||
}
|
||||
if buf[0] == '*' {
|
||||
return condFalse
|
||||
}
|
||||
etag, remain := scanETag(buf)
|
||||
if etag == "" {
|
||||
break
|
||||
}
|
||||
if etagWeakMatch(etag, currentEtag) {
|
||||
return condFalse
|
||||
}
|
||||
buf = remain
|
||||
}
|
||||
return condTrue
|
||||
}
|
||||
|
||||
// etagWeakMatch reports whether a and b match using weak ETag comparison
// (RFC 7232 2.3.2): the W/ prefix, if any, is ignored on both sides.
// Assumes a and b are valid ETags.
func etagWeakMatch(a, b string) bool {
	trimmedA := strings.TrimPrefix(a, "W/")
	trimmedB := strings.TrimPrefix(b, "W/")
	return trimmedA == trimmedB
}
|
||||
|
||||
func writeNotModified(w http.ResponseWriter) {
|
||||
// RFC 7232 section 4.1:
|
||||
// a sender SHOULD NOT generate representation metadata other than the
|
||||
// above listed fields unless said metadata exists for the purpose of
|
||||
// guiding cache updates (e.g., Last-Modified might be useful if the
|
||||
// response does not have an ETag field).
|
||||
h := w.Header()
|
||||
delete(h, "Content-Type")
|
||||
delete(h, "Content-Length")
|
||||
delete(h, "Content-Encoding")
|
||||
if h.Get("Etag") != "" {
|
||||
delete(h, "Last-Modified")
|
||||
}
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
}
|
||||
|
||||
func checkIfRange(r *http.Request, currentEtag string) condResult {
|
||||
if r.Method != "GET" && r.Method != "HEAD" {
|
||||
return condNone
|
||||
}
|
||||
ir := r.Header.Get("If-Range")
|
||||
if ir == "" {
|
||||
return condNone
|
||||
}
|
||||
etag, _ := scanETag(ir)
|
||||
if etag != "" {
|
||||
if etagStrongMatch(etag, currentEtag) {
|
||||
return condTrue
|
||||
} else {
|
||||
return condFalse
|
||||
}
|
||||
}
|
||||
|
||||
return condFalse
|
||||
}
|
||||
|
||||
// httpRange specifies one byte range of the response body, as parsed from a
// Range header: length bytes starting at offset start.
type httpRange struct {
	start, length int64
}

// contentRange renders the Content-Range header value for a body of the given
// total size, e.g. "bytes 0-499/1234".
func (r httpRange) contentRange(size int64) string {
	end := r.start + r.length - 1
	return fmt.Sprintf("bytes %d-%d/%d", r.start, end, size)
}

// mimeHeader builds the per-part MIME header used in multipart/byteranges
// responses.
func (r httpRange) mimeHeader(contentType string, size int64) textproto.MIMEHeader {
	h := make(textproto.MIMEHeader, 2)
	h.Set("Content-Range", r.contentRange(size))
	h.Set("Content-Type", contentType)
	return h
}
|
||||
|
||||
// parseRange parses a Range header string as per RFC 7233.
// errNoOverlap is returned if none of the ranges overlap.
func parseRange(s string, size int64) ([]httpRange, error) {
	if s == "" {
		return nil, nil // header not present
	}
	const b = "bytes="
	if !strings.HasPrefix(s, b) {
		return nil, errors.New("invalid range")
	}
	var ranges []httpRange
	noOverlap := false
	// Range sets are comma-separated byte-range-specs.
	for _, ra := range strings.Split(s[len(b):], ",") {
		ra = textproto.TrimString(ra)
		if ra == "" {
			continue
		}
		start, end, ok := strings.Cut(ra, "-")
		if !ok {
			return nil, errors.New("invalid range")
		}
		start, end = textproto.TrimString(start), textproto.TrimString(end)
		var r httpRange
		if start == "" {
			// If no start is specified, end specifies the
			// range start relative to the end of the file,
			// and we are dealing with <suffix-length>
			// which has to be a non-negative integer as per
			// RFC 7233 Section 2.1 "Byte-Ranges".
			if end == "" || end[0] == '-' {
				return nil, errors.New("invalid range")
			}
			i, err := strconv.ParseInt(end, 10, 64)
			if i < 0 || err != nil {
				return nil, errors.New("invalid range")
			}
			if i > size {
				// Suffix longer than the body covers the whole body.
				i = size
			}
			r.start = size - i
			r.length = size - r.start
		} else {
			i, err := strconv.ParseInt(start, 10, 64)
			if err != nil || i < 0 {
				return nil, errors.New("invalid range")
			}
			if i >= size {
				// If the range begins after the size of the content,
				// then it does not overlap.
				noOverlap = true
				continue
			}
			r.start = i
			if end == "" {
				// If no end is specified, range extends to end of the file.
				r.length = size - r.start
			} else {
				i, err := strconv.ParseInt(end, 10, 64)
				if err != nil || r.start > i {
					return nil, errors.New("invalid range")
				}
				if i >= size {
					// Clamp end to the last valid byte.
					i = size - 1
				}
				r.length = i - r.start + 1
			}
		}
		ranges = append(ranges, r)
	}
	if noOverlap && len(ranges) == 0 {
		// The specified ranges did not overlap with the content.
		return nil, errNoOverlap
	}
	return ranges, nil
}
|
||||
|
||||
func sumRangesSize(ranges []httpRange) (size int64) {
|
||||
for _, ra := range ranges {
|
||||
size += ra.length
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// countingWriter is an io.Writer that discards its input while accumulating
// the total number of bytes written to it.
type countingWriter int64

// Write records len(p) bytes and always reports a full, error-free write.
func (w *countingWriter) Write(p []byte) (n int, err error) {
	n = len(p)
	*w += countingWriter(n)
	return n, nil
}
|
||||
|
||||
// rangesMIMESize returns the number of bytes it takes to encode the
|
||||
// provided ranges as a multipart response.
|
||||
func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (encSize int64) {
|
||||
var w countingWriter
|
||||
mw := multipart.NewWriter(&w)
|
||||
for _, ra := range ranges {
|
||||
mw.CreatePart(ra.mimeHeader(contentType, contentSize))
|
||||
encSize += ra.length
|
||||
}
|
||||
mw.Close()
|
||||
encSize += int64(w)
|
||||
return
|
||||
}
|
||||
|
||||
type lrs struct {
|
||||
c io.Closer
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
func (r lrs) Read(p []byte) (int, error) {
|
||||
return r.r.Read(p)
|
||||
}
|
||||
|
||||
func (r lrs) Close() error {
|
||||
return r.c.Close()
|
||||
}
|
||||
114
pkg/filemanager/manager/fs.go
Normal file
114
pkg/filemanager/manager/fs.go
Normal file
@@ -0,0 +1,114 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/cos"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/local"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/obs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/onedrive"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/oss"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/qiniu"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/remote"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/s3"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/upyun"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
)
|
||||
|
||||
func (m *manager) LocalDriver(policy *ent.StoragePolicy) driver.Handler {
|
||||
if policy == nil {
|
||||
policy = &ent.StoragePolicy{Type: types.PolicyTypeLocal, Settings: &types.PolicySetting{}}
|
||||
}
|
||||
return local.New(policy, m.l, m.config)
|
||||
}
|
||||
|
||||
func (m *manager) CastStoragePolicyOnSlave(ctx context.Context, policy *ent.StoragePolicy) *ent.StoragePolicy {
|
||||
if !m.stateless {
|
||||
return policy
|
||||
}
|
||||
|
||||
nodeId := cluster.NodeIdFromContext(ctx)
|
||||
if policy.Type == types.PolicyTypeRemote {
|
||||
if nodeId != policy.NodeID {
|
||||
return policy
|
||||
}
|
||||
|
||||
policyCopy := *policy
|
||||
policyCopy.Type = types.PolicyTypeLocal
|
||||
return &policyCopy
|
||||
} else if policy.Type == types.PolicyTypeLocal {
|
||||
policyCopy := *policy
|
||||
policyCopy.NodeID = nodeId
|
||||
policyCopy.Type = types.PolicyTypeRemote
|
||||
policyCopy.SetNode(&ent.Node{
|
||||
ID: nodeId,
|
||||
Server: cluster.MasterSiteUrlFromContext(ctx),
|
||||
SlaveKey: m.config.Slave().Secret,
|
||||
})
|
||||
return &policyCopy
|
||||
} else if policy.Type == types.PolicyTypeOss {
|
||||
policyCopy := *policy
|
||||
if policyCopy.Settings != nil {
|
||||
policyCopy.Settings.ServerSideEndpoint = ""
|
||||
}
|
||||
}
|
||||
|
||||
return policy
|
||||
}
|
||||
|
||||
// GetStorageDriver constructs the storage driver handler matching the type of
// the given policy. ErrUnknownPolicyType is returned for unrecognized types.
func (m *manager) GetStorageDriver(ctx context.Context, policy *ent.StoragePolicy) (driver.Handler, error) {
	switch policy.Type {
	case types.PolicyTypeLocal:
		return local.New(policy, m.l, m.config), nil
	case types.PolicyTypeRemote:
		return remote.New(ctx, policy, m.settings, m.config, m.l)
	case types.PolicyTypeOss:
		return oss.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
	case types.PolicyTypeCos:
		return cos.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
	case types.PolicyTypeS3:
		return s3.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
	case types.PolicyTypeObs:
		return obs.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
	case types.PolicyTypeQiniu:
		return qiniu.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
	case types.PolicyTypeUpyun:
		return upyun.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
	case types.PolicyTypeOd:
		return onedrive.New(ctx, policy, m.settings, m.config, m.l, m.dep.CredManager())
	default:
		return nil, ErrUnknownPolicyType
	}
}
|
||||
|
||||
func (m *manager) getEntityPolicyDriver(cxt context.Context, e fs.Entity, policyOverwrite *ent.StoragePolicy) (*ent.StoragePolicy, driver.Handler, error) {
|
||||
policyID := e.PolicyID()
|
||||
var (
|
||||
policy *ent.StoragePolicy
|
||||
err error
|
||||
)
|
||||
if policyID == 0 {
|
||||
policy = &ent.StoragePolicy{Type: types.PolicyTypeLocal, Settings: &types.PolicySetting{}}
|
||||
} else {
|
||||
if policyOverwrite != nil && policyOverwrite.ID == policyID {
|
||||
policy = policyOverwrite
|
||||
} else {
|
||||
policy, err = m.policyClient.GetPolicyByID(cxt, e.PolicyID())
|
||||
if err != nil {
|
||||
return nil, nil, serializer.NewError(serializer.CodeDBError, "failed to get policy", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
d, err := m.GetStorageDriver(cxt, policy)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return policy, d, nil
|
||||
}
|
||||
171
pkg/filemanager/manager/manager.go
Normal file
171
pkg/filemanager/manager/manager.go
Normal file
@@ -0,0 +1,171 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
)
|
||||
|
||||
var (
	// ErrUnknownPolicyType is returned when a storage policy references a
	// driver type not handled by GetStorageDriver.
	ErrUnknownPolicyType = serializer.NewError(serializer.CodeInternalSetting, "Unknown policy type", nil)
)
|
||||
|
||||
const (
	// UploadSessionCachePrefix prefixes KV keys that store upload sessions
	// awaiting their completion callback.
	UploadSessionCachePrefix = "callback_"
	// UploadSessionCtx is the ctx key for the current upload session.
	UploadSessionCtx = "uploadSession"
)
|
||||
|
||||
type (
	// FileOperation groups per-file CRUD operations of the file manager.
	FileOperation interface {
		// Get gets file object by given path
		Get(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.File, error)
		// List lists files under given path
		List(ctx context.Context, path *fs.URI, args *ListArgs) (fs.File, *fs.ListFileResult, error)
		// Create creates a file or directory
		Create(ctx context.Context, path *fs.URI, fileType types.FileType, opt ...fs.Option) (fs.File, error)
		// Rename renames a file or directory
		Rename(ctx context.Context, path *fs.URI, newName string) (fs.File, error)
		// Delete deletes a group of file or directory. UnlinkOnly indicates whether to delete file record in DB only.
		Delete(ctx context.Context, path []*fs.URI, opts ...fs.Option) error
		// Restore restores a group of files
		Restore(ctx context.Context, path ...*fs.URI) error
		// MoveOrCopy moves or copies a group of files
		MoveOrCopy(ctx context.Context, src []*fs.URI, dst *fs.URI, isCopy bool) error
		// Update puts file content. If given file does not exist, it will create a new one.
		Update(ctx context.Context, req *fs.UploadRequest, opts ...fs.Option) (fs.File, error)
		// Walk walks through given path
		Walk(ctx context.Context, path *fs.URI, depth int, f fs.WalkFunc, opts ...fs.Option) error
		// PatchMedata updates or inserts metadata entries of the given files.
		PatchMedata(ctx context.Context, path []*fs.URI, data ...fs.MetadataPatch) error
		// CreateViewerSession creates a viewer session for given file
		CreateViewerSession(ctx context.Context, uri *fs.URI, version string, viewer *setting.Viewer) (*ViewerSession, error)
	}

	// FsManagement groups filesystem-level management operations.
	FsManagement interface {
		// SharedAddressTranslation translates shared symbolic address to real address. If path does not exist,
		// most recent existing parent directory will be returned.
		SharedAddressTranslation(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.File, *fs.URI, error)
		// Capacity gets capacity of current file system
		Capacity(ctx context.Context) (*fs.Capacity, error)
		// CheckIfCapacityExceeded checks if given user's capacity exceeded, and send notification email
		CheckIfCapacityExceeded(ctx context.Context) error
		// LocalDriver gets local driver for operating local files.
		LocalDriver(policy *ent.StoragePolicy) driver.Handler
		// CastStoragePolicyOnSlave check if given storage policy need to be casted to another.
		// It is used on slave node, when local policy need to cast to remote policy;
		// Remote policy with same node ID can be casted to local policy.
		CastStoragePolicyOnSlave(ctx context.Context, policy *ent.StoragePolicy) *ent.StoragePolicy
		// GetStorageDriver gets storage driver for given policy
		GetStorageDriver(ctx context.Context, policy *ent.StoragePolicy) (driver.Handler, error)
	}

	// ShareManagement groups share-link operations.
	ShareManagement interface {
		// CreateOrUpdateShare creates or updates a share link for given path.
		CreateOrUpdateShare(ctx context.Context, path *fs.URI, args *CreateShareArgs) (*ent.Share, error)
	}

	// Archiver creates archives from groups of files.
	Archiver interface {
		// CreateArchive compresses the given URIs into writer and returns the
		// number of files that failed to be archived.
		CreateArchive(ctx context.Context, uris []*fs.URI, writer io.Writer, opts ...fs.Option) (int, error)
	}

	// FileManager is the complete file management facade composed from the
	// capability interfaces above.
	FileManager interface {
		fs.LockSystem
		FileOperation
		EntityManagement
		UploadManagement
		FsManagement
		ShareManagement
		Archiver

		// Recycle reset current FileManager object and put back to resource pool
		Recycle()
	}

	// GetEntityUrlArgs single args to get entity url
	GetEntityUrlArgs struct {
		URI               *fs.URI
		PreferredEntityID string
	}

	// CreateShareArgs args to create share link
	CreateShareArgs struct {
		ExistedShareID  int        // non-zero to update an existing share
		IsPrivate       bool       // whether the share requires a password
		RemainDownloads int        // remaining download quota
		Expire          *time.Time // nil for no expiry
	}
)
|
||||
|
||||
// manager is the default FileManager implementation.
type manager struct {
	user         *ent.User     // bound user; nil in stateless mode
	l            logging.Logger
	fs           fs.FileSystem // backing database filesystem; nil in stateless mode
	settings     setting.Provider
	kv           cache.Driver
	config       conf.ConfigProvider
	stateless    bool // true on slave nodes or when no user is bound
	auth         auth.Auth
	hasher       hashid.Encoder
	policyClient inventory.StoragePolicyClient

	dep dependency.Dep
}
|
||||
|
||||
// NewFileManager creates a FileManager bound to user u. On slave nodes, or
// when no user is given, a stateless manager without a backing filesystem is
// returned instead.
func NewFileManager(dep dependency.Dep, u *ent.User) FileManager {
	config := dep.ConfigProvider()
	if config.System().Mode == conf.SlaveMode || u == nil {
		return newStatelessFileManager(dep)
	}
	return &manager{
		l:        dep.Logger(),
		user:     u,
		settings: dep.SettingProvider(),
		fs: dbfs.NewDatabaseFS(u, dep.FileClient(), dep.ShareClient(), dep.Logger(), dep.LockSystem(),
			dep.SettingProvider(), dep.StoragePolicyClient(), dep.HashIDEncoder(), dep.UserClient(), dep.KV(), dep.NavigatorStateKV()),
		kv:           dep.KV(),
		config:       config,
		auth:         dep.GeneralAuth(),
		hasher:       dep.HashIDEncoder(),
		policyClient: dep.StoragePolicyClient(),
		dep:          dep,
	}
}
|
||||
|
||||
// newStatelessFileManager builds a manager without a bound user or backing
// filesystem; only stateless operations (e.g. driver construction, policy
// casting) are available on it.
func newStatelessFileManager(dep dependency.Dep) FileManager {
	return &manager{
		l:         dep.Logger(),
		settings:  dep.SettingProvider(),
		kv:        dep.KV(),
		config:    dep.ConfigProvider(),
		stateless: true,
		auth:      dep.GeneralAuth(),
		dep:       dep,
		hasher:    dep.HashIDEncoder(),
	}
}
|
||||
|
||||
func (m *manager) Recycle() {
|
||||
if m.fs != nil {
|
||||
m.fs.Recycle()
|
||||
}
|
||||
}
|
||||
|
||||
// newOption returns a zero-valued option set to be filled in by fs.Option
// appliers.
func newOption() *fs.FsOption {
	return &fs.FsOption{}
}
|
||||
193
pkg/filemanager/manager/mediameta.go
Normal file
193
pkg/filemanager/manager/mediameta.go
Normal file
@@ -0,0 +1,193 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/task"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type (
	// MediaMetaTask is a queued task that extracts media metadata for a
	// newly uploaded entity and persists it as file metadata.
	MediaMetaTask struct {
		*queue.DBTask
	}

	// MediaMetaTaskState is the JSON-serialized private state of a
	// MediaMetaTask.
	MediaMetaTaskState struct {
		Uri      *fs.URI `json:"uri"`       // file the entity belongs to
		EntityID int     `json:"entity_id"` // version entity to extract from
	}
)
|
||||
|
||||
func init() {
	// Register the factory so persisted media-meta tasks can be resumed from
	// their DB models after a restart.
	queue.RegisterResumableTaskFactory(queue.MediaMetaTaskType, NewMediaMetaTaskFromModel)
}
|
||||
|
||||
// NewMediaMetaTask creates a new MediaMetaTask to extract media metadata from
// the given version entity of the file at uri, owned by creator. The task's
// state is serialized to JSON for persistence.
func NewMediaMetaTask(ctx context.Context, uri *fs.URI, entityID int, creator *ent.User) (*MediaMetaTask, error) {
	state := &MediaMetaTaskState{
		Uri:      uri,
		EntityID: entityID,
	}
	stateBytes, err := json.Marshal(state)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal state: %w", err)
	}

	return &MediaMetaTask{
		DBTask: &queue.DBTask{
			DirectOwner: creator,
			Task: &ent.Task{
				Type:          queue.MediaMetaTaskType,
				CorrelationID: logging.CorrelationID(ctx),
				PrivateState:  string(stateBytes),
				PublicState:   &types.TaskPublicState{},
			},
		},
	}, nil
}
|
||||
|
||||
// NewMediaMetaTaskFromModel restores a MediaMetaTask from its persisted DB
// model; used as the resumable-task factory.
func NewMediaMetaTaskFromModel(task *ent.Task) queue.Task {
	return &MediaMetaTask{
		DBTask: &queue.DBTask{
			Task: task,
		},
	}
}
|
||||
|
||||
// Do executes the media meta task: it restores the serialized state, performs
// extraction via a fresh file manager, and maps the outcome to a task status.
func (m *MediaMetaTask) Do(ctx context.Context) (task.Status, error) {
	dep := dependency.FromContext(ctx)
	fm := NewFileManager(dep, inventory.UserFromContext(ctx)).(*manager)

	// unmarshal state
	var state MediaMetaTaskState
	if err := json.Unmarshal([]byte(m.State()), &state); err != nil {
		// Corrupted state cannot be retried.
		return task.StatusError, fmt.Errorf("failed to unmarshal state: %s (%w)", err, queue.CriticalErr)
	}

	err := fm.ExtractAndSaveMediaMeta(ctx, state.Uri, state.EntityID)
	if err != nil {
		return task.StatusError, err
	}

	return task.StatusCompleted, nil
}
|
||||
|
||||
func (m *manager) ExtractAndSaveMediaMeta(ctx context.Context, uri *fs.URI, entityID int) error {
|
||||
// 1. retrieve file info
|
||||
file, err := m.fs.Get(ctx, uri, dbfs.WithFileEntities())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get file: %w", err)
|
||||
}
|
||||
|
||||
versions := lo.Filter(file.Entities(), func(i fs.Entity, index int) bool {
|
||||
return i.Type() == types.EntityTypeVersion
|
||||
})
|
||||
targetVersion, versionIndex, found := lo.FindIndexOf(versions, func(i fs.Entity) bool {
|
||||
return i.ID() == entityID
|
||||
})
|
||||
if !found {
|
||||
return fmt.Errorf("failed to find version: %s (%w)", err, queue.CriticalErr)
|
||||
}
|
||||
|
||||
if versionIndex != 0 {
|
||||
m.l.Debug("Skip media meta task for non-latest version.")
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
metas []driver.MediaMeta
|
||||
)
|
||||
// 2. try using native driver
|
||||
_, d, err := m.getEntityPolicyDriver(ctx, targetVersion, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get storage driver: %s (%w)", err, queue.CriticalErr)
|
||||
}
|
||||
driverCaps := d.Capabilities()
|
||||
if util.IsInExtensionList(driverCaps.MediaMetaSupportedExts, file.Name()) {
|
||||
m.l.Debug("Using native driver to generate media meta.")
|
||||
metas, err = d.MediaMeta(ctx, targetVersion.Source(), file.Ext())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get media meta using native driver: %w", err)
|
||||
}
|
||||
} else if driverCaps.MediaMetaProxy && util.IsInExtensionList(m.dep.MediaMetaExtractor(ctx).Exts(), file.Name()) {
|
||||
m.l.Debug("Using local extractor to generate media meta.")
|
||||
extractor := m.dep.MediaMetaExtractor(ctx)
|
||||
source, err := m.GetEntitySource(ctx, targetVersion.ID())
|
||||
defer source.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get entity source: %w", err)
|
||||
}
|
||||
|
||||
metas, err = extractor.Extract(ctx, file.Ext(), source)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to extract media meta using local extractor: %w", err)
|
||||
}
|
||||
|
||||
} else {
|
||||
m.l.Debug("No available generator for media meta.")
|
||||
return nil
|
||||
}
|
||||
|
||||
m.l.Debug("%d media meta generated.", len(metas))
|
||||
m.l.Debug("Media meta: %v", metas)
|
||||
|
||||
// 3. save meta
|
||||
if len(metas) > 0 {
|
||||
if err := m.fs.PatchMetadata(ctx, []*fs.URI{uri}, lo.Map(metas, func(i driver.MediaMeta, index int) fs.MetadataPatch {
|
||||
return fs.MetadataPatch{
|
||||
Key: fmt.Sprintf("%s:%s", i.Type, i.Key),
|
||||
Value: i.Value,
|
||||
}
|
||||
})...); err != nil {
|
||||
return fmt.Errorf("failed to save media meta: %s (%w)", err, queue.CriticalErr)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *manager) shouldGenerateMediaMeta(ctx context.Context, d driver.Handler, fileName string) bool {
|
||||
driverCaps := d.Capabilities()
|
||||
if util.IsInExtensionList(driverCaps.MediaMetaSupportedExts, fileName) {
|
||||
// Handler support it natively
|
||||
return true
|
||||
}
|
||||
|
||||
if driverCaps.MediaMetaProxy && util.IsInExtensionList(m.dep.MediaMetaExtractor(ctx).Exts(), fileName) {
|
||||
// Handler does not support. but proxy is enabled.
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *manager) mediaMetaForNewEntity(ctx context.Context, session *fs.UploadSession, d driver.Handler) {
|
||||
if session.Props.EntityType == nil || *session.Props.EntityType == types.EntityTypeVersion {
|
||||
if !m.shouldGenerateMediaMeta(ctx, d, session.Props.Uri.Name()) {
|
||||
return
|
||||
}
|
||||
|
||||
mediaMetaTask, err := NewMediaMetaTask(ctx, session.Props.Uri, session.EntityID, m.user)
|
||||
if err != nil {
|
||||
m.l.Warning("Failed to create media meta task: %s", err)
|
||||
return
|
||||
}
|
||||
if err := m.dep.MediaMetaQueue(ctx).QueueTask(ctx, mediaMetaTask); err != nil {
|
||||
m.l.Warning("Failed to queue media meta task: %s", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
174
pkg/filemanager/manager/metadata.go
Normal file
174
pkg/filemanager/manager/metadata.go
Normal file
@@ -0,0 +1,174 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/constants"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/go-playground/validator/v10"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type (
	// metadataValidator validates a single metadata patch before it is
	// persisted. Implementations may reject the patch by returning an error.
	metadataValidator func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error
)

// Metadata key namespaces and well-known keys. A metadata key has the form
// "<category>:<name>"; validators are looked up by category and then by
// full key (or the category wildcard).
const (
	// wildcardMetadataKey matches any key within a category.
	wildcardMetadataKey      = "*"
	customizeMetadataSuffix  = "customize"
	tagMetadataSuffix        = "tag"
	iconColorMetadataKey     = customizeMetadataSuffix + ":icon_color"
	emojiIconMetadataKey     = customizeMetadataSuffix + ":emoji"
	shareOwnerMetadataKey    = dbfs.MetadataSysPrefix + "shared_owner"
	shareRedirectMetadataKey = dbfs.MetadataSysPrefix + "shared_redirect"
)
|
||||
|
||||
var (
|
||||
validate = validator.New()
|
||||
|
||||
lastEmojiHash = ""
|
||||
emojiPresets = map[string]struct{}{}
|
||||
|
||||
// validateColor validates a color value
|
||||
validateColor = func(optional bool) metadataValidator {
|
||||
return func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {
|
||||
if patch.Remove {
|
||||
return nil
|
||||
}
|
||||
|
||||
tag := "omitempty,iscolor"
|
||||
if !optional {
|
||||
tag = "required,iscolor"
|
||||
}
|
||||
|
||||
res := validate.Var(patch.Value, tag)
|
||||
if res != nil {
|
||||
return fmt.Errorf("invalid color: %w", res)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
validators = map[string]map[string]metadataValidator{
|
||||
"sys": {
|
||||
wildcardMetadataKey: func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {
|
||||
if patch.Remove {
|
||||
return fmt.Errorf("cannot remove system metadata")
|
||||
}
|
||||
|
||||
dep := dependency.FromContext(ctx)
|
||||
// Validate share owner is valid hashid
|
||||
if patch.Key == shareOwnerMetadataKey {
|
||||
hasher := dep.HashIDEncoder()
|
||||
_, err := hasher.Decode(patch.Value, hashid.UserID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid share owner: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate share redirect uri is valid share uri
|
||||
if patch.Key == shareRedirectMetadataKey {
|
||||
uri, err := fs.NewUriFromString(patch.Value)
|
||||
if err != nil || uri.FileSystem() != constants.FileSystemShare {
|
||||
return fmt.Errorf("invalid redirect uri: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unsupported system metadata key: %s", patch.Key)
|
||||
},
|
||||
},
|
||||
"dav": {},
|
||||
customizeMetadataSuffix: {
|
||||
iconColorMetadataKey: validateColor(false),
|
||||
emojiIconMetadataKey: func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {
|
||||
if patch.Remove {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate if patched emoji is within preset list.
|
||||
emojis := m.settings.EmojiPresets(ctx)
|
||||
current := fmt.Sprintf("%x", (sha1.Sum([]byte(emojis))))
|
||||
if current != lastEmojiHash {
|
||||
presets := make(map[string][]string)
|
||||
if err := json.Unmarshal([]byte(emojis), &presets); err != nil {
|
||||
return fmt.Errorf("failed to read emoji setting: %w", err)
|
||||
}
|
||||
|
||||
emojiPresets = make(map[string]struct{})
|
||||
for _, v := range presets {
|
||||
for _, emoji := range v {
|
||||
emojiPresets[emoji] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := emojiPresets[patch.Value]; !ok {
|
||||
return fmt.Errorf("unsupported emoji")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
tagMetadataSuffix: {
|
||||
wildcardMetadataKey: func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {
|
||||
if err := validateColor(true)(ctx, m, patch); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if patch.Key == tagMetadataSuffix+":" {
|
||||
return fmt.Errorf("invalid metadata key")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// PatchMedata validates the given metadata patches and applies them to every
// file addressed by path.
// NOTE(review): the name looks like a typo of "PatchMetadata", but it is part
// of the exported interface — renaming would break callers.
func (m *manager) PatchMedata(ctx context.Context, path []*fs.URI, data ...fs.MetadataPatch) error {
	if err := m.validateMetadata(ctx, data...); err != nil {
		return err
	}

	return m.fs.PatchMetadata(ctx, path, data...)
}
|
||||
|
||||
func (m *manager) validateMetadata(ctx context.Context, data ...fs.MetadataPatch) error {
|
||||
for _, patch := range data {
|
||||
category := strings.Split(patch.Key, ":")
|
||||
if len(category) < 2 {
|
||||
return serializer.NewError(serializer.CodeParamErr, "Invalid metadata key", nil)
|
||||
}
|
||||
|
||||
categoryValidators, ok := validators[category[0]]
|
||||
if !ok {
|
||||
return serializer.NewError(serializer.CodeParamErr, "Invalid metadata key",
|
||||
fmt.Errorf("unknown category: %s", category[0]))
|
||||
}
|
||||
|
||||
// Explicit validators
|
||||
if v, ok := categoryValidators[patch.Key]; ok {
|
||||
if err := v(ctx, m, &patch); err != nil {
|
||||
return serializer.NewError(serializer.CodeParamErr, "Invalid metadata patch", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Wildcard validators
|
||||
if v, ok := categoryValidators[wildcardMetadataKey]; ok {
|
||||
if err := v(ctx, m, &patch); err != nil {
|
||||
return serializer.NewError(serializer.CodeParamErr, "Invalid metadata patch", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
296
pkg/filemanager/manager/operation.go
Normal file
296
pkg/filemanager/manager/operation.go
Normal file
@@ -0,0 +1,296 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
const (
	// EntityUrlCacheKeyPrefix prefixes KV cache keys for generated entity URLs.
	EntityUrlCacheKeyPrefix = "entity_url_"
	// DownloadSentinelCachePrefix prefixes KV cache keys for download sentinels.
	DownloadSentinelCachePrefix = "download_sentinel_"
)

type (
	// ListArgs bundles pagination and ordering parameters for List.
	ListArgs struct {
		Page           int
		PageSize       int
		PageToken      string // cursor token, used when cursor pagination applies
		Order          string
		OrderDirection string
		// StreamResponseCallback is used for streamed list operation, e.g. searching files.
		// Whenever a new item is found, this callback will be called with the current item and the parent item.
		StreamResponseCallback func(fs.File, []fs.File)
	}

	// EntityUrlCache is the cached form of a generated entity URL and its
	// optional expiry; stored gob-encoded in the KV cache.
	EntityUrlCache struct {
		Url      string
		ExpireAt *time.Time
	}
)
|
||||
|
||||
// init registers EntityUrlCache with gob so KV cache backends can
// (de)serialize it.
func init() {
	gob.Register(EntityUrlCache{})
}
|
||||
|
||||
// Get retrieves a single file identified by path from the underlying
// filesystem, applying the given options (e.g. required capabilities).
func (m *manager) Get(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.File, error) {
	return m.fs.Get(ctx, path, opts...)
}
|
||||
|
||||
// List returns the file at path together with its (paginated) children.
// Search URIs may stream results via args.StreamResponseCallback and always
// use cursor pagination; plain listings use cursor or offset pagination
// depending on DBFS settings.
func (m *manager) List(ctx context.Context, path *fs.URI, args *ListArgs) (fs.File, *fs.ListFileResult, error) {
	dbfsSetting := m.settings.DBFS(ctx)
	opts := []fs.Option{
		fs.WithPageSize(args.PageSize),
		fs.WithOrderBy(args.Order),
		fs.WithOrderDirection(args.OrderDirection),
		dbfs.WithFilePublicMetadata(),
		dbfs.WithContextHint(),
		dbfs.WithFileShareIfOwned(),
	}

	searchParams := path.SearchParameters()
	if searchParams != nil {
		// Stream search hits back to the caller as they are found, if enabled.
		if dbfsSetting.UseSSEForSearch {
			opts = append(opts, dbfs.WithStreamListResponseCallback(args.StreamResponseCallback))
		}

		if searchParams.Category != "" {
			// Overwrite search query with predefined category
			category := fs.SearchCategoryFromString(searchParams.Category)
			if category == setting.CategoryUnknown {
				return nil, nil, fmt.Errorf("unknown category: %s", searchParams.Category)
			}

			// Re-derive search parameters from the rewritten query.
			path = path.SetQuery(m.settings.SearchCategoryQuery(ctx, category))
			searchParams = path.SearchParameters()
		}
	}

	// Search listings only support cursor pagination.
	if dbfsSetting.UseCursorPagination || searchParams != nil {
		opts = append(opts, dbfs.WithCursorPagination(args.PageToken))
	} else {
		opts = append(opts, fs.WithPage(args.Page))
	}

	return m.fs.List(ctx, path, opts...)
}
|
||||
|
||||
// SharedAddressTranslation resolves a share-namespace URI into the concrete
// file and its canonical URI in the owner's filesystem.
// NOTE(review): opts are parsed into o but never used below — confirm whether
// this is dead code or reserved for future options.
func (m *manager) SharedAddressTranslation(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.File, *fs.URI, error) {
	o := newOption()
	for _, opt := range opts {
		opt.Apply(o)
	}

	return m.fs.SharedAddressTranslation(ctx, path)
}
|
||||
|
||||
func (m *manager) Create(ctx context.Context, path *fs.URI, fileType types.FileType, opts ...fs.Option) (fs.File, error) {
|
||||
o := newOption()
|
||||
for _, opt := range opts {
|
||||
opt.Apply(o)
|
||||
}
|
||||
|
||||
if m.stateless {
|
||||
return nil, o.Node.CreateFile(ctx, &fs.StatelessCreateFileService{
|
||||
Path: path.String(),
|
||||
Type: fileType,
|
||||
UserID: o.StatelessUserID,
|
||||
})
|
||||
}
|
||||
|
||||
isSymbolic := false
|
||||
if o.Metadata != nil {
|
||||
if err := m.validateMetadata(ctx, lo.MapToSlice(o.Metadata, func(key string, value string) fs.MetadataPatch {
|
||||
if key == shareRedirectMetadataKey {
|
||||
isSymbolic = true
|
||||
}
|
||||
|
||||
return fs.MetadataPatch{
|
||||
Key: key,
|
||||
Value: value,
|
||||
}
|
||||
})...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if isSymbolic {
|
||||
opts = append(opts, dbfs.WithSymbolicLink())
|
||||
}
|
||||
|
||||
return m.fs.Create(ctx, path, fileType, opts...)
|
||||
}
|
||||
|
||||
// Rename changes the display name of the file at path to newName and returns
// the updated file.
func (m *manager) Rename(ctx context.Context, path *fs.URI, newName string) (fs.File, error) {
	return m.fs.Rename(ctx, path, newName)
}
|
||||
|
||||
// MoveOrCopy moves (or copies, when isCopy is true) the src files into the
// dst folder.
func (m *manager) MoveOrCopy(ctx context.Context, src []*fs.URI, dst *fs.URI, isCopy bool) error {
	return m.fs.MoveOrCopy(ctx, src, dst, isCopy)
}
|
||||
|
||||
// SoftDelete moves the given files into the trash bin without destroying
// their underlying data.
func (m *manager) SoftDelete(ctx context.Context, path ...*fs.URI) error {
	return m.fs.SoftDelete(ctx, path...)
}
|
||||
|
||||
// Delete removes the given files. Unless soft delete is explicitly skipped
// (by the caller or by the system), files are moved to the trash bin instead.
// A hard delete collects the resulting stale entities and queues a background
// task to recycle their physical blobs.
func (m *manager) Delete(ctx context.Context, path []*fs.URI, opts ...fs.Option) error {
	o := newOption()
	for _, opt := range opts {
		opt.Apply(o)
	}

	// Default path: move to trash bin rather than destroying data.
	if !o.SkipSoftDelete && !o.SysSkipSoftDelete {
		return m.SoftDelete(ctx, path...)
	}

	staleEntities, err := m.fs.Delete(ctx, path, fs.WithUnlinkOnly(o.UnlinkOnly), fs.WithSysSkipSoftDelete(o.SysSkipSoftDelete))
	if err != nil {
		return err
	}

	m.l.Debug("New stale entities: %v", staleEntities)

	// Delete stale entities
	if len(staleEntities) > 0 {
		t, err := newExplicitEntityRecycleTask(ctx, lo.Map(staleEntities, func(entity fs.Entity, index int) int {
			return entity.ID()
		}))
		if err != nil {
			return fmt.Errorf("failed to create explicit entity recycle task: %w", err)
		}

		if err := m.dep.EntityRecycleQueue(ctx).QueueTask(ctx, t); err != nil {
			return fmt.Errorf("failed to queue explicit entity recycle task: %w", err)
		}
	}
	return nil
}
|
||||
|
||||
// Walk traverses the tree rooted at path down to the given depth, invoking f
// for each visited file.
func (m *manager) Walk(ctx context.Context, path *fs.URI, depth int, f fs.WalkFunc, opts ...fs.Option) error {
	return m.fs.Walk(ctx, path, depth, f, opts...)
}
|
||||
|
||||
// Capacity returns the total and used storage capacity of the manager's
// current user.
func (m *manager) Capacity(ctx context.Context) (*fs.Capacity, error) {
	res, err := m.fs.Capacity(ctx, m.user)
	if err != nil {
		return nil, err
	}

	return res, nil
}
|
||||
|
||||
// CheckIfCapacityExceeded loads the user's storage capacity (with group
// information) and is intended to report whether usage exceeds the total.
// NOTE(review): both branches currently return nil, so callers never observe
// an over-capacity error — confirm whether this is an intentional
// community-edition stub or a missing error return.
func (m *manager) CheckIfCapacityExceeded(ctx context.Context) error {
	// Ensure the user's group (and its quota settings) is loaded.
	ctx = context.WithValue(ctx, inventory.LoadUserGroup{}, true)
	capacity, err := m.Capacity(ctx)
	if err != nil {
		return fmt.Errorf("failed to get user capacity: %w", err)
	}

	if capacity.Used <= capacity.Total {
		return nil
	}

	return nil
}
|
||||
|
||||
func (l *manager) ConfirmLock(ctx context.Context, ancestor fs.File, uri *fs.URI, token ...string) (func(), fs.LockSession, error) {
|
||||
return l.fs.ConfirmLock(ctx, ancestor, uri, token...)
|
||||
}
|
||||
|
||||
func (l *manager) Lock(ctx context.Context, d time.Duration, requester *ent.User, zeroDepth bool,
|
||||
application lock.Application, uri *fs.URI, token string) (fs.LockSession, error) {
|
||||
return l.fs.Lock(ctx, d, requester, zeroDepth, application, uri, token)
|
||||
}
|
||||
|
||||
func (l *manager) Unlock(ctx context.Context, tokens ...string) error {
|
||||
return l.fs.Unlock(ctx, tokens...)
|
||||
}
|
||||
|
||||
func (l *manager) Refresh(ctx context.Context, d time.Duration, token string) (lock.LockDetails, error) {
|
||||
return l.fs.Refresh(ctx, d, token)
|
||||
}
|
||||
|
||||
func (l *manager) Restore(ctx context.Context, path ...*fs.URI) error {
|
||||
return l.fs.Restore(ctx, path...)
|
||||
}
|
||||
|
||||
// CreateOrUpdateShare creates a share link for the file at path, or updates
// the existing share identified by args.ExistedShareID. Only the file's owner
// may share it, and symbolic files cannot be shared. Private shares receive a
// random 8-character lowercase password.
func (l *manager) CreateOrUpdateShare(ctx context.Context, path *fs.URI, args *CreateShareArgs) (*ent.Share, error) {
	file, err := l.fs.Get(ctx, path, dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityShare))
	if err != nil {
		return nil, serializer.NewError(serializer.CodeNotFound, "src file not found", err)
	}

	// Only file owner can share file
	if file.OwnerID() != l.user.ID {
		return nil, serializer.NewError(serializer.CodeNoPermissionErr, "permission denied", nil)
	}

	if file.IsSymbolic() {
		return nil, serializer.NewError(serializer.CodeNoPermissionErr, "cannot share symbolic file", nil)
	}

	var existed *ent.Share
	shareClient := l.dep.ShareClient()
	if args.ExistedShareID != 0 {
		// Load the existing share together with its file edge so ownership
		// of the share can be verified against the target file.
		loadShareCtx := context.WithValue(ctx, inventory.LoadShareFile{}, true)
		existed, err = shareClient.GetByID(loadShareCtx, args.ExistedShareID)
		if err != nil {
			return nil, serializer.NewError(serializer.CodeNotFound, "failed to get existed share", err)
		}

		// The existing share must point at the same file being shared.
		if existed.Edges.File.ID != file.ID() {
			return nil, serializer.NewError(serializer.CodeNotFound, "share link not found", nil)
		}
	}

	password := ""
	if args.IsPrivate {
		password = util.RandString(8, util.RandomLowerCases)
	}

	share, err := shareClient.Upsert(ctx, &inventory.CreateShareParams{
		OwnerID:         file.OwnerID(),
		FileID:          file.ID(),
		Password:        password,
		Expires:         args.Expire,
		RemainDownloads: args.RemainDownloads,
		Existed:         existed,
	})

	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "failed to create share", err)
	}

	return share, nil
}
|
||||
|
||||
func getEntityDisplayName(f fs.File, e fs.Entity) string {
|
||||
switch e.Type() {
|
||||
case types.EntityTypeThumbnail:
|
||||
return fmt.Sprintf("%s_thumbnail", f.DisplayName())
|
||||
case types.EntityTypeLivePhoto:
|
||||
return fmt.Sprintf("%s_live_photo.mov", f.DisplayName())
|
||||
default:
|
||||
return f.Name()
|
||||
}
|
||||
}
|
||||
|
||||
func expireTimeToTTL(expireAt *time.Time) int {
|
||||
if expireAt == nil {
|
||||
return -1
|
||||
}
|
||||
|
||||
return int(time.Until(*expireAt).Seconds())
|
||||
}
|
||||
374
pkg/filemanager/manager/recycle.go
Normal file
374
pkg/filemanager/manager/recycle.go
Normal file
@@ -0,0 +1,374 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/task"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/crontab"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type (
	// ExplicitEntityRecycleTask deletes an explicit list of stale entities in
	// the background queue.
	ExplicitEntityRecycleTask struct {
		*queue.DBTask
	}

	// ExplicitEntityRecycleTaskState is the JSON-serialized private state of
	// an ExplicitEntityRecycleTask.
	ExplicitEntityRecycleTaskState struct {
		EntityIDs []int            `json:"entity_ids,omitempty"`
		Errors    [][]RecycleError `json:"errors,omitempty"`
	}

	// RecycleError records the failure of recycling a single entity.
	RecycleError struct {
		ID    string `json:"id"`
		Error string `json:"error"`
	}
)
|
||||
|
||||
func init() {
|
||||
queue.RegisterResumableTaskFactory(queue.ExplicitEntityRecycleTaskType, NewExplicitEntityRecycleTaskFromModel)
|
||||
queue.RegisterResumableTaskFactory(queue.EntityRecycleRoutineTaskType, NewEntityRecycleRoutineTaskFromModel)
|
||||
crontab.Register(setting.CronTypeEntityCollect, func(ctx context.Context) {
|
||||
dep := dependency.FromContext(ctx)
|
||||
l := dep.Logger()
|
||||
t, err := NewEntityRecycleRoutineTask(ctx)
|
||||
if err != nil {
|
||||
l.Error("Failed to create entity recycle routine task: %s", err)
|
||||
}
|
||||
|
||||
if err := dep.EntityRecycleQueue(ctx).QueueTask(ctx, t); err != nil {
|
||||
l.Error("Failed to queue entity recycle routine task: %s", err)
|
||||
}
|
||||
})
|
||||
crontab.Register(setting.CronTypeTrashBinCollect, CronCollectTrashBin)
|
||||
}
|
||||
|
||||
// NewExplicitEntityRecycleTaskFromModel rehydrates an ExplicitEntityRecycleTask
// from its persisted task row; used by the resumable-task factory registry.
func NewExplicitEntityRecycleTaskFromModel(task *ent.Task) queue.Task {
	return &ExplicitEntityRecycleTask{
		DBTask: &queue.DBTask{
			Task: task,
		},
	}
}
|
||||
|
||||
// newExplicitEntityRecycleTask builds a recycle task for the given entity IDs,
// owned by the user in ctx. ResumeTime is set one second in the past so the
// task becomes eligible to run immediately.
func newExplicitEntityRecycleTask(ctx context.Context, entities []int) (*ExplicitEntityRecycleTask, error) {
	state := &ExplicitEntityRecycleTaskState{
		EntityIDs: entities,
		Errors:    make([][]RecycleError, 0),
	}
	stateBytes, err := json.Marshal(state)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal state: %w", err)
	}

	t := &ExplicitEntityRecycleTask{
		DBTask: &queue.DBTask{
			Task: &ent.Task{
				Type:          queue.ExplicitEntityRecycleTaskType,
				CorrelationID: logging.CorrelationID(ctx),
				PrivateState:  string(stateBytes),
				PublicState: &types.TaskPublicState{
					// Eligible to resume immediately.
					ResumeTime: time.Now().Unix() - 1,
				},
			},
			DirectOwner: inventory.UserFromContext(ctx),
		},
	}
	return t, nil
}
|
||||
|
||||
func (m *ExplicitEntityRecycleTask) Do(ctx context.Context) (task.Status, error) {
|
||||
dep := dependency.FromContext(ctx)
|
||||
fm := NewFileManager(dep, inventory.UserFromContext(ctx)).(*manager)
|
||||
|
||||
// unmarshal state
|
||||
state := &ExplicitEntityRecycleTaskState{}
|
||||
if err := json.Unmarshal([]byte(m.State()), state); err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to unmarshal state: %w", err)
|
||||
}
|
||||
|
||||
// recycle entities
|
||||
err := fm.RecycleEntities(ctx, false, state.EntityIDs...)
|
||||
if err != nil {
|
||||
appendAe(&state.Errors, err)
|
||||
privateState, err := json.Marshal(state)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to marshal state: %w", err)
|
||||
}
|
||||
m.Task.PrivateState = string(privateState)
|
||||
return task.StatusError, err
|
||||
}
|
||||
|
||||
return task.StatusCompleted, nil
|
||||
}
|
||||
|
||||
type (
	// EntityRecycleRoutineTask periodically sweeps ALL stale entities in the
	// database (no explicit ID list).
	EntityRecycleRoutineTask struct {
		*queue.DBTask
	}

	// EntityRecycleRoutineTaskState is the JSON-serialized private state of
	// an EntityRecycleRoutineTask.
	EntityRecycleRoutineTaskState struct {
		Errors [][]RecycleError `json:"errors,omitempty"`
	}
)
|
||||
|
||||
// NewEntityRecycleRoutineTaskFromModel rehydrates an EntityRecycleRoutineTask
// from its persisted task row; used by the resumable-task factory registry.
func NewEntityRecycleRoutineTaskFromModel(task *ent.Task) queue.Task {
	return &EntityRecycleRoutineTask{
		DBTask: &queue.DBTask{
			Task: task,
		},
	}
}
|
||||
|
||||
// NewEntityRecycleRoutineTask builds a routine stale-entity sweep task owned
// by the user in ctx. ResumeTime is set one second in the past so the task
// becomes eligible to run immediately.
func NewEntityRecycleRoutineTask(ctx context.Context) (queue.Task, error) {
	state := &EntityRecycleRoutineTaskState{
		Errors: make([][]RecycleError, 0),
	}
	stateBytes, err := json.Marshal(state)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal state: %w", err)
	}

	t := &EntityRecycleRoutineTask{
		DBTask: &queue.DBTask{
			Task: &ent.Task{
				Type:          queue.EntityRecycleRoutineTaskType,
				CorrelationID: logging.CorrelationID(ctx),
				PrivateState:  string(stateBytes),
				PublicState: &types.TaskPublicState{
					// Eligible to resume immediately.
					ResumeTime: time.Now().Unix() - 1,
				},
			},
			DirectOwner: inventory.UserFromContext(ctx),
		},
	}
	return t, nil
}
|
||||
|
||||
func (m *EntityRecycleRoutineTask) Do(ctx context.Context) (task.Status, error) {
|
||||
dep := dependency.FromContext(ctx)
|
||||
fm := NewFileManager(dep, inventory.UserFromContext(ctx)).(*manager)
|
||||
|
||||
// unmarshal state
|
||||
state := &EntityRecycleRoutineTaskState{}
|
||||
if err := json.Unmarshal([]byte(m.State()), state); err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to unmarshal state: %w", err)
|
||||
}
|
||||
|
||||
// recycle entities
|
||||
err := fm.RecycleEntities(ctx, false)
|
||||
if err != nil {
|
||||
appendAe(&state.Errors, err)
|
||||
|
||||
privateState, err := json.Marshal(state)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to marshal state: %w", err)
|
||||
}
|
||||
m.Task.PrivateState = string(privateState)
|
||||
return task.StatusError, err
|
||||
}
|
||||
|
||||
return task.StatusCompleted, nil
|
||||
}
|
||||
|
||||
// RecycleEntities delete given entities. If the ID list is empty, it will walk through
// all stale entities in DB.
//
// Entities are grouped by storage policy and deleted in batches of 100.
// Per-entity failures are accumulated into an aggregate error; when force is
// true, entities whose physical deletion failed are still removed from the
// database and their errors dropped.
func (m *manager) RecycleEntities(ctx context.Context, force bool, entityIDs ...int) error {
	ae := serializer.NewAggregateError()
	entities, err := m.fs.StaleEntities(ctx, entityIDs...)
	if err != nil {
		return fmt.Errorf("failed to get entities: %w", err)
	}

	// Group entities by policy ID
	entityGroup := lo.GroupBy(entities, func(entity fs.Entity) int {
		return entity.PolicyID()
	})

	// Delete entity in each group in batch
	for _, entities := range entityGroup {
		entityChunk := lo.Chunk(entities, 100)
		m.l.Info("Recycling %d entities in %d batches", len(entities), len(entityChunk))

		for batch, chunk := range entityChunk {
			m.l.Info("Start to recycle batch #%d, %d entities", batch, len(chunk))
			mapSrcToId := make(map[string]int, len(chunk))
			// All entities in the chunk share one policy; resolve its driver once.
			_, d, err := m.getEntityPolicyDriver(ctx, chunk[0], nil)
			if err != nil {
				// Driver unavailable: mark the whole chunk failed and move on.
				for _, entity := range chunk {
					ae.Add(strconv.Itoa(entity.ID()), err)
				}
				continue
			}

			for _, entity := range chunk {
				mapSrcToId[entity.Source()] = entity.ID()
			}

			res, err := d.Delete(ctx, lo.Map(chunk, func(entity fs.Entity, index int) string {
				return entity.Source()
			})...)
			if err != nil {
				// res holds the sources the driver failed to delete.
				for _, src := range res {
					ae.Add(strconv.Itoa(mapSrcToId[src]), err)
				}
			}

			// Delete upload session if it's still valid
			for _, entity := range chunk {
				sid := entity.UploadSessionID()
				if sid == nil {
					continue
				}

				if session, ok := m.kv.Get(UploadSessionCachePrefix + sid.String()); ok {
					session := session.(fs.UploadSession)
					if err := d.CancelToken(ctx, &session); err != nil {
						m.l.Warning("Failed to cancel upload session for %q: %s, this is expected if it's remote policy.", session.Props.Uri.String(), err)
					}
					_ = m.kv.Delete(UploadSessionCachePrefix, sid.String())
				}
			}

			// Filtering out entities that are successfully deleted
			rawAe := ae.Raw()
			successEntities := lo.FilterMap(chunk, func(entity fs.Entity, index int) (int, bool) {
				entityIdStr := fmt.Sprintf("%d", entity.ID())
				_, ok := rawAe[entityIdStr]
				if !ok {
					// No error, deleted
					return entity.ID(), true
				}

				// force: remove from DB anyway and drop the recorded error.
				if force {
					ae.Remove(entityIdStr)
				}
				return entity.ID(), force
			})

			// Remove entities from DB
			fc, tx, ctx, err := inventory.WithTx(ctx, m.dep.FileClient())
			if err != nil {
				return fmt.Errorf("failed to start transaction: %w", err)
			}
			storageReduced, err := fc.RemoveEntitiesByID(ctx, successEntities...)
			if err != nil {
				_ = inventory.Rollback(tx)
				return fmt.Errorf("failed to remove entities from DB: %w", err)
			}

			// Propagate the freed storage to the owners' quota accounting.
			tx.AppendStorageDiff(storageReduced)
			if err := inventory.CommitWithStorageDiff(ctx, tx, m.l, m.dep.UserClient()); err != nil {
				return fmt.Errorf("failed to commit delete change: %w", err)
			}

		}
	}

	return ae.Aggregate()
}
|
||||
|
||||
const (
	// MinimumTrashCollectBatch is the number of expired trash-bin files
	// accumulated before a deletion pass is triggered mid-scan.
	MinimumTrashCollectBatch = 1000
)
|
||||
|
||||
// CronCollectTrashBin walks through all files in trash bin and delete them if they are expired.
|
||||
func CronCollectTrashBin(ctx context.Context) {
|
||||
dep := dependency.FromContext(ctx)
|
||||
l := dep.Logger()
|
||||
fm := NewFileManager(dep, inventory.UserFromContext(ctx)).(*manager)
|
||||
pageSize := dep.SettingProvider().DBFS(ctx).MaxPageSize
|
||||
batch := 0
|
||||
expiredFiles := make([]fs.File, 0)
|
||||
for {
|
||||
res, err := fm.fs.AllFilesInTrashBin(ctx, fs.WithPageSize(pageSize))
|
||||
if err != nil {
|
||||
l.Error("Failed to get files in trash bin: %s", err)
|
||||
}
|
||||
|
||||
expired := lo.Filter(res.Files, func(file fs.File, index int) bool {
|
||||
if expire, ok := file.Metadata()[dbfs.MetadataExpectedCollectTime]; ok {
|
||||
expireUnix, err := strconv.ParseInt(expire, 10, 64)
|
||||
if err != nil {
|
||||
l.Warning("Failed to parse expected collect time %q: %s, will treat as expired", expire, err)
|
||||
}
|
||||
|
||||
if expireUnix < time.Now().Unix() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
})
|
||||
l.Info("Found %d files in trash bin pending collect, in batch #%d", len(res.Files), batch)
|
||||
|
||||
expiredFiles = append(expiredFiles, expired...)
|
||||
if len(expiredFiles) >= MinimumTrashCollectBatch {
|
||||
collectTrashBin(ctx, expiredFiles, dep, l)
|
||||
expiredFiles = expiredFiles[:0]
|
||||
}
|
||||
|
||||
if res.Pagination.NextPageToken == "" {
|
||||
if len(expiredFiles) > 0 {
|
||||
collectTrashBin(ctx, expiredFiles, dep, l)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
batch++
|
||||
}
|
||||
}
|
||||
|
||||
// collectTrashBin permanently deletes the given expired trash-bin files,
// grouped by owner so each deletion runs with the owner's own file manager
// and quota accounting. Per-user failures are logged and do not abort the
// remaining groups.
func collectTrashBin(ctx context.Context, files []fs.File, dep dependency.Dep, l logging.Logger) {
	l.Info("Start to collect %d files in trash bin", len(files))
	uc := dep.UserClient()

	// Group files by Owners
	fileGroup := lo.GroupBy(files, func(file fs.File) int {
		return file.OwnerID()
	})

	for uid, expiredFiles := range fileGroup {
		// Load the owner with group info for permission/quota checks.
		ctx = context.WithValue(ctx, inventory.LoadUserGroup{}, true)
		user, err := uc.GetByID(ctx, uid)
		if err != nil {
			l.Error("Failed to get user %d: %s", uid, err)
			continue
		}

		ctx = context.WithValue(ctx, inventory.UserCtx{}, user)
		fm := NewFileManager(dep, user).(*manager)
		// Skip the soft-delete stage: these files are already in the trash bin.
		if err := fm.Delete(ctx, lo.Map(expiredFiles, func(file fs.File, index int) *fs.URI {
			return file.Uri(false)
		}), fs.WithSkipSoftDelete(true)); err != nil {
			l.Error("Failed to delete files for user %d: %s", uid, err)
		}
	}
}
|
||||
|
||||
func appendAe(errs *[][]RecycleError, err error) {
|
||||
var ae *serializer.AggregateError
|
||||
*errs = append(*errs, make([]RecycleError, 0))
|
||||
if errors.As(err, &ae) {
|
||||
(*errs)[len(*errs)-1] = lo.MapToSlice(ae.Raw(), func(key string, value error) RecycleError {
|
||||
return RecycleError{
|
||||
ID: key,
|
||||
Error: value.Error(),
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
294
pkg/filemanager/manager/thumbnail.go
Normal file
294
pkg/filemanager/manager/thumbnail.go
Normal file
@@ -0,0 +1,294 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/task"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/local"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
// Thumbnail returns the thumbnail entity of the file.
|
||||
func (m *manager) Thumbnail(ctx context.Context, uri *fs.URI) (entitysource.EntitySource, error) {
|
||||
// retrieve file info
|
||||
file, err := m.fs.Get(ctx, uri, dbfs.WithFileEntities(), dbfs.WithFilePublicMetadata())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get file: %w", err)
|
||||
}
|
||||
|
||||
// 0. Check if thumb is disabled in this file.
|
||||
if _, ok := file.Metadata()[dbfs.ThumbDisabledKey]; ok || file.Type() != types.FileTypeFile {
|
||||
return nil, fs.ErrEntityNotExist
|
||||
}
|
||||
|
||||
// 1. If thumbnail entity exist, use it.
|
||||
entities := file.Entities()
|
||||
thumbEntity, found := lo.Find(entities, func(e fs.Entity) bool {
|
||||
return e.Type() == types.EntityTypeThumbnail
|
||||
})
|
||||
if found {
|
||||
thumbSource, err := m.GetEntitySource(ctx, 0, fs.WithEntity(thumbEntity))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get entity source: %w", err)
|
||||
}
|
||||
|
||||
thumbSource.Apply(entitysource.WithDisplayName(file.DisplayName() + ".jpg"))
|
||||
return thumbSource, nil
|
||||
}
|
||||
|
||||
latest := file.PrimaryEntity()
|
||||
// If primary entity not exist, or it's empty
|
||||
if latest == nil || latest.ID() == 0 {
|
||||
return nil, fmt.Errorf("failed to get latest version")
|
||||
}
|
||||
|
||||
// 2. Thumb entity not exist, try native policy generator
|
||||
_, handler, err := m.getEntityPolicyDriver(ctx, latest, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get entity policy driver: %w", err)
|
||||
}
|
||||
capabilities := handler.Capabilities()
|
||||
// Check if file extension and size is supported by native policy generator.
|
||||
if capabilities.ThumbSupportAllExts || util.IsInExtensionList(capabilities.ThumbSupportedExts, file.DisplayName()) &&
|
||||
(capabilities.ThumbMaxSize == 0 || latest.Size() <= capabilities.ThumbMaxSize) {
|
||||
thumbSource, err := m.GetEntitySource(ctx, 0, fs.WithEntity(latest), fs.WithUseThumb(true))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get latest entity source: %w", err)
|
||||
}
|
||||
|
||||
thumbSource.Apply(entitysource.WithDisplayName(file.DisplayName()))
|
||||
return thumbSource, nil
|
||||
} else if capabilities.ThumbProxy {
|
||||
if err := m.fs.CheckCapability(ctx, uri,
|
||||
dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityGenerateThumb)); err != nil {
|
||||
// Current FS does not support generate new thumb.
|
||||
return nil, fs.ErrEntityNotExist
|
||||
}
|
||||
|
||||
thumbEntity, err := m.SubmitAndAwaitThumbnailTask(ctx, uri, file.Ext(), latest)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute thumb task: %w", err)
|
||||
}
|
||||
|
||||
thumbSource, err := m.GetEntitySource(ctx, 0, fs.WithEntity(thumbEntity))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get entity source: %w", err)
|
||||
}
|
||||
|
||||
return thumbSource, nil
|
||||
} else {
|
||||
// 4. If proxy generator not support, mark thumb as not available.
|
||||
_ = disableThumb(ctx, m, uri)
|
||||
}
|
||||
|
||||
return nil, fs.ErrEntityNotExist
|
||||
}
|
||||
|
||||
// SubmitAndAwaitThumbnailTask queues a thumbnail-generation task for the given
// entity on the thumb queue and blocks until the task reports a result through
// its signal channel, or until ctx is canceled. On success it returns the
// newly created thumbnail entity.
func (m *manager) SubmitAndAwaitThumbnailTask(ctx context.Context, uri *fs.URI, ext string, entity fs.Entity) (fs.Entity, error) {
	es, err := m.GetEntitySource(ctx, 0, fs.WithEntity(entity))
	if err != nil {
		return nil, fmt.Errorf("failed to get entity source: %w", err)
	}

	// The source is held open for the duration of the task; closed once the
	// task result (or cancellation) is observed.
	defer es.Close()
	t := newGenerateThumbTask(ctx, m, uri, ext, es)
	if err := m.dep.ThumbQueue(ctx).QueueTask(ctx, t); err != nil {
		return nil, fmt.Errorf("failed to queue task: %w", err)
	}

	// Wait for task to finish. t.sig is buffered, so the task goroutine never
	// blocks even if we bail out on ctx.Done() first.
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case res := <-t.sig:
		if res.err != nil {
			return nil, fmt.Errorf("failed to generate thumb: %w", res.err)
		}

		return res.thumbEntity, nil
	}

}
|
||||
|
||||
// generateThumb runs the thumbnail pipeline on the given entity source and
// persists the result, returning the new thumbnail entity.
//
// Two persistence paths exist:
//   - stateless (slave node): the thumb is written as a sidecar file next to
//     the original entity via the storage driver, and a local file entity is
//     synthesized for it;
//   - stateful (master): the thumb is uploaded through the regular Update
//     flow as an EntityTypeThumbnail entity attached to the file.
//
// On pipeline failure (other than context cancellation), thumbnails are
// disabled on the file so the failure is not retried indefinitely.
func (m *manager) generateThumb(ctx context.Context, uri *fs.URI, ext string, es entitysource.EntitySource) (fs.Entity, error) {
	// Generate thumb into a temp file on local disk.
	pipeline := m.dep.ThumbPipeline()
	res, err := pipeline.Generate(ctx, es, ext, nil)
	if err != nil {
		// Best-effort cleanup of any partial output.
		if res != nil && res.Path != "" {
			_ = os.Remove(res.Path)
		}

		// Permanent failure (not a cancellation): disable thumb on the file
		// so future requests short-circuit. Skipped on stateless nodes which
		// have no DB access.
		if !errors.Is(err, context.Canceled) && !m.stateless {
			if err := disableThumb(ctx, m, uri); err != nil {
				m.l.Warning("Failed to disable thumb: %v", err)
			}
		}

		return nil, fmt.Errorf("failed to generate thumb: %w", err)
	}

	// Temp output is always removed once persisted (or on error below).
	defer os.Remove(res.Path)

	// Upload thumb entity
	thumbFile, err := os.Open(res.Path)
	if err != nil {
		return nil, fmt.Errorf("failed to open temp thumb %q: %w", res.Path, err)
	}

	defer thumbFile.Close()
	fileInfo, err := thumbFile.Stat()
	if err != nil {
		return nil, fmt.Errorf("failed to stat temp thumb %q: %w", res.Path, err)
	}

	var (
		thumbEntity fs.Entity
	)
	if m.stateless {
		// Stateless path: write the thumb as a sidecar object next to the
		// source entity using the entity's own storage driver.
		_, d, err := m.getEntityPolicyDriver(ctx, es.Entity(), nil)
		if err != nil {
			return nil, fmt.Errorf("failed to get storage driver: %w", err)
		}

		savePath := es.Entity().Source() + m.settings.ThumbSlaveSidecarSuffix(ctx)
		if err := d.Put(ctx, &fs.UploadRequest{
			File:   thumbFile,
			Seeker: thumbFile,
			Props:  &fs.UploadProps{SavePath: savePath},
		}); err != nil {
			return nil, fmt.Errorf("failed to save thumb sidecar: %w", err)
		}

		thumbEntity, err = local.NewLocalFileEntity(types.EntityTypeThumbnail, savePath)
		if err != nil {
			return nil, fmt.Errorf("failed to create local thumb entity: %w", err)
		}
	} else {
		// Stateful path: upload through the regular Update flow as a
		// thumbnail-typed entity. SavePath gets a random suffix so repeated
		// generations never collide.
		entityType := types.EntityTypeThumbnail
		req := &fs.UploadRequest{
			Props: &fs.UploadProps{
				Uri:  uri,
				Size: fileInfo.Size(),
				SavePath: fmt.Sprintf(
					"%s.%s%s",
					es.Entity().Source(),
					util.RandStringRunes(16),
					m.settings.ThumbEntitySuffix(ctx),
				),
				MimeType:   m.dep.MimeDetector(ctx).TypeByName("thumb.jpg"),
				EntityType: &entityType,
			},
			File:   thumbFile,
			Seeker: thumbFile,
		}

		// Generating thumb can be triggered by users with read-only permission. We can bypass update permission check.
		ctx = dbfs.WithBypassOwnerCheck(ctx)

		file, err := m.Update(ctx, req, fs.WithEntityType(types.EntityTypeThumbnail))
		if err != nil {
			return nil, fmt.Errorf("failed to upload thumb entity: %w", err)
		}

		// Re-read the file's entities to locate the thumbnail we just added.
		entities := file.Entities()
		found := false
		thumbEntity, found = lo.Find(entities, func(e fs.Entity) bool {
			return e.Type() == types.EntityTypeThumbnail
		})
		if !found {
			return nil, fmt.Errorf("failed to find thumb entity")
		}

	}

	// Optional GC to promptly release pipeline buffers after generation.
	if m.settings.ThumbGCAfterGen(ctx) {
		m.l.Debug("GC after thumb generation")
		runtime.GC()
	}

	return thumbEntity, nil
}
|
||||
|
||||
type (
	// GenerateThumbTask is an in-memory queue task that generates a thumbnail
	// for one entity and reports the outcome on sig.
	GenerateThumbTask struct {
		*queue.InMemoryTask
		es  entitysource.EntitySource // source entity to generate the thumb from
		ext string                    // file extension used to pick a generator
		m   *manager                  // owning manager, used to run generateThumb
		uri *fs.URI                   // file the thumbnail belongs to
		sig chan *generateRes         // buffered result channel consumed by the awaiter
	}
	// generateRes is the result delivered on GenerateThumbTask.sig: either a
	// thumbnail entity or an error.
	generateRes struct {
		thumbEntity fs.Entity
		err         error
	}
)
|
||||
|
||||
// newGenerateThumbTask constructs an in-memory thumbnail generation task for
// the given file URI and entity source, owned by the manager's current user.
// The sig channel is buffered with capacity 2 so that both the Do defer and
// OnError can send a result without blocking, even when the awaiter has
// already given up.
func newGenerateThumbTask(ctx context.Context, m *manager, uri *fs.URI, ext string, es entitysource.EntitySource) *GenerateThumbTask {
	t := &GenerateThumbTask{
		InMemoryTask: &queue.InMemoryTask{
			DBTask: &queue.DBTask{
				Task: &ent.Task{
					CorrelationID: logging.CorrelationID(ctx),
					PublicState:   &types.TaskPublicState{},
				},
			},
		},
		es:  es,
		ext: ext,
		m:   m,
		uri: uri,
		sig: make(chan *generateRes, 2),
	}

	t.InMemoryTask.DBTask.Task.SetUser(m.user)
	return t
}
|
||||
|
||||
// Do runs the thumbnail generation and delivers the outcome (entity or error)
// to the awaiter via m.sig.
//
// NOTE(review): Do always returns StatusCompleted/nil even when generateThumb
// fails — presumably intentional, so the queue does not retry while the
// awaiter still receives the error through the deferred send on sig. Confirm
// against the queue's retry semantics.
func (m *GenerateThumbTask) Do(ctx context.Context) (task.Status, error) {
	var (
		res fs.Entity
		err error
	)
	// Always report the final (res, err) pair to the awaiter, whichever
	// return path is taken.
	defer func() { m.sig <- &generateRes{res, err} }()

	// Make sure user does not cancel request before we start generating thumb.
	select {
	case <-ctx.Done():
		err = ctx.Err()
		return task.StatusError, err
	default:
	}

	res, err = m.m.generateThumb(ctx, m.uri, m.ext, m.es)
	return task.StatusCompleted, nil
}
|
||||
|
||||
// OnError forwards queue-level failures to the base task and also delivers
// the error to the awaiter via sig (the channel's buffer of 2 accommodates
// this send alongside the one from Do's defer).
func (m *GenerateThumbTask) OnError(err error, d time.Duration) {
	m.InMemoryTask.OnError(err, d)
	m.sig <- &generateRes{nil, err}
}
|
||||
|
||||
func disableThumb(ctx context.Context, m *manager, uri *fs.URI) error {
|
||||
return m.fs.PatchMetadata(
|
||||
dbfs.WithBypassOwnerCheck(ctx),
|
||||
[]*fs.URI{uri}, fs.MetadataPatch{
|
||||
Key: dbfs.ThumbDisabledKey,
|
||||
Value: "",
|
||||
Private: false,
|
||||
})
|
||||
}
|
||||
500
pkg/filemanager/manager/upload.go
Normal file
500
pkg/filemanager/manager/upload.go
Normal file
@@ -0,0 +1,500 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/task"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/gofrs/uuid"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type (
	// UploadManagement groups the manager's upload lifecycle operations:
	// creating sessions, streaming data, completing, and cleaning up.
	UploadManagement interface {
		// CreateUploadSession creates an upload session for the given upload request.
		CreateUploadSession(ctx context.Context, req *fs.UploadRequest, opts ...fs.Option) (*fs.UploadCredential, error)
		// ConfirmUploadSession confirms whether the upload session is valid for upload
		// of the given chunk, returning the placeholder file on success.
		ConfirmUploadSession(ctx context.Context, session *fs.UploadSession, chunkIndex int) (fs.File, error)
		// Upload uploads file data to storage under the given policy.
		Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy) error
		// CompleteUpload completes the upload session and returns the file object.
		CompleteUpload(ctx context.Context, session *fs.UploadSession) (fs.File, error)
		// CancelUploadSession cancels an upload session and recycles any stale entities.
		CancelUploadSession(ctx context.Context, path *fs.URI, sessionID string) error
		// OnUploadFailed should be called when an unmanaged upload failed before complete.
		OnUploadFailed(ctx context.Context, session *fs.UploadSession)
		// PrepareUpload is similar to CreateUploadSession, but only prepares the
		// session in the FS layer without creating driver-side upload credentials.
		PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ...fs.Option) (*fs.UploadSession, error)
	}
)
|
||||
|
||||
func (m *manager) CreateUploadSession(ctx context.Context, req *fs.UploadRequest, opts ...fs.Option) (*fs.UploadCredential, error) {
|
||||
o := newOption()
|
||||
for _, opt := range opts {
|
||||
opt.Apply(o)
|
||||
}
|
||||
|
||||
// Validate metadata
|
||||
if req.Props.Metadata != nil {
|
||||
if err := m.validateMetadata(ctx, lo.MapToSlice(req.Props.Metadata, func(key string, value string) fs.MetadataPatch {
|
||||
return fs.MetadataPatch{
|
||||
Key: key,
|
||||
Value: value,
|
||||
}
|
||||
})...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
uploadSession := o.UploadSession
|
||||
var (
|
||||
err error
|
||||
)
|
||||
|
||||
if uploadSession == nil {
|
||||
// If upload session not specified, invoke DBFS to create one
|
||||
sessionID := uuid.Must(uuid.NewV4()).String()
|
||||
req.Props.UploadSessionID = sessionID
|
||||
ttl := m.settings.UploadSessionTTL(ctx)
|
||||
req.Props.ExpireAt = time.Now().Add(ttl)
|
||||
|
||||
// Prepare for upload
|
||||
uploadSession, err = m.fs.PrepareUpload(ctx, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("faield to prepare uplaod: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
d, err := m.GetStorageDriver(ctx, m.CastStoragePolicyOnSlave(ctx, uploadSession.Policy))
|
||||
if err != nil {
|
||||
m.OnUploadFailed(ctx, uploadSession)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uploadSession.ChunkSize = uploadSession.Policy.Settings.ChunkSize
|
||||
// Create upload credential for underlying storage driver
|
||||
credential := &fs.UploadCredential{}
|
||||
if !uploadSession.Policy.Settings.Relay || m.stateless {
|
||||
credential, err = d.Token(ctx, uploadSession, req)
|
||||
if err != nil {
|
||||
m.OnUploadFailed(ctx, uploadSession)
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// For relayed upload, we don't need to create credential
|
||||
uploadSession.ChunkSize = 0
|
||||
credential.ChunkSize = 0
|
||||
}
|
||||
credential.SessionID = uploadSession.Props.UploadSessionID
|
||||
credential.Expires = req.Props.ExpireAt.Unix()
|
||||
credential.StoragePolicy = uploadSession.Policy
|
||||
credential.CallbackSecret = uploadSession.CallbackSecret
|
||||
credential.Uri = uploadSession.Props.Uri.String()
|
||||
|
||||
// If upload sentinel check is required, queue a check task
|
||||
if d.Capabilities().StaticFeatures.Enabled(int(driver.HandlerCapabilityUploadSentinelRequired)) {
|
||||
t, err := newUploadSentinelCheckTask(ctx, uploadSession)
|
||||
if err != nil {
|
||||
m.OnUploadFailed(ctx, uploadSession)
|
||||
return nil, fmt.Errorf("failed to create upload sentinel check task: %w", err)
|
||||
}
|
||||
|
||||
if err := m.dep.EntityRecycleQueue(ctx).QueueTask(ctx, t); err != nil {
|
||||
m.OnUploadFailed(ctx, uploadSession)
|
||||
return nil, fmt.Errorf("failed to queue upload sentinel check task: %w", err)
|
||||
}
|
||||
|
||||
uploadSession.SentinelTaskID = t.ID()
|
||||
}
|
||||
|
||||
err = m.kv.Set(
|
||||
UploadSessionCachePrefix+req.Props.UploadSessionID,
|
||||
*uploadSession,
|
||||
max(1, int(req.Props.ExpireAt.Sub(time.Now()).Seconds())),
|
||||
)
|
||||
if err != nil {
|
||||
m.OnUploadFailed(ctx, uploadSession)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return credential, nil
|
||||
}
|
||||
|
||||
func (m *manager) ConfirmUploadSession(ctx context.Context, session *fs.UploadSession, chunkIndex int) (fs.File, error) {
|
||||
// Get placeholder file
|
||||
file, err := m.fs.Get(ctx, session.Props.Uri)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get placeholder file: %w", err)
|
||||
}
|
||||
|
||||
// Confirm locks on placeholder file
|
||||
if session.LockToken == "" {
|
||||
release, ls, err := m.fs.ConfirmLock(ctx, file, file.Uri(false), session.LockToken)
|
||||
if err != nil {
|
||||
return nil, fs.ErrLockExpired.WithError(err)
|
||||
}
|
||||
|
||||
defer release()
|
||||
ctx = fs.LockSessionToContext(ctx, ls)
|
||||
}
|
||||
|
||||
// Make sure this storage policy is OK to receive data from clients to Cloudreve server.
|
||||
if session.Policy.Type != types.PolicyTypeLocal && !session.Policy.Settings.Relay {
|
||||
return nil, serializer.NewError(serializer.CodePolicyNotAllowed, "", nil)
|
||||
}
|
||||
|
||||
actualSizeStart := int64(chunkIndex) * session.ChunkSize
|
||||
if session.Policy.Settings.ChunkSize == 0 && chunkIndex > 0 {
|
||||
return nil, serializer.NewError(serializer.CodeInvalidChunkIndex, "Chunk index cannot be greater than 0", nil)
|
||||
}
|
||||
|
||||
if actualSizeStart > 0 && actualSizeStart >= session.Props.Size {
|
||||
return nil, serializer.NewError(serializer.CodeInvalidChunkIndex, "Chunk offset cannot be greater than file size", nil)
|
||||
}
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// PrepareUpload prepares an upload session in the FS layer only, without
// creating driver-side credentials; see UploadManagement for the contract.
func (m *manager) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ...fs.Option) (*fs.UploadSession, error) {
	return m.fs.PrepareUpload(ctx, req, opts...)
}
|
||||
|
||||
func (m *manager) Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy) error {
|
||||
d, err := m.GetStorageDriver(ctx, m.CastStoragePolicyOnSlave(ctx, policy))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := d.Put(ctx, req); err != nil {
|
||||
return serializer.NewError(serializer.CodeIOFailed, "Failed to upload file", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CancelUploadSession cancels an in-flight upload session: it removes the
// session state from DBFS (on stateful nodes), tells the storage driver to
// drop the remote upload (or deletes the partially uploaded file on stateless
// nodes), evicts the cached session, and queues a recycle task for any stale
// entities the cancellation produced. A missing KV entry is tolerated — the
// FS-side cleanup still runs with a nil session.
func (m *manager) CancelUploadSession(ctx context.Context, path *fs.URI, sessionID string) error {
	// Get upload session from KV cache; may legitimately be absent.
	var session *fs.UploadSession
	sessionRaw, ok := m.kv.Get(UploadSessionCachePrefix + sessionID)
	if ok {
		sessionTyped := sessionRaw.(fs.UploadSession)
		session = &sessionTyped
	}

	var (
		staleEntities []fs.Entity
		err           error
	)

	// On stateful nodes, cancel in DBFS first; this yields entities that are
	// now orphaned and need recycling.
	if !m.stateless {
		staleEntities, err = m.fs.CancelUploadSession(ctx, path, sessionID, session)
		if err != nil {
			return err
		}

		m.l.Debug("New stale entities: %v", staleEntities)
	}

	if session != nil {
		// Route driver calls to the node that owns this session's policy.
		ctx = context.WithValue(ctx, cluster.SlaveNodeIDCtx{}, strconv.Itoa(session.Policy.NodeID))
		d, err := m.GetStorageDriver(ctx, m.CastStoragePolicyOnSlave(ctx, session.Policy))
		if err != nil {
			return fmt.Errorf("failed to get storage driver: %w", err)
		}

		if m.stateless {
			// Stateless node: delete the partially uploaded object directly.
			if _, err = d.Delete(ctx, session.Props.SavePath); err != nil {
				return fmt.Errorf("failed to delete file: %w", err)
			}
		} else {
			// Stateful node: revoke the driver-side upload token.
			if err = d.CancelToken(ctx, session); err != nil {
				return fmt.Errorf("failed to cancel upload session: %w", err)
			}
		}

		m.kv.Delete(UploadSessionCachePrefix, session.Props.UploadSessionID)
	}

	// Delete stale entities
	if len(staleEntities) > 0 {
		t, err := newExplicitEntityRecycleTask(ctx, lo.Map(staleEntities, func(entity fs.Entity, index int) int {
			return entity.ID()
		}))
		if err != nil {
			return fmt.Errorf("failed to create explicit entity recycle task: %w", err)
		}

		if err := m.dep.EntityRecycleQueue(ctx).QueueTask(ctx, t); err != nil {
			return fmt.Errorf("failed to queue explicit entity recycle task: %w", err)
		}
	}

	return nil
}
|
||||
|
||||
// CompleteUpload finalizes an upload session: it completes the upload on the
// storage driver, then (when a DBFS is attached) promotes the placeholder in
// the FS layer to a real file. It also cancels any pending sentinel check
// task, fires the new-entity hook, and evicts the cached session. The
// returned file may be nil on stateless nodes with no FS.
func (m *manager) CompleteUpload(ctx context.Context, session *fs.UploadSession) (fs.File, error) {
	d, err := m.GetStorageDriver(ctx, m.CastStoragePolicyOnSlave(ctx, session.Policy))
	if err != nil {
		return nil, err
	}

	if err := d.CompleteUpload(ctx, session); err != nil {
		return nil, err
	}

	var (
		file fs.File
	)
	if m.fs != nil {
		file, err = m.fs.CompleteUpload(ctx, session)
		if err != nil {
			return nil, fmt.Errorf("failed to complete upload: %w", err)
		}
	}

	if session.SentinelTaskID > 0 {
		// Cancel sentinel check task; callback completed in time, so the
		// sentinel must not delete the entity. Failure here is non-fatal.
		m.l.Debug("Cancel upload sentinel check task [%d].", session.SentinelTaskID)
		if err := m.dep.TaskClient().SetCompleteByID(ctx, session.SentinelTaskID); err != nil {
			m.l.Warning("Failed to set upload sentinel check task [%d] to complete: %s", session.SentinelTaskID, err)
		}
	}

	m.onNewEntityUploaded(ctx, session, d)
	// Remove upload session
	_ = m.kv.Delete(UploadSessionCachePrefix, session.Props.UploadSessionID)
	return file, nil
}
|
||||
|
||||
func (m *manager) Update(ctx context.Context, req *fs.UploadRequest, opts ...fs.Option) (fs.File, error) {
|
||||
o := newOption()
|
||||
for _, opt := range opts {
|
||||
opt.Apply(o)
|
||||
}
|
||||
entityType := types.EntityTypeVersion
|
||||
if o.EntityType != nil {
|
||||
entityType = *o.EntityType
|
||||
}
|
||||
|
||||
req.Props.EntityType = &entityType
|
||||
if o.EntityTypeNil {
|
||||
req.Props.EntityType = nil
|
||||
}
|
||||
|
||||
req.Props.UploadSessionID = uuid.Must(uuid.NewV4()).String()
|
||||
|
||||
if m.stateless {
|
||||
return m.updateStateless(ctx, req, o)
|
||||
}
|
||||
|
||||
// Prepare for upload
|
||||
uploadSession, err := m.fs.PrepareUpload(ctx, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("faield to prepare uplaod: %w", err)
|
||||
}
|
||||
|
||||
if err := m.Upload(ctx, req, uploadSession.Policy); err != nil {
|
||||
m.OnUploadFailed(ctx, uploadSession)
|
||||
return nil, fmt.Errorf("failed to upload new entity: %w", err)
|
||||
}
|
||||
|
||||
file, err := m.CompleteUpload(ctx, uploadSession)
|
||||
if err != nil {
|
||||
m.OnUploadFailed(ctx, uploadSession)
|
||||
return nil, fmt.Errorf("failed to complete update: %w", err)
|
||||
}
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func (m *manager) OnUploadFailed(ctx context.Context, session *fs.UploadSession) {
|
||||
ctx = context.WithoutCancel(ctx)
|
||||
if !m.stateless {
|
||||
if session.LockToken != "" {
|
||||
if err := m.Unlock(ctx, session.LockToken); err != nil {
|
||||
m.l.Warning("OnUploadFailed hook failed to unlock: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if session.NewFileCreated {
|
||||
if err := m.Delete(ctx, []*fs.URI{session.Props.Uri}, fs.WithSysSkipSoftDelete(true)); err != nil {
|
||||
m.l.Warning("OnUploadFailed hook failed to delete file: %s", err)
|
||||
}
|
||||
} else {
|
||||
if err := m.fs.VersionControl(ctx, session.Props.Uri, session.EntityID, true); err != nil {
|
||||
m.l.Warning("OnUploadFailed hook failed to version control: %s", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
d, err := m.GetStorageDriver(ctx, m.CastStoragePolicyOnSlave(ctx, session.Policy))
|
||||
if err != nil {
|
||||
m.l.Warning("OnUploadFailed hook failed: %s", err)
|
||||
}
|
||||
|
||||
if failed, err := d.Delete(ctx, session.Props.SavePath); err != nil {
|
||||
m.l.Warning("OnUploadFailed hook failed to remove uploaded file: %s, failed file: %v", err, failed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// similar to Update, but expected to be executed on slave node.
|
||||
func (m *manager) updateStateless(ctx context.Context, req *fs.UploadRequest, o *fs.FsOption) (fs.File, error) {
|
||||
// Prepare for upload
|
||||
res, err := o.Node.PrepareUpload(ctx, &fs.StatelessPrepareUploadService{
|
||||
UploadRequest: req,
|
||||
UserID: o.StatelessUserID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("faield to prepare uplaod: %w", err)
|
||||
}
|
||||
|
||||
req.Props = res.Req.Props
|
||||
if err := m.Upload(ctx, req, res.Session.Policy); err != nil {
|
||||
if err := o.Node.OnUploadFailed(ctx, &fs.StatelessOnUploadFailedService{
|
||||
UploadSession: res.Session,
|
||||
UserID: o.StatelessUserID,
|
||||
}); err != nil {
|
||||
m.l.Warning("Failed to call stateless OnUploadFailed: %s", err)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to upload new entity: %w", err)
|
||||
}
|
||||
|
||||
err = o.Node.CompleteUpload(ctx, &fs.StatelessCompleteUploadService{
|
||||
UploadSession: res.Session,
|
||||
UserID: o.StatelessUserID,
|
||||
})
|
||||
if err != nil {
|
||||
if err := o.Node.OnUploadFailed(ctx, &fs.StatelessOnUploadFailedService{
|
||||
UploadSession: res.Session,
|
||||
UserID: o.StatelessUserID,
|
||||
}); err != nil {
|
||||
m.l.Warning("Failed to call stateless OnUploadFailed: %s", err)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to complete update: %w", err)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// onNewEntityUploaded runs post-upload hooks for a freshly completed entity.
// Currently it submits a media-metadata extraction task; skipped on stateless
// nodes, which cannot persist metadata.
func (m *manager) onNewEntityUploaded(ctx context.Context, session *fs.UploadSession, d driver.Handler) {
	if !m.stateless {
		// Submit media meta task for new entity
		m.mediaMetaForNewEntity(ctx, session, d)
	}
}
|
||||
|
||||
// Upload sentinel check task is used for compliant storage policy (COS, S3...), it will delete the marked entity.
// It is expected to be queued after upload session is created, and canceled after upload callback is completed.
// If this task is executed, it means the upload callback does not complete in time.
type (
	// UploadSentinelCheckTask is the resumable DB-backed task that performs
	// the sentinel check described above.
	UploadSentinelCheckTask struct {
		*queue.DBTask
	}
	// UploadSentinelCheckTaskState is the JSON-serialized private state of an
	// UploadSentinelCheckTask: the session whose placeholder entity must be
	// cleaned up if the callback never arrives.
	UploadSentinelCheckTaskState struct {
		Session *fs.UploadSession `json:"session"`
	}
)
|
||||
|
||||
const (
	// uploadSentinelCheckMargin is the grace period added after a session's
	// expiry before the sentinel check runs, allowing late callbacks to land.
	uploadSentinelCheckMargin = 5 * time.Minute
)
|
||||
|
||||
// Register the sentinel check task factory so persisted tasks of this type
// can be resumed from the database by the queue.
func init() {
	queue.RegisterResumableTaskFactory(queue.UploadSentinelCheckTaskType, NewUploadSentinelCheckTaskFromModel)
}
|
||||
|
||||
func NewUploadSentinelCheckTaskFromModel(task *ent.Task) queue.Task {
|
||||
return &UploadSentinelCheckTask{
|
||||
DBTask: &queue.DBTask{
|
||||
Task: task,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// newUploadSentinelCheckTask builds a sentinel check task for the given
// session, scheduled to resume uploadSentinelCheckMargin after the session
// expires. The session is serialized into the task's private state.
//
// NOTE(review): the constructed value is an *ExplicitEntityRecycleTask (with
// Type set to UploadSentinelCheckTaskType) rather than an
// *UploadSentinelCheckTask — presumably only the persisted Type matters for
// resumption via the registered factory; confirm this is intentional.
func newUploadSentinelCheckTask(ctx context.Context, uploadSession *fs.UploadSession) (*ExplicitEntityRecycleTask, error) {
	state := &UploadSentinelCheckTaskState{
		Session: uploadSession,
	}
	stateBytes, err := json.Marshal(state)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal state: %w", err)
	}

	// Resume only after the session expiry plus a grace margin.
	resumeAfter := uploadSession.Props.ExpireAt.Add(uploadSentinelCheckMargin)
	t := &ExplicitEntityRecycleTask{
		DBTask: &queue.DBTask{
			Task: &ent.Task{
				Type:          queue.UploadSentinelCheckTaskType,
				CorrelationID: logging.CorrelationID(ctx),
				PrivateState:  string(stateBytes),
				PublicState: &types.TaskPublicState{
					ResumeTime: resumeAfter.Unix(),
				},
			},
			DirectOwner: inventory.UserFromContext(ctx),
		},
	}
	return t, nil
}
|
||||
|
||||
// Do executes the sentinel check: if the upload callback completed in time
// (task already marked completed) it is a no-op; otherwise it deletes the
// stale placeholder entity's stored object and revokes the driver-side
// upload token. A missing entity is treated as already-cleaned and succeeds.
func (m *UploadSentinelCheckTask) Do(ctx context.Context) (task.Status, error) {
	dep := dependency.FromContext(ctx)
	taskClient := dep.TaskClient()
	l := dep.Logger()
	fm := NewFileManager(dep, inventory.UserFromContext(ctx)).(*manager)

	// Check if sentinel is canceled due to callback complete
	t, err := taskClient.GetTaskByID(ctx, m.ID())
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to get task by ID: %w", err)
	}

	if t.Status == task.StatusCompleted {
		l.Info("Upload sentinel check task [%d] is canceled due to callback complete.", m.ID())
		return task.StatusCompleted, nil
	}

	// unmarshal state
	state := &UploadSentinelCheckTaskState{}
	if err := json.Unmarshal([]byte(m.State()), state); err != nil {
		return task.StatusError, fmt.Errorf("failed to unmarshal state: %w", err)
	}

	l.Info("Upload sentinel check triggered, clean up stale place holder entity [%d].", state.Session.EntityID)
	entity, err := fm.fs.GetEntity(ctx, state.Session.EntityID)
	if err != nil {
		// Entity already gone: nothing to clean up.
		l.Debug("Failed to get entity [%d]: %s, skip sentinel check.", state.Session.EntityID, err)
		return task.StatusCompleted, nil
	}

	_, d, err := fm.getEntityPolicyDriver(ctx, entity, nil)
	if err != nil {
		l.Debug("Failed to get storage driver for entity [%d]: %s", state.Session.EntityID, err)
		return task.StatusError, err
	}

	_, err = d.Delete(ctx, entity.Source())
	if err != nil {
		l.Debug("Failed to delete entity source [%d]: %s", state.Session.EntityID, err)
		return task.StatusError, err
	}

	// Token cancellation is best-effort; the object itself is already gone.
	if err := d.CancelToken(ctx, state.Session); err != nil {
		l.Debug("Failed to cancel token [%d]: %s", state.Session.EntityID, err)
	}

	return task.StatusCompleted, nil
}
|
||||
93
pkg/filemanager/manager/viewer.go
Normal file
93
pkg/filemanager/manager/viewer.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/gofrs/uuid"
|
||||
)
|
||||
|
||||
type (
	// ViewerSession is the client-facing handle for an online viewer/editor
	// session: its ID, bearer access token, and expiry (Unix milliseconds).
	ViewerSession struct {
		ID          string  `json:"id"`
		AccessToken string  `json:"access_token"`
		Expires     int64   `json:"expires"`
		File        fs.File `json:"-"` // resolved file; not serialized to clients
	}
	// ViewerSessionCache is the server-side session record stored in KV
	// (gob-encoded) and used to authorize viewer requests.
	ViewerSessionCache struct {
		ID       string
		Uri      string
		UserID   int
		FileID   int
		ViewerID string
		Version  string
		Token    string
	}
	// ViewerSessionCacheCtx is the context key for a *ViewerSessionCache.
	ViewerSessionCacheCtx struct{}
	// ViewerCtx is the context key for a *setting.Viewer.
	ViewerCtx struct{}
)
|
||||
|
||||
const (
	// ViewerSessionCachePrefix prefixes viewer session keys in the KV store.
	ViewerSessionCachePrefix = "viewer_session_"

	// sessionExpiresPadding (seconds) is subtracted from the advertised
	// expiry so clients refresh before the cache entry actually lapses.
	sessionExpiresPadding = 10
)
|
||||
|
||||
// Register the cache record type with gob so it can round-trip through
// gob-backed KV stores.
func init() {
	gob.Register(ViewerSessionCache{})
}
|
||||
|
||||
// CreateViewerSession creates an online viewer/editor session for the given
// file version. It validates that the requested version exists and is within
// the online-edit size limit, stores a session record in KV with a TTL, and
// returns the session handle. The access token embeds the session ID
// ("<sessionID>.<random>") so it can be resolved back to the cache entry.
func (m *manager) CreateViewerSession(ctx context.Context, uri *fs.URI, version string, viewer *setting.Viewer) (*ViewerSession, error) {
	file, err := m.fs.Get(ctx, uri, dbfs.WithFileEntities())
	if err != nil {
		return nil, err
	}

	// Resolve the requested version among the file's version-typed entities.
	versionType := types.EntityTypeVersion
	found, desired := fs.FindDesiredEntity(file, version, m.hasher, &versionType)
	if !found {
		return nil, fs.ErrEntityNotExist
	}

	if desired.Size() > m.settings.MaxOnlineEditSize(ctx) {
		return nil, fs.ErrFileSizeTooBig
	}

	sessionID := uuid.Must(uuid.NewV4()).String()
	token := util.RandStringRunes(128)
	sessionCache := &ViewerSessionCache{
		ID:       sessionID,
		Uri:      file.Uri(false).String(),
		UserID:   m.user.ID,
		ViewerID: viewer.ID,
		FileID:   file.ID(),
		Version:  version,
		Token:    fmt.Sprintf("%s.%s", sessionID, token),
	}
	ttl := m.settings.ViewerSessionTTL(ctx)
	if err := m.kv.Set(ViewerSessionCachePrefix+sessionID, *sessionCache, ttl); err != nil {
		return nil, err
	}

	// Advertise an expiry slightly earlier than the cache TTL so clients
	// renew before the server-side record disappears.
	return &ViewerSession{
		File:        file,
		ID:          sessionID,
		AccessToken: sessionCache.Token,
		Expires:     time.Now().Add(time.Duration(ttl-sessionExpiresPadding) * time.Second).UnixMilli(),
	}, nil
}
|
||||
|
||||
// ViewerSessionFromContext returns the viewer session cache stored in ctx.
// It panics if the value is absent or of the wrong type, so callers must
// only use it on requests that passed viewer-session middleware.
func ViewerSessionFromContext(ctx context.Context) *ViewerSessionCache {
	return ctx.Value(ViewerSessionCacheCtx{}).(*ViewerSessionCache)
}
|
||||
|
||||
// ViewerFromContext returns the viewer configuration stored in ctx. Like
// ViewerSessionFromContext, it panics when the value is missing, so it is
// only safe after the corresponding middleware has run.
func ViewerFromContext(ctx context.Context) *setting.Viewer {
	return ctx.Value(ViewerCtx{}).(*setting.Viewer)
}
|
||||
Reference in New Issue
Block a user