Init V4 community edition (#2265)

* Init V4 community edition

* Init V4 community edition
This commit is contained in:
AaronLiu
2025-04-20 17:31:25 +08:00
committed by GitHub
parent da4e44b77a
commit 21d158db07
597 changed files with 119415 additions and 41692 deletions

View File

@@ -0,0 +1,877 @@
package dbfs
import (
"context"
"errors"
"fmt"
"math/rand"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/gofrs/uuid"
"github.com/samber/lo"
"golang.org/x/tools/container/intsets"
)
const (
	// ContextHintHeader is the HTTP header carrying the navigator context hint ID.
	ContextHintHeader = constants.CrHeaderPrefix + "Context-Hint"
	// NavigatorStateCachePrefix prefixes cache keys under which navigator state is persisted.
	NavigatorStateCachePrefix = "navigator_state_"
	// ContextHintTTL is the lifetime of a context hint, in seconds.
	ContextHintTTL = 5 * 60 // 5 minutes
	// folderSummaryCachePrefix prefixes cache keys for cached folder summaries (see DBFS.Get).
	folderSummaryCachePrefix = "folder_summary_"
)
type (
	// ContextHintCtxKey is the context key carrying a uuid.UUID hint used to
	// restore persisted navigator state (see getNavigator).
	ContextHintCtxKey struct{}
	// ByPassOwnerCheckCtxKey is the context key that, when set to true,
	// skips owner checks in DBFS operations (see WithBypassOwnerCheck).
	ByPassOwnerCheckCtxKey struct{}
)
// NewDatabaseFS assembles a database-backed file system bound to the given
// user, wiring in the inventory clients, lock system, settings provider,
// hasher and the two cache drivers (general cache and navigator state KV).
func NewDatabaseFS(u *ent.User, fileClient inventory.FileClient, shareClient inventory.ShareClient,
	l logging.Logger, ls lock.LockSystem, settingClient setting.Provider,
	storagePolicyClient inventory.StoragePolicyClient, hasher hashid.Encoder, userClient inventory.UserClient,
	cache, stateKv cache.Driver) fs.FileSystem {
	dbfs := &DBFS{
		user:                u,
		navigators:          make(map[string]Navigator),
		fileClient:          fileClient,
		shareClient:         shareClient,
		l:                   l,
		ls:                  ls,
		settingClient:       settingClient,
		storagePolicyClient: storagePolicyClient,
		hasher:              hasher,
		userClient:          userClient,
		cache:               cache,
		stateKv:             stateKv,
	}
	return dbfs
}
// DBFS is a database-backed implementation of fs.FileSystem. One instance is
// bound to a single user and lazily builds per-file-system navigators.
type DBFS struct {
	user                *ent.User                    // user this instance acts on behalf of
	navigators          map[string]Navigator         // cached navigators keyed by navigatorId(path)
	fileClient          inventory.FileClient         // file/entity persistence
	userClient          inventory.UserClient         // user persistence (storage diffs)
	storagePolicyClient inventory.StoragePolicyClient
	shareClient         inventory.ShareClient
	l                   logging.Logger
	ls                  lock.LockSystem // path-based lock system
	settingClient       setting.Provider
	hasher              hashid.Encoder // hash-ID codec for user/entity IDs
	cache               cache.Driver   // general cache (e.g. folder summaries)
	stateKv             cache.Driver   // KV store for persisted navigator state

	mu sync.Mutex // guards navigators (see getNavigator)
}
// Recycle releases every cached navigator held by this file system instance.
func (f *DBFS) Recycle() {
	for _, n := range f.navigators {
		n.Recycle()
	}
}
// GetEntity loads the entity with the given ID. ID 0 denotes the implicit
// empty entity owned by the current user; a missing ID yields
// fs.ErrEntityNotExist.
func (f *DBFS) GetEntity(ctx context.Context, entityID int) (fs.Entity, error) {
	if entityID == 0 {
		return fs.NewEmptyEntity(f.user), nil
	}

	entities, _, err := f.fileClient.GetEntitiesByIDs(ctx, []int{entityID}, 0)
	if err != nil {
		return nil, fmt.Errorf("failed to get entity: %w", err)
	}
	if len(entities) == 0 {
		return nil, fs.ErrEntityNotExist
	}

	return fs.NewEntity(entities[0]), nil
}
// List resolves the folder at path and lists its children according to the
// given options (pagination, ordering, streaming, metadata loading). It
// returns the parent file and the listing result.
func (f *DBFS) List(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.File, *fs.ListFileResult, error) {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	// Get navigator
	navigator, err := f.getNavigator(ctx, path, NavigatorCapabilityListChildren)
	if err != nil {
		return nil, nil, err
	}

	searchParams := path.SearchParameters()
	isSearching := searchParams != nil

	// Validate pagination args: clamp requested page size to the navigator's max.
	props := navigator.Capabilities(isSearching)
	if o.PageSize > props.MaxPageSize {
		o.PageSize = props.MaxPageSize
	}

	parent, err := f.getFileByPath(ctx, navigator, path)
	if err != nil {
		return nil, nil, fmt.Errorf("Parent not exist: %w", err)
	}

	// Generate a fresh hint ID so follow-up requests can restore navigator state.
	var hintId *uuid.UUID
	if o.generateContextHint {
		newHintId := uuid.Must(uuid.NewV4())
		hintId = &newHintId
	}

	if o.loadFilePublicMetadata {
		ctx = context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, true)
	}

	// Share edges are only loaded when the requester owns the parent.
	if o.loadFileShareIfOwned && parent != nil && parent.OwnerID() == f.user.ID {
		ctx = context.WithValue(ctx, inventory.LoadFileShare{}, true)
	}

	// Adapt the option's stream callback to the fs.File interface.
	var streamCallback func([]*File)
	if o.streamListResponseCallback != nil {
		streamCallback = func(files []*File) {
			o.streamListResponseCallback(parent, lo.Map(files, func(item *File, index int) fs.File {
				return item
			}))
		}
	}

	children, err := navigator.Children(ctx, parent, &ListArgs{
		Page: &inventory.PaginationArgs{
			Page:                o.FsOption.Page,
			PageSize:            o.PageSize,
			OrderBy:             o.OrderBy,
			Order:               inventory.OrderDirection(o.OrderDirection),
			UseCursorPagination: o.useCursorPagination,
			PageToken:           o.pageToken,
		},
		Search:         searchParams,
		StreamCallback: streamCallback,
	})
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get children: %w", err)
	}

	// Best-effort: a missing preferred policy only degrades the response.
	var storagePolicy *ent.StoragePolicy
	if parent != nil {
		storagePolicy, err = f.getPreferredPolicy(ctx, parent)
		if err != nil {
			f.l.Warning("Failed to get preferred policy: %v", err)
		}
	}

	return parent, &fs.ListFileResult{
		Files: lo.Map(children.Files, func(item *File, index int) fs.File {
			return item
		}),
		Props:                 props,
		Pagination:            children.Pagination,
		ContextHint:           hintId,
		RecursionLimitReached: children.RecursionLimitReached,
		MixedType:             children.MixedType,
		SingleFileView:        children.SingleFileView,
		Parent:                parent,
		StoragePolicy:         storagePolicy,
	}, nil
}
// Capacity reports the storage capacity of user u: bytes used and the total
// quota granted by u's group.
func (f *DBFS) Capacity(ctx context.Context, u *ent.User) (*fs.Capacity, error) {
	// First, get user's available storage packs
	var (
		res = &fs.Capacity{}
	)

	requesterGroup, err := u.Edges.GroupOrErr()
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to get user's group", err)
	}

	// Fix: read usage from the same user whose group quota is reported.
	// The original mixed u (quota) with f.user (usage), which disagreed
	// whenever a caller passed a user other than the bound one.
	res.Used = u.Storage
	res.Total = requesterGroup.MaxStorage
	return res, nil
}
// CreateEntity creates a new entity of the given type for an existing file,
// stored under the given policy. When opts carry a previous version ID, the
// call fails with fs.ErrStaleVersion if the file's primary entity no longer
// matches it (the file changed while being edited). Optionally prunes stale
// entities within the same transaction.
func (f *DBFS) CreateEntity(ctx context.Context, file fs.File, policy *ent.StoragePolicy,
	entityType types.EntityType, req *fs.UploadRequest, opts ...fs.Option) (fs.Entity, error) {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	// If uploader specified previous latest version ID (etag), we should check if it's still valid.
	if o.previousVersion != "" {
		entityId, err := f.hasher.Decode(o.previousVersion, hashid.EntityID)
		if err != nil {
			return nil, serializer.NewError(serializer.CodeParamErr, "Unknown version ID", err)
		}

		entities, err := file.(*File).Model.Edges.EntitiesOrErr()
		if err != nil || entities == nil {
			return nil, fmt.Errorf("create entity: previous entities not load")
		}

		// File is stale during edit if the latest entity is not the same as the one specified by uploader.
		if e := file.PrimaryEntity(); e == nil || e.ID() != entityId {
			return nil, fs.ErrStaleVersion
		}
	}

	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	fileModel := file.(*File).Model
	if o.removeStaleEntities {
		storageDiff, err := fc.RemoveStaleEntities(ctx, fileModel)
		if err != nil {
			_ = inventory.Rollback(tx)
			return nil, serializer.NewError(serializer.CodeDBError, "Failed to remove stale entities", err)
		}
		tx.AppendStorageDiff(storageDiff)
	}

	entity, storageDiff, err := fc.CreateEntity(ctx, fileModel, &inventory.EntityParameters{
		OwnerID:         file.(*File).Owner().ID,
		EntityType:      entityType,
		StoragePolicyID: policy.ID,
		Source:          req.Props.SavePath,
		Size:            req.Props.Size,
		// Fix: take the session ID from req — the request every other field
		// here comes from — instead of o.UploadRequest, which is nil unless
		// explicitly supplied via options and would panic on dereference.
		UploadSessionID: uuid.FromStringOrNil(req.Props.UploadSessionID),
	})
	if err != nil {
		_ = inventory.Rollback(tx)
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to create entity", err)
	}
	tx.AppendStorageDiff(storageDiff)

	if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit create change", err)
	}

	return fs.NewEntity(entity), nil
}
// PatchMetadata applies the given metadata patches (upserts and removals) to
// every file addressed by path. All resolvable targets are locked first and
// changes are committed in a single transaction; per-path resolution errors
// are aggregated rather than failing fast.
func (f *DBFS) PatchMetadata(ctx context.Context, path []*fs.URI, metas ...fs.MetadataPatch) error {
	ae := serializer.NewAggregateError()
	targets := make([]*File, 0, len(path))
	for _, p := range path {
		navigator, err := f.getNavigator(ctx, p, NavigatorCapabilityUpdateMetadata, NavigatorCapabilityLockFile)
		if err != nil {
			ae.Add(p.String(), err)
			continue
		}

		target, err := f.getFileByPath(ctx, navigator, p)
		if err != nil {
			ae.Add(p.String(), fmt.Errorf("failed to get target file: %w", err))
			continue
		}

		// Require Update permission
		// NOTE(review): unlike the resolution failures above, an owner-check
		// failure aborts the whole batch instead of being aggregated —
		// confirm this asymmetry is intended.
		if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && target.OwnerID() != f.user.ID {
			return fs.ErrOwnerOnly.WithError(fmt.Errorf("permission denied"))
		}

		if target.IsRootFolder() {
			ae.Add(p.String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot move root folder")))
			continue
		}

		targets = append(targets, target)
	}

	if len(targets) == 0 {
		return ae.Aggregate()
	}

	// Lock all targets
	lockTargets := lo.Map(targets, func(value *File, key int) *LockByPath {
		return &LockByPath{value.Uri(true), value, value.Type(), ""}
	})

	// Release is deferred before the error check so any partially acquired
	// locks are freed even when acquisition fails.
	ls, err := f.acquireByPath(ctx, -1, f.user, true, fs.LockApp(fs.ApplicationUpdateMetadata), lockTargets...)
	defer func() { _ = f.Release(ctx, ls) }()
	if err != nil {
		return err
	}

	// Split patches into upserts (with privacy flags) and removals.
	metadataMap := make(map[string]string)
	privateMap := make(map[string]bool)
	deleted := make([]string, 0)
	for _, meta := range metas {
		if meta.Remove {
			deleted = append(deleted, meta.Key)
			continue
		}

		metadataMap[meta.Key] = meta.Value
		if meta.Private {
			privateMap[meta.Key] = meta.Private
		}
	}

	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	for _, target := range targets {
		if err := fc.UpsertMetadata(ctx, target.Model, metadataMap, privateMap); err != nil {
			_ = inventory.Rollback(tx)
			return fmt.Errorf("failed to upsert metadata: %w", err)
		}

		if len(deleted) > 0 {
			if err := fc.RemoveMetadata(ctx, target.Model, deleted...); err != nil {
				_ = inventory.Rollback(tx)
				return fmt.Errorf("failed to remove metadata: %w", err)
			}
		}
	}

	if err := inventory.Commit(tx); err != nil {
		return serializer.NewError(serializer.CodeDBError, "Failed to commit metadata change", err)
	}

	return ae.Aggregate()
}
// SharedAddressTranslation resolves path, recursively following symbolic
// (shared) folders via the redirect URI recorded in their metadata until a
// concrete file is reached. On not-found it returns the closest resolved
// ancestor together with the original path and the lookup error.
func (f *DBFS) SharedAddressTranslation(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.File, *fs.URI, error) {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	// Get navigator
	navigator, err := f.getNavigator(ctx, path, o.requiredCapabilities...)
	if err != nil {
		return nil, nil, err
	}

	// Public metadata is always needed here to read the redirect target.
	ctx = context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, true)
	if o.loadFileEntities {
		ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)
	}

	// uriTranslation maps a symbolic folder to its redirect URI and recurses.
	// With rebase=true the unresolved tail of path is re-appended onto the
	// redirect target.
	uriTranslation := func(target *File, rebase bool) (fs.File, *fs.URI, error) {
		// Translate shared address to real address
		metadata := target.Metadata()
		if metadata == nil {
			// Metadata edge not loaded yet; query it explicitly.
			if err := f.fileClient.QueryMetadata(ctx, target.Model); err != nil {
				return nil, nil, fmt.Errorf("failed to query metadata: %w", err)
			}

			metadata = target.Metadata()
		}

		redirect, ok := metadata[MetadataSharedRedirect]
		if !ok {
			return nil, nil, fmt.Errorf("missing metadata %s in symbolic folder %s", MetadataSharedRedirect, path)
		}

		redirectUri, err := fs.NewUriFromString(redirect)
		if err != nil {
			return nil, nil, fmt.Errorf("invalid redirect uri %s in symbolic folder %s", redirect, path)
		}

		newUri := redirectUri
		if rebase {
			newUri = redirectUri.Rebase(path, target.Uri(false))
		}

		return f.SharedAddressTranslation(ctx, newUri, opts...)
	}

	target, err := f.getFileByPath(ctx, navigator, path)
	if err != nil {
		// Resolution stopped at a symbolic folder part-way down the path;
		// navigator.To returned that folder alongside the sentinel error.
		if errors.Is(err, ErrSymbolicFolderFound) && target.Type() == types.FileTypeFolder {
			return uriTranslation(target, true)
		}

		if !ent.IsNotFound(err) {
			return nil, nil, fmt.Errorf("failed to get target file: %w", err)
		}

		// Request URI does not exist, return most recent ancestor
		return target, path, err
	}

	if target.IsSymbolic() {
		return uriTranslation(target, false)
	}

	return target, path, nil
}
// Get resolves path and returns the file, optionally enriched with extended
// info (storage policies, shares) and — for folders — a cached or freshly
// computed recursive size/count summary.
func (f *DBFS) Get(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.File, error) {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	// Get navigator
	navigator, err := f.getNavigator(ctx, path, o.requiredCapabilities...)
	if err != nil {
		return nil, err
	}

	// Toggle eager-loading of the edges required by the requested options.
	if o.loadFilePublicMetadata || o.extendedInfo {
		ctx = context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, true)
	}

	if o.loadFileEntities || o.extendedInfo || o.loadFolderSummary {
		ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)
	}

	if o.loadFileShareIfOwned {
		ctx = context.WithValue(ctx, inventory.LoadFileShare{}, true)
	}

	if o.loadEntityUser {
		ctx = context.WithValue(ctx, inventory.LoadEntityUser{}, true)
	}

	// Get target file
	target, err := f.getFileByPath(ctx, navigator, path)
	if err != nil {
		return nil, fmt.Errorf("failed to get target file: %w", err)
	}

	if o.extendedInfo && target != nil {
		extendedInfo := &fs.FileExtendedInfo{
			StorageUsed:           target.SizeUsed(),
			EntityStoragePolicies: make(map[int]*ent.StoragePolicy),
		}

		// Best-effort: on lookup failure the file's own policy is omitted.
		policyID := target.PolicyID()
		if policyID > 0 {
			policy, err := f.storagePolicyClient.GetPolicyByID(ctx, policyID)
			if err == nil {
				extendedInfo.StoragePolicy = policy
			}
		}

		target.FileExtendedInfo = extendedInfo
		// Shares are exposed only to the owner or to admins.
		if target.OwnerID() == f.user.ID || f.user.Edges.Group.Permissions.Enabled(int(types.GroupPermissionIsAdmin)) {
			target.FileExtendedInfo.Shares = target.Model.Edges.Shares
		}

		// Resolve the storage policy of each entity exactly once.
		entities := target.Entities()
		for _, entity := range entities {
			if _, ok := extendedInfo.EntityStoragePolicies[entity.PolicyID()]; !ok {
				policy, err := f.storagePolicyClient.GetPolicyByID(ctx, entity.PolicyID())
				if err != nil {
					return nil, fmt.Errorf("failed to get policy: %w", err)
				}

				extendedInfo.EntityStoragePolicies[entity.PolicyID()] = policy
			}
		}
	}

	// Calculate folder summary if requested
	if o.loadFolderSummary && target != nil && target.Type() == types.FileTypeFolder {
		// Only the owner may compute a summary unless the check is bypassed.
		if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && target.OwnerID() != f.user.ID {
			return nil, fs.ErrOwnerOnly
		}

		// first, try to load from cache
		summary, ok := f.cache.Get(fmt.Sprintf("%s%d", folderSummaryCachePrefix, target.ID()))
		if ok {
			summaryTyped := summary.(fs.FolderSummary)
			target.FileFolderSummary = &summaryTyped
		} else {
			// cache miss, walk the folder to get the summary
			newSummary := &fs.FolderSummary{Completed: true}
			if f.user.Edges.Group == nil {
				return nil, fmt.Errorf("user group not loaded")
			}

			// Cap the walk per group settings; hitting the cap marks the
			// summary incomplete instead of failing the request.
			limit := max(f.user.Edges.Group.Settings.MaxWalkedFiles, 1)
			// disable load metadata to speed up
			ctxWalk := context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, false)
			if err := navigator.Walk(ctxWalk, []*File{target}, limit, intsets.MaxInt, func(files []*File, l int) error {
				for _, file := range files {
					// Skip the folder itself; only descendants are counted.
					if file.ID() == target.ID() {
						continue
					}

					if file.Type() == types.FileTypeFile {
						newSummary.Files++
					} else {
						newSummary.Folders++
					}

					newSummary.Size += file.SizeUsed()
				}

				return nil
			}); err != nil {
				if !errors.Is(err, ErrFileCountLimitedReached) {
					return nil, fmt.Errorf("failed to walk: %w", err)
				}

				newSummary.Completed = false
			}

			// cache the summary
			newSummary.CalculatedAt = time.Now()
			f.cache.Set(fmt.Sprintf("%s%d", folderSummaryCachePrefix, target.ID()), newSummary, f.settingClient.FolderPropsCacheTTL(ctx))
			target.FileFolderSummary = newSummary
		}
	}

	if target == nil {
		return nil, fmt.Errorf("cannot get root file with nil root")
	}

	return target, nil
}
// CheckCapability verifies that the file system addressed by uri supports
// every capability required by the given options.
func (f *DBFS) CheckCapability(ctx context.Context, uri *fs.URI, opts ...fs.Option) error {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	// Resolving the navigator performs the capability check as a side effect.
	_, err := f.getNavigator(ctx, uri, o.requiredCapabilities...)
	return err
}
// Walk traverses the tree rooted at path up to depth levels, invoking walk
// for every visited file. Only the owner may walk unless the owner check is
// bypassed via context.
func (f *DBFS) Walk(ctx context.Context, path *fs.URI, depth int, walk fs.WalkFunc, opts ...fs.Option) error {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	if o.loadFilePublicMetadata {
		ctx = context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, true)
	}

	if o.loadFileEntities {
		ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)
	}

	// Get navigator
	navigator, err := f.getNavigator(ctx, path, o.requiredCapabilities...)
	if err != nil {
		return err
	}

	target, err := f.getFileByPath(ctx, navigator, path)
	if err != nil {
		return err
	}

	// Require Read permission
	if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && target.OwnerID() != f.user.ID {
		return fs.ErrOwnerOnly
	}

	// Walk
	if f.user.Edges.Group == nil {
		return fmt.Errorf("user group not loaded")
	}

	// Cap total visited files per group settings (at least 1).
	limit := max(f.user.Edges.Group.Settings.MaxWalkedFiles, 1)
	if err := navigator.Walk(ctx, []*File{target}, limit, depth, func(files []*File, l int) error {
		for _, file := range files {
			if err := walk(file, l); err != nil {
				return err
			}
		}

		return nil
	}); err != nil {
		return fmt.Errorf("failed to walk: %w", err)
	}

	return nil
}
// ExecuteNavigatorHooks runs the navigator hook of the given type for file.
// Files that are not DBFS files are silently skipped.
func (f *DBFS) ExecuteNavigatorHooks(ctx context.Context, hookType fs.HookType, file fs.File) error {
	navigator, err := f.getNavigator(ctx, file.Uri(false))
	if err != nil {
		return err
	}

	dbfsFile, ok := file.(*File)
	if !ok {
		// Nothing to do for non-DBFS file implementations.
		return nil
	}

	return navigator.ExecuteHook(ctx, hookType, dbfsFile)
}
// createFile creates a file with given name and type under given parent folder.
// The storage policy comes from the options or is resolved from the parent's
// owner group; when an upload request is present, an initial version entity
// is created within the same transaction.
func (f *DBFS) createFile(ctx context.Context, parent *File, name string, fileType types.FileType, o *dbfsOption) (*File, error) {
	createFileArgs := &inventory.CreateFileParameters{
		FileType:            fileType,
		Name:                name,
		MetadataPrivateMask: make(map[string]bool),
		Metadata:            make(map[string]string),
		IsSymbolic:          o.isSymbolicLink,
	}

	if o.Metadata != nil {
		for k, v := range o.Metadata {
			createFileArgs.Metadata[k] = v
		}
	}

	if o.preferredStoragePolicy != nil {
		createFileArgs.StoragePolicyID = o.preferredStoragePolicy.ID
	} else {
		// get preferred storage policy
		policy, err := f.getPreferredPolicy(ctx, parent)
		if err != nil {
			return nil, err
		}

		createFileArgs.StoragePolicyID = policy.ID
	}

	if o.UploadRequest != nil {
		createFileArgs.EntityParameters = &inventory.EntityParameters{
			EntityType:      types.EntityTypeVersion,
			Source:          o.UploadRequest.Props.SavePath,
			Size:            o.UploadRequest.Props.Size,
			ModifiedAt:      o.UploadRequest.Props.LastModified,
			UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID),
		}
	}

	// Start transaction to create files
	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	file, entity, storageDiff, err := fc.CreateFile(ctx, parent.Model, createFileArgs)
	if err != nil {
		_ = inventory.Rollback(tx)
		// A uniqueness violation means a sibling with this name already exists.
		if ent.IsConstraintError(err) {
			return nil, fs.ErrFileExisted.WithError(err)
		}

		return nil, serializer.NewError(serializer.CodeDBError, "Failed to create file", err)
	}

	tx.AppendStorageDiff(storageDiff)
	if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit create change", err)
	}

	file.SetEntities([]*ent.Entity{entity})
	return newFile(parent, file), nil
}
// getPreferredPolicy tries to get the preferred storage policy for the given
// file, resolved from the file owner's group.
func (f *DBFS) getPreferredPolicy(ctx context.Context, file *File) (*ent.StoragePolicy, error) {
	group := file.Owner().Edges.Group
	if group == nil {
		return nil, fmt.Errorf("owner group not loaded")
	}

	policy, err := f.storagePolicyClient.GetByGroup(ctx, group)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to get available storage policies", err)
	}

	return policy, nil
}
// getFileByPath resolves path to a File via the navigator. If the target
// user's file system has not been initialized yet (no root folder), it is
// initialized on the fly and resolution is retried once.
func (f *DBFS) getFileByPath(ctx context.Context, navigator Navigator, path *fs.URI) (*File, error) {
	file, err := navigator.To(ctx, path)
	if err != nil && errors.Is(err, ErrFsNotInitialized) {
		// Initialize file system for user if root folder does not exist.
		// NOTE(review): path.ID presumably falls back to the current user's
		// hashed ID when the URI carries none — confirm against fs.URI.
		uid := path.ID(hashid.EncodeUserID(f.hasher, f.user.ID))
		uidInt, err := f.hasher.Decode(uid, hashid.UserID)
		if err != nil {
			return nil, fmt.Errorf("failed to decode user ID: %w", err)
		}

		if err := f.initFs(ctx, uidInt); err != nil {
			return nil, fmt.Errorf("failed to initialize file system: %w", err)
		}

		return navigator.To(ctx, path)
	}

	return file, err
}
// initFs initializes the file system for the user by creating the reserved
// root folder owned by uid.
func (f *DBFS) initFs(ctx context.Context, uid int) error {
	f.l.Info("Initialize database file system for user %q", f.user.Email)
	args := &inventory.CreateFolderParameters{
		Owner: uid,
		Name:  inventory.RootFolderName,
	}
	if _, err := f.fileClient.CreateFolder(ctx, nil, args); err != nil {
		return fmt.Errorf("failed to create root folder: %w", err)
	}

	return nil
}
// getNavigator returns the navigator for path's file system, creating and
// caching one on first use (optionally restoring persisted state via a
// context hint), then verifies it supports all requiredCapabilities.
func (f *DBFS) getNavigator(ctx context.Context, path *fs.URI, requiredCapabilities ...NavigatorCapability) (Navigator, error) {
	pathFs := path.FileSystem()
	config := f.settingClient.DBFS(ctx)
	navigatorId := f.navigatorId(path)
	var (
		res Navigator
	)

	// The navigators map is shared across calls on this instance; guard it.
	f.mu.Lock()
	defer f.mu.Unlock()
	if navigator, ok := f.navigators[navigatorId]; ok {
		res = navigator
	} else {
		var n Navigator
		switch pathFs {
		case constants.FileSystemMy:
			n = NewMyNavigator(f.user, f.fileClient, f.userClient, f.l, config, f.hasher)
		case constants.FileSystemShare:
			n = NewShareNavigator(f.user, f.fileClient, f.shareClient, f.l, config, f.hasher)
		case constants.FileSystemTrash:
			n = NewTrashNavigator(f.user, f.fileClient, f.l, config, f.hasher)
		case constants.FileSystemSharedWithMe:
			n = NewSharedWithMeNavigator(f.user, f.fileClient, f.l, config, f.hasher)
		default:
			return nil, fmt.Errorf("unknown file system %q", pathFs)
		}

		// retrieve state if context hint is provided
		if stateID, ok := ctx.Value(ContextHintCtxKey{}).(uuid.UUID); ok && stateID != uuid.Nil {
			cacheKey := NavigatorStateCachePrefix + stateID.String() + "_" + navigatorId
			if stateRaw, ok := f.stateKv.Get(cacheKey); ok {
				// Failure to restore is non-fatal; the navigator starts fresh.
				if err := n.RestoreState(stateRaw.(State)); err != nil {
					f.l.Warning("Failed to restore state for navigator %q: %s", navigatorId, err)
				} else {
					f.l.Info("Navigator %q restored state (%q) successfully", navigatorId, stateID)
				}
			} else {
				// State expire, refresh it
				n.PersistState(f.stateKv, cacheKey)
			}
		}

		f.navigators[navigatorId] = n
		res = n
	}

	// Check fs capabilities
	capabilities := res.Capabilities(false).Capability
	for _, capability := range requiredCapabilities {
		if !capabilities.Enabled(int(capability)) {
			return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("action %q is not supported under current fs", capability))
		}
	}
	return res, nil
}
// navigatorId derives the cache key identifying the navigator used for path.
// Trash navigators are keyed without the user ID suffix; all other file
// systems include it.
func (f *DBFS) navigatorId(path *fs.URI) string {
	uidHashed := hashid.EncodeUserID(f.hasher, f.user.ID)
	fsName := path.FileSystem()
	if fsName == constants.FileSystemTrash {
		return fmt.Sprintf("%s/%s", fsName, path.ID(uidHashed))
	}

	// "my", "share" and any other file system share the same key shape.
	return fmt.Sprintf("%s/%s/%d", fsName, path.ID(uidHashed), f.user.ID)
}
// generateSavePath generates the physical save path for the upload request.
// It substitutes time, randomness, user and request placeholders into the
// policy's directory and file name rules, then joins the two.
func generateSavePath(policy *ent.StoragePolicy, req *fs.UploadRequest, user *ent.User) string {
	// Capture a single instant so the date/time placeholders can never
	// disagree with each other (the original called time.Now() per
	// placeholder, so e.g. {date} and {hour} could straddle midnight).
	now := time.Now()
	baseTable := map[string]string{
		"{randomkey16}":    util.RandStringRunes(16),
		"{randomkey8}":     util.RandStringRunes(8),
		"{timestamp}":      strconv.FormatInt(now.Unix(), 10),
		"{timestamp_nano}": strconv.FormatInt(now.UnixNano(), 10),
		"{randomnum2}":     strconv.Itoa(rand.Intn(2)),
		"{randomnum3}":     strconv.Itoa(rand.Intn(3)),
		"{randomnum4}":     strconv.Itoa(rand.Intn(4)),
		"{randomnum8}":     strconv.Itoa(rand.Intn(8)),
		"{uid}":            strconv.Itoa(user.ID),
		"{datetime}":       now.Format("20060102150405"),
		"{date}":           now.Format("20060102"),
		"{year}":           now.Format("2006"),
		"{month}":          now.Format("01"),
		"{day}":            now.Format("02"),
		"{hour}":           now.Format("15"),
		"{minute}":         now.Format("04"),
		"{second}":         now.Format("05"),
	}

	// Directory rule: base placeholders first, then {path} separately so
	// request-path content is never itself subject to base substitution.
	dirRule := policy.DirNameRule
	dirRule = filepath.ToSlash(dirRule)
	dirRule = util.Replace(baseTable, dirRule)
	dirRule = util.Replace(map[string]string{
		"{path}": req.Props.Uri.Dir() + fs.Separator,
	}, dirRule)

	// File name rule adds origin-name and uuid placeholders.
	originName := req.Props.Uri.Name()
	nameTable := map[string]string{
		"{originname}":             originName,
		"{ext}":                    filepath.Ext(originName),
		"{originname_without_ext}": strings.TrimSuffix(originName, filepath.Ext(originName)),
		"{uuid}":                   uuid.Must(uuid.NewV4()).String(),
	}

	nameRule := policy.FileNameRule
	nameRule = util.Replace(baseTable, nameRule)
	nameRule = util.Replace(nameTable, nameRule)

	return path.Join(path.Clean(dirRule), nameRule)
}
// canMoveOrCopyTo reports whether a file may be moved — or copied, when
// isCopy is true — from src to dst across file systems.
func canMoveOrCopyTo(src, dst *fs.URI, isCopy bool) bool {
	srcFs, dstFs := src.FileSystem(), dst.FileSystem()
	if isCopy {
		// Copy is only permitted entirely inside the "my" file system.
		return srcFs == constants.FileSystemMy && dstFs == constants.FileSystemMy
	}

	switch srcFs {
	case constants.FileSystemMy:
		// Moving out of "my" can target "my" (rename/move) or the trash bin.
		return dstFs == constants.FileSystemMy || dstFs == constants.FileSystemTrash
	case constants.FileSystemTrash:
		// Restoring from trash must go back to "my".
		return dstFs == constants.FileSystemMy
	default:
		return false
	}
}
// allAncestors collects the distinct ancestor models of every target,
// preserving first-seen order across targets.
func allAncestors(targets []*File) []*ent.File {
	seen := make(map[int]struct{})
	res := make([]*ent.File, 0)
	for _, target := range targets {
		for _, ancestor := range target.Ancestors() {
			if _, ok := seen[ancestor.ID()]; ok {
				continue
			}

			seen[ancestor.ID()] = struct{}{}
			res = append(res, ancestor.Model)
		}
	}

	return res
}
// WithBypassOwnerCheck returns a context under which DBFS operations skip
// their owner checks.
func WithBypassOwnerCheck(ctx context.Context) context.Context {
	bypassCtx := context.WithValue(ctx, ByPassOwnerCheckCtxKey{}, true)
	return bypassCtx
}

View File

@@ -0,0 +1,335 @@
package dbfs
import (
"encoding/gob"
"path"
"sync"
"time"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/samber/lo"
)
// init registers the concrete types with gob so navigator state containing
// File trees can be (de)serialized for the state cache.
func init() {
	gob.Register(File{})
	gob.Register(shareNavigatorState{})
	gob.Register(map[string]*File{})
	gob.Register(map[int]*File{})
}
// filePool recycles File nodes to reduce allocations while building
// directory trees; nodes are returned via File.Recycle.
var filePool = &sync.Pool{
	New: func() any {
		return &File{
			Children: make(map[string]*File),
		}
	},
}
type (
	// File is a node in an in-memory directory tree wrapping an ent.File
	// model. Nodes are pooled (see filePool) and linked parent/child.
	File struct {
		Model    *ent.File        // underlying database model
		Children map[string]*File // child nodes keyed by name
		Parent   *File            // parent node, nil for a tree root
		// Path holds the URI of this node in two views:
		// index pathIndexRoot (owner view) and pathIndexUser (user view).
		Path [2]*fs.URI

		OwnerModel *ent.User // owner, set on (at least) the root node
		IsUserRoot bool      // marks the root from the user's view

		CapabilitiesBs    *boolset.BooleanSet  // capability set shared down from the root
		FileExtendedInfo  *fs.FileExtendedInfo // attached on demand (see DBFS.Get)
		FileFolderSummary *fs.FolderSummary    // attached on demand (see DBFS.Get)

		mu *sync.Mutex // tree-wide lock, shared by all nodes of one tree
	}
)
const (
	// MetadataSysPrefix prefixes system-reserved metadata keys.
	MetadataSysPrefix           = "sys:"
	MetadataUploadSessionPrefix = MetadataSysPrefix + "upload_session"
	// MetadataUploadSessionID records the upload session a file belongs to.
	MetadataUploadSessionID = MetadataUploadSessionPrefix + "_id"
	// MetadataSharedRedirect stores the redirect URI of a symbolic (shared) folder.
	MetadataSharedRedirect = MetadataSysPrefix + "shared_redirect"
	// MetadataRestoreUri stores the original URI of a trashed file.
	MetadataRestoreUri          = MetadataSysPrefix + "restore_uri"
	MetadataExpectedCollectTime = MetadataSysPrefix + "expected_collect_time"

	// ThumbMetadataPrefix prefixes thumbnail-related metadata keys.
	ThumbMetadataPrefix = "thumb:"
	ThumbDisabledKey    = ThumbMetadataPrefix + "disabled"

	// Indexes into File.Path: owner view and user view respectively.
	pathIndexRoot = 0
	pathIndexUser = 1
)
// Name returns the raw file name from the underlying model.
func (f *File) Name() string {
	return f.Model.Name
}

// IsNil reports whether the receiver itself is nil.
func (f *File) IsNil() bool {
	return f == nil
}

// DisplayName returns the name to present to users: for files carrying a
// restore URI in metadata (trashed files), the original base name; otherwise
// the plain Name.
func (f *File) DisplayName() string {
	if uri, ok := f.Metadata()[MetadataRestoreUri]; ok {
		restoreUri, err := fs.NewUriFromString(uri)
		if err != nil {
			// Unparsable restore URI: fall back to the stored name.
			return f.Name()
		}

		return path.Base(restoreUri.Path())
	}

	return f.Name()
}

// CanHaveChildren reports whether children may exist under this file:
// it must be a folder and not a symbolic link.
func (f *File) CanHaveChildren() bool {
	return f.Type() == types.FileTypeFolder && !f.IsSymbolic()
}

// Ext returns the file name extension.
func (f *File) Ext() string {
	return util.Ext(f.Name())
}

// ID returns the database ID of the file.
func (f *File) ID() int {
	return f.Model.ID
}

// IsSymbolic reports whether the file is a symbolic link.
func (f *File) IsSymbolic() bool {
	return f.Model.IsSymbolic
}

// Type returns the file type (file or folder).
func (f *File) Type() types.FileType {
	return types.FileType(f.Model.Type)
}

// Size returns the logical size recorded on the model.
func (f *File) Size() int64 {
	return f.Model.Size
}

// SizeUsed returns the total physical size of all loaded entities.
func (f *File) SizeUsed() int64 {
	return lo.SumBy(f.Entities(), func(item fs.Entity) int64 {
		return item.Size()
	})
}

// UpdatedAt returns the model's last modification time.
func (f *File) UpdatedAt() time.Time {
	return f.Model.UpdatedAt
}

// CreatedAt returns the model's creation time.
func (f *File) CreatedAt() time.Time {
	return f.Model.CreatedAt
}

// ExtendedInfo returns extended info previously attached by DBFS.Get, or nil.
func (f *File) ExtendedInfo() *fs.FileExtendedInfo {
	return f.FileExtendedInfo
}
// Owner returns the owner model by walking up the parent chain to the first
// node with one loaded; nil when no ancestor carries an owner.
func (f *File) Owner() *ent.User {
	parent := f
	for parent != nil {
		if parent.OwnerModel != nil {
			return parent.OwnerModel
		}

		parent = parent.Parent
	}

	return nil
}

// OwnerID returns the owner's user ID recorded on this file's own model.
func (f *File) OwnerID() int {
	return f.Model.OwnerID
}

// Shared reports whether any share edges are loaded for this file.
func (f *File) Shared() bool {
	return len(f.Model.Edges.Shares) > 0
}

// Metadata returns the loaded metadata as a name->value map, or nil when the
// metadata edge has not been loaded.
func (f *File) Metadata() map[string]string {
	if f.Model.Edges.Metadata == nil {
		return nil
	}

	return lo.Associate(f.Model.Edges.Metadata, func(item *ent.Metadata) (string, string) {
		return item.Name, item.Value
	})
}
// Uri returns the URI of the file.
// If isRoot is true, the URI will be returned from owner's view.
// Otherwise, the URI will be returned from user's view.
// When no ancestor carries a path for the requested view, nil is returned.
func (f *File) Uri(isRoot bool) *fs.URI {
	index := 1
	if isRoot {
		index = 0
	}

	// Fast path: this node already has the requested view's path cached,
	// or is a root with no path at all.
	if f.Path[index] != nil || f.Parent == nil {
		return f.Path[index]
	}

	// Find the root file
	// Collect name segments up to the nearest ancestor that has a path,
	// then join them onto that ancestor's URI.
	elements := make([]string, 0)
	parent := f
	for parent.Parent != nil && parent.Path[index] == nil {
		elements = append([]string{parent.Name()}, elements...)
		parent = parent.Parent
	}

	if parent.Path[index] == nil {
		return nil
	}

	return parent.Path[index].Join(elements...)
}
// UserRoot return the root file from user's view.
// Returns nil when no ancestor is flagged as the user root.
func (f *File) UserRoot() *File {
	root := f
	for root != nil && !root.IsUserRoot {
		root = root.Parent
	}

	return root
}

// Root return the root file from owner's view.
func (f *File) Root() *File {
	root := f
	for root.Parent != nil {
		root = root.Parent
	}

	return root
}

// RootUri return the URI of the user root file under owner's view.
func (f *File) RootUri() *fs.URI {
	return f.UserRoot().Uri(true)
}
// Replace swaps this node's model for the given one: the old node is detached
// from its parent's children and recycled, and a freshly linked node built
// from model is returned in its place.
func (f *File) Replace(model *ent.File) *File {
	f.mu.Lock()
	delete(f.Parent.Children, f.Model.Name)
	f.mu.Unlock()
	// Recycle the old node only after the replacement has been built.
	defer f.Recycle()

	replaced := newFile(f.Parent, model)
	if f.IsRootFile() {
		// If target is a root file, the user path should remain the same.
		replaced.Path[pathIndexUser] = f.Path[pathIndexUser]
	}

	return replaced
}
// Ancestors return all ancestors of the file, until the owner root is reached.
func (f *File) Ancestors() []*File {
	chain := f.AncestorsChain()
	return chain[1:]
}

// AncestorsChain return all ancestors of the file (including itself), until the owner root is reached.
func (f *File) AncestorsChain() []*File {
	chain := make([]*File, 0)
	for node := f; node != nil; node = node.Parent {
		chain = append(chain, node)
	}

	return chain
}
// PolicyID returns the ID of the storage policy associated with this file.
func (f *File) PolicyID() int {
	// The original aliased f as "root" without walking anywhere — a leftover
	// from an ancestor-walk pattern; read the model directly instead.
	return f.Model.StoragePolicyFiles
}
// IsRootFolder return true if the file is the root folder under user's view.
func (f *File) IsRootFolder() bool {
	return f.Type() == types.FileTypeFolder && f.IsRootFile()
}

// IsRootFile return true if the file is the root file under user's view.
// A file qualifies when it carries the reserved root folder name, or when its
// user-view URI path is empty or the bare separator.
func (f *File) IsRootFile() bool {
	// NOTE(review): Uri(false) can return nil (see Uri); uri.Path() would
	// then panic unless fs.URI tolerates a nil receiver — confirm.
	uri := f.Uri(false)
	p := uri.Path()
	return f.Model.Name == inventory.RootFolderName || p == fs.Separator || p == ""
}
// Entities returns all loaded entities wrapped as fs.Entity values.
func (f *File) Entities() []fs.Entity {
	return lo.Map(f.Model.Edges.Entities, func(item *ent.Entity, index int) fs.Entity {
		return fs.NewEntity(item)
	})
}

// PrimaryEntity returns the version entity referenced by the model's
// PrimaryEntity ID, or an empty entity owned by the file's owner when it is
// not among the loaded entities.
func (f *File) PrimaryEntity() fs.Entity {
	primary, _ := lo.Find(f.Model.Edges.Entities, func(item *ent.Entity) bool {
		return item.Type == int(types.EntityTypeVersion) && item.ID == f.Model.PrimaryEntity
	})

	if primary != nil {
		return fs.NewEntity(primary)
	}

	return fs.NewEmptyEntity(f.Owner())
}

// PrimaryEntityID returns the ID of the file's primary entity.
func (f *File) PrimaryEntityID() int {
	return f.Model.PrimaryEntity
}
// FolderSummary returns the folder summary previously attached (see
// DBFS.Get), or nil.
func (f *File) FolderSummary() *fs.FolderSummary {
	return f.FileFolderSummary
}

// Capabilities returns the capability set shared down from the tree root.
func (f *File) Capabilities() *boolset.BooleanSet {
	return f.CapabilitiesBs
}
// newFile takes a File node from the pool, binds it to model, and links it
// under parent — sharing the parent's mutex and capability set, and extending
// whichever view paths the parent carries. A parentless node gets a fresh
// mutex of its own.
func newFile(parent *File, model *ent.File) *File {
	f := filePool.Get().(*File)
	f.Model = model
	if parent != nil {
		f.Parent = parent
		parent.mu.Lock()
		parent.Children[model.Name] = f
		// Derive child paths for each view the parent has a path for.
		if parent.Path[pathIndexUser] != nil {
			f.Path[pathIndexUser] = parent.Path[pathIndexUser].Join(model.Name)
		}

		if parent.Path[pathIndexRoot] != nil {
			f.Path[pathIndexRoot] = parent.Path[pathIndexRoot].Join(model.Name)
		}

		f.CapabilitiesBs = parent.CapabilitiesBs
		// All nodes of one tree share a single mutex.
		f.mu = parent.mu
		parent.mu.Unlock()
	} else {
		f.mu = &sync.Mutex{}
	}

	return f
}
// newParentFile wraps child with a new parent node built from the given
// model, re-linking the tree upward and sharing the child's mutex so the
// joined tree keeps a single lock.
func newParentFile(parent *ent.File, child *File) *File {
	newParent := newFile(nil, parent)
	// NOTE(review): Children and Parent are mutated without holding the tree
	// lock; callers presumably guarantee exclusive access here — confirm.
	newParent.Children[child.Name()] = child
	child.Parent = newParent
	newParent.mu = child.mu
	return newParent
}
// Recycle recursively returns this node and all of its descendants to
// filePool, clearing every field so pooled nodes retain no stale references.
func (f *File) Recycle() {
	for _, child := range f.Children {
		child.Recycle()
	}

	f.Model = nil
	// Fresh map: the old one may still be referenced by recycled children.
	f.Children = make(map[string]*File)
	f.Path[0] = nil
	f.Path[1] = nil
	f.Parent = nil
	f.OwnerModel = nil
	f.IsUserRoot = false
	f.mu = nil
	filePool.Put(f)
}

View File

@@ -0,0 +1,55 @@
package dbfs
import (
"context"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/samber/lo"
)
// StaleEntities looks up the given entity IDs and returns the stale ones,
// wrapped as fs.Entity values.
func (f *DBFS) StaleEntities(ctx context.Context, entities ...int) ([]fs.Entity, error) {
	models, err := f.fileClient.StaleEntities(ctx, entities...)
	if err != nil {
		return nil, err
	}

	return lo.Map(models, func(m *ent.Entity, _ int) fs.Entity {
		return fs.NewEntity(m)
	}), nil
}
// AllFilesInTrashBin lists the children of the trash-bin root, honoring the
// pagination and ordering options supplied by the caller.
func (f *DBFS) AllFilesInTrashBin(ctx context.Context, opts ...fs.Option) (*fs.ListFileResult, error) {
	option := newDbfsOption()
	for _, apply := range opts {
		option.apply(apply)
	}

	navigator, err := f.getNavigator(ctx, newTrashUri(""), NavigatorCapabilityListChildren)
	if err != nil {
		return nil, err
	}

	// Listed children should carry their public metadata.
	ctx = context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, true)
	children, err := navigator.Children(ctx, nil, &ListArgs{
		Page: &inventory.PaginationArgs{
			Page:                option.FsOption.Page,
			PageSize:            option.PageSize,
			OrderBy:             option.OrderBy,
			Order:               inventory.OrderDirection(option.OrderDirection),
			UseCursorPagination: option.useCursorPagination,
			PageToken:           option.pageToken,
		},
	})
	if err != nil {
		return nil, err
	}

	files := make([]fs.File, 0, len(children.Files))
	for _, child := range children.Files {
		files = append(files, child)
	}
	return &fs.ListFileResult{
		Files:                 files,
		Pagination:            children.Pagination,
		RecursionLimitReached: children.RecursionLimitReached,
	}, nil
}

View File

@@ -0,0 +1,325 @@
package dbfs
import (
"context"
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/samber/lo"
)
type (
	// LockSession tracks the lock tokens held during a DBFS operation.
	// Tokens maps a lock key (namespace + "/" + path) to its token;
	// TokenStack groups keys into frames so nested operations can release
	// exactly the locks they acquired.
	LockSession struct {
		Tokens     map[string]string
		TokenStack [][]string
	}
	// LockByPath describes a single lock request: the URI to lock, the
	// closest existing ancestor file, the type being locked, and an
	// optional caller-provided token.
	LockByPath struct {
		Uri             *fs.URI
		ClosestAncestor *File
		Type            types.FileType
		Token           string
	}
	// AlwaysIncludeTokenCtx is a context key; when set to true, lock
	// conflict responses keep the conflicting token instead of hiding it.
	AlwaysIncludeTokenCtx struct{}
)
// ConfirmLock verifies that the lock covering uri (joined onto ancestor's
// root URI) is held by one of the provided tokens. It returns a release
// function and the lock session. When the lock key is already recorded in
// the current session, confirmation is skipped and a no-op release is
// returned.
func (f *DBFS) ConfirmLock(ctx context.Context, ancestor fs.File, uri *fs.URI, token ...string) (func(), fs.LockSession, error) {
	session := LockSessionFromCtx(ctx)
	lockUri := ancestor.RootUri().JoinRaw(uri.PathTrimmed())
	ns, root, lKey := lockTupleFromUri(lockUri, f.user, f.hasher)
	lc := lock.LockInfo{
		Ns:    ns,
		Root:  root,
		Token: token,
	}

	// Skip if already locked in current session
	if _, ok := session.Tokens[lKey]; ok {
		return func() {}, session, nil
	}

	release, tokenHit, err := f.ls.Confirm(time.Now(), lc)
	if err != nil {
		return nil, nil, err
	}

	// Record the confirmed token in the current stack frame so it is
	// released together with the rest of the session.
	session.Tokens[lKey] = tokenHit
	stackIndex := len(session.TokenStack) - 1
	session.TokenStack[stackIndex] = append(session.TokenStack[stackIndex], lKey)
	return release, session, nil
}
// Lock acquires a lock of duration d on uri for requester, tagged with the
// given application and optional caller-supplied token. Locking the root
// folder is rejected, and non-owners are refused unless
// ByPassOwnerCheckCtxKey is set in ctx. The lock is typed as a folder only
// when uri resolves to the existing ancestor itself; otherwise it is
// treated as a file lock.
func (f *DBFS) Lock(ctx context.Context, d time.Duration, requester *ent.User, zeroDepth bool, application lock.Application,
	uri *fs.URI, token string) (fs.LockSession, error) {
	// Get navigator
	navigator, err := f.getNavigator(ctx, uri, NavigatorCapabilityLockFile)
	if err != nil {
		return nil, err
	}

	// Resolve the closest existing ancestor; a not-found target is fine,
	// the lock then covers a yet-to-be-created path.
	ancestor, err := f.getFileByPath(ctx, navigator, uri)
	if err != nil && !ent.IsNotFound(err) {
		return nil, fmt.Errorf("failed to get ancestor: %w", err)
	}

	if ancestor.IsRootFolder() && ancestor.Uri(false).IsSame(uri, hashid.EncodeUserID(f.hasher, f.user.ID)) {
		return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot lock root folder"))
	}

	// Lock require create or update permission
	if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && ancestor.Owner().ID != requester.ID {
		return nil, fs.ErrOwnerOnly
	}

	// Use the ancestor's own type only when the URI points at the ancestor
	// itself; otherwise lock as a file.
	t := types.FileTypeFile
	if ancestor.Uri(false).IsSame(uri, hashid.EncodeUserID(f.hasher, f.user.ID)) {
		t = ancestor.Type()
	}
	lr := &LockByPath{
		Uri:             ancestor.RootUri().JoinRaw(uri.PathTrimmed()),
		ClosestAncestor: ancestor,
		Type:            t,
		Token:           token,
	}

	ls, err := f.acquireByPath(ctx, d, requester, zeroDepth, application, lr)
	if err != nil {
		return nil, err
	}

	return ls, nil
}
// Unlock releases the locks identified by the given tokens.
func (f *DBFS) Unlock(ctx context.Context, tokens ...string) error {
	return f.ls.Unlock(time.Now(), tokens...)
}
// Refresh extends the lock identified by token by duration d and returns
// the refreshed lock details.
func (f *DBFS) Refresh(ctx context.Context, d time.Duration, token string) (lock.LockDetails, error) {
	return f.ls.Refresh(time.Now(), d, token)
}
// acquireByPath acquires locks for all given requests within the current
// lock session. Requests whose key is already held by the session are
// skipped. On conflict, the details are rewritten into user-presentable
// form (absolute path for owner-issued requests; relative path or redacted
// info otherwise) and returned as fs.ErrLockConflict. After successful
// acquisition, ensureConsistency verifies no ancestor changed between path
// resolution and locking.
//
// Fix: the wrapped error message previously read "faield to create lock".
func (f *DBFS) acquireByPath(ctx context.Context, duration time.Duration,
	requester *ent.User, zeroDepth bool, application lock.Application, locks ...*LockByPath) (*LockSession, error) {
	session := LockSessionFromCtx(ctx)

	// Prepare lock details for each file
	lockDetails := make([]lock.LockDetails, 0, len(locks))
	lockedRequest := make([]*LockByPath, 0, len(locks))
	for _, l := range locks {
		ns, root, lKey := lockTupleFromUri(l.Uri, f.user, f.hasher)
		ld := lock.LockDetails{
			Owner: lock.Owner{
				Application: application,
			},
			Ns:        ns,
			Root:      root,
			ZeroDepth: zeroDepth,
			Duration:  duration,
			Type:      l.Type,
			Token:     l.Token,
		}

		// Skip if already locked in current session
		if _, ok := session.Tokens[lKey]; ok {
			continue
		}

		lockDetails = append(lockDetails, ld)
		lockedRequest = append(lockedRequest, l)
	}

	// Acquire lock
	tokens, err := f.ls.Create(time.Now(), lockDetails...)
	// Record any tokens that were granted — even on partial failure — so
	// the session can release them later.
	if len(tokens) > 0 {
		for i, token := range tokens {
			key := lockDetails[i].Key()
			session.Tokens[key] = token
			stackIndex := len(session.TokenStack) - 1
			session.TokenStack[stackIndex] = append(session.TokenStack[stackIndex], key)
		}
	}

	if err != nil {
		var conflicts lock.ConflictError
		if errors.As(err, &conflicts) {
			// Conflict with existing lock, generate user-friendly error message
			conflicts = lo.Map(conflicts, func(c *lock.ConflictDetail, index int) *lock.ConflictDetail {
				lr := lockedRequest[c.Index]
				if lr.ClosestAncestor.Root().Model.OwnerID == requester.ID {
					// Add absolute path for owner issued lock request
					c.Path = newMyUri().JoinRaw(c.Path).String()
					return c
				}

				// Hide token for non-owner requester
				if v, ok := ctx.Value(AlwaysIncludeTokenCtx{}).(bool); !ok || !v {
					c.Token = ""
				}

				// If conflicted resources still under user root, expose the relative path
				userRoot := lr.ClosestAncestor.UserRoot()
				userRootPath := userRoot.Uri(true).Path()
				if strings.HasPrefix(c.Path, userRootPath) {
					c.Path = userRoot.
						Uri(false).
						Join(strings.Split(strings.TrimPrefix(c.Path, userRootPath), fs.Separator)...).String()
					return c
				}

				// Hide sensitive information for non-owner issued lock request
				c.Path = ""
				return c
			})
			return session, fs.ErrLockConflict.WithError(conflicts)
		}

		return session, fmt.Errorf("failed to create lock: %w", err)
	}

	// Check if any ancestor is modified during `getFileByPath` and `lock`.
	if err := f.ensureConsistency(
		ctx,
		lo.Map(lockedRequest, func(item *LockByPath, index int) *File {
			return item.ClosestAncestor
		})...,
	); err != nil {
		return session, err
	}

	return session, nil
}
// Release unlocks every token recorded in the session's top stack frame.
// On success the released keys are forgotten and the frame is popped.
// A nil session is a no-op.
func (f *DBFS) Release(ctx context.Context, session *LockSession) error {
	if session == nil {
		return nil
	}

	stackIndex := len(session.TokenStack) - 1
	err := f.ls.Unlock(time.Now(), lo.Map(session.TokenStack[stackIndex], func(key string, index int) string {
		return session.Tokens[key]
	})...)
	if err == nil {
		for _, key := range session.TokenStack[stackIndex] {
			delete(session.Tokens, key)
		}
		session.TokenStack = session.TokenStack[:len(session.TokenStack)-1]
	}

	return err
}
// ensureConsistency re-reads every given file (plus all of its ancestors)
// from the database and verifies that name, child count, owner and type are
// unchanged. This guards against modifications happening between the
// navigator's first query and lock acquisition.
func (f *DBFS) ensureConsistency(ctx context.Context, files ...*File) error {
	if len(files) == 0 {
		return nil
	}

	// Collect each file and its ancestor chain exactly once; stop climbing
	// as soon as an ancestor is already recorded.
	seen := make(map[int]*File)
	for _, file := range files {
		for node := file; node != nil; node = node.Parent {
			if _, done := seen[node.Model.ID]; done {
				break
			}
			seen[node.Model.ID] = node
		}
	}

	ids := lo.Keys(seen)
	for page := 0; page >= 0; {
		batch, next, err := f.fileClient.GetByIDs(ctx, ids, page)
		if err != nil {
			return fmt.Errorf("failed to check file consistency: %w", err)
		}

		for _, current := range batch {
			cached := seen[current.ID].Model
			if current.Name != cached.Name ||
				current.FileChildren != cached.FileChildren ||
				current.OwnerID != cached.OwnerID ||
				current.Type != cached.Type {
				return fs.ErrModified.
					WithError(fmt.Errorf("file %s has been modified before lock acquisition", current.Name))
			}
		}

		page = next
	}

	return nil
}
// LockSessionFromCtx retrieves the lock session stored in ctx, creating an
// empty one when absent. In either case a fresh stack frame is pushed, so
// callers can later release exactly the locks acquired after this call.
func LockSessionFromCtx(ctx context.Context) *LockSession {
	session, _ := ctx.Value(fs.LockSessionCtxKey{}).(*LockSession)
	if session == nil {
		session = &LockSession{
			Tokens:     make(map[string]string),
			TokenStack: make([][]string, 0),
		}
	}

	session.TokenStack = append(session.TokenStack, make([]string, 0))
	return session
}
// Exclude removes the lock identified by lock.Uri from the session so a
// later Release will not unlock it. The token is returned only when the key
// was present in the current (top) stack frame; otherwise "" is returned
// and the token map is left untouched.
func (l *LockSession) Exclude(lock *LockByPath, u *ent.User, hasher hashid.Encoder) string {
	_, _, key := lockTupleFromUri(lock.Uri, u, hasher)
	token, exists := l.Tokens[key]
	if !exists {
		return ""
	}

	top := len(l.TokenStack) - 1
	inTopFrame := false
	filtered := make([]string, 0, len(l.TokenStack[top]))
	for _, k := range l.TokenStack[top] {
		if k == key {
			inTopFrame = true
			continue
		}
		filtered = append(filtered, k)
	}
	l.TokenStack[top] = filtered

	if !inTopFrame {
		// Held by an outer frame — leave it for that frame to release.
		return ""
	}

	delete(l.Tokens, key)
	return token
}
// LastToken returns the token of the most recently acquired lock in the
// current stack frame, or "" when that frame is empty.
func (l *LockSession) LastToken() string {
	frame := l.TokenStack[len(l.TokenStack)-1]
	if len(frame) == 0 {
		return ""
	}

	return l.Tokens[frame[len(frame)-1]]
}
// WithAlwaysIncludeToken returns a new context with a flag to always include
// the lock token in conflict responses.
func WithAlwaysIncludeToken(ctx context.Context) context.Context {
	return context.WithValue(ctx, AlwaysIncludeTokenCtx{}, true)
}
// lockTupleFromUri derives the lock namespace, root path and combined lock
// key for uri. The namespace is "<uid>/<filesystem>", where uid is the URI's
// ID component or, when absent, the requesting user's numeric ID.
func lockTupleFromUri(uri *fs.URI, u *ent.User, hasher hashid.Encoder) (string, string, string) {
	id := uri.ID(hashid.EncodeUserID(hasher, u.ID))
	if id == "" {
		id = strconv.Itoa(u.ID)
	}

	// Plain concatenation: the previous fmt.Sprintf(id + "/" + ...) passed a
	// dynamic string as the format string with no arguments, so any '%' in
	// the id or filesystem name would corrupt the namespace (go vet flags
	// such non-constant format strings).
	ns := id + "/" + string(uri.FileSystem())
	root := uri.Path()
	return ns, root, ns + "/" + root
}

View File

@@ -0,0 +1,831 @@
package dbfs
import (
"context"
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/samber/lo"
"golang.org/x/tools/container/intsets"
)
// Create creates a file or folder at path, creating any missing intermediate
// folders on the way. If the target already exists with the same type it is
// returned as-is (or with fs.ErrFileExisted when the errOnConflict option is
// set); an existing object of a different type is an error. Only the owner
// may create unless ByPassOwnerCheckCtxKey is set.
func (f *DBFS) Create(ctx context.Context, path *fs.URI, fileType types.FileType, opts ...fs.Option) (fs.File, error) {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	// Get navigator
	navigator, err := f.getNavigator(ctx, path, NavigatorCapabilityCreateFile, NavigatorCapabilityLockFile)
	if err != nil {
		return nil, err
	}

	// Get most recent ancestor
	var ancestor *File
	if o.ancestor != nil {
		ancestor = o.ancestor
	} else {
		ancestor, err = f.getFileByPath(ctx, navigator, path)
		if err != nil && !ent.IsNotFound(err) {
			return nil, fmt.Errorf("failed to get ancestor: %w", err)
		}
	}

	// Ancestor already is the requested target — nothing to create.
	if ancestor.Uri(false).IsSame(path, hashid.EncodeUserID(f.hasher, f.user.ID)) {
		if ancestor.Type() == fileType {
			if o.errOnConflict {
				return ancestor, fs.ErrFileExisted
			}
			// Target file already exist, return it.
			return ancestor, nil
		}

		// File with the same name but different type already exist
		return nil, fs.ErrFileExisted.
			WithError(fmt.Errorf("object with the same name but different type %q already exist", ancestor.Type()))
	}

	if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && ancestor.Owner().ID != f.user.ID {
		return nil, fs.ErrOwnerOnly
	}

	// Lock ancestor
	lockedPath := ancestor.RootUri().JoinRaw(path.PathTrimmed())
	ls, err := f.acquireByPath(ctx, -1, f.user, false, fs.LockApp(fs.ApplicationCreate),
		&LockByPath{lockedPath, ancestor, fileType, ""})
	defer func() { _ = f.Release(ctx, ls) }()
	if err != nil {
		return nil, err
	}

	// For all ancestors in user's desired path, create folders if not exist
	existedElements := ancestor.Uri(false).Elements()
	desired := path.Elements()
	// noChainedCreation refuses to create more than one missing level.
	if (len(desired)-len(existedElements) > 1) && o.noChainedCreation {
		return nil, fs.ErrPathNotExist
	}

	for i := len(existedElements); i < len(desired); i++ {
		// Make sure parent is a folder
		if !ancestor.CanHaveChildren() {
			return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("parent must be a valid folder"))
		}

		// Validate object name
		if err := validateFileName(desired[i]); err != nil {
			return nil, fs.ErrIllegalObjectName.WithError(err)
		}

		if i < len(desired)-1 || fileType == types.FileTypeFolder {
			args := &inventory.CreateFolderParameters{
				Owner: ancestor.Model.OwnerID,
				Name:  desired[i],
			}
			// Apply options for last element
			if i == len(desired)-1 {
				if o.Metadata != nil {
					args.Metadata = o.Metadata
				}
				args.IsSymbolic = o.isSymbolicLink
			}

			// Create folder if it is not the last element or the target is a folder
			fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
			if err != nil {
				return nil, serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
			}

			newFolder, err := fc.CreateFolder(ctx, ancestor.Model, args)
			if err != nil {
				_ = inventory.Rollback(tx)
				return nil, fmt.Errorf("failed to create folder %q: %w", desired[i], err)
			}

			if err := inventory.Commit(tx); err != nil {
				return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit folder creation", err)
			}

			// Descend into the folder just created.
			ancestor = newFile(ancestor, newFolder)
		} else {
			// Last path element and the target is a regular file.
			file, err := f.createFile(ctx, ancestor, desired[i], fileType, o)
			if err != nil {
				return nil, err
			}

			return file, nil
		}
	}

	return ancestor, nil
}
// Rename renames the file at path to newName. The caller must own the file
// unless ByPassOwnerCheckCtxKey is set; the root folder cannot be renamed.
// For regular files the new name's extension is validated against the
// preferred storage policy, and when the extension changes the disabled-
// thumbnail metadata mark is cleared.
func (f *DBFS) Rename(ctx context.Context, path *fs.URI, newName string) (fs.File, error) {
	// Get navigator
	navigator, err := f.getNavigator(ctx, path, NavigatorCapabilityRenameFile, NavigatorCapabilityLockFile)
	if err != nil {
		return nil, err
	}

	// Get target file
	target, err := f.getFileByPath(ctx, navigator, path)
	if err != nil {
		return nil, fmt.Errorf("failed to get target file: %w", err)
	}

	oldName := target.Name()
	if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && target.Owner().ID != f.user.ID {
		return nil, fs.ErrOwnerOnly
	}

	// Root folder cannot be modified
	if target.IsRootFolder() {
		return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot modify root folder"))
	}

	// Validate new name
	if err := validateFileName(newName); err != nil {
		return nil, fs.ErrIllegalObjectName.WithError(err)
	}

	// If target is a file, validate file extension
	policy, err := f.getPreferredPolicy(ctx, target)
	if err != nil {
		return nil, err
	}
	if target.Type() == types.FileTypeFile {
		if err := validateExtension(newName, policy); err != nil {
			return nil, fs.ErrIllegalObjectName.WithError(err)
		}
	}

	// Lock target
	ls, err := f.acquireByPath(ctx, -1, f.user, false, fs.LockApp(fs.ApplicationRename),
		&LockByPath{target.Uri(true), target, target.Type(), ""})
	defer func() { _ = f.Release(ctx, ls) }()
	if err != nil {
		return nil, err
	}

	// Rename target
	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	updated, err := fc.Rename(ctx, target.Model, newName)
	if err != nil {
		_ = inventory.Rollback(tx)
		// Constraint violation means a sibling with the same name exists.
		if ent.IsConstraintError(err) {
			return nil, fs.ErrFileExisted.WithError(err)
		}

		return nil, serializer.NewError(serializer.CodeDBError, "failed to update file", err)
	}

	// Extension changed — clear the disabled-thumbnail mark so a thumbnail
	// can be generated for the new extension.
	if target.Type() == types.FileTypeFile && !strings.EqualFold(filepath.Ext(newName), filepath.Ext(oldName)) {
		if err := fc.RemoveMetadata(ctx, target.Model, ThumbDisabledKey); err != nil {
			_ = inventory.Rollback(tx)
			return nil, serializer.NewError(serializer.CodeDBError, "failed to remove disabled thumbnail mark", err)
		}
	}

	if err := inventory.Commit(tx); err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit rename change", err)
	}

	return target.Replace(updated), nil
}
// SoftDelete moves the given files into the trash bin. Each file's original
// URI and expected collection time are recorded in metadata so it can later
// be restored or garbage-collected. Per-path resolution failures are
// aggregated; the soft-delete of all resolved targets runs in a single
// transaction.
func (f *DBFS) SoftDelete(ctx context.Context, path ...*fs.URI) error {
	ae := serializer.NewAggregateError()
	targets := make([]*File, 0, len(path))
	for _, p := range path {
		// Get navigator
		navigator, err := f.getNavigator(ctx, p, NavigatorCapabilitySoftDelete)
		if err != nil {
			ae.Add(p.String(), err)
			continue
		}

		// Get target file
		target, err := f.getFileByPath(ctx, navigator, p)
		if err != nil {
			ae.Add(p.String(), fmt.Errorf("failed to get target file: %w", err))
			continue
		}

		if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && target.Owner().ID != f.user.ID {
			ae.Add(p.String(), fs.ErrOwnerOnly.WithError(fmt.Errorf("only file owner can delete file without trash bin")))
			continue
		}

		// Root folder cannot be deleted
		if target.IsRootFolder() {
			ae.Add(p.String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot delete root folder")))
			continue
		}

		targets = append(targets, target)
	}

	if len(targets) == 0 {
		return ae.Aggregate()
	}

	// Lock all targets
	lockTargets := lo.Map(targets, func(value *File, key int) *LockByPath {
		return &LockByPath{value.Uri(true), value, value.Type(), ""}
	})

	ls, err := f.acquireByPath(ctx, -1, f.user, false, fs.LockApp(fs.ApplicationSoftDelete), lockTargets...)
	defer func() { _ = f.Release(ctx, ls) }()
	if err != nil {
		return err
	}

	// Start transaction to soft-delete files
	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	for _, target := range targets {
		// Perform soft-delete
		if err := fc.SoftDelete(ctx, target.Model); err != nil {
			_ = inventory.Rollback(tx)
			return serializer.NewError(serializer.CodeDBError, "failed to soft-delete file", err)
		}

		// Save restore uri into metadata, plus the time the trash collector
		// is expected to permanently remove this file (now + the owner
		// group's TrashRetention seconds).
		if err := fc.UpsertMetadata(ctx, target.Model, map[string]string{
			MetadataRestoreUri: target.Uri(true).String(),
			MetadataExpectedCollectTime: strconv.FormatInt(
				time.Now().Add(time.Duration(target.Owner().Edges.Group.Settings.TrashRetention)*time.Second).Unix(),
				10),
		}, nil); err != nil {
			_ = inventory.Rollback(tx)
			return serializer.NewError(serializer.CodeDBError, "failed to update metadata", err)
		}
	}

	// Commit transaction
	if err := inventory.Commit(tx); err != nil {
		return serializer.NewError(serializer.CodeDBError, "Failed to commit soft-delete change", err)
	}

	return ae.Aggregate()
}
// Delete permanently removes the given files (and, via deleteFiles, their
// descendants), grouped per navigator. It returns the entities that became
// stale so callers can schedule physical cleanup. With the UnlinkOnly
// option, entities are unlinked without recycling. Per-path resolution
// errors are aggregated; the deletion itself runs in a single transaction.
func (f *DBFS) Delete(ctx context.Context, path []*fs.URI, opts ...fs.Option) ([]fs.Entity, error) {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	var opt *types.EntityRecycleOption
	if o.UnlinkOnly {
		opt = &types.EntityRecycleOption{
			UnlinkOnly: true,
		}
	}

	ae := serializer.NewAggregateError()
	fileNavGroup := make(map[Navigator][]*File)
	ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)
	for _, p := range path {
		// Get navigator
		navigator, err := f.getNavigator(ctx, p, NavigatorCapabilityDeleteFile, NavigatorCapabilityLockFile)
		if err != nil {
			ae.Add(p.String(), err)
			continue
		}

		// Get target file
		target, err := f.getFileByPath(ctx, navigator, p)
		if err != nil {
			ae.Add(p.String(), fmt.Errorf("failed to get target file: %w", err))
			continue
		}

		// Owner check is skipped for system-initiated deletes.
		if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !o.SysSkipSoftDelete && !ok && target.Owner().ID != f.user.ID {
			ae.Add(p.String(), fs.ErrOwnerOnly)
			continue
		}

		// Root folder cannot be deleted
		if target.IsRootFolder() {
			ae.Add(p.String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot delete root folder")))
			continue
		}

		if _, ok := fileNavGroup[navigator]; !ok {
			fileNavGroup[navigator] = make([]*File, 0)
		}
		fileNavGroup[navigator] = append(fileNavGroup[navigator], target)
	}

	targets := lo.Flatten(lo.Values(fileNavGroup))
	if len(targets) == 0 {
		return nil, ae.Aggregate()
	}

	// Lock all targets
	lockTargets := lo.Map(targets, func(value *File, key int) *LockByPath {
		return &LockByPath{value.Uri(true), value, value.Type(), ""}
	})

	ls, err := f.acquireByPath(ctx, -1, f.user, false, fs.LockApp(fs.ApplicationDelete), lockTargets...)
	defer func() { _ = f.Release(ctx, ls) }()
	if err != nil {
		return nil, err
	}

	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	// Delete targets
	newStaleEntities, storageDiff, err := f.deleteFiles(ctx, fileNavGroup, fc, opt)
	if err != nil {
		_ = inventory.Rollback(tx)
		return nil, serializer.NewError(serializer.CodeDBError, "failed to delete files", err)
	}

	tx.AppendStorageDiff(storageDiff)
	if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit delete change", err)
	}

	return newStaleEntities, ae.Aggregate()
}
// VersionControl manages a file's version entities: when delete is true the
// given version is removed and any freed storage is settled immediately;
// otherwise the version becomes the file's primary one. Only the owner may
// do this unless ByPassOwnerCheckCtxKey is set, and the target must be a
// regular file.
func (f *DBFS) VersionControl(ctx context.Context, path *fs.URI, versionId int, delete bool) error {
	// Get navigator
	navigator, err := f.getNavigator(ctx, path, NavigatorCapabilityVersionControl)
	if err != nil {
		return err
	}

	// Get target file
	ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)
	target, err := f.getFileByPath(ctx, navigator, path)
	if err != nil {
		return fmt.Errorf("failed to get target file: %w", err)
	}

	if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && target.Owner().ID != f.user.ID {
		return fs.ErrOwnerOnly
	}

	// Target must be a file
	if target.Type() != types.FileTypeFile {
		return fs.ErrNotSupportedAction.WithError(fmt.Errorf("target must be a valid file"))
	}

	// Lock file
	ls, err := f.acquireByPath(ctx, -1, f.user, true, fs.LockApp(fs.ApplicationVersionControl),
		&LockByPath{target.Uri(true), target, target.Type(), ""})
	defer func() { _ = f.Release(ctx, ls) }()
	if err != nil {
		return err
	}

	if delete {
		storageDiff, err := f.deleteEntity(ctx, target, versionId)
		if err != nil {
			return err
		}

		// The version is already gone at this point, so a failure to settle
		// the freed storage is only logged, not returned.
		if err := f.userClient.ApplyStorageDiff(ctx, storageDiff); err != nil {
			f.l.Error("Failed to apply storage diff after deleting version: %s", err)
		}
		return nil
	} else {
		return f.setCurrentVersion(ctx, target, versionId)
	}
}
// Restore moves trashed files back to the location recorded in their
// MetadataRestoreUri metadata. Files lacking that mark, or carrying an
// unparsable restore URI, are reported as errors; all failures are
// aggregated instead of aborting the batch.
func (f *DBFS) Restore(ctx context.Context, path ...*fs.URI) error {
	ae := serializer.NewAggregateError()
	targets := make([]*File, 0, len(path))
	ctx = context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, true)
	for _, p := range path {
		// Get navigator
		navigator, err := f.getNavigator(ctx, p, NavigatorCapabilityRestore)
		if err != nil {
			ae.Add(p.String(), err)
			continue
		}

		// Get target file
		target, err := f.getFileByPath(ctx, navigator, p)
		if err != nil {
			ae.Add(p.String(), fmt.Errorf("failed to get file: %w", err))
			continue
		}

		targets = append(targets, target)
	}

	if len(targets) == 0 {
		return ae.Aggregate()
	}

	// Pair each trashed file's current URI with the folder it should be
	// restored into (the parent of the recorded restore URI).
	allTrashUriStr := lo.FilterMap(targets, func(t *File, key int) ([]*fs.URI, bool) {
		if restoreUri, ok := t.Metadata()[MetadataRestoreUri]; ok {
			srcUrl, err := fs.NewUriFromString(restoreUri)
			if err != nil {
				ae.Add(t.Uri(false).String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("invalid restore uri: %w", err)))
				return nil, false
			}

			return []*fs.URI{t.Uri(false), srcUrl.DirUri()}, true
		}

		ae.Add(t.Uri(false).String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot restore file without required metadata mark")))
		return nil, false
	})

	// Move (isCopy=false) each file back to its original location.
	for _, uris := range allTrashUriStr {
		if err := f.MoveOrCopy(ctx, []*fs.URI{uris[0]}, uris[1], false); err != nil {
			if !ae.Merge(err) {
				ae.Add(uris[0].String(), err)
			}
		}
	}

	return ae.Aggregate()
}
// MoveOrCopy moves (isCopy == false) or copies (isCopy == true) every file
// in path into the dst folder. dst must resolve to an existing folder owned
// by the requester unless ByPassOwnerCheckCtxKey is set. A folder can never
// be moved or copied into itself or one of its descendants, and the root
// folder can never be moved. When moving files out of the trash bin, the
// destination is additionally locked under each file's original display
// name, since the move restores that name. Per-path errors are aggregated;
// the actual move/copy runs in a single transaction.
//
// Fixes two error-message typos ("faield", "file form %s").
func (f *DBFS) MoveOrCopy(ctx context.Context, path []*fs.URI, dst *fs.URI, isCopy bool) error {
	targets := make([]*File, 0, len(path))
	dstNavigator, err := f.getNavigator(ctx, dst, NavigatorCapabilityLockFile)
	if err != nil {
		return err
	}

	// Get destination file
	destination, err := f.getFileByPath(ctx, dstNavigator, dst)
	if err != nil {
		return fmt.Errorf("failed to get destination folder: %w", err)
	}

	if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && destination.Owner().ID != f.user.ID {
		return fs.ErrOwnerOnly
	}

	// Target must be a folder
	if !destination.CanHaveChildren() {
		return fs.ErrNotSupportedAction.WithError(fmt.Errorf("destination must be a valid folder"))
	}

	ae := serializer.NewAggregateError()
	fileNavGroup := make(map[Navigator][]*File)
	dstRootPath := destination.Uri(true)
	ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)
	ctx = context.WithValue(ctx, inventory.LoadFileMetadata{}, true)
	for _, p := range path {
		// Get navigator
		navigator, err := f.getNavigator(ctx, p, NavigatorCapabilityLockFile)
		if err != nil {
			ae.Add(p.String(), err)
			continue
		}

		// Check fs capability
		if !canMoveOrCopyTo(p, dst, isCopy) {
			ae.Add(p.String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot move or copy file from %s to %s", p.String(), dst.String())))
			continue
		}

		// Get target file
		target, err := f.getFileByPath(ctx, navigator, p)
		if err != nil {
			ae.Add(p.String(), fmt.Errorf("failed to get file: %w", err))
			continue
		}

		if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && target.Owner().ID != f.user.ID {
			ae.Add(p.String(), fs.ErrOwnerOnly)
			continue
		}

		// Root folder cannot be moved or copied
		if target.IsRootFolder() {
			ae.Add(p.String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot move root folder")))
			continue
		}

		// Cannot move or copy folder to its descendant
		if target.Type() == types.FileTypeFolder &&
			dstRootPath.EqualOrIsDescendantOf(target.Uri(true), hashid.EncodeUserID(f.hasher, f.user.ID)) {
			ae.Add(p.String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot move or copy folder to itself or its descendant")))
			continue
		}

		targets = append(targets, target)
		if isCopy {
			if _, ok := fileNavGroup[navigator]; !ok {
				fileNavGroup[navigator] = make([]*File, 0)
			}
			fileNavGroup[navigator] = append(fileNavGroup[navigator], target)
		}
	}

	if len(targets) > 0 {
		// Lock all targets
		lockTargets := lo.Map(targets, func(value *File, key int) *LockByPath {
			return &LockByPath{value.Uri(true), value, value.Type(), ""}
		})

		// Lock destination
		dstBase := destination.Uri(true)
		dstLockTargets := lo.Map(targets, func(value *File, key int) *LockByPath {
			return &LockByPath{dstBase.Join(value.Name()), destination, value.Type(), ""}
		})

		allLockTargets := make([]*LockByPath, 0, len(targets)*2)
		if !isCopy {
			// For moving files from trash bin, also lock the dst with restored name.
			dstRestoreTargets := lo.FilterMap(targets, func(value *File, key int) (*LockByPath, bool) {
				if _, ok := value.Metadata()[MetadataRestoreUri]; ok {
					return &LockByPath{dstBase.Join(value.DisplayName()), destination, value.Type(), ""}, true
				}
				return nil, false
			})
			allLockTargets = append(allLockTargets, lockTargets...)
			allLockTargets = append(allLockTargets, dstRestoreTargets...)
		}

		allLockTargets = append(allLockTargets, dstLockTargets...)
		ls, err := f.acquireByPath(ctx, -1, f.user, false, fs.LockApp(fs.ApplicationMoveCopy), allLockTargets...)
		defer func() { _ = f.Release(ctx, ls) }()
		if err != nil {
			return err
		}

		// Start transaction to move files
		fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
		if err != nil {
			return serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
		}

		var (
			storageDiff inventory.StorageDiff
		)
		if isCopy {
			_, storageDiff, err = f.copyFiles(ctx, fileNavGroup, destination, fc)
		} else {
			storageDiff, err = f.moveFiles(ctx, targets, destination, fc, dstNavigator)
		}
		if err != nil {
			_ = inventory.Rollback(tx)
			return err
		}

		tx.AppendStorageDiff(storageDiff)
		if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
			return serializer.NewError(serializer.CodeDBError, "Failed to commit move change", err)
		}

		// TODO: after move, dbfs cache should be cleared
	}

	return ae.Aggregate()
}
// deleteEntity unlinks the entity with entityId from target and returns the
// resulting storage diff. The current primary version cannot be deleted.
// If the removed entity still had a pending upload session, its session
// metadata is cleared as well.
func (f *DBFS) deleteEntity(ctx context.Context, target *File, entityId int) (inventory.StorageDiff, error) {
	if target.PrimaryEntityID() == entityId {
		return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot delete current version"))
	}

	targetVersion, found := lo.Find(target.Entities(), func(item fs.Entity) bool {
		return item.ID() == entityId
	})
	if !found {
		return nil, fs.ErrEntityNotExist.WithError(fmt.Errorf("version not found"))
	}

	diff, err := f.fileClient.UnlinkEntity(ctx, targetVersion.Model(), target.Model, target.Owner())
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to unlink entity", err)
	}

	if targetVersion.UploadSessionID() != nil {
		err = f.fileClient.RemoveMetadata(ctx, target.Model, MetadataUploadSessionID)
		if err != nil {
			return nil, serializer.NewError(serializer.CodeDBError, "Failed to remove upload session metadata", err)
		}
	}

	return diff, nil
}
// setCurrentVersion switches target's primary entity to versionId. The
// candidate must be a fully uploaded version entity (no pending upload
// session) already linked to the file. Thumbnail entities are capped to
// zero so thumbnails of the previous version are discarded, and the freed
// space is settled through the accumulated storage diff.
func (f *DBFS) setCurrentVersion(ctx context.Context, target *File, versionId int) error {
	if target.PrimaryEntityID() == versionId {
		// Already the current version; nothing to do.
		return nil
	}

	targetVersion, found := lo.Find(target.Entities(), func(item fs.Entity) bool {
		return item.ID() == versionId && item.Type() == types.EntityTypeVersion && item.UploadSessionID() == nil
	})
	if !found {
		return fs.ErrEntityNotExist.WithError(fmt.Errorf("version not found"))
	}

	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	// Use the transactional client and roll back on failure. The original
	// called f.fileClient here (bypassing the tx-bound client) and returned
	// without rolling back, leaking the open transaction.
	if err := fc.SetPrimaryEntity(ctx, target.Model, targetVersion.ID()); err != nil {
		_ = inventory.Rollback(tx)
		return serializer.NewError(serializer.CodeDBError, "Failed to set primary entity", err)
	}

	// Cap thumbnail entities
	diff, err := fc.CapEntities(ctx, target.Model, target.Owner(), 0, types.EntityTypeThumbnail)
	if err != nil {
		_ = inventory.Rollback(tx)
		return serializer.NewError(serializer.CodeDBError, "Failed to cap thumbnail entities", err)
	}

	tx.AppendStorageDiff(diff)
	if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
		return serializer.NewError(serializer.CodeDBError, "Failed to commit set current version", err)
	}

	return nil
}
// deleteFiles walks each navigator's file group (bounded by the user group's
// MaxWalkedFiles setting), deletes everything found in one batch per group,
// and returns the entities made stale plus the merged storage diff. Callers
// run this inside a transaction carried by ctx.
func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, fc inventory.FileClient, opt *types.EntityRecycleOption) ([]fs.Entity, inventory.StorageDiff, error) {
	if f.user.Edges.Group == nil {
		return nil, nil, fmt.Errorf("user group not loaded")
	}

	// Shared walk budget across all navigator groups.
	limit := max(f.user.Edges.Group.Settings.MaxWalkedFiles, 1)
	allStaleEntities := make([]fs.Entity, 0, len(targets))
	storageDiff := make(inventory.StorageDiff)
	for n, files := range targets {
		// Let navigator use tx
		reset, err := n.FollowTx(ctx)
		if err != nil {
			return nil, nil, err
		}
		// NOTE(review): deferred inside the loop, so every reset fires only
		// when this function returns — presumably intentional, since the
		// navigators must keep following the tx until all groups are done.
		defer reset()

		// List all files to be deleted
		toBeDeletedFiles := make([]*File, 0, len(files))
		if err := n.Walk(ctx, files, limit, intsets.MaxInt, func(targets []*File, level int) error {
			limit -= len(targets)
			toBeDeletedFiles = append(toBeDeletedFiles, targets...)
			return nil
		}); err != nil {
			return nil, nil, fmt.Errorf("failed to walk files: %w", err)
		}

		// Delete files
		staleEntities, diff, err := fc.Delete(ctx, lo.Map(toBeDeletedFiles, func(item *File, index int) *ent.File {
			return item.Model
		}), opt)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to delete files: %w", err)
		}

		storageDiff.Merge(diff)
		allStaleEntities = append(allStaleEntities, lo.Map(staleEntities, func(item *ent.Entity, index int) fs.Entity {
			return fs.NewEntity(item)
		})...)
	}

	return allStaleEntities, storageDiff, nil
}
// copyFiles recursively copies each navigator's file group into destination,
// validating the destination owner's capacity layer by layer. It returns a
// map from source file ID to the copied first-layer file, plus the merged
// storage diff. The walk is bounded by the user group's MaxWalkedFiles
// setting.
func (f *DBFS) copyFiles(ctx context.Context, targets map[Navigator][]*File, destination *File, fc inventory.FileClient) (map[int]*ent.File, inventory.StorageDiff, error) {
	if f.user.Edges.Group == nil {
		return nil, nil, fmt.Errorf("user group not loaded")
	}

	limit := max(f.user.Edges.Group.Settings.MaxWalkedFiles, 1)
	capacity, err := f.Capacity(ctx, destination.Owner())
	if err != nil {
		return nil, nil, fmt.Errorf("copy files: failed to destination owner capacity: %w", err)
	}

	dstAncestors := lo.Map(destination.AncestorsChain(), func(item *File, index int) *ent.File {
		return item.Model
	})

	// newTargetsMap is the map of between new target files in first layer, and its src file ID.
	newTargetsMap := make(map[int]*ent.File)
	storageDiff := make(inventory.StorageDiff)
	var diff inventory.StorageDiff
	for n, files := range targets {
		// Seed the copy: each source subtree starts from the destination's
		// ancestor chain.
		initialDstMap := make(map[int][]*ent.File)
		for _, file := range files {
			initialDstMap[file.Model.FileChildren] = dstAncestors
		}

		firstLayer := true
		// Let navigator use tx
		reset, err := n.FollowTx(ctx)
		if err != nil {
			return nil, nil, err
		}
		// NOTE(review): deferred inside the loop on purpose — all resets
		// fire at function return, after every group has been copied.
		defer reset()

		if err := n.Walk(ctx, files, limit, intsets.MaxInt, func(targets []*File, level int) error {
			// check capacity for each file
			sizeTotal := int64(0)
			for _, file := range targets {
				sizeTotal += file.SizeUsed()
			}
			if err := f.validateUserCapacityRaw(ctx, sizeTotal, capacity); err != nil {
				return fs.ErrInsufficientCapacity
			}

			limit -= len(targets)
			initialDstMap, diff, err = fc.Copy(ctx, lo.Map(targets, func(item *File, index int) *ent.File {
				return item.Model
			}), initialDstMap)
			if err != nil {
				// Constraint violation means a name collision at destination.
				if ent.IsConstraintError(err) {
					return fs.ErrFileExisted.WithError(err)
				}
				return serializer.NewError(serializer.CodeDBError, "Failed to copy files", err)
			}

			storageDiff.Merge(diff)
			if firstLayer {
				// Remember the copies created directly under destination.
				for k, v := range initialDstMap {
					newTargetsMap[k] = v[0]
				}
			}

			// Track usage locally so later layers see the updated total.
			capacity.Used += sizeTotal
			firstLayer = false
			return nil
		}); err != nil {
			return nil, nil, fmt.Errorf("failed to walk files: %w", err)
		}
	}

	return newTargetsMap, storageDiff, nil
}
// moveFiles re-parents targets under destination. Files that were previously
// soft-deleted (carrying trash-bin metadata) are additionally renamed back to
// their original display name and have the trash metadata stripped.
// Moving never changes storage usage, so the returned diff is always nil.
func (f *DBFS) moveFiles(ctx context.Context, targets []*File, destination *File, fc inventory.FileClient, n Navigator) (inventory.StorageDiff, error) {
	fileModels := make([]*ent.File, 0, len(targets))
	for _, t := range targets {
		fileModels = append(fileModels, t.Model)
	}

	// Re-parent all targets in a single call.
	if err := fc.SetParent(ctx, fileModels, destination.Model); err != nil {
		if ent.IsConstraintError(err) {
			return nil, fs.ErrFileExisted.WithError(err)
		}
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to move file", err)
	}

	// Restore files that are being moved out of the trash bin.
	for _, target := range targets {
		if _, fromTrash := target.Metadata()[MetadataRestoreUri]; !fromTrash {
			continue
		}

		// Rename back to the file's original display name.
		if _, err := fc.Rename(ctx, target.Model, target.DisplayName()); err != nil {
			if ent.IsConstraintError(err) {
				return nil, fs.ErrFileExisted.WithError(err)
			}
			return nil, serializer.NewError(serializer.CodeDBError, "Failed to rename file from trash bin to its original name", err)
		}

		// Strip trash-bin bookkeeping metadata.
		if err := fc.RemoveMetadata(ctx, target.Model, MetadataRestoreUri, MetadataExpectedCollectTime); err != nil {
			return nil, serializer.NewError(serializer.CodeDBError, "Failed to remove trash related metadata", err)
		}
	}

	return nil, nil
}

View File

@@ -0,0 +1,172 @@
package dbfs
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
)
var myNavigatorCapability = &boolset.BooleanSet{}
// NewMyNavigator creates a navigator for user's "my" file system.
func NewMyNavigator(u *ent.User, fileClient inventory.FileClient, userClient inventory.UserClient, l logging.Logger,
	config *setting.DBFS, hasher hashid.Encoder) Navigator {
	n := &myNavigator{
		user:       u,
		l:          l,
		fileClient: fileClient,
		userClient: userClient,
		config:     config,
	}
	n.baseNavigator = newBaseNavigator(fileClient, defaultFilter, u, hasher, config)
	return n
}
// myNavigator navigates a single user's personal ("my") file tree.
type myNavigator struct {
	l          logging.Logger
	user       *ent.User
	fileClient inventory.FileClient
	userClient inventory.UserClient
	config     *setting.DBFS
	*baseNavigator

	// root is the cached root of the user's file tree, lazily resolved in To.
	root *File
	// disableRecycle is set once state is persisted/restored so Recycle won't release root.
	disableRecycle bool
	// persist is the deferred state-persist hook installed by PersistState and invoked by Recycle.
	persist func()
}
// Recycle flushes any pending state persistence and, unless recycling has
// been disabled, releases the cached root file.
func (n *myNavigator) Recycle() {
	if p := n.persist; p != nil {
		n.persist = nil
		p()
	}

	if n.disableRecycle || n.root == nil {
		return
	}
	n.root.Recycle()
}
// PersistState arranges for the navigator root to be cached into kv under key
// on the next Recycle, and disables recycling of the in-memory state.
func (n *myNavigator) PersistState(kv cache.Driver, key string) {
	n.disableRecycle = true
	n.persist = func() {
		kv.Set(key, n.root, ContextHintTTL)
	}
}
// RestoreState restores a previously persisted root file from s.
func (n *myNavigator) RestoreState(s State) error {
	n.disableRecycle = true
	state, ok := s.(*File)
	if !ok {
		return fmt.Errorf("invalid state type: %T", s)
	}
	n.root = state
	return nil
}
// To resolves path inside the user's file tree, lazily initializing the cached
// root on first use. On a failed walk it returns the most-recent ancestor
// together with the wrapped error, matching the Navigator contract.
func (n *myNavigator) To(ctx context.Context, path *fs.URI) (*File, error) {
	if n.root == nil {
		// Anonymous user does not have a root folder.
		if inventory.IsAnonymousUser(n.user) {
			return nil, ErrLoginRequired
		}
		// The URI may carry an explicit fs owner ID; it must match the current user.
		fsUid, err := n.hasher.Decode(path.ID(hashid.EncodeUserID(n.hasher, n.user.ID)), hashid.UserID)
		if err != nil {
			return nil, fs.ErrPathNotExist.WithError(fmt.Errorf("invalid user id"))
		}
		if fsUid != n.user.ID {
			return nil, ErrPermissionDenied
		}
		targetUser, err := n.userClient.GetLoginUserByID(ctx, fsUid)
		if err != nil {
			return nil, fs.ErrPathNotExist.WithError(fmt.Errorf("user not found: %w", err))
		}
		rootFile, err := n.fileClient.Root(ctx, targetUser)
		if err != nil {
			// Caller is expected to initialize the root folder on ErrFsNotInitialized.
			n.l.Info("User's root folder not found: %s, will initialize it.", err)
			return nil, ErrFsNotInitialized
		}
		n.root = newFile(nil, rootFile)
		rootPath := path.Root()
		n.root.Path[pathIndexRoot], n.root.Path[pathIndexUser] = rootPath, rootPath
		n.root.OwnerModel = targetUser
		n.root.IsUserRoot = true
		n.root.CapabilitiesBs = n.Capabilities(false).Capability
	}
	// Walk down the path one element at a time, remembering the last ancestor.
	current, lastAncestor := n.root, n.root
	elements := path.Elements()
	var err error
	for index, element := range elements {
		lastAncestor = current
		current, err = n.walkNext(ctx, current, element, index == len(elements)-1)
		if err != nil {
			return lastAncestor, fmt.Errorf("failed to walk into %q: %w", element, err)
		}
	}
	return current, nil
}
// Children lists the children of parent; it delegates to the shared base
// navigator implementation.
func (n *myNavigator) Children(ctx context.Context, parent *File, args *ListArgs) (*ListResult, error) {
	return n.baseNavigator.children(ctx, parent, args)
}
// walkNext resolves one path element under root via the base navigator.
func (n *myNavigator) walkNext(ctx context.Context, root *File, next string, isLeaf bool) (*File, error) {
	return n.baseNavigator.walkNext(ctx, root, next, isLeaf)
}
// Capabilities describes the operations this navigator supports. Ordering
// options are unavailable while searching.
func (n *myNavigator) Capabilities(isSearching bool) *fs.NavigatorProps {
	props := &fs.NavigatorProps{
		Capability:  myNavigatorCapability,
		MaxPageSize: n.config.MaxPageSize,
	}
	if !isSearching {
		props.OrderByOptions = fullOrderByOption
		props.OrderDirectionOptions = fullOrderDirectionOption
	}
	return props
}
// Walk performs a breadth-first traversal via the base navigator.
func (n *myNavigator) Walk(ctx context.Context, levelFiles []*File, limit, depth int, f WalkFunc) error {
	return n.baseNavigator.walk(ctx, levelFiles, limit, depth, f)
}
// FollowTx switches the navigator's clients to ones bound to the transaction
// found in ctx. It returns a revert function that restores the previous
// clients; the caller must invoke it when the transaction scope ends.
func (n *myNavigator) FollowTx(ctx context.Context) (func(), error) {
	if _, ok := ctx.Value(inventory.TxCtx{}).(*inventory.Tx); !ok {
		return nil, fmt.Errorf("navigator: no inherited transaction found in context")
	}
	newFileClient, _, _, err := inventory.WithTx(ctx, n.fileClient)
	if err != nil {
		return nil, err
	}
	newUserClient, _, _, err := inventory.WithTx(ctx, n.userClient)
	if err != nil {
		// Fix: this error was previously ignored, which could leave the
		// navigator using a user client not bound to the transaction.
		return nil, err
	}
	oldFileClient, oldUserClient := n.fileClient, n.userClient
	revert := func() {
		n.fileClient = oldFileClient
		n.userClient = oldUserClient
		n.baseNavigator.fileClient = oldFileClient
	}
	n.fileClient = newFileClient
	n.userClient = newUserClient
	n.baseNavigator.fileClient = newFileClient
	return revert, nil
}
// ExecuteHook performs custom operations around certain actions; the "my"
// navigator defines no hooks and always succeeds.
func (n *myNavigator) ExecuteHook(ctx context.Context, hookType fs.HookType, file *File) error {
	return nil
}

View File

@@ -0,0 +1,536 @@
package dbfs
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/samber/lo"
)
var (
	// Sentinel errors shared by all navigators.
	ErrFsNotInitialized        = fmt.Errorf("fs not initialized")
	ErrPermissionDenied        = serializer.NewError(serializer.CodeNoPermissionErr, "Permission denied", nil)
	ErrShareIncorrectPassword  = serializer.NewError(serializer.CodeIncorrectPassword, "Incorrect share password", nil)
	ErrFileCountLimitedReached = serializer.NewError(serializer.CodeFileCountLimitedReached, "Walked file count reached limit", nil)
	ErrSymbolicFolderFound     = serializer.NewError(serializer.CodeNoPermissionErr, "Symbolic folder cannot be walked into", nil)
	ErrLoginRequired           = serializer.NewError(serializer.CodeCheckLogin, "Login required", nil)

	// Order-by/direction option sets advertised through NavigatorProps.
	fullOrderByOption          = []string{"name", "size", "updated_at", "created_at"}
	searchLimitedOrderByOption = []string{"created_at"}
	fullOrderDirectionOption   = []string{"asc", "desc"}
)
type (
	// Navigator is a navigator for database file system.
	Navigator interface {
		Recycle()
		// To returns the file by path. If given path is not exist, returns ErrFileNotFound and most-recent ancestor.
		To(ctx context.Context, path *fs.URI) (*File, error)
		// Children returns the children of the parent file.
		Children(ctx context.Context, parent *File, args *ListArgs) (*ListResult, error)
		// Capabilities returns the capabilities of the navigator.
		Capabilities(isSearching bool) *fs.NavigatorProps
		// Walk walks the file tree until limit is reached.
		Walk(ctx context.Context, levelFiles []*File, limit, depth int, f WalkFunc) error
		// PersistState tells navigator to persist the state of the navigator before recycle.
		PersistState(kv cache.Driver, key string)
		// RestoreState restores the state of the navigator.
		RestoreState(s State) error
		// FollowTx let the navigator inherit the transaction. Return a function to reset back to previous DB client.
		FollowTx(ctx context.Context) (func(), error)
		// ExecuteHook performs custom operations before or after certain actions.
		ExecuteHook(ctx context.Context, hookType fs.HookType, file *File) error
	}
	// State is an opaque navigator state handed between PersistState/RestoreState.
	State interface{}
	// NavigatorCapability is the bit index of a single navigator capability.
	NavigatorCapability int
	// ListArgs carries parameters of a list/search request.
	ListArgs struct {
		Page           *inventory.PaginationArgs
		Search         *inventory.SearchFileParameters // non-nil switches listing to search mode
		SharedWithMe   bool
		StreamCallback func([]*File) // when set, results are streamed instead of accumulated
	}
	// ListResult is the result of a list operation.
	ListResult struct {
		Files                 []*File
		MixedType             bool
		Pagination            *inventory.PaginationResults
		RecursionLimitReached bool
		SingleFileView        bool
	}
	// WalkFunc is invoked once per BFS level with the files of that level.
	WalkFunc func([]*File, int) error
)
// Bit positions of individual navigator capabilities within a
// boolset.BooleanSet. The placeholder entries appear to reserve bits used
// outside the community edition — do not remove or reorder any entry.
const (
	NavigatorCapabilityCreateFile NavigatorCapability = iota
	NavigatorCapabilityRenameFile
	NavigatorCapability_CommunityPlacehodler1
	NavigatorCapability_CommunityPlacehodler2
	NavigatorCapability_CommunityPlacehodler3
	NavigatorCapability_CommunityPlacehodler4
	NavigatorCapabilityUploadFile
	NavigatorCapabilityDownloadFile
	NavigatorCapabilityUpdateMetadata
	NavigatorCapabilityListChildren
	NavigatorCapabilityGenerateThumb
	NavigatorCapability_CommunityPlacehodler5
	NavigatorCapability_CommunityPlacehodler6
	NavigatorCapability_CommunityPlacehodler7
	NavigatorCapabilityDeleteFile
	NavigatorCapabilityLockFile
	NavigatorCapabilitySoftDelete
	NavigatorCapabilityRestore
	NavigatorCapabilityShare
	NavigatorCapabilityInfo
	NavigatorCapabilityVersionControl
	NavigatorCapability_CommunityPlacehodler8
	NavigatorCapability_CommunityPlacehodler9
	NavigatorCapabilityEnterFolder

	// searchTokenSeparator joins the recursion level and the inner database
	// page token inside a recursive-search pagination token.
	searchTokenSeparator = "|"
)
// init populates the capability bitsets of the four navigator flavors
// ("my", share, trash, shared-with-me).
func init() {
	boolset.Sets(map[NavigatorCapability]bool{
		NavigatorCapabilityCreateFile:     true,
		NavigatorCapabilityRenameFile:     true,
		NavigatorCapabilityUploadFile:     true,
		NavigatorCapabilityDownloadFile:   true,
		NavigatorCapabilityUpdateMetadata: true,
		NavigatorCapabilityListChildren:   true,
		NavigatorCapabilityGenerateThumb:  true,
		NavigatorCapabilityDeleteFile:     true,
		NavigatorCapabilityLockFile:       true,
		NavigatorCapabilitySoftDelete:     true,
		NavigatorCapabilityShare:          true,
		NavigatorCapabilityInfo:           true,
		NavigatorCapabilityVersionControl: true,
		NavigatorCapabilityEnterFolder:    true,
	}, myNavigatorCapability)
	boolset.Sets(map[NavigatorCapability]bool{
		NavigatorCapabilityDownloadFile:   true,
		NavigatorCapabilityListChildren:   true,
		NavigatorCapabilityGenerateThumb:  true,
		NavigatorCapabilityLockFile:       true,
		NavigatorCapabilityInfo:           true,
		NavigatorCapabilityVersionControl: true,
		NavigatorCapabilityEnterFolder:    true,
	}, shareNavigatorCapability)
	boolset.Sets(map[NavigatorCapability]bool{
		NavigatorCapabilityListChildren: true,
		NavigatorCapabilityDeleteFile:   true,
		NavigatorCapabilityLockFile:     true,
		NavigatorCapabilityRestore:      true,
		NavigatorCapabilityInfo:         true,
	}, trashNavigatorCapability)
	boolset.Sets(map[NavigatorCapability]bool{
		NavigatorCapabilityListChildren: true,
		NavigatorCapabilityDownloadFile: true,
		NavigatorCapabilityEnterFolder:  true,
	}, sharedWithMeNavigatorCapability)
}
// ==================== Base Navigator ====================
type (
	// fileFilter decides whether f should be visible in listings; it may
	// also substitute a replacement file.
	fileFilter func(ctx context.Context, f *File) (*File, bool)

	// baseNavigator provides walking/listing/searching logic shared by all
	// concrete navigators.
	baseNavigator struct {
		fileClient inventory.FileClient
		listFilter fileFilter
		user       *ent.User
		hasher     hashid.Encoder
		config     *setting.DBFS
	}
)
var defaultFilter = func(ctx context.Context, f *File) (*File, bool) { return f, true }
// newBaseNavigator constructs a baseNavigator with the given collaborators.
func newBaseNavigator(fileClient inventory.FileClient, filterFunc fileFilter, user *ent.User,
	hasher hashid.Encoder, config *setting.DBFS) *baseNavigator {
	b := new(baseNavigator)
	b.fileClient = fileClient
	b.listFilter = filterFunc
	b.user = user
	b.hasher = hasher
	b.config = config
	return b
}
// walkNext resolves the child named next under root. A cached child is reused
// unless next is the leaf of the requested path (leaves always hit the DB so
// the freshest model is returned). A nil root walks from the top level.
func (b *baseNavigator) walkNext(ctx context.Context, root *File, next string, isLeaf bool) (*File, error) {
	var model *ent.File
	if root != nil {
		model = root.Model
		// Symbolic folders cannot be traversed into.
		if root.IsSymbolic() {
			return nil, ErrSymbolicFolderFound
		}
		root.mu.Lock()
		if child, ok := root.Children[next]; ok && !isLeaf {
			root.mu.Unlock()
			return child, nil
		}
		root.mu.Unlock()
	}
	child, err := b.fileClient.GetChildFile(ctx, model, b.user.ID, next, isLeaf)
	if err != nil {
		if ent.IsNotFound(err) {
			return nil, fs.ErrPathNotExist.WithError(err)
		}
		// Fix: typo in error message ("faield" -> "failed").
		return nil, fmt.Errorf("failed to get child %q: %w", next, err)
	}
	return newFile(root, child), nil
}
// walkUp resolves the parent folder of child and links the two files.
func (b *baseNavigator) walkUp(ctx context.Context, child *File) (*File, error) {
	parent, err := b.fileClient.GetParentFile(ctx, child.Model, false)
	if err != nil {
		// Fix: typo in error message ("faield" -> "failed").
		return nil, fmt.Errorf("failed to get Parent for %q: %w", child.Name(), err)
	}
	return newParentFile(parent, child), nil
}
// children lists the direct children of parent (or the top level when parent
// is nil), applying the navigator's list filter. When args.Search is set, the
// request is routed to the recursive search implementation instead.
func (b *baseNavigator) children(ctx context.Context, parent *File, args *ListArgs) (*ListResult, error) {
	var model *ent.File
	if parent != nil {
		model = parent.Model
		// Only folders can be listed.
		if parent.Model.Type != int(types.FileTypeFolder) {
			return nil, fs.ErrPathNotExist
		}
		if parent.IsSymbolic() {
			return nil, ErrSymbolicFolderFound
		}
		// Refresh the user-side path of the parent before deriving children URIs.
		parent.Path[pathIndexUser] = parent.Uri(false)
	}
	if args.Search != nil {
		return b.search(ctx, parent, args)
	}
	children, err := b.fileClient.GetChildFiles(ctx, &inventory.ListFileParameters{
		PaginationArgs: args.Page,
		SharedWithMe:   args.SharedWithMe,
	}, b.user.ID, model)
	if err != nil {
		return nil, fmt.Errorf("failed to get children: %w", err)
	}
	return &ListResult{
		Files: lo.FilterMap(children.Files, func(model *ent.File, index int) (*File, bool) {
			f := newFile(parent, model)
			return b.listFilter(ctx, f)
		}),
		MixedType:  children.MixedType,
		Pagination: children.PaginationResults,
	}, nil
}
// walk performs a breadth-first traversal starting at levelFiles, invoking f
// once per level, until either depth levels have been visited or the walked
// file count reaches limit (returning ErrFileCountLimitedReached in the
// latter case). Symbolic folders are never descended into.
func (b *baseNavigator) walk(ctx context.Context, levelFiles []*File, limit, depth int, f WalkFunc) error {
	walked := 0
	if len(levelFiles) == 0 {
		return nil
	}
	owner := levelFiles[0].Owner()
	level := 0
	for walked <= limit && depth >= 0 {
		if len(levelFiles) == 0 {
			break
		}
		stop := false
		depth--
		// Truncate the level if it would exceed the walk limit.
		if len(levelFiles) > limit-walked {
			levelFiles = levelFiles[:limit-walked]
			stop = true
		}
		if err := f(levelFiles, level); err != nil {
			return err
		}
		if stop {
			return ErrFileCountLimitedReached
		}
		walked += len(levelFiles)
		folders := lo.Filter(levelFiles, func(f *File, index int) bool {
			return f.Model.Type == int(types.FileTypeFolder) && !f.IsSymbolic()
		})
		if walked >= limit || len(folders) == 0 {
			break
		}
		levelFiles = levelFiles[:0]
		leftCredit := limit - walked
		parents := lo.SliceToMap(folders, func(file *File) (int, *File) {
			return file.Model.ID, file
		})
		// Fix: the cursor token must survive pagination rounds. It was
		// previously declared inside the loop below, so each round reset it
		// to "" and re-fetched the first page, producing duplicate children.
		token := ""
		for leftCredit > 0 {
			res, err := b.fileClient.GetChildFiles(ctx,
				&inventory.ListFileParameters{
					PaginationArgs: &inventory.PaginationArgs{
						UseCursorPagination: true,
						PageToken:           token,
						PageSize:            leftCredit,
					},
					MixedType: true,
				},
				owner.ID,
				lo.Map(folders, func(item *File, index int) *ent.File {
					return item.Model
				})...)
			if err != nil {
				return serializer.NewError(serializer.CodeDBError, "Failed to list children", err)
			}
			leftCredit -= len(res.Files)
			levelFiles = append(levelFiles, lo.Map(res.Files, func(model *ent.File, index int) *File {
				p := parents[model.FileChildren]
				return newFile(p, model)
			})...)
			// All files listed
			if res.NextPageToken == "" {
				break
			}
			token = res.NextPageToken
		}
		level++
	}
	if walked >= limit {
		return ErrFileCountLimitedReached
	}
	return nil
}
// search performs a file search. With a nil parent it issues one flat query
// over all of the user's files; otherwise it searches recursively beneath
// parent, level by level, resuming from a composite page token of the form
// "<level>|<innerToken>" (see parseSearchPageToken).
func (b *baseNavigator) search(ctx context.Context, parent *File, args *ListArgs) (*ListResult, error) {
	if parent == nil {
		// Performs mega search for all files in trash fs.
		children, err := b.fileClient.GetChildFiles(ctx, &inventory.ListFileParameters{
			PaginationArgs: args.Page,
			MixedType:      true,
			Search:         args.Search,
			SharedWithMe:   args.SharedWithMe,
		}, b.user.ID, nil)
		if err != nil {
			return nil, fmt.Errorf("failed to get children: %w", err)
		}
		return &ListResult{
			Files: lo.FilterMap(children.Files, func(model *ent.File, index int) (*File, bool) {
				f := newFile(parent, model)
				return b.listFilter(ctx, f)
			}),
			MixedType:  children.MixedType,
			Pagination: children.PaginationResults,
		}, nil
	}
	// Performs recursive search for all files under the given folder.
	// parents[i] maps folder ID -> folder for recursion level i.
	walkedFolder := 1
	parents := []map[int]*File{{parent.Model.ID: parent}}
	startLevel, innerPageToken, err := parseSearchPageToken(args.Page.PageToken)
	if err != nil {
		return nil, err
	}
	args.Page.PageToken = innerPageToken
	// stepLevel discovers all folders one level below `level`; it reports
	// true when no deeper folders exist (search exhausted).
	stepLevel := func(level int) (bool, error) {
		token := ""
		// We don't need metadata in level search.
		listCtx := context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, nil)
		for walkedFolder <= b.config.MaxRecursiveSearchedFolder {
			// TODO: chunk parents into 30000 per group
			res, err := b.fileClient.GetChildFiles(listCtx,
				&inventory.ListFileParameters{
					PaginationArgs: &inventory.PaginationArgs{
						UseCursorPagination: true,
						PageToken:           token,
					},
					FolderOnly: true,
				},
				parent.Model.OwnerID,
				lo.MapToSlice(parents[level], func(k int, f *File) *ent.File {
					return f.Model
				})...)
			if err != nil {
				return false, serializer.NewError(serializer.CodeDBError, "Failed to list children", err)
			}
			// NOTE(review): each pagination round appends a fresh map to
			// parents, but only parents[level+1] is consulted below — with a
			// multi-page folder listing, later pages would land in
			// parents[level+2...]. Verify against callers.
			parents = append(parents, lo.SliceToMap(
				lo.FilterMap(res.Files, func(model *ent.File, index int) (*File, bool) {
					p := parents[level][model.FileChildren]
					f := newFile(p, model)
					f.Path[pathIndexUser] = p.Uri(false).Join(model.Name)
					return f, true
				}),
				func(f *File) (int, *File) {
					return f.Model.ID, f
				}))
			walkedFolder += len(parents[level+1])
			if res.NextPageToken == "" {
				break
			}
			token = res.NextPageToken
		}
		if len(parents) <= level+1 || len(parents[level+1]) == 0 {
			// All possible folders is searched
			return true, nil
		}
		return false, nil
	}
	// We need to walk from root folder to get the correct level.
	for level := 0; level < startLevel; level++ {
		stop, err := stepLevel(level)
		if err != nil {
			return nil, err
		}
		if stop {
			return &ListResult{}, nil
		}
	}
	// Search files starting from current level
	res := make([]*File, 0, args.Page.PageSize)
	args.Page.UseCursorPagination = true
	originalPageSize := args.Page.PageSize
	stop := false
	for len(res) < originalPageSize && walkedFolder <= b.config.MaxRecursiveSearchedFolder {
		// Only requires minimum number of files
		args.Page.PageSize = min(originalPageSize, originalPageSize-len(res))
		searchRes, err := b.fileClient.GetChildFiles(ctx,
			&inventory.ListFileParameters{
				PaginationArgs: args.Page,
				MixedType:      true,
				Search:         args.Search,
			},
			parent.Model.OwnerID,
			lo.MapToSlice(parents[startLevel], func(k int, f *File) *ent.File {
				return f.Model
			})...)
		if err != nil {
			return nil, serializer.NewError(serializer.CodeDBError, "Failed to search files", err)
		}
		newRes := lo.FilterMap(searchRes.Files, func(model *ent.File, index int) (*File, bool) {
			p := parents[startLevel][model.FileChildren]
			f := newFile(p, model)
			f.Path[pathIndexUser] = p.Uri(false).Join(model.Name)
			return b.listFilter(ctx, f)
		})
		res = append(res, newRes...)
		if args.StreamCallback != nil {
			args.StreamCallback(newRes)
		}
		args.Page.PageToken = searchRes.NextPageToken
		// If no more results under current level, move to next level
		if args.Page.PageToken == "" {
			if len(res) == originalPageSize {
				// Current page is full, no need to search more
				startLevel++
				break
			}
			finished, err := stepLevel(startLevel)
			if err != nil {
				return nil, err
			}
			if finished {
				stop = true
				// No more folders under next level, all result is presented
				break
			}
			startLevel++
		}
	}
	if args.StreamCallback != nil {
		// Clear res if it's streamed
		res = res[:0]
	}
	searchRes := &ListResult{
		Files:                 res,
		MixedType:             true,
		Pagination:            &inventory.PaginationResults{IsCursor: true},
		RecursionLimitReached: walkedFolder > b.config.MaxRecursiveSearchedFolder,
	}
	if walkedFolder <= b.config.MaxRecursiveSearchedFolder && !stop {
		searchRes.Pagination.NextPageToken = fmt.Sprintf("%d%s%s", startLevel, searchTokenSeparator, args.Page.PageToken)
	}
	return searchRes, nil
}
// parseSearchPageToken splits a recursive-search pagination token of the form
// "<level>|<innerToken>" into the recursion level and the inner database page
// token. An empty token denotes the first page (level 0, empty inner token).
func parseSearchPageToken(token string) (int, string, error) {
	if token == "" {
		return 0, "", nil
	}
	// Split on the first separator only: the inner page token is opaque and
	// may itself contain the separator character, which the previous
	// strings.Split + exact-length check incorrectly rejected.
	levelStr, inner, found := strings.Cut(token, searchTokenSeparator)
	if !found {
		return 0, "", fmt.Errorf("invalid page token")
	}
	level, err := strconv.Atoi(levelStr)
	if err != nil || level < 0 {
		return 0, "", fmt.Errorf("invalid page token level")
	}
	return level, inner, nil
}
// newMyUri returns the root URI of the current user's "my" file system.
func newMyUri() *fs.URI {
	res, _ := fs.NewUriFromString(fmt.Sprintf("%s://%s", constants.CloudreveScheme, constants.FileSystemMy))
	return res
}
// newMyIDUri returns the "my" file system root URI for the given hashed user ID.
func newMyIDUri(uid string) *fs.URI {
	res, _ := fs.NewUriFromString(constants.CloudreveScheme + "://" + uid + "@" + string(constants.FileSystemMy))
	return res
}
// newTrashUri returns the trash file system URI for the given file name.
func newTrashUri(name string) *fs.URI {
	base, _ := fs.NewUriFromString(constants.CloudreveScheme + "://" + string(constants.FileSystemTrash))
	return base.Join(name)
}
// newSharedWithMeUri returns the shared-with-me file system URI for the given share ID.
func newSharedWithMeUri(id string) *fs.URI {
	base, _ := fs.NewUriFromString(constants.CloudreveScheme + "://" + string(constants.FileSystemSharedWithMe))
	return base.Join(id)
}

View File

@@ -0,0 +1,171 @@
package dbfs
import (
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
)
// dbfsOption aggregates per-call options for DBFS operations. It embeds the
// generic fs.FsOption and adds dbfs-specific switches, each toggled by one of
// the With* option constructors below.
type dbfsOption struct {
	*fs.FsOption

	loadFolderSummary      bool
	extendedInfo           bool
	loadFilePublicMetadata bool
	loadFileShareIfOwned   bool
	loadEntityUser         bool
	loadFileEntities       bool
	// useCursorPagination/pageToken select cursor-based listing.
	useCursorPagination    bool
	pageToken              string
	preferredStoragePolicy *ent.StoragePolicy
	errOnConflict          bool
	previousVersion        string
	removeStaleEntities    bool
	// requiredCapabilities lists capabilities the target navigator must have.
	requiredCapabilities []NavigatorCapability
	generateContextHint  bool
	isSymbolicLink       bool
	// noChainedCreation requires parent folders to exist before creation.
	noChainedCreation          bool
	streamListResponseCallback func(parent fs.File, file []fs.File)
	// ancestor is the most recent known ancestor used when creating files.
	ancestor *File
}
// newDbfsOption returns an option holder with an empty embedded fs.FsOption.
func newDbfsOption() *dbfsOption {
	o := new(dbfsOption)
	o.FsOption = &fs.FsOption{}
	return o
}
// apply dispatches opt either to the embedded generic fs options or to the
// dbfs-specific options, depending on its concrete type.
func (o *dbfsOption) apply(opt fs.Option) {
	switch typed := opt.(type) {
	case fs.OptionFunc:
		typed.Apply(o.FsOption)
	case optionFunc:
		typed.Apply(o)
	}
}
// optionFunc is a dbfs-specific functional option.
type optionFunc func(*dbfsOption)

// Apply invokes the option when o is a *dbfsOption; other targets are ignored.
func (f optionFunc) Apply(o any) {
	if dbfsO, ok := o.(*dbfsOption); ok {
		f(dbfsO)
	}
}
// WithFilePublicMetadata enables loading file public metadata.
func WithFilePublicMetadata() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.loadFilePublicMetadata = true
	})
}

// WithContextHint enables generating context hint for the list operation.
func WithContextHint() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.generateContextHint = true
	})
}

// WithFileEntities enables loading file entities (blob versions).
func WithFileEntities() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.loadFileEntities = true
	})
}

// WithCursorPagination enables cursor pagination for the list operation,
// resuming from the given page token.
func WithCursorPagination(pageToken string) fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.useCursorPagination = true
		o.pageToken = pageToken
	})
}

// WithPreferredStoragePolicy sets the preferred storage policy for the upload operation.
func WithPreferredStoragePolicy(policy *ent.StoragePolicy) fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.preferredStoragePolicy = policy
	})
}

// WithErrorOnConflict sets to throw error on conflict for the create operation.
func WithErrorOnConflict() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.errOnConflict = true
	})
}

// WithPreviousVersion sets the previous version for the update operation.
func WithPreviousVersion(version string) fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.previousVersion = version
	})
}

// WithRemoveStaleEntities sets to remove stale entities for the update operation.
func WithRemoveStaleEntities() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.removeStaleEntities = true
	})
}

// WithRequiredCapabilities sets the capabilities the target navigator must
// support for the operation to proceed.
func WithRequiredCapabilities(capabilities ...NavigatorCapability) fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.requiredCapabilities = capabilities
	})
}

// WithNoChainedCreation sets to disable chained creation for the create operation. This
// will require parent folder existed before creating new files under it.
func WithNoChainedCreation() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.noChainedCreation = true
	})
}

// WithFileShareIfOwned enables loading file share link if the file is owned by the user.
func WithFileShareIfOwned() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.loadFileShareIfOwned = true
	})
}

// WithStreamListResponseCallback sets the callback for handling stream list response.
func WithStreamListResponseCallback(callback func(parent fs.File, file []fs.File)) fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.streamListResponseCallback = callback
	})
}

// WithSymbolicLink marks the file being created as a symbolic link.
func WithSymbolicLink() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.isSymbolicLink = true
	})
}

// WithExtendedInfo enables loading extended info for the file.
func WithExtendedInfo() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.extendedInfo = true
	})
}

// WithLoadFolderSummary enables loading folder summary.
func WithLoadFolderSummary() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.loadFolderSummary = true
	})
}

// WithEntityUser enables loading entity user.
func WithEntityUser() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.loadEntityUser = true
	})
}

// WithAncestor sets most recent ancestor for creating files.
func WithAncestor(f *File) fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.ancestor = f
	})
}

View File

@@ -0,0 +1,324 @@
package dbfs
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
)
var (
	// ErrShareNotFound is returned when the referenced share does not exist or is invalid.
	ErrShareNotFound = serializer.NewError(serializer.CodeNotFound, "Shared file does not exist", nil)
	// ErrNotPurchased is returned when a paid share has not been purchased yet.
	// Fix: grammar in the user-facing message ("need to purchased" -> "need to purchase").
	ErrNotPurchased = serializer.NewError(serializer.CodePurchaseRequired, "You need to purchase this share", nil)
)
const (
	// PurchaseTicketHeader is the HTTP header carrying the purchase ticket for paid shares.
	PurchaseTicketHeader = constants.CrHeaderPrefix + "Purchase-Ticket"
)
var shareNavigatorCapability = &boolset.BooleanSet{}
// NewShareNavigator creates a navigator for user's "shared" file system.
func NewShareNavigator(u *ent.User, fileClient inventory.FileClient, shareClient inventory.ShareClient,
	l logging.Logger, config *setting.DBFS, hasher hashid.Encoder) Navigator {
	return &shareNavigator{
		user:          u,
		l:             l,
		fileClient:    fileClient,
		shareClient:   shareClient,
		config:        config,
		baseNavigator: newBaseNavigator(fileClient, defaultFilter, u, hasher, config),
	}
}
type (
	// shareNavigator navigates files exposed through a share link.
	shareNavigator struct {
		l           logging.Logger
		user        *ent.User
		fileClient  inventory.FileClient
		shareClient inventory.ShareClient
		config      *setting.DBFS
		*baseNavigator

		// shareRoot is the share-side root file (the shared folder, or the
		// parent folder for a single-file share).
		shareRoot *File
		// singleFileShare marks a share that exposes one file rather than a folder.
		singleFileShare bool
		// ownerRoot is the owner-side top folder reached by walking up from shareRoot.
		ownerRoot *File
		share     *ent.Share
		owner     *ent.User
		// disableRecycle is set once state is persisted/restored so Recycle won't release roots.
		disableRecycle bool
		// persist is the deferred state-persist hook installed by PersistState.
		persist func()
	}

	// shareNavigatorState is the serializable snapshot used by
	// PersistState/RestoreState.
	shareNavigatorState struct {
		ShareRoot       *File
		OwnerRoot       *File
		SingleFileShare bool
		Share           *ent.Share
		Owner           *ent.User
	}
)
// PersistState arranges for the navigator's resolved share state to be cached
// into kv under key on the next Recycle, and disables recycling of in-memory files.
func (n *shareNavigator) PersistState(kv cache.Driver, key string) {
	n.disableRecycle = true
	n.persist = func() {
		kv.Set(key, shareNavigatorState{
			ShareRoot:       n.shareRoot,
			OwnerRoot:       n.ownerRoot,
			SingleFileShare: n.singleFileShare,
			Share:           n.share,
			Owner:           n.owner,
		}, ContextHintTTL)
	}
}
// RestoreState restores a previously persisted shareNavigatorState from s.
func (n *shareNavigator) RestoreState(s State) error {
	n.disableRecycle = true
	state, ok := s.(shareNavigatorState)
	if !ok {
		return fmt.Errorf("invalid state type: %T", s)
	}
	n.shareRoot = state.ShareRoot
	n.ownerRoot = state.OwnerRoot
	n.singleFileShare = state.SingleFileShare
	n.share = state.Share
	n.owner = state.Owner
	return nil
}
// Recycle flushes any pending state persistence and, unless recycling has
// been disabled, releases the deepest resolved root (owner side preferred).
func (n *shareNavigator) Recycle() {
	if p := n.persist; p != nil {
		n.persist = nil
		p()
	}

	if n.disableRecycle {
		return
	}
	switch {
	case n.ownerRoot != nil:
		n.ownerRoot.Recycle()
	case n.shareRoot != nil:
		n.shareRoot.Recycle()
	}
}
// Root resolves the share referenced by path, validates it (existence,
// validity, password, download permission) and initializes the navigator's
// share root, owner root and share metadata. It returns the share-side root.
func (n *shareNavigator) Root(ctx context.Context, path *fs.URI) (*File, error) {
	ctx = context.WithValue(ctx, inventory.LoadShareUser{}, true)
	ctx = context.WithValue(ctx, inventory.LoadUserGroup{}, true)
	ctx = context.WithValue(ctx, inventory.LoadShareFile{}, true)
	share, err := n.shareClient.GetByHashID(ctx, path.ID(hashid.EncodeUserID(n.hasher, n.user.ID)))
	if err != nil {
		return nil, ErrShareNotFound.WithError(err)
	}
	if err := inventory.IsValidShare(share); err != nil {
		return nil, ErrShareNotFound.WithError(err)
	}
	n.owner = share.Edges.User
	// Check password
	if share.Password != "" && share.Password != path.Password() {
		return nil, ErrShareIncorrectPassword
	}
	// Share permission setting should overwrite root folder's permission
	n.shareRoot = newFile(nil, share.Edges.File)
	// Find the user side root of the file.
	ownerRoot, err := n.findRoot(ctx, n.shareRoot)
	if err != nil {
		return nil, err
	}
	// For a single-file share, the navigator root is the file's parent folder.
	if n.shareRoot.Type() == types.FileTypeFile {
		n.singleFileShare = true
		n.shareRoot = n.shareRoot.Parent
	}
	n.shareRoot.Path[pathIndexUser] = path.Root()
	n.shareRoot.OwnerModel = n.owner
	n.shareRoot.IsUserRoot = true
	n.shareRoot.CapabilitiesBs = n.Capabilities(false).Capability
	// Check if any ancestors is deleted
	if ownerRoot.Name() != inventory.RootFolderName {
		return nil, ErrShareNotFound
	}
	if n.user.ID != n.owner.ID && !n.user.Edges.Group.Permissions.Enabled(int(types.GroupPermissionShareDownload)) {
		// Fix: dropped a no-arg fmt.Sprintf wrapper (go vet / staticcheck
		// S1039); the wrapped err was provably nil at this point, so pass nil.
		return nil, serializer.NewError(
			serializer.CodeNoPermissionErr,
			"You don't have permission to access share links",
			nil,
		)
	}
	n.ownerRoot = ownerRoot
	n.ownerRoot.Path[pathIndexRoot] = newMyIDUri(hashid.EncodeUserID(n.hasher, n.owner.ID))
	n.share = share
	return n.shareRoot, nil
}
// To resolves path inside the shared file tree, lazily resolving the share
// root on first use. On a failed walk it returns the most-recent ancestor
// together with the wrapped error, matching the Navigator contract.
func (n *shareNavigator) To(ctx context.Context, path *fs.URI) (*File, error) {
	if n.shareRoot == nil {
		root, err := n.Root(ctx, path)
		if err != nil {
			return nil, err
		}
		n.shareRoot = root
	}
	current, lastAncestor := n.shareRoot, n.shareRoot
	elements := path.Elements()
	// If target is root of single file share, the root itself is the target.
	if len(elements) <= 1 && n.singleFileShare {
		file, err := n.latestSharedSingleFile(ctx)
		if err != nil {
			return nil, err
		}
		// A one-element path must name the shared file itself.
		if len(elements) == 1 && file.Name() != elements[0] {
			return nil, fs.ErrPathNotExist
		}
		return file, nil
	}
	var err error
	for index, element := range elements {
		lastAncestor = current
		current, err = n.walkNext(ctx, current, element, index == len(elements)-1)
		if err != nil {
			return lastAncestor, fmt.Errorf("failed to walk into %q: %w", element, err)
		}
	}
	return current, nil
}
// walkNext resolves one path element under root via the base navigator.
func (n *shareNavigator) walkNext(ctx context.Context, root *File, next string, isLeaf bool) (*File, error) {
	return n.baseNavigator.walkNext(ctx, root, next, isLeaf)
}
// Children lists the children of parent. A single-file share always yields a
// one-element, single-file-view result containing the freshest shared file.
func (n *shareNavigator) Children(ctx context.Context, parent *File, args *ListArgs) (*ListResult, error) {
	if n.singleFileShare {
		file, err := n.latestSharedSingleFile(ctx)
		if err != nil {
			return nil, err
		}
		return &ListResult{
			Files:          []*File{file},
			Pagination:     &inventory.PaginationResults{},
			SingleFileView: true,
		}, nil
	}
	return n.baseNavigator.children(ctx, parent, args)
}
// latestSharedSingleFile re-reads the shared file from the database so the
// freshest model is returned; it fails for non-single-file shares.
func (n *shareNavigator) latestSharedSingleFile(ctx context.Context) (*File, error) {
	if !n.singleFileShare {
		return nil, fs.ErrPathNotExist
	}
	model, err := n.fileClient.GetByID(ctx, n.share.Edges.File.ID)
	if err != nil {
		return nil, err
	}
	res := newFile(n.shareRoot, model)
	res.OwnerModel = n.shareRoot.OwnerModel
	return res, nil
}
// Capabilities reports the navigator's feature set. In search mode no
// ordering options are offered at all.
func (n *shareNavigator) Capabilities(isSearching bool) *fs.NavigatorProps {
	props := &fs.NavigatorProps{
		Capability:            shareNavigatorCapability,
		OrderDirectionOptions: fullOrderDirectionOption,
		OrderByOptions:        fullOrderByOption,
		MaxPageSize:           n.config.MaxPageSize,
	}
	if isSearching {
		props.OrderByOptions, props.OrderDirectionOptions = nil, nil
	}
	return props
}
// FollowTx switches the navigator's inventory clients to transaction-aware
// copies derived from the transaction carried in ctx. It returns a revert
// function that restores the original clients, or an error if ctx carries no
// transaction or a client cannot be wrapped.
func (n *shareNavigator) FollowTx(ctx context.Context) (func(), error) {
	if _, ok := ctx.Value(inventory.TxCtx{}).(*inventory.Tx); !ok {
		return nil, fmt.Errorf("navigator: no inherited transaction found in context")
	}

	newFileClient, _, _, err := inventory.WithTx(ctx, n.fileClient)
	if err != nil {
		return nil, err
	}

	newShareClient, _, _, err := inventory.WithTx(ctx, n.shareClient)
	if err != nil {
		// Bug fix: this error was previously ignored, which could install an
		// invalid share client into the navigator.
		return nil, err
	}

	oldFileClient, oldShareClient := n.fileClient, n.shareClient
	revert := func() {
		n.fileClient = oldFileClient
		n.shareClient = oldShareClient
		n.baseNavigator.fileClient = oldFileClient
	}

	n.fileClient = newFileClient
	n.shareClient = newShareClient
	n.baseNavigator.fileClient = newFileClient
	return revert, nil
}
// ExecuteHook runs navigator-specific hooks. For single-file shares, a
// before-download hook increments the share's download counter.
func (n *shareNavigator) ExecuteHook(ctx context.Context, hookType fs.HookType, file *File) error {
	if hookType == fs.HookTypeBeforeDownload && n.singleFileShare {
		return n.shareClient.Downloaded(ctx, n.share)
	}
	return nil
}
// findRoot finds the root folder of the given child by repeatedly walking up
// the tree; a NotFound from walkUp signals the top has been reached. Any
// other error aborts the search.
func (n *shareNavigator) findRoot(ctx context.Context, child *File) (*File, error) {
	root := child
	for {
		newRoot, err := n.baseNavigator.walkUp(ctx, root)
		if err != nil {
			if !ent.IsNotFound(err) {
				return nil, err
			}
			// No parent left: root is the top of the tree.
			break
		}

		root = newRoot
	}

	return root, nil
}
// Walk traverses levelFiles and their descendants up to depth levels,
// visiting at most limit files via f; delegates to the base navigator.
func (n *shareNavigator) Walk(ctx context.Context, levelFiles []*File, limit, depth int, f WalkFunc) error {
	return n.baseNavigator.walk(ctx, levelFiles, limit, depth, f)
}

View File

@@ -0,0 +1,141 @@
package dbfs
import (
"context"
"errors"
"fmt"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
)
// sharedWithMeNavigatorCapability holds the capability flags advertised by
// sharedWithMeNavigator (presumably populated during package init — verify).
var sharedWithMeNavigatorCapability = &boolset.BooleanSet{}
// NewSharedWithMeNavigator creates a navigator for the user's "shared with
// me" file system view.
func NewSharedWithMeNavigator(u *ent.User, fileClient inventory.FileClient, l logging.Logger,
	config *setting.DBFS, hasher hashid.Encoder) Navigator {
	nav := &sharedWithMeNavigator{
		user:       u,
		l:          l,
		fileClient: fileClient,
		config:     config,
		hasher:     hasher,
	}
	nav.baseNavigator = newBaseNavigator(fileClient, defaultFilter, u, hasher, config)
	return nav
}
// sharedWithMeNavigator presents a flattened view of files other users have
// shared with the current user.
type sharedWithMeNavigator struct {
	l          logging.Logger
	user       *ent.User
	fileClient inventory.FileClient
	config     *setting.DBFS
	hasher     hashid.Encoder

	// root caches the user's root file after the first successful To call.
	root *File

	*baseNavigator
}
// Recycle is a no-op: this navigator holds no recyclable resources.
func (t *sharedWithMeNavigator) Recycle() {
}
// PersistState is a no-op: this navigator keeps no pagination state.
func (n *sharedWithMeNavigator) PersistState(kv cache.Driver, key string) {
}
// RestoreState is a no-op: this navigator keeps no pagination state.
func (n *sharedWithMeNavigator) RestoreState(s State) error {
	return nil
}
// To resolves path in the "shared with me" view. Only the virtual root is
// addressable: the view is a flattened list, so any deeper path is rejected.
// The user's root file is fetched lazily and cached on first call.
func (t *sharedWithMeNavigator) To(ctx context.Context, path *fs.URI) (*File, error) {
	// Anonymous users have no "shared with me" view; sign-in is required.
	if inventory.IsAnonymousUser(t.user) {
		return nil, ErrLoginRequired
	}

	elements := path.Elements()
	if len(elements) > 0 {
		// Shared with me folder is a flatten tree, only root can be accessed.
		return nil, fs.ErrPathNotExist.WithError(fmt.Errorf("invalid Path %q", path))
	}

	if t.root == nil {
		rootFile, err := t.fileClient.Root(ctx, t.user)
		if err != nil {
			t.l.Info("User's root folder not found: %s, will initialize it.", err)
			return nil, ErrFsNotInitialized
		}

		t.root = newFile(nil, rootFile)
		rootPath := newSharedWithMeUri("")
		t.root.Path[pathIndexRoot], t.root.Path[pathIndexUser] = rootPath, rootPath
		t.root.OwnerModel = t.user
		t.root.IsUserRoot = true
		t.root.CapabilitiesBs = t.Capabilities(false).Capability
	}

	return t.root, nil
}
// Children lists files shared with the current user. parent is ignored since
// the view is flat; each result is given a user-facing shared-with-me URI
// keyed by the file's hashed ID.
func (t *sharedWithMeNavigator) Children(ctx context.Context, parent *File, args *ListArgs) (*ListResult, error) {
	args.SharedWithMe = true
	res, err := t.baseNavigator.children(ctx, nil, args)
	if err != nil {
		return nil, err
	}

	// Adding user uri for each file.
	for i := 0; i < len(res.Files); i++ {
		res.Files[i].Path[pathIndexUser] = newSharedWithMeUri(hashid.EncodeFileID(t.hasher, res.Files[i].Model.ID))
	}

	return res, nil
}
// Capabilities reports the navigator's feature set; search mode narrows the
// order-by options to the search-safe subset.
func (t *sharedWithMeNavigator) Capabilities(isSearching bool) *fs.NavigatorProps {
	props := &fs.NavigatorProps{
		Capability:            sharedWithMeNavigatorCapability,
		OrderDirectionOptions: fullOrderDirectionOption,
		OrderByOptions:        fullOrderByOption,
		MaxPageSize:           t.config.MaxPageSize,
	}
	if isSearching {
		props.OrderByOptions = searchLimitedOrderByOption
	}
	return props
}
// Walk is not supported for the flattened shared-with-me view.
func (t *sharedWithMeNavigator) Walk(ctx context.Context, levelFiles []*File, limit, depth int, f WalkFunc) error {
	return errors.New("not implemented")
}
// FollowTx swaps the navigator's file client for a transaction-aware copy
// derived from the transaction in ctx, returning a revert function that
// restores the original client.
func (n *sharedWithMeNavigator) FollowTx(ctx context.Context) (func(), error) {
	if _, ok := ctx.Value(inventory.TxCtx{}).(*inventory.Tx); !ok {
		return nil, fmt.Errorf("navigator: no inherited transaction found in context")
	}

	txFileClient, _, _, err := inventory.WithTx(ctx, n.fileClient)
	if err != nil {
		return nil, err
	}

	prev := n.fileClient
	n.fileClient = txFileClient
	n.baseNavigator.fileClient = txFileClient

	return func() {
		n.fileClient = prev
		n.baseNavigator.fileClient = prev
	}, nil
}
// ExecuteHook is a no-op: this navigator defines no custom hooks.
func (n *sharedWithMeNavigator) ExecuteHook(ctx context.Context, hookType fs.HookType, file *File) error {
	return nil
}

View File

@@ -0,0 +1,137 @@
package dbfs
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
)
// trashNavigatorCapability holds the capability flags advertised by
// trashNavigator (presumably populated during package init — verify).
var trashNavigatorCapability = &boolset.BooleanSet{}
// NewTrashNavigator creates a navigator for the user's "trash" file system
// view.
func NewTrashNavigator(u *ent.User, fileClient inventory.FileClient, l logging.Logger, config *setting.DBFS,
	hasher hashid.Encoder) Navigator {
	nav := &trashNavigator{
		user:       u,
		l:          l,
		fileClient: fileClient,
		config:     config,
	}
	nav.baseNavigator = newBaseNavigator(fileClient, defaultFilter, u, hasher, config)
	return nav
}
// trashNavigator presents a flattened, single-level view of the user's
// soft-deleted files.
type trashNavigator struct {
	l          logging.Logger
	user       *ent.User
	fileClient inventory.FileClient
	config     *setting.DBFS
	*baseNavigator
}
// Recycle is a no-op: this navigator holds no recyclable resources.
func (t *trashNavigator) Recycle() {
}
// PersistState is a no-op: this navigator keeps no pagination state.
func (n *trashNavigator) PersistState(kv cache.Driver, key string) {
}
// RestoreState is a no-op: this navigator keeps no pagination state.
func (n *trashNavigator) RestoreState(s State) error {
	return nil
}
// To resolves path in the trash view. Trash is a flattened single-level
// tree: an empty path means the (virtual, nil) root, one element names a
// trashed file, and anything deeper does not exist.
func (t *trashNavigator) To(ctx context.Context, path *fs.URI) (*File, error) {
	// Anonymous user does not have a trash folder.
	if inventory.IsAnonymousUser(t.user) {
		return nil, ErrLoginRequired
	}

	elements := path.Elements()
	if len(elements) > 1 {
		// Trash folder is a flatten tree, only 1 layer is supported.
		return nil, fs.ErrPathNotExist.WithError(fmt.Errorf("invalid Path %q", path))
	}

	if len(elements) == 0 {
		// Trash folder has no root.
		return nil, nil
	}

	current, err := t.walkNext(ctx, nil, elements[0], true)
	if err != nil {
		return nil, fmt.Errorf("failed to walk into %q: %w", elements[0], err)
	}

	// Rewrite paths so the file is addressed under the trash URI scheme.
	current.Path[pathIndexUser] = newTrashUri(current.Model.Name)
	current.Path[pathIndexRoot] = current.Path[pathIndexUser]
	current.OwnerModel = t.user
	return current, nil
}
// Children lists the user's trashed files. Only the flat root (parent == nil)
// may be listed; each result is given a user-facing trash URI by name.
func (t *trashNavigator) Children(ctx context.Context, parent *File, args *ListArgs) (*ListResult, error) {
	if parent != nil {
		return nil, fs.ErrPathNotExist
	}

	res, err := t.baseNavigator.children(ctx, nil, args)
	if err != nil {
		return nil, err
	}

	// Adding user uri for each file.
	for i := 0; i < len(res.Files); i++ {
		res.Files[i].Path[pathIndexUser] = newTrashUri(res.Files[i].Model.Name)
	}

	return res, nil
}
// Capabilities reports the navigator's feature set; search mode narrows the
// order-by options to the search-safe subset.
func (t *trashNavigator) Capabilities(isSearching bool) *fs.NavigatorProps {
	props := &fs.NavigatorProps{
		Capability:            trashNavigatorCapability,
		OrderDirectionOptions: fullOrderDirectionOption,
		OrderByOptions:        fullOrderByOption,
		MaxPageSize:           t.config.MaxPageSize,
	}
	if isSearching {
		props.OrderByOptions = searchLimitedOrderByOption
	}
	return props
}
// Walk traverses levelFiles and their descendants up to depth levels,
// visiting at most limit files via f; delegates to the base navigator.
func (t *trashNavigator) Walk(ctx context.Context, levelFiles []*File, limit, depth int, f WalkFunc) error {
	return t.baseNavigator.walk(ctx, levelFiles, limit, depth, f)
}
// FollowTx swaps the navigator's file client for a transaction-aware copy
// derived from the transaction in ctx, returning a revert function that
// restores the original client.
func (n *trashNavigator) FollowTx(ctx context.Context) (func(), error) {
	if _, ok := ctx.Value(inventory.TxCtx{}).(*inventory.Tx); !ok {
		return nil, fmt.Errorf("navigator: no inherited transaction found in context")
	}

	txFileClient, _, _, err := inventory.WithTx(ctx, n.fileClient)
	if err != nil {
		return nil, err
	}

	prev := n.fileClient
	n.fileClient = txFileClient
	n.baseNavigator.fileClient = txFileClient

	return func() {
		n.fileClient = prev
		n.baseNavigator.fileClient = prev
	}, nil
}
// ExecuteHook is a no-op: this navigator defines no custom hooks.
func (n *trashNavigator) ExecuteHook(ctx context.Context, hookType fs.HookType, file *File) error {
	return nil
}

View File

@@ -0,0 +1,364 @@
package dbfs
import (
"context"
"fmt"
"math"
"time"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
)
// PrepareUpload validates an upload request and creates a server-side upload
// session. It resolves the target URI, locks it for the session's lifetime,
// validates the name/size/extension against the preferred storage policy and
// the owner's remaining capacity, then — inside one transaction — either
// creates a placeholder file (new upload) or a new entity on the existing
// file (update), recording the upload session ID in the file's metadata.
func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ...fs.Option) (*fs.UploadSession, error) {
	// Get navigator; upload and lock capabilities are both required.
	navigator, err := f.getNavigator(ctx, req.Props.Uri, NavigatorCapabilityUploadFile, NavigatorCapabilityLockFile)
	if err != nil {
		return nil, err
	}

	// Get most recent ancestor or target file. NotFound is tolerated: the
	// closest existing ancestor is still returned and used below (presumably
	// non-nil in that case, mirroring To's behavior — TODO confirm).
	ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)
	ancestor, err := f.getFileByPath(ctx, navigator, req.Props.Uri)
	if err != nil && !ent.IsNotFound(err) {
		return nil, fmt.Errorf("failed to get ancestor: %w", err)
	}

	// Uploading into a symbolic (shared) folder is not allowed.
	if ancestor.IsSymbolic() {
		return nil, ErrSymbolicFolderFound
	}

	// The target exists iff the resolved ancestor is the target URI itself.
	fileExisted := false
	if ancestor.Uri(false).IsSame(req.Props.Uri, hashid.EncodeUserID(f.hasher, f.user.ID)) {
		fileExisted = true
	}

	// If file already exist, and update operation is suspended or existing file is not a file
	if fileExisted && (req.Props.EntityType == nil || ancestor.Type() != types.FileTypeFile) {
		return nil, fs.ErrFileExisted
	}

	// If file not exist, only empty entity / version entity is allowed
	if !fileExisted && (req.Props.EntityType != nil && *req.Props.EntityType != types.EntityTypeVersion) {
		return nil, fs.ErrPathNotExist
	}

	// Only the owner may upload, unless owner check is explicitly bypassed.
	if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && ancestor.OwnerID() != f.user.ID {
		return nil, fs.ErrOwnerOnly
	}

	// Lock target path for the lifetime of the upload session.
	lockedPath := ancestor.RootUri().JoinRaw(req.Props.Uri.PathTrimmed())
	lr := &LockByPath{lockedPath, ancestor, types.FileTypeFile, ""}
	ls, err := f.acquireByPath(ctx, time.Until(req.Props.ExpireAt), f.user, false, fs.LockApp(fs.ApplicationUpload), lr)
	defer func() { _ = f.Release(ctx, ls) }()
	ctx = fs.LockSessionToContext(ctx, ls)
	if err != nil {
		return nil, err
	}

	// Get parent folder storage policy and performs validation
	policy, err := f.getPreferredPolicy(ctx, ancestor)
	if err != nil {
		return nil, err
	}

	// validate upload request
	if err := validateNewFile(req.Props.Uri.Name(), req.Props.Size, policy); err != nil {
		return nil, err
	}

	// Validate available capacity
	if err := f.validateUserCapacity(ctx, req.Props.Size, ancestor.Owner()); err != nil {
		return nil, err
	}

	// Generate save path by storage policy. Thumbnails whose policy differs
	// from the file's stored policy get a randomized suffix to avoid clashes.
	isThumbnailAndPolicyNotAvailable := policy.ID != ancestor.Model.StoragePolicyFiles && (req.Props.EntityType != nil && *req.Props.EntityType == types.EntityTypeThumbnail)
	if req.Props.SavePath == "" || isThumbnailAndPolicyNotAvailable {
		req.Props.SavePath = generateSavePath(policy, req, f.user)
		if isThumbnailAndPolicyNotAvailable {
			req.Props.SavePath = fmt.Sprintf(
				"%s.%s%s",
				req.Props.SavePath,
				util.RandStringRunes(16),
				f.settingClient.ThumbEntitySuffix(ctx))
		}
	}

	// Create upload placeholder
	var (
		fileId     int
		entityId   int
		lockToken  string
		targetFile *ent.File
	)
	fc, dbTx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	if fileExisted {
		// Existing file: append a new entity (defaults to a version entity).
		entityType := types.EntityTypeVersion
		if req.Props.EntityType != nil {
			entityType = *req.Props.EntityType
		}
		entity, err := f.CreateEntity(ctx, ancestor, policy, entityType, req,
			WithPreviousVersion(req.Props.PreviousVersion),
			fs.WithUploadRequest(req),
			WithRemoveStaleEntities(),
		)
		if err != nil {
			_ = inventory.Rollback(dbTx)
			return nil, fmt.Errorf("failed to create new entity: %w", err)
		}

		fileId = ancestor.ID()
		entityId = entity.ID()
		targetFile = ancestor.Model
		// Keep the lock alive past this function by excluding it from release.
		lockToken = ls.Exclude(lr, f.user, f.hasher)
	} else {
		// New file: create a placeholder file with its initial entity.
		uploadPlaceholder, err := f.Create(ctx, req.Props.Uri, types.FileTypeFile,
			fs.WithUploadRequest(req),
			WithPreferredStoragePolicy(policy),
			WithErrorOnConflict(),
			WithAncestor(ancestor),
		)
		if err != nil {
			_ = inventory.Rollback(dbTx)
			return nil, fmt.Errorf("failed to create upload placeholder: %w", err)
		}

		fileId = uploadPlaceholder.ID()
		entityId = uploadPlaceholder.Entities()[0].ID()
		targetFile = uploadPlaceholder.(*File).Model
		lockToken = ls.Exclude(lr, f.user, f.hasher)
	}

	// create metadata to record uploading entity id
	if err := fc.UpsertMetadata(ctx, targetFile, map[string]string{
		MetadataUploadSessionID: req.Props.UploadSessionID,
	}, nil); err != nil {
		_ = inventory.Rollback(dbTx)
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to update upload session metadata", err)
	}

	if err := inventory.CommitWithStorageDiff(ctx, dbTx, f.l, f.userClient); err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit file upload preparation", err)
	}

	session := &fs.UploadSession{
		Props: &fs.UploadProps{
			Uri:             req.Props.Uri,
			Size:            req.Props.Size,
			SavePath:        req.Props.SavePath,
			LastModified:    req.Props.LastModified,
			UploadSessionID: req.Props.UploadSessionID,
			ExpireAt:        req.Props.ExpireAt,
			EntityType:      req.Props.EntityType,
		},
		FileID:         fileId,
		NewFileCreated: !fileExisted,
		EntityID:       entityId,
		UID:            f.user.ID,
		Policy:         policy,
		CallbackSecret: util.RandStringRunes(32),
		LockToken:      lockToken, // Prevent lock being released.
	}

	// TODO: frontend should create new upload session if resumed session does not exist.
	return session, nil
}
// CompleteUpload finalizes an upload session: it upgrades the placeholder
// entity to its final type, clears session metadata, enforces the owner's
// version-retention cap, invalidates existing thumbnails when a new version
// lands, then unlocks the file and returns its refreshed state.
func (f *DBFS) CompleteUpload(ctx context.Context, session *fs.UploadSession) (fs.File, error) {
	// Get placeholder file
	file, err := f.Get(ctx, session.Props.Uri, WithFileEntities())
	if err != nil {
		return nil, fmt.Errorf("failed to get placeholder file: %w", err)
	}

	filePrivate := file.(*File)

	// Confirm locks on placeholder file
	if session.LockToken != "" {
		release, ls, err := f.ConfirmLock(ctx, file, file.Uri(false), session.LockToken)
		if err != nil {
			return nil, fs.ErrLockExpired.WithError(err)
		}
		release()
		ctx = fs.LockSessionToContext(ctx, ls)
	}

	// Update placeholder entity to actual desired entity
	entityType := types.EntityTypeVersion
	if session.Props.EntityType != nil {
		entityType = *session.Props.EntityType
	}

	// Check version retention policy
	owner := filePrivate.Owner()
	// Max allowed versions; defaults to 1 unless retention applies.
	maxVersions := 1
	if entityType == types.EntityTypeVersion &&
		owner.Settings.VersionRetention &&
		(len(owner.Settings.VersionRetentionExt) == 0 || util.IsInExtensionList(owner.Settings.VersionRetentionExt, file.Name())) {
		// Retention is enabled for this file
		maxVersions = owner.Settings.VersionRetentionMax
		if maxVersions == 0 {
			// Unlimited versions
			maxVersions = math.MaxInt32
		}
	}

	// Start transaction to update file
	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	err = fc.UpgradePlaceholder(ctx, filePrivate.Model, session.Props.LastModified, session.EntityID, entityType)
	if err != nil {
		_ = inventory.Rollback(tx)
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to update placeholder file", err)
	}

	// Remove metadata that are defined in upload session
	err = fc.RemoveMetadata(ctx, filePrivate.Model, MetadataUploadSessionID, ThumbDisabledKey)
	if err != nil {
		_ = inventory.Rollback(tx)
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to update placeholder metadata", err)
	}

	if len(session.Props.Metadata) > 0 {
		if err := fc.UpsertMetadata(ctx, filePrivate.Model, session.Props.Metadata, nil); err != nil {
			_ = inventory.Rollback(tx)
			return nil, serializer.NewError(serializer.CodeDBError, "Failed to upsert placeholder metadata", err)
		}
	}

	// Trim entities beyond the retention cap; the storage delta accumulates
	// on the transaction and is applied at commit.
	diff, err := fc.CapEntities(ctx, filePrivate.Model, owner, maxVersions, entityType)
	if err != nil {
		_ = inventory.Rollback(tx)
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to cap version entities", err)
	}

	tx.AppendStorageDiff(diff)

	if entityType == types.EntityTypeVersion {
		// If updating version entity, we need to cap all existing thumbnail entity to let it re-generate.
		diff, err = fc.CapEntities(ctx, filePrivate.Model, owner, 0, types.EntityTypeThumbnail)
		if err != nil {
			_ = inventory.Rollback(tx)
			return nil, serializer.NewError(serializer.CodeDBError, "Failed to cap thumbnail entities", err)
		}

		tx.AppendStorageDiff(diff)
	}

	if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit file change", err)
	}

	// Unlock file
	if session.LockToken != "" {
		if err := f.ls.Unlock(time.Now(), session.LockToken); err != nil {
			return nil, serializer.NewError(serializer.CodeLockConflict, "Failed to unlock file", err)
		}
	}

	// Re-fetch so the returned file reflects the upgraded entity state.
	file, err = f.Get(ctx, session.Props.Uri, WithFileEntities())
	if err != nil {
		return nil, fmt.Errorf("failed to get updated file: %w", err)
	}

	return file, nil
}
// CancelUploadSession cancels an upload session and removes its placeholder
// entity. This function will be used when:
//   - File still locked by upload session
//   - File unlocked, upload session valid
//   - File unlocked, upload session not valid
//
// If the placeholder entity is the file's only entity, the whole file is
// deleted and any newly stale entities are returned.
func (f *DBFS) CancelUploadSession(ctx context.Context, path *fs.URI, sessionID string, session *fs.UploadSession) ([]fs.Entity, error) {
	// Get placeholder file
	file, err := f.Get(ctx, path, WithFileEntities())
	if err != nil {
		return nil, fmt.Errorf("failed to get placeholder file: %w", err)
	}

	filePrivate := file.(*File)

	// Make sure presented upload session is valid
	if session != nil && (session.UID != f.user.ID || session.FileID != file.ID()) {
		return nil, serializer.NewError(serializer.CodeNotFound, "Upload session not found", nil)
	}

	// Confirm locks on placeholder file. Failure is tolerated here since the
	// session's lock may have already expired.
	if session != nil && session.LockToken != "" {
		release, ls, err := f.ConfirmLock(ctx, file, file.Uri(false), session.LockToken)
		if err == nil {
			release()
			ctx = fs.LockSessionToContext(ctx, ls)
		}
	}

	if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && filePrivate.OwnerID() != f.user.ID {
		return nil, fs.ErrOwnerOnly
	}

	// Lock file
	ls, err := f.acquireByPath(ctx, -1, f.user, true, fs.LockApp(fs.ApplicationUpload),
		&LockByPath{filePrivate.Uri(true), filePrivate, filePrivate.Type(), ""})
	defer func() { _ = f.Release(ctx, ls) }()
	ctx = fs.LockSessionToContext(ctx, ls)
	if err != nil {
		return nil, err
	}

	// Find placeholder entity matching the given upload session ID.
	var entity fs.Entity
	for _, e := range filePrivate.Entities() {
		if sid := e.UploadSessionID(); sid != nil && sid.String() == sessionID {
			entity = e
			break
		}
	}

	// Remove upload session metadata
	if err := f.fileClient.RemoveMetadata(ctx, filePrivate.Model, MetadataUploadSessionID, ThumbDisabledKey); err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to remove upload session metadata", err)
	}

	if entity == nil {
		// Given upload session does not exist
		return nil, nil
	}

	// Release the session's own lock on the way out, after cleanup below.
	if session != nil && session.LockToken != "" {
		defer func() {
			if err := f.ls.Unlock(time.Now(), session.LockToken); err != nil {
				f.l.Warning("Failed to unlock file %q: %s", filePrivate.Uri(true).String(), err)
			}
		}()
	}

	if len(filePrivate.Entities()) == 1 {
		// Only one placeholder entity, just delete this file
		return f.Delete(ctx, []*fs.URI{path})
	}

	// Delete place holder entity
	storageDiff, err := f.deleteEntity(ctx, filePrivate, entity.ID())
	if err != nil {
		return nil, fmt.Errorf("failed to delete placeholder entity: %w", err)
	}

	if err := f.userClient.ApplyStorageDiff(ctx, storageDiff); err != nil {
		return nil, fmt.Errorf("failed to apply storage diff: %w", err)
	}

	return nil, nil
}

View File

@@ -0,0 +1,88 @@
package dbfs
import (
"context"
"fmt"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"strings"
)
const MaxFileNameLength = 256
// validateFileName validates the file name.
func validateFileName(name string) error {
if len(name) >= MaxFileNameLength || len(name) == 0 {
return fmt.Errorf("length of name must be between 1 and 255")
}
if strings.ContainsAny(name, "\\/:*?\"<>|") {
return fmt.Errorf("name contains illegal characters")
}
if name == "." || name == ".." {
return fmt.Errorf("name cannot be only dot")
}
return nil
}
// validateExtension checks the file name's extension against the storage
// policy's allow-list. An empty list means every extension is accepted.
func validateExtension(name string, policy *ent.StoragePolicy) error {
	// No restriction configured, nothing to validate.
	if len(policy.Settings.FileType) == 0 {
		return nil
	}

	if util.IsInExtensionList(policy.Settings.FileType, name) {
		return nil
	}

	return fmt.Errorf("file extension is not allowed")
}
// validateFileSize ensures size does not exceed the policy's per-file limit;
// a MaxSize of 0 means unlimited.
func validateFileSize(size int64, policy *ent.StoragePolicy) error {
	if policy.MaxSize != 0 && size > policy.MaxSize {
		return fs.ErrFileSizeTooBig
	}
	return nil
}
// validateNewFile runs all upload pre-checks (name, extension, size) against
// the target storage policy, returning the first failure.
func validateNewFile(fileName string, size int64, policy *ent.StoragePolicy) error {
	if err := validateFileName(fileName); err != nil {
		return err
	}

	if err := validateExtension(fileName, policy); err != nil {
		return err
	}

	return validateFileSize(size, policy)
}
// validateUserCapacity fetches u's storage capacity and verifies that adding
// size bytes stays within it.
func (f *DBFS) validateUserCapacity(ctx context.Context, size int64, u *ent.User) error {
	capacity, err := f.Capacity(ctx, u)
	if err != nil {
		// Wrap with %w (was %s) so callers can unwrap the cause via
		// errors.Is/As, consistent with the rest of this package.
		return fmt.Errorf("failed to get user capacity: %w", err)
	}

	return f.validateUserCapacityRaw(ctx, size, capacity)
}
// validateUserCapacityRaw validates the user capacity, but does not fetch the
// capacity. ctx is currently unused; it is kept for signature symmetry with
// validateUserCapacity.
func (f *DBFS) validateUserCapacityRaw(ctx context.Context, size int64, capacity *fs.Capacity) error {
	// NOTE(review): assumes Used+size does not overflow int64 — fine for
	// realistic storage sizes, verify if sizes are caller-controlled.
	if capacity.Used+size > capacity.Total {
		return fs.ErrInsufficientCapacity
	}

	return nil
}

763
pkg/filemanager/fs/fs.go Normal file
View File

@@ -0,0 +1,763 @@
package fs
import (
"context"
"encoding/gob"
"errors"
"fmt"
"io"
"time"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/gofrs/uuid"
)
// FsCapability enumerates optional capabilities a file system may support.
type FsCapability int

const (
	// FsCapabilityList indicates support for listing files.
	FsCapabilityList = FsCapability(iota)
)
// Common file-system errors. Each carries a serializer code so API handlers
// can map it to a stable response code.
var (
	ErrDirectLinkInvalid    = serializer.NewError(serializer.CodeNotFound, "Direct link invalid", nil)
	ErrUnknownPolicyType    = serializer.NewError(serializer.CodeInternalSetting, "Unknown policy type", nil)
	ErrPathNotExist         = serializer.NewError(serializer.CodeParentNotExist, "Path not exist", nil)
	ErrFileDeleted          = serializer.NewError(serializer.CodeFileDeleted, "File deleted", nil)
	ErrEntityNotExist       = serializer.NewError(serializer.CodeEntityNotExist, "Entity not exist", nil)
	ErrFileExisted          = serializer.NewError(serializer.CodeObjectExist, "Object existed", nil)
	ErrNotSupportedAction   = serializer.NewError(serializer.CodeNoPermissionErr, "Not supported action", nil)
	ErrLockConflict         = serializer.NewError(serializer.CodeLockConflict, "Lock conflict", nil)
	ErrLockExpired          = serializer.NewError(serializer.CodeLockConflict, "Lock expired", nil)
	ErrModified             = serializer.NewError(serializer.CodeConflict, "Object conflict", nil)
	ErrIllegalObjectName    = serializer.NewError(serializer.CodeIllegalObjectName, "Invalid object name", nil)
	ErrFileSizeTooBig       = serializer.NewError(serializer.CodeFileTooLarge, "File is too large", nil)
	ErrInsufficientCapacity = serializer.NewError(serializer.CodeInsufficientCapacity, "Insufficient capacity", nil)
	ErrStaleVersion         = serializer.NewError(serializer.CodeStaleVersion, "File is updated during your edit", nil)
	ErrOwnerOnly            = serializer.NewError(serializer.CodeOwnerOnly, "Only owner or administrator can perform this action", nil)
	// ErrArchiveSrcSizeTooBig wraps queue.CriticalErr so the task queue treats
	// the failure as non-retryable.
	ErrArchiveSrcSizeTooBig = ErrFileSizeTooBig.WithError(fmt.Errorf("total size of to-be compressed file exceed group limit (%w)", queue.CriticalErr))
)
type (
FileSystem interface {
LockSystem
UploadManager
FileManager
// Recycle recycles a DBFS and its generated resources.
Recycle()
// Capacity returns the storage capacity of the filesystem.
Capacity(ctx context.Context, u *ent.User) (*Capacity, error)
// CheckCapability checks if the filesystem supports given capability.
CheckCapability(ctx context.Context, uri *URI, opts ...Option) error
// StaleEntities returns all stale entities of given IDs. If no ID is given, all
// potential stale entities will be returned.
StaleEntities(ctx context.Context, entities ...int) ([]Entity, error)
// AllFilesInTrashBin returns all files in trash bin, despite owner.
AllFilesInTrashBin(ctx context.Context, opts ...Option) (*ListFileResult, error)
// Walk walks through all files under given path with given depth limit.
Walk(ctx context.Context, path *URI, depth int, walk WalkFunc, opts ...Option) error
// SharedAddressTranslation translates a path that potentially contain shared symbolic to a real address.
SharedAddressTranslation(ctx context.Context, path *URI, opts ...Option) (File, *URI, error)
// ExecuteNavigatorHooks executes hooks of given type on a file for navigator based custom hooks.
ExecuteNavigatorHooks(ctx context.Context, hookType HookType, file File) error
}
FileManager interface {
// Get returns a file by its path.
Get(ctx context.Context, path *URI, opts ...Option) (File, error)
// Create creates a file.
Create(ctx context.Context, path *URI, fileType types.FileType, opts ...Option) (File, error)
// List lists files under give path.
List(ctx context.Context, path *URI, opts ...Option) (File, *ListFileResult, error)
// Rename renames a file.
Rename(ctx context.Context, path *URI, newName string) (File, error)
// Move moves files to dst.
MoveOrCopy(ctx context.Context, path []*URI, dst *URI, isCopy bool) error
// Delete performs hard-delete for given paths, return newly generated stale entities in this delete operation.
Delete(ctx context.Context, path []*URI, opts ...Option) ([]Entity, error)
// GetEntitiesFromFileID returns all entities of a given file.
GetEntity(ctx context.Context, entityID int) (Entity, error)
// UpsertMetadata update or insert metadata of a file.
PatchMetadata(ctx context.Context, path []*URI, metas ...MetadataPatch) error
// SoftDelete moves given files to trash bin.
SoftDelete(ctx context.Context, path ...*URI) error
// Restore restores given files from trash bin to its original location.
Restore(ctx context.Context, path ...*URI) error
// VersionControl performs version control on given file.
// - `delete` is false: set version as current version;
// - `delete` is true: delete version.
VersionControl(ctx context.Context, path *URI, versionId int, delete bool) error
}
UploadManager interface {
// PrepareUpload prepares an upload session. It performs validation on upload request and returns a placeholder
// file if needed.
PrepareUpload(ctx context.Context, req *UploadRequest, opts ...Option) (*UploadSession, error)
// CompleteUpload completes an upload session.
CompleteUpload(ctx context.Context, session *UploadSession) (File, error)
// CancelUploadSession cancels an upload session. Delete the placeholder file if no other entity is created.
CancelUploadSession(ctx context.Context, path *URI, sessionID string, session *UploadSession) ([]Entity, error)
}
LockSystem interface {
// ConfirmLock confirms if a lock token is valid on given URI.
ConfirmLock(ctx context.Context, ancestor File, uri *URI, token ...string) (func(), LockSession, error)
// Lock locks a file. If zeroDepth is true, only the file itself will be locked. Ancestor is closest ancestor
// of the file that will be locked, if the given uri is an existing file, ancestor will be itself.
// `token` is optional and can be used if the requester need to explicitly specify a token.
Lock(ctx context.Context, d time.Duration, requester *ent.User, zeroDepth bool, application lock.Application,
uri *URI, token string) (LockSession, error)
// Unlock unlocks files by given tokens.
Unlock(ctx context.Context, tokens ...string) error
// Refresh refreshes a lock.
Refresh(ctx context.Context, d time.Duration, token string) (lock.LockDetails, error)
}
StatelessUploadManager interface {
// PrepareUpload prepares the upload on the node.
PrepareUpload(ctx context.Context, args *StatelessPrepareUploadService) (*StatelessPrepareUploadResponse, error)
// CompleteUpload completes the upload on the node.
CompleteUpload(ctx context.Context, args *StatelessCompleteUploadService) error
// OnUploadFailed handles the failed upload on the node.
OnUploadFailed(ctx context.Context, args *StatelessOnUploadFailedService) error
// CreateFile creates a file on the node.
CreateFile(ctx context.Context, args *StatelessCreateFileService) error
}
WalkFunc func(file File, level int) error
File interface {
IsNil() bool
ID() int
Name() string
DisplayName() string
Ext() string
Type() types.FileType
Size() int64
UpdatedAt() time.Time
CreatedAt() time.Time
Metadata() map[string]string
// Uri returns the URI of the file.
Uri(isRoot bool) *URI
Owner() *ent.User
OwnerID() int
// RootUri return the URI of the user root file under owner's view.
RootUri() *URI
Entities() []Entity
PrimaryEntity() Entity
PrimaryEntityID() int
Shared() bool
IsSymbolic() bool
PolicyID() (id int)
ExtendedInfo() *FileExtendedInfo
FolderSummary() *FolderSummary
Capabilities() *boolset.BooleanSet
}
Entities []Entity
Entity interface {
ID() int
Type() types.EntityType
Size() int64
UpdatedAt() time.Time
CreatedAt() time.Time
Source() string
ReferenceCount() int
PolicyID() int
UploadSessionID() *uuid.UUID
CreatedBy() *ent.User
Model() *ent.Entity
}
FileExtendedInfo struct {
StoragePolicy *ent.StoragePolicy
StorageUsed int64
Shares []*ent.Share
EntityStoragePolicies map[int]*ent.StoragePolicy
}
FolderSummary struct {
Size int64 `json:"size"`
Files int `json:"files"`
Folders int `json:"folders"`
Completed bool `json:"completed"` // whether the size calculation is completed
CalculatedAt time.Time `json:"calculated_at"`
}
MetadataPatch struct {
Key string `json:"key" binding:"required"`
Value string `json:"value"`
Private bool `json:"private" binding:"ne=true"`
Remove bool `json:"remove"`
}
// ListFileResult result of listing files.
ListFileResult struct {
Files []File
Parent File
Pagination *inventory.PaginationResults
Props *NavigatorProps
ContextHint *uuid.UUID
RecursionLimitReached bool
MixedType bool
SingleFileView bool
StoragePolicy *ent.StoragePolicy
}
// NavigatorProps is the properties of current filesystem.
NavigatorProps struct {
// Supported capabilities of the navigator.
Capability *boolset.BooleanSet `json:"capability"`
// MaxPageSize is the maximum page size of the navigator.
MaxPageSize int `json:"max_page_size"`
// OrderByOptions is the supported order by options of the navigator.
OrderByOptions []string `json:"order_by_options"`
// OrderDirectionOptions is the supported order direction options of the navigator.
OrderDirectionOptions []string `json:"order_direction_options"`
}
// UploadCredential for uploading files in client side.
UploadCredential struct {
SessionID string `json:"session_id"`
ChunkSize int64 `json:"chunk_size"` // Chunk size in bytes; 0 indicates the upload is not chunked
Expires int64 `json:"expires"` // Expiration time of the upload credential, as a Unix timestamp
UploadURLs []string `json:"upload_urls,omitempty"`
Credential string `json:"credential,omitempty"`
UploadID string `json:"uploadID,omitempty"`
Callback string `json:"callback,omitempty"` // Callback URL
Uri string `json:"uri,omitempty"` // Storage path
AccessKey string `json:"ak,omitempty"`
KeyTime string `json:"keyTime,omitempty"` // Used by COS; credential validity period
CompleteURL string `json:"completeURL,omitempty"`
StoragePolicy *ent.StoragePolicy
CallbackSecret string `json:"callback_secret,omitempty"`
MimeType string `json:"mime_type,omitempty"` // Expected mimetype
UploadPolicy string `json:"upload_policy,omitempty"` // Upyun upload policy
}
// UploadSession stores the information of an upload session, used in server side.
UploadSession struct {
UID int // ID of the user who initiated the upload
Policy *ent.StoragePolicy
FileID int // ID of the placeholder file
EntityID int // ID of the new entity
Callback string // Callback URL
CallbackSecret string // Callback secret
UploadID string // Multi-part upload ID
UploadURL string
Credential string
ChunkSize int64
SentinelTaskID int
NewFileCreated bool // If new file is created for this session
LockToken string // Token of the locked placeholder file
Props *UploadProps
}
// UploadProps properties of an upload session/request.
UploadProps struct {
// Uri is the destination URI of the upload.
Uri *URI
// Size is the expected total size in bytes.
Size int64
// UploadSessionID identifies the upload session.
UploadSessionID string
// PreferredStoragePolicy optionally overrides the storage policy by ID.
PreferredStoragePolicy int
// SavePath is the physical save path in the storage backend.
SavePath string
// LastModified is the client-reported last-modified time, if any.
LastModified *time.Time
// MimeType is the MIME type of the uploaded content.
MimeType string
// Metadata is extra metadata to attach to the created file.
Metadata map[string]string
// PreviousVersion references the version being replaced, if any.
PreviousVersion string
// EntityType is the type of the entity to be created. If not set, a new file will be created
// with a default version entity. This will be set in update request for existing files.
EntityType *types.EntityType
// ExpireAt is when the upload session expires.
ExpireAt time.Time
}
// FsOption options for underlying file system.
FsOption struct {
Page int // Page number when listing files.
PageSize int // Size of pages when listing files.
OrderBy string // Field to order by when listing files.
OrderDirection string // Direction to order by when listing files.
UploadRequest *UploadRequest // Upload request payload.
UnlinkOnly bool // Only unlink files, see WithUnlinkOnly.
UploadSession *UploadSession // Server-side upload session.
DownloadSpeed int64 // Download speed limit.
IsDownload bool // Whether the operation is a download.
Expire *time.Time // Expire time for entity URLs.
Entity Entity // Explicit entity to operate on.
IsThumb bool // Whether entity URL is used for thumbnail.
EntityType *types.EntityType // Entity type filter/selector.
EntityTypeNil bool // Force entity type to nil, see WithNoEntityType.
SkipSoftDelete bool // Skip soft delete.
SysSkipSoftDelete bool // Skip soft delete without checking file ownership.
Metadata map[string]string // Metadata for file creation.
ArchiveCompression bool // Whether to compress files in archives.
ProgressFunc // Progress callback, embedded.
MaxArchiveSize int64 // Max archive/decompress size, 0 for unlimited.
DryRun CreateArchiveDryRunFunc // Dry-run callback for archive creation.
Policy *ent.StoragePolicy // Storage policy overwrite.
Node StatelessUploadManager // Node for stateless upload.
StatelessUserID int // User ID for stateless operations.
NoCache bool // Disable cache for entity URLs.
}
// Option is an extra setting applied when performing filesystem operations.
Option interface {
Apply(any)
}
// OptionFunc adapts a plain function to the Option interface.
OptionFunc func(*FsOption)
// Ctx keys used to detect user canceled operation.
UserCancelCtx struct{}
GinCtx struct{}
// Capacity describes the capacity of a filesystem.
Capacity struct {
Total int64 `json:"total"`
Used int64 `json:"used"`
}
// FileCapacity enumerates per-file capability checks; see the FileCapacity* constants.
FileCapacity int
// LockSession exposes the most recent lock token of a lock session.
LockSession interface {
LastToken() string
}
// HookType enumerates filesystem hook points; see the HookType* constants.
HookType int
// CreateArchiveDryRunFunc is invoked for each would-be archived entity during a dry run.
CreateArchiveDryRunFunc func(name string, e Entity)
// StatelessPrepareUploadService is the payload for preparing an upload on behalf of a user.
StatelessPrepareUploadService struct {
UploadRequest *UploadRequest `json:"upload_request" binding:"required"`
UserID int `json:"user_id"`
}
// StatelessCompleteUploadService is the payload for completing an upload on behalf of a user.
StatelessCompleteUploadService struct {
UploadSession *UploadSession `json:"upload_session" binding:"required"`
UserID int `json:"user_id"`
}
// StatelessOnUploadFailedService is the payload for reporting a failed upload on behalf of a user.
StatelessOnUploadFailedService struct {
UploadSession *UploadSession `json:"upload_session" binding:"required"`
UserID int `json:"user_id"`
}
// StatelessCreateFileService is the payload for creating a file on behalf of a user.
StatelessCreateFileService struct {
Path string `json:"path" binding:"required"`
Type types.FileType `json:"type" binding:"required"`
UserID int `json:"user_id"`
}
// StatelessPrepareUploadResponse is the result of a stateless upload preparation.
StatelessPrepareUploadResponse struct {
Session *UploadSession
Req *UploadRequest
}
// PrepareRelocateRes is the result of preparing an entity relocation.
PrepareRelocateRes struct {
Entities map[int]*RelocateEntity `json:"entities,omitempty"`
LockToken string `json:"lock_token,omitempty"`
Policy *ent.StoragePolicy `json:"policy,omitempty"`
}
// RelocateEntity describes one entity to be relocated to a new storage policy.
RelocateEntity struct {
SrcEntity *ent.Entity `json:"src_entity"`
FileUri *URI `json:"file_uri,omitempty"`
NewSavePath string `json:"new_save_path"`
ParentFiles []int `json:"parent_files"`
PrimaryEntityParentFiles []int `json:"primary_entity_parent_files"`
}
)
// Per-file capability checks.
const (
FileCapacityPreview FileCapacity = iota
FileCapacityEnter
FileCapacityDownload
FileCapacityRename
FileCapacityCopy
FileCapacityMove
)
// Filesystem hook points.
const (
// HookTypeBeforeDownload fires before a download is served.
HookTypeBeforeDownload = HookType(iota)
)
// Copy returns a shallow copy of the upload properties; reference-typed
// fields (Uri, Metadata, ...) are shared with the receiver.
func (p *UploadProps) Copy() *UploadProps {
	clone := *p
	return &clone
}
// Apply implements Option by invoking the wrapped function on o,
// which must be a *FsOption (panics otherwise).
func (f OptionFunc) Apply(o any) {
f(o.(*FsOption))
}
// ==================== FS Options ====================
// WithUploadSession sets the upload session used by the manager.
func WithUploadSession(s *UploadSession) Option {
return OptionFunc(func(o *FsOption) {
o.UploadSession = s
})
}
// WithPageSize limits the number of items in a page when listing files.
func WithPageSize(s int) Option {
return OptionFunc(func(o *FsOption) {
o.PageSize = s
})
}
// WithPage sets the page number when listing files.
func WithPage(p int) Option {
return OptionFunc(func(o *FsOption) {
o.Page = p
})
}
// WithOrderBy sets the field to order by when listing files.
func WithOrderBy(p string) Option {
return OptionFunc(func(o *FsOption) {
o.OrderBy = p
})
}
// WithOrderDirection sets the order direction when listing files.
func WithOrderDirection(p string) Option {
return OptionFunc(func(o *FsOption) {
o.OrderDirection = p
})
}
// WithUploadRequest sets the upload request payload for uploading files.
func WithUploadRequest(p *UploadRequest) Option {
return OptionFunc(func(o *FsOption) {
o.UploadRequest = p
})
}
// WithProgressFunc sets the progress callback used by the manager.
func WithProgressFunc(p ProgressFunc) Option {
return OptionFunc(func(o *FsOption) {
o.ProgressFunc = p
})
}
// WithUnlinkOnly sets the unlink-only flag for unlinking files.
func WithUnlinkOnly(p bool) Option {
return OptionFunc(func(o *FsOption) {
o.UnlinkOnly = p
})
}
// WithDownloadSpeed sets the download speed limit for the manager.
func WithDownloadSpeed(speed int64) Option {
return OptionFunc(func(o *FsOption) {
o.DownloadSpeed = speed
})
}
// WithIsDownload sets whether the operation is a download.
func WithIsDownload(b bool) Option {
return OptionFunc(func(o *FsOption) {
o.IsDownload = b
})
}
// WithSysSkipSoftDelete sets whether to skip soft delete without checking
// file ownership.
func WithSysSkipSoftDelete(b bool) Option {
return OptionFunc(func(o *FsOption) {
o.SysSkipSoftDelete = b
})
}
// WithNoCache sets whether to disable cache for entity's URL.
func WithNoCache(b bool) Option {
return OptionFunc(func(o *FsOption) {
o.NoCache = b
})
}
// WithUrlExpire sets expire time for entity's URL.
func WithUrlExpire(t *time.Time) Option {
return OptionFunc(func(o *FsOption) {
o.Expire = t
})
}
// WithEntity sets an explicit entity for the manager to operate on.
func WithEntity(e Entity) Option {
return OptionFunc(func(o *FsOption) {
o.Entity = e
})
}
// WithPolicy sets storage policy overwrite for manager.
func WithPolicy(p *ent.StoragePolicy) Option {
return OptionFunc(func(o *FsOption) {
o.Policy = p
})
}
// WithUseThumb sets whether entity's URL is used for thumbnail.
func WithUseThumb(b bool) Option {
return OptionFunc(func(o *FsOption) {
o.IsThumb = b
})
}
// WithEntityType sets entity type for manager.
func WithEntityType(t types.EntityType) Option {
return OptionFunc(func(o *FsOption) {
o.EntityType = &t
})
}
// WithNoEntityType forces the entity type to nil for the manager.
func WithNoEntityType() Option {
return OptionFunc(func(o *FsOption) {
o.EntityTypeNil = true
})
}
// WithSkipSoftDelete sets whether to skip soft delete.
func WithSkipSoftDelete(b bool) Option {
return OptionFunc(func(o *FsOption) {
o.SkipSoftDelete = b
})
}
// WithMetadata sets metadata for file creation.
func WithMetadata(m map[string]string) Option {
return OptionFunc(func(o *FsOption) {
o.Metadata = m
})
}
// WithArchiveCompression sets whether to compress files in archive.
func WithArchiveCompression(b bool) Option {
return OptionFunc(func(o *FsOption) {
o.ArchiveCompression = b
})
}
// WithMaxArchiveSize sets maximum size of to-be-archived file or to-be-decompressed
// size, 0 for unlimited.
func WithMaxArchiveSize(s int64) Option {
return OptionFunc(func(o *FsOption) {
o.MaxArchiveSize = s
})
}
// WithDryRun sets the dry-run callback for archive creation.
func WithDryRun(b CreateArchiveDryRunFunc) Option {
return OptionFunc(func(o *FsOption) {
o.DryRun = b
})
}
// WithNode sets node for stateless upload manager.
func WithNode(n StatelessUploadManager) Option {
return OptionFunc(func(o *FsOption) {
o.Node = n
})
}
// WithStatelessUserID sets stateless user ID for manager.
func WithStatelessUserID(id int) Option {
return OptionFunc(func(o *FsOption) {
o.StatelessUserID = id
})
}
// WriteMode is a bit mask controlling how uploads write to existing files.
type WriteMode int
const (
// ModeNone performs a plain write with no special handling.
ModeNone WriteMode = 0x00000
// ModeOverwrite allows overwriting an existing file.
ModeOverwrite WriteMode = 0x00001
// Deprecated: retained for backward compatibility only.
ModeNop WriteMode = 0x00004
)
type (
// ProgressFunc reports transfer progress: current is the absolute byte
// count so far, diff is the change since the last report, total is the
// expected total size.
ProgressFunc func(current, diff int64, total int64)
// UploadRequest wraps an incoming upload stream with its properties and
// progress accounting. It implements io.ReadCloser and, when Seeker is
// set, io.Seeker.
UploadRequest struct {
Props *UploadProps
Mode WriteMode
File io.ReadCloser `json:"-"`
Seeker io.Seeker `json:"-"`
Offset int64
ProgressFunc `json:"-"`
// read tracks how many bytes have been consumed (or the position after a seek).
read int64
}
)
// Read reads from the wrapped stream, accumulates the byte count and
// reports progress. With no underlying stream it returns io.EOF.
func (file *UploadRequest) Read(p []byte) (n int, err error) {
	if file.File == nil {
		return 0, io.EOF
	}
	n, err = file.File.Read(p)
	file.read += int64(n)
	if file.ProgressFunc != nil {
		file.ProgressFunc(file.read, int64(n), file.Props.Size)
	}
	return n, err
}
// Close closes the wrapped stream if one is present.
func (file *UploadRequest) Close() error {
	if file.File == nil {
		return nil
	}
	return file.File.Close()
}
// Seek repositions the underlying seeker and updates the progress
// accounting to the new absolute offset. The progress callback receives
// the new position and the (possibly negative) delta from the previous
// position. It fails when no seeker is attached.
func (file *UploadRequest) Seek(offset int64, whence int) (int64, error) {
	if !file.Seekable() {
		return 0, errors.New("no seeker")
	}
	previous := file.read
	o, err := file.Seeker.Seek(offset, whence)
	file.read = o
	if file.ProgressFunc != nil {
		file.ProgressFunc(o, file.read-previous, file.Props.Size)
	}
	return o, err
}
// Seekable reports whether the request carries an underlying seeker.
func (file *UploadRequest) Seekable() bool {
return file.Seeker != nil
}
// init registers types that are serialized with encoding/gob
// (e.g. when persisted through KV/cache drivers).
func init() {
gob.Register(UploadSession{})
gob.Register(FolderSummary{})
}
// ApplicationType identifies the kind of operation requesting a file lock;
// it is wrapped into a lock.Application by LockApp.
type ApplicationType string
// Known lock applications.
const (
ApplicationCreate ApplicationType = "create"
ApplicationRename ApplicationType = "rename"
ApplicationSetPermission ApplicationType = "setPermission"
ApplicationMoveCopy ApplicationType = "moveCopy"
ApplicationUpload ApplicationType = "upload"
ApplicationUpdateMetadata ApplicationType = "updateMetadata"
ApplicationDelete ApplicationType = "delete"
ApplicationSoftDelete ApplicationType = "softDelete"
ApplicationDAV ApplicationType = "dav"
ApplicationVersionControl ApplicationType = "versionControl"
ApplicationViewer ApplicationType = "viewer"
ApplicationMount ApplicationType = "mount"
ApplicationRelocate ApplicationType = "relocate"
)
// LockApp wraps an ApplicationType into a lock.Application for the lock system.
func LockApp(a ApplicationType) lock.Application {
return lock.Application{Type: string(a)}
}
// LockSessionCtxKey is the context key under which a LockSession is stored.
type LockSessionCtxKey struct{}
// LockSessionToContext stores lock session to context.
func LockSessionToContext(ctx context.Context, session LockSession) context.Context {
return context.WithValue(ctx, LockSessionCtxKey{}, session)
}
// FindDesiredEntity resolves the entity of file referenced by the
// hashid-encoded version string. An empty version selects the file's primary
// entity. When entityType is non-nil, the matched entity must also be of that
// type. It returns whether a match was found and the matched entity (nil if
// not found, including when the version hashid cannot be decoded).
func FindDesiredEntity(file File, version string, hasher hashid.Encoder, entityType *types.EntityType) (bool, Entity) {
if version == "" {
return true, file.PrimaryEntity()
}
requestedVersion, err := hasher.Decode(version, hashid.EntityID)
if err != nil {
return false, nil
}
// Scan all entities, remembering whether any version-typed entity exists.
hasVersions := false
for _, entity := range file.Entities() {
if entity.Type() == types.EntityTypeVersion {
hasVersions = true
}
if entity.ID() == requestedVersion && (entityType == nil || *entityType == entity.Type()) {
return true, entity
}
}
// Happy path for: File has no versions, requested version is empty entity
if !hasVersions && requestedVersion == 0 {
return true, file.PrimaryEntity()
}
return false, nil
}
// DbEntity is the database-backed implementation of Entity, wrapping an ent model.
type DbEntity struct {
model *ent.Entity
}
// NewEntity wraps an ent entity model into an Entity view.
func NewEntity(model *ent.Entity) Entity {
return &DbEntity{model: model}
}
// ID returns the database ID of the entity.
func (e *DbEntity) ID() int {
return e.model.ID
}
// Type returns the entity type.
func (e *DbEntity) Type() types.EntityType {
return types.EntityType(e.model.Type)
}
// Size returns the entity size in bytes.
func (e *DbEntity) Size() int64 {
return e.model.Size
}
// UpdatedAt returns the last update time of the entity.
func (e *DbEntity) UpdatedAt() time.Time {
return e.model.UpdatedAt
}
// CreatedAt returns the creation time of the entity.
func (e *DbEntity) CreatedAt() time.Time {
return e.model.CreatedAt
}
// CreatedBy returns the user who created the entity; nil if the edge is not loaded.
func (e *DbEntity) CreatedBy() *ent.User {
return e.model.Edges.User
}
// Source returns the storage source path of the entity.
func (e *DbEntity) Source() string {
return e.model.Source
}
// ReferenceCount returns the reference count of the entity.
func (e *DbEntity) ReferenceCount() int {
return e.model.ReferenceCount
}
// PolicyID returns the ID of the storage policy associated with the entity.
func (e *DbEntity) PolicyID() int {
return e.model.StoragePolicyEntities
}
// UploadSessionID returns the upload session ID of the entity, or nil.
func (e *DbEntity) UploadSessionID() *uuid.UUID {
return e.model.UploadSessionID
}
// Model returns the underlying ent model.
func (e *DbEntity) Model() *ent.Entity {
return e.model
}
// NewEmptyEntity returns a placeholder entity with no backing storage,
// attributed to the given user and an initial reference count of 1.
// A single clock reading is used so CreatedAt and UpdatedAt are identical
// (the original called time.Now() twice, which could differ).
func NewEmptyEntity(u *ent.User) Entity {
	now := time.Now()
	return &DbEntity{
		model: &ent.Entity{
			CreatedAt:      now,
			UpdatedAt:      now,
			ReferenceCount: 1,
			Edges: ent.EntityEdges{
				User: u,
			},
		},
	}
}

View File

@@ -0,0 +1,40 @@
package mime
import (
	"context"
	"encoding/json"
	"mime"
	"path"
	"strings"

	"github.com/cloudreve/Cloudreve/v4/pkg/logging"
	"github.com/cloudreve/Cloudreve/v4/pkg/setting"
)
// MimeDetector resolves the MIME type of a file from its name.
type MimeDetector interface {
	// TypeByName returns the MIME type for the given file name. The
	// extension is extracted from the name by the implementation, so a
	// full name (or path) may be passed. (The parameter was previously
	// named "ext", which was misleading.)
	TypeByName(name string) string
}

// mimeDetector implements MimeDetector with a user-configurable
// extension-to-type mapping that takes precedence over the standard
// library table.
type mimeDetector struct {
	mapping map[string]string
}
// NewMimeDetector builds a MimeDetector from the JSON extension mapping in
// settings. A malformed mapping is logged and treated as empty, so detection
// falls back entirely to the standard library table.
func NewMimeDetector(ctx context.Context, settings setting.Provider, l logging.Logger) MimeDetector {
mappingStr := settings.MimeMapping(ctx)
mapping := make(map[string]string)
if err := json.Unmarshal([]byte(mappingStr), &mapping); err != nil {
l.Error("Failed to unmarshal mime mapping: %s, fallback to empty mapping", err)
}
return &mimeDetector{
mapping: mapping,
}
}
// TypeByName returns the MIME type for the given file name. The custom
// mapping is consulted first (exact match, then case-insensitively, so
// ".JPG" matches a mapping registered as ".jpg" — mirroring the
// case-insensitive fallback of mime.TypeByExtension), before deferring to
// the standard library table.
func (d *mimeDetector) TypeByName(p string) string {
	ext := path.Ext(p)
	if m, ok := d.mapping[ext]; ok {
		return m
	}
	if m, ok := d.mapping[strings.ToLower(ext)]; ok {
		return m
	}
	return mime.TypeByExtension(ext)
}

421
pkg/filemanager/fs/uri.go Normal file
View File

@@ -0,0 +1,421 @@
package fs
import (
"encoding/json"
"fmt"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/cloudreve/Cloudreve/v4/application/constants"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/samber/lo"
)
const (
// Separator is the path separator used inside URIs.
Separator = "/"
)
// Query parameter names recognized by SearchParameters.
const (
QuerySearchName = "name"
QuerySearchNameOpOr = "use_or"
QuerySearchMetadataPrefix = "meta_"
QuerySearchCaseFolding = "case_folding"
QuerySearchType = "type"
QuerySearchTypeCategory = "category"
QuerySearchSizeGte = "size_gte"
QuerySearchSizeLte = "size_lte"
QuerySearchCreatedGte = "created_gte"
QuerySearchCreatedLte = "created_lte"
QuerySearchUpdatedGte = "updated_gte"
QuerySearchUpdatedLte = "updated_lte"
)
// URI identifies a file location in a Cloudreve filesystem, wrapping a
// standard URL of the form cloudreve://[user[:password]@]fs/path.
type URI struct {
U *url.URL
}
// NewUriFromString parses u into a URI, validating that it uses the
// Cloudreve scheme. A single trailing path separator is stripped so
// equivalent URIs compare equal.
func NewUriFromString(u string) (*URI, error) {
	raw, err := url.Parse(u)
	if err != nil {
		return nil, fmt.Errorf("failed to parse uri: %w", err)
	}
	if raw.Scheme != constants.CloudreveScheme {
		return nil, fmt.Errorf("unknown scheme: %s", raw.Scheme)
	}
	// TrimSuffix is already a no-op when the suffix is absent, so the
	// previous HasSuffix guard was redundant.
	raw.Path = strings.TrimSuffix(raw.Path, Separator)
	return &URI{U: raw}, nil
}
// NewUriFromStrings parses each input string into a URI, failing fast on
// the first invalid one.
func NewUriFromStrings(u ...string) ([]*URI, error) {
	parsed := make([]*URI, len(u))
	for i, raw := range u {
		uri, err := NewUriFromString(raw)
		if err != nil {
			return nil, err
		}
		parsed[i] = uri
	}
	return parsed, nil
}
// UnmarshalBinary decodes a URI from its textual form.
// NOTE(review): unlike NewUriFromString, this performs no scheme validation
// and no trailing-separator trimming — confirm this asymmetry is intended.
func (u *URI) UnmarshalBinary(text []byte) error {
raw, err := url.Parse(string(text))
if err != nil {
return fmt.Errorf("failed to parse uri: %w", err)
}
u.U = raw
return nil
}
// MarshalBinary encodes the URI as its textual form.
func (u *URI) MarshalBinary() ([]byte, error) {
return u.U.MarshalBinary()
}
// MarshalJSON encodes the URI as {"uri": "<string form>"}.
func (u *URI) MarshalJSON() ([]byte, error) {
r := map[string]string{
"uri": u.String(),
}
return json.Marshal(r)
}
// UnmarshalJSON decodes a URI from the {"uri": "<string form>"} shape
// produced by MarshalJSON.
func (u *URI) UnmarshalJSON(text []byte) error {
	var r map[string]string
	if err := json.Unmarshal(text, &r); err != nil {
		return err
	}
	// Assign the parse result unconditionally to match the original
	// semantics (u.U becomes nil when parsing fails).
	parsed, err := url.Parse(r["uri"])
	u.U = parsed
	return err
}
// String returns the full textual form of the URI.
func (u *URI) String() string {
return u.U.String()
}
// Name returns the last element of the URI path.
func (u *URI) Name() string {
return path.Base(u.Path())
}
// Dir returns the parent directory of the URI path.
func (u *URI) Dir() string {
return path.Dir(u.Path())
}
// Elements splits the path into its components; nil for the root path.
func (u *URI) Elements() []string {
res := strings.Split(u.PathTrimmed(), Separator)
if len(res) == 1 && res[0] == "" {
return nil
}
return res
}
// ID returns the identifier embedded in the URI's userinfo section. When
// absent, defaultUid is returned for non-share filesystems, and an empty
// string for the share filesystem.
func (u *URI) ID(defaultUid string) string {
if u.U.User == nil {
if u.FileSystem() != constants.FileSystemShare {
return defaultUid
}
return ""
}
return u.U.User.Username()
}
// Path returns the cleaned, absolute path of the URI (always separator-prefixed).
func (u *URI) Path() string {
p := u.U.Path
if !strings.HasPrefix(u.U.Path, Separator) {
p = Separator + u.U.Path
}
return path.Clean(p)
}
// PathTrimmed returns the path without its leading separator.
func (u *URI) PathTrimmed() string {
return strings.TrimPrefix(u.Path(), Separator)
}
// Password returns the password from the userinfo section, or "" if absent.
func (u *URI) Password() string {
if u.U.User == nil {
return ""
}
pwd, _ := u.U.User.Password()
return pwd
}
// Join returns a copy of the URI with the given elements appended to the
// path; each element is escaped with PathEscape.
func (u *URI) Join(elem ...string) *URI {
newUrl, _ := url.Parse(u.U.String())
return &URI{U: newUrl.JoinPath(lo.Map(elem, func(s string, i int) string {
return PathEscape(s)
})...)}
}
// JoinRaw joins a raw, separator-delimited relative path string onto the URI.
func (u *URI) JoinRaw(elem string) *URI {
return u.Join(strings.Split(strings.TrimPrefix(elem, Separator), Separator)...)
}
// DirUri returns a copy of the URI pointing to the parent directory.
func (u *URI) DirUri() *URI {
newUrl, _ := url.Parse(u.U.String())
newUrl.Path = path.Dir(newUrl.Path)
return &URI{U: newUrl}
}
// Root returns a copy of the URI pointing at the filesystem root, with the
// query stripped.
func (u *URI) Root() *URI {
newUrl, _ := url.Parse(u.U.String())
newUrl.Path = Separator
newUrl.RawQuery = ""
return &URI{U: newUrl}
}
// SetQuery returns a copy of the URI with its raw query replaced by q.
func (u *URI) SetQuery(q string) *URI {
newUrl, _ := url.Parse(u.U.String())
newUrl.RawQuery = q
return &URI{U: newUrl}
}
// IsSame reports whether p refers to the same file as u, comparing
// filesystem, ID (with uid as the default) and path.
func (u *URI) IsSame(p *URI, uid string) bool {
return p.FileSystem() == u.FileSystem() && p.ID(uid) == u.ID(uid) && u.Path() == p.Path()
}
// Rebase returns a new URI with the path rebased to the given base URI. It is
// commonly used in WebDAV address translation with shared folder symlink.
func (u *URI) Rebase(target, base *URI) *URI {
targetPath := target.Path()
basePath := base.Path()
rebasedPath := strings.TrimPrefix(targetPath, basePath)
newUrl, _ := url.Parse(u.U.String())
newUrl.Path = path.Join(newUrl.Path, rebasedPath)
return &URI{U: newUrl}
}
// FileSystem returns the filesystem type encoded in the URI host (lower-cased).
func (u *URI) FileSystem() constants.FileSystemType {
return constants.FileSystemType(strings.ToLower(u.U.Host))
}
// SearchParameters returns the search parameters from the URI. If no search parameters are present, nil is returned.
func (u *URI) SearchParameters() *inventory.SearchFileParameters {
	q := u.U.Query()
	res := &inventory.SearchFileParameters{
		Metadata: make(map[string]string),
	}
	withSearch := false
	if names, ok := q[QuerySearchName]; ok {
		// Only counts as a search when at least one name is supplied.
		withSearch = len(names) > 0
		res.Name = names
	}
	if _, ok := q[QuerySearchNameOpOr]; ok {
		res.NameOperatorOr = true
	}
	if _, ok := q[QuerySearchCaseFolding]; ok {
		res.CaseFolding = true
	}
	if v, ok := q[QuerySearchTypeCategory]; ok {
		res.Category = v[0]
		withSearch = withSearch || len(res.Category) > 0
	}
	if t, ok := q[QuerySearchType]; ok {
		fileType := types.FileTypeFromString(t[0])
		res.Type = &fileType
		withSearch = true
	}
	// Collect metadata filters from "meta_*" query parameters.
	for k, v := range q {
		if strings.HasPrefix(k, QuerySearchMetadataPrefix) {
			res.Metadata[strings.TrimPrefix(k, QuerySearchMetadataPrefix)] = v[0]
			withSearch = true
		}
	}
	if limit, ok := int64Query(q, QuerySearchSizeGte); ok {
		res.SizeGte = limit
		withSearch = true
	}
	if limit, ok := int64Query(q, QuerySearchSizeLte); ok {
		res.SizeLte = limit
		withSearch = true
	}
	if t := unixTimeQuery(q, QuerySearchCreatedGte); t != nil {
		res.CreatedAtGte = t
		withSearch = true
	}
	if t := unixTimeQuery(q, QuerySearchCreatedLte); t != nil {
		res.CreatedAtLte = t
		withSearch = true
	}
	if t := unixTimeQuery(q, QuerySearchUpdatedGte); t != nil {
		res.UpdatedAtGte = t
		withSearch = true
	}
	if t := unixTimeQuery(q, QuerySearchUpdatedLte); t != nil {
		res.UpdatedAtLte = t
		withSearch = true
	}
	if !withSearch {
		return nil
	}
	return res
}

// int64Query parses the first value of key in q as a base-10 int64. The
// second return value reports whether the key was present and valid.
func int64Query(q url.Values, key string) (int64, bool) {
	v, ok := q[key]
	if !ok {
		return 0, false
	}
	limit, err := strconv.ParseInt(v[0], 10, 64)
	if err != nil {
		return 0, false
	}
	return limit, true
}

// unixTimeQuery parses the first value of key in q as a Unix timestamp in
// seconds. It returns nil when the key is absent or invalid.
func unixTimeQuery(q url.Values, key string) *time.Time {
	sec, ok := int64Query(q, key)
	if !ok {
		return nil
	}
	t := time.Unix(sec, 0)
	return &t
}
// EqualOrIsDescendantOf returns true if the URI is equal to the given URI or if it is a descendant of the given URI.
func (u *URI) EqualOrIsDescendantOf(p *URI, uid string) bool {
// Ensure the ancestor path ends with a separator so that "/ab" is not
// treated as a descendant of "/a". (Path() always returns at least "/",
// so indexing the last byte is safe.)
prefix := p.Path()
if prefix[len(prefix)-1] != Separator[0] {
prefix += Separator
}
return p.FileSystem() == u.FileSystem() && p.ID(uid) == u.ID(uid) &&
(strings.HasPrefix(u.Path(), prefix) || u.Path() == p.Path())
}
// SearchCategoryFromString maps a category name to its SearchCategory
// value, defaulting to CategoryUnknown for unrecognized input.
func SearchCategoryFromString(s string) setting.SearchCategory {
	known := map[string]setting.SearchCategory{
		"image":    setting.CategoryImage,
		"video":    setting.CategoryVideo,
		"audio":    setting.CategoryAudio,
		"document": setting.CategoryDocument,
	}
	if c, ok := known[s]; ok {
		return c
	}
	return setting.CategoryUnknown
}
// NewShareUri builds a share-filesystem URI string for the given share ID,
// embedding the password in the userinfo section when one is provided.
func NewShareUri(id, password string) string {
	userInfo := id
	if password != "" {
		userInfo = id + ":" + password
	}
	return fmt.Sprintf("%s://%s@%s", constants.CloudreveScheme, userInfo, constants.FileSystemShare)
}
// PathEscape is same as url.PathEscape, with modifications to incorporate with JS encodeURI:
// encodeURI() escapes all characters except:
//
//	A–Z a–z 0–9 - _ . ! ~ * ' ( )
//	; / ? : @ & = + $ , #
func PathEscape(s string) string {
	// Count bytes needing a %XX expansion to size the output exactly.
	hexCount := 0
	for i := 0; i < len(s); i++ {
		if shouldEscape(s[i]) {
			hexCount++
		}
	}
	// Fast path: nothing to escape, return the input unchanged.
	// (The original had a second, unreachable hexCount==0 branch further
	// down that replaced spaces with '+'; it has been removed.)
	if hexCount == 0 {
		return s
	}
	// Use a stack buffer for small results to avoid a heap allocation.
	var buf [64]byte
	var t []byte
	required := len(s) + 2*hexCount
	if required <= len(buf) {
		t = buf[:required]
	} else {
		t = make([]byte, required)
	}
	j := 0
	for i := 0; i < len(s); i++ {
		switch c := s[i]; {
		case shouldEscape(c):
			t[j] = '%'
			t[j+1] = upperhex[c>>4]
			t[j+2] = upperhex[c&15]
			j += 3
		default:
			t[j] = s[i]
			j++
		}
	}
	return string(t)
}

const upperhex = "0123456789ABCDEF"

// shouldEscape returns true if the specified character should be escaped when
// appearing in a URL string, according to RFC 3986.
//
// Please be informed that for now shouldEscape does not check all
// reserved characters correctly. See golang.org/issue/5684.
func shouldEscape(c byte) bool {
	// §2.3 Unreserved characters (alphanum)
	if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
		return false
	}
	switch c {
	case '-', '_', '.', '~', '!', '*', '\'', '(', ')', ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '#': // §2.3 Unreserved characters (mark)
		return false
	}
	// Everything else must be escaped.
	return true
}