Init V4 community edition (#2265)
* Init V4 community edition * Init V4 community edition
This commit is contained in:
74
pkg/filemanager/chunk/backoff/backoff.go
Normal file
74
pkg/filemanager/chunk/backoff/backoff.go
Normal file
@@ -0,0 +1,74 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Backoff used for retry sleep backoff
type Backoff interface {
	// Next blocks for the backoff interval and reports whether another
	// attempt may be made. It returns false once the retry budget is
	// exhausted; err is the failure that triggered the retry.
	Next(err error) bool
	// Reset restores the backoff to its initial state so it can be reused
	// for a new sequence of attempts.
	Reset()
}
|
||||
|
||||
// ConstantBackoff implements Backoff interface with constant sleep time. If the error
// is retryable and with `RetryAfter` defined, the `RetryAfter` will be used as sleep duration.
type ConstantBackoff struct {
	// Sleep is the fixed duration to wait between attempts.
	Sleep time.Duration
	// Max is the maximum number of retries allowed before Next returns false.
	Max int

	// tried counts how many times Next has been called since the last Reset.
	tried int
}
|
||||
|
||||
func (c *ConstantBackoff) Next(err error) bool {
|
||||
c.tried++
|
||||
if c.tried > c.Max {
|
||||
return false
|
||||
}
|
||||
|
||||
var e *RetryableError
|
||||
if errors.As(err, &e) && e.RetryAfter > 0 {
|
||||
util.Log().Warning("Retryable error %q occurs in backoff, will sleep after %s.", e, e.RetryAfter)
|
||||
time.Sleep(e.RetryAfter)
|
||||
} else {
|
||||
time.Sleep(c.Sleep)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Reset clears the attempt counter so the backoff can be reused for a new
// sequence of retries.
func (c *ConstantBackoff) Reset() {
	c.tried = 0
}
|
||||
|
||||
type RetryableError struct {
|
||||
Err error
|
||||
RetryAfter time.Duration
|
||||
}
|
||||
|
||||
// NewRetryableErrorFromHeader constructs a new RetryableError from http response header
|
||||
// and existing error.
|
||||
func NewRetryableErrorFromHeader(err error, header http.Header) *RetryableError {
|
||||
retryAfter := header.Get("retry-after")
|
||||
if retryAfter == "" {
|
||||
retryAfter = "0"
|
||||
}
|
||||
|
||||
res := &RetryableError{
|
||||
Err: err,
|
||||
}
|
||||
|
||||
if retryAfterSecond, err := strconv.ParseInt(retryAfter, 10, 64); err == nil {
|
||||
res.RetryAfter = time.Duration(retryAfterSecond) * time.Second
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (e *RetryableError) Error() string {
|
||||
return fmt.Sprintf("retryable error with retry-after=%s: %s", e.RetryAfter, e.Err)
|
||||
}
|
||||
170
pkg/filemanager/chunk/chunk.go
Normal file
170
pkg/filemanager/chunk/chunk.go
Normal file
@@ -0,0 +1,170 @@
|
||||
package chunk
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// bufferTempPattern is the os.CreateTemp name pattern for retry-buffer files.
const bufferTempPattern = "cdChunk.*.tmp"

// ChunkProcessFunc callback function for processing a chunk
type ChunkProcessFunc func(c *ChunkGroup, chunk io.Reader) error

// ChunkGroup manage groups of chunks
type ChunkGroup struct {
	file              *fs.UploadRequest // source stream being chunked
	chunkSize         int64             // size of each chunk in bytes (last chunk may be smaller)
	backoff           backoff.Backoff   // retry backoff shared by all chunks; reset on each Next()
	enableRetryBuffer bool              // tee non-seekable sources to a temp file so chunks can be retried
	l                 logging.Logger

	currentIndex int      // zero-based index of the current chunk; -1 before the first Next()
	chunkNum     int64    // total number of chunks
	bufferTemp   *os.File // retry buffer for the current chunk, nil when unused
	tempPath     string   // directory (under the data path) for retry buffer files
}
|
||||
|
||||
func NewChunkGroup(file *fs.UploadRequest, chunkSize int64, backoff backoff.Backoff, useBuffer bool, l logging.Logger, tempPath string) *ChunkGroup {
|
||||
c := &ChunkGroup{
|
||||
file: file,
|
||||
chunkSize: chunkSize,
|
||||
backoff: backoff,
|
||||
currentIndex: -1,
|
||||
enableRetryBuffer: useBuffer,
|
||||
l: l,
|
||||
tempPath: tempPath,
|
||||
}
|
||||
|
||||
if c.chunkSize == 0 {
|
||||
c.chunkSize = c.file.Props.Size
|
||||
}
|
||||
|
||||
if c.file.Props.Size == 0 {
|
||||
c.chunkNum = 1
|
||||
} else {
|
||||
c.chunkNum = c.file.Props.Size / c.chunkSize
|
||||
if c.file.Props.Size%c.chunkSize != 0 {
|
||||
c.chunkNum++
|
||||
}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// TempAvailable returns if current chunk temp file is available to be read
|
||||
func (c *ChunkGroup) TempAvailable() bool {
|
||||
if c.bufferTemp != nil {
|
||||
state, _ := c.bufferTemp.Stat()
|
||||
return state != nil && state.Size() == c.Length()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Process a chunk with retry logic. The chunk content is limited to the
// current chunk's length; on failure, the chunk is retried (driven by the
// backoff policy) as long as the source can be rewound — either because the
// source is seekable or because a complete retry buffer exists on disk.
func (c *ChunkGroup) Process(processor ChunkProcessFunc) error {
	reader := io.LimitReader(c.file, c.Length())

	// If useBuffer is enabled, tee the reader to a temp file
	// so a non-seekable source can still be replayed on retry.
	// The CreateTemp error is deliberately ignored: on failure bufferTemp
	// stays nil and processing proceeds without retry buffering.
	if c.enableRetryBuffer && c.bufferTemp == nil && !c.file.Seekable() {
		c.bufferTemp, _ = os.CreateTemp(util.DataPath(c.tempPath), bufferTempPattern)
		reader = io.TeeReader(reader, c.bufferTemp)
	}

	if c.bufferTemp != nil {
		// Clean up the buffer file once this chunk (including any recursive
		// retries, which register their own deferred cleanup) is finished.
		defer func() {
			if c.bufferTemp != nil {
				c.bufferTemp.Close()
				os.Remove(c.bufferTemp.Name())
				c.bufferTemp = nil
			}
		}()

		// if temp buffer file is available, use it
		if c.TempAvailable() {
			if _, err := c.bufferTemp.Seek(0, io.SeekStart); err != nil {
				return fmt.Errorf("failed to seek temp file back to chunk start: %w", err)
			}

			c.l.Debug("Chunk %d will be read from temp file %q.", c.Index(), c.bufferTemp.Name())
			reader = io.NopCloser(c.bufferTemp)
		}
	}

	err := processor(c, reader)
	if err != nil {
		// Drain the remainder so the tee can finish filling the retry buffer.
		if c.enableRetryBuffer {
			request.BlackHole(reader)
		}

		// NOTE(review): this compares err directly against context.Canceled;
		// a wrapped cancellation would not match — consider errors.Is.
		// Retry only when the chunk can be replayed and backoff still allows it.
		if err != context.Canceled && (c.file.Seekable() || c.TempAvailable()) && c.backoff.Next(err) {
			if c.file.Seekable() {
				if _, seekErr := c.file.Seek(c.Start(), io.SeekStart); seekErr != nil {
					return fmt.Errorf("failed to seek back to chunk start: %w, last error: %s", seekErr, err)
				}
			}

			c.l.Debug("Retrying chunk %d, last error: %s", c.currentIndex, err)
			return c.Process(processor)
		}

		return err
	}

	c.l.Debug("Chunk %d processed", c.currentIndex)
	return nil
}
|
||||
|
||||
// Start returns the byte index of current chunk
|
||||
func (c *ChunkGroup) Start() int64 {
|
||||
return int64(int64(c.Index()) * c.chunkSize)
|
||||
}
|
||||
|
||||
// Total returns the total length
|
||||
func (c *ChunkGroup) Total() int64 {
|
||||
return int64(c.file.Props.Size)
|
||||
}
|
||||
|
||||
// Num returns the total chunk number
|
||||
func (c *ChunkGroup) Num() int {
|
||||
return int(c.chunkNum)
|
||||
}
|
||||
|
||||
// RangeHeader returns header value of Content-Range
|
||||
func (c *ChunkGroup) RangeHeader() string {
|
||||
return fmt.Sprintf("bytes %d-%d/%d", c.Start(), c.Start()+c.Length()-1, c.Total())
|
||||
}
|
||||
|
||||
// Index returns current chunk index, starts from 0
|
||||
func (c *ChunkGroup) Index() int {
|
||||
return c.currentIndex
|
||||
}
|
||||
|
||||
// Next switch to next chunk, returns whether all chunks are processed
|
||||
func (c *ChunkGroup) Next() bool {
|
||||
c.currentIndex++
|
||||
c.backoff.Reset()
|
||||
return c.currentIndex < int(c.chunkNum)
|
||||
}
|
||||
|
||||
// Length returns the length of current chunk
|
||||
func (c *ChunkGroup) Length() int64 {
|
||||
contentLength := c.chunkSize
|
||||
if c.Index() == int(c.chunkNum-1) {
|
||||
contentLength = c.file.Props.Size - c.chunkSize*(c.chunkNum-1)
|
||||
}
|
||||
|
||||
return int64(contentLength)
|
||||
}
|
||||
|
||||
// IsLast returns if current chunk is the last one
|
||||
func (c *ChunkGroup) IsLast() bool {
|
||||
return c.Index() == int(c.chunkNum-1)
|
||||
}
|
||||
588
pkg/filemanager/driver/cos/cos.go
Normal file
588
pkg/filemanager/driver/cos/cos.go
Normal file
@@ -0,0 +1,588 @@
|
||||
package cos
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/google/go-querystring/query"
|
||||
"github.com/samber/lo"
|
||||
cossdk "github.com/tencentyun/cos-go-sdk-v5"
|
||||
)
|
||||
|
||||
// UploadPolicy is the Tencent Cloud COS upload policy document.
type UploadPolicy struct {
	Expiration string        `json:"expiration"`
	Conditions []interface{} `json:"conditions"`
}

// MetaData holds object metadata read back from COS.
type MetaData struct {
	Size        uint64
	CallbackKey string
	CallbackURL string
}

// urlOption carries query parameters appended to signed/unsigned object URLs.
type urlOption struct {
	Speed              int64   `url:"x-cos-traffic-limit,omitempty"`
	ContentDescription string  `url:"response-content-disposition,omitempty"`
	Exif               *string `url:"exif,omitempty"`
	CiProcess          string  `url:"ci-process,omitempty"`
}

type (
	// CosParts records one completed multipart-upload part.
	CosParts struct {
		ETag       string
		PartNumber int
	}
)

// Driver is the storage driver for Tencent Cloud COS.
type Driver struct {
	policy     *ent.StoragePolicy
	client     *cossdk.Client
	settings   setting.Provider
	config     conf.ConfigProvider
	httpClient request.Client
	l          logging.Logger
	mime       mime.MimeDetector

	// chunkSize is the multipart upload part size in bytes.
	chunkSize int64
}
|
||||
|
||||
const (
	// MultiPartUploadThreshold is the size above which server-side uploads
	// switch to multipart upload.
	MultiPartUploadThreshold int64 = 5 * (1 << 30) // 5GB

	// maxDeleteBatch is the COS DeleteMulti per-request object limit.
	maxDeleteBatch = 1000
	// chunkRetrySleep is the constant backoff between chunk retries.
	chunkRetrySleep = time.Duration(5) * time.Second
	// overwriteOptionHeader controls whether COS rejects overwriting objects.
	overwriteOptionHeader = "x-cos-forbid-overwrite"
	partNumberParam       = "partNumber"
	uploadIdParam         = "uploadId"
	contentTypeHeader     = "Content-Type"
	contentLengthHeader   = "Content-Length"
)
|
||||
|
||||
var (
	// features is the static capability set advertised by this driver.
	features = &boolset.BooleanSet{}
)

func init() {
	// Exclude host/origin from SDK request signing.
	cossdk.SetNeedSignHeaders("host", false)
	cossdk.SetNeedSignHeaders("origin", false)
	boolset.Sets(map[driver.HandlerCapability]bool{
		driver.HandlerCapabilityUploadSentinelRequired: true,
	}, features)
}
|
||||
|
||||
// New constructs a COS Driver for the given storage policy. It falls back to
// a 25 MB chunk size when the policy does not specify one, and returns an
// error if the policy's bucket server URL cannot be parsed.
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
	config conf.ConfigProvider, l logging.Logger, mime mime.MimeDetector) (*Driver, error) {
	chunkSize := policy.Settings.ChunkSize
	if policy.Settings.ChunkSize == 0 {
		chunkSize = 25 << 20 // 25 MB
	}

	driver := &Driver{
		policy:     policy,
		settings:   settings,
		chunkSize:  chunkSize,
		config:     config,
		l:          l,
		mime:       mime,
		httpClient: request.NewClient(config, request.WithLogger(l)),
	}

	u, err := url.Parse(policy.Server)
	if err != nil {
		return nil, fmt.Errorf("failed to parse COS bucket server url: %w", err)
	}
	// The SDK client signs requests with the policy's credentials.
	driver.client = cossdk.NewClient(&cossdk.BaseURL{BucketURL: u}, &http.Client{
		Transport: &cossdk.AuthorizationTransport{
			SecretID:  policy.AccessKey,
			SecretKey: policy.SecretKey,
		},
	})

	return driver, nil
}
|
||||
|
||||
//
|
||||
//// List 列出COS文件
|
||||
//func (handler Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) {
|
||||
// // 初始化列目录参数
|
||||
// opt := &cossdk.BucketGetOptions{
|
||||
// Prefix: strings.TrimPrefix(base, "/"),
|
||||
// EncodingType: "",
|
||||
// MaxKeys: 1000,
|
||||
// }
|
||||
// // 是否为递归列出
|
||||
// if !recursive {
|
||||
// opt.Delimiter = "/"
|
||||
// }
|
||||
// // 手动补齐结尾的slash
|
||||
// if opt.Prefix != "" {
|
||||
// opt.Prefix += "/"
|
||||
// }
|
||||
//
|
||||
// var (
|
||||
// marker string
|
||||
// objects []cossdk.Object
|
||||
// commons []string
|
||||
// )
|
||||
//
|
||||
// for {
|
||||
// res, _, err := handler.client.Bucket.Get(ctx, opt)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// objects = append(objects, res.Contents...)
|
||||
// commons = append(commons, res.CommonPrefixes...)
|
||||
// // 如果本次未列取完,则继续使用marker获取结果
|
||||
// marker = res.NextMarker
|
||||
// // marker 为空时结果列取完毕,跳出
|
||||
// if marker == "" {
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// // 处理列取结果
|
||||
// res := make([]response.Object, 0, len(objects)+len(commons))
|
||||
// // 处理目录
|
||||
// for _, object := range commons {
|
||||
// rel, err := filepath.Rel(opt.Prefix, object)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(object),
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: 0,
|
||||
// IsDir: true,
|
||||
// LastModify: time.Now(),
|
||||
// })
|
||||
// }
|
||||
// // 处理文件
|
||||
// for _, object := range objects {
|
||||
// rel, err := filepath.Rel(opt.Prefix, object.Key)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(object.Key),
|
||||
// Source: object.Key,
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: uint64(object.Size),
|
||||
// IsDir: false,
|
||||
// LastModify: time.Now(),
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// return res, nil
|
||||
//
|
||||
//}
|
||||
|
||||
// CORS creates a permissive cross-origin policy on the bucket so browser
// clients can upload/download directly.
func (handler Driver) CORS() error {
	_, err := handler.client.Bucket.PutCORS(context.Background(), &cossdk.BucketPutCORSOptions{
		Rules: []cossdk.BucketCORSRule{{
			AllowedMethods: []string{
				"GET",
				"POST",
				"PUT",
				"DELETE",
				"HEAD",
			},
			AllowedOrigins: []string{"*"},
			AllowedHeaders: []string{"*"},
			MaxAgeSeconds:  3600,
			ExposeHeaders:  []string{"ETag"},
		}},
	})

	return err
}
|
||||
|
||||
// Open is not supported by the COS driver; objects are accessed via URLs.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	return nil, errors.New("not implemented")
}
|
||||
|
||||
// Put 将文件流保存到指定目录
|
||||
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
|
||||
defer file.Close()
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
// 是否允许覆盖
|
||||
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
|
||||
opt := &cossdk.ObjectPutHeaderOptions{
|
||||
ContentType: mimeType,
|
||||
XOptionHeader: &http.Header{
|
||||
overwriteOptionHeader: []string{fmt.Sprintf("%t", overwrite)},
|
||||
},
|
||||
}
|
||||
|
||||
// 小文件直接上传
|
||||
if file.Props.Size < MultiPartUploadThreshold {
|
||||
_, err := handler.client.Object.Put(ctx, file.Props.SavePath, file, &cossdk.ObjectPutOptions{
|
||||
ObjectPutHeaderOptions: opt,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
imur, _, err := handler.client.Object.InitiateMultipartUpload(ctx, file.Props.SavePath, &cossdk.InitiateMultipartUploadOptions{
|
||||
ObjectPutHeaderOptions: opt,
|
||||
})
|
||||
|
||||
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{
|
||||
Max: handler.settings.ChunkRetryLimit(ctx),
|
||||
Sleep: chunkRetrySleep,
|
||||
}, handler.settings.UseChunkBuffer(ctx), handler.l, handler.settings.TempPath(ctx))
|
||||
|
||||
parts := make([]CosParts, 0, chunks.Num())
|
||||
uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
|
||||
res, err := handler.client.Object.UploadPart(ctx, file.Props.SavePath, imur.UploadID, current.Index()+1, content, &cossdk.ObjectUploadPartOptions{
|
||||
ContentLength: current.Length(),
|
||||
})
|
||||
if err == nil {
|
||||
parts = append(parts, CosParts{
|
||||
ETag: res.Header.Get("ETag"),
|
||||
PartNumber: current.Index() + 1,
|
||||
})
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for chunks.Next() {
|
||||
if err := chunks.Process(uploadFunc); err != nil {
|
||||
handler.cancelUpload(file.Props.SavePath, imur.UploadID)
|
||||
return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
|
||||
}
|
||||
}
|
||||
|
||||
_, _, err = handler.client.Object.CompleteMultipartUpload(ctx, file.Props.SavePath, imur.UploadID, &cossdk.CompleteMultipartUploadOptions{
|
||||
Parts: lo.Map(parts, func(v CosParts, i int) cossdk.Object {
|
||||
return cossdk.Object{
|
||||
ETag: v.ETag,
|
||||
PartNumber: v.PartNumber,
|
||||
}
|
||||
}),
|
||||
XOptionHeader: &http.Header{
|
||||
overwriteOptionHeader: []string{fmt.Sprintf("%t", overwrite)},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
handler.cancelUpload(file.Props.SavePath, imur.UploadID)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete 删除一个或多个文件,
|
||||
// 返回未删除的文件,及遇到的最后一个错误
|
||||
func (handler Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
|
||||
groups := lo.Chunk(files, maxDeleteBatch)
|
||||
failed := make([]string, 0)
|
||||
var lastError error
|
||||
for index, group := range groups {
|
||||
handler.l.Debug("Process delete group #%d: %v", index, group)
|
||||
res, _, err := handler.client.Object.DeleteMulti(ctx,
|
||||
&cossdk.ObjectDeleteMultiOptions{
|
||||
Objects: lo.Map(group, func(item string, index int) cossdk.Object {
|
||||
return cossdk.Object{Key: item}
|
||||
}),
|
||||
Quiet: true,
|
||||
})
|
||||
if err != nil {
|
||||
lastError = err
|
||||
failed = append(failed, group...)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, v := range res.Errors {
|
||||
handler.l.Debug("Failed to delete file: %s, Code:%s, Message:%s", v.Key, v.Code, v.Key)
|
||||
failed = append(failed, v.Key)
|
||||
}
|
||||
}
|
||||
|
||||
if len(failed) > 0 && lastError == nil {
|
||||
lastError = fmt.Errorf("failed to delete files: %v", failed)
|
||||
}
|
||||
|
||||
return failed, lastError
|
||||
}
|
||||
|
||||
// Thumb returns a URL that serves a thumbnail of the entity, using COS's
// imageMogr2 processing parameter appended to a (possibly signed) source URL.
func (handler Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
	w, h := handler.settings.ThumbSize(ctx)
	thumbParam := fmt.Sprintf("imageMogr2/thumbnail/%dx%d", w, h)

	source, err := handler.signSourceURL(
		ctx,
		e.Source(),
		expire,
		&urlOption{},
	)
	if err != nil {
		return "", err
	}

	// Append the processing parameter to the already-signed URL.
	thumbURL, _ := url.Parse(source)
	thumbQuery := thumbURL.Query()
	thumbQuery.Add(thumbParam, "")
	thumbURL.RawQuery = thumbQuery.Encode()

	return thumbURL.String(), nil
}
|
||||
|
||||
// Source returns an external (possibly signed) URL for the entity, applying
// the optional traffic-limit and download-disposition settings.
func (handler Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
	// Apply per-request options.
	options := urlOption{}
	if args.Speed > 0 {
		// Clamp to COS's accepted x-cos-traffic-limit range (bit/s).
		if args.Speed < 819200 {
			args.Speed = 819200
		}
		if args.Speed > 838860800 {
			args.Speed = 838860800
		}
		options.Speed = args.Speed
	}
	if args.IsDownload {
		// Force attachment disposition with a UTF-8 encoded filename.
		encodedFilename := url.PathEscape(args.DisplayName)
		options.ContentDescription = fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
			encodedFilename, encodedFilename)
	}

	return handler.signSourceURL(ctx, e.Source(), args.Expire, &options)
}
|
||||
|
||||
// signSourceURL builds a URL for the object at path. Public buckets (or
// source-authenticated custom proxies) get a plain URL; private buckets get a
// presigned URL valid until expire (or ~20 years when expire is nil).
func (handler Driver) signSourceURL(ctx context.Context, path string, expire *time.Time, options *urlOption) (string, error) {
	// Public buckets need no signature.
	if !handler.policy.IsPrivate || (handler.policy.Settings.SourceAuth && handler.policy.Settings.CustomProxy) {
		file, err := url.Parse(handler.policy.Server)
		if err != nil {
			return "", err
		}

		file.Path = path

		// Unsigned URLs cannot set response headers, so drop the disposition.
		options.ContentDescription = ""

		optionQuery, err := query.Values(*options)
		if err != nil {
			return "", err
		}
		file.RawQuery = optionQuery.Encode()

		return file.String(), nil
	}

	ttl := time.Duration(0)
	if expire != nil {
		ttl = time.Until(*expire)
	} else {
		// 20 years for permanent link
		ttl = time.Duration(24) * time.Hour * 365 * 20
	}

	presignedURL, err := handler.client.Object.GetPresignedURL(ctx, http.MethodGet, path,
		handler.policy.AccessKey, handler.policy.SecretKey, ttl, options)
	if err != nil {
		return "", err
	}

	return presignedURL.String(), nil
}
|
||||
|
||||
// Token 获取上传策略和认证Token
|
||||
func (handler Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
// 生成回调地址
|
||||
siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
|
||||
// 在从机端创建上传会话
|
||||
uploadSession.ChunkSize = handler.chunkSize
|
||||
uploadSession.Callback = routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeCos, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
// 初始化分片上传
|
||||
opt := &cossdk.ObjectPutHeaderOptions{
|
||||
ContentType: mimeType,
|
||||
XOptionHeader: &http.Header{
|
||||
overwriteOptionHeader: []string{"true"},
|
||||
},
|
||||
}
|
||||
|
||||
imur, _, err := handler.client.Object.InitiateMultipartUpload(ctx, file.Props.SavePath, &cossdk.InitiateMultipartUploadOptions{
|
||||
ObjectPutHeaderOptions: opt,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize multipart upload: %w", err)
|
||||
}
|
||||
uploadSession.UploadID = imur.UploadID
|
||||
|
||||
// 为每个分片签名上传 URL
|
||||
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{}, false, handler.l, "")
|
||||
urls := make([]string, chunks.Num())
|
||||
ttl := time.Until(uploadSession.Props.ExpireAt)
|
||||
for chunks.Next() {
|
||||
err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error {
|
||||
signedURL, err := handler.client.Object.GetPresignedURL(
|
||||
ctx,
|
||||
http.MethodPut,
|
||||
file.Props.SavePath,
|
||||
handler.policy.AccessKey,
|
||||
handler.policy.SecretKey,
|
||||
ttl,
|
||||
&cossdk.PresignedURLOptions{
|
||||
Query: &url.Values{
|
||||
partNumberParam: []string{fmt.Sprintf("%d", c.Index()+1)},
|
||||
uploadIdParam: []string{imur.UploadID},
|
||||
},
|
||||
Header: &http.Header{
|
||||
contentTypeHeader: []string{"application/octet-stream"},
|
||||
contentLengthHeader: []string{fmt.Sprintf("%d", c.Length())},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
urls[c.Index()] = signedURL.String()
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// 签名完成分片上传的URL
|
||||
completeURL, err := handler.client.Object.GetPresignedURL(
|
||||
ctx,
|
||||
http.MethodPost,
|
||||
file.Props.SavePath,
|
||||
handler.policy.AccessKey,
|
||||
handler.policy.SecretKey,
|
||||
time.Until(uploadSession.Props.ExpireAt),
|
||||
&cossdk.PresignedURLOptions{
|
||||
Query: &url.Values{
|
||||
uploadIdParam: []string{imur.UploadID},
|
||||
},
|
||||
Header: &http.Header{
|
||||
overwriteOptionHeader: []string{"true"},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &fs.UploadCredential{
|
||||
UploadID: imur.UploadID,
|
||||
UploadURLs: urls,
|
||||
CompleteURL: completeURL.String(),
|
||||
SessionID: uploadSession.Props.UploadSessionID,
|
||||
ChunkSize: handler.chunkSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CancelToken aborts the multipart upload associated with the session,
// invalidating its previously issued credentials.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	_, err := handler.client.Object.AbortMultipartUpload(ctx, uploadSession.Props.SavePath, uploadSession.UploadID)
	return err
}
|
||||
|
||||
// CompleteUpload verifies a client-side upload after the sentinel task fires:
// it checks that the stored object's size matches the session's expected size.
// Sessions without a sentinel task (server-side uploads) are skipped.
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	if session.SentinelTaskID == 0 {
		return nil
	}

	// Make sure uploaded file size is correct
	res, err := handler.client.Object.Head(ctx, session.Props.SavePath, &cossdk.ObjectHeadOptions{})
	if err != nil {
		return fmt.Errorf("failed to get uploaded file size: %w", err)
	}

	if res.ContentLength != session.Props.Size {
		return serializer.NewError(
			serializer.CodeMetaMismatch,
			fmt.Sprintf("File size not match, expected: %d, actual: %d", session.Props.Size, res.ContentLength),
			nil,
		)
	}
	return nil
}
|
||||
|
||||
// Capabilities reports the driver's feature set and the policy-configured
// media/thumbnail options. Media metadata extensions are cleared when native
// media processing is disabled for the policy.
func (handler *Driver) Capabilities() *driver.Capabilities {
	mediaMetaExts := handler.policy.Settings.MediaMetaExts
	if !handler.policy.Settings.NativeMediaProcessing {
		mediaMetaExts = nil
	}
	return &driver.Capabilities{
		StaticFeatures:         features,
		MediaMetaSupportedExts: mediaMetaExts,
		MediaMetaProxy:         handler.policy.Settings.MediaMetaGeneratorProxy,
		ThumbSupportedExts:     handler.policy.Settings.ThumbExts,
		ThumbProxy:             handler.policy.Settings.ThumbGeneratorProxy,
		ThumbMaxSize:           handler.policy.Settings.ThumbMaxSize,
		ThumbSupportAllExts:    handler.policy.Settings.ThumbSupportAllExts,
	}
}
|
||||
|
||||
// Meta fetches object metadata (size and callback headers) via a HEAD request.
func (handler Driver) Meta(ctx context.Context, path string) (*MetaData, error) {
	res, err := handler.client.Object.Head(ctx, path, &cossdk.ObjectHeadOptions{})
	if err != nil {
		return nil, err
	}
	return &MetaData{
		Size:        uint64(res.ContentLength),
		CallbackKey: res.Header.Get("x-cos-meta-key"),
		CallbackURL: res.Header.Get("x-cos-meta-callback"),
	}, nil
}
|
||||
|
||||
// MediaMeta extracts media metadata for the object: EXIF for supported image
// extensions, stream/format info (via CI videoinfo) for everything else.
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
	if util.ContainsString(supportedImageExt, ext) {
		return handler.extractImageMeta(ctx, path)
	}

	return handler.extractStreamMeta(ctx, path)
}
|
||||
|
||||
// LocalPath returns "" since COS objects have no local filesystem path.
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	return ""
}
|
||||
|
||||
func (handler *Driver) cancelUpload(path, uploadId string) {
|
||||
if _, err := handler.client.Object.AbortMultipartUpload(context.Background(), path, uploadId); err != nil {
|
||||
handler.l.Warning("failed to abort multipart upload: %s", err)
|
||||
}
|
||||
}
|
||||
294
pkg/filemanager/driver/cos/media.go
Normal file
294
pkg/filemanager/driver/cos/media.go
Normal file
@@ -0,0 +1,294 @@
|
||||
package cos
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/samber/lo"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// mediaInfoTTL is how long the signed media-info URL stays valid.
	mediaInfoTTL = time.Duration(10) * time.Minute
	// videoInfo is the CI process name for stream/format extraction.
	videoInfo = "videoinfo"
)

var (
	// supportedImageExt lists extensions handled via EXIF extraction.
	supportedImageExt = []string{"jpg", "jpeg", "png", "gif", "bmp", "webp", "tiff", "heic", "heif"}
)
|
||||
|
||||
type (
	// ImageProp is a single EXIF property value as returned by COS CI.
	ImageProp struct {
		Value string `json:"val"`
	}
	// ImageInfo maps EXIF property names to their values.
	ImageInfo map[string]ImageProp
	// Error is the XML error payload returned by COS CI endpoints.
	Error struct {
		XMLName   xml.Name `xml:"Error"`
		Code      string   `xml:"Code"`
		Message   string   `xml:"Message"`
		RequestId string   `xml:"RequestId"`
	}
	// Video describes one video stream in the CI videoinfo response.
	Video struct {
		Index          int    `xml:"Index"`
		CodecName      string `xml:"CodecName"`
		CodecLongName  string `xml:"CodecLongName"`
		CodecTimeBase  string `xml:"CodecTimeBase"`
		CodecTagString string `xml:"CodecTagString"`
		CodecTag       string `xml:"CodecTag"`
		ColorPrimaries string `xml:"ColorPrimaries"`
		ColorRange     string `xml:"ColorRange"`
		ColorTransfer  string `xml:"ColorTransfer"`
		Profile        string `xml:"Profile"`
		Width          int    `xml:"Width"`
		Height         int    `xml:"Height"`
		HasBFrame      string `xml:"HasBFrame"`
		RefFrames      string `xml:"RefFrames"`
		Sar            string `xml:"Sar"`
		Dar            string `xml:"Dar"`
		PixFormat      string `xml:"PixFormat"`
		FieldOrder     string `xml:"FieldOrder"`
		Level          string `xml:"Level"`
		Fps            string `xml:"Fps"`
		AvgFps         string `xml:"AvgFps"`
		Timebase       string `xml:"Timebase"`
		StartTime      string `xml:"StartTime"`
		Duration       string `xml:"Duration"`
		Bitrate        string `xml:"Bitrate"`
		NumFrames      string `xml:"NumFrames"`
		Language       string `xml:"Language"`
	}
	// Audio describes one audio stream in the CI videoinfo response.
	Audio struct {
		Index          int    `xml:"Index"`
		CodecName      string `xml:"CodecName"`
		CodecLongName  string `xml:"CodecLongName"`
		CodecTimeBase  string `xml:"CodecTimeBase"`
		CodecTagString string `xml:"CodecTagString"`
		CodecTag       string `xml:"CodecTag"`
		SampleFmt      string `xml:"SampleFmt"`
		SampleRate     string `xml:"SampleRate"`
		Channel        string `xml:"Channel"`
		ChannelLayout  string `xml:"ChannelLayout"`
		Timebase       string `xml:"Timebase"`
		StartTime      string `xml:"StartTime"`
		Duration       string `xml:"Duration"`
		Bitrate        string `xml:"Bitrate"`
		Language       string `xml:"Language"`
	}
	// Subtitle describes one subtitle stream in the CI videoinfo response.
	Subtitle struct {
		Index    string `xml:"Index"`
		Language string `xml:"Language"`
	}
	// Response is the top-level CI videoinfo XML document.
	Response struct {
		XMLName   xml.Name `xml:"Response"`
		MediaInfo struct {
			Stream struct {
				Video    []Video    `xml:"Video"`
				Audio    []Audio    `xml:"Audio"`
				Subtitle []Subtitle `xml:"Subtitle"`
			} `xml:"Stream"`
			Format struct {
				NumStream      string `xml:"NumStream"`
				NumProgram     string `xml:"NumProgram"`
				FormatName     string `xml:"FormatName"`
				FormatLongName string `xml:"FormatLongName"`
				StartTime      string `xml:"StartTime"`
				Duration       string `xml:"Duration"`
				Bitrate        string `xml:"Bitrate"`
				Size           string `xml:"Size"`
			} `xml:"Format"`
		} `xml:"MediaInfo"`
	}
)
|
||||
|
||||
func (handler *Driver) extractStreamMeta(ctx context.Context, path string) ([]driver.MediaMeta, error) {
|
||||
resp, err := handler.extractMediaInfo(ctx, path, &urlOption{CiProcess: videoInfo})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var info Response
|
||||
if err := xml.Unmarshal([]byte(resp), &info); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal media info: %w", err)
|
||||
}
|
||||
|
||||
streams := lo.Map(info.MediaInfo.Stream.Video, func(stream Video, index int) mediameta.Stream {
|
||||
return mediameta.Stream{
|
||||
Index: stream.Index,
|
||||
CodecName: stream.CodecName,
|
||||
CodecLongName: stream.CodecLongName,
|
||||
CodecType: "video",
|
||||
Width: stream.Width,
|
||||
Height: stream.Height,
|
||||
Bitrate: stream.Bitrate,
|
||||
}
|
||||
})
|
||||
streams = append(streams, lo.Map(info.MediaInfo.Stream.Audio, func(stream Audio, index int) mediameta.Stream {
|
||||
return mediameta.Stream{
|
||||
Index: stream.Index,
|
||||
CodecName: stream.CodecName,
|
||||
CodecLongName: stream.CodecLongName,
|
||||
CodecType: "audio",
|
||||
Bitrate: stream.Bitrate,
|
||||
}
|
||||
})...)
|
||||
|
||||
metas := make([]driver.MediaMeta, 0)
|
||||
metas = append(metas, mediameta.ProbeMetaTransform(&mediameta.FFProbeMeta{
|
||||
Format: &mediameta.Format{
|
||||
FormatName: info.MediaInfo.Format.FormatName,
|
||||
FormatLongName: info.MediaInfo.Format.FormatLongName,
|
||||
Duration: info.MediaInfo.Format.Duration,
|
||||
Bitrate: info.MediaInfo.Format.Bitrate,
|
||||
},
|
||||
Streams: streams,
|
||||
})...)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (handler *Driver) extractImageMeta(ctx context.Context, path string) ([]driver.MediaMeta, error) {
|
||||
exif := ""
|
||||
resp, err := handler.extractMediaInfo(ctx, path, &urlOption{
|
||||
Exif: &exif,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var imageInfo ImageInfo
|
||||
if err := json.Unmarshal([]byte(resp), &imageInfo); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal media info: %w", err)
|
||||
}
|
||||
|
||||
metas := make([]driver.MediaMeta, 0)
|
||||
exifMap := lo.MapEntries(imageInfo, func(key string, value ImageProp) (string, string) {
|
||||
return key, value.Value
|
||||
})
|
||||
metas = append(metas, mediameta.ExtractExifMap(exifMap, time.Time{})...)
|
||||
metas = append(metas, parseGpsInfo(imageInfo)...)
|
||||
for i := 0; i < len(metas); i++ {
|
||||
metas[i].Type = driver.MetaTypeExif
|
||||
}
|
||||
|
||||
return metas, nil
|
||||
}
|
||||
|
||||
// extractMediaInfo Sends API calls to COS service to extract media info.
|
||||
func (handler *Driver) extractMediaInfo(ctx context.Context, path string, opt *urlOption) (string, error) {
|
||||
mediaInfoExpire := time.Now().Add(mediaInfoTTL)
|
||||
thumbURL, err := handler.signSourceURL(
|
||||
ctx,
|
||||
path,
|
||||
&mediaInfoExpire,
|
||||
opt,
|
||||
)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to sign media info url: %w", err)
|
||||
}
|
||||
|
||||
resp, err := handler.httpClient.
|
||||
Request(http.MethodGet, thumbURL, nil, request.WithContext(ctx)).
|
||||
CheckHTTPResponse(http.StatusOK).
|
||||
GetResponseIgnoreErr()
|
||||
if err != nil {
|
||||
return "", handleCosError(resp, err)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func parseGpsInfo(imageInfo ImageInfo) []driver.MediaMeta {
|
||||
latitude := imageInfo["GPSLatitude"] // 31deg 16.26808'
|
||||
longitude := imageInfo["GPSLongitude"] // 120deg 42.91039'
|
||||
latRef := imageInfo["GPSLatitudeRef"] // North
|
||||
lonRef := imageInfo["GPSLongitudeRef"] // East
|
||||
|
||||
// Make sure all value exist in map
|
||||
if latitude.Value == "" || longitude.Value == "" || latRef.Value == "" || lonRef.Value == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
lat := parseRawGPS(latitude.Value, latRef.Value)
|
||||
lon := parseRawGPS(longitude.Value, lonRef.Value)
|
||||
if !math.IsNaN(lat) && !math.IsNaN(lon) {
|
||||
lat, lng := mediameta.NormalizeGPS(lat, lon)
|
||||
return []driver.MediaMeta{{
|
||||
Key: mediameta.GpsLat,
|
||||
Value: fmt.Sprintf("%f", lat),
|
||||
}, {
|
||||
Key: mediameta.GpsLng,
|
||||
Value: fmt.Sprintf("%f", lng),
|
||||
}}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseRawGPS(gpsStr string, ref string) float64 {
|
||||
elem := strings.Split(gpsStr, " ")
|
||||
if len(elem) < 1 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var (
|
||||
deg float64
|
||||
minutes float64
|
||||
seconds float64
|
||||
)
|
||||
|
||||
deg = getGpsElemValue(elem[0])
|
||||
if len(elem) >= 2 {
|
||||
minutes = getGpsElemValue(elem[1])
|
||||
}
|
||||
if len(elem) >= 3 {
|
||||
seconds = getGpsElemValue(elem[2])
|
||||
}
|
||||
|
||||
decimal := deg + minutes/60.0 + seconds/3600.0
|
||||
|
||||
if ref == "S" || ref == "W" {
|
||||
return -decimal
|
||||
}
|
||||
|
||||
return decimal
|
||||
}
|
||||
|
||||
// getGpsElemValue parses a single EXIF rational component of the form
// "numerator/denominator" into a float. Malformed input, extra separators,
// or a zero denominator all yield 0.
func getGpsElemValue(elm string) float64 {
	num, den, found := strings.Cut(elm, "/")
	if !found || strings.Contains(den, "/") {
		return 0
	}

	numerator, numErr := strconv.ParseFloat(num, 64)
	denominator, denErr := strconv.ParseFloat(den, 64)
	if numErr != nil || denErr != nil || denominator == 0 {
		return 0
	}

	return numerator / denominator
}
|
||||
|
||||
func handleCosError(resp string, originErr error) error {
|
||||
if resp == "" {
|
||||
return originErr
|
||||
}
|
||||
|
||||
var err Error
|
||||
if err := xml.Unmarshal([]byte(resp), &err); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal cos error: %w", err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("cos error: %s", err.Message)
|
||||
}
|
||||
118
pkg/filemanager/driver/cos/scf.go
Normal file
118
pkg/filemanager/driver/cos/scf.go
Normal file
@@ -0,0 +1,118 @@
|
||||
package cos
|
||||
|
||||
// TODO: revisit para error
//
// scfFunc is the Python 3 source of the SCF (Serverless Cloud Function) that
// is deployed with a COS trigger; it forwards upload callbacks to Cloudreve
// by issuing a GET request to the URL stored in the object's
// "x-cos-meta-callback" metadata.
//
// NOTE(review): the embedded script has suspicious control flow — a missing
// callback URL returns 'Success' from inside the loop (skipping remaining
// records), and the `return "Fail"` after `raise e` is unreachable. Kept
// byte-for-byte since this string is deployed verbatim; confirm before fixing.
const scfFunc = `# -*- coding: utf8 -*-
# SCF配置COS触发,向 Cloudreve 发送回调
from qcloud_cos_v5 import CosConfig
from qcloud_cos_v5 import CosS3Client
from qcloud_cos_v5 import CosServiceError
from qcloud_cos_v5 import CosClientError
import sys
import logging
import requests

logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logging = logging.getLogger()


def main_handler(event, context):
    logging.info("start main handler")
    for record in event['Records']:
        try:
            if "x-cos-meta-callback" not in record['cos']['cosObject']['meta']:
                logging.info("Cannot find callback URL, skiped.")
                return 'Success'
            callback = record['cos']['cosObject']['meta']['x-cos-meta-callback']
            key = record['cos']['cosObject']['key']
            logging.info("Callback URL is " + callback)

            r = requests.get(callback)
            print(r.text)

        except Exception as e:
            print(e)
            print('Error getting object {} callback url. '.format(key))
            raise e
            return "Fail"

    return "Success"
`
|
||||
|
||||
//
|
||||
//// CreateSCF 创建回调云函数
|
||||
//func CreateSCF(policy *model.Policy, region string) error {
|
||||
// // 初始化客户端
|
||||
// credential := common.NewCredential(
|
||||
// policy.AccessKey,
|
||||
// policy.SecretKey,
|
||||
// )
|
||||
// cpf := profile.NewClientProfile()
|
||||
// client, err := scf.NewClient(credential, region, cpf)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// // 创建回调代码数据
|
||||
// buff := &bytes.Buffer{}
|
||||
// bs64 := base64.NewEncoder(base64.StdEncoding, buff)
|
||||
// zipWriter := zip.NewWriter(bs64)
|
||||
// header := zip.FileHeader{
|
||||
// Name: "callback.py",
|
||||
// Method: zip.Deflate,
|
||||
// }
|
||||
// writer, err := zipWriter.CreateHeader(&header)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// _, err = io.Copy(writer, strings.NewReader(scfFunc))
|
||||
// zipWriter.Close()
|
||||
//
|
||||
// // 创建云函数
|
||||
// req := scf.NewCreateFunctionRequest()
|
||||
// funcName := "cloudreve_" + hashid.HashID(policy.ID, hashid.PolicyID) + strconv.FormatInt(time.Now().Unix(), 10)
|
||||
// zipFileBytes, _ := ioutil.ReadAll(buff)
|
||||
// zipFileStr := string(zipFileBytes)
|
||||
// codeSource := "ZipFile"
|
||||
// handler := "callback.main_handler"
|
||||
// desc := "Cloudreve 用回调函数"
|
||||
// timeout := int64(60)
|
||||
// runtime := "Python3.6"
|
||||
// req.FunctionName = &funcName
|
||||
// req.Code = &scf.Code{
|
||||
// ZipFile: &zipFileStr,
|
||||
// }
|
||||
// req.Handler = &handler
|
||||
// req.Description = &desc
|
||||
// req.Timeout = &timeout
|
||||
// req.Runtime = &runtime
|
||||
// req.CodeSource = &codeSource
|
||||
//
|
||||
// _, err = client.CreateFunction(req)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// time.Sleep(time.Duration(5) * time.Second)
|
||||
//
|
||||
// // 创建触发器
|
||||
// server, _ := url.Parse(policy.Server)
|
||||
// triggerType := "cos"
|
||||
// triggerDesc := `{"event":"cos:ObjectCreated:Post","filter":{"Prefix":"","Suffix":""}}`
|
||||
// enable := "OPEN"
|
||||
//
|
||||
// trigger := scf.NewCreateTriggerRequest()
|
||||
// trigger.FunctionName = &funcName
|
||||
// trigger.TriggerName = &server.Host
|
||||
// trigger.Type = &triggerType
|
||||
// trigger.TriggerDesc = &triggerDesc
|
||||
// trigger.Enable = &enable
|
||||
//
|
||||
// _, err = client.CreateTrigger(trigger)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// return nil
|
||||
//}
|
||||
122
pkg/filemanager/driver/handler.go
Normal file
122
pkg/filemanager/driver/handler.go
Normal file
@@ -0,0 +1,122 @@
|
||||
package driver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
)
|
||||
|
||||
const (
|
||||
// HandlerCapabilityProxyRequired this handler requires Cloudreve's proxy to get file content
|
||||
HandlerCapabilityProxyRequired HandlerCapability = iota
|
||||
// HandlerCapabilityInboundGet this handler supports directly get file's RSCloser, usually
|
||||
// indicates that the file is stored in the same machine as Cloudreve
|
||||
HandlerCapabilityInboundGet
|
||||
// HandlerCapabilityUploadSentinelRequired this handler does not support compliance callback mechanism,
|
||||
// thus it requires Cloudreve's sentinel to guarantee the upload is under control. Cloudreve will try
|
||||
// to delete the placeholder file and cancel the upload session if upload callback is not made after upload
|
||||
// session expire.
|
||||
HandlerCapabilityUploadSentinelRequired
|
||||
)
|
||||
|
||||
type (
	// MetaType categorizes a piece of media metadata (see the MetaType* constants).
	MetaType string
	// MediaMeta is a single key/value metadata entry extracted from a media file.
	MediaMeta struct {
		Key   string   `json:"key"`
		Value string   `json:"value"`
		Type  MetaType `json:"type"`
	}

	// HandlerCapability enumerates optional features a storage handler may
	// require or provide (see the HandlerCapability* constants).
	HandlerCapability int

	// GetSourceArgs carries options for generating a source (external/download) URL.
	GetSourceArgs struct {
		Expire      *time.Time
		IsDownload  bool
		Speed       int64
		DisplayName string
	}

	// Handler is the storage policy adapter interface.
	Handler interface {
		// Put uploads a file; the destination path and size come from the request
		// props. When the context is canceled, the upload should be aborted and
		// temporary files cleaned up.
		Put(ctx context.Context, file *fs.UploadRequest) error

		// Delete removes one or more files at the given paths, returning the list
		// of paths that failed to delete along with an error.
		Delete(ctx context.Context, files ...string) ([]string, error)

		// Open physical files. Only implemented if HandlerCapabilityInboundGet capability is set.
		// Returns file path and an os.File object.
		Open(ctx context.Context, path string) (*os.File, error)

		// LocalPath returns the local path of a file.
		// Only implemented if HandlerCapabilityInboundGet capability is set.
		LocalPath(ctx context.Context, path string) string

		// Thumb returns the URL for a thumbnail of given entity.
		Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error)

		// Source returns an external/download URL for the entity; args controls
		// expiration, speed limit, display name and whether it is a direct download.
		Source(ctx context.Context, e fs.Entity, args *GetSourceArgs) (string, error)

		// Token creates an upload credential and signature valid for the session's TTL.
		Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error)

		// CancelToken cancels a previously created stateful upload credential.
		CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error

		// CompleteUpload completes a previously created upload session.
		CompleteUpload(ctx context.Context, session *fs.UploadSession) error

		// List recursively lists files/directories under the remote path (excluding
		// the path itself); returned object paths are rooted at path.
		// recursive - whether to descend into subdirectories
		// List(ctx context.Context, path string, recursive bool) ([]response.Object, error)

		// Capabilities returns the capabilities of this handler
		Capabilities() *Capabilities

		// MediaMeta extracts media metadata from the given file.
		MediaMeta(ctx context.Context, path, ext string) ([]MediaMeta, error)
	}

	// Capabilities describes what a handler supports and its limits.
	Capabilities struct {
		StaticFeatures *boolset.BooleanSet
		// MaxSourceExpire indicates the maximum allowed expiration duration of a source URL
		MaxSourceExpire time.Duration
		// MinSourceExpire indicates the minimum allowed expiration duration of a source URL
		MinSourceExpire time.Duration
		// MediaMetaSupportedExts indicates the extensions of files that support media metadata. Empty list
		// indicates that no file supports extracting media metadata.
		MediaMetaSupportedExts []string
		// MediaMetaProxy indicates whether to generate media metadata using local generators.
		MediaMetaProxy bool
		// ThumbSupportedExts indicates the extensions of files that support thumbnail generation. Empty list
		// indicates that no file supports thumbnail generation.
		ThumbSupportedExts []string
		// ThumbSupportAllExts indicates whether to generate thumbnails for all files, regardless of their extensions.
		ThumbSupportAllExts bool
		// ThumbMaxSize indicates the maximum allowed size of a thumbnail. 0 indicates that no limit is set.
		ThumbMaxSize int64
		// ThumbProxy indicates whether to generate thumbnails using local generators.
		ThumbProxy bool
	}
)
|
||||
|
||||
// Known MetaType values.
const (
	MetaTypeExif MetaType = "exif"
	// NOTE(review): prefix is inconsistent with its siblings ("MediaType" vs
	// "MetaType"); renaming would break external callers, so it is kept.
	MediaTypeMusic      MetaType = "music"
	MetaTypeStreamMedia MetaType = "stream"
)
|
||||
|
||||
// ForceUsePublicEndpointCtx is the context key used by WithForcePublicEndpoint.
type ForceUsePublicEndpointCtx struct{}

// WithForcePublicEndpoint sets the context to force using public endpoint for supported storage policies.
func WithForcePublicEndpoint(ctx context.Context, value bool) context.Context {
	return context.WithValue(ctx, ForceUsePublicEndpointCtx{}, value)
}
|
||||
75
pkg/filemanager/driver/local/entity.go
Normal file
75
pkg/filemanager/driver/local/entity.go
Normal file
@@ -0,0 +1,75 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/gofrs/uuid"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewLocalFileEntity creates a new local file entity.
|
||||
func NewLocalFileEntity(t types.EntityType, src string) (fs.Entity, error) {
|
||||
info, err := os.Stat(util.RelativePath(src))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &localFileEntity{
|
||||
t: t,
|
||||
src: src,
|
||||
size: info.Size(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// localFileEntity is a minimal fs.Entity backed by a file on local disk.
// It carries no database record, so most identity/ownership accessors return
// zero values.
type localFileEntity struct {
	t    types.EntityType // entity type this file represents
	src  string           // source path as given to NewLocalFileEntity
	size int64            // file size captured at construction time
}

// ID returns 0: a local file entity has no database identity.
func (l *localFileEntity) ID() int {
	return 0
}

// Type returns the entity type supplied at construction.
func (l *localFileEntity) Type() types.EntityType {
	return l.t
}

// Size returns the file size captured when the entity was created.
func (l *localFileEntity) Size() int64 {
	return l.size
}

// UpdatedAt returns the current time: timestamps are not tracked for local
// file entities.
func (l *localFileEntity) UpdatedAt() time.Time {
	return time.Now()
}

// CreatedAt returns the current time: timestamps are not tracked for local
// file entities.
func (l *localFileEntity) CreatedAt() time.Time {
	return time.Now()
}

// CreatedBy returns nil: local file entities have no owning user.
func (l *localFileEntity) CreatedBy() *ent.User {
	return nil
}

// Source returns the source path supplied at construction.
func (l *localFileEntity) Source() string {
	return l.src
}

// ReferenceCount always reports a single reference.
func (l *localFileEntity) ReferenceCount() int {
	return 1
}

// PolicyID returns 0: local file entities are not bound to a storage policy record.
func (l *localFileEntity) PolicyID() int {
	return 0
}

// UploadSessionID returns nil: local file entities are not part of an upload session.
func (l *localFileEntity) UploadSessionID() *uuid.UUID {
	return nil
}

// Model returns nil: there is no backing ent.Entity row.
func (l *localFileEntity) Model() *ent.Entity {
	return nil
}
|
||||
11
pkg/filemanager/driver/local/fallocate.go
Normal file
11
pkg/filemanager/driver/local/fallocate.go
Normal file
@@ -0,0 +1,11 @@
|
||||
//go:build !linux && !darwin
|
||||
// +build !linux,!darwin
|
||||
|
||||
package local
|
||||
|
||||
import "os"
|
||||
|
||||
// No-op on non-Linux/Darwin platforms.
|
||||
func Fallocate(file *os.File, offset int64, length int64) error {
|
||||
return nil
|
||||
}
|
||||
27
pkg/filemanager/driver/local/fallocate_darwin.go
Normal file
27
pkg/filemanager/driver/local/fallocate_darwin.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Fallocate preallocates disk space for file on macOS via the F_PREALLOCATE
// fcntl, then truncates the file to offset+length so the space is committed.
func Fallocate(file *os.File, offset int64, length int64) error {
	var fst syscall.Fstore_t

	// First attempt: ask for a contiguous allocation covering [0, offset+length).
	fst.Flags = syscall.F_ALLOCATECONTIG
	fst.Posmode = syscall.F_PREALLOCATE
	fst.Offset = 0
	fst.Length = offset + length
	fst.Bytesalloc = 0

	// Check https://lists.apple.com/archives/darwin-dev/2007/Dec/msg00040.html
	_, _, err := syscall.Syscall(syscall.SYS_FCNTL, file.Fd(), syscall.F_PREALLOCATE, uintptr(unsafe.Pointer(&fst)))
	if err != syscall.Errno(0x0) {
		// Contiguous allocation failed; retry allowing non-contiguous blocks.
		fst.Flags = syscall.F_ALLOCATEALL
		// Ignore the return value
		_, _, _ = syscall.Syscall(syscall.SYS_FCNTL, file.Fd(), syscall.F_PREALLOCATE, uintptr(unsafe.Pointer(&fst)))
	}

	// Extend the file to the requested length regardless of allocation outcome.
	return syscall.Ftruncate(int(file.Fd()), fst.Length)
}
|
||||
14
pkg/filemanager/driver/local/fallocate_linux.go
Normal file
14
pkg/filemanager/driver/local/fallocate_linux.go
Normal file
@@ -0,0 +1,14 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Fallocate preallocates disk space for the given range of file using the
// Linux fallocate(2) syscall. A zero length is a no-op (fallocate would
// otherwise fail with EINVAL).
func Fallocate(file *os.File, offset int64, length int64) error {
	if length == 0 {
		return nil
	}

	return syscall.Fallocate(int(file.Fd()), 0, offset, length)
}
|
||||
301
pkg/filemanager/driver/local/local.go
Normal file
301
pkg/filemanager/driver/local/local.go
Normal file
@@ -0,0 +1,301 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
)
|
||||
|
||||
const (
|
||||
Perm = 0744
|
||||
)
|
||||
|
||||
var (
|
||||
capabilities = &driver.Capabilities{
|
||||
StaticFeatures: &boolset.BooleanSet{},
|
||||
MediaMetaProxy: true,
|
||||
ThumbProxy: true,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
boolset.Sets(map[driver.HandlerCapability]bool{
|
||||
driver.HandlerCapabilityProxyRequired: true,
|
||||
driver.HandlerCapabilityInboundGet: true,
|
||||
}, capabilities.StaticFeatures)
|
||||
}
|
||||
|
||||
// Driver 本地策略适配器
|
||||
type Driver struct {
|
||||
Policy *ent.StoragePolicy
|
||||
httpClient request.Client
|
||||
l logging.Logger
|
||||
config conf.ConfigProvider
|
||||
}
|
||||
|
||||
// New constructs a new local driver
|
||||
func New(p *ent.StoragePolicy, l logging.Logger, config conf.ConfigProvider) *Driver {
|
||||
return &Driver{
|
||||
Policy: p,
|
||||
l: l,
|
||||
httpClient: request.NewClient(config, request.WithLogger(l)),
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
//// List 递归列取给定物理路径下所有文件
|
||||
//func (handler *Driver) List(ctx context.Context, path string, recursive bool) ([]response.Object, error) {
|
||||
// var res []response.Object
|
||||
//
|
||||
// // 取得起始路径
|
||||
// root := util.RelativePath(filepath.FromSlash(path))
|
||||
//
|
||||
// // 开始遍历路径下的文件、目录
|
||||
// err := filepath.Walk(root,
|
||||
// func(path string, info os.FileInfo, err error) error {
|
||||
// // 跳过根目录
|
||||
// if path == root {
|
||||
// return nil
|
||||
// }
|
||||
//
|
||||
// if err != nil {
|
||||
// util.Log().Warning("Failed to walk folder %q: %s", path, err)
|
||||
// return filepath.SkipDir
|
||||
// }
|
||||
//
|
||||
// // 将遍历对象的绝对路径转换为相对路径
|
||||
// rel, err := filepath.Rel(root, path)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// res = append(res, response.Object{
|
||||
// Name: info.Name(),
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Source: path,
|
||||
// Size: uint64(info.Size()),
|
||||
// IsDir: info.IsDir(),
|
||||
// LastModify: info.ModTime(),
|
||||
// })
|
||||
//
|
||||
// // 如果非递归,则不步入目录
|
||||
// if !recursive && info.IsDir() {
|
||||
// return filepath.SkipDir
|
||||
// }
|
||||
//
|
||||
// return nil
|
||||
// })
|
||||
//
|
||||
// return res, err
|
||||
//}
|
||||
|
||||
// Open opens the physical file at the given logical path for reading.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	// Resolve the logical path under the configured data root and open it.
	file, err := os.Open(handler.LocalPath(ctx, path))
	if err != nil {
		handler.l.Debug("Failed to open file: %s", err)
		return nil, err
	}

	return file, nil
}
|
||||
|
||||
// LocalPath maps a logical slash-separated path to an OS-native file path
// relative to the configured data root.
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	return util.RelativePath(filepath.FromSlash(path))
}
|
||||
|
||||
// Put 将文件流保存到指定目录
|
||||
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
|
||||
defer file.Close()
|
||||
dst := util.RelativePath(filepath.FromSlash(file.Props.SavePath))
|
||||
|
||||
// 如果非 Overwrite,则检查是否有重名冲突
|
||||
if file.Mode&fs.ModeOverwrite != fs.ModeOverwrite {
|
||||
if util.Exists(dst) {
|
||||
handler.l.Warning("File with the same name existed or unavailable: %s", dst)
|
||||
return errors.New("file with the same name existed or unavailable")
|
||||
}
|
||||
}
|
||||
|
||||
if err := handler.prepareFileDirectory(dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
openMode := os.O_CREATE | os.O_RDWR
|
||||
if file.Mode&fs.ModeOverwrite == fs.ModeOverwrite && file.Offset == 0 {
|
||||
openMode |= os.O_TRUNC
|
||||
}
|
||||
|
||||
out, err := os.OpenFile(dst, openMode, Perm)
|
||||
if err != nil {
|
||||
handler.l.Warning("Failed to open or create file: %s", err)
|
||||
return err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
stat, err := out.Stat()
|
||||
if err != nil {
|
||||
handler.l.Warning("Failed to read file info: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if stat.Size() < file.Offset {
|
||||
return errors.New("size of unfinished uploaded chunks is not as expected")
|
||||
}
|
||||
|
||||
if _, err := out.Seek(file.Offset, io.SeekStart); err != nil {
|
||||
return fmt.Errorf("failed to seek to desired offset %d: %s", file.Offset, err)
|
||||
}
|
||||
|
||||
// 写入文件内容
|
||||
_, err = io.Copy(out, file)
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete removes one or more files, returning the paths that could not be
// deleted and the last error encountered. Paths that do not exist are
// silently treated as already deleted.
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
	deleteFailed := make([]string, 0, len(files))
	var retErr error

	for _, value := range files {
		filePath := util.RelativePath(filepath.FromSlash(value))
		if util.Exists(filePath) {
			err := os.Remove(filePath)
			if err != nil {
				handler.l.Warning("Failed to delete file: %s", err)
				retErr = err
				deleteFailed = append(deleteFailed, value)
			}
		}

		//// Legacy: also try to delete the file's thumbnail, if any.
		//_ = os.Remove(util.RelativePath(value + model.GetSettingByNameWithDefault("thumb_file_suffix", "._thumb")))
	}

	return deleteFailed, retErr
}
|
||||
|
||||
// Thumb 获取文件缩略图
|
||||
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
|
||||
return "", errors.New("not implemented")
|
||||
}
|
||||
|
||||
// Source 获取外链URL
|
||||
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
|
||||
return "", errors.New("not implemented")
|
||||
}
|
||||
|
||||
// Token 获取上传策略和认证Token,本地策略直接返回空值
|
||||
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
if file.Mode&fs.ModeOverwrite != fs.ModeOverwrite && util.Exists(uploadSession.Props.SavePath) {
|
||||
return nil, errors.New("placeholder file already exist")
|
||||
}
|
||||
|
||||
dst := util.RelativePath(filepath.FromSlash(uploadSession.Props.SavePath))
|
||||
if err := handler.prepareFileDirectory(dst); err != nil {
|
||||
return nil, fmt.Errorf("failed to prepare file directory: %w", err)
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, Perm)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create placeholder file: %w", err)
|
||||
}
|
||||
|
||||
// Preallocate disk space
|
||||
defer f.Close()
|
||||
if handler.Policy.Settings.PreAllocate {
|
||||
if err := Fallocate(f, 0, uploadSession.Props.Size); err != nil {
|
||||
handler.l.Warning("Failed to preallocate file: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &fs.UploadCredential{
|
||||
SessionID: uploadSession.Props.UploadSessionID,
|
||||
ChunkSize: handler.Policy.Settings.ChunkSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (h *Driver) prepareFileDirectory(dst string) error {
|
||||
basePath := filepath.Dir(dst)
|
||||
if !util.Exists(basePath) {
|
||||
err := os.MkdirAll(basePath, Perm)
|
||||
if err != nil {
|
||||
h.l.Warning("Failed to create directory: %s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// 取消上传凭证
|
||||
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CompleteUpload finishes an upload session. When a callback URL is present —
// indicating this handler runs on a slave node as a shadowed handler for a
// remote policy — it forwards a signed callback request to the master node;
// otherwise it is a no-op.
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	if session.Callback == "" {
		return nil
	}

	// The node edge supplies the slave key used to sign the callback.
	if session.Policy.Edges.Node == nil {
		return serializer.NewError(serializer.CodeCallbackError, "Node not found", nil)
	}

	// If callback is set, indicating this handler is used in slave node as a shadowed handler for remote policy,
	// we need to send callback request to master node.
	resp := handler.httpClient.Request(
		"POST",
		session.Callback,
		nil,
		request.WithTimeout(time.Duration(handler.config.Slave().CallbackTimeout)*time.Second),
		request.WithCredential(
			auth.HMACAuth{[]byte(session.Policy.Edges.Node.SlaveKey)},
			int64(handler.config.Slave().SignatureTTL),
		),
		request.WithContext(ctx),
		request.WithCorrelationID(),
	)

	if resp.Err != nil {
		return serializer.NewError(serializer.CodeCallbackError, "Slave cannot send callback request", resp.Err)
	}

	// Parse the master node's callback response.
	res, err := resp.DecodeResponse()
	if err != nil {
		msg := fmt.Sprintf("Slave cannot parse callback response from master (StatusCode=%d).", resp.Response.StatusCode)
		return serializer.NewError(serializer.CodeCallbackError, msg, err)
	}

	// Non-zero code means the master rejected the callback.
	if res.Code != 0 {
		return serializer.NewError(res.Code, res.Msg, errors.New(res.Error))
	}

	return nil
}
|
||||
|
||||
func (handler *Driver) Capabilities() *driver.Capabilities {
|
||||
return capabilities
|
||||
}
|
||||
|
||||
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
137
pkg/filemanager/driver/obs/media.go
Normal file
137
pkg/filemanager/driver/obs/media.go
Normal file
@@ -0,0 +1,137 @@
|
||||
package obs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/huaweicloud/huaweicloud-sdk-go-obs/obs"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
// MediaMeta queries the OBS "image/info" processor for EXIF data of the
// object at path and converts the response into MediaMeta entries (all
// tagged MetaTypeExif).
func (d *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
	// Sign a short-lived GET URL that invokes the image-info processor.
	thumbURL, err := d.signSourceURL(&obs.CreateSignedUrlInput{
		Method:  obs.HttpMethodGet,
		Bucket:  d.policy.BucketName,
		Key:     path,
		Expires: int(mediaInfoTTL.Seconds()),
		QueryParams: map[string]string{
			imageProcessHeader: imageInfoProcessor,
		},
	})

	if err != nil {
		return nil, fmt.Errorf("failed to sign media info url: %w", err)
	}

	resp, err := d.httpClient.
		Request(http.MethodGet, thumbURL, nil, request.WithContext(ctx)).
		CheckHTTPResponse(http.StatusOK).
		GetResponseIgnoreErr()
	if err != nil {
		// Body may carry a JSON error payload even on failure.
		return nil, handleJsonError(resp, err)
	}

	var imageInfo map[string]any
	if err := json.Unmarshal([]byte(resp), &imageInfo); err != nil {
		return nil, fmt.Errorf("failed to unmarshal media info: %w", err)
	}

	// Flatten values to strings, stripping OBS's "exif:" key prefix where present.
	imageInfoMap := lo.MapEntries(imageInfo, func(k string, v any) (string, string) {
		if vStr, ok := v.(string); ok {
			return strings.TrimPrefix(k, "exif:"), vStr
		}

		return k, fmt.Sprintf("%v", v)
	})
	metas := make([]driver.MediaMeta, 0)
	metas = append(metas, mediameta.ExtractExifMap(imageInfoMap, time.Time{})...)
	metas = append(metas, parseGpsInfo(imageInfoMap)...)
	for i := 0; i < len(metas); i++ {
		metas[i].Type = driver.MetaTypeExif
	}
	return metas, nil
}
|
||||
|
||||
func parseGpsInfo(imageInfo map[string]string) []driver.MediaMeta {
|
||||
latitude := imageInfo["GPSLatitude"] // 31/1, 162680820/10000000, 0/1
|
||||
longitude := imageInfo["GPSLongitude"] // 120/1, 429103939/10000000, 0/1
|
||||
latRef := imageInfo["GPSLatitudeRef"] // N
|
||||
lonRef := imageInfo["GPSLongitudeRef"] // E
|
||||
|
||||
// Make sure all value exist in map
|
||||
if latitude == "" || longitude == "" || latRef == "" || lonRef == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
lat := parseRawGPS(latitude, latRef)
|
||||
lon := parseRawGPS(longitude, lonRef)
|
||||
if !math.IsNaN(lat) && !math.IsNaN(lon) {
|
||||
lat, lng := mediameta.NormalizeGPS(lat, lon)
|
||||
return []driver.MediaMeta{{
|
||||
Key: mediameta.GpsLat,
|
||||
Value: fmt.Sprintf("%f", lat),
|
||||
}, {
|
||||
Key: mediameta.GpsLng,
|
||||
Value: fmt.Sprintf("%f", lng),
|
||||
}}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseRawGPS(gpsStr string, ref string) float64 {
|
||||
elem := strings.Split(gpsStr, ", ")
|
||||
if len(elem) < 1 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var (
|
||||
deg float64
|
||||
minutes float64
|
||||
seconds float64
|
||||
)
|
||||
|
||||
deg = getGpsElemValue(elem[0])
|
||||
if len(elem) >= 2 {
|
||||
minutes = getGpsElemValue(elem[1])
|
||||
}
|
||||
if len(elem) >= 3 {
|
||||
seconds = getGpsElemValue(elem[2])
|
||||
}
|
||||
|
||||
decimal := deg + minutes/60.0 + seconds/3600.0
|
||||
|
||||
if ref == "S" || ref == "W" {
|
||||
return -decimal
|
||||
}
|
||||
|
||||
return decimal
|
||||
}
|
||||
|
||||
// getGpsElemValue parses a single EXIF rational component of the form
// "numerator/denominator" into a float. Malformed input, extra separators,
// or a zero denominator all yield 0.
func getGpsElemValue(elm string) float64 {
	num, den, found := strings.Cut(elm, "/")
	if !found || strings.Contains(den, "/") {
		return 0
	}

	numerator, numErr := strconv.ParseFloat(num, 64)
	denominator, denErr := strconv.ParseFloat(den, 64)
	if numErr != nil || denErr != nil || denominator == 0 {
		return 0
	}

	return numerator / denominator
}
|
||||
513
pkg/filemanager/driver/obs/obs.go
Normal file
513
pkg/filemanager/driver/obs/obs.go
Normal file
@@ -0,0 +1,513 @@
|
||||
package obs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/huaweicloud/huaweicloud-sdk-go-obs/obs"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
const (
	// chunkRetrySleep is the constant backoff between chunk upload retries.
	chunkRetrySleep = time.Duration(5) * time.Second
	// maxDeleteBatch is the max number of objects per DeleteObjects request.
	maxDeleteBatch = 1000
	// imageProcessHeader is the query parameter for OBS image processing.
	imageProcessHeader = "x-image-process"
	// trafficLimitHeader limits download bandwidth (value in bit/s).
	trafficLimitHeader = "x-obs-traffic-limit"
	partNumberParam    = "partNumber"
	callbackParam      = "x-obs-callback"
	uploadIdParam      = "uploadId"
	mediaInfoTTL       = time.Duration(10) * time.Minute
	imageInfoProcessor = "image/info"

	// MultiPartUploadThreshold is the size above which server-side uploads
	// switch to multipart upload.
	MultiPartUploadThreshold int64 = 5 << 30 // 5GB
)
|
||||
|
||||
var (
|
||||
features = &boolset.BooleanSet{}
|
||||
)
|
||||
|
||||
type (
	// CallbackPolicy is the upload-complete callback policy sent to OBS,
	// base64-encoded into the x-obs-callback parameter.
	CallbackPolicy struct {
		CallbackURL      string `json:"callbackUrl"`
		CallbackBody     string `json:"callbackBody"`
		CallbackBodyType string `json:"callbackBodyType"`
	}
	// JsonError is the JSON error shape returned by OBS callbacks.
	JsonError struct {
		Message string `json:"message"`
		Code    string `json:"code"`
	}
)
|
||||
|
||||
// Driver Huawei Cloud OBS driver
type Driver struct {
	policy    *ent.StoragePolicy // storage policy this driver serves
	chunkSize int64              // multipart chunk size in bytes

	settings   setting.Provider
	l          logging.Logger
	config     conf.ConfigProvider
	mime       mime.MimeDetector
	httpClient request.Client
	obs        *obs.ObsClient // underlying Huawei OBS SDK client
}
|
||||
|
||||
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
|
||||
config conf.ConfigProvider, l logging.Logger, mime mime.MimeDetector) (*Driver, error) {
|
||||
chunkSize := policy.Settings.ChunkSize
|
||||
if policy.Settings.ChunkSize == 0 {
|
||||
chunkSize = 25 << 20 // 25 MB
|
||||
}
|
||||
|
||||
driver := &Driver{
|
||||
policy: policy,
|
||||
settings: settings,
|
||||
chunkSize: chunkSize,
|
||||
config: config,
|
||||
l: l,
|
||||
mime: mime,
|
||||
httpClient: request.NewClient(config, request.WithLogger(l)),
|
||||
}
|
||||
|
||||
useCname := false
|
||||
if policy.Settings != nil && policy.Settings.UseCname {
|
||||
useCname = true
|
||||
}
|
||||
|
||||
obsClient, err := obs.New(policy.AccessKey, policy.SecretKey, policy.Server, obs.WithSignature(obs.SignatureObs), obs.WithCustomDomainName(useCname))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
driver.obs = obsClient
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
func (d *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
|
||||
defer file.Close()
|
||||
|
||||
// 是否允许覆盖
|
||||
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
|
||||
if !overwrite {
|
||||
// Check for duplicated file
|
||||
if _, err := d.obs.HeadObject(&obs.HeadObjectInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
}, obs.WithRequestContext(ctx)); err == nil {
|
||||
return fs.ErrFileExisted
|
||||
}
|
||||
}
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
d.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
// 小文件直接上传
|
||||
if file.Props.Size < MultiPartUploadThreshold {
|
||||
_, err := d.obs.PutObject(&obs.PutObjectInput{
|
||||
PutObjectBasicInput: obs.PutObjectBasicInput{
|
||||
ObjectOperationInput: obs.ObjectOperationInput{
|
||||
Key: file.Props.SavePath,
|
||||
Bucket: d.policy.BucketName,
|
||||
},
|
||||
HttpHeader: obs.HttpHeader{
|
||||
ContentType: mimeType,
|
||||
},
|
||||
ContentLength: file.Props.Size,
|
||||
},
|
||||
Body: file,
|
||||
}, obs.WithRequestContext(ctx))
|
||||
return err
|
||||
}
|
||||
|
||||
// 超过阈值时使用分片上传
|
||||
imur, err := d.obs.InitiateMultipartUpload(&obs.InitiateMultipartUploadInput{
|
||||
ObjectOperationInput: obs.ObjectOperationInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
},
|
||||
HttpHeader: obs.HttpHeader{
|
||||
ContentType: d.mime.TypeByName(file.Props.Uri.Name()),
|
||||
},
|
||||
}, obs.WithRequestContext(ctx))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initiate multipart upload: %w", err)
|
||||
}
|
||||
|
||||
chunks := chunk.NewChunkGroup(file, d.chunkSize, &backoff.ConstantBackoff{
|
||||
Max: d.settings.ChunkRetryLimit(ctx),
|
||||
Sleep: chunkRetrySleep,
|
||||
}, d.settings.UseChunkBuffer(ctx), d.l, d.settings.TempPath(ctx))
|
||||
|
||||
parts := make([]*obs.UploadPartOutput, 0, chunks.Num())
|
||||
|
||||
uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
|
||||
part, err := d.obs.UploadPart(&obs.UploadPartInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
PartNumber: current.Index() + 1,
|
||||
UploadId: imur.UploadId,
|
||||
Body: content,
|
||||
SourceFile: "",
|
||||
PartSize: current.Length(),
|
||||
}, obs.WithRequestContext(ctx))
|
||||
if err == nil {
|
||||
parts = append(parts, part)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for chunks.Next() {
|
||||
if err := chunks.Process(uploadFunc); err != nil {
|
||||
d.cancelUpload(file.Props.SavePath, imur)
|
||||
return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
|
||||
}
|
||||
}
|
||||
|
||||
_, err = d.obs.CompleteMultipartUpload(&obs.CompleteMultipartUploadInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
UploadId: imur.UploadId,
|
||||
Parts: lo.Map(parts, func(part *obs.UploadPartOutput, i int) obs.Part {
|
||||
return obs.Part{
|
||||
PartNumber: i + 1,
|
||||
ETag: part.ETag,
|
||||
}
|
||||
}),
|
||||
}, obs.WithRequestContext(ctx))
|
||||
if err != nil {
|
||||
d.cancelUpload(file.Props.SavePath, imur)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
|
||||
groups := lo.Chunk(files, maxDeleteBatch)
|
||||
failed := make([]string, 0)
|
||||
var lastError error
|
||||
for index, group := range groups {
|
||||
d.l.Debug("Process delete group #%d: %v", index, group)
|
||||
// 删除文件
|
||||
delRes, err := d.obs.DeleteObjects(&obs.DeleteObjectsInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Quiet: true,
|
||||
Objects: lo.Map(group, func(item string, index int) obs.ObjectToDelete {
|
||||
return obs.ObjectToDelete{
|
||||
Key: item,
|
||||
}
|
||||
}),
|
||||
}, obs.WithRequestContext(ctx))
|
||||
if err != nil {
|
||||
failed = append(failed, group...)
|
||||
lastError = err
|
||||
continue
|
||||
}
|
||||
|
||||
for _, v := range delRes.Errors {
|
||||
d.l.Debug("Failed to delete file: %s, Code:%s, Message:%s", v.Key, v.Code, v.Key)
|
||||
failed = append(failed, v.Key)
|
||||
}
|
||||
}
|
||||
|
||||
if len(failed) > 0 && lastError == nil {
|
||||
lastError = fmt.Errorf("failed to delete files: %v", failed)
|
||||
}
|
||||
|
||||
return failed, lastError
|
||||
}
|
||||
|
||||
// Open is not supported by the OBS driver; objects have no local file handle.
func (d *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	return nil, errors.New("not implemented")
}
|
||||
|
||||
// LocalPath always returns "" — OBS objects have no local filesystem path.
func (d *Driver) LocalPath(ctx context.Context, path string) string {
	return ""
}
|
||||
|
||||
func (d *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
|
||||
w, h := d.settings.ThumbSize(ctx)
|
||||
thumbURL, err := d.signSourceURL(&obs.CreateSignedUrlInput{
|
||||
Method: obs.HttpMethodGet,
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: e.Source(),
|
||||
Expires: int(time.Until(*expire).Seconds()),
|
||||
QueryParams: map[string]string{
|
||||
imageProcessHeader: fmt.Sprintf("image/resize,m_lfit,w_%d,h_%d", w, h),
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return thumbURL, nil
|
||||
}
|
||||
|
||||
func (d *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
|
||||
params := make(map[string]string)
|
||||
if args.IsDownload {
|
||||
encodedFilename := url.PathEscape(args.DisplayName)
|
||||
params["response-content-disposition"] = fmt.Sprintf("attachment; filename=\"%s\"; filename*=UTF-8''%s",
|
||||
args.DisplayName, encodedFilename)
|
||||
}
|
||||
|
||||
expires := 86400 * 265 * 20
|
||||
if args.Expire != nil {
|
||||
expires = int(time.Until(*args.Expire).Seconds())
|
||||
}
|
||||
|
||||
if args.Speed > 0 {
|
||||
// Byte 转换为 bit
|
||||
args.Speed *= 8
|
||||
|
||||
// OSS对速度值有范围限制
|
||||
if args.Speed < 819200 {
|
||||
args.Speed = 819200
|
||||
}
|
||||
if args.Speed > 838860800 {
|
||||
args.Speed = 838860800
|
||||
}
|
||||
}
|
||||
|
||||
if args.Speed > 0 {
|
||||
params[trafficLimitHeader] = strconv.FormatInt(args.Speed, 10)
|
||||
}
|
||||
|
||||
return d.signSourceURL(&obs.CreateSignedUrlInput{
|
||||
Method: obs.HttpMethodGet,
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: e.Source(),
|
||||
Expires: expires,
|
||||
QueryParams: params,
|
||||
})
|
||||
}
|
||||
|
||||
func (d *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
// Check for duplicated file
|
||||
if _, err := d.obs.HeadObject(&obs.HeadObjectInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
}, obs.WithRequestContext(ctx)); err == nil {
|
||||
return nil, fs.ErrFileExisted
|
||||
}
|
||||
|
||||
// 生成回调地址
|
||||
siteURL := d.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
|
||||
// 在从机端创建上传会话
|
||||
uploadSession.ChunkSize = d.chunkSize
|
||||
uploadSession.Callback = routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeObs, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()
|
||||
// 回调策略
|
||||
callbackPolicy := CallbackPolicy{
|
||||
CallbackURL: uploadSession.Callback,
|
||||
CallbackBody: `{"name":${key},"source_name":${fname},"size":${size}}`,
|
||||
CallbackBodyType: "application/json",
|
||||
}
|
||||
|
||||
callbackPolicyJSON, err := json.Marshal(callbackPolicy)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode callback policy: %w", err)
|
||||
}
|
||||
callbackPolicyEncoded := base64.StdEncoding.EncodeToString(callbackPolicyJSON)
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
d.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
imur, err := d.obs.InitiateMultipartUpload(&obs.InitiateMultipartUploadInput{
|
||||
ObjectOperationInput: obs.ObjectOperationInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
},
|
||||
HttpHeader: obs.HttpHeader{
|
||||
ContentType: mimeType,
|
||||
},
|
||||
}, obs.WithRequestContext(ctx))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize multipart upload: %w", err)
|
||||
}
|
||||
uploadSession.UploadID = imur.UploadId
|
||||
|
||||
// 为每个分片签名上传 URL
|
||||
chunks := chunk.NewChunkGroup(file, d.chunkSize, &backoff.ConstantBackoff{}, false, d.l, "")
|
||||
urls := make([]string, chunks.Num())
|
||||
ttl := int64(time.Until(uploadSession.Props.ExpireAt).Seconds())
|
||||
for chunks.Next() {
|
||||
err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error {
|
||||
signedURL, err := d.obs.CreateSignedUrl(&obs.CreateSignedUrlInput{
|
||||
Method: obs.HttpMethodPut,
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
QueryParams: map[string]string{
|
||||
partNumberParam: strconv.Itoa(c.Index() + 1),
|
||||
uploadIdParam: uploadSession.UploadID,
|
||||
},
|
||||
Expires: int(ttl),
|
||||
Headers: map[string]string{
|
||||
"Content-Length": strconv.FormatInt(c.Length(), 10),
|
||||
"Content-Type": "application/octet-stream",
|
||||
}, //TODO: Validate +1
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
urls[c.Index()] = signedURL.SignedUrl
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// 签名完成分片上传的URL
|
||||
completeURL, err := d.obs.CreateSignedUrl(&obs.CreateSignedUrlInput{
|
||||
Method: obs.HttpMethodPost,
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
QueryParams: map[string]string{
|
||||
uploadIdParam: uploadSession.UploadID,
|
||||
callbackParam: callbackPolicyEncoded,
|
||||
},
|
||||
Headers: map[string]string{
|
||||
"Content-Type": "application/octet-stream",
|
||||
},
|
||||
Expires: int(ttl),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &fs.UploadCredential{
|
||||
UploadID: imur.UploadId,
|
||||
UploadURLs: urls,
|
||||
CompleteURL: completeURL.SignedUrl,
|
||||
SessionID: uploadSession.Props.UploadSessionID,
|
||||
ChunkSize: d.chunkSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
|
||||
_, err := d.obs.AbortMultipartUpload(&obs.AbortMultipartUploadInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: uploadSession.Props.SavePath,
|
||||
UploadId: uploadSession.UploadID,
|
||||
}, obs.WithRequestContext(ctx))
|
||||
return err
|
||||
}
|
||||
|
||||
// CompleteUpload requires no server-side work for this driver: the client
// finalizes the multipart upload via the pre-signed complete URL from Token.
func (d *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	return nil
}
|
||||
|
||||
//func (d *Driver) List(ctx context.Context, path string, recursive bool) ([]response.Object, error) {
|
||||
// return nil, errors.New("not implemented")
|
||||
//}
|
||||
|
||||
func (d *Driver) Capabilities() *driver.Capabilities {
|
||||
mediaMetaExts := d.policy.Settings.MediaMetaExts
|
||||
if !d.policy.Settings.NativeMediaProcessing {
|
||||
mediaMetaExts = nil
|
||||
}
|
||||
return &driver.Capabilities{
|
||||
StaticFeatures: features,
|
||||
MediaMetaSupportedExts: mediaMetaExts,
|
||||
MediaMetaProxy: d.policy.Settings.MediaMetaGeneratorProxy,
|
||||
ThumbSupportedExts: d.policy.Settings.ThumbExts,
|
||||
ThumbProxy: d.policy.Settings.ThumbGeneratorProxy,
|
||||
ThumbSupportAllExts: d.policy.Settings.ThumbSupportAllExts,
|
||||
ThumbMaxSize: d.policy.Settings.ThumbMaxSize,
|
||||
}
|
||||
}
|
||||
|
||||
// CORS 创建跨域策略
|
||||
func (d *Driver) CORS() error {
|
||||
_, err := d.obs.SetBucketCors(&obs.SetBucketCorsInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
BucketCors: obs.BucketCors{
|
||||
CorsRules: []obs.CorsRule{
|
||||
{
|
||||
AllowedOrigin: []string{"*"},
|
||||
AllowedMethod: []string{
|
||||
"GET",
|
||||
"POST",
|
||||
"PUT",
|
||||
"DELETE",
|
||||
"HEAD",
|
||||
},
|
||||
ExposeHeader: []string{"Etag"},
|
||||
AllowedHeader: []string{"*"},
|
||||
MaxAgeSeconds: 3600,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Driver) cancelUpload(path string, imur *obs.InitiateMultipartUploadOutput) {
|
||||
if _, err := d.obs.AbortMultipartUpload(&obs.AbortMultipartUploadInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: path,
|
||||
UploadId: imur.UploadId,
|
||||
}); err != nil {
|
||||
d.l.Warning("failed to abort multipart upload: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (handler *Driver) signSourceURL(input *obs.CreateSignedUrlInput) (string, error) {
|
||||
signedURL, err := handler.obs.CreateSignedUrl(input)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
finalURL, err := url.Parse(signedURL.SignedUrl)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// 公有空间替换掉Key及不支持的头
|
||||
if !handler.policy.IsPrivate {
|
||||
query := finalURL.Query()
|
||||
query.Del("AccessKeyId")
|
||||
query.Del("Signature")
|
||||
finalURL.RawQuery = query.Encode()
|
||||
}
|
||||
return finalURL.String(), nil
|
||||
}
|
||||
|
||||
func handleJsonError(resp string, originErr error) error {
|
||||
if resp == "" {
|
||||
return originErr
|
||||
}
|
||||
|
||||
var err JsonError
|
||||
if err := json.Unmarshal([]byte(resp), &err); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal cos error: %w", err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("obs error: %s", err.Message)
|
||||
}
|
||||
517
pkg/filemanager/driver/onedrive/api.go
Normal file
517
pkg/filemanager/driver/onedrive/api.go
Normal file
@@ -0,0 +1,517 @@
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// SmallFileSize is the max size accepted by the single-request upload API.
	SmallFileSize uint64 = 4 * 1024 * 1024
	// ChunkSize is the chunk size for server-relayed chunked uploads.
	ChunkSize uint64 = 10 * 1024 * 1024
	// ListRetry is the number of retries for listing requests.
	ListRetry = 1
	// chunkRetrySleep is the backoff between chunk upload retries.
	chunkRetrySleep = time.Second * 5

	notFoundError = "itemNotFound"
)
|
||||
|
||||
type RetryCtx struct{}
|
||||
|
||||
// GetSourcePath 获取文件的绝对路径
|
||||
func (info *FileInfo) GetSourcePath() string {
|
||||
res, err := url.PathUnescape(info.ParentReference.Path)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return strings.TrimPrefix(
|
||||
path.Join(
|
||||
strings.TrimPrefix(res, "/drive/root:"),
|
||||
info.Name,
|
||||
),
|
||||
"/",
|
||||
)
|
||||
}
|
||||
|
||||
func (client *client) getRequestURL(api string, opts ...Option) string {
|
||||
options := newDefaultOption()
|
||||
for _, o := range opts {
|
||||
o.apply(options)
|
||||
}
|
||||
|
||||
base, _ := url.Parse(client.endpoints.endpointURL)
|
||||
if base == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
if options.useDriverResource {
|
||||
base.Path = path.Join(base.Path, client.endpoints.driverResource, api)
|
||||
} else {
|
||||
base.Path = path.Join(base.Path, api)
|
||||
}
|
||||
|
||||
return base.String()
|
||||
}
|
||||
|
||||
// ListChildren lists the children of the given path. On failure it retries
// up to ListRetry times, carrying the retry counter through the context via
// RetryCtx and sleeping 5 seconds between attempts.
func (client *client) ListChildren(ctx context.Context, path string) ([]FileInfo, error) {
	var requestURL string
	dst := strings.TrimPrefix(path, "/")
	if dst == "" {
		// Root listing uses a different endpoint form.
		requestURL = client.getRequestURL("root/children")
	} else {
		requestURL = client.getRequestURL("root:/" + dst + ":/children")
	}

	res, err := client.requestWithStr(ctx, "GET", requestURL+"?$top=999999999", "", 200)
	if err != nil {
		// Recover the retry count stashed in the context (0 on first call).
		retried := 0
		if v, ok := ctx.Value(RetryCtx{}).(int); ok {
			retried = v
		}
		if retried < ListRetry {
			retried++
			client.l.Debug("Failed to list path %q: %s, will retry in 5 seconds.", path, err)
			time.Sleep(time.Duration(5) * time.Second)
			// Recurse with the incremented counter so retries are bounded.
			return client.ListChildren(context.WithValue(ctx, RetryCtx{}, retried), path)
		}
		return nil, err
	}

	var (
		decodeErr error
		fileInfo  ListResponse
	)
	decodeErr = json.Unmarshal([]byte(res), &fileInfo)
	if decodeErr != nil {
		return nil, decodeErr
	}

	return fileInfo.Value, nil
}
|
||||
|
||||
// Meta 根据资源ID或文件路径获取文件元信息
|
||||
func (client *client) Meta(ctx context.Context, id string, path string) (*FileInfo, error) {
|
||||
var requestURL string
|
||||
if id != "" {
|
||||
requestURL = client.getRequestURL("items/" + id)
|
||||
} else {
|
||||
dst := strings.TrimPrefix(path, "/")
|
||||
requestURL = client.getRequestURL("root:/" + dst)
|
||||
}
|
||||
|
||||
res, err := client.requestWithStr(ctx, "GET", requestURL+"?expand=thumbnails", "", 200)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
decodeErr error
|
||||
fileInfo FileInfo
|
||||
)
|
||||
decodeErr = json.Unmarshal([]byte(res), &fileInfo)
|
||||
if decodeErr != nil {
|
||||
return nil, decodeErr
|
||||
}
|
||||
|
||||
return &fileInfo, nil
|
||||
|
||||
}
|
||||
|
||||
// CreateUploadSession 创建分片上传会话
|
||||
func (client *client) CreateUploadSession(ctx context.Context, dst string, opts ...Option) (string, error) {
|
||||
options := newDefaultOption()
|
||||
for _, o := range opts {
|
||||
o.apply(options)
|
||||
}
|
||||
|
||||
dst = strings.TrimPrefix(dst, "/")
|
||||
requestURL := client.getRequestURL("root:/" + dst + ":/createUploadSession")
|
||||
body := map[string]map[string]interface{}{
|
||||
"item": {
|
||||
"@microsoft.graph.conflictBehavior": options.conflictBehavior,
|
||||
},
|
||||
}
|
||||
bodyBytes, _ := json.Marshal(body)
|
||||
|
||||
res, err := client.requestWithStr(ctx, "POST", requestURL, string(bodyBytes), 200)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var (
|
||||
decodeErr error
|
||||
uploadSession UploadSessionResponse
|
||||
)
|
||||
decodeErr = json.Unmarshal([]byte(res), &uploadSession)
|
||||
if decodeErr != nil {
|
||||
return "", decodeErr
|
||||
}
|
||||
|
||||
return uploadSession.UploadURL, nil
|
||||
}
|
||||
|
||||
// GetSiteIDByURL 通过 SharePoint 站点 URL 获取站点ID
|
||||
func (client *client) GetSiteIDByURL(ctx context.Context, siteUrl string) (string, error) {
|
||||
siteUrlParsed, err := url.Parse(siteUrl)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
hostName := siteUrlParsed.Hostname()
|
||||
relativePath := strings.Trim(siteUrlParsed.Path, "/")
|
||||
requestURL := client.getRequestURL(fmt.Sprintf("sites/%s:/%s", hostName, relativePath), WithDriverResource(false))
|
||||
res, reqErr := client.requestWithStr(ctx, "GET", requestURL, "", 200)
|
||||
if reqErr != nil {
|
||||
return "", reqErr
|
||||
}
|
||||
|
||||
var (
|
||||
decodeErr error
|
||||
siteInfo Site
|
||||
)
|
||||
decodeErr = json.Unmarshal([]byte(res), &siteInfo)
|
||||
if decodeErr != nil {
|
||||
return "", decodeErr
|
||||
}
|
||||
|
||||
return siteInfo.ID, nil
|
||||
}
|
||||
|
||||
// GetUploadSessionStatus 查询上传会话状态
|
||||
func (client *client) GetUploadSessionStatus(ctx context.Context, uploadURL string) (*UploadSessionResponse, error) {
|
||||
res, err := client.requestWithStr(ctx, "GET", uploadURL, "", 200)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
decodeErr error
|
||||
uploadSession UploadSessionResponse
|
||||
)
|
||||
decodeErr = json.Unmarshal([]byte(res), &uploadSession)
|
||||
if decodeErr != nil {
|
||||
return nil, decodeErr
|
||||
}
|
||||
|
||||
return &uploadSession, nil
|
||||
}
|
||||
|
||||
// UploadChunk uploads one chunk of an upload session with a Content-Range
// PUT. For the final chunk the response body is not a session status, so
// this returns (nil, nil) — callers must not rely on the returned session
// response when current.IsLast() is true.
func (client *client) UploadChunk(ctx context.Context, uploadURL string, content io.Reader, current *chunk.ChunkGroup) (*UploadSessionResponse, error) {
	res, err := client.request(
		ctx, "PUT", uploadURL, content,
		request.WithContentLength(current.Length()),
		request.WithHeader(http.Header{
			"Content-Range": {current.RangeHeader()},
		}),
		// The session URL is pre-authenticated; Authorization and the
		// default JSON Content-Type must be stripped for this request.
		request.WithoutHeader([]string{"Authorization", "Content-Type"}),
		request.WithTimeout(0),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to upload OneDrive chunk #%d: %w", current.Index(), err)
	}

	if current.IsLast() {
		// Last chunk: OneDrive returns the created item, not a session status.
		return nil, nil
	}

	var (
		decodeErr error
		uploadRes UploadSessionResponse
	)
	decodeErr = json.Unmarshal([]byte(res), &uploadRes)
	if decodeErr != nil {
		return nil, decodeErr
	}

	return &uploadRes, nil
}
|
||||
|
||||
// Upload uploads a file to OneDrive. Files at or below SmallFileSize go
// through the single-request simple upload API; larger files are uploaded
// through a chunked upload session with retry backoff.
func (client *client) Upload(ctx context.Context, file *fs.UploadRequest) error {
	// Decide whether an existing item at the destination is replaced.
	overwrite := "fail"
	if file.Mode&fs.ModeOverwrite == fs.ModeOverwrite {
		overwrite = "replace"
	}

	size := int(file.Props.Size)
	dst := file.Props.SavePath

	// Small file: single-request simple upload.
	if size <= int(SmallFileSize) {
		_, err := client.SimpleUpload(ctx, dst, file, int64(size), WithConflictBehavior(overwrite))
		return err
	}

	// Large file: chunked upload.
	// Create the upload session first.
	uploadURL, err := client.CreateUploadSession(ctx, dst, WithConflictBehavior(overwrite))
	if err != nil {
		return err
	}

	// Initial chunk groups
	chunks := chunk.NewChunkGroup(file, client.chunkSize, &backoff.ConstantBackoff{
		Max:   client.settings.ChunkRetryLimit(ctx),
		Sleep: chunkRetrySleep,
	}, client.settings.UseChunkBuffer(ctx), client.l, client.settings.TempPath(ctx))

	uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
		_, err := client.UploadChunk(ctx, uploadURL, content, current)
		return err
	}

	// upload chunks
	for chunks.Next() {
		if err := chunks.Process(uploadFunc); err != nil {
			// Best-effort cleanup of the session when a chunk fails.
			if err := client.DeleteUploadSession(ctx, uploadURL); err != nil {
				client.l.Warning("Failed to delete upload session: %s", err)
			}
			return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
		}
	}

	return nil
}
|
||||
|
||||
// DeleteUploadSession 删除上传会话
|
||||
func (client *client) DeleteUploadSession(ctx context.Context, uploadURL string) error {
|
||||
_, err := client.requestWithStr(ctx, "DELETE", uploadURL, "", 204)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SimpleUpload 上传小文件到dst
|
||||
func (client *client) SimpleUpload(ctx context.Context, dst string, body io.Reader, size int64, opts ...Option) (*UploadResult, error) {
|
||||
options := newDefaultOption()
|
||||
for _, o := range opts {
|
||||
o.apply(options)
|
||||
}
|
||||
|
||||
dst = strings.TrimPrefix(dst, "/")
|
||||
requestURL := client.getRequestURL("root:/" + dst + ":/content")
|
||||
requestURL += ("?@microsoft.graph.conflictBehavior=" + options.conflictBehavior)
|
||||
|
||||
res, err := client.request(ctx, "PUT", requestURL, body, request.WithContentLength(int64(size)),
|
||||
request.WithTimeout(0),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
decodeErr error
|
||||
uploadRes UploadResult
|
||||
)
|
||||
decodeErr = json.Unmarshal([]byte(res), &uploadRes)
|
||||
if decodeErr != nil {
|
||||
return nil, decodeErr
|
||||
}
|
||||
|
||||
return &uploadRes, nil
|
||||
}
|
||||
|
||||
// BatchDelete 并行删除给出的文件,返回删除失败的文件,及第一个遇到的错误。此方法将文件分为
|
||||
// 20个一组,调用Delete并行删除
|
||||
func (client *client) BatchDelete(ctx context.Context, dst []string) ([]string, error) {
|
||||
groupNum := len(dst)/20 + 1
|
||||
finalRes := make([]string, 0, len(dst))
|
||||
res := make([]string, 0, 20)
|
||||
var err error
|
||||
|
||||
for i := 0; i < groupNum; i++ {
|
||||
end := 20*i + 20
|
||||
if i == groupNum-1 {
|
||||
end = len(dst)
|
||||
}
|
||||
|
||||
client.l.Debug("Delete file group: %v.", dst[20*i:end])
|
||||
res, err = client.Delete(ctx, dst[20*i:end])
|
||||
finalRes = append(finalRes, res...)
|
||||
}
|
||||
|
||||
return finalRes, err
|
||||
}
|
||||
|
||||
// Delete removes the given files with one Graph $batch request and returns
// the subset that failed along with ErrDeleteFile when any did. Due to the
// $batch API limit, at most 20 items may be passed per call.
func (client *client) Delete(ctx context.Context, dst []string) ([]string, error) {
	body := client.makeBatchDeleteRequestsBody(dst)
	res, err := client.requestWithStr(ctx, "POST", client.getRequestURL("$batch",
		WithDriverResource(false)), body, 200)
	if err != nil {
		// Whole request failed: every input path is considered failed.
		return dst, err
	}

	var (
		decodeErr error
		deleteRes BatchResponses
	)
	decodeErr = json.Unmarshal([]byte(res), &deleteRes)
	if decodeErr != nil {
		return dst, decodeErr
	}

	// Collect the sub-requests that failed.
	failed := getDeleteFailed(&deleteRes)
	if len(failed) != 0 {
		return failed, ErrDeleteFile
	}
	return failed, nil
}
|
||||
|
||||
func getDeleteFailed(res *BatchResponses) []string {
|
||||
var failed = make([]string, 0, len(res.Responses))
|
||||
for _, v := range res.Responses {
|
||||
if v.Status != 204 && v.Status != 404 {
|
||||
failed = append(failed, v.ID)
|
||||
}
|
||||
}
|
||||
return failed
|
||||
}
|
||||
|
||||
// makeBatchDeleteRequestsBody builds the JSON body of a Graph $batch request
// containing one DELETE sub-request per file. Each sub-request uses the file
// path as its ID so failures can be mapped back to paths.
func (client *client) makeBatchDeleteRequestsBody(files []string) string {
	req := BatchRequests{
		Requests: make([]BatchRequest, len(files)),
	}
	for i, v := range files {
		v = strings.TrimPrefix(v, "/")
		// Build the drive-relative URL; EscapedPath handles special characters.
		filePath, _ := url.Parse("/" + client.endpoints.driverResource + "/root:/")
		filePath.Path = path.Join(filePath.Path, v)
		req.Requests[i] = BatchRequest{
			ID:     v,
			Method: "DELETE",
			URL:    filePath.EscapedPath(),
		}
	}

	res, _ := json.Marshal(req)
	return string(res)
}
|
||||
|
||||
// GetThumbURL fetches the "large" thumbnail URL for the file at dst. It
// handles both response shapes OneDrive may return: a direct `url` field, or
// a one-element value array mapping size names to thumbnail objects.
func (client *client) GetThumbURL(ctx context.Context, dst string) (string, error) {
	dst = strings.TrimPrefix(dst, "/")
	requestURL := client.getRequestURL("root:/"+dst+":/thumbnails/0") + "/large"

	res, err := client.requestWithStr(ctx, "GET", requestURL, "", 200)
	if err != nil {
		return "", err
	}

	var (
		decodeErr error
		thumbRes  ThumbResponse
	)
	decodeErr = json.Unmarshal([]byte(res), &thumbRes)
	if decodeErr != nil {
		return "", decodeErr
	}

	if thumbRes.URL != "" {
		return thumbRes.URL, nil
	}

	if len(thumbRes.Value) == 1 {
		if res, ok := thumbRes.Value[0]["large"]; ok {
			// NOTE(review): assumes the "large" entry is a map holding a
			// string "url" key; a schema change here would panic — confirm
			// against the Graph thumbnails response shape.
			return res.(map[string]interface{})["url"].(string), nil
		}
	}

	return "", ErrThumbSizeNotFound
}
|
||||
|
||||
func sysError(err error) *RespError {
|
||||
return &RespError{APIError: APIError{
|
||||
Code: "system",
|
||||
Message: err.Error(),
|
||||
}}
|
||||
}
|
||||
|
||||
// request performs an authenticated Graph API request and returns the raw
// response body. It refreshes the credential first, applies per-policy TPS
// limiting, and converts non-2xx responses into *RespError (or, for 429
// throttling, into a backoff.RetryableError carrying the Retry-After header).
func (client *client) request(ctx context.Context, method string, url string, body io.Reader, option ...request.Option) (string, error) {
	// Obtain/refresh the access credential.
	err := client.UpdateCredential(ctx)
	if err != nil {
		return "", sysError(err)
	}

	// Default options; caller-supplied options are appended afterwards and
	// may therefore override these (e.g. UploadChunk strips Authorization).
	opts := []request.Option{
		request.WithHeader(http.Header{
			"Authorization": {"Bearer " + client.credential.String()},
			"Content-Type":  {"application/json"},
		}),
		request.WithContext(ctx),
		request.WithTPSLimit(
			fmt.Sprintf("policy_%d", client.policy.ID),
			client.policy.Settings.TPSLimit,
			client.policy.Settings.TPSLimitBurst,
		),
	}

	// Send the request.
	res := client.httpClient.Request(
		method,
		url,
		body,
		append(opts, option...)...,
	)

	if res.Err != nil {
		return "", sysError(res.Err)
	}

	respBody, err := res.GetResponse()
	if err != nil {
		return "", sysError(err)
	}

	// Parse the response.
	var (
		errResp   RespError
		decodeErr error
	)
	// Non-2xx: decode the API error body.
	if res.Response.StatusCode < 200 || res.Response.StatusCode >= 300 {
		decodeErr = json.Unmarshal([]byte(respBody), &errResp)
		if decodeErr != nil {
			client.l.Debug("Onedrive returns unknown response: %s", respBody)
			return "", sysError(decodeErr)
		}

		if res.Response.StatusCode == 429 {
			// Throttled: surface a retryable error honoring Retry-After.
			client.l.Warning("OneDrive request is throttled.")
			return "", backoff.NewRetryableErrorFromHeader(&errResp, res.Response.Header)
		}

		return "", &errResp
	}

	return respBody, nil
}
|
||||
|
||||
// requestWithStr sends a request whose body is the given string, with the
// matching Content-Length.
// NOTE(review): expectedCode is currently unused — status handling happens
// inside request(); confirm whether the parameter can be retired.
func (client *client) requestWithStr(ctx context.Context, method string, url string, body string, expectedCode int) (string, error) {
	// 发送请求
	bodyReader := io.NopCloser(strings.NewReader(body))
	return client.request(ctx, method, url, bodyReader,
		request.WithContentLength(int64(len(body))),
	)
}
|
||||
90
pkg/filemanager/driver/onedrive/client.go
Normal file
90
pkg/filemanager/driver/onedrive/client.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/credmanager"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
)
|
||||
|
||||
var (
	// ErrAuthEndpoint indicates the authorization endpoint URL could not be parsed.
	ErrAuthEndpoint = errors.New("failed to parse endpoint url")
	// ErrInvalidRefreshToken indicates the policy holds no valid refresh token.
	ErrInvalidRefreshToken = errors.New("no valid refresh token in this policy")
	// ErrDeleteFile indicates one or more files could not be deleted.
	ErrDeleteFile = errors.New("cannot delete file")
	// ErrClientCanceled indicates the client canceled the operation.
	ErrClientCanceled = errors.New("client canceled")
	// ErrThumbSizeNotFound indicates the desired thumbnail size is unavailable.
	ErrThumbSizeNotFound = errors.New("thumb size not found")
)
|
||||
|
||||
// Client is the OneDrive (Microsoft Graph) API client used by the driver.
type Client interface {
	// ListChildren lists the child items under the given path.
	ListChildren(ctx context.Context, path string) ([]FileInfo, error)
	// Meta retrieves file metadata by item ID or by path.
	Meta(ctx context.Context, id string, path string) (*FileInfo, error)
	// CreateUploadSession creates a chunked upload session for dst and returns the upload URL.
	CreateUploadSession(ctx context.Context, dst string, opts ...Option) (string, error)
	// GetSiteIDByURL resolves a SharePoint site URL to its site ID.
	GetSiteIDByURL(ctx context.Context, siteUrl string) (string, error)
	// GetUploadSessionStatus queries the state of an existing upload session.
	GetUploadSessionStatus(ctx context.Context, uploadURL string) (*UploadSessionResponse, error)
	// Upload uploads a file, chunked as needed.
	Upload(ctx context.Context, file *fs.UploadRequest) error
	// SimpleUpload uploads a small body in a single request.
	SimpleUpload(ctx context.Context, dst string, body io.Reader, size int64, opts ...Option) (*UploadResult, error)
	// DeleteUploadSession cancels an upload session.
	DeleteUploadSession(ctx context.Context, uploadURL string) error
	// BatchDelete deletes multiple items, returning the ones that failed.
	BatchDelete(ctx context.Context, dst []string) ([]string, error)
	// GetThumbURL retrieves the thumbnail URL for an item.
	GetThumbURL(ctx context.Context, dst string) (string, error)
	// OAuthURL builds the OAuth authorization page URL.
	OAuthURL(ctx context.Context, scopes []string) string
	// ObtainToken exchanges a code or refresh token for a credential.
	ObtainToken(ctx context.Context, opts ...Option) (*Credential, error)
}
|
||||
|
||||
// client is the concrete OneDrive API client.
type client struct {
	endpoints  *endpoints            // API and OAuth endpoint configuration
	policy     *ent.StoragePolicy    // the storage policy this client serves
	credential credmanager.Credential // current access credential

	httpClient request.Client
	cred       credmanager.CredManager
	l          logging.Logger
	settings   setting.Provider

	chunkSize int64 // chunk size in bytes for chunked uploads
}
|
||||
|
||||
// endpoints holds the OneDrive client endpoint settings.
type endpoints struct {
	oAuthEndpoints *oauthEndpoint
	endpointURL    string // base URL for API requests
	driverResource string // drive resource to use (defaults to "me/drive")
}
|
||||
|
||||
// NewClient builds a new OneDrive client from a storage policy.
// If the policy does not specify a drive resource, "me/drive" (the signed-in
// user's default drive) is used.
func NewClient(policy *ent.StoragePolicy, httpClient request.Client, cred credmanager.CredManager,
	l logging.Logger, settings setting.Provider, chunkSize int64) Client {
	client := &client{
		endpoints: &endpoints{
			endpointURL:    policy.Server,
			driverResource: policy.Settings.OdDriver,
		},
		policy:     policy,
		httpClient: httpClient,
		cred:       cred,
		l:          l,
		settings:   settings,
		chunkSize:  chunkSize,
	}

	if client.endpoints.driverResource == "" {
		client.endpoints.driverResource = "me/drive"
	}

	// NOTE(review): getOAuthEndpoint returns nil when policy.Server cannot be
	// parsed; oAuthEndpoints would then be nil and panic on first use — verify
	// policy.Server is validated upstream.
	oauthBase := getOAuthEndpoint(policy.Server)
	client.endpoints.oAuthEndpoints = oauthBase

	return client
}
|
||||
271
pkg/filemanager/driver/onedrive/oauth.go
Normal file
271
pkg/filemanager/driver/onedrive/oauth.go
Normal file
@@ -0,0 +1,271 @@
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/gob"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/credmanager"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
const (
|
||||
AccessTokenExpiryMargin = 600 // 10 minutes
|
||||
)
|
||||
|
||||
// Error implements the error interface, returning the OAuth error description.
func (err OAuthError) Error() string {
	return err.ErrorDescription
}
|
||||
|
||||
// OAuthURL 获取OAuth认证页面URL
|
||||
func (client *client) OAuthURL(ctx context.Context, scope []string) string {
|
||||
query := url.Values{
|
||||
"client_id": {client.policy.BucketName},
|
||||
"scope": {strings.Join(scope, " ")},
|
||||
"response_type": {"code"},
|
||||
"redirect_uri": {client.policy.Settings.OauthRedirect},
|
||||
"state": {strconv.Itoa(client.policy.ID)},
|
||||
}
|
||||
client.endpoints.oAuthEndpoints.authorize.RawQuery = query.Encode()
|
||||
return client.endpoints.oAuthEndpoints.authorize.String()
|
||||
}
|
||||
|
||||
// getOAuthEndpoint gets OAuth endpoints from API endpoint
|
||||
func getOAuthEndpoint(apiEndpoint string) *oauthEndpoint {
|
||||
base, err := url.Parse(apiEndpoint)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
var (
|
||||
token *url.URL
|
||||
authorize *url.URL
|
||||
)
|
||||
switch base.Host {
|
||||
//case "login.live.com":
|
||||
// token, _ = url.Parse("https://login.live.com/oauth20_token.srf")
|
||||
// authorize, _ = url.Parse("https://login.live.com/oauth20_authorize.srf")
|
||||
case "microsoftgraph.chinacloudapi.cn":
|
||||
token, _ = url.Parse("https://login.chinacloudapi.cn/common/oauth2/v2.0/token")
|
||||
authorize, _ = url.Parse("https://login.chinacloudapi.cn/common/oauth2/v2.0/authorize")
|
||||
default:
|
||||
token, _ = url.Parse("https://login.microsoftonline.com/common/oauth2/v2.0/token")
|
||||
authorize, _ = url.Parse("https://login.microsoftonline.com/common/oauth2/v2.0/authorize")
|
||||
}
|
||||
|
||||
return &oauthEndpoint{
|
||||
token: *token,
|
||||
authorize: *authorize,
|
||||
}
|
||||
}
|
||||
|
||||
// Credential is the token credential returned by the OAuth token endpoint.
type Credential struct {
	// ExpiresIn is decoded from the token response as a relative lifetime in
	// seconds, then rewritten by obtainToken to the absolute Unix timestamp
	// of expiry.
	ExpiresIn       int64  `json:"expires_in"`
	AccessToken     string `json:"access_token"`
	RefreshToken    string `json:"refresh_token"`
	RefreshedAtUnix int64  `json:"refreshed_at"` // Unix time of the last refresh; 0 if never refreshed

	// PolicyID links the credential back to its storage policy.
	PolicyID int `json:"policy_id"`
}
|
||||
|
||||
// Register Credential with gob so it can be encoded/decoded by the
// credential manager.
func init() {
	gob.Register(Credential{})
}
|
||||
|
||||
// Refresh exchanges the stored refresh token for a new access token, persists
// the rotated refresh token to the database, and returns the updated
// credential. Returns ErrInvalidRefreshToken when no refresh token is stored.
func (c Credential) Refresh(ctx context.Context) (credmanager.Credential, error) {
	if c.RefreshToken == "" {
		return nil, ErrInvalidRefreshToken
	}

	// Reload the policy so the latest client ID/secret/redirect are used.
	dep := dependency.FromContext(ctx)
	storagePolicyClient := dep.StoragePolicyClient()
	policy, err := storagePolicyClient.GetPolicyByID(ctx, c.PolicyID)
	if err != nil {
		return nil, fmt.Errorf("failed to get storage policy: %w", err)
	}

	// NOTE(review): getOAuthEndpoint returns nil on an unparsable
	// policy.Server; oauthBase.token below would then panic — confirm the
	// server URL is validated before reaching here.
	oauthBase := getOAuthEndpoint(policy.Server)

	newCredential, err := obtainToken(ctx, &obtainTokenArgs{
		clientId:      policy.BucketName,
		redirect:      policy.Settings.OauthRedirect,
		secret:        policy.SecretKey,
		refreshToken:  c.RefreshToken,
		client:        dep.RequestClient(request.WithLogger(dep.Logger())),
		tokenEndpoint: oauthBase.token.String(),
		policyID:      c.PolicyID,
	})

	if err != nil {
		return nil, err
	}

	// c is a value receiver, so these mutations produce a fresh credential
	// that is returned below; the original is untouched.
	c.RefreshToken = newCredential.RefreshToken
	c.AccessToken = newCredential.AccessToken
	c.ExpiresIn = newCredential.ExpiresIn
	c.RefreshedAtUnix = time.Now().Unix()

	// Persist the rotated refresh token to the database.
	if err := storagePolicyClient.UpdateAccessKey(ctx, policy, newCredential.RefreshToken); err != nil {
		return nil, err
	}

	return c, nil
}
|
||||
|
||||
// Key returns the credential manager key identifying this credential by
// its policy ID.
func (c Credential) Key() string {
	return CredentialKey(c.PolicyID)
}
|
||||
|
||||
// Expiry returns when this credential should be treated as expired.
// ExpiresIn holds an absolute Unix timestamp (set by obtainToken); a safety
// margin is subtracted so the token is refreshed before it actually expires.
func (c Credential) Expiry() time.Time {
	return time.Unix(c.ExpiresIn-AccessTokenExpiryMargin, 0)
}
|
||||
|
||||
// String returns the bearer access token.
func (c Credential) String() string {
	return c.AccessToken
}
|
||||
|
||||
func (c Credential) RefreshedAt() *time.Time {
|
||||
if c.RefreshedAtUnix == 0 {
|
||||
return nil
|
||||
}
|
||||
refreshedAt := time.Unix(c.RefreshedAtUnix, 0)
|
||||
return &refreshedAt
|
||||
}
|
||||
|
||||
// ObtainToken exchanges an authorization code or a refresh token (supplied
// via WithCode / WithRefreshToken options) for a new credential.
func (client *client) ObtainToken(ctx context.Context, opts ...Option) (*Credential, error) {
	options := newDefaultOption()
	for _, o := range opts {
		o.apply(options)
	}

	return obtainToken(ctx, &obtainTokenArgs{
		clientId:      client.policy.BucketName,
		redirect:      client.policy.Settings.OauthRedirect,
		secret:        client.policy.SecretKey,
		code:          options.code,
		refreshToken:  options.refreshToken,
		client:        client.httpClient,
		tokenEndpoint: client.endpoints.oAuthEndpoints.token.String(),
		policyID:      client.policy.ID,
	})
}
|
||||
|
||||
// obtainTokenArgs carries the parameters for a token-endpoint request.
// Exactly one of code / refreshToken selects the grant type.
type obtainTokenArgs struct {
	clientId      string // OAuth application (client) ID
	redirect      string // registered redirect URI
	secret        string // client secret
	code          string // authorization code (code grant)
	refreshToken  string // refresh token (refresh grant)
	client        request.Client
	tokenEndpoint string // full token endpoint URL
	policyID      int    // storage policy the credential belongs to
}
|
||||
|
||||
// obtainToken fetches a new access token from the Microsoft identity
// platform token endpoint, using the authorization-code grant when args.code
// is set and the refresh-token grant otherwise. On success the returned
// Credential's ExpiresIn is rewritten from a relative lifetime to an
// absolute Unix expiry timestamp.
func obtainToken(ctx context.Context, args *obtainTokenArgs) (*Credential, error) {
	body := url.Values{
		"client_id":     {args.clientId},
		"redirect_uri":  {args.redirect},
		"client_secret": {args.secret},
	}
	if args.code != "" {
		body.Add("grant_type", "authorization_code")
		body.Add("code", args.code)
	} else {
		body.Add("grant_type", "refresh_token")
		body.Add("refresh_token", args.refreshToken)
	}
	strBody := body.Encode()

	res := args.client.Request(
		"POST",
		args.tokenEndpoint,
		io.NopCloser(strings.NewReader(strBody)),
		request.WithHeader(http.Header{
			"Content-Type": {"application/x-www-form-urlencoded"}},
		),
		request.WithContentLength(int64(len(strBody))),
		request.WithContext(ctx),
	)
	if res.Err != nil {
		return nil, res.Err
	}

	respBody, err := res.GetResponse()
	if err != nil {
		return nil, err
	}

	var (
		errResp    OAuthError
		credential Credential
		decodeErr  error
	)

	// Non-200 responses carry an OAuthError payload; 200 carries the credential.
	if res.Response.StatusCode != 200 {
		decodeErr = json.Unmarshal([]byte(respBody), &errResp)
	} else {
		decodeErr = json.Unmarshal([]byte(respBody), &credential)
	}
	if decodeErr != nil {
		return nil, decodeErr
	}

	if errResp.ErrorType != "" {
		return nil, errResp
	}

	credential.PolicyID = args.policyID
	// Convert the relative expires_in (seconds) into an absolute Unix timestamp.
	credential.ExpiresIn = time.Now().Unix() + credential.ExpiresIn
	if args.code != "" {
		// For the code grant, mark the credential as already expired —
		// presumably to force a refresh on first use so the refresh path
		// persists the token. TODO(review): confirm intent.
		credential.ExpiresIn = time.Now().Unix() - 10
	}
	return &credential, nil
}
|
||||
|
||||
// UpdateCredential 更新凭证,并检查有效期
|
||||
func (client *client) UpdateCredential(ctx context.Context) error {
|
||||
newCred, err := client.cred.Obtain(ctx, CredentialKey(client.policy.ID))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to obtain token from CredManager: %w", err)
|
||||
}
|
||||
|
||||
client.credential = newCred
|
||||
return nil
|
||||
}
|
||||
|
||||
// RetrieveOneDriveCredentials retrieves OneDrive credentials from DB inventory
|
||||
func RetrieveOneDriveCredentials(ctx context.Context, storagePolicyClient inventory.StoragePolicyClient) ([]credmanager.Credential, error) {
|
||||
odPolicies, err := storagePolicyClient.ListPolicyByType(ctx, types.PolicyTypeOd)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list OneDrive policies: %w", err)
|
||||
}
|
||||
|
||||
return lo.Map(odPolicies, func(item *ent.StoragePolicy, index int) credmanager.Credential {
|
||||
return &Credential{
|
||||
PolicyID: item.ID,
|
||||
ExpiresIn: 0,
|
||||
RefreshToken: item.AccessKey,
|
||||
}
|
||||
}), nil
|
||||
}
|
||||
|
||||
// CredentialKey builds the credential manager key for a OneDrive policy.
func CredentialKey(policyId int) string {
	return "cred_od_" + strconv.Itoa(policyId)
}
|
||||
247
pkg/filemanager/driver/onedrive/onedrive.go
Normal file
247
pkg/filemanager/driver/onedrive/onedrive.go
Normal file
@@ -0,0 +1,247 @@
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/credmanager"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Driver is the OneDrive storage adapter.
type Driver struct {
	policy    *ent.StoragePolicy
	client    Client
	settings  setting.Provider
	config    conf.ConfigProvider
	l         logging.Logger
	chunkSize int64 // chunk size in bytes for chunked uploads
}
|
||||
|
||||
var (
	// features is the static capability set of this driver, populated in init.
	features = &boolset.BooleanSet{}
)
|
||||
|
||||
const (
	// streamSaverParam is the query parameter appended to download URLs to
	// carry the display name when the stream-saver setting is enabled.
	streamSaverParam = "stream_saver"
)
|
||||
|
||||
// Declare that this driver requires a sentinel task after upload
// (see CompleteUpload's size verification).
func init() {
	boolset.Sets(map[driver.HandlerCapability]bool{
		driver.HandlerCapabilityUploadSentinelRequired: true,
	}, features)
}
|
||||
|
||||
// New initializes a new Driver instance from a storage policy.
// When the policy does not set a chunk size, 50 MB is used.
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
	config conf.ConfigProvider, l logging.Logger, cred credmanager.CredManager) (*Driver, error) {
	chunkSize := policy.Settings.ChunkSize
	if policy.Settings.ChunkSize == 0 {
		chunkSize = 50 << 20 // 50MB
	}

	c := NewClient(policy, request.NewClient(config, request.WithLogger(l)), cred, l, settings, chunkSize)

	return &Driver{
		policy:    policy,
		client:    c,
		settings:  settings,
		l:         l,
		config:    config,
		chunkSize: chunkSize,
	}, nil
}
|
||||
|
||||
//// List 列取项目
|
||||
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) {
|
||||
// base = strings.TrimPrefix(base, "/")
|
||||
// // 列取子项目
|
||||
// objects, _ := handler.client.ListChildren(ctx, base)
|
||||
//
|
||||
// // 获取真实的列取起始根目录
|
||||
// rootPath := base
|
||||
// if realBase, ok := ctx.Value(fsctx.PathCtx).(string); ok {
|
||||
// rootPath = realBase
|
||||
// } else {
|
||||
// ctx = context.WithValue(ctx, fsctx.PathCtx, base)
|
||||
// }
|
||||
//
|
||||
// // 整理结果
|
||||
// res := make([]response.Object, 0, len(objects))
|
||||
// for _, object := range objects {
|
||||
// source := path.Join(base, object.Name)
|
||||
// rel, err := filepath.Rel(rootPath, source)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: object.Name,
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Source: source,
|
||||
// Size: uint64(object.Size),
|
||||
// IsDir: object.Folder != nil,
|
||||
// LastModify: time.Now(),
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// // 递归列取子目录
|
||||
// if recursive {
|
||||
// for _, object := range objects {
|
||||
// if object.Folder != nil {
|
||||
// sub, _ := handler.List(ctx, path.Join(base, object.Name), recursive)
|
||||
// res = append(res, sub...)
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// return res, nil
|
||||
//}
|
||||
|
||||
// Open is not supported by the OneDrive driver.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	return nil, errors.New("not implemented")
}
|
||||
|
||||
// Put uploads the file stream to its destination path, closing the request
// body when done.
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
	defer file.Close()

	return handler.client.Upload(ctx, file)
}
|
||||
|
||||
// Delete removes one or more files, returning the files that could not be
// deleted along with the last error encountered.
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
	return handler.client.BatchDelete(ctx, files)
}
|
||||
|
||||
// Thumb 获取文件缩略图
|
||||
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
|
||||
res, err := handler.client.GetThumbURL(ctx, e.Source())
|
||||
if err != nil {
|
||||
var apiErr *RespError
|
||||
if errors.As(err, &apiErr); err == ErrThumbSizeNotFound || (apiErr != nil && apiErr.APIError.Code == notFoundError) {
|
||||
// OneDrive cannot generate thumbnail for this file
|
||||
return "", fmt.Errorf("thumb not supported in OneDrive: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Source resolves the direct download URL for an entity by fetching fresh
// metadata from the API.
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
	// Not cached; fetch metadata for a fresh pre-signed download URL.
	res, err := handler.client.Meta(ctx, "", e.Source())
	if err != nil {
		return "", err
	}

	if args.IsDownload && handler.policy.Settings.StreamSaver {
		// NOTE(review): "&" assumes DownloadURL already carries a query
		// string — confirm this always holds for Graph download URLs.
		downloadUrl := res.DownloadURL + "&" + streamSaverParam + "=" + url.QueryEscape(args.DisplayName)
		return downloadUrl, nil
	}

	return res.DownloadURL, nil
}
|
||||
|
||||
// Token creates a OneDrive upload session for the client-side upload and
// returns the upload credential containing the session URL and chunk size.
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
	// Build the master/slave callback URL hit after the upload completes.
	siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
	uploadSession.Callback = routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeOd, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()

	// "fail" conflict behavior: the session errors out if the target exists.
	uploadURL, err := handler.client.CreateUploadSession(ctx, file.Props.SavePath, WithConflictBehavior("fail"))
	if err != nil {
		return nil, err
	}

	// Upload monitoring from the previous version is currently disabled.
	//go handler.client.MonitorUpload(uploadURL, uploadSession.Key, fileInfo.SavePath, fileInfo.Size, ttl)

	uploadSession.ChunkSize = handler.chunkSize
	uploadSession.UploadURL = uploadURL
	return &fs.UploadCredential{
		ChunkSize:  handler.chunkSize,
		UploadURLs: []string{uploadURL},
	}, nil
}
|
||||
|
||||
// CancelToken cancels an upload session. After a successful cancellation, an
// empty placeholder file is uploaded (best-effort) to stop any in-flight
// client upload; a placeholder failure is only logged — the inner err shadows
// the outer one, so the function still returns nil in that case.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	err := handler.client.DeleteUploadSession(ctx, uploadSession.UploadURL)
	// Create empty placeholder file to stop upload
	if err == nil {
		_, err := handler.client.SimpleUpload(ctx, uploadSession.Props.SavePath, strings.NewReader(""), 0, WithConflictBehavior("replace"))
		if err != nil {
			handler.l.Warning("Failed to create placeholder file %q:%s", uploadSession.Props.SavePath, err)
		}
	}

	return err
}
|
||||
|
||||
// CompleteUpload verifies a finished upload when a sentinel task is attached,
// comparing the remote file size with the expected session size.
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	// No sentinel task means no verification is required.
	if session.SentinelTaskID == 0 {
		return nil
	}

	// Make sure the uploaded file size is correct.
	res, err := handler.client.Meta(ctx, "", session.Props.SavePath)
	if err != nil {
		// NOTE(review): a stale comment here mentioned creating a placeholder
		// file to stop further upload, but no such code exists — confirm
		// whether that step is still intended.
		return fmt.Errorf("failed to get uploaded file size: %w", err)
	}

	isSharePoint := strings.Contains(handler.policy.Settings.OdDriver, "sharepoint.com") ||
		strings.Contains(handler.policy.Settings.OdDriver, "sharepoint.cn")
	sizeMismatch := res.Size != session.Props.Size
	// SharePoint adds metadata to Office documents, inflating the stored size;
	// allow up to 1 MB of growth before flagging a mismatch.
	// See: https://github.com/OneDrive/onedrive-api-docs/issues/935
	if isSharePoint && sizeMismatch && (res.Size > session.Props.Size) && (res.Size-session.Props.Size <= 1048576) {
		sizeMismatch = false
	}

	if sizeMismatch {
		return serializer.NewError(
			serializer.CodeMetaMismatch,
			fmt.Sprintf("File size not match, expected: %d, actual: %d", session.Props.Size, res.Size),
			nil,
		)
	}

	return nil
}
|
||||
|
||||
// Capabilities reports the static features and policy-derived thumbnail /
// media settings of this driver.
func (handler *Driver) Capabilities() *driver.Capabilities {
	return &driver.Capabilities{
		StaticFeatures:      features,
		ThumbSupportedExts:  handler.policy.Settings.ThumbExts,
		ThumbSupportAllExts: handler.policy.Settings.ThumbSupportAllExts,
		ThumbMaxSize:        handler.policy.Settings.ThumbMaxSize,
		ThumbProxy:          handler.policy.Settings.ThumbGeneratorProxy,
		MediaMetaProxy:      handler.policy.Settings.MediaMetaGeneratorProxy,
	}
}
|
||||
|
||||
// MediaMeta is not supported by the OneDrive driver.
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
	return nil, errors.New("not implemented")
}
|
||||
|
||||
// LocalPath always returns "" — OneDrive entities have no local path.
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	return ""
}
|
||||
59
pkg/filemanager/driver/onedrive/options.go
Normal file
59
pkg/filemanager/driver/onedrive/options.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package onedrive
|
||||
|
||||
import "time"
|
||||
|
||||
// Option applies an extra setting to a request.
type Option interface {
	apply(*options)
}
|
||||
|
||||
// options collects the optional request parameters filled in by Option values.
type options struct {
	redirect          string // OAuth redirect URI
	code              string // OAuth authorization code
	refreshToken      string // OAuth refresh token
	conflictBehavior  string // OneDrive name-conflict behavior, e.g. "fail" or "replace"
	expires           time.Time
	useDriverResource bool // whether to address the configured drive resource
}
|
||||
|
||||
// optionFunc adapts a plain function to the Option interface.
type optionFunc func(*options)
|
||||
|
||||
// WithCode sets the OAuth authorization code for the request.
func WithCode(t string) Option {
	return optionFunc(func(o *options) {
		o.code = t
	})
}
|
||||
|
||||
// WithRefreshToken sets the OAuth refresh token for the request.
func WithRefreshToken(t string) Option {
	return optionFunc(func(o *options) {
		o.refreshToken = t
	})
}
|
||||
|
||||
// WithConflictBehavior sets how a name conflict at the destination is handled.
func WithConflictBehavior(t string) Option {
	return optionFunc(func(o *options) {
		o.conflictBehavior = t
	})
}
|
||||
|
||||
// WithDriverResource sets whether the request addresses the configured drive
// resource. (The original comment here was copy-pasted from
// WithConflictBehavior and was incorrect.)
func WithDriverResource(t bool) Option {
	return optionFunc(func(o *options) {
		o.useDriverResource = t
	})
}
|
||||
|
||||
// apply invokes the wrapped function on the options.
func (f optionFunc) apply(o *options) {
	f(o)
}
|
||||
|
||||
// newDefaultOption returns the default request options: fail on name
// conflicts, use the configured drive resource, and a one-hour expiry.
func newDefaultOption() *options {
	return &options{
		conflictBehavior:  "fail",
		useDriverResource: true,
		expires:           time.Now().UTC().Add(time.Duration(1) * time.Hour),
	}
}
|
||||
130
pkg/filemanager/driver/onedrive/types.go
Normal file
130
pkg/filemanager/driver/onedrive/types.go
Normal file
@@ -0,0 +1,130 @@
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// RespError is an error envelope returned by the Graph API.
type RespError struct {
	APIError APIError `json:"error"`
}

// APIError is the error payload inside a RespError.
type APIError struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}
|
||||
|
||||
// UploadSessionResponse 分片上传会话
|
||||
type UploadSessionResponse struct {
|
||||
DataContext string `json:"@odata.context"`
|
||||
ExpirationDateTime string `json:"expirationDateTime"`
|
||||
NextExpectedRanges []string `json:"nextExpectedRanges"`
|
||||
UploadURL string `json:"uploadUrl"`
|
||||
}
|
||||
|
||||
// FileInfo is the metadata of a drive item. Exactly one of File / Folder is
// non-nil depending on the item type.
type FileInfo struct {
	Name            string          `json:"name"`
	Size            int64           `json:"size"`
	Image           imageInfo       `json:"image"`
	ParentReference parentReference `json:"parentReference"`
	DownloadURL     string          `json:"@microsoft.graph.downloadUrl"`
	File            *file           `json:"file"`
	Folder          *folder         `json:"folder"`
}

// file holds file-specific facets of a drive item.
type file struct {
	MimeType string `json:"mimeType"`
}

// folder holds folder-specific facets of a drive item.
type folder struct {
	ChildCount int `json:"childCount"`
}

// imageInfo holds image dimensions of a drive item.
type imageInfo struct {
	Height int `json:"height"`
	Width  int `json:"width"`
}

// parentReference identifies the parent of a drive item.
type parentReference struct {
	Path string `json:"path"`
	Name string `json:"name"`
	ID   string `json:"id"`
}
|
||||
|
||||
// UploadResult is the item descriptor returned after a successful upload.
type UploadResult struct {
	ID   string `json:"id"`
	Name string `json:"name"`
	Size uint64 `json:"size"`
}
|
||||
|
||||
// BatchRequests is a Graph JSON batch request envelope.
type BatchRequests struct {
	Requests []BatchRequest `json:"requests"`
}

// BatchRequest is a single request inside a batch.
type BatchRequest struct {
	ID      string            `json:"id"`
	Method  string            `json:"method"`
	URL     string            `json:"url"`
	Body    interface{}       `json:"body,omitempty"`
	Headers map[string]string `json:"headers,omitempty"`
}

// BatchResponses is a Graph JSON batch response envelope.
type BatchResponses struct {
	Responses []BatchResponse `json:"responses"`
}

// BatchResponse is a single response inside a batch.
type BatchResponse struct {
	ID     string `json:"id"`
	Status int    `json:"status"`
}
|
||||
|
||||
// ThumbResponse is the response of a thumbnail lookup.
type ThumbResponse struct {
	Value []map[string]interface{} `json:"value"`
	URL   string                   `json:"url"`
}

// ListResponse is the response of a children listing.
type ListResponse struct {
	Value   []FileInfo `json:"value"`
	Context string     `json:"@odata.context"`
}
|
||||
|
||||
// oauthEndpoint holds the OAuth endpoint URLs for the target cloud.
type oauthEndpoint struct {
	token     url.URL
	authorize url.URL
}

// OAuthError is the error response of the OAuth endpoints.
type OAuthError struct {
	ErrorType        string `json:"error"`
	ErrorDescription string `json:"error_description"`
	CorrelationID    string `json:"correlation_id"`
}

// Site describes a SharePoint site.
type Site struct {
	Description string `json:"description"`
	ID          string `json:"id"`
	Name        string `json:"name"`
	DisplayName string `json:"displayName"`
	WebUrl      string `json:"webUrl"`
}
|
||||
|
||||
// NOTE(review): Credential is also gob-registered in oauth.go's init; this
// duplicate registration of the identical type appears harmless but is
// redundant — consider removing one of them.
func init() {
	gob.Register(Credential{})
}
|
||||
|
||||
// Error implements the error interface, returning the API error message.
func (err RespError) Error() string {
	return err.APIError.Message
}
|
||||
122
pkg/filemanager/driver/oss/callback.go
Normal file
122
pkg/filemanager/driver/oss/callback.go
Normal file
@@ -0,0 +1,122 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/md5"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// pubKeyHeader is the request header carrying the base64-encoded URL of
	// the OSS callback-signature public key.
	pubKeyHeader = "x-oss-pub-key-url"
	// pubKeyPrefix / pubKeyPrefixHttps are the only URL prefixes accepted for
	// the public key, ensuring it is served by OSS itself.
	pubKeyPrefix      = "http://gosspublic.alicdn.com/"
	pubKeyPrefixHttps = "https://gosspublic.alicdn.com/"
	// pubKeyCacheKey is the cache key under which the fetched key is stored.
	pubKeyCacheKey = "oss_public_key"
)
|
||||
|
||||
// GetPublicKey obtains the OSS callback-signature public key, from cache when
// available, otherwise from the URL announced in the callback request header.
// The key URL must point at an OSS-controlled host; fetched keys are cached
// for seven days.
func GetPublicKey(r *http.Request, kv cache.Driver, client request.Client) ([]byte, error) {
	var pubKey []byte

	// Try the cache first.
	// NOTE(review): the unchecked pub.([]byte) assertion panics if the cache
	// entry was stored with a different type — confirm the cache is private
	// to this package.
	pub, exist := kv.Get(pubKeyCacheKey)
	if exist {
		return pub.([]byte), nil
	}

	// Decode the public key URL from the request header.
	pubURL, err := base64.StdEncoding.DecodeString(r.Header.Get(pubKeyHeader))
	if err != nil {
		return pubKey, err
	}

	// Ensure the public key is actually published by OSS.
	if !strings.HasPrefix(string(pubURL), pubKeyPrefix) &&
		!strings.HasPrefix(string(pubURL), pubKeyPrefixHttps) {
		return pubKey, errors.New("public key url invalid")
	}

	// Fetch the key.
	body, err := client.Request("GET", string(pubURL), nil).
		CheckHTTPResponse(200).
		GetResponse()
	if err != nil {
		return pubKey, err
	}

	// Cache for 7 days; a cache failure is deliberately ignored.
	_ = kv.Set(pubKeyCacheKey, []byte(body), 86400*7)

	return []byte(body), nil
}
|
||||
|
||||
func getRequestMD5(r *http.Request) ([]byte, error) {
|
||||
var byteMD5 []byte
|
||||
|
||||
// 获取请求正文
|
||||
body, err := io.ReadAll(r.Body)
|
||||
r.Body.Close()
|
||||
if err != nil {
|
||||
return byteMD5, err
|
||||
}
|
||||
r.Body = io.NopCloser(bytes.NewReader(body))
|
||||
|
||||
strURLPathDecode, err := url.PathUnescape(r.URL.Path)
|
||||
if err != nil {
|
||||
return byteMD5, err
|
||||
}
|
||||
|
||||
strAuth := fmt.Sprintf("%s\n%s", strURLPathDecode, string(body))
|
||||
md5Ctx := md5.New()
|
||||
md5Ctx.Write([]byte(strAuth))
|
||||
byteMD5 = md5Ctx.Sum(nil)
|
||||
|
||||
return byteMD5, nil
|
||||
}
|
||||
|
||||
// VerifyCallbackSignature 验证OSS回调请求
|
||||
func VerifyCallbackSignature(r *http.Request, kv cache.Driver, client request.Client) error {
|
||||
bytePublicKey, err := GetPublicKey(r, kv, client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
byteMD5, err := getRequestMD5(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
strAuthorizationBase64 := r.Header.Get("authorization")
|
||||
if strAuthorizationBase64 == "" {
|
||||
return errors.New("no authorization field in Request header")
|
||||
}
|
||||
authorization, _ := base64.StdEncoding.DecodeString(strAuthorizationBase64)
|
||||
|
||||
pubBlock, _ := pem.Decode(bytePublicKey)
|
||||
if pubBlock == nil {
|
||||
return errors.New("pubBlock not exist")
|
||||
}
|
||||
pubInterface, err := x509.ParsePKIXPublicKey(pubBlock.Bytes)
|
||||
if (pubInterface == nil) || (err != nil) {
|
||||
return err
|
||||
}
|
||||
pub := pubInterface.(*rsa.PublicKey)
|
||||
|
||||
errorVerifyPKCS1v15 := rsa.VerifyPKCS1v15(pub, crypto.MD5, byteMD5, authorization)
|
||||
if errorVerifyPKCS1v15 != nil {
|
||||
return errorVerifyPKCS1v15
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
359
pkg/filemanager/driver/oss/media.go
Normal file
359
pkg/filemanager/driver/oss/media.go
Normal file
@@ -0,0 +1,359 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"github.com/aliyun/aliyun-oss-go-sdk/oss"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/samber/lo"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// x-oss-process values used to query media metadata from OSS.
	imageInfoProcess = "image/info"
	videoInfoProcess = "video/info"
	audioInfoProcess = "audio/info"
	// mediaInfoTTL is how long a signed media-info request stays valid.
	mediaInfoTTL = time.Duration(10) * time.Minute
)
|
||||
|
||||
var (
	// File extensions for which each x-oss-process info query is attempted.
	supportedImageExt = []string{"jpg", "jpeg", "png", "gif", "bmp", "webp", "tiff", "heic", "heif"}
	supportedAudioExt = []string{"mp3", "wav", "flac", "aac", "m4a", "ogg", "wma", "ape", "alac", "amr", "opus"}
	// NOTE(review): this list contains duplicates ("avi", "m4v", "m4a") —
	// harmless if only used for membership tests, but worth deduplicating.
	supportedVideoExt = []string{"mp4", "mkv", "avi", "mov", "flv", "wmv", "rmvb", "webm", "3gp", "mpg", "mpeg", "m4v", "ts", "m3u8", "vob", "f4v", "rm", "asf", "divx", "ogv", "dat", "mts", "m2ts", "swf", "avi", "3g2", "m2v", "m4p", "m4b", "m4r", "m4v", "m4a"}
)
|
||||
|
||||
type (
|
||||
ImageProp struct {
|
||||
Value string `json:"value"`
|
||||
}
|
||||
ImageInfo map[string]ImageProp
|
||||
|
||||
Error struct {
|
||||
XMLName xml.Name `xml:"Error"`
|
||||
Text string `xml:",chardata"`
|
||||
Code string `xml:"Code"`
|
||||
Message string `xml:"Message"`
|
||||
RequestId string `xml:"RequestId"`
|
||||
HostId string `xml:"HostId"`
|
||||
EC string `xml:"EC"`
|
||||
RecommendDoc string `xml:"RecommendDoc"`
|
||||
}
|
||||
|
||||
StreamMediaInfo struct {
|
||||
RequestID string `json:"RequestId"`
|
||||
Language string `json:"Language"`
|
||||
Title string `json:"Title"`
|
||||
VideoStreams []VideoStream `json:"VideoStreams"`
|
||||
AudioStreams []AudioStream `json:"AudioStreams"`
|
||||
Subtitles []Subtitle `json:"Subtitles"`
|
||||
StreamCount int64 `json:"StreamCount"`
|
||||
ProgramCount int64 `json:"ProgramCount"`
|
||||
FormatName string `json:"FormatName"`
|
||||
FormatLongName string `json:"FormatLongName"`
|
||||
Size int64 `json:"Size"`
|
||||
StartTime float64 `json:"StartTime"`
|
||||
Bitrate int64 `json:"Bitrate"`
|
||||
Artist string `json:"Artist"`
|
||||
AlbumArtist string `json:"AlbumArtist"`
|
||||
Composer string `json:"Composer"`
|
||||
Performer string `json:"Performer"`
|
||||
Album string `json:"Album"`
|
||||
Duration float64 `json:"Duration"`
|
||||
ProduceTime string `json:"ProduceTime"`
|
||||
LatLong string `json:"LatLong"`
|
||||
VideoWidth int64 `json:"VideoWidth"`
|
||||
VideoHeight int64 `json:"VideoHeight"`
|
||||
Addresses []Address `json:"Addresses"`
|
||||
}
|
||||
|
||||
Address struct {
|
||||
Language string `json:"Language"`
|
||||
AddressLine string `json:"AddressLine"`
|
||||
Country string `json:"Country"`
|
||||
Province string `json:"Province"`
|
||||
City string `json:"City"`
|
||||
District string `json:"District"`
|
||||
Township string `json:"Township"`
|
||||
}
|
||||
|
||||
AudioStream struct {
|
||||
Index int `json:"Index"`
|
||||
Language string `json:"Language"`
|
||||
CodecName string `json:"CodecName"`
|
||||
CodecLongName string `json:"CodecLongName"`
|
||||
CodecTimeBase string `json:"CodecTimeBase"`
|
||||
CodecTagString string `json:"CodecTagString"`
|
||||
CodecTag string `json:"CodecTag"`
|
||||
TimeBase string `json:"TimeBase"`
|
||||
StartTime float64 `json:"StartTime"`
|
||||
Duration float64 `json:"Duration"`
|
||||
Bitrate int64 `json:"Bitrate"`
|
||||
FrameCount int64 `json:"FrameCount"`
|
||||
Lyric string `json:"Lyric"`
|
||||
SampleFormat string `json:"SampleFormat"`
|
||||
SampleRate int64 `json:"SampleRate"`
|
||||
Channels int64 `json:"Channels"`
|
||||
ChannelLayout string `json:"ChannelLayout"`
|
||||
}
|
||||
|
||||
Subtitle struct {
|
||||
Index int64 `json:"Index"`
|
||||
Language string `json:"Language"`
|
||||
CodecName string `json:"CodecName"`
|
||||
CodecLongName string `json:"CodecLongName"`
|
||||
CodecTagString string `json:"CodecTagString"`
|
||||
CodecTag string `json:"CodecTag"`
|
||||
StartTime float64 `json:"StartTime"`
|
||||
Duration float64 `json:"Duration"`
|
||||
Bitrate int64 `json:"Bitrate"`
|
||||
Content string `json:"Content"`
|
||||
Width int64 `json:"Width"`
|
||||
Height int64 `json:"Height"`
|
||||
}
|
||||
|
||||
VideoStream struct {
|
||||
Index int `json:"Index"`
|
||||
Language string `json:"Language"`
|
||||
CodecName string `json:"CodecName"`
|
||||
CodecLongName string `json:"CodecLongName"`
|
||||
Profile string `json:"Profile"`
|
||||
CodecTimeBase string `json:"CodecTimeBase"`
|
||||
CodecTagString string `json:"CodecTagString"`
|
||||
CodecTag string `json:"CodecTag"`
|
||||
Width int `json:"Width"`
|
||||
Height int `json:"Height"`
|
||||
HasBFrames int `json:"HasBFrames"`
|
||||
SampleAspectRatio string `json:"SampleAspectRatio"`
|
||||
DisplayAspectRatio string `json:"DisplayAspectRatio"`
|
||||
PixelFormat string `json:"PixelFormat"`
|
||||
Level int `json:"Level"`
|
||||
FrameRate string `json:"FrameRate"`
|
||||
AverageFrameRate string `json:"AverageFrameRate"`
|
||||
TimeBase string `json:"TimeBase"`
|
||||
StartTime float64 `json:"StartTime"`
|
||||
Duration float64 `json:"Duration"`
|
||||
Bitrate int64 `json:"Bitrate"`
|
||||
FrameCount int64 `json:"FrameCount"`
|
||||
Rotate string `json:"Rotate"`
|
||||
BitDepth int `json:"BitDepth"`
|
||||
ColorSpace string `json:"ColorSpace"`
|
||||
ColorRange string `json:"ColorRange"`
|
||||
ColorTransfer string `json:"ColorTransfer"`
|
||||
ColorPrimaries string `json:"ColorPrimaries"`
|
||||
}
|
||||
)
|
||||
|
||||
// extractIMMMeta calls the OSS media-info process endpoint for the object at
// path and converts the JSON response (StreamMediaInfo) into generic
// driver.MediaMeta entries. category is the x-oss-process value
// (videoInfoProcess or audioInfoProcess).
func (handler *Driver) extractIMMMeta(ctx context.Context, path, category string) ([]driver.MediaMeta, error) {
	// The info endpoint is always requested with a signed URL (forceSign=true).
	resp, err := handler.extractMediaInfo(ctx, path, category, true)
	if err != nil {
		return nil, err
	}

	var info StreamMediaInfo
	if err := json.Unmarshal([]byte(resp), &info); err != nil {
		return nil, fmt.Errorf("failed to unmarshal media info: %w", err)
	}

	// Flatten video streams into the shared stream representation.
	streams := lo.Map(info.VideoStreams, func(stream VideoStream, index int) mediameta.Stream {
		// A zero bitrate is rendered as "" rather than "0".
		bitrate := ""
		if stream.Bitrate != 0 {
			bitrate = strconv.FormatInt(stream.Bitrate, 10)
		}
		return mediameta.Stream{
			Index:         stream.Index,
			CodecName:     stream.CodecName,
			CodecLongName: stream.CodecLongName,
			CodecType:     "video",
			Width:         stream.Width,
			Height:        stream.Height,
			Duration:      strconv.FormatFloat(stream.Duration, 'f', -1, 64),
			Bitrate:       bitrate,
		}
	})
	// Audio streams are appended after the video streams.
	streams = append(streams, lo.Map(info.AudioStreams, func(stream AudioStream, index int) mediameta.Stream {
		bitrate := ""
		if stream.Bitrate != 0 {
			bitrate = strconv.FormatInt(stream.Bitrate, 10)
		}
		return mediameta.Stream{
			Index:         stream.Index,
			CodecName:     stream.CodecName,
			CodecLongName: stream.CodecLongName,
			CodecType:     "audio",
			Duration:      strconv.FormatFloat(stream.Duration, 'f', -1, 64),
			Bitrate:       bitrate,
		}
	})...)

	// Container-level info is routed through the shared FFProbe transform so
	// OSS results look like local ffprobe results.
	metas := make([]driver.MediaMeta, 0)
	metas = append(metas, mediameta.ProbeMetaTransform(&mediameta.FFProbeMeta{
		Format: &mediameta.Format{
			FormatName:     info.FormatName,
			FormatLongName: info.FormatLongName,
			Duration:       strconv.FormatFloat(info.Duration, 'f', -1, 64),
			Bitrate:        strconv.FormatInt(info.Bitrate, 10),
		},
		Streams: streams,
	})...)

	// Optional music tags: only emitted when present in the response.
	if info.Artist != "" {
		metas = append(metas, driver.MediaMeta{
			Key:   mediameta.MusicArtist,
			Value: info.Artist,
			Type:  driver.MediaTypeMusic,
		})
	}

	if info.AlbumArtist != "" {
		metas = append(metas, driver.MediaMeta{
			Key:   mediameta.MusicAlbumArtists,
			Value: info.AlbumArtist,
			Type:  driver.MediaTypeMusic,
		})
	}

	if info.Composer != "" {
		metas = append(metas, driver.MediaMeta{
			Key:   mediameta.MusicComposer,
			Value: info.Composer,
			Type:  driver.MediaTypeMusic,
		})
	}

	if info.Album != "" {
		metas = append(metas, driver.MediaMeta{
			Key:   mediameta.MusicAlbum,
			Value: info.Album,
			Type:  driver.MediaTypeMusic,
		})
	}

	return metas, nil
}
|
||||
|
||||
func (handler *Driver) extractImageMeta(ctx context.Context, path string) ([]driver.MediaMeta, error) {
|
||||
resp, err := handler.extractMediaInfo(ctx, path, imageInfoProcess, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var imageInfo ImageInfo
|
||||
if err := json.Unmarshal([]byte(resp), &imageInfo); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal media info: %w", err)
|
||||
}
|
||||
|
||||
metas := make([]driver.MediaMeta, 0)
|
||||
exifMap := lo.MapEntries(imageInfo, func(key string, value ImageProp) (string, string) {
|
||||
return key, value.Value
|
||||
})
|
||||
metas = append(metas, mediameta.ExtractExifMap(exifMap, time.Time{})...)
|
||||
metas = append(metas, parseGpsInfo(imageInfo)...)
|
||||
for i := 0; i < len(metas); i++ {
|
||||
metas[i].Type = driver.MetaTypeExif
|
||||
}
|
||||
|
||||
return metas, nil
|
||||
}
|
||||
|
||||
// extractMediaInfo Sends API calls to OSS IMM service to extract media info.
// category is the x-oss-process parameter; forceSign forces a signed URL even
// for public buckets. It returns the raw response body.
func (handler *Driver) extractMediaInfo(ctx context.Context, path string, category string, forceSign bool) (string, error) {
	mediaOption := []oss.Option{oss.Process(category)}
	// The signed URL only needs to live long enough for this one request.
	mediaInfoExpire := time.Now().Add(mediaInfoTTL)
	thumbURL, err := handler.signSourceURL(
		ctx,
		path,
		&mediaInfoExpire,
		mediaOption,
		forceSign,
	)
	if err != nil {
		return "", fmt.Errorf("failed to sign media info url: %w", err)
	}

	// The body is fetched even on non-200 responses so the OSS XML error
	// payload can be surfaced by handleOssError.
	resp, err := handler.httpClient.
		Request(http.MethodGet, thumbURL, nil, request.WithContext(ctx)).
		CheckHTTPResponse(http.StatusOK).
		GetResponseIgnoreErr()
	if err != nil {
		return "", handleOssError(resp, err)
	}

	return resp, nil
}
|
||||
|
||||
func parseGpsInfo(imageInfo ImageInfo) []driver.MediaMeta {
|
||||
latitude := imageInfo["GPSLatitude"] // 31deg 16.26808'
|
||||
longitude := imageInfo["GPSLongitude"] // 120deg 42.91039'
|
||||
latRef := imageInfo["GPSLatitudeRef"] // North
|
||||
lonRef := imageInfo["GPSLongitudeRef"] // East
|
||||
|
||||
// Make sure all value exist in map
|
||||
if latitude.Value == "" || longitude.Value == "" || latRef.Value == "" || lonRef.Value == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
lat := parseRawGPS(latitude.Value, latRef.Value)
|
||||
lon := parseRawGPS(longitude.Value, lonRef.Value)
|
||||
if !math.IsNaN(lat) && !math.IsNaN(lon) {
|
||||
lat, lng := mediameta.NormalizeGPS(lat, lon)
|
||||
return []driver.MediaMeta{{
|
||||
Key: mediameta.GpsLat,
|
||||
Value: fmt.Sprintf("%f", lat),
|
||||
}, {
|
||||
Key: mediameta.GpsLng,
|
||||
Value: fmt.Sprintf("%f", lng),
|
||||
}}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseRawGPS converts a GPS coordinate in OSS EXIF string form, e.g.
// `31deg 16.26808'` or `31deg 16' 26.8"`, into decimal degrees. ref is the
// hemisphere name; "South" and "West" negate the value. Unparseable
// components contribute 0.
func parseRawGPS(gpsStr string, ref string) float64 {
	elem := strings.Split(gpsStr, " ")
	// strings.Split never returns an empty slice (the old `len(elem) < 1`
	// check was dead code); guard against an empty input string instead.
	if elem[0] == "" {
		return 0
	}

	var (
		deg     float64
		minutes float64
		seconds float64
	)

	deg, _ = strconv.ParseFloat(strings.TrimSuffix(elem[0], "deg"), 64)
	if len(elem) >= 2 {
		minutes, _ = strconv.ParseFloat(strings.TrimSuffix(elem[1], "'"), 64)
	}
	if len(elem) >= 3 {
		seconds, _ = strconv.ParseFloat(strings.TrimSuffix(elem[2], "\""), 64)
	}

	decimal := deg + minutes/60.0 + seconds/3600.0

	if ref == "South" || ref == "West" {
		return -decimal
	}

	return decimal
}
|
||||
|
||||
func handleOssError(resp string, originErr error) error {
|
||||
if resp == "" {
|
||||
return originErr
|
||||
}
|
||||
|
||||
var err Error
|
||||
if err := xml.Unmarshal([]byte(resp), &err); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal oss error: %w", err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("oss error: %s", err.Message)
|
||||
}
|
||||
548
pkg/filemanager/driver/oss/oss.go
Normal file
548
pkg/filemanager/driver/oss/oss.go
Normal file
@@ -0,0 +1,548 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aliyun/aliyun-oss-go-sdk/oss"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
// UploadPolicy is the Aliyun OSS upload policy document.
type UploadPolicy struct {
	Expiration string        `json:"expiration"`
	Conditions []interface{} `json:"conditions"`
}
|
||||
|
||||
// CallbackPolicy describes the OSS upload callback that OSS invokes against
// Cloudreve after a client-side upload completes.
type CallbackPolicy struct {
	CallbackURL      string `json:"callbackUrl"`
	CallbackBody     string `json:"callbackBody"`
	CallbackBodyType string `json:"callbackBodyType"`
	CallbackSNI      bool   `json:"callbackSNI"`
}
|
||||
|
||||
// Driver is the storage driver adapter for Aliyun OSS.
type Driver struct {
	policy *ent.StoragePolicy

	client     *oss.Client
	bucket     *oss.Bucket
	settings   setting.Provider
	l          logging.Logger
	config     conf.ConfigProvider
	mime       mime.MimeDetector
	httpClient request.Client

	// chunkSize is the multipart-upload part size in bytes.
	chunkSize int64
}
|
||||
|
||||
type key int

const (
	// chunkRetrySleep is the fixed backoff between chunk upload retries.
	chunkRetrySleep = time.Duration(5) * time.Second
	// Query parameter / header names used when signing multipart-upload URLs.
	uploadIdParam     = "uploadId"
	partNumberParam   = "partNumber"
	callbackParam     = "callback"
	completeAllHeader = "x-oss-complete-all"
	// maxDeleteBatch is the per-request object limit for batch deletion.
	maxDeleteBatch = 1000

	// MultiPartUploadThreshold is the server-side size threshold above which
	// multipart upload is used.
	MultiPartUploadThreshold int64 = 5 * (1 << 30) // 5GB
)
|
||||
|
||||
var (
	// features is the static capability set reported by Capabilities;
	// it is left at its zero value (no static features set here).
	features = &boolset.BooleanSet{}
)
|
||||
|
||||
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
|
||||
config conf.ConfigProvider, l logging.Logger, mime mime.MimeDetector) (*Driver, error) {
|
||||
chunkSize := policy.Settings.ChunkSize
|
||||
if policy.Settings.ChunkSize == 0 {
|
||||
chunkSize = 25 << 20 // 25 MB
|
||||
}
|
||||
|
||||
driver := &Driver{
|
||||
policy: policy,
|
||||
settings: settings,
|
||||
chunkSize: chunkSize,
|
||||
config: config,
|
||||
l: l,
|
||||
mime: mime,
|
||||
httpClient: request.NewClient(config, request.WithLogger(l)),
|
||||
}
|
||||
|
||||
return driver, driver.InitOSSClient(false)
|
||||
}
|
||||
|
||||
// CORS installs a permissive cross-origin policy on the bucket so that
// browser-based direct uploads can reach OSS.
func (handler *Driver) CORS() error {
	return handler.client.SetBucketCORS(handler.policy.BucketName, []oss.CORSRule{
		{
			AllowedOrigin: []string{"*"},
			AllowedMethod: []string{
				"GET",
				"POST",
				"PUT",
				"DELETE",
				"HEAD",
			},
			ExposeHeader:  []string{},
			AllowedHeader: []string{"*"},
			MaxAgeSeconds: 3600,
		},
	})
}
|
||||
|
||||
// InitOSSClient initializes the authenticated OSS client and bucket handle on
// the driver. When forceUsePublicEndpoint is true the policy's public server
// address is used even if a server-side (internal) endpoint is configured.
func (handler *Driver) InitOSSClient(forceUsePublicEndpoint bool) error {
	if handler.policy == nil {
		return errors.New("empty policy")
	}

	opt := make([]oss.ClientOption, 0)

	// Decide whether to use the internal endpoint.
	endpoint := handler.policy.Server
	if handler.policy.Settings.ServerSideEndpoint != "" && !forceUsePublicEndpoint {
		endpoint = handler.policy.Settings.ServerSideEndpoint
	} else if handler.policy.Settings.UseCname {
		// CNAME mode is only enabled when the public endpoint is in use.
		opt = append(opt, oss.UseCname(true))
	}

	// Default to HTTPS when no scheme is given.
	if !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
		endpoint = "https://" + endpoint
	}

	// Initialize the client.
	client, err := oss.New(endpoint, handler.policy.AccessKey, handler.policy.SecretKey, opt...)
	if err != nil {
		return err
	}
	handler.client = client

	// Initialize the bucket handle.
	bucket, err := client.Bucket(handler.policy.BucketName)
	if err != nil {
		return err
	}
	handler.bucket = bucket

	return nil
}
|
||||
|
||||
//// List 列出OSS上的文件
|
||||
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) {
|
||||
// // 列取文件
|
||||
// base = strings.TrimPrefix(base, "/")
|
||||
// if base != "" {
|
||||
// base += "/"
|
||||
// }
|
||||
//
|
||||
// var (
|
||||
// delimiter string
|
||||
// marker string
|
||||
// objects []oss.ObjectProperties
|
||||
// commons []string
|
||||
// )
|
||||
// if !recursive {
|
||||
// delimiter = "/"
|
||||
// }
|
||||
//
|
||||
// for {
|
||||
// subRes, err := handler.bucket.ListObjects(oss.Marker(marker), oss.Prefix(base),
|
||||
// oss.MaxKeys(1000), oss.Delimiter(delimiter))
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// objects = append(objects, subRes.Objects...)
|
||||
// commons = append(commons, subRes.CommonPrefixes...)
|
||||
// marker = subRes.NextMarker
|
||||
// if marker == "" {
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// // 处理列取结果
|
||||
// res := make([]response.Object, 0, len(objects)+len(commons))
|
||||
// // 处理目录
|
||||
// for _, object := range commons {
|
||||
// rel, err := filepath.Rel(base, object)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(object),
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: 0,
|
||||
// IsDir: true,
|
||||
// LastModify: time.Now(),
|
||||
// })
|
||||
// }
|
||||
// // 处理文件
|
||||
// for _, object := range objects {
|
||||
// rel, err := filepath.Rel(base, object.Key)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(object.Key),
|
||||
// Source: object.Key,
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: uint64(object.Size),
|
||||
// IsDir: false,
|
||||
// LastModify: object.LastModified,
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// return res, nil
|
||||
//}
|
||||
|
||||
// Open is not supported by the OSS driver; objects are accessed through
// signed URLs instead of local file handles.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	return nil, errors.New("not implemented")
}
|
||||
|
||||
// Put 将文件流保存到指定目录
|
||||
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
|
||||
defer file.Close()
|
||||
|
||||
// 凭证有效期
|
||||
credentialTTL := handler.settings.UploadSessionTTL(ctx)
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
// 是否允许覆盖
|
||||
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
|
||||
options := []oss.Option{
|
||||
oss.WithContext(ctx),
|
||||
oss.Expires(time.Now().Add(credentialTTL * time.Second)),
|
||||
oss.ForbidOverWrite(!overwrite),
|
||||
oss.ContentType(mimeType),
|
||||
}
|
||||
|
||||
// 小文件直接上传
|
||||
if file.Props.Size < MultiPartUploadThreshold {
|
||||
return handler.bucket.PutObject(file.Props.SavePath, file, options...)
|
||||
}
|
||||
|
||||
// 超过阈值时使用分片上传
|
||||
imur, err := handler.bucket.InitiateMultipartUpload(file.Props.SavePath, options...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initiate multipart upload: %w", err)
|
||||
}
|
||||
|
||||
parts := make([]oss.UploadPart, 0)
|
||||
|
||||
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{
|
||||
Max: handler.settings.ChunkRetryLimit(ctx),
|
||||
Sleep: chunkRetrySleep,
|
||||
}, handler.settings.UseChunkBuffer(ctx), handler.l, handler.settings.TempPath(ctx))
|
||||
|
||||
uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
|
||||
part, err := handler.bucket.UploadPart(imur, content, current.Length(), current.Index()+1, oss.WithContext(ctx))
|
||||
if err == nil {
|
||||
parts = append(parts, part)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for chunks.Next() {
|
||||
if err := chunks.Process(uploadFunc); err != nil {
|
||||
handler.cancelUpload(imur)
|
||||
return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
|
||||
}
|
||||
}
|
||||
|
||||
_, err = handler.bucket.CompleteMultipartUpload(imur, parts, oss.ForbidOverWrite(!overwrite), oss.WithContext(ctx))
|
||||
if err != nil {
|
||||
handler.cancelUpload(imur)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete 删除一个或多个文件,
|
||||
// 返回未删除的文件
|
||||
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
|
||||
groups := lo.Chunk(files, maxDeleteBatch)
|
||||
failed := make([]string, 0)
|
||||
var lastError error
|
||||
for index, group := range groups {
|
||||
handler.l.Debug("Process delete group #%d: %v", index, group)
|
||||
// 删除文件
|
||||
delRes, err := handler.bucket.DeleteObjects(group)
|
||||
if err != nil {
|
||||
failed = append(failed, group...)
|
||||
lastError = err
|
||||
continue
|
||||
}
|
||||
|
||||
// 统计未删除的文件
|
||||
failed = append(failed, util.SliceDifference(files, delRes.DeletedObjects)...)
|
||||
}
|
||||
|
||||
if len(failed) > 0 && lastError == nil {
|
||||
lastError = fmt.Errorf("failed to delete files: %v", failed)
|
||||
}
|
||||
|
||||
return failed, lastError
|
||||
}
|
||||
|
||||
// Thumb returns a signed URL to an OSS image-resize thumbnail of the entity.
// The client endpoint (public vs. internal) can be overridden via
// driver.ForceUsePublicEndpointCtx on the context.
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
	usePublicEndpoint := true
	if forceUsePublicEndpoint, ok := ctx.Value(driver.ForceUsePublicEndpointCtx{}).(bool); ok {
		usePublicEndpoint = forceUsePublicEndpoint
	}

	// Re-initialize the client with the endpoint chosen for this request.
	if err := handler.InitOSSClient(usePublicEndpoint); err != nil {
		return "", err
	}

	w, h := handler.settings.ThumbSize(ctx)
	// m_lfit scales the image to fit within w x h while preserving aspect ratio.
	thumbParam := fmt.Sprintf("image/resize,m_lfit,h_%d,w_%d", h, w)
	thumbOption := []oss.Option{oss.Process(thumbParam)}
	thumbURL, err := handler.signSourceURL(
		ctx,
		e.Source(),
		expire,
		thumbOption,
		false,
	)
	if err != nil {
		return "", err
	}

	return thumbURL, nil
}
|
||||
|
||||
// Source builds an external (optionally signed) URL for the entity, applying
// attachment disposition and OSS traffic-limit options as requested in args.
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
	// Initialize the client with the endpoint chosen for this request.
	usePublicEndpoint := true
	if forceUsePublicEndpoint, ok := ctx.Value(driver.ForceUsePublicEndpointCtx{}).(bool); ok {
		usePublicEndpoint = forceUsePublicEndpoint
	}
	if err := handler.InitOSSClient(usePublicEndpoint); err != nil {
		return "", err
	}

	// Assemble per-request signing options.
	var signOptions = make([]oss.Option, 0, 2)
	if args.IsDownload {
		encodedFilename := url.PathEscape(args.DisplayName)
		signOptions = append(signOptions, oss.ResponseContentDisposition(fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
			encodedFilename, encodedFilename)))
	}
	if args.Speed > 0 {
		// Convert bytes/s to bits/s as OSS expects.
		args.Speed *= 8

		// OSS constrains the traffic limit to this range.
		if args.Speed < 819200 {
			args.Speed = 819200
		}
		if args.Speed > 838860800 {
			args.Speed = 838860800
		}
		signOptions = append(signOptions, oss.TrafficLimitParam(args.Speed))
	}

	return handler.signSourceURL(ctx, e.Source(), args.Expire, signOptions, false)
}
|
||||
|
||||
// signSourceURL signs a GET URL for path with the given options. A nil expire
// yields an effectively permanent (~20 year) TTL. For public buckets (unless
// forceSign) the signature and unsupported query parameters are stripped so a
// plain public URL is returned.
func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *time.Time, options []oss.Option, forceSign bool) (string, error) {
	ttl := int64(86400 * 365 * 20)
	if expire != nil {
		ttl = int64(time.Until(*expire).Seconds())
	}

	signedURL, err := handler.bucket.SignURL(path, oss.HTTPGet, ttl, options...)
	if err != nil {
		return "", err
	}

	// Parse so the query string can be rewritten below (also where a custom
	// acceleration domain would be substituted).
	finalURL, err := url.Parse(signedURL)
	if err != nil {
		return "", err
	}

	// Public bucket: drop the signature and parameters that only work on
	// signed requests.
	if !handler.policy.IsPrivate && !forceSign {
		query := finalURL.Query()
		query.Del("OSSAccessKeyId")
		query.Del("Signature")
		query.Del("response-content-disposition")
		query.Del("x-oss-traffic-limit")
		finalURL.RawQuery = query.Encode()
	}
	return finalURL.String(), nil
}
|
||||
|
||||
// Token 获取上传策略和认证Token
|
||||
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
// 初始化客户端
|
||||
if err := handler.InitOSSClient(true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 生成回调地址
|
||||
siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
|
||||
// 在从机端创建上传会话
|
||||
uploadSession.ChunkSize = handler.chunkSize
|
||||
uploadSession.Callback = routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeOss, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()
|
||||
|
||||
// 回调策略
|
||||
callbackPolicy := CallbackPolicy{
|
||||
CallbackURL: uploadSession.Callback,
|
||||
CallbackBody: `{"name":${x:fname},"source_name":${object},"size":${size},"pic_info":"${imageInfo.width},${imageInfo.height}"}`,
|
||||
CallbackBodyType: "application/json",
|
||||
CallbackSNI: true,
|
||||
}
|
||||
callbackPolicyJSON, err := json.Marshal(callbackPolicy)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode callback policy: %w", err)
|
||||
}
|
||||
callbackPolicyEncoded := base64.StdEncoding.EncodeToString(callbackPolicyJSON)
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
// 初始化分片上传
|
||||
options := []oss.Option{
|
||||
oss.WithContext(ctx),
|
||||
oss.Expires(uploadSession.Props.ExpireAt),
|
||||
oss.ForbidOverWrite(true),
|
||||
oss.ContentType(mimeType),
|
||||
}
|
||||
imur, err := handler.bucket.InitiateMultipartUpload(file.Props.SavePath, options...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize multipart upload: %w", err)
|
||||
}
|
||||
uploadSession.UploadID = imur.UploadID
|
||||
|
||||
// 为每个分片签名上传 URL
|
||||
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{}, false, handler.l, "")
|
||||
urls := make([]string, chunks.Num())
|
||||
ttl := int64(time.Until(uploadSession.Props.ExpireAt).Seconds())
|
||||
for chunks.Next() {
|
||||
err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error {
|
||||
signedURL, err := handler.bucket.SignURL(file.Props.SavePath, oss.HTTPPut,
|
||||
ttl,
|
||||
oss.AddParam(partNumberParam, strconv.Itoa(c.Index()+1)),
|
||||
oss.AddParam(uploadIdParam, imur.UploadID),
|
||||
oss.ContentType("application/octet-stream"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
urls[c.Index()] = signedURL
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// 签名完成分片上传的URL
|
||||
completeURL, err := handler.bucket.SignURL(file.Props.SavePath, oss.HTTPPost, ttl,
|
||||
oss.ContentType("application/octet-stream"),
|
||||
oss.AddParam(uploadIdParam, imur.UploadID),
|
||||
oss.Expires(time.Now().Add(time.Duration(ttl)*time.Second)),
|
||||
oss.SetHeader(completeAllHeader, "yes"),
|
||||
oss.ForbidOverWrite(true),
|
||||
oss.AddParam(callbackParam, callbackPolicyEncoded))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &fs.UploadCredential{
|
||||
UploadID: imur.UploadID,
|
||||
UploadURLs: urls,
|
||||
CompleteURL: completeURL,
|
||||
SessionID: uploadSession.Props.UploadSessionID,
|
||||
ChunkSize: handler.chunkSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CancelToken aborts the multipart upload associated with the upload session,
// invalidating the previously issued upload credentials.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	return handler.bucket.AbortMultipartUpload(oss.InitiateMultipartUploadResult{UploadID: uploadSession.UploadID, Key: uploadSession.Props.SavePath}, oss.WithContext(ctx))
}
|
||||
|
||||
// CompleteUpload is a no-op for OSS — presumably completion happens through
// the signed complete URL issued in Token (NOTE(review): confirm with caller).
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	return nil
}
|
||||
|
||||
func (handler *Driver) Capabilities() *driver.Capabilities {
|
||||
mediaMetaExts := handler.policy.Settings.MediaMetaExts
|
||||
if !handler.policy.Settings.NativeMediaProcessing {
|
||||
mediaMetaExts = nil
|
||||
}
|
||||
return &driver.Capabilities{
|
||||
StaticFeatures: features,
|
||||
MediaMetaSupportedExts: mediaMetaExts,
|
||||
MediaMetaProxy: handler.policy.Settings.MediaMetaGeneratorProxy,
|
||||
ThumbSupportedExts: handler.policy.Settings.ThumbExts,
|
||||
ThumbProxy: handler.policy.Settings.ThumbGeneratorProxy,
|
||||
ThumbSupportAllExts: handler.policy.Settings.ThumbSupportAllExts,
|
||||
ThumbMaxSize: handler.policy.Settings.ThumbMaxSize,
|
||||
}
|
||||
}
|
||||
|
||||
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
|
||||
if util.ContainsString(supportedImageExt, ext) {
|
||||
return handler.extractImageMeta(ctx, path)
|
||||
}
|
||||
|
||||
if util.ContainsString(supportedVideoExt, ext) {
|
||||
return handler.extractIMMMeta(ctx, path, videoInfoProcess)
|
||||
}
|
||||
|
||||
if util.ContainsString(supportedAudioExt, ext) {
|
||||
return handler.extractIMMMeta(ctx, path, audioInfoProcess)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unsupported media type in oss: %s", ext)
|
||||
}
|
||||
|
||||
// LocalPath always returns "" — OSS objects have no local filesystem path.
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	return ""
}
|
||||
|
||||
// cancelUpload best-effort aborts a multipart upload, logging a warning on
// failure instead of propagating the error.
func (handler *Driver) cancelUpload(imur oss.InitiateMultipartUploadResult) {
	if err := handler.bucket.AbortMultipartUpload(imur); err != nil {
		handler.l.Warning("failed to abort multipart upload: %s", err)
	}
}
|
||||
183
pkg/filemanager/driver/qiniu/media.go
Normal file
183
pkg/filemanager/driver/qiniu/media.go
Normal file
@@ -0,0 +1,183 @@
|
||||
package qiniu
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/samber/lo"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// Query parameters selecting Qiniu EXIF extraction and AV probing.
	exifParam   = "exif"
	avInfoParam = "avinfo"
	// mediaInfoTTL bounds the lifetime of the signed media-info URL.
	mediaInfoTTL = time.Duration(10) * time.Minute
)
|
||||
|
||||
var (
	// supportedImageExt lists extensions eligible for Qiniu exif extraction.
	supportedImageExt = []string{"jpg", "jpeg", "png", "gif", "bmp", "webp", "tiff"}
)
|
||||
|
||||
type (
	// ImageProp is one EXIF property in Qiniu's exif response.
	ImageProp struct {
		Value string `json:"val"`
	}
	// ImageInfo maps EXIF tag names to their values.
	ImageInfo map[string]ImageProp
	// QiniuMediaError is the JSON error payload returned by Qiniu media APIs.
	QiniuMediaError struct {
		Error string `json:"error"`
		Code  int    `json:"code"`
	}
)
|
||||
|
||||
// extractAvMeta probes audio/video metadata through Qiniu's avinfo endpoint —
// whose response matches the ffprobe JSON layout — and converts it into
// driver.MediaMeta entries plus optional music tags found in the format tags.
func (handler *Driver) extractAvMeta(ctx context.Context, path string) ([]driver.MediaMeta, error) {
	resp, err := handler.extractMediaInfo(ctx, path, avInfoParam)
	if err != nil {
		return nil, err
	}

	var avInfo *mediameta.FFProbeMeta
	if err := json.Unmarshal([]byte(resp), &avInfo); err != nil {
		return nil, fmt.Errorf("failed to unmarshal media info: %w", err)
	}

	metas := mediameta.ProbeMetaTransform(avInfo)
	// Music tags are optional; append only the ones present.
	if artist, ok := avInfo.Format.Tags["artist"]; ok {
		metas = append(metas, driver.MediaMeta{
			Key:   mediameta.Artist,
			Value: artist,
			Type:  driver.MediaTypeMusic,
		})
	}

	if album, ok := avInfo.Format.Tags["album"]; ok {
		metas = append(metas, driver.MediaMeta{
			Key:   mediameta.MusicAlbum,
			Value: album,
			Type:  driver.MediaTypeMusic,
		})
	}

	if title, ok := avInfo.Format.Tags["title"]; ok {
		metas = append(metas, driver.MediaMeta{
			Key:   mediameta.MusicTitle,
			Value: title,
			Type:  driver.MediaTypeMusic,
		})
	}

	return metas, nil
}
|
||||
|
||||
func (handler *Driver) extractImageMeta(ctx context.Context, path string) ([]driver.MediaMeta, error) {
|
||||
resp, err := handler.extractMediaInfo(ctx, path, exifParam)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var imageInfo ImageInfo
|
||||
if err := json.Unmarshal([]byte(resp), &imageInfo); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal media info: %w", err)
|
||||
}
|
||||
|
||||
metas := make([]driver.MediaMeta, 0)
|
||||
exifMap := lo.MapEntries(imageInfo, func(key string, value ImageProp) (string, string) {
|
||||
return key, value.Value
|
||||
})
|
||||
metas = append(metas, mediameta.ExtractExifMap(exifMap, time.Time{})...)
|
||||
metas = append(metas, parseGpsInfo(imageInfo)...)
|
||||
for i := 0; i < len(metas); i++ {
|
||||
metas[i].Type = driver.MetaTypeExif
|
||||
}
|
||||
|
||||
return metas, nil
|
||||
}
|
||||
|
||||
func (handler *Driver) extractMediaInfo(ctx context.Context, path string, param string) (string, error) {
|
||||
mediaInfoExpire := time.Now().Add(mediaInfoTTL)
|
||||
ediaInfoUrl := handler.signSourceURL(fmt.Sprintf("%s?%s", path, param), &mediaInfoExpire)
|
||||
resp, err := handler.httpClient.
|
||||
Request(http.MethodGet, ediaInfoUrl, nil, request.WithContext(ctx)).
|
||||
CheckHTTPResponse(http.StatusOK).
|
||||
GetResponseIgnoreErr()
|
||||
if err != nil {
|
||||
return "", unmarshalError(resp, err)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func unmarshalError(resp string, originErr error) error {
|
||||
if resp == "" {
|
||||
return originErr
|
||||
}
|
||||
|
||||
var err QiniuMediaError
|
||||
if err := json.Unmarshal([]byte(resp), &err); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal qiniu error: %w", err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("qiniu error: %s", err.Error)
|
||||
}
|
||||
|
||||
func parseGpsInfo(imageInfo ImageInfo) []driver.MediaMeta {
|
||||
latitude := imageInfo["GPSLatitude"] // 31, 16.2680820, 0
|
||||
longitude := imageInfo["GPSLongitude"] // 120, 42.9103939, 0
|
||||
latRef := imageInfo["GPSLatitudeRef"] // N
|
||||
lonRef := imageInfo["GPSLongitudeRef"] // E
|
||||
|
||||
// Make sure all value exist in map
|
||||
if latitude.Value == "" || longitude.Value == "" || latRef.Value == "" || lonRef.Value == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
lat := parseRawGPS(latitude.Value, latRef.Value)
|
||||
lon := parseRawGPS(longitude.Value, lonRef.Value)
|
||||
if !math.IsNaN(lat) && !math.IsNaN(lon) {
|
||||
lat, lng := mediameta.NormalizeGPS(lat, lon)
|
||||
return []driver.MediaMeta{{
|
||||
Key: mediameta.GpsLat,
|
||||
Value: fmt.Sprintf("%f", lat),
|
||||
}, {
|
||||
Key: mediameta.GpsLng,
|
||||
Value: fmt.Sprintf("%f", lng),
|
||||
}}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseRawGPS converts an EXIF-style "deg, min, sec" coordinate string into
// decimal degrees. Missing or unparseable components contribute zero, and a
// reference of "S" or "W" negates the result.
func parseRawGPS(gpsStr string, ref string) float64 {
	parts := strings.Split(gpsStr, ", ")
	if len(parts) < 1 {
		return 0
	}

	// component returns the i-th coordinate component, or 0 when absent/invalid.
	component := func(i int) float64 {
		if i >= len(parts) {
			return 0
		}
		v, _ := strconv.ParseFloat(parts[i], 64)
		return v
	}

	decimal := component(0) + component(1)/60.0 + component(2)/3600.0
	if ref == "S" || ref == "W" {
		return -decimal
	}
	return decimal
}
|
||||
428
pkg/filemanager/driver/qiniu/qiniu.go
Normal file
428
pkg/filemanager/driver/qiniu/qiniu.go
Normal file
@@ -0,0 +1,428 @@
|
||||
package qiniu
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/qiniu/go-sdk/v7/auth/qbox"
|
||||
"github.com/qiniu/go-sdk/v7/storage"
|
||||
"github.com/samber/lo"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// chunkRetrySleep is the pause between retries of a failed chunk upload.
	chunkRetrySleep = time.Duration(5) * time.Second
	// maxDeleteBatch caps keys per batch-delete request (Qiniu API limit).
	maxDeleteBatch = 1000
	// trafficLimitParam is the query key for Qiniu download rate limiting (bit/s).
	trafficLimitParam = "X-Qiniu-Traffic-Limit"
)
|
||||
|
||||
var (
	// features is the static feature set advertised by this driver (currently none).
	features = &boolset.BooleanSet{}
)
|
||||
|
||||
// Driver is the storage driver adapter for Qiniu object storage.
type Driver struct {
	policy *ent.StoragePolicy // backing storage policy (bucket, keys, settings)

	mac        *qbox.Mac              // Qiniu API credentials
	cfg        *storage.Config        // Qiniu SDK config (HTTPS enforced)
	bucket     *storage.BucketManager // bucket management client (delete, list)
	settings   setting.Provider
	l          logging.Logger
	config     conf.ConfigProvider
	mime       mime.MimeDetector
	httpClient request.Client

	chunkSize int64 // multipart upload chunk size in bytes
}
|
||||
|
||||
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
|
||||
config conf.ConfigProvider, l logging.Logger, mime mime.MimeDetector) (*Driver, error) {
|
||||
chunkSize := policy.Settings.ChunkSize
|
||||
if policy.Settings.ChunkSize == 0 {
|
||||
chunkSize = 25 << 20 // 25 MB
|
||||
}
|
||||
|
||||
mac := qbox.NewMac(policy.AccessKey, policy.SecretKey)
|
||||
cfg := &storage.Config{UseHTTPS: true}
|
||||
|
||||
driver := &Driver{
|
||||
policy: policy,
|
||||
settings: settings,
|
||||
chunkSize: chunkSize,
|
||||
config: config,
|
||||
l: l,
|
||||
mime: mime,
|
||||
mac: mac,
|
||||
cfg: cfg,
|
||||
bucket: storage.NewBucketManager(mac, cfg),
|
||||
httpClient: request.NewClient(config, request.WithLogger(l)),
|
||||
}
|
||||
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
//
|
||||
//// List 列出给定路径下的文件
|
||||
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) {
|
||||
// base = strings.TrimPrefix(base, "/")
|
||||
// if base != "" {
|
||||
// base += "/"
|
||||
// }
|
||||
//
|
||||
// var (
|
||||
// delimiter string
|
||||
// marker string
|
||||
// objects []storage.ListItem
|
||||
// commons []string
|
||||
// )
|
||||
// if !recursive {
|
||||
// delimiter = "/"
|
||||
// }
|
||||
//
|
||||
// for {
|
||||
// entries, folders, nextMarker, hashNext, err := handler.bucket.ListFiles(
|
||||
// handler.policy.BucketName,
|
||||
// base, delimiter, marker, 1000)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// objects = append(objects, entries...)
|
||||
// commons = append(commons, folders...)
|
||||
// if !hashNext {
|
||||
// break
|
||||
// }
|
||||
// marker = nextMarker
|
||||
// }
|
||||
//
|
||||
// // 处理列取结果
|
||||
// res := make([]response.Object, 0, len(objects)+len(commons))
|
||||
// // 处理目录
|
||||
// for _, object := range commons {
|
||||
// rel, err := filepath.Rel(base, object)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(object),
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: 0,
|
||||
// IsDir: true,
|
||||
// LastModify: time.Now(),
|
||||
// })
|
||||
// }
|
||||
// // 处理文件
|
||||
// for _, object := range objects {
|
||||
// rel, err := filepath.Rel(base, object.Key)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(object.Key),
|
||||
// Source: object.Key,
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: uint64(object.Fsize),
|
||||
// IsDir: false,
|
||||
// LastModify: time.Unix(object.PutTime/10000000, 0),
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// return res, nil
|
||||
//}
|
||||
|
||||
// Put 将文件流保存到指定目录
|
||||
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
|
||||
defer file.Close()
|
||||
|
||||
// 凭证有效期
|
||||
credentialTTL := handler.settings.UploadSessionTTL(ctx)
|
||||
|
||||
// 是否允许覆盖
|
||||
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
|
||||
|
||||
// 生成上传策略
|
||||
scope := handler.policy.BucketName
|
||||
if overwrite {
|
||||
scope = fmt.Sprintf("%s:%s", handler.policy.BucketName, file.Props.SavePath)
|
||||
}
|
||||
putPolicy := storage.PutPolicy{
|
||||
// 指定为覆盖策略
|
||||
Scope: scope,
|
||||
SaveKey: file.Props.SavePath,
|
||||
ForceSaveKey: true,
|
||||
FsizeLimit: file.Props.Size,
|
||||
Expires: uint64(time.Now().Add(credentialTTL).Unix()),
|
||||
}
|
||||
upToken := putPolicy.UploadToken(handler.mac)
|
||||
|
||||
// 初始化分片上传
|
||||
resumeUploader := storage.NewResumeUploaderV2(handler.cfg)
|
||||
upHost, err := resumeUploader.UpHost(handler.policy.AccessKey, handler.policy.BucketName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get upload host: %w", err)
|
||||
}
|
||||
|
||||
ret := &storage.InitPartsRet{}
|
||||
err = resumeUploader.InitParts(ctx, upToken, upHost, handler.policy.BucketName, file.Props.SavePath, true, ret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initiate multipart upload: %w", err)
|
||||
}
|
||||
|
||||
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{
|
||||
Max: handler.settings.ChunkRetryLimit(ctx),
|
||||
Sleep: chunkRetrySleep,
|
||||
}, handler.settings.UseChunkBuffer(ctx), handler.l, handler.settings.TempPath(ctx))
|
||||
|
||||
parts := make([]*storage.UploadPartsRet, 0, chunks.Num())
|
||||
|
||||
uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
|
||||
partRet := &storage.UploadPartsRet{}
|
||||
err := resumeUploader.UploadParts(
|
||||
ctx, upToken, upHost, handler.policy.BucketName, file.Props.SavePath, true, ret.UploadID,
|
||||
int64(current.Index()+1), "", partRet, content, int(current.Length()))
|
||||
if err == nil {
|
||||
parts = append(parts, partRet)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for chunks.Next() {
|
||||
if err := chunks.Process(uploadFunc); err != nil {
|
||||
_ = handler.cancelUpload(upHost, file.Props.SavePath, ret.UploadID, upToken)
|
||||
return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
|
||||
}
|
||||
}
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
err = resumeUploader.CompleteParts(ctx, upToken, upHost, nil, handler.policy.BucketName,
|
||||
file.Props.SavePath, true, ret.UploadID, &storage.RputV2Extra{
|
||||
MimeType: mimeType,
|
||||
Progresses: lo.Map(parts, func(part *storage.UploadPartsRet, i int) storage.UploadPartInfo {
|
||||
return storage.UploadPartInfo{
|
||||
Etag: part.Etag,
|
||||
PartNumber: int64(i) + 1,
|
||||
}
|
||||
}),
|
||||
})
|
||||
if err != nil {
|
||||
_ = handler.cancelUpload(upHost, file.Props.SavePath, ret.UploadID, upToken)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete removes one or more objects in batches of maxDeleteBatch.
// It returns the keys that could not be deleted together with the last error
// encountered (or a synthesized error when only per-item failures occurred).
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
	groups := lo.Chunk(files, maxDeleteBatch)
	failed := make([]string, 0)
	var lastError error

	for index, group := range groups {
		handler.l.Debug("Process delete group #%d: %v", index, group)
		// Issue one batch-delete request for this group.
		rets, err := handler.bucket.BatchWithContext(ctx, handler.policy.BucketName, lo.Map(group, func(key string, index int) string {
			return storage.URIDelete(handler.policy.BucketName, key)
		}))

		// Inspect per-item results; code 612 (presumably "resource does not
		// exist" — confirm against Qiniu docs) is treated as success, as is 200.
		if err != nil {
			for k, ret := range rets {
				if ret.Code != 200 && ret.Code != 612 {
					failed = append(failed, group[k])
					lastError = err
				}
			}
		}
	}

	if len(failed) > 0 && lastError == nil {
		lastError = fmt.Errorf("failed to delete files: %v", failed)
	}

	return failed, lastError
}
|
||||
|
||||
// Thumb 获取文件缩略图
|
||||
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
|
||||
w, h := handler.settings.ThumbSize(ctx)
|
||||
|
||||
thumb := fmt.Sprintf("%s?imageView2/1/w/%d/h/%d", e.Source(), w, h)
|
||||
return handler.signSourceURL(
|
||||
thumb,
|
||||
expire,
|
||||
), nil
|
||||
}
|
||||
|
||||
// Source 获取外链URL
|
||||
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
|
||||
path := e.Source()
|
||||
|
||||
query := url.Values{}
|
||||
|
||||
// 加入下载相关设置
|
||||
if args.IsDownload {
|
||||
query.Add("attname", args.DisplayName)
|
||||
}
|
||||
|
||||
if args.Speed > 0 {
|
||||
// Byte 转换为 bit
|
||||
args.Speed *= 8
|
||||
|
||||
// Qiniu 对速度值有范围限制
|
||||
if args.Speed < 819200 {
|
||||
args.Speed = 819200
|
||||
}
|
||||
if args.Speed > 838860800 {
|
||||
args.Speed = 838860800
|
||||
}
|
||||
query.Add(trafficLimitParam, fmt.Sprintf("%d", args.Speed))
|
||||
}
|
||||
|
||||
if len(query) > 0 {
|
||||
path = path + "?" + query.Encode()
|
||||
}
|
||||
|
||||
// 取得原始文件地址
|
||||
return handler.signSourceURL(path, args.Expire), nil
|
||||
}
|
||||
|
||||
func (handler *Driver) signSourceURL(path string, expire *time.Time) string {
|
||||
var sourceURL string
|
||||
if handler.policy.IsPrivate {
|
||||
deadline := time.Now().Add(time.Duration(24) * time.Hour * 365 * 20).Unix()
|
||||
if expire != nil {
|
||||
deadline = expire.Unix()
|
||||
}
|
||||
sourceURL = storage.MakePrivateURL(handler.mac, handler.policy.Settings.ProxyServer, path, deadline)
|
||||
} else {
|
||||
sourceURL = storage.MakePublicURL(handler.policy.Settings.ProxyServer, path)
|
||||
}
|
||||
return sourceURL
|
||||
}
|
||||
|
||||
// Token 获取上传策略和认证Token
|
||||
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
// 生成回调地址
|
||||
siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
|
||||
apiUrl := routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeQiniu, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()
|
||||
|
||||
// 创建上传策略
|
||||
putPolicy := storage.PutPolicy{
|
||||
Scope: fmt.Sprintf("%s:%s", handler.policy.BucketName, file.Props.SavePath),
|
||||
CallbackURL: apiUrl,
|
||||
CallbackBody: `{"size":$(fsize),"pic_info":"$(imageInfo.width),$(imageInfo.height)"}`,
|
||||
CallbackBodyType: "application/json",
|
||||
SaveKey: file.Props.SavePath,
|
||||
ForceSaveKey: true,
|
||||
FsizeLimit: file.Props.Size,
|
||||
Expires: uint64(file.Props.ExpireAt.Unix()),
|
||||
}
|
||||
|
||||
// 初始化分片上传
|
||||
upToken := putPolicy.UploadToken(handler.mac)
|
||||
resumeUploader := storage.NewResumeUploaderV2(handler.cfg)
|
||||
upHost, err := resumeUploader.UpHost(handler.policy.AccessKey, handler.policy.BucketName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get upload host: %w", err)
|
||||
}
|
||||
|
||||
ret := &storage.InitPartsRet{}
|
||||
err = resumeUploader.InitParts(ctx, upToken, upHost, handler.policy.BucketName, file.Props.SavePath, true, ret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initiate multipart upload: %w", err)
|
||||
}
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
uploadSession.UploadID = ret.UploadID
|
||||
return &fs.UploadCredential{
|
||||
UploadID: ret.UploadID,
|
||||
UploadURLs: []string{getUploadUrl(upHost, handler.policy.BucketName, file.Props.SavePath, ret.UploadID)},
|
||||
Credential: upToken,
|
||||
SessionID: uploadSession.Props.UploadSessionID,
|
||||
ChunkSize: handler.chunkSize,
|
||||
MimeType: mimeType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
// CancelToken aborts the client-side multipart upload session by DELETE-ing
// the session URL, authenticating with the session's original upload token.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	resumeUploader := storage.NewResumeUploaderV2(handler.cfg)
	return resumeUploader.Client.CallWith(ctx, nil, "DELETE", uploadSession.UploadURL, http.Header{"Authorization": {"UpToken " + uploadSession.Credential}}, nil, 0)
}
|
||||
|
||||
// CompleteUpload is a no-op for Qiniu: the multipart upload is finalized by
// the client (completion confirmed via callback), so no extra server-side
// step is required here.
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	return nil
}
|
||||
|
||||
// Capabilities reports what this driver supports based on policy settings.
// Native media-meta extraction is only advertised when enabled on the policy.
func (handler *Driver) Capabilities() *driver.Capabilities {
	mediaMetaExts := handler.policy.Settings.MediaMetaExts
	if !handler.policy.Settings.NativeMediaProcessing {
		// Native media processing disabled: advertise no media-meta extensions.
		mediaMetaExts = nil
	}
	return &driver.Capabilities{
		StaticFeatures:         features,
		MediaMetaSupportedExts: mediaMetaExts,
		MediaMetaProxy:         handler.policy.Settings.MediaMetaGeneratorProxy,
		ThumbSupportedExts:     handler.policy.Settings.ThumbExts,
		ThumbProxy:             handler.policy.Settings.ThumbGeneratorProxy,
		ThumbSupportAllExts:    handler.policy.Settings.ThumbSupportAllExts,
		ThumbMaxSize:           handler.policy.Settings.ThumbMaxSize,
	}
}
|
||||
|
||||
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
|
||||
if util.ContainsString(supportedImageExt, ext) {
|
||||
return handler.extractImageMeta(ctx, path)
|
||||
}
|
||||
|
||||
return handler.extractAvMeta(ctx, path)
|
||||
}
|
||||
|
||||
// LocalPath returns the local filesystem path of an object; Qiniu is a remote
// object store, so there is none and the empty string is returned.
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	return ""
}
|
||||
|
||||
func (handler *Driver) cancelUpload(upHost, savePath, uploadId, upToken string) error {
|
||||
resumeUploader := storage.NewResumeUploaderV2(handler.cfg)
|
||||
uploadUrl := getUploadUrl(upHost, handler.policy.BucketName, savePath, uploadId)
|
||||
err := resumeUploader.Client.CallWith(context.Background(), nil, "DELETE", uploadUrl, http.Header{"Authorization": {"UpToken " + upToken}}, nil, 0)
|
||||
if err != nil {
|
||||
handler.l.Error("Failed to cancel upload session for %q: %s", savePath, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// getUploadUrl assembles the Qiniu resumable-upload (v2) object URL for the
// given bucket, key and upload session. The object key is URL-safe base64
// encoded as required by the Qiniu API.
func getUploadUrl(upHost, bucket, key, uploadId string) string {
	encodedKey := base64.URLEncoding.EncodeToString([]byte(key))
	return fmt.Sprintf("%s/buckets/%s/objects/%s/uploads/%s", upHost, bucket, encodedKey, uploadId)
}
|
||||
266
pkg/filemanager/driver/remote/client.go
Normal file
266
pkg/filemanager/driver/remote/client.go
Normal file
@@ -0,0 +1,266 @@
|
||||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/constants"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/gofrs/uuid"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// OverwriteHeader tells the slave whether an existing file may be overwritten.
	OverwriteHeader = constants.CrHeaderPrefix + "Overwrite"
	// chunkRetrySleep is the pause between retries of a failed chunk upload.
	chunkRetrySleep = time.Duration(5) * time.Second
)
|
||||
|
||||
// Client operates uploads and file management against a remote slave server.
type Client interface {
	// CreateUploadSession creates a remote upload session on the slave.
	CreateUploadSession(ctx context.Context, session *fs.UploadSession, overwrite bool) error
	// GetUploadURL signs a URL for uploading a file; it returns the URL and
	// the Authorization credential to send with it.
	GetUploadURL(ctx context.Context, expires time.Time, sessionID string) (string, string, error)
	// Upload streams the file to the remote server in chunks.
	Upload(ctx context.Context, file *fs.UploadRequest) error
	// DeleteUploadSession removes a remote upload session.
	DeleteUploadSession(ctx context.Context, sessionID string) error
	// MediaMeta fetches media metadata for src from the remote server.
	MediaMeta(ctx context.Context, src, ext string) ([]driver.MediaMeta, error)
	// DeleteFiles deletes files on the remote server, returning the ones that failed.
	DeleteFiles(ctx context.Context, files ...string) ([]string, error)
}
|
||||
|
||||
// DeleteFileRequest is the JSON body sent to the slave's file-deletion endpoint.
type DeleteFileRequest struct {
	Files []string `json:"files"` // object keys to delete
}
|
||||
|
||||
// NewClient creates a new Client from the given policy. The policy must have
// its Node edge loaded; the slave's key signs all requests, and the HTTP
// client is pre-configured with the slave API endpoint and request metadata.
func NewClient(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider, config conf.ConfigProvider, l logging.Logger) (Client, error) {
	if policy.Edges.Node == nil {
		return nil, fmt.Errorf("remote storage policy %d has no node", policy.ID)
	}

	// HMAC credentials derived from the slave node's shared key.
	authInstance := auth.HMACAuth{[]byte(policy.Edges.Node.SlaveKey)}
	serverURL, err := url.Parse(policy.Edges.Node.Server)
	if err != nil {
		return nil, err
	}

	// Resolve the slave API prefix against the node's base server URL.
	base, _ := url.Parse(constants.APIPrefixSlave)

	return &remoteClient{
		policy:       policy,
		authInstance: authInstance,
		httpClient: request.NewClient(
			config,
			request.WithEndpoint(serverURL.ResolveReference(base).String()),
			request.WithCredential(authInstance, int64(settings.SlaveRequestSignTTL(ctx))),
			request.WithSlaveMeta(policy.Edges.Node.ID),
			request.WithMasterMeta(settings.SiteBasic(ctx).ID, settings.SiteURL(setting.UseFirstSiteUrl(ctx)).String()),
			request.WithCorrelationID(),
		),
		settings: settings,
		l:        l,
	}, nil
}
|
||||
|
||||
// remoteClient is the default Client implementation talking to a slave node
// over its signed HTTP API.
type remoteClient struct {
	policy       *ent.StoragePolicy // remote storage policy (with Node edge)
	authInstance auth.Auth          // signs outgoing slave requests
	httpClient   request.Client     // pre-configured slave API client
	settings     setting.Provider
	l            logging.Logger
}
|
||||
|
||||
// Upload streams file to the slave node: it registers a fresh upload session,
// then sends the content chunk by chunk with retry/backoff. On chunk failure
// the session is deleted (best-effort) and the error is returned.
func (c *remoteClient) Upload(ctx context.Context, file *fs.UploadRequest) error {
	ttl := c.settings.UploadSessionTTL(ctx)
	session := &fs.UploadSession{
		Props:  file.Props.Copy(),
		Policy: c.policy,
	}
	// Fresh session ID and expiry for this transfer.
	session.Props.UploadSessionID = uuid.Must(uuid.NewV4()).String()
	session.Props.ExpireAt = time.Now().Add(ttl)

	// Create upload session
	overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
	if err := c.CreateUploadSession(ctx, session, overwrite); err != nil {
		return fmt.Errorf("failed to create upload session: %w", err)
	}

	// Initial chunk groups
	chunks := chunk.NewChunkGroup(file, c.policy.Settings.ChunkSize, &backoff.ConstantBackoff{
		Max:   c.settings.ChunkRetryLimit(ctx),
		Sleep: chunkRetrySleep,
	}, c.settings.UseChunkBuffer(ctx), c.l, c.settings.TempPath(ctx))

	uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
		return c.uploadChunk(ctx, session.Props.UploadSessionID, current.Index(), content, overwrite, current.Length())
	}

	// upload chunks
	for chunks.Next() {
		if err := chunks.Process(uploadFunc); err != nil {
			// Best-effort cleanup of the half-finished session.
			if err := c.DeleteUploadSession(ctx, session.Props.UploadSessionID); err != nil {
				c.l.Warning("failed to delete upload session: %s", err)
			}

			return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
		}
	}

	return nil
}
|
||||
|
||||
func (c *remoteClient) DeleteUploadSession(ctx context.Context, sessionID string) error {
|
||||
resp, err := c.httpClient.Request(
|
||||
"DELETE",
|
||||
"upload/"+sessionID,
|
||||
nil,
|
||||
request.WithContext(ctx),
|
||||
request.WithLogger(logging.FromContext(ctx)),
|
||||
).CheckHTTPResponse(200).DecodeResponse()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Code != 0 {
|
||||
return serializer.NewErrorFromResponse(resp)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) DeleteFiles(ctx context.Context, files ...string) ([]string, error) {
|
||||
req := &DeleteFileRequest{
|
||||
Files: files,
|
||||
}
|
||||
|
||||
reqStr, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return files, fmt.Errorf("failed to marshal delete request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := c.httpClient.Request(
|
||||
"DELETE",
|
||||
"file",
|
||||
bytes.NewReader(reqStr),
|
||||
request.WithContext(ctx),
|
||||
request.WithLogger(logging.FromContext(ctx)),
|
||||
).CheckHTTPResponse(200).DecodeResponse()
|
||||
if err != nil {
|
||||
return files, err
|
||||
}
|
||||
|
||||
if resp.Code != 0 {
|
||||
var failed []string
|
||||
failed = files
|
||||
if resp.Code == serializer.CodeNotFullySuccess {
|
||||
resp.GobDecode(&failed)
|
||||
}
|
||||
return failed, fmt.Errorf(resp.Error)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) MediaMeta(ctx context.Context, src, ext string) ([]driver.MediaMeta, error) {
|
||||
resp, err := c.httpClient.Request(
|
||||
http.MethodGet,
|
||||
routes.SlaveMediaMetaRoute(src, ext),
|
||||
nil,
|
||||
request.WithContext(ctx),
|
||||
request.WithLogger(c.l),
|
||||
).CheckHTTPResponse(200).DecodeResponse()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.Code != 0 {
|
||||
return nil, fmt.Errorf(resp.Error)
|
||||
}
|
||||
|
||||
var metas []driver.MediaMeta
|
||||
resp.GobDecode(&metas)
|
||||
return metas, nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) CreateUploadSession(ctx context.Context, session *fs.UploadSession, overwrite bool) error {
|
||||
reqBodyEncoded, err := json.Marshal(map[string]interface{}{
|
||||
"session": session,
|
||||
"overwrite": overwrite,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bodyReader := strings.NewReader(string(reqBodyEncoded))
|
||||
resp, err := c.httpClient.Request(
|
||||
"PUT",
|
||||
"upload",
|
||||
bodyReader,
|
||||
request.WithContext(ctx),
|
||||
request.WithLogger(c.l),
|
||||
).CheckHTTPResponse(200).DecodeResponse()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Code != 0 {
|
||||
return serializer.NewErrorFromResponse(resp)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) GetUploadURL(ctx context.Context, expires time.Time, sessionID string) (string, string, error) {
|
||||
base, err := url.Parse(c.policy.Edges.Node.Server)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", routes.SlaveUploadUrl(base, sessionID).String(), nil)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
req = auth.SignRequest(ctx, c.authInstance, req, &expires)
|
||||
return req.URL.String(), req.Header["Authorization"][0], nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) uploadChunk(ctx context.Context, sessionID string, index int, chunk io.Reader, overwrite bool, size int64) error {
|
||||
resp, err := c.httpClient.Request(
|
||||
"POST",
|
||||
fmt.Sprintf("upload/%s?chunk=%d", sessionID, index),
|
||||
chunk,
|
||||
request.WithContext(ctx),
|
||||
request.WithTimeout(time.Duration(0)),
|
||||
request.WithContentLength(size),
|
||||
request.WithHeader(map[string][]string{OverwriteHeader: {fmt.Sprintf("%t", overwrite)}}),
|
||||
).CheckHTTPResponse(200).DecodeResponse()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Code != 0 {
|
||||
return serializer.NewErrorFromResponse(resp)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
273
pkg/filemanager/driver/remote/remote.go
Normal file
273
pkg/filemanager/driver/remote/remote.go
Normal file
@@ -0,0 +1,273 @@
|
||||
package remote
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
	// features is the static feature set advertised by this driver (currently none).
	features = &boolset.BooleanSet{}
)
|
||||
|
||||
// Driver is the storage driver adapter for remote (slave-node) storage.
type Driver struct {
	Client       request.Client     // raw HTTP client for the slave node
	Policy       *ent.StoragePolicy // backing storage policy (with Node edge)
	AuthInstance auth.Auth          // signs requests with the slave's key

	uploadClient Client // higher-level slave API client (upload, delete, meta)
	config       conf.ConfigProvider
	settings     setting.Provider
}
|
||||
|
||||
// New initializes a new Driver from policy
|
||||
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
|
||||
config conf.ConfigProvider, l logging.Logger) (*Driver, error) {
|
||||
client, err := NewClient(ctx, policy, settings, config, l)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Driver{
|
||||
Policy: policy,
|
||||
Client: request.NewClient(config),
|
||||
AuthInstance: auth.HMACAuth{[]byte(policy.Edges.Node.SlaveKey)},
|
||||
uploadClient: client,
|
||||
settings: settings,
|
||||
config: config,
|
||||
}, nil
|
||||
}
|
||||
|
||||
//// List 列取文件
|
||||
//func (handler *Driver) List(ctx context.Context, path string, recursive bool) ([]response.Object, error) {
|
||||
// var res []response.Object
|
||||
//
|
||||
// reqBody := serializer.ListRequest{
|
||||
// Path: path,
|
||||
// Recursive: recursive,
|
||||
// }
|
||||
// reqBodyEncoded, err := json.Marshal(reqBody)
|
||||
// if err != nil {
|
||||
// return res, err
|
||||
// }
|
||||
//
|
||||
// // 发送列表请求
|
||||
// bodyReader := strings.NewReader(string(reqBodyEncoded))
|
||||
// signTTL := model.GetIntSetting("slave_api_timeout", 60)
|
||||
// resp, err := handler.Client.Request(
|
||||
// "POST",
|
||||
// handler.getAPIUrl("list"),
|
||||
// bodyReader,
|
||||
// request.WithCredential(handler.AuthInstance, int64(signTTL)),
|
||||
// request.WithMasterMeta(handler.settings.SiteBasic(ctx).ID, handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx)).String()),
|
||||
// ).CheckHTTPResponse(200).DecodeResponse()
|
||||
// if err != nil {
|
||||
// return res, err
|
||||
// }
|
||||
//
|
||||
// // 处理列取结果
|
||||
// if resp.Code != 0 {
|
||||
// return res, errors.New(resp.Error)
|
||||
// }
|
||||
//
|
||||
// if resStr, ok := resp.Data.(string); ok {
|
||||
// err = json.Unmarshal([]byte(resStr), &res)
|
||||
// if err != nil {
|
||||
// return res, err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// return res, nil
|
||||
//}
|
||||
|
||||
// getAPIUrl 获取接口请求地址
|
||||
func (handler *Driver) getAPIUrl(scope string, routes ...string) string {
|
||||
serverURL, err := url.Parse(handler.Policy.Edges.Node.Server)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
var controller *url.URL
|
||||
|
||||
switch scope {
|
||||
case "delete":
|
||||
controller, _ = url.Parse("/api/v3/slave/delete")
|
||||
case "thumb":
|
||||
controller, _ = url.Parse("/api/v3/slave/thumb")
|
||||
case "list":
|
||||
controller, _ = url.Parse("/api/v3/slave/list")
|
||||
default:
|
||||
controller = serverURL
|
||||
}
|
||||
|
||||
for _, r := range routes {
|
||||
controller.Path = path.Join(controller.Path, r)
|
||||
}
|
||||
|
||||
return serverURL.ResolveReference(controller).String()
|
||||
}
|
||||
|
||||
// Open returns file content as a local file handle. Not supported by the
// remote driver; the commented-out code below is the legacy V3 implementation
// kept for reference.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	//// Try to fetch the user's speed limit.
	//speedLimit := 0
	//if user, ok := ctx.Value(fsctx.UserCtx).(model.User); ok {
	//	speedLimit = user.Group.SpeedLimit
	//}
	//
	//// Resolve the file's source URL.
	//downloadURL, err := handler.Source(ctx, path, nil, true, int64(speedLimit))
	//if err != nil {
	//	return nil, err
	//}
	//
	//// Fetch the file data stream.
	//resp, err := handler.Client.Request(
	//	"GET",
	//	downloadURL,
	//	nil,
	//	request.WithContext(ctx),
	//	request.WithTimeout(time.Duration(0)),
	//	request.WithMasterMeta(handler.settings.SiteBasic(ctx).ID, handler.settings.SiteURL(ctx).String()),
	//).CheckHTTPResponse(200).GetRSCloser()
	//if err != nil {
	//	return nil, err
	//}
	//
	//resp.SetFirstFakeChunk()
	//
	//// Try to determine the file size.
	//if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok {
	//	resp.SetContentLength(int64(file.Size))
	//}

	return nil, errors.New("not implemented")
}
|
||||
|
||||
// LocalPath returns the local filesystem path for the given object. Remote
// storage has no local representation, so this always returns "".
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	return ""
}
|
||||
|
||||
// Put streams the upload request to the slave node via the upload client.
// The request body is always closed before returning.
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
	defer file.Close()

	return handler.uploadClient.Upload(ctx, file)
}
|
||||
|
||||
// Delete removes one or more files on the slave node. It returns the files
// that could not be deleted together with the last error encountered; on
// success an empty slice is returned.
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
	failed, err := handler.uploadClient.DeleteFiles(ctx, files...)
	if err != nil {
		return failed, err
	}
	return []string{}, nil
}
|
||||
|
||||
// Thumb returns a signed URL pointing at the slave node's thumbnail endpoint
// for the given entity. The URL is signed with the node's credential;
// expire controls its validity (semantics of a nil expire are defined by
// auth.SignURI — TODO confirm it means "no expiry").
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
	serverURL, err := url.Parse(handler.Policy.Edges.Node.Server)
	if err != nil {
		return "", fmt.Errorf("parse server url failed: %w", err)
	}

	thumbURL := routes.SlaveThumbUrl(serverURL, e.Source(), ext)
	signedThumbURL, err := auth.SignURI(ctx, handler.AuthInstance, thumbURL.String(), expire)
	if err != nil {
		return "", err
	}

	return signedThumbURL.String(), nil
}
|
||||
|
||||
// Source generates a signed external URL for accessing the entity content
// served by the slave node.
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
	server, err := url.Parse(handler.Policy.Edges.Node.Server)
	if err != nil {
		return "", err
	}

	// In slave mode the content URL carries the owning node's ID; in master
	// mode it is left as 0.
	nodeId := 0
	if handler.config.System().Mode == conf.SlaveMode {
		nodeId = handler.Policy.NodeID
	}

	base := routes.SlaveFileContentUrl(
		server,
		e.Source(),
		args.DisplayName,
		args.IsDownload,
		args.Speed,
		nodeId,
	)
	internalProxyed, err := auth.SignURI(ctx, handler.AuthInstance, base.String(), args.Expire)
	if err != nil {
		return "", fmt.Errorf("failed to sign internal slave content URL: %w", err)
	}

	return internalProxyed.String(), nil
}
|
||||
|
||||
// Token creates an upload session on the slave node and returns the upload
// credential (session ID, chunk size, upload URL and signature) the client
// uses to upload directly to the slave.
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
	siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
	// Create the upload session on the slave side; the callback URL lets the
	// slave notify this master when the upload completes.
	uploadSession.Callback = routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeRemote, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()
	if err := handler.uploadClient.CreateUploadSession(ctx, uploadSession, false); err != nil {
		return nil, err
	}

	// Request a signed upload URL valid until the session expires.
	uploadURL, sign, err := handler.uploadClient.GetUploadURL(ctx, uploadSession.Props.ExpireAt, uploadSession.Props.UploadSessionID)
	if err != nil {
		return nil, fmt.Errorf("failed to sign upload url: %w", err)
	}

	return &fs.UploadCredential{
		SessionID:  uploadSession.Props.UploadSessionID,
		ChunkSize:  handler.Policy.Settings.ChunkSize,
		UploadURLs: []string{uploadURL},
		Credential: sign,
	}, nil
}
|
||||
|
||||
// CancelToken revokes an upload credential by deleting the corresponding
// upload session on the slave node.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	return handler.uploadClient.DeleteUploadSession(ctx, uploadSession.Props.UploadSessionID)
}
|
||||
|
||||
// CompleteUpload finalizes an upload session. The remote driver needs no
// post-upload processing, so this is a no-op.
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	return nil
}
|
||||
|
||||
// Capabilities reports the static feature set and the policy-derived
// media-meta/thumbnail settings supported by this remote storage driver.
func (handler *Driver) Capabilities() *driver.Capabilities {
	return &driver.Capabilities{
		StaticFeatures:         features,
		MediaMetaSupportedExts: handler.Policy.Settings.MediaMetaExts,
		MediaMetaProxy:         handler.Policy.Settings.MediaMetaGeneratorProxy,
		ThumbSupportedExts:     handler.Policy.Settings.ThumbExts,
		ThumbProxy:             handler.Policy.Settings.ThumbGeneratorProxy,
		ThumbMaxSize:           handler.Policy.Settings.ThumbMaxSize,
		ThumbSupportAllExts:    handler.Policy.Settings.ThumbSupportAllExts,
	}
}
|
||||
|
||||
// MediaMeta extracts media metadata for the given file by delegating to the
// slave node's media-meta endpoint.
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
	return handler.uploadClient.MediaMeta(ctx, path, ext)
}
|
||||
514
pkg/filemanager/driver/s3/s3.go
Normal file
514
pkg/filemanager/driver/s3/s3.go
Normal file
@@ -0,0 +1,514 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/samber/lo"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
)
|
||||
|
||||
// Driver S3 compatible driver
|
||||
type Driver struct {
|
||||
policy *ent.StoragePolicy
|
||||
chunkSize int64
|
||||
|
||||
settings setting.Provider
|
||||
l logging.Logger
|
||||
config conf.ConfigProvider
|
||||
mime mime.MimeDetector
|
||||
|
||||
sess *session.Session
|
||||
svc *s3.S3
|
||||
}
|
||||
|
||||
// UploadPolicy S3上传策略
|
||||
type UploadPolicy struct {
|
||||
Expiration string `json:"expiration"`
|
||||
Conditions []interface{} `json:"conditions"`
|
||||
}
|
||||
|
||||
// MetaData 文件信息
|
||||
type MetaData struct {
|
||||
Size int64
|
||||
Etag string
|
||||
}
|
||||
|
||||
var (
|
||||
features = &boolset.BooleanSet{}
|
||||
)
|
||||
|
||||
func init() {
|
||||
boolset.Sets(map[driver.HandlerCapability]bool{
|
||||
driver.HandlerCapabilityUploadSentinelRequired: true,
|
||||
}, features)
|
||||
}
|
||||
|
||||
// New constructs an S3-compatible storage driver from the given policy.
// When the policy does not specify a chunk size, a 25 MB default is used.
// The AWS session and S3 service client are created eagerly; construction
// fails if the session cannot be established.
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
	config conf.ConfigProvider, l logging.Logger, mime mime.MimeDetector) (*Driver, error) {
	chunkSize := policy.Settings.ChunkSize
	if policy.Settings.ChunkSize == 0 {
		chunkSize = 25 << 20 // 25 MB
	}

	driver := &Driver{
		policy:    policy,
		settings:  settings,
		chunkSize: chunkSize,
		config:    config,
		l:         l,
		mime:      mime,
	}

	sess, err := session.NewSession(&aws.Config{
		Credentials:      credentials.NewStaticCredentials(policy.AccessKey, policy.SecretKey, ""),
		Endpoint:         &policy.Server,
		Region:           &policy.Settings.Region,
		S3ForcePathStyle: &policy.Settings.S3ForcePathStyle,
	})

	if err != nil {
		return nil, err
	}
	driver.sess = sess
	driver.svc = s3.New(sess)

	return driver, nil
}
|
||||
|
||||
//// List 列出给定路径下的文件
|
||||
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) {
|
||||
// // 初始化列目录参数
|
||||
// base = strings.TrimPrefix(base, "/")
|
||||
// if base != "" {
|
||||
// base += "/"
|
||||
// }
|
||||
//
|
||||
// opt := &s3.ListObjectsInput{
|
||||
// Bucket: &handler.policy.BucketName,
|
||||
// Prefix: &base,
|
||||
// MaxKeys: aws.Int64(1000),
|
||||
// }
|
||||
//
|
||||
// // 是否为递归列出
|
||||
// if !recursive {
|
||||
// opt.Delimiter = aws.String("/")
|
||||
// }
|
||||
//
|
||||
// var (
|
||||
// objects []*s3.Object
|
||||
// commons []*s3.CommonPrefix
|
||||
// )
|
||||
//
|
||||
// for {
|
||||
// res, err := handler.svc.ListObjectsWithContext(ctx, opt)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// objects = append(objects, res.Contents...)
|
||||
// commons = append(commons, res.CommonPrefixes...)
|
||||
//
|
||||
// // 如果本次未列取完,则继续使用marker获取结果
|
||||
// if *res.IsTruncated {
|
||||
// opt.Marker = res.NextMarker
|
||||
// } else {
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// // 处理列取结果
|
||||
// res := make([]response.Object, 0, len(objects)+len(commons))
|
||||
//
|
||||
// // 处理目录
|
||||
// for _, object := range commons {
|
||||
// rel, err := filepath.Rel(*opt.Prefix, *object.Prefix)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(*object.Prefix),
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: 0,
|
||||
// IsDir: true,
|
||||
// LastModify: time.Now(),
|
||||
// })
|
||||
// }
|
||||
// // 处理文件
|
||||
// for _, object := range objects {
|
||||
// rel, err := filepath.Rel(*opt.Prefix, *object.Key)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(*object.Key),
|
||||
// Source: *object.Key,
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: uint64(*object.Size),
|
||||
// IsDir: false,
|
||||
// LastModify: time.Now(),
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// return res, nil
|
||||
//
|
||||
//}
|
||||
|
||||
// Open is not supported by the S3 driver; objects cannot be exposed as
// local *os.File handles.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	return nil, errors.New("not implemented")
}
|
||||
|
||||
// Put 将文件流保存到指定目录
|
||||
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
|
||||
defer file.Close()
|
||||
|
||||
// 是否允许覆盖
|
||||
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
|
||||
if !overwrite {
|
||||
// Check for duplicated file
|
||||
if _, err := handler.Meta(ctx, file.Props.SavePath); err == nil {
|
||||
return fs.ErrFileExisted
|
||||
}
|
||||
}
|
||||
|
||||
uploader := s3manager.NewUploader(handler.sess, func(u *s3manager.Uploader) {
|
||||
u.PartSize = handler.chunkSize
|
||||
})
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
_, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
|
||||
Bucket: &handler.policy.BucketName,
|
||||
Key: &file.Props.SavePath,
|
||||
Body: io.LimitReader(file, file.Props.Size),
|
||||
ContentType: aws.String(mimeType),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete 删除一个或多个文件,
|
||||
// 返回未删除的文件,及遇到的最后一个错误
|
||||
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
|
||||
failed := make([]string, 0, len(files))
|
||||
batchSize := handler.policy.Settings.S3DeleteBatchSize
|
||||
if batchSize == 0 {
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
|
||||
// The request can contain a list of up to 1000 keys that you want to delete.
|
||||
batchSize = 1000
|
||||
}
|
||||
|
||||
var lastErr error
|
||||
|
||||
groups := lo.Chunk(files, batchSize)
|
||||
for _, group := range groups {
|
||||
if len(group) == 1 {
|
||||
// Invoke single file delete API
|
||||
_, err := handler.svc.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{
|
||||
Bucket: &handler.policy.BucketName,
|
||||
Key: &group[0],
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if aerr, ok := err.(awserr.Error); ok {
|
||||
// Ignore NoSuchKey error
|
||||
if aerr.Code() == s3.ErrCodeNoSuchKey {
|
||||
continue
|
||||
}
|
||||
}
|
||||
failed = append(failed, group[0])
|
||||
lastErr = err
|
||||
}
|
||||
} else {
|
||||
// Invoke batch delete API
|
||||
res, err := handler.svc.DeleteObjects(
|
||||
&s3.DeleteObjectsInput{
|
||||
Bucket: &handler.policy.BucketName,
|
||||
Delete: &s3.Delete{
|
||||
Objects: lo.Map(group, func(s string, i int) *s3.ObjectIdentifier {
|
||||
return &s3.ObjectIdentifier{Key: &s}
|
||||
}),
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
failed = append(failed, group...)
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
|
||||
for _, v := range res.Errors {
|
||||
handler.l.Debug("Failed to delete file: %s, Code:%s, Message:%s", v.Key, v.Code, v.Key)
|
||||
failed = append(failed, *v.Key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return failed, lastErr
|
||||
|
||||
}
|
||||
|
||||
// Thumb is not supported natively by the S3 driver; thumbnails are produced
// elsewhere (see Capabilities' ThumbProxy setting).
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
	return "", errors.New("not implemented")
}
|
||||
|
||||
// Source builds a presigned GET URL for the object. For downloads it sets a
// Content-Disposition response override carrying the (escaped) display name.
// The URL is presigned for 7 days unless args.Expire is set. For public
// buckets the signature query string is stripped from the final URL.
//
// NOTE(review): the original comment mentioned swapping in a user-configured
// CDN domain, but no domain replacement happens here — confirm intended.
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
	var contentDescription *string
	if args.IsDownload {
		encodedFilename := url.PathEscape(args.DisplayName)
		contentDescription = aws.String(fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
			encodedFilename, encodedFilename))
	}

	req, _ := handler.svc.GetObjectRequest(
		&s3.GetObjectInput{
			Bucket:                     &handler.policy.BucketName,
			Key:                        aws.String(e.Source()),
			ResponseContentDisposition: contentDescription,
		})

	ttl := time.Duration(604800) * time.Second // 7 days
	if args.Expire != nil {
		ttl = time.Until(*args.Expire)
	}
	signedURL, err := req.Presign(ttl)
	if err != nil {
		return "", err
	}

	// Parse the signed URL back so its query can be adjusted below.
	finalURL, err := url.Parse(signedURL)
	if err != nil {
		return "", err
	}

	// Public buckets: drop the signature query entirely.
	if !handler.policy.IsPrivate {
		finalURL.RawQuery = ""
	}

	return finalURL.String(), nil
}
|
||||
|
||||
// Token 获取上传策略和认证Token
|
||||
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
// Check for duplicated file
|
||||
if _, err := handler.Meta(ctx, file.Props.SavePath); err == nil {
|
||||
return nil, fs.ErrFileExisted
|
||||
}
|
||||
|
||||
// 生成回调地址
|
||||
siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
|
||||
// 在从机端创建上传会话
|
||||
uploadSession.ChunkSize = handler.chunkSize
|
||||
uploadSession.Callback = routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeS3, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
// 创建分片上传
|
||||
res, err := handler.svc.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
|
||||
Bucket: &handler.policy.BucketName,
|
||||
Key: &uploadSession.Props.SavePath,
|
||||
Expires: &uploadSession.Props.ExpireAt,
|
||||
ContentType: aws.String(mimeType),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create multipart upload: %w", err)
|
||||
}
|
||||
|
||||
uploadSession.UploadID = *res.UploadId
|
||||
|
||||
// 为每个分片签名上传 URL
|
||||
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{}, false, handler.l, "")
|
||||
urls := make([]string, chunks.Num())
|
||||
for chunks.Next() {
|
||||
err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error {
|
||||
signedReq, _ := handler.svc.UploadPartRequest(&s3.UploadPartInput{
|
||||
Bucket: &handler.policy.BucketName,
|
||||
Key: &uploadSession.Props.SavePath,
|
||||
PartNumber: aws.Int64(int64(c.Index() + 1)),
|
||||
ContentLength: aws.Int64(c.Length()),
|
||||
UploadId: res.UploadId,
|
||||
})
|
||||
|
||||
signedURL, err := signedReq.Presign(time.Until(uploadSession.Props.ExpireAt))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
urls[c.Index()] = signedURL
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// 签名完成分片上传的请求URL
|
||||
signedReq, _ := handler.svc.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
|
||||
Bucket: &handler.policy.BucketName,
|
||||
Key: &file.Props.SavePath,
|
||||
UploadId: res.UploadId,
|
||||
})
|
||||
|
||||
signedURL, err := signedReq.Presign(time.Until(uploadSession.Props.ExpireAt))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 生成上传凭证
|
||||
return &fs.UploadCredential{
|
||||
UploadID: *res.UploadId,
|
||||
UploadURLs: urls,
|
||||
CompleteURL: signedURL,
|
||||
SessionID: uploadSession.Props.UploadSessionID,
|
||||
ChunkSize: handler.chunkSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Meta fetches the object's size and ETag via an S3 HEAD request. A non-nil
// error (e.g. a 404 from HeadObject) indicates the object is missing or
// inaccessible — callers use this as an existence check.
func (handler *Driver) Meta(ctx context.Context, path string) (*MetaData, error) {
	res, err := handler.svc.HeadObjectWithContext(ctx,
		&s3.HeadObjectInput{
			Bucket: &handler.policy.BucketName,
			Key:    &path,
		})

	if err != nil {
		return nil, err
	}

	return &MetaData{
		Size: *res.ContentLength,
		Etag: *res.ETag,
	}, nil

}
|
||||
|
||||
// CORS installs a permissive CORS policy on the bucket (all origins and
// headers, common methods, ETag exposed, 1-hour preflight cache) so
// browser-side direct uploads work.
func (handler *Driver) CORS() error {
	rule := s3.CORSRule{
		AllowedMethods: aws.StringSlice([]string{
			"GET",
			"POST",
			"PUT",
			"DELETE",
			"HEAD",
		}),
		AllowedOrigins: aws.StringSlice([]string{"*"}),
		AllowedHeaders: aws.StringSlice([]string{"*"}),
		ExposeHeaders:  aws.StringSlice([]string{"ETag"}),
		MaxAgeSeconds:  aws.Int64(3600),
	}

	_, err := handler.svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: &handler.policy.BucketName,
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{&rule},
		},
	})

	return err
}
|
||||
|
||||
// CancelToken aborts the pending multipart upload associated with the
// session, releasing already-uploaded parts on the S3 side.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	_, err := handler.svc.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
		UploadId: &uploadSession.UploadID,
		Bucket:   &handler.policy.BucketName,
		Key:      &uploadSession.Props.SavePath,
	})
	return err
}
|
||||
|
||||
// cancelUpload best-effort aborts a multipart upload by key and upload ID,
// logging (but not returning) any failure.
func (handler *Driver) cancelUpload(key, id *string) {
	if _, err := handler.svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		Bucket:   &handler.policy.BucketName,
		UploadId: id,
		Key:      key,
	}); err != nil {
		handler.l.Warning("failed to abort multipart upload: %s", err)
	}
}
|
||||
|
||||
// Capabilities reports the driver's static features, policy-derived proxy
// settings, and the 7-day maximum presigned-URL lifetime imposed by S3.
func (handler *Driver) Capabilities() *driver.Capabilities {
	return &driver.Capabilities{
		StaticFeatures:  features,
		MediaMetaProxy:  handler.policy.Settings.MediaMetaGeneratorProxy,
		ThumbProxy:      handler.policy.Settings.ThumbGeneratorProxy,
		MaxSourceExpire: time.Duration(604800) * time.Second,
	}
}
|
||||
|
||||
// MediaMeta is not supported natively by the S3 driver; metadata extraction
// is handled elsewhere (see Capabilities' MediaMetaProxy setting).
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
	return nil, errors.New("not implemented")
}
|
||||
|
||||
// LocalPath returns the local filesystem path for the given object. S3
// objects have no local representation, so this always returns "".
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	return ""
}
|
||||
|
||||
// CompleteUpload verifies a finished upload when a sentinel task exists: it
// HEADs the uploaded object and fails with CodeMetaMismatch if the stored
// size differs from the session's expected size. Sessions without a
// sentinel task are accepted as-is.
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	if session.SentinelTaskID == 0 {
		return nil
	}

	// Make sure uploaded file size is correct
	res, err := handler.Meta(ctx, session.Props.SavePath)
	if err != nil {
		return fmt.Errorf("failed to get uploaded file size: %w", err)
	}

	if res.Size != session.Props.Size {
		return serializer.NewError(
			serializer.CodeMetaMismatch,
			fmt.Sprintf("File size not match, expected: %d, actual: %d", session.Props.Size, res.Size),
			nil,
		)
	}
	return nil
}
|
||||
|
||||
// Reader wraps an io.Reader and exposes only its Read method, hiding any
// optional interfaces (Seeker, Closer, ...) the wrapped value implements.
type Reader struct {
	r io.Reader
}

// Read delegates to the wrapped reader.
func (r Reader) Read(p []byte) (int, error) {
	return r.r.Read(p)
}
|
||||
154
pkg/filemanager/driver/upyun/media.go
Normal file
154
pkg/filemanager/driver/upyun/media.go
Normal file
@@ -0,0 +1,154 @@
|
||||
package upyun
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/samber/lo"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
mediaInfoTTL = time.Duration(10) * time.Minute
|
||||
)
|
||||
|
||||
type (
|
||||
ImageInfo struct {
|
||||
Exif map[string]string `json:"EXIF"`
|
||||
}
|
||||
)
|
||||
|
||||
func (handler *Driver) extractImageMeta(ctx context.Context, path string) ([]driver.MediaMeta, error) {
|
||||
resp, err := handler.extractMediaInfo(ctx, path, "!/meta")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fmt.Println(resp)
|
||||
|
||||
var imageInfo ImageInfo
|
||||
if err := json.Unmarshal([]byte(resp), &imageInfo); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal image info: %w", err)
|
||||
}
|
||||
|
||||
metas := make([]driver.MediaMeta, 0, len(imageInfo.Exif))
|
||||
exifMap := lo.MapEntries(imageInfo.Exif, func(key string, value string) (string, string) {
|
||||
switch key {
|
||||
case "0xA434":
|
||||
key = "LensModel"
|
||||
}
|
||||
return key, value
|
||||
})
|
||||
metas = append(metas, mediameta.ExtractExifMap(exifMap, time.Time{})...)
|
||||
metas = append(metas, parseGpsInfo(imageInfo.Exif)...)
|
||||
|
||||
for i := 0; i < len(metas); i++ {
|
||||
metas[i].Type = driver.MetaTypeExif
|
||||
}
|
||||
|
||||
return metas, nil
|
||||
}
|
||||
|
||||
// extractMediaInfo issues a signed GET request against the given path with
// the Upyun media-processing parameter appended (e.g. "!/meta") and returns
// the raw response body. The signed URL is valid for mediaInfoTTL.
func (handler *Driver) extractMediaInfo(ctx context.Context, path string, param string) (string, error) {
	mediaInfoExpire := time.Now().Add(mediaInfoTTL)
	mediaInfoUrl, err := handler.signURL(ctx, path+param, nil, &mediaInfoExpire)
	if err != nil {
		return "", err
	}

	resp, err := handler.httpClient.
		Request(http.MethodGet, mediaInfoUrl, nil, request.WithContext(ctx)).
		CheckHTTPResponse(http.StatusOK).
		GetResponseIgnoreErr()
	if err != nil {
		// resp may still contain Upyun's error payload on failure.
		return "", unmarshalError(resp, err)
	}

	return resp, nil
}
|
||||
|
||||
func unmarshalError(resp string, err error) error {
|
||||
return fmt.Errorf("upyun error: %s", err)
|
||||
}
|
||||
|
||||
// parseGpsInfo extracts GPS latitude/longitude from a raw EXIF map and
// returns them as normalized decimal-degree MediaMeta entries. It returns
// nil when any of the four GPS tags is missing or a coordinate is NaN.
func parseGpsInfo(imageInfo map[string]string) []driver.MediaMeta {
	latitude := imageInfo["GPSLatitude"]   // 31/1, 162680820/10000000, 0/1
	longitude := imageInfo["GPSLongitude"] // 120/1, 429103939/10000000, 0/1
	latRef := imageInfo["GPSLatitudeRef"]  // N
	lonRef := imageInfo["GPSLongitudeRef"] // E

	// Make sure all value exist in map
	if latitude == "" || longitude == "" || latRef == "" || lonRef == "" {
		return nil
	}

	lat := parseRawGPS(latitude, latRef)
	lon := parseRawGPS(longitude, lonRef)
	if !math.IsNaN(lat) && !math.IsNaN(lon) {
		lat, lng := mediameta.NormalizeGPS(lat, lon)
		return []driver.MediaMeta{{
			Key:   mediameta.GpsLat,
			Value: fmt.Sprintf("%f", lat),
		}, {
			Key:   mediameta.GpsLng,
			Value: fmt.Sprintf("%f", lng),
		}}
	}

	return nil
}
|
||||
|
||||
func parseRawGPS(gpsStr string, ref string) float64 {
|
||||
elem := strings.Split(gpsStr, ",")
|
||||
if len(elem) < 1 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var (
|
||||
deg float64
|
||||
minutes float64
|
||||
seconds float64
|
||||
)
|
||||
|
||||
deg = getGpsElemValue(elem[0])
|
||||
if len(elem) >= 2 {
|
||||
minutes = getGpsElemValue(elem[1])
|
||||
}
|
||||
if len(elem) >= 3 {
|
||||
seconds = getGpsElemValue(elem[2])
|
||||
}
|
||||
|
||||
decimal := deg + minutes/60.0 + seconds/3600.0
|
||||
|
||||
if ref == "S" || ref == "W" {
|
||||
return -decimal
|
||||
}
|
||||
|
||||
return decimal
|
||||
}
|
||||
|
||||
// getGpsElemValue parses a single EXIF rational such as
// "162680820/10000000" into a float64. Malformed input, more than one "/",
// or a zero denominator all yield 0.
func getGpsElemValue(elm string) float64 {
	numStr, denStr, ok := strings.Cut(elm, "/")
	if !ok {
		return 0
	}

	num, numErr := strconv.ParseFloat(numStr, 64)
	den, denErr := strconv.ParseFloat(denStr, 64)
	if numErr != nil || denErr != nil || den == 0 {
		return 0
	}

	return num / den
}
|
||||
382
pkg/filemanager/driver/upyun/upyun.go
Normal file
382
pkg/filemanager/driver/upyun/upyun.go
Normal file
@@ -0,0 +1,382 @@
|
||||
package upyun
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/hmac"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/upyun/go-sdk/upyun"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type (
|
||||
// UploadPolicy 又拍云上传策略
|
||||
UploadPolicy struct {
|
||||
Bucket string `json:"bucket"`
|
||||
SaveKey string `json:"save-key"`
|
||||
Expiration int64 `json:"expiration"`
|
||||
CallbackURL string `json:"notify-url"`
|
||||
ContentLength uint64 `json:"content-length"`
|
||||
ContentLengthRange string `json:"content-length-range,omitempty"`
|
||||
AllowFileType string `json:"allow-file-type,omitempty"`
|
||||
}
|
||||
// Driver 又拍云策略适配器
|
||||
Driver struct {
|
||||
policy *ent.StoragePolicy
|
||||
|
||||
up *upyun.UpYun
|
||||
settings setting.Provider
|
||||
l logging.Logger
|
||||
config conf.ConfigProvider
|
||||
mime mime.MimeDetector
|
||||
httpClient request.Client
|
||||
}
|
||||
)
|
||||
|
||||
var (
|
||||
features = &boolset.BooleanSet{}
|
||||
)
|
||||
|
||||
// New constructs an Upyun storage driver from the given policy, wiring up an
// HTTP client for REST-style media requests and the Upyun SDK client with
// the bucket/operator credentials.
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
	config conf.ConfigProvider, l logging.Logger, mime mime.MimeDetector) (*Driver, error) {
	driver := &Driver{
		policy:     policy,
		settings:   settings,
		config:     config,
		l:          l,
		mime:       mime,
		httpClient: request.NewClient(config, request.WithLogger(l)),
		up: upyun.NewUpYun(&upyun.UpYunConfig{
			Bucket:   policy.BucketName,
			Operator: policy.AccessKey,
			Password: policy.SecretKey,
		}),
	}

	return driver, nil
}
|
||||
|
||||
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) {
|
||||
// base = strings.TrimPrefix(base, "/")
|
||||
//
|
||||
// // 用于接受SDK返回对象的chan
|
||||
// objChan := make(chan *upyun.FileInfo)
|
||||
// objects := []*upyun.FileInfo{}
|
||||
//
|
||||
// // 列取配置
|
||||
// listConf := &upyun.GetObjectsConfig{
|
||||
// Path: "/" + base,
|
||||
// ObjectsChan: objChan,
|
||||
// MaxListTries: 1,
|
||||
// }
|
||||
// // 递归列取时不限制递归次数
|
||||
// if recursive {
|
||||
// listConf.MaxListLevel = -1
|
||||
// }
|
||||
//
|
||||
// // 启动一个goroutine收集列取的对象信
|
||||
// wg := &sync.WaitGroup{}
|
||||
// wg.Add(1)
|
||||
// go func(input chan *upyun.FileInfo, output *[]*upyun.FileInfo, wg *sync.WaitGroup) {
|
||||
// defer wg.Done()
|
||||
// for {
|
||||
// file, ok := <-input
|
||||
// if !ok {
|
||||
// return
|
||||
// }
|
||||
// *output = append(*output, file)
|
||||
// }
|
||||
// }(objChan, &objects, wg)
|
||||
//
|
||||
// up := upyun.NewUpYun(&upyun.UpYunConfig{
|
||||
// Bucket: handler.policy.BucketName,
|
||||
// Operator: handler.policy.AccessKey,
|
||||
// Password: handler.policy.SecretKey,
|
||||
// })
|
||||
//
|
||||
// err := up.List(listConf)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
//
|
||||
// wg.Wait()
|
||||
//
|
||||
// // 汇总处理列取结果
|
||||
// res := make([]response.Object, 0, len(objects))
|
||||
// for _, object := range objects {
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(object.Name),
|
||||
// RelativePath: object.Name,
|
||||
// Source: path.Join(base, object.Name),
|
||||
// Size: uint64(object.Size),
|
||||
// IsDir: object.IsDir,
|
||||
// LastModify: object.Time,
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// return res, nil
|
||||
//}
|
||||
|
||||
// Open is not supported by the Upyun driver; objects cannot be exposed as
// local *os.File handles.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	return nil, errors.New("not implemented")
}
|
||||
|
||||
// Put 将文件流保存到指定目录
|
||||
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
|
||||
defer file.Close()
|
||||
|
||||
// 是否允许覆盖
|
||||
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
|
||||
if !overwrite {
|
||||
if _, err := handler.up.GetInfo(file.Props.SavePath); err == nil {
|
||||
return fs.ErrFileExisted
|
||||
}
|
||||
}
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
err := handler.up.Put(&upyun.PutObjectConfig{
|
||||
Path: file.Props.SavePath,
|
||||
Reader: file,
|
||||
Headers: map[string]string{
|
||||
"Content-Type": mimeType,
|
||||
},
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete removes one or more files from Upyun (asynchronously on the Upyun
// side). "Not found"-style errors are treated as success; other failures
// are collected and returned together with the last error encountered.
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
	failed := make([]string, 0)
	var lastErr error

	for _, file := range files {
		if err := handler.up.Delete(&upyun.DeleteObjectConfig{
			Path:  file,
			Async: true,
		}); err != nil {
			// Strip the path from the error text so the "not found" check is
			// not fooled by file names containing those words.
			filteredErr := strings.ReplaceAll(err.Error(), file, "")
			if strings.Contains(filteredErr, "Not found") ||
				strings.Contains(filteredErr, "NoSuchKey") {
				continue
			}

			failed = append(failed, file)
			lastErr = err
		}
	}

	return failed, lastErr
}
|
||||
|
||||
// Thumb returns a URL to Upyun's image-resize endpoint ("!/fwfh/WxH") for
// the entity, using the configured thumbnail dimensions. The URL is
// token-signed by signURL when the policy is private.
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
	w, h := handler.settings.ThumbSize(ctx)

	thumbParam := fmt.Sprintf("!/fwfh/%dx%d", w, h)
	thumbURL, err := handler.signURL(ctx, e.Source()+thumbParam, nil, expire)
	if err != nil {
		return "", err
	}

	return thumbURL, nil
}
|
||||
|
||||
// Source 获取外链URL
|
||||
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
|
||||
query := url.Values{}
|
||||
|
||||
// 如果是下载文件URL
|
||||
if args.IsDownload {
|
||||
query.Add("_upd", args.DisplayName)
|
||||
}
|
||||
|
||||
return handler.signURL(ctx, e.Source(), &query, args.Expire)
|
||||
}
|
||||
|
||||
// signURL builds the public URL for path relative to the policy's proxy
// server and, when the policy is private, appends UPYUN's "_upt" token
// anti-leech signature. A nil expire defaults to roughly 20 years.
func (handler *Driver) signURL(ctx context.Context, path string, query *url.Values, expire *time.Time) (string, error) {
	sourceURL, err := url.Parse(handler.policy.Settings.ProxyServer)
	if err != nil {
		return "", err
	}

	// NOTE(review): url.PathEscape escapes "/" as well — presumably path is
	// intended to be treated as a single escaped reference here; confirm.
	fileKey, err := url.Parse(url.PathEscape(path))
	if err != nil {
		return "", err
	}

	sourceURL = sourceURL.ResolveReference(fileKey)
	if query != nil {
		// Caller-provided query replaces any query on the proxy base URL.
		sourceURL.RawQuery = query.Encode()

	}

	if !handler.policy.IsPrivate {
		// Token anti-leech disabled: return the plain URL as-is.
		return sourceURL.String(), nil
	}

	// Default expiration: ~20 years from now, unless an explicit expire is given.
	etime := time.Now().Add(time.Duration(24) * time.Hour * 365 * 20).Unix()
	if expire != nil {
		etime = expire.Unix()
	}
	// UPYUN token format: middle 8 hex chars of md5("token&etime&path") + etime.
	signStr := fmt.Sprintf(
		"%s&%d&%s",
		handler.policy.Settings.Token,
		etime,
		sourceURL.Path,
	)
	signMd5 := fmt.Sprintf("%x", md5.Sum([]byte(signStr)))
	finalSign := signMd5[12:20] + strconv.FormatInt(etime, 10)

	// Attach the signature to the URL's query string.
	q := sourceURL.Query()
	q.Add("_upt", finalSign)
	sourceURL.RawQuery = q.Encode()

	return sourceURL.String(), nil
}
|
||||
|
||||
// Token 获取上传策略和认证Token
|
||||
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
if _, err := handler.up.GetInfo(file.Props.SavePath); err == nil {
|
||||
return nil, fs.ErrFileExisted
|
||||
}
|
||||
|
||||
// 生成回调地址
|
||||
siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
|
||||
apiUrl := routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeUpyun, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()
|
||||
|
||||
// 上传策略
|
||||
putPolicy := UploadPolicy{
|
||||
Bucket: handler.policy.BucketName,
|
||||
SaveKey: file.Props.SavePath,
|
||||
Expiration: uploadSession.Props.ExpireAt.Unix(),
|
||||
CallbackURL: apiUrl,
|
||||
ContentLength: uint64(file.Props.Size),
|
||||
ContentLengthRange: fmt.Sprintf("0,%d", file.Props.Size),
|
||||
}
|
||||
|
||||
// 生成上传凭证
|
||||
policyJSON, err := json.Marshal(putPolicy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
policyEncoded := base64.StdEncoding.EncodeToString(policyJSON)
|
||||
|
||||
// 生成签名
|
||||
elements := []string{"POST", "/" + handler.policy.BucketName, policyEncoded}
|
||||
signStr := sign(handler.policy.AccessKey, handler.policy.SecretKey, elements)
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
return &fs.UploadCredential{
|
||||
UploadPolicy: policyEncoded,
|
||||
UploadURLs: []string{"https://v0.api.upyun.com/" + handler.policy.BucketName},
|
||||
Credential: signStr,
|
||||
MimeType: mimeType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CancelToken invalidates a previously issued upload credential. UPYUN
// upload policies expire on their own, so this is a no-op.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	return nil
}
|
||||
|
||||
// CompleteUpload finalizes a client-side upload session. No server-side
// completion step is required for UPYUN, so this is a no-op.
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	return nil
}
|
||||
|
||||
func (handler *Driver) Capabilities() *driver.Capabilities {
|
||||
mediaMetaExts := handler.policy.Settings.MediaMetaExts
|
||||
if !handler.policy.Settings.NativeMediaProcessing {
|
||||
mediaMetaExts = nil
|
||||
}
|
||||
return &driver.Capabilities{
|
||||
StaticFeatures: features,
|
||||
MediaMetaSupportedExts: mediaMetaExts,
|
||||
MediaMetaProxy: handler.policy.Settings.MediaMetaGeneratorProxy,
|
||||
ThumbSupportedExts: handler.policy.Settings.ThumbExts,
|
||||
ThumbProxy: handler.policy.Settings.ThumbGeneratorProxy,
|
||||
ThumbMaxSize: handler.policy.Settings.ThumbMaxSize,
|
||||
ThumbSupportAllExts: handler.policy.Settings.ThumbSupportAllExts,
|
||||
}
|
||||
}
|
||||
|
||||
// MediaMeta extracts media metadata for the object at path. Only image
// metadata extraction is implemented; ext is not consulted here.
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
	return handler.extractImageMeta(ctx, path)
}
|
||||
|
||||
// LocalPath returns the local filesystem path for the object. UPYUN objects
// have no local path, so this always returns an empty string.
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	return ""
}
|
||||
|
||||
func ValidateCallback(c *gin.Context, session *fs.UploadSession) error {
|
||||
body, err := io.ReadAll(c.Request.Body)
|
||||
c.Request.Body.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read request body: %w", err)
|
||||
}
|
||||
|
||||
c.Request.Body = io.NopCloser(bytes.NewReader(body))
|
||||
contentMD5 := c.Request.Header.Get("Content-Md5")
|
||||
date := c.Request.Header.Get("Date")
|
||||
actualSignature := c.Request.Header.Get("Authorization")
|
||||
actualContentMD5 := fmt.Sprintf("%x", md5.Sum(body))
|
||||
if actualContentMD5 != contentMD5 {
|
||||
return errors.New("MD5 mismatch")
|
||||
}
|
||||
|
||||
// Compare signature
|
||||
signature := sign(session.Policy.AccessKey, session.Policy.SecretKey, []string{
|
||||
"POST",
|
||||
c.Request.URL.Path,
|
||||
date,
|
||||
contentMD5,
|
||||
})
|
||||
if signature != actualSignature {
|
||||
return errors.New("Signature not match")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sign computes the UPYUN authorization header value ("UPYUN ak:signature")
// for the given access key, secret key, and request elements. The elements
// are joined with "&" and HMAC-SHA1 signed using the hex MD5 of the secret
// key as the password, then base64-encoded.
func sign(ak, sk string, elements []string) string {
	password := []byte(fmt.Sprintf("%x", md5.Sum([]byte(sk))))
	h := hmac.New(sha1.New, password)
	h.Write([]byte(strings.Join(elements, "&")))
	digest := base64.StdEncoding.EncodeToString(h.Sum(nil))
	return fmt.Sprintf("UPYUN %s:%s", ak, digest)
}
|
||||
37
pkg/filemanager/driver/util.go
Normal file
37
pkg/filemanager/driver/util.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package driver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func ApplyProxyIfNeeded(policy *ent.StoragePolicy, srcUrl *url.URL) (*url.URL, error) {
|
||||
// For custom proxy, generate a new proxyed URL:
|
||||
// [Proxy Scheme][Proxy Host][Proxy Port][ProxyPath + OriginSrcPath][OriginSrcQuery + ProxyQuery]
|
||||
if policy.Settings.CustomProxy {
|
||||
proxy, err := url.Parse(policy.Settings.ProxyServer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse proxy URL: %w", err)
|
||||
}
|
||||
proxy.Path = path.Join(proxy.Path, strings.TrimPrefix(srcUrl.Path, "/"))
|
||||
q := proxy.Query()
|
||||
if len(q) == 0 {
|
||||
proxy.RawQuery = srcUrl.RawQuery
|
||||
} else {
|
||||
// Merge query parameters
|
||||
srcQ := srcUrl.Query()
|
||||
for k, _ := range srcQ {
|
||||
q.Set(k, srcQ.Get(k))
|
||||
}
|
||||
|
||||
proxy.RawQuery = q.Encode()
|
||||
}
|
||||
|
||||
srcUrl = proxy
|
||||
}
|
||||
|
||||
return srcUrl, nil
|
||||
}
|
||||
877
pkg/filemanager/fs/dbfs/dbfs.go
Normal file
877
pkg/filemanager/fs/dbfs/dbfs.go
Normal file
@@ -0,0 +1,877 @@
|
||||
package dbfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/constants"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/gofrs/uuid"
|
||||
"github.com/samber/lo"
|
||||
"golang.org/x/tools/container/intsets"
|
||||
)
|
||||
|
||||
// Header names and cache-key prefixes used by the database file system.
const (
	// ContextHintHeader is the HTTP header carrying a navigator context hint ID.
	ContextHintHeader = constants.CrHeaderPrefix + "Context-Hint"
	// NavigatorStateCachePrefix prefixes cache keys that store navigator state.
	NavigatorStateCachePrefix = "navigator_state_"
	// ContextHintTTL is the context hint lifetime, in seconds.
	ContextHintTTL = 5 * 60 // 5 minutes

	// folderSummaryCachePrefix prefixes cache keys for computed folder summaries.
	folderSummaryCachePrefix = "folder_summary_"
)
|
||||
|
||||
type (
	// ContextHintCtxKey is the context key carrying the navigator context hint ID.
	ContextHintCtxKey struct{}
	// ByPassOwnerCheckCtxKey is the context key that, when set, skips owner checks.
	ByPassOwnerCheckCtxKey struct{}
)
|
||||
|
||||
// NewDatabaseFS constructs a database-backed file system for user u, wiring
// in the inventory clients, lock system, settings provider, hash encoder,
// and the two cache drivers (general cache and navigator state store).
func NewDatabaseFS(u *ent.User, fileClient inventory.FileClient, shareClient inventory.ShareClient,
	l logging.Logger, ls lock.LockSystem, settingClient setting.Provider,
	storagePolicyClient inventory.StoragePolicyClient, hasher hashid.Encoder, userClient inventory.UserClient,
	cache, stateKv cache.Driver) fs.FileSystem {
	return &DBFS{
		user:                u,
		navigators:          make(map[string]Navigator),
		fileClient:          fileClient,
		shareClient:         shareClient,
		l:                   l,
		ls:                  ls,
		settingClient:       settingClient,
		storagePolicyClient: storagePolicyClient,
		hasher:              hasher,
		userClient:          userClient,
		cache:               cache,
		stateKv:             stateKv,
	}
}
|
||||
|
||||
// DBFS is the database-backed implementation of fs.FileSystem. It lazily
// creates one Navigator per file-system namespace and caches it in
// navigators, guarded by mu.
type DBFS struct {
	user                *ent.User
	navigators          map[string]Navigator // keyed by navigator ID; guarded by mu
	fileClient          inventory.FileClient
	userClient          inventory.UserClient
	storagePolicyClient inventory.StoragePolicyClient
	shareClient         inventory.ShareClient
	l                   logging.Logger
	ls                  lock.LockSystem
	settingClient       setting.Provider
	hasher              hashid.Encoder
	cache               cache.Driver // general-purpose cache (e.g. folder summaries)
	stateKv             cache.Driver // navigator state store for context hints
	mu                  sync.Mutex   // protects navigators
}
|
||||
|
||||
// Recycle releases resources held by every cached navigator.
func (f *DBFS) Recycle() {
	for _, navigator := range f.navigators {
		navigator.Recycle()
	}
}
|
||||
|
||||
func (f *DBFS) GetEntity(ctx context.Context, entityID int) (fs.Entity, error) {
|
||||
if entityID == 0 {
|
||||
return fs.NewEmptyEntity(f.user), nil
|
||||
}
|
||||
|
||||
files, _, err := f.fileClient.GetEntitiesByIDs(ctx, []int{entityID}, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get entity: %w", err)
|
||||
}
|
||||
|
||||
if len(files) == 0 {
|
||||
return nil, fs.ErrEntityNotExist
|
||||
}
|
||||
|
||||
return fs.NewEntity(files[0]), nil
|
||||
|
||||
}
|
||||
|
||||
// List resolves the folder at path and returns it together with a paginated
// listing of its children. Options control pagination, ordering, metadata /
// share loading, context-hint generation, and streaming of partial results.
func (f *DBFS) List(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.File, *fs.ListFileResult, error) {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	// Get navigator
	navigator, err := f.getNavigator(ctx, path, NavigatorCapabilityListChildren)
	if err != nil {
		return nil, nil, err
	}

	searchParams := path.SearchParameters()
	isSearching := searchParams != nil

	// Validate pagination args: clamp page size to the navigator's maximum.
	props := navigator.Capabilities(isSearching)
	if o.PageSize > props.MaxPageSize {
		o.PageSize = props.MaxPageSize
	}

	parent, err := f.getFileByPath(ctx, navigator, path)
	if err != nil {
		return nil, nil, fmt.Errorf("Parent not exist: %w", err)
	}

	// Optionally mint a fresh context-hint ID for state continuity across requests.
	var hintId *uuid.UUID
	if o.generateContextHint {
		newHintId := uuid.Must(uuid.NewV4())
		hintId = &newHintId
	}

	if o.loadFilePublicMetadata {
		ctx = context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, true)
	}
	// Shares are only loaded when the requester owns the parent.
	if o.loadFileShareIfOwned && parent != nil && parent.OwnerID() == f.user.ID {
		ctx = context.WithValue(ctx, inventory.LoadFileShare{}, true)
	}

	// Wrap the caller's streaming callback to convert *File to fs.File.
	var streamCallback func([]*File)
	if o.streamListResponseCallback != nil {
		streamCallback = func(files []*File) {
			o.streamListResponseCallback(parent, lo.Map(files, func(item *File, index int) fs.File {
				return item
			}))
		}
	}

	children, err := navigator.Children(ctx, parent, &ListArgs{
		Page: &inventory.PaginationArgs{
			Page:                o.FsOption.Page,
			PageSize:            o.PageSize,
			OrderBy:             o.OrderBy,
			Order:               inventory.OrderDirection(o.OrderDirection),
			UseCursorPagination: o.useCursorPagination,
			PageToken:           o.pageToken,
		},
		Search:         searchParams,
		StreamCallback: streamCallback,
	})
	if err != nil {
		return nil, nil, fmt.Errorf("failed to get children: %w", err)
	}

	// Preferred policy is advisory for the listing; failure only logs a warning.
	var storagePolicy *ent.StoragePolicy
	if parent != nil {
		storagePolicy, err = f.getPreferredPolicy(ctx, parent)
		if err != nil {
			f.l.Warning("Failed to get preferred policy: %v", err)
		}
	}

	return parent, &fs.ListFileResult{
		Files: lo.Map(children.Files, func(item *File, index int) fs.File {
			return item
		}),
		Props:                 props,
		Pagination:            children.Pagination,
		ContextHint:           hintId,
		RecursionLimitReached: children.RecursionLimitReached,
		MixedType:             children.MixedType,
		SingleFileView:        children.SingleFileView,
		Parent:                parent,
		StoragePolicy:         storagePolicy,
	}, nil
}
|
||||
|
||||
// Capacity reports storage usage and the total quota taken from the user's
// group. The group edge on u must already be loaded.
func (f *DBFS) Capacity(ctx context.Context, u *ent.User) (*fs.Capacity, error) {
	// First, get user's available storage packs
	var (
		res = &fs.Capacity{}
	)

	requesterGroup, err := u.Edges.GroupOrErr()
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to get user's group", err)
	}

	// NOTE(review): Used is read from f.user while the quota comes from the
	// u parameter — presumably callers always pass the FS owner here; confirm.
	res.Used = f.user.Storage
	res.Total = requesterGroup.MaxStorage
	return res, nil
}
|
||||
|
||||
// CreateEntity creates a new entity (blob/version) for an existing file
// inside a transaction, optionally removing stale entities first. When the
// caller supplies a previous version ID, the write is rejected with
// fs.ErrStaleVersion unless it still matches the file's primary entity
// (optimistic concurrency control).
func (f *DBFS) CreateEntity(ctx context.Context, file fs.File, policy *ent.StoragePolicy,
	entityType types.EntityType, req *fs.UploadRequest, opts ...fs.Option) (fs.Entity, error) {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	// If uploader specified previous latest version ID (etag), we should check if it's still valid.
	if o.previousVersion != "" {
		entityId, err := f.hasher.Decode(o.previousVersion, hashid.EntityID)
		if err != nil {
			return nil, serializer.NewError(serializer.CodeParamErr, "Unknown version ID", err)
		}

		entities, err := file.(*File).Model.Edges.EntitiesOrErr()
		if err != nil || entities == nil {
			return nil, fmt.Errorf("create entity: previous entities not load")
		}

		// File is stale during edit if the latest entity is not the same as the one specified by uploader.
		if e := file.PrimaryEntity(); e == nil || e.ID() != entityId {
			return nil, fs.ErrStaleVersion
		}
	}

	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	fileModel := file.(*File).Model
	if o.removeStaleEntities {
		// Stale-entity removal happens in the same transaction, and its
		// storage delta is accumulated for the final commit.
		storageDiff, err := fc.RemoveStaleEntities(ctx, fileModel)
		if err != nil {
			_ = inventory.Rollback(tx)
			return nil, serializer.NewError(serializer.CodeDBError, "Failed to remove stale entities", err)
		}

		tx.AppendStorageDiff(storageDiff)
	}

	entity, storageDiff, err := fc.CreateEntity(ctx, fileModel, &inventory.EntityParameters{
		OwnerID:         file.(*File).Owner().ID,
		EntityType:      entityType,
		StoragePolicyID: policy.ID,
		Source:          req.Props.SavePath,
		Size:            req.Props.Size,
		UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID),
	})
	if err != nil {
		_ = inventory.Rollback(tx)

		return nil, serializer.NewError(serializer.CodeDBError, "Failed to create entity", err)
	}
	tx.AppendStorageDiff(storageDiff)

	// Commit applies the accumulated storage diff to the owner's quota.
	if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit create change", err)
	}

	return fs.NewEntity(entity), nil
}
|
||||
|
||||
// PatchMetadata applies metadata patches (upserts and removals) to every
// file addressed by path. Per-target resolution errors are aggregated and
// returned together; the metadata write itself is all-or-nothing within one
// transaction, performed while all targets are locked.
func (f *DBFS) PatchMetadata(ctx context.Context, path []*fs.URI, metas ...fs.MetadataPatch) error {
	ae := serializer.NewAggregateError()
	targets := make([]*File, 0, len(path))
	for _, p := range path {
		navigator, err := f.getNavigator(ctx, p, NavigatorCapabilityUpdateMetadata, NavigatorCapabilityLockFile)
		if err != nil {
			ae.Add(p.String(), err)
			continue
		}

		target, err := f.getFileByPath(ctx, navigator, p)
		if err != nil {
			ae.Add(p.String(), fmt.Errorf("failed to get target file: %w", err))
			continue
		}

		// Require Update permission
		// NOTE(review): unlike the other per-target failures, an owner-check
		// failure returns immediately instead of being aggregated — confirm
		// this asymmetry is intended.
		if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && target.OwnerID() != f.user.ID {
			return fs.ErrOwnerOnly.WithError(fmt.Errorf("permission denied"))
		}

		if target.IsRootFolder() {
			ae.Add(p.String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot move root folder")))
			continue
		}

		targets = append(targets, target)
	}

	if len(targets) == 0 {
		return ae.Aggregate()
	}

	// Lock all targets
	lockTargets := lo.Map(targets, func(value *File, key int) *LockByPath {
		return &LockByPath{value.Uri(true), value, value.Type(), ""}
	})
	ls, err := f.acquireByPath(ctx, -1, f.user, true, fs.LockApp(fs.ApplicationUpdateMetadata), lockTargets...)
	defer func() { _ = f.Release(ctx, ls) }()
	if err != nil {
		return err
	}

	// Split the patches into upserts (with their private flags) and removals.
	metadataMap := make(map[string]string)
	privateMap := make(map[string]bool)
	deleted := make([]string, 0)
	for _, meta := range metas {
		if meta.Remove {
			deleted = append(deleted, meta.Key)
			continue
		}
		metadataMap[meta.Key] = meta.Value
		if meta.Private {
			privateMap[meta.Key] = meta.Private
		}
	}

	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	for _, target := range targets {
		if err := fc.UpsertMetadata(ctx, target.Model, metadataMap, privateMap); err != nil {
			_ = inventory.Rollback(tx)
			return fmt.Errorf("failed to upsert metadata: %w", err)
		}

		if len(deleted) > 0 {
			if err := fc.RemoveMetadata(ctx, target.Model, deleted...); err != nil {
				_ = inventory.Rollback(tx)
				return fmt.Errorf("failed to remove metadata: %w", err)
			}
		}
	}

	if err := inventory.Commit(tx); err != nil {
		return serializer.NewError(serializer.CodeDBError, "Failed to commit metadata change", err)
	}

	return ae.Aggregate()
}
|
||||
|
||||
// SharedAddressTranslation resolves path, recursively following symbolic
// (shared) folders via their redirect metadata until a concrete file is
// reached. If the URI does not exist, the most recent existing ancestor is
// returned together with the original path and the lookup error.
func (f *DBFS) SharedAddressTranslation(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.File, *fs.URI, error) {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	// Get navigator
	navigator, err := f.getNavigator(ctx, path, o.requiredCapabilities...)
	if err != nil {
		return nil, nil, err
	}

	ctx = context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, true)
	if o.loadFileEntities {
		ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)
	}

	uriTranslation := func(target *File, rebase bool) (fs.File, *fs.URI, error) {
		// Translate shared address to real address using the redirect
		// stored in the symbolic folder's metadata.
		metadata := target.Metadata()
		if metadata == nil {
			// Metadata not loaded yet; fetch it on demand.
			if err := f.fileClient.QueryMetadata(ctx, target.Model); err != nil {
				return nil, nil, fmt.Errorf("failed to query metadata: %w", err)
			}
			metadata = target.Metadata()
		}
		redirect, ok := metadata[MetadataSharedRedirect]
		if !ok {
			return nil, nil, fmt.Errorf("missing metadata %s in symbolic folder %s", MetadataSharedRedirect, path)
		}

		redirectUri, err := fs.NewUriFromString(redirect)
		if err != nil {
			return nil, nil, fmt.Errorf("invalid redirect uri %s in symbolic folder %s", redirect, path)
		}
		newUri := redirectUri
		if rebase {
			// Keep the remainder of the requested path below the symbolic folder.
			newUri = redirectUri.Rebase(path, target.Uri(false))
		}
		// Recurse: the redirect target may itself be symbolic.
		return f.SharedAddressTranslation(ctx, newUri, opts...)
	}

	target, err := f.getFileByPath(ctx, navigator, path)
	if err != nil {
		// A symbolic folder on the way: translate and continue below it.
		if errors.Is(err, ErrSymbolicFolderFound) && target.Type() == types.FileTypeFolder {
			return uriTranslation(target, true)
		}

		if !ent.IsNotFound(err) {
			return nil, nil, fmt.Errorf("failed to get target file: %w", err)
		}

		// Request URI does not exist, return most recent ancestor
		return target, path, err
	}

	if target.IsSymbolic() {
		return uriTranslation(target, false)
	}

	return target, path, nil
}
|
||||
|
||||
// Get resolves the file at path. Options control which extras are loaded:
// public metadata, entities, shares, entity users, extended info (storage
// policies per entity), and — for folders owned by the requester — a cached
// recursive folder summary (file/folder counts and total size).
func (f *DBFS) Get(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.File, error) {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	// Get navigator
	navigator, err := f.getNavigator(ctx, path, o.requiredCapabilities...)
	if err != nil {
		return nil, err
	}

	// Propagate load flags to the inventory layer via context.
	if o.loadFilePublicMetadata || o.extendedInfo {
		ctx = context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, true)
	}

	if o.loadFileEntities || o.extendedInfo || o.loadFolderSummary {
		ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)
	}

	if o.loadFileShareIfOwned {
		ctx = context.WithValue(ctx, inventory.LoadFileShare{}, true)
	}

	if o.loadEntityUser {
		ctx = context.WithValue(ctx, inventory.LoadEntityUser{}, true)
	}

	// Get target file
	target, err := f.getFileByPath(ctx, navigator, path)
	if err != nil {
		return nil, fmt.Errorf("failed to get target file: %w", err)
	}

	if o.extendedInfo && target != nil {
		extendedInfo := &fs.FileExtendedInfo{
			StorageUsed:           target.SizeUsed(),
			EntityStoragePolicies: make(map[int]*ent.StoragePolicy),
		}
		// The file's own storage policy is best-effort: a lookup failure
		// simply leaves StoragePolicy unset.
		policyID := target.PolicyID()
		if policyID > 0 {
			policy, err := f.storagePolicyClient.GetPolicyByID(ctx, policyID)
			if err == nil {
				extendedInfo.StoragePolicy = policy
			}
		}

		target.FileExtendedInfo = extendedInfo
		// Shares are exposed only to the owner or an admin.
		if target.OwnerID() == f.user.ID || f.user.Edges.Group.Permissions.Enabled(int(types.GroupPermissionIsAdmin)) {
			target.FileExtendedInfo.Shares = target.Model.Edges.Shares
		}

		// Resolve the storage policy of each distinct entity exactly once.
		entities := target.Entities()
		for _, entity := range entities {
			if _, ok := extendedInfo.EntityStoragePolicies[entity.PolicyID()]; !ok {
				policy, err := f.storagePolicyClient.GetPolicyByID(ctx, entity.PolicyID())
				if err != nil {
					return nil, fmt.Errorf("failed to get policy: %w", err)
				}

				extendedInfo.EntityStoragePolicies[entity.PolicyID()] = policy
			}
		}
	}

	// Calculate folder summary if requested
	if o.loadFolderSummary && target != nil && target.Type() == types.FileTypeFolder {
		if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && target.OwnerID() != f.user.ID {
			return nil, fs.ErrOwnerOnly
		}

		// first, try to load from cache
		summary, ok := f.cache.Get(fmt.Sprintf("%s%d", folderSummaryCachePrefix, target.ID()))
		if ok {
			summaryTyped := summary.(fs.FolderSummary)
			target.FileFolderSummary = &summaryTyped
		} else {
			// cache miss, walk the folder to get the summary
			newSummary := &fs.FolderSummary{Completed: true}
			if f.user.Edges.Group == nil {
				return nil, fmt.Errorf("user group not loaded")
			}
			// Bound the walk by the group's file limit (at least 1).
			limit := max(f.user.Edges.Group.Settings.MaxWalkedFiles, 1)

			// disable load metadata to speed up
			ctxWalk := context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, false)
			if err := navigator.Walk(ctxWalk, []*File{target}, limit, intsets.MaxInt, func(files []*File, l int) error {
				for _, file := range files {
					// Skip the folder itself; only descendants are counted.
					if file.ID() == target.ID() {
						continue
					}
					if file.Type() == types.FileTypeFile {
						newSummary.Files++
					} else {
						newSummary.Folders++
					}

					newSummary.Size += file.SizeUsed()
				}
				return nil
			}); err != nil {
				if !errors.Is(err, ErrFileCountLimitedReached) {
					return nil, fmt.Errorf("failed to walk: %w", err)
				}

				// Hit the walk limit: mark the summary as partial.
				newSummary.Completed = false
			}

			// cache the summary
			newSummary.CalculatedAt = time.Now()
			f.cache.Set(fmt.Sprintf("%s%d", folderSummaryCachePrefix, target.ID()), newSummary, f.settingClient.FolderPropsCacheTTL(ctx))
			target.FileFolderSummary = newSummary
		}
	}

	if target == nil {
		return nil, fmt.Errorf("cannot get root file with nil root")
	}

	return target, nil
}
|
||||
|
||||
func (f *DBFS) CheckCapability(ctx context.Context, uri *fs.URI, opts ...fs.Option) error {
|
||||
o := newDbfsOption()
|
||||
for _, opt := range opts {
|
||||
o.apply(opt)
|
||||
}
|
||||
|
||||
// Get navigator
|
||||
_, err := f.getNavigator(ctx, uri, o.requiredCapabilities...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Walk traverses the tree rooted at path up to depth levels, invoking walk
// for every visited file. The traversal is bounded by the user group's
// MaxWalkedFiles setting and restricted to the requester's own files unless
// the owner check is bypassed via context.
func (f *DBFS) Walk(ctx context.Context, path *fs.URI, depth int, walk fs.WalkFunc, opts ...fs.Option) error {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	if o.loadFilePublicMetadata {
		ctx = context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, true)
	}

	if o.loadFileEntities {
		ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)
	}

	// Get navigator
	navigator, err := f.getNavigator(ctx, path, o.requiredCapabilities...)
	if err != nil {
		return err
	}

	target, err := f.getFileByPath(ctx, navigator, path)
	if err != nil {
		return err
	}

	// Require Read permission
	if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && target.OwnerID() != f.user.ID {
		return fs.ErrOwnerOnly
	}

	// Walk
	if f.user.Edges.Group == nil {
		return fmt.Errorf("user group not loaded")
	}
	// Bound the traversal by the group's file limit (at least 1).
	limit := max(f.user.Edges.Group.Settings.MaxWalkedFiles, 1)

	if err := navigator.Walk(ctx, []*File{target}, limit, depth, func(files []*File, l int) error {
		for _, file := range files {
			if err := walk(file, l); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		return fmt.Errorf("failed to walk: %w", err)
	}

	return nil
}
|
||||
|
||||
func (f *DBFS) ExecuteNavigatorHooks(ctx context.Context, hookType fs.HookType, file fs.File) error {
|
||||
navigator, err := f.getNavigator(ctx, file.Uri(false))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if dbfsFile, ok := file.(*File); ok {
|
||||
return navigator.ExecuteHook(ctx, hookType, dbfsFile)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createFile creates a file with given name and type under given parent
// folder, inside a single transaction. The storage policy comes from the
// option override when present, otherwise from the parent owner's group.
// When an upload request is attached, an initial version entity is created
// alongside the file.
func (f *DBFS) createFile(ctx context.Context, parent *File, name string, fileType types.FileType, o *dbfsOption) (*File, error) {
	createFileArgs := &inventory.CreateFileParameters{
		FileType:            fileType,
		Name:                name,
		MetadataPrivateMask: make(map[string]bool),
		Metadata:            make(map[string]string),
		IsSymbolic:          o.isSymbolicLink,
	}

	// Copy caller-supplied initial metadata.
	if o.Metadata != nil {
		for k, v := range o.Metadata {
			createFileArgs.Metadata[k] = v
		}
	}

	if o.preferredStoragePolicy != nil {
		createFileArgs.StoragePolicyID = o.preferredStoragePolicy.ID
	} else {
		// get preferred storage policy
		policy, err := f.getPreferredPolicy(ctx, parent)
		if err != nil {
			return nil, err
		}

		createFileArgs.StoragePolicyID = policy.ID
	}

	if o.UploadRequest != nil {
		createFileArgs.EntityParameters = &inventory.EntityParameters{
			EntityType:      types.EntityTypeVersion,
			Source:          o.UploadRequest.Props.SavePath,
			Size:            o.UploadRequest.Props.Size,
			ModifiedAt:      o.UploadRequest.Props.LastModified,
			UploadSessionID: uuid.FromStringOrNil(o.UploadRequest.Props.UploadSessionID),
		}
	}

	// Start transaction to create files
	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	file, entity, storageDiff, err := fc.CreateFile(ctx, parent.Model, createFileArgs)
	if err != nil {
		_ = inventory.Rollback(tx)
		// A constraint violation means a sibling with the same name exists.
		if ent.IsConstraintError(err) {
			return nil, fs.ErrFileExisted.WithError(err)
		}

		return nil, serializer.NewError(serializer.CodeDBError, "Failed to create file", err)
	}

	tx.AppendStorageDiff(storageDiff)
	// Commit applies the accumulated storage diff to the owner's quota.
	if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit create change", err)
	}

	file.SetEntities([]*ent.Entity{entity})
	return newFile(parent, file), nil
}
|
||||
|
||||
// getPreferredPolicy tries to get the preferred storage policy for the given file.
|
||||
func (f *DBFS) getPreferredPolicy(ctx context.Context, file *File) (*ent.StoragePolicy, error) {
|
||||
ownerGroup := file.Owner().Edges.Group
|
||||
if ownerGroup == nil {
|
||||
return nil, fmt.Errorf("owner group not loaded")
|
||||
}
|
||||
|
||||
groupPolicy, err := f.storagePolicyClient.GetByGroup(ctx, ownerGroup)
|
||||
if err != nil {
|
||||
return nil, serializer.NewError(serializer.CodeDBError, "Failed to get available storage policies", err)
|
||||
}
|
||||
|
||||
return groupPolicy, nil
|
||||
}
|
||||
|
||||
// getFileByPath resolves path through the navigator. If the user's file
// system has not been initialized yet (no root folder), it is created on
// the fly and the resolution is retried once.
func (f *DBFS) getFileByPath(ctx context.Context, navigator Navigator, path *fs.URI) (*File, error) {
	file, err := navigator.To(ctx, path)
	if err != nil && errors.Is(err, ErrFsNotInitialized) {
		// Initialize file system for user if root folder does not exist.
		// The URI may address another user; fall back to the current user's
		// hashed ID when the URI carries none.
		uid := path.ID(hashid.EncodeUserID(f.hasher, f.user.ID))
		uidInt, err := f.hasher.Decode(uid, hashid.UserID)
		if err != nil {
			return nil, fmt.Errorf("failed to decode user ID: %w", err)
		}

		if err := f.initFs(ctx, uidInt); err != nil {
			return nil, fmt.Errorf("failed to initialize file system: %w", err)
		}
		return navigator.To(ctx, path)
	}

	return file, err
}
|
||||
|
||||
// initFs initializes the file system for the user.
|
||||
func (f *DBFS) initFs(ctx context.Context, uid int) error {
|
||||
f.l.Info("Initialize database file system for user %q", f.user.Email)
|
||||
_, err := f.fileClient.CreateFolder(ctx, nil,
|
||||
&inventory.CreateFolderParameters{
|
||||
Owner: uid,
|
||||
Name: inventory.RootFolderName,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create root folder: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getNavigator returns the Navigator for the file system of path, creating and
// caching one per navigatorId under f.mu. A newly created navigator may restore
// previously persisted state when the context carries a state-ID hint. The
// returned navigator is verified to support all requiredCapabilities.
func (f *DBFS) getNavigator(ctx context.Context, path *fs.URI, requiredCapabilities ...NavigatorCapability) (Navigator, error) {
	pathFs := path.FileSystem()
	config := f.settingClient.DBFS(ctx)
	navigatorId := f.navigatorId(path)
	var (
		res Navigator
	)
	// The navigator cache is shared per DBFS instance; guard lookup + insert.
	f.mu.Lock()
	defer f.mu.Unlock()
	if navigator, ok := f.navigators[navigatorId]; ok {
		res = navigator
	} else {
		var n Navigator
		switch pathFs {
		case constants.FileSystemMy:
			n = NewMyNavigator(f.user, f.fileClient, f.userClient, f.l, config, f.hasher)
		case constants.FileSystemShare:
			n = NewShareNavigator(f.user, f.fileClient, f.shareClient, f.l, config, f.hasher)
		case constants.FileSystemTrash:
			n = NewTrashNavigator(f.user, f.fileClient, f.l, config, f.hasher)
		case constants.FileSystemSharedWithMe:
			n = NewSharedWithMeNavigator(f.user, f.fileClient, f.l, config, f.hasher)
		default:
			return nil, fmt.Errorf("unknown file system %q", pathFs)
		}

		// retrieve state if context hint is provided
		if stateID, ok := ctx.Value(ContextHintCtxKey{}).(uuid.UUID); ok && stateID != uuid.Nil {
			cacheKey := NavigatorStateCachePrefix + stateID.String() + "_" + navigatorId
			if stateRaw, ok := f.stateKv.Get(cacheKey); ok {
				// Restore failure is non-fatal: the navigator just starts fresh.
				if err := n.RestoreState(stateRaw.(State)); err != nil {
					f.l.Warning("Failed to restore state for navigator %q: %s", navigatorId, err)
				} else {
					f.l.Info("Navigator %q restored state (%q) successfully", navigatorId, stateID)
				}
			} else {
				// State expire, refresh it
				n.PersistState(f.stateKv, cacheKey)
			}
		}

		f.navigators[navigatorId] = n
		res = n
	}

	// Check fs capabilities
	capabilities := res.Capabilities(false).Capability
	for _, capability := range requiredCapabilities {
		if !capabilities.Enabled(int(capability)) {
			return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("action %q is not supported under current fs", capability))
		}
	}

	return res, nil
}
|
||||
|
||||
func (f *DBFS) navigatorId(path *fs.URI) string {
|
||||
uidHashed := hashid.EncodeUserID(f.hasher, f.user.ID)
|
||||
switch path.FileSystem() {
|
||||
case constants.FileSystemMy:
|
||||
return fmt.Sprintf("%s/%s/%d", constants.FileSystemMy, path.ID(uidHashed), f.user.ID)
|
||||
case constants.FileSystemShare:
|
||||
return fmt.Sprintf("%s/%s/%d", constants.FileSystemShare, path.ID(uidHashed), f.user.ID)
|
||||
case constants.FileSystemTrash:
|
||||
return fmt.Sprintf("%s/%s", constants.FileSystemTrash, path.ID(uidHashed))
|
||||
default:
|
||||
return fmt.Sprintf("%s/%s/%d", path.FileSystem(), path.ID(uidHashed), f.user.ID)
|
||||
}
|
||||
}
|
||||
|
||||
// generateSavePath generates the physical save path for the upload request.
|
||||
func generateSavePath(policy *ent.StoragePolicy, req *fs.UploadRequest, user *ent.User) string {
|
||||
baseTable := map[string]string{
|
||||
"{randomkey16}": util.RandStringRunes(16),
|
||||
"{randomkey8}": util.RandStringRunes(8),
|
||||
"{timestamp}": strconv.FormatInt(time.Now().Unix(), 10),
|
||||
"{timestamp_nano}": strconv.FormatInt(time.Now().UnixNano(), 10),
|
||||
"{randomnum2}": strconv.Itoa(rand.Intn(2)),
|
||||
"{randomnum3}": strconv.Itoa(rand.Intn(3)),
|
||||
"{randomnum4}": strconv.Itoa(rand.Intn(4)),
|
||||
"{randomnum8}": strconv.Itoa(rand.Intn(8)),
|
||||
"{uid}": strconv.Itoa(user.ID),
|
||||
"{datetime}": time.Now().Format("20060102150405"),
|
||||
"{date}": time.Now().Format("20060102"),
|
||||
"{year}": time.Now().Format("2006"),
|
||||
"{month}": time.Now().Format("01"),
|
||||
"{day}": time.Now().Format("02"),
|
||||
"{hour}": time.Now().Format("15"),
|
||||
"{minute}": time.Now().Format("04"),
|
||||
"{second}": time.Now().Format("05"),
|
||||
}
|
||||
|
||||
dirRule := policy.DirNameRule
|
||||
dirRule = filepath.ToSlash(dirRule)
|
||||
dirRule = util.Replace(baseTable, dirRule)
|
||||
dirRule = util.Replace(map[string]string{
|
||||
"{path}": req.Props.Uri.Dir() + fs.Separator,
|
||||
}, dirRule)
|
||||
|
||||
originName := req.Props.Uri.Name()
|
||||
nameTable := map[string]string{
|
||||
"{originname}": originName,
|
||||
"{ext}": filepath.Ext(originName),
|
||||
"{originname_without_ext}": strings.TrimSuffix(originName, filepath.Ext(originName)),
|
||||
"{uuid}": uuid.Must(uuid.NewV4()).String(),
|
||||
}
|
||||
|
||||
nameRule := policy.FileNameRule
|
||||
nameRule = util.Replace(baseTable, nameRule)
|
||||
nameRule = util.Replace(nameTable, nameRule)
|
||||
|
||||
return path.Join(path.Clean(dirRule), nameRule)
|
||||
}
|
||||
|
||||
func canMoveOrCopyTo(src, dst *fs.URI, isCopy bool) bool {
|
||||
if isCopy {
|
||||
return src.FileSystem() == dst.FileSystem() && src.FileSystem() == constants.FileSystemMy
|
||||
} else {
|
||||
switch src.FileSystem() {
|
||||
case constants.FileSystemMy:
|
||||
return dst.FileSystem() == constants.FileSystemMy || dst.FileSystem() == constants.FileSystemTrash
|
||||
case constants.FileSystemTrash:
|
||||
return dst.FileSystem() == constants.FileSystemMy
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func allAncestors(targets []*File) []*ent.File {
|
||||
return lo.Map(
|
||||
lo.UniqBy(
|
||||
lo.FlatMap(targets, func(value *File, index int) []*File {
|
||||
return value.Ancestors()
|
||||
}),
|
||||
func(item *File) int {
|
||||
return item.ID()
|
||||
},
|
||||
),
|
||||
func(item *File, index int) *ent.File {
|
||||
return item.Model
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// WithBypassOwnerCheck returns a context flagged to skip the owner-equality
// checks performed by DBFS operations (e.g. Create/Rename/Lock).
func WithBypassOwnerCheck(ctx context.Context) context.Context {
	return context.WithValue(ctx, ByPassOwnerCheckCtxKey{}, true)
}
|
||||
335
pkg/filemanager/fs/dbfs/file.go
Normal file
335
pkg/filemanager/fs/dbfs/file.go
Normal file
@@ -0,0 +1,335 @@
|
||||
package dbfs
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
func init() {
	// Register concrete types with gob so values embedding them (e.g. navigator
	// state restored via RestoreState/PersistState) can be encoded/decoded.
	gob.Register(File{})
	gob.Register(shareNavigatorState{})
	gob.Register(map[string]*File{})
	gob.Register(map[int]*File{})
}

// filePool recycles File nodes to reduce allocations; nodes are returned to
// the pool by (*File).Recycle.
var filePool = &sync.Pool{
	New: func() any {
		return &File{
			Children: make(map[string]*File),
		}
	},
}

type (
	// File is an in-memory tree node wrapping an ent.File model, linked to its
	// parent and (partially populated) children.
	File struct {
		Model    *ent.File
		Children map[string]*File
		Parent   *File
		// Path caches this node's URI: index pathIndexRoot (0) is the owner's
		// view, index pathIndexUser (1) is the requesting user's view.
		Path              [2]*fs.URI
		OwnerModel        *ent.User
		IsUserRoot        bool
		CapabilitiesBs    *boolset.BooleanSet
		FileExtendedInfo  *fs.FileExtendedInfo
		FileFolderSummary *fs.FolderSummary

		// mu guards Children; shared by all nodes of one tree (see newFile).
		mu *sync.Mutex
	}
)

const (
	// Reserved metadata keys under the "sys:" prefix.
	MetadataSysPrefix           = "sys:"
	MetadataUploadSessionPrefix = MetadataSysPrefix + "upload_session"
	MetadataUploadSessionID     = MetadataUploadSessionPrefix + "_id"
	MetadataSharedRedirect      = MetadataSysPrefix + "shared_redirect"
	MetadataRestoreUri          = MetadataSysPrefix + "restore_uri"
	MetadataExpectedCollectTime = MetadataSysPrefix + "expected_collect_time"

	// Thumbnail-related metadata keys.
	ThumbMetadataPrefix = "thumb:"
	ThumbDisabledKey    = ThumbMetadataPrefix + "disabled"

	// Indexes into File.Path: owner's view and requesting user's view.
	pathIndexRoot = 0
	pathIndexUser = 1
)
|
||||
|
||||
// Name returns the raw file name stored on the model.
func (f *File) Name() string {
	return f.Model.Name
}

// IsNil reports whether the receiver is a nil *File.
func (f *File) IsNil() bool {
	return f == nil
}

// DisplayName returns the user-facing name. When the metadata carries a
// restore URI (set on trashed entries), the base name of that URI is shown
// instead of the stored entry name; on a malformed URI it falls back to Name.
func (f *File) DisplayName() string {
	if uri, ok := f.Metadata()[MetadataRestoreUri]; ok {
		restoreUri, err := fs.NewUriFromString(uri)
		if err != nil {
			return f.Name()
		}

		return path.Base(restoreUri.Path())
	}

	return f.Name()
}

// CanHaveChildren reports whether the file may contain children: it must be a
// folder and must not be a symbolic link.
func (f *File) CanHaveChildren() bool {
	return f.Type() == types.FileTypeFolder && !f.IsSymbolic()
}

// Ext returns the file name extension.
func (f *File) Ext() string {
	return util.Ext(f.Name())
}

// ID returns the database ID of the file.
func (f *File) ID() int {
	return f.Model.ID
}

// IsSymbolic reports whether the file is a symbolic link.
func (f *File) IsSymbolic() bool {
	return f.Model.IsSymbolic
}

// Type returns the file type (file or folder).
func (f *File) Type() types.FileType {
	return types.FileType(f.Model.Type)
}

// Size returns the logical size of the file in bytes.
func (f *File) Size() int64 {
	return f.Model.Size
}

// SizeUsed returns the total size of all loaded entities of this file.
func (f *File) SizeUsed() int64 {
	return lo.SumBy(f.Entities(), func(item fs.Entity) int64 {
		return item.Size()
	})
}

// UpdatedAt returns the last modification time from the model.
func (f *File) UpdatedAt() time.Time {
	return f.Model.UpdatedAt
}

// CreatedAt returns the creation time from the model.
func (f *File) CreatedAt() time.Time {
	return f.Model.CreatedAt
}

// ExtendedInfo returns the extended info attached to this node, if any.
func (f *File) ExtendedInfo() *fs.FileExtendedInfo {
	return f.FileExtendedInfo
}
|
||||
|
||||
// Owner returns the owner model by walking up the parent chain to the nearest
// node with a loaded OwnerModel; nil when none is loaded anywhere on the chain.
func (f *File) Owner() *ent.User {
	parent := f
	for parent != nil {
		if parent.OwnerModel != nil {
			return parent.OwnerModel
		}
		parent = parent.Parent
	}

	return nil
}

// OwnerID returns the owner's user ID stored on this file's model.
func (f *File) OwnerID() int {
	return f.Model.OwnerID
}

// Shared reports whether the model has at least one loaded share edge.
func (f *File) Shared() bool {
	return len(f.Model.Edges.Shares) > 0
}

// Metadata flattens the loaded metadata edges into a name→value map.
// Returns nil when the metadata edge is not loaded.
func (f *File) Metadata() map[string]string {
	if f.Model.Edges.Metadata == nil {
		return nil
	}
	return lo.Associate(f.Model.Edges.Metadata, func(item *ent.Metadata) (string, string) {
		return item.Name, item.Value
	})
}
|
||||
|
||||
// Uri returns the URI of the file.
// If isRoot is true, the URI will be returned from owner's view.
// Otherwise, the URI will be returned from user's view.
// When the URI is not cached on this node, it is reconstructed by walking up
// to the nearest ancestor with a cached path for the requested view and
// joining the intermediate names; nil is returned if no ancestor has one.
func (f *File) Uri(isRoot bool) *fs.URI {
	index := 1
	if isRoot {
		index = 0
	}
	// Cached path, or a root node (whose cached path may legitimately be nil).
	if f.Path[index] != nil || f.Parent == nil {
		return f.Path[index]
	}

	// Find the root file
	elements := make([]string, 0)
	parent := f
	for parent.Parent != nil && parent.Path[index] == nil {
		// Prepend so elements end up in root-to-leaf order.
		elements = append([]string{parent.Name()}, elements...)
		parent = parent.Parent
	}

	if parent.Path[index] == nil {
		return nil
	}

	return parent.Path[index].Join(elements...)
}
|
||||
|
||||
// UserRoot return the root file from user's view.
|
||||
func (f *File) UserRoot() *File {
|
||||
root := f
|
||||
for root != nil && !root.IsUserRoot {
|
||||
root = root.Parent
|
||||
}
|
||||
|
||||
return root
|
||||
}
|
||||
|
||||
// Root return the root file from owner's view.
|
||||
func (f *File) Root() *File {
|
||||
root := f
|
||||
for root.Parent != nil {
|
||||
root = root.Parent
|
||||
}
|
||||
|
||||
return root
|
||||
}
|
||||
|
||||
// RootUri return the URI of the user root file under owner's view.
|
||||
func (f *File) RootUri() *fs.URI {
|
||||
return f.UserRoot().Uri(true)
|
||||
}
|
||||
|
||||
// Replace swaps this node for a fresh one built from model under the same
// parent. The old node is detached from the parent's children map (under the
// tree mutex) and recycled back to the pool after the replacement is built.
// For root files, the cached user-view path is carried over so the user URI
// stays stable.
func (f *File) Replace(model *ent.File) *File {
	f.mu.Lock()
	delete(f.Parent.Children, f.Model.Name)
	f.mu.Unlock()

	// Recycle after newFile/IsRootFile have read the old node's state.
	defer f.Recycle()
	replaced := newFile(f.Parent, model)
	if f.IsRootFile() {
		// If target is a root file, the user path should remain the same.
		replaced.Path[pathIndexUser] = f.Path[pathIndexUser]
	}

	return replaced
}
|
||||
|
||||
// Ancestors return all ancestors of the file, until the owner root is reached.
|
||||
func (f *File) Ancestors() []*File {
|
||||
return f.AncestorsChain()[1:]
|
||||
}
|
||||
|
||||
// AncestorsChain return all ancestors of the file (including itself), until the owner root is reached.
|
||||
func (f *File) AncestorsChain() []*File {
|
||||
ancestors := make([]*File, 0)
|
||||
parent := f
|
||||
for parent != nil {
|
||||
ancestors = append(ancestors, parent)
|
||||
parent = parent.Parent
|
||||
}
|
||||
|
||||
return ancestors
|
||||
}
|
||||
|
||||
func (f *File) PolicyID() int {
|
||||
root := f
|
||||
return root.Model.StoragePolicyFiles
|
||||
}
|
||||
|
||||
// IsRootFolder return true if the file is the root folder under user's view.
func (f *File) IsRootFolder() bool {
	return f.Type() == types.FileTypeFolder && f.IsRootFile()
}

// IsRootFile return true if the file is the root file under user's view:
// it carries the reserved root folder name, or its user-view URI path is
// empty or just the separator.
func (f *File) IsRootFile() bool {
	uri := f.Uri(false)
	p := uri.Path()
	return f.Model.Name == inventory.RootFolderName || p == fs.Separator || p == ""
}
|
||||
|
||||
// Entities wraps all loaded entity edges of the model as fs.Entity values.
func (f *File) Entities() []fs.Entity {
	return lo.Map(f.Model.Edges.Entities, func(item *ent.Entity, index int) fs.Entity {
		return fs.NewEntity(item)
	})
}

// PrimaryEntity returns the version-typed entity whose ID matches the model's
// PrimaryEntity field. When no such entity is loaded, an empty entity owned by
// the file's owner is returned instead of nil.
func (f *File) PrimaryEntity() fs.Entity {
	primary, _ := lo.Find(f.Model.Edges.Entities, func(item *ent.Entity) bool {
		return item.Type == int(types.EntityTypeVersion) && item.ID == f.Model.PrimaryEntity
	})
	if primary != nil {
		return fs.NewEntity(primary)
	}

	return fs.NewEmptyEntity(f.Owner())
}

// PrimaryEntityID returns the primary entity ID recorded on the model.
func (f *File) PrimaryEntityID() int {
	return f.Model.PrimaryEntity
}
|
||||
|
||||
// FolderSummary returns the folder summary attached to this node, if any.
func (f *File) FolderSummary() *fs.FolderSummary {
	return f.FileFolderSummary
}

// Capabilities returns the capability bitset attached to this node.
func (f *File) Capabilities() *boolset.BooleanSet {
	return f.CapabilitiesBs
}
|
||||
|
||||
// newFile obtains a File node from filePool and wires it under parent:
// it is registered in the parent's Children map, both cached URIs are derived
// from the parent's cached paths when available, and the parent's capability
// bitset and tree mutex are shared. A parentless node gets a fresh mutex.
func newFile(parent *File, model *ent.File) *File {
	f := filePool.Get().(*File)
	f.Model = model

	if parent != nil {
		f.Parent = parent
		// Children map and cached-path reads are guarded by the shared tree mutex.
		parent.mu.Lock()
		parent.Children[model.Name] = f
		if parent.Path[pathIndexUser] != nil {
			f.Path[pathIndexUser] = parent.Path[pathIndexUser].Join(model.Name)
		}

		if parent.Path[pathIndexRoot] != nil {
			f.Path[pathIndexRoot] = parent.Path[pathIndexRoot].Join(model.Name)
		}

		f.CapabilitiesBs = parent.CapabilitiesBs
		f.mu = parent.mu
		parent.mu.Unlock()
	} else {
		f.mu = &sync.Mutex{}
	}

	return f
}
|
||||
|
||||
// newParentFile builds a detached parent node from a raw model and links it
// above child, adopting the child's tree mutex so the pair share one lock.
func newParentFile(parent *ent.File, child *File) *File {
	newParent := newFile(nil, parent)
	newParent.Children[child.Name()] = child
	child.Parent = newParent
	// Replace the fresh mutex created by newFile(nil, …) with the child's.
	newParent.mu = child.mu
	return newParent
}
|
||||
|
||||
// Recycle recursively returns this node and all of its children to filePool,
// clearing the model, links and cached paths so pooled nodes do not retain
// references to models or parents.
func (f *File) Recycle() {
	for _, child := range f.Children {
		child.Recycle()
	}

	f.Model = nil
	f.Children = make(map[string]*File)
	f.Path[0] = nil
	f.Path[1] = nil
	f.Parent = nil
	f.OwnerModel = nil
	f.IsUserRoot = false
	f.mu = nil

	filePool.Put(f)
}
|
||||
55
pkg/filemanager/fs/dbfs/global.go
Normal file
55
pkg/filemanager/fs/dbfs/global.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package dbfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
func (f *DBFS) StaleEntities(ctx context.Context, entities ...int) ([]fs.Entity, error) {
|
||||
res, err := f.fileClient.StaleEntities(ctx, entities...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return lo.Map(res, func(e *ent.Entity, i int) fs.Entity {
|
||||
return fs.NewEntity(e)
|
||||
}), nil
|
||||
}
|
||||
|
||||
// AllFilesInTrashBin lists the children of the trash-bin root using the
// pagination/ordering settings from opts. Public metadata is loaded for each
// returned file.
func (f *DBFS) AllFilesInTrashBin(ctx context.Context, opts ...fs.Option) (*fs.ListFileResult, error) {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	navigator, err := f.getNavigator(ctx, newTrashUri(""), NavigatorCapabilityListChildren)
	if err != nil {
		return nil, err
	}

	// Ask the inventory layer to include public metadata in the query.
	ctx = context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, true)
	children, err := navigator.Children(ctx, nil, &ListArgs{
		Page: &inventory.PaginationArgs{
			Page:                o.FsOption.Page,
			PageSize:            o.PageSize,
			OrderBy:             o.OrderBy,
			Order:               inventory.OrderDirection(o.OrderDirection),
			UseCursorPagination: o.useCursorPagination,
			PageToken:           o.pageToken,
		},
	})
	if err != nil {
		return nil, err
	}

	return &fs.ListFileResult{
		Files: lo.Map(children.Files, func(item *File, index int) fs.File {
			return item
		}),
		Pagination:            children.Pagination,
		RecursionLimitReached: children.RecursionLimitReached,
	}, nil
}
|
||||
325
pkg/filemanager/fs/dbfs/lock.go
Normal file
325
pkg/filemanager/fs/dbfs/lock.go
Normal file
@@ -0,0 +1,325 @@
|
||||
package dbfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type (
	// LockSession tracks the lock tokens acquired during one logical
	// operation. Tokens maps lock key → token; TokenStack groups keys into
	// frames so nested operations release only their own locks (see
	// LockSessionFromCtx, which pushes a frame, and Release, which pops one).
	LockSession struct {
		Tokens     map[string]string
		TokenStack [][]string
	}

	// LockByPath describes a lock request for Uri, together with the closest
	// existing ancestor file, the file type the target is treated as, and an
	// optional pre-chosen token.
	LockByPath struct {
		Uri             *fs.URI
		ClosestAncestor *File
		Type            types.FileType
		Token           string
	}

	// AlwaysIncludeTokenCtx is the context key that keeps lock tokens visible
	// in conflict responses (see WithAlwaysIncludeToken).
	AlwaysIncludeTokenCtx struct{}
)
|
||||
|
||||
// ConfirmLock verifies that the lock covering uri (resolved against the
// ancestor's root) is held by one of the supplied tokens. On success it
// returns a release func and the lock session, and records the confirmed
// token in the session's current stack frame. Locks already held by the
// session are confirmed without touching the lock service.
func (f *DBFS) ConfirmLock(ctx context.Context, ancestor fs.File, uri *fs.URI, token ...string) (func(), fs.LockSession, error) {
	session := LockSessionFromCtx(ctx)
	lockUri := ancestor.RootUri().JoinRaw(uri.PathTrimmed())
	ns, root, lKey := lockTupleFromUri(lockUri, f.user, f.hasher)
	lc := lock.LockInfo{
		Ns:    ns,
		Root:  root,
		Token: token,
	}

	// Skip if already locked in current session
	if _, ok := session.Tokens[lKey]; ok {
		return func() {}, session, nil
	}

	release, tokenHit, err := f.ls.Confirm(time.Now(), lc)
	if err != nil {
		return nil, nil, err
	}

	// Remember which token matched so later calls in this session short-circuit.
	session.Tokens[lKey] = tokenHit
	stackIndex := len(session.TokenStack) - 1
	session.TokenStack[stackIndex] = append(session.TokenStack[stackIndex], lKey)
	return release, session, nil
}
|
||||
|
||||
// Lock acquires a lock on uri for requester for duration d, on behalf of the
// given application. The root folder cannot be locked, and (unless the
// bypass-owner-check flag is set on ctx) only the owner may lock. The lock
// target is treated as a plain file unless uri resolves to an existing object,
// in which case that object's actual type is used.
func (f *DBFS) Lock(ctx context.Context, d time.Duration, requester *ent.User, zeroDepth bool, application lock.Application,
	uri *fs.URI, token string) (fs.LockSession, error) {
	// Get navigator
	navigator, err := f.getNavigator(ctx, uri, NavigatorCapabilityLockFile)
	if err != nil {
		return nil, err
	}

	// Closest existing ancestor of the lock target (not-found is tolerated:
	// the target itself may not exist yet).
	ancestor, err := f.getFileByPath(ctx, navigator, uri)
	if err != nil && !ent.IsNotFound(err) {
		return nil, fmt.Errorf("failed to get ancestor: %w", err)
	}

	if ancestor.IsRootFolder() && ancestor.Uri(false).IsSame(uri, hashid.EncodeUserID(f.hasher, f.user.ID)) {
		return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot lock root folder"))
	}

	// Lock require create or update permission
	if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && ancestor.Owner().ID != requester.ID {
		return nil, fs.ErrOwnerOnly
	}

	// Use the existing object's type when uri points at it exactly.
	t := types.FileTypeFile
	if ancestor.Uri(false).IsSame(uri, hashid.EncodeUserID(f.hasher, f.user.ID)) {
		t = ancestor.Type()
	}
	lr := &LockByPath{
		Uri:             ancestor.RootUri().JoinRaw(uri.PathTrimmed()),
		ClosestAncestor: ancestor,
		Type:            t,
		Token:           token,
	}
	ls, err := f.acquireByPath(ctx, d, requester, zeroDepth, application, lr)
	if err != nil {
		return nil, err
	}

	return ls, nil
}
|
||||
|
||||
// Unlock releases the locks identified by the given tokens.
func (f *DBFS) Unlock(ctx context.Context, tokens ...string) error {
	return f.ls.Unlock(time.Now(), tokens...)
}

// Refresh extends the lock identified by token by duration d and returns the
// updated lock details.
func (f *DBFS) Refresh(ctx context.Context, d time.Duration, token string) (lock.LockDetails, error) {
	return f.ls.Refresh(time.Now(), d, token)
}
|
||||
|
||||
func (f *DBFS) acquireByPath(ctx context.Context, duration time.Duration,
|
||||
requester *ent.User, zeroDepth bool, application lock.Application, locks ...*LockByPath) (*LockSession, error) {
|
||||
session := LockSessionFromCtx(ctx)
|
||||
|
||||
// Prepare lock details for each file
|
||||
lockDetails := make([]lock.LockDetails, 0, len(locks))
|
||||
lockedRequest := make([]*LockByPath, 0, len(locks))
|
||||
for _, l := range locks {
|
||||
ns, root, lKey := lockTupleFromUri(l.Uri, f.user, f.hasher)
|
||||
ld := lock.LockDetails{
|
||||
Owner: lock.Owner{
|
||||
Application: application,
|
||||
},
|
||||
Ns: ns,
|
||||
Root: root,
|
||||
ZeroDepth: zeroDepth,
|
||||
Duration: duration,
|
||||
Type: l.Type,
|
||||
Token: l.Token,
|
||||
}
|
||||
|
||||
// Skip if already locked in current session
|
||||
if _, ok := session.Tokens[lKey]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
lockDetails = append(lockDetails, ld)
|
||||
lockedRequest = append(lockedRequest, l)
|
||||
}
|
||||
|
||||
// Acquire lock
|
||||
tokens, err := f.ls.Create(time.Now(), lockDetails...)
|
||||
if len(tokens) > 0 {
|
||||
for i, token := range tokens {
|
||||
key := lockDetails[i].Key()
|
||||
session.Tokens[key] = token
|
||||
stackIndex := len(session.TokenStack) - 1
|
||||
session.TokenStack[stackIndex] = append(session.TokenStack[stackIndex], key)
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
var conflicts lock.ConflictError
|
||||
if errors.As(err, &conflicts) {
|
||||
// Conflict with existing lock, generate user-friendly error message
|
||||
conflicts = lo.Map(conflicts, func(c *lock.ConflictDetail, index int) *lock.ConflictDetail {
|
||||
lr := lockedRequest[c.Index]
|
||||
if lr.ClosestAncestor.Root().Model.OwnerID == requester.ID {
|
||||
// Add absolute path for owner issued lock request
|
||||
c.Path = newMyUri().JoinRaw(c.Path).String()
|
||||
return c
|
||||
}
|
||||
|
||||
// Hide token for non-owner requester
|
||||
if v, ok := ctx.Value(AlwaysIncludeTokenCtx{}).(bool); !ok || !v {
|
||||
c.Token = ""
|
||||
}
|
||||
|
||||
// If conflicted resources still under user root, expose the relative path
|
||||
userRoot := lr.ClosestAncestor.UserRoot()
|
||||
userRootPath := userRoot.Uri(true).Path()
|
||||
if strings.HasPrefix(c.Path, userRootPath) {
|
||||
c.Path = userRoot.
|
||||
Uri(false).
|
||||
Join(strings.Split(strings.TrimPrefix(c.Path, userRootPath), fs.Separator)...).String()
|
||||
return c
|
||||
}
|
||||
|
||||
// Hide sensitive information for non-owner issued lock request
|
||||
c.Path = ""
|
||||
return c
|
||||
})
|
||||
|
||||
return session, fs.ErrLockConflict.WithError(conflicts)
|
||||
}
|
||||
|
||||
return session, fmt.Errorf("faield to create lock: %w", err)
|
||||
}
|
||||
|
||||
// Check if any ancestor is modified during `getFileByPath` and `lock`.
|
||||
if err := f.ensureConsistency(
|
||||
ctx,
|
||||
lo.Map(lockedRequest, func(item *LockByPath, index int) *File {
|
||||
return item.ClosestAncestor
|
||||
})...,
|
||||
); err != nil {
|
||||
return session, err
|
||||
}
|
||||
|
||||
return session, nil
|
||||
}
|
||||
|
||||
// Release unlocks every token recorded in the session's current (top) stack
// frame and, on success, removes those tokens and pops the frame. A nil
// session is a no-op.
func (f *DBFS) Release(ctx context.Context, session *LockSession) error {
	if session == nil {
		return nil
	}

	stackIndex := len(session.TokenStack) - 1
	err := f.ls.Unlock(time.Now(), lo.Map(session.TokenStack[stackIndex], func(key string, index int) string {
		return session.Tokens[key]
	})...)
	if err == nil {
		// Only forget the tokens once the lock service confirmed the unlock.
		for _, key := range session.TokenStack[stackIndex] {
			delete(session.Tokens, key)
		}
		session.TokenStack = session.TokenStack[:len(session.TokenStack)-1]
	}

	return err
}
|
||||
|
||||
// ensureConsistency queries the database for all given files and their
// ancestors, making sure there was no modification in between. This guards
// against changes between the navigator's first query and lock acquisition.
func (f *DBFS) ensureConsistency(ctx context.Context, files ...*File) error {
	if len(files) == 0 {
		return nil
	}

	// Generate a list of unique files (include ancestors) to check
	uniqueFiles := make(map[int]*File)
	for _, file := range files {
		for root := file; root != nil; root = root.Parent {
			if _, ok := uniqueFiles[root.Model.ID]; ok {
				// This file and its ancestors are already included
				break
			}

			uniqueFiles[root.Model.ID] = root
		}
	}

	// Page through the database snapshot of these IDs and compare the fields
	// that affect path resolution with the copies we hold in memory.
	page := 0
	fileIds := lo.Keys(uniqueFiles)
	for page >= 0 {
		files, next, err := f.fileClient.GetByIDs(ctx, fileIds, page)
		if err != nil {
			return fmt.Errorf("failed to check file consistency: %w", err)
		}

		for _, file := range files {
			latest := uniqueFiles[file.ID].Model
			if file.Name != latest.Name ||
				file.FileChildren != latest.FileChildren ||
				file.OwnerID != latest.OwnerID ||
				file.Type != latest.Type {
				return fs.ErrModified.
					WithError(fmt.Errorf("file %s has been modified before lock acquisition", file.Name))
			}
		}

		page = next
	}

	return nil
}
|
||||
|
||||
// LockSessionFromCtx retrieves lock session from context. If no lock session
// found, a new empty lock session will be returned.
// Note: every call pushes a fresh frame onto the session's token stack, so
// each call is expected to pair with a later Release that pops that frame.
func LockSessionFromCtx(ctx context.Context) *LockSession {
	l, _ := ctx.Value(fs.LockSessionCtxKey{}).(*LockSession)
	if l == nil {
		ls := &LockSession{
			Tokens:     make(map[string]string),
			TokenStack: make([][]string, 0),
		}

		l = ls
	}

	l.TokenStack = append(l.TokenStack, make([]string, 0))
	return l
}
|
||||
|
||||
// Exclude removes lock from session, so that it won't be released.
// Returns the removed token, or "" when the lock's key is not present in the
// session's current (top) stack frame.
func (l *LockSession) Exclude(lock *LockByPath, u *ent.User, hasher hashid.Encoder) string {
	_, _, lKey := lockTupleFromUri(lock.Uri, u, hasher)
	foundInCurrentStack := false
	token, found := l.Tokens[lKey]
	if found {
		// Only drop the token when the key lives in the current frame; keys
		// recorded by outer frames stay owned by their own Release.
		stackIndex := len(l.TokenStack) - 1
		l.TokenStack[stackIndex] = lo.Filter(l.TokenStack[stackIndex], func(t string, index int) bool {
			if t == lKey {
				foundInCurrentStack = true
			}
			return t != lKey
		})
		if foundInCurrentStack {
			delete(l.Tokens, lKey)
			return token
		}
	}

	return ""
}
|
||||
|
||||
func (l *LockSession) LastToken() string {
|
||||
stackIndex := len(l.TokenStack) - 1
|
||||
if len(l.TokenStack[stackIndex]) == 0 {
|
||||
return ""
|
||||
}
|
||||
return l.Tokens[l.TokenStack[stackIndex][len(l.TokenStack[stackIndex])-1]]
|
||||
}
|
||||
|
||||
// WithAlwaysIncludeToken returns a new context with a flag to always include
// the lock token in conflict responses (normally hidden from non-owners).
func WithAlwaysIncludeToken(ctx context.Context) context.Context {
	return context.WithValue(ctx, AlwaysIncludeTokenCtx{}, true)
}
|
||||
|
||||
func lockTupleFromUri(uri *fs.URI, u *ent.User, hasher hashid.Encoder) (string, string, string) {
|
||||
id := uri.ID(hashid.EncodeUserID(hasher, u.ID))
|
||||
if id == "" {
|
||||
id = strconv.Itoa(u.ID)
|
||||
}
|
||||
ns := fmt.Sprintf(id + "/" + string(uri.FileSystem()))
|
||||
root := uri.Path()
|
||||
return ns, root, ns + "/" + root
|
||||
}
|
||||
831
pkg/filemanager/fs/dbfs/manage.go
Normal file
831
pkg/filemanager/fs/dbfs/manage.go
Normal file
@@ -0,0 +1,831 @@
|
||||
package dbfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/samber/lo"
|
||||
"golang.org/x/tools/container/intsets"
|
||||
)
|
||||
|
||||
// Create creates a file or folder of fileType at path, creating any missing
// intermediate folders along the way (unless the no-chained-creation option
// forbids it). If an object of the same type already exists at path it is
// returned as-is (or with fs.ErrFileExisted when errOnConflict is set); an
// existing object of a different type is always an error. The desired path is
// locked for the duration of the operation.
func (f *DBFS) Create(ctx context.Context, path *fs.URI, fileType types.FileType, opts ...fs.Option) (fs.File, error) {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	// Get navigator
	navigator, err := f.getNavigator(ctx, path, NavigatorCapabilityCreateFile, NavigatorCapabilityLockFile)
	if err != nil {
		return nil, err
	}

	// Get most recent ancestor (not-found is tolerated: the missing suffix of
	// the path is what will be created below).
	var ancestor *File
	if o.ancestor != nil {
		ancestor = o.ancestor
	} else {
		ancestor, err = f.getFileByPath(ctx, navigator, path)
		if err != nil && !ent.IsNotFound(err) {
			return nil, fmt.Errorf("failed to get ancestor: %w", err)
		}
	}

	if ancestor.Uri(false).IsSame(path, hashid.EncodeUserID(f.hasher, f.user.ID)) {
		if ancestor.Type() == fileType {
			if o.errOnConflict {
				return ancestor, fs.ErrFileExisted
			}

			// Target file already exist, return it.
			return ancestor, nil
		}

		// File with the same name but different type already exist
		return nil, fs.ErrFileExisted.
			WithError(fmt.Errorf("object with the same name but different type %q already exist", ancestor.Type()))
	}

	if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && ancestor.Owner().ID != f.user.ID {
		return nil, fs.ErrOwnerOnly
	}

	// Lock ancestor
	lockedPath := ancestor.RootUri().JoinRaw(path.PathTrimmed())
	ls, err := f.acquireByPath(ctx, -1, f.user, false, fs.LockApp(fs.ApplicationCreate),
		&LockByPath{lockedPath, ancestor, fileType, ""})
	defer func() { _ = f.Release(ctx, ls) }()
	if err != nil {
		return nil, err
	}

	// For all ancestors in user's desired path, create folders if not exist
	existedElements := ancestor.Uri(false).Elements()
	desired := path.Elements()
	if (len(desired)-len(existedElements) > 1) && o.noChainedCreation {
		return nil, fs.ErrPathNotExist
	}

	for i := len(existedElements); i < len(desired); i++ {
		// Make sure parent is a folder
		if !ancestor.CanHaveChildren() {
			return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("parent must be a valid folder"))
		}

		// Validate object name
		if err := validateFileName(desired[i]); err != nil {
			return nil, fs.ErrIllegalObjectName.WithError(err)
		}

		if i < len(desired)-1 || fileType == types.FileTypeFolder {
			args := &inventory.CreateFolderParameters{
				Owner: ancestor.Model.OwnerID,
				Name:  desired[i],
			}

			// Apply options for last element
			if i == len(desired)-1 {
				if o.Metadata != nil {
					args.Metadata = o.Metadata
				}
				args.IsSymbolic = o.isSymbolicLink
			}

			// Create folder if it is not the last element or the target is a folder;
			// each folder creation runs in its own transaction.
			fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
			if err != nil {
				return nil, serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
			}

			newFolder, err := fc.CreateFolder(ctx, ancestor.Model, args)
			if err != nil {
				_ = inventory.Rollback(tx)
				return nil, fmt.Errorf("failed to create folder %q: %w", desired[i], err)
			}

			if err := inventory.Commit(tx); err != nil {
				return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit folder creation", err)
			}

			// Descend into the newly created folder for the next element.
			ancestor = newFile(ancestor, newFolder)
		} else {
			// Last element and a plain file: delegate to createFile.
			file, err := f.createFile(ctx, ancestor, desired[i], fileType, o)
			if err != nil {
				return nil, err
			}

			return file, nil
		}
	}

	return ancestor, nil
}
|
||||
|
||||
// Rename renames the file or folder at path to newName within a DB transaction.
// The rename is rejected for root folders, names failing validation, and (for
// files) extensions not allowed by the preferred storage policy. Only the owner
// may rename unless ByPassOwnerCheckCtxKey is set in ctx. On success the
// refreshed file is returned.
func (f *DBFS) Rename(ctx context.Context, path *fs.URI, newName string) (fs.File, error) {
	// Get navigator
	navigator, err := f.getNavigator(ctx, path, NavigatorCapabilityRenameFile, NavigatorCapabilityLockFile)
	if err != nil {
		return nil, err
	}

	// Get target file
	target, err := f.getFileByPath(ctx, navigator, path)
	if err != nil {
		return nil, fmt.Errorf("failed to get target file: %w", err)
	}
	// Remember the old name so we can compare extensions after the rename.
	oldName := target.Name()

	if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && target.Owner().ID != f.user.ID {
		return nil, fs.ErrOwnerOnly
	}

	// Root folder cannot be modified
	if target.IsRootFolder() {
		return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot modify root folder"))
	}

	// Validate new name
	if err := validateFileName(newName); err != nil {
		return nil, fs.ErrIllegalObjectName.WithError(err)
	}

	// If target is a file, validate file extension
	policy, err := f.getPreferredPolicy(ctx, target)
	if err != nil {
		return nil, err
	}

	if target.Type() == types.FileTypeFile {
		if err := validateExtension(newName, policy); err != nil {
			return nil, fs.ErrIllegalObjectName.WithError(err)
		}
	}

	// Lock target. The defer is placed before the error check on purpose:
	// acquireByPath may return partially-acquired locks that still need release.
	ls, err := f.acquireByPath(ctx, -1, f.user, false, fs.LockApp(fs.ApplicationRename),
		&LockByPath{target.Uri(true), target, target.Type(), ""})
	defer func() { _ = f.Release(ctx, ls) }()
	if err != nil {
		return nil, err
	}

	// Rename target
	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	updated, err := fc.Rename(ctx, target.Model, newName)
	if err != nil {
		_ = inventory.Rollback(tx)
		// A constraint violation means a sibling with the new name already exists.
		if ent.IsConstraintError(err) {
			return nil, fs.ErrFileExisted.WithError(err)
		}

		return nil, serializer.NewError(serializer.CodeDBError, "failed to update file", err)
	}

	// If the extension changed, the "thumbnail disabled" mark may no longer
	// apply to the new file type, so clear it in the same transaction.
	if target.Type() == types.FileTypeFile && !strings.EqualFold(filepath.Ext(newName), filepath.Ext(oldName)) {
		if err := fc.RemoveMetadata(ctx, target.Model, ThumbDisabledKey); err != nil {
			_ = inventory.Rollback(tx)
			return nil, serializer.NewError(serializer.CodeDBError, "failed to remove disabled thumbnail mark", err)
		}
	}

	if err := inventory.Commit(tx); err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit rename change", err)
	}

	return target.Replace(updated), nil
}
|
||||
|
||||
// SoftDelete moves the given files into the trash bin. Paths are resolved and
// validated independently; per-path failures are collected into an aggregate
// error while the remaining paths continue. All surviving targets are locked,
// then soft-deleted in a single transaction, recording the original URI and
// the expected trash-collection time in each file's metadata.
func (f *DBFS) SoftDelete(ctx context.Context, path ...*fs.URI) error {
	ae := serializer.NewAggregateError()
	targets := make([]*File, 0, len(path))
	for _, p := range path {
		// Get navigator
		navigator, err := f.getNavigator(ctx, p, NavigatorCapabilitySoftDelete)
		if err != nil {
			ae.Add(p.String(), err)
			continue
		}

		// Get target file
		target, err := f.getFileByPath(ctx, navigator, p)
		if err != nil {
			ae.Add(p.String(), fmt.Errorf("failed to get target file: %w", err))
			continue
		}

		if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && target.Owner().ID != f.user.ID {
			ae.Add(p.String(), fs.ErrOwnerOnly.WithError(fmt.Errorf("only file owner can delete file without trash bin")))
			continue
		}

		// Root folder cannot be deleted
		if target.IsRootFolder() {
			ae.Add(p.String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot delete root folder")))
			continue
		}

		targets = append(targets, target)
	}

	if len(targets) == 0 {
		return ae.Aggregate()
	}
	// Lock all targets
	lockTargets := lo.Map(targets, func(value *File, key int) *LockByPath {
		return &LockByPath{value.Uri(true), value, value.Type(), ""}
	})
	// Release is deferred before the error check so partial acquisitions are freed.
	ls, err := f.acquireByPath(ctx, -1, f.user, false, fs.LockApp(fs.ApplicationSoftDelete), lockTargets...)
	defer func() { _ = f.Release(ctx, ls) }()
	if err != nil {
		return err
	}

	// Start transaction to soft-delete files
	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	for _, target := range targets {
		// Perform soft-delete
		if err := fc.SoftDelete(ctx, target.Model); err != nil {
			_ = inventory.Rollback(tx)
			return serializer.NewError(serializer.CodeDBError, "failed to soft-delete file", err)
		}

		// Save restore uri into metadata so Restore can put the file back, and
		// record when the trash collector is expected to purge it (now + the
		// owner group's TrashRetention seconds, as a Unix timestamp string).
		if err := fc.UpsertMetadata(ctx, target.Model, map[string]string{
			MetadataRestoreUri: target.Uri(true).String(),
			MetadataExpectedCollectTime: strconv.FormatInt(
				time.Now().Add(time.Duration(target.Owner().Edges.Group.Settings.TrashRetention)*time.Second).Unix(),
				10),
		}, nil); err != nil {
			_ = inventory.Rollback(tx)
			return serializer.NewError(serializer.CodeDBError, "failed to update metadata", err)
		}
	}

	// Commit transaction
	if err := inventory.Commit(tx); err != nil {
		return serializer.NewError(serializer.CodeDBError, "Failed to commit soft-delete change", err)
	}

	return ae.Aggregate()
}
|
||||
|
||||
// Delete permanently deletes the files at the given paths, returning the stale
// entities whose backing blobs may now be recycled. Per-path validation errors
// are aggregated; surviving targets are grouped by navigator, locked, and
// deleted in one transaction whose storage diff is applied to owner quotas on
// commit. With the UnlinkOnly option, entities are unlinked but blobs are kept.
func (f *DBFS) Delete(ctx context.Context, path []*fs.URI, opts ...fs.Option) ([]fs.Entity, error) {
	o := newDbfsOption()
	for _, opt := range opts {
		o.apply(opt)
	}

	// Translate the dbfs-level option into the inventory-level recycle option.
	var opt *types.EntityRecycleOption
	if o.UnlinkOnly {
		opt = &types.EntityRecycleOption{
			UnlinkOnly: true,
		}
	}

	ae := serializer.NewAggregateError()
	fileNavGroup := make(map[Navigator][]*File)
	// Entities must be loaded so deletion can report the stale ones.
	ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)

	for _, p := range path {
		// Get navigator
		navigator, err := f.getNavigator(ctx, p, NavigatorCapabilityDeleteFile, NavigatorCapabilityLockFile)
		if err != nil {
			ae.Add(p.String(), err)
			continue
		}

		// Get target file
		target, err := f.getFileByPath(ctx, navigator, p)
		if err != nil {
			ae.Add(p.String(), fmt.Errorf("failed to get target file: %w", err))
			continue
		}

		// Owner check can additionally be skipped by the SysSkipSoftDelete option.
		if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !o.SysSkipSoftDelete && !ok && target.Owner().ID != f.user.ID {
			ae.Add(p.String(), fs.ErrOwnerOnly)
			continue
		}

		// Root folder cannot be deleted
		if target.IsRootFolder() {
			ae.Add(p.String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot delete root folder")))
			continue
		}

		// Group by navigator so each navigator can walk its own subtree later.
		if _, ok := fileNavGroup[navigator]; !ok {
			fileNavGroup[navigator] = make([]*File, 0)
		}
		fileNavGroup[navigator] = append(fileNavGroup[navigator], target)
	}

	targets := lo.Flatten(lo.Values(fileNavGroup))
	if len(targets) == 0 {
		return nil, ae.Aggregate()
	}
	// Lock all targets
	lockTargets := lo.Map(targets, func(value *File, key int) *LockByPath {
		return &LockByPath{value.Uri(true), value, value.Type(), ""}
	})
	// Release is deferred before the error check so partial acquisitions are freed.
	ls, err := f.acquireByPath(ctx, -1, f.user, false, fs.LockApp(fs.ApplicationDelete), lockTargets...)
	defer func() { _ = f.Release(ctx, ls) }()
	if err != nil {
		return nil, err
	}

	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	// Delete targets
	newStaleEntities, storageDiff, err := f.deleteFiles(ctx, fileNavGroup, fc, opt)
	if err != nil {
		_ = inventory.Rollback(tx)
		return nil, serializer.NewError(serializer.CodeDBError, "failed to delete files", err)
	}

	// Commit both the deletion and the resulting owner storage adjustments.
	tx.AppendStorageDiff(storageDiff)
	if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit delete change", err)
	}

	return newStaleEntities, ae.Aggregate()
}
|
||||
|
||||
func (f *DBFS) VersionControl(ctx context.Context, path *fs.URI, versionId int, delete bool) error {
|
||||
// Get navigator
|
||||
navigator, err := f.getNavigator(ctx, path, NavigatorCapabilityVersionControl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get target file
|
||||
ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)
|
||||
target, err := f.getFileByPath(ctx, navigator, path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get target file: %w", err)
|
||||
}
|
||||
|
||||
if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && target.Owner().ID != f.user.ID {
|
||||
return fs.ErrOwnerOnly
|
||||
}
|
||||
|
||||
// Target must be a file
|
||||
if target.Type() != types.FileTypeFile {
|
||||
return fs.ErrNotSupportedAction.WithError(fmt.Errorf("target must be a valid file"))
|
||||
}
|
||||
|
||||
// Lock file
|
||||
ls, err := f.acquireByPath(ctx, -1, f.user, true, fs.LockApp(fs.ApplicationVersionControl),
|
||||
&LockByPath{target.Uri(true), target, target.Type(), ""})
|
||||
defer func() { _ = f.Release(ctx, ls) }()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if delete {
|
||||
storageDiff, err := f.deleteEntity(ctx, target, versionId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := f.userClient.ApplyStorageDiff(ctx, storageDiff); err != nil {
|
||||
f.l.Error("Failed to apply storage diff after deleting version: %s", err)
|
||||
}
|
||||
return nil
|
||||
} else {
|
||||
return f.setCurrentVersion(ctx, target, versionId)
|
||||
}
|
||||
}
|
||||
|
||||
// Restore moves trashed files back to their original locations. Each target
// must carry the MetadataRestoreUri mark written by SoftDelete; the restore is
// performed as a move (MoveOrCopy with isCopy=false) into the parent directory
// of the recorded original URI. Per-path failures accumulate in an aggregate
// error.
func (f *DBFS) Restore(ctx context.Context, path ...*fs.URI) error {
	ae := serializer.NewAggregateError()
	targets := make([]*File, 0, len(path))
	// Public metadata must be loaded to read the restore URI mark.
	ctx = context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, true)

	for _, p := range path {
		// Get navigator
		navigator, err := f.getNavigator(ctx, p, NavigatorCapabilityRestore)
		if err != nil {
			ae.Add(p.String(), err)
			continue
		}

		// Get target file
		target, err := f.getFileByPath(ctx, navigator, p)
		if err != nil {
			ae.Add(p.String(), fmt.Errorf("failed to get file: %w", err))
			continue
		}

		targets = append(targets, target)
	}

	if len(targets) == 0 {
		return ae.Aggregate()
	}

	// Build [current trash URI, destination directory URI] pairs. Targets
	// lacking a valid restore mark are dropped here, recording the failure in
	// ae as a side effect of the closure.
	allTrashUriStr := lo.FilterMap(targets, func(t *File, key int) ([]*fs.URI, bool) {
		if restoreUri, ok := t.Metadata()[MetadataRestoreUri]; ok {
			srcUrl, err := fs.NewUriFromString(restoreUri)
			if err != nil {
				ae.Add(t.Uri(false).String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("invalid restore uri: %w", err)))
				return nil, false
			}

			return []*fs.URI{t.Uri(false), srcUrl.DirUri()}, true
		}

		ae.Add(t.Uri(false).String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot restore file without required metadata mark")))
		return nil, false
	})

	// Copy each file to its original location
	for _, uris := range allTrashUriStr {
		if err := f.MoveOrCopy(ctx, []*fs.URI{uris[0]}, uris[1], false); err != nil {
			// Merge nested aggregate errors; fall back to a plain add.
			if !ae.Merge(err) {
				ae.Add(uris[0].String(), err)
			}
		}
	}

	return ae.Aggregate()

}
|
||||
|
||||
func (f *DBFS) MoveOrCopy(ctx context.Context, path []*fs.URI, dst *fs.URI, isCopy bool) error {
|
||||
targets := make([]*File, 0, len(path))
|
||||
dstNavigator, err := f.getNavigator(ctx, dst, NavigatorCapabilityLockFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get destination file
|
||||
destination, err := f.getFileByPath(ctx, dstNavigator, dst)
|
||||
if err != nil {
|
||||
return fmt.Errorf("faield to get destination folder: %w", err)
|
||||
}
|
||||
|
||||
if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && destination.Owner().ID != f.user.ID {
|
||||
return fs.ErrOwnerOnly
|
||||
}
|
||||
|
||||
// Target must be a folder
|
||||
if !destination.CanHaveChildren() {
|
||||
return fs.ErrNotSupportedAction.WithError(fmt.Errorf("destination must be a valid folder"))
|
||||
}
|
||||
|
||||
ae := serializer.NewAggregateError()
|
||||
fileNavGroup := make(map[Navigator][]*File)
|
||||
dstRootPath := destination.Uri(true)
|
||||
ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)
|
||||
ctx = context.WithValue(ctx, inventory.LoadFileMetadata{}, true)
|
||||
|
||||
for _, p := range path {
|
||||
// Get navigator
|
||||
navigator, err := f.getNavigator(ctx, p, NavigatorCapabilityLockFile)
|
||||
if err != nil {
|
||||
ae.Add(p.String(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check fs capability
|
||||
if !canMoveOrCopyTo(p, dst, isCopy) {
|
||||
ae.Add(p.String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot move or copy file form %s to %s", p.String(), dst.String())))
|
||||
continue
|
||||
}
|
||||
|
||||
// Get target file
|
||||
target, err := f.getFileByPath(ctx, navigator, p)
|
||||
if err != nil {
|
||||
ae.Add(p.String(), fmt.Errorf("failed to get file: %w", err))
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && target.Owner().ID != f.user.ID {
|
||||
ae.Add(p.String(), fs.ErrOwnerOnly)
|
||||
continue
|
||||
}
|
||||
|
||||
// Root folder cannot be moved or copied
|
||||
if target.IsRootFolder() {
|
||||
ae.Add(p.String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot move root folder")))
|
||||
continue
|
||||
}
|
||||
|
||||
// Cannot move or copy folder to its descendant
|
||||
if target.Type() == types.FileTypeFolder &&
|
||||
dstRootPath.EqualOrIsDescendantOf(target.Uri(true), hashid.EncodeUserID(f.hasher, f.user.ID)) {
|
||||
ae.Add(p.String(), fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot move or copy folder to itself or its descendant")))
|
||||
continue
|
||||
}
|
||||
|
||||
targets = append(targets, target)
|
||||
if isCopy {
|
||||
if _, ok := fileNavGroup[navigator]; !ok {
|
||||
fileNavGroup[navigator] = make([]*File, 0)
|
||||
}
|
||||
fileNavGroup[navigator] = append(fileNavGroup[navigator], target)
|
||||
}
|
||||
}
|
||||
|
||||
if len(targets) > 0 {
|
||||
// Lock all targets
|
||||
lockTargets := lo.Map(targets, func(value *File, key int) *LockByPath {
|
||||
return &LockByPath{value.Uri(true), value, value.Type(), ""}
|
||||
})
|
||||
|
||||
// Lock destination
|
||||
dstBase := destination.Uri(true)
|
||||
dstLockTargets := lo.Map(targets, func(value *File, key int) *LockByPath {
|
||||
return &LockByPath{dstBase.Join(value.Name()), destination, value.Type(), ""}
|
||||
})
|
||||
allLockTargets := make([]*LockByPath, 0, len(targets)*2)
|
||||
if !isCopy {
|
||||
// For moving files from trash bin, also lock the dst with restored name.
|
||||
dstRestoreTargets := lo.FilterMap(targets, func(value *File, key int) (*LockByPath, bool) {
|
||||
if _, ok := value.Metadata()[MetadataRestoreUri]; ok {
|
||||
return &LockByPath{dstBase.Join(value.DisplayName()), destination, value.Type(), ""}, true
|
||||
}
|
||||
return nil, false
|
||||
})
|
||||
allLockTargets = append(allLockTargets, lockTargets...)
|
||||
allLockTargets = append(allLockTargets, dstRestoreTargets...)
|
||||
}
|
||||
allLockTargets = append(allLockTargets, dstLockTargets...)
|
||||
ls, err := f.acquireByPath(ctx, -1, f.user, false, fs.LockApp(fs.ApplicationMoveCopy), allLockTargets...)
|
||||
defer func() { _ = f.Release(ctx, ls) }()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Start transaction to move files
|
||||
fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
|
||||
if err != nil {
|
||||
return serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
|
||||
}
|
||||
|
||||
var (
|
||||
storageDiff inventory.StorageDiff
|
||||
)
|
||||
if isCopy {
|
||||
_, storageDiff, err = f.copyFiles(ctx, fileNavGroup, destination, fc)
|
||||
} else {
|
||||
storageDiff, err = f.moveFiles(ctx, targets, destination, fc, dstNavigator)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
_ = inventory.Rollback(tx)
|
||||
return err
|
||||
}
|
||||
|
||||
tx.AppendStorageDiff(storageDiff)
|
||||
if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
|
||||
return serializer.NewError(serializer.CodeDBError, "Failed to commit move change", err)
|
||||
}
|
||||
|
||||
// TODO: after move, dbfs cache should be cleared
|
||||
}
|
||||
|
||||
return ae.Aggregate()
|
||||
}
|
||||
|
||||
func (f *DBFS) deleteEntity(ctx context.Context, target *File, entityId int) (inventory.StorageDiff, error) {
|
||||
if target.PrimaryEntityID() == entityId {
|
||||
return nil, fs.ErrNotSupportedAction.WithError(fmt.Errorf("cannot delete current version"))
|
||||
}
|
||||
|
||||
targetVersion, found := lo.Find(target.Entities(), func(item fs.Entity) bool {
|
||||
return item.ID() == entityId
|
||||
})
|
||||
if !found {
|
||||
return nil, fs.ErrEntityNotExist.WithError(fmt.Errorf("version not found"))
|
||||
}
|
||||
|
||||
diff, err := f.fileClient.UnlinkEntity(ctx, targetVersion.Model(), target.Model, target.Owner())
|
||||
if err != nil {
|
||||
return nil, serializer.NewError(serializer.CodeDBError, "Failed to unlink entity", err)
|
||||
}
|
||||
|
||||
if targetVersion.UploadSessionID() != nil {
|
||||
err = f.fileClient.RemoveMetadata(ctx, target.Model, MetadataUploadSessionID)
|
||||
if err != nil {
|
||||
return nil, serializer.NewError(serializer.CodeDBError, "Failed to remove upload session metadata", err)
|
||||
}
|
||||
}
|
||||
return diff, nil
|
||||
}
|
||||
|
||||
func (f *DBFS) setCurrentVersion(ctx context.Context, target *File, versionId int) error {
|
||||
if target.PrimaryEntityID() == versionId {
|
||||
return nil
|
||||
}
|
||||
|
||||
targetVersion, found := lo.Find(target.Entities(), func(item fs.Entity) bool {
|
||||
return item.ID() == versionId && item.Type() == types.EntityTypeVersion && item.UploadSessionID() == nil
|
||||
})
|
||||
if !found {
|
||||
return fs.ErrEntityNotExist.WithError(fmt.Errorf("version not found"))
|
||||
}
|
||||
|
||||
fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
|
||||
if err != nil {
|
||||
return serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
|
||||
}
|
||||
|
||||
if err := f.fileClient.SetPrimaryEntity(ctx, target.Model, targetVersion.ID()); err != nil {
|
||||
return serializer.NewError(serializer.CodeDBError, "Failed to set primary entity", err)
|
||||
}
|
||||
|
||||
// Cap thumbnail entities
|
||||
diff, err := fc.CapEntities(ctx, target.Model, target.Owner(), 0, types.EntityTypeThumbnail)
|
||||
if err != nil {
|
||||
_ = inventory.Rollback(tx)
|
||||
return serializer.NewError(serializer.CodeDBError, "Failed to cap thumbnail entities", err)
|
||||
}
|
||||
|
||||
tx.AppendStorageDiff(diff)
|
||||
if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
|
||||
return serializer.NewError(serializer.CodeDBError, "Failed to commit set current version", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// deleteFiles walks each navigator's target subtree (bounded by the user
// group's MaxWalkedFiles), deletes every reached file via the tx-bound file
// client fc, and returns the stale entities plus the merged storage diff.
// Must run inside an inventory transaction (FollowTx requires one in ctx).
func (f *DBFS) deleteFiles(ctx context.Context, targets map[Navigator][]*File, fc inventory.FileClient, opt *types.EntityRecycleOption) ([]fs.Entity, inventory.StorageDiff, error) {
	if f.user.Edges.Group == nil {
		return nil, nil, fmt.Errorf("user group not loaded")
	}
	// Walk budget shared across all navigators; at least 1.
	limit := max(f.user.Edges.Group.Settings.MaxWalkedFiles, 1)
	allStaleEntities := make([]fs.Entity, 0, len(targets))
	storageDiff := make(inventory.StorageDiff)
	for n, files := range targets {
		// Let navigator use tx
		reset, err := n.FollowTx(ctx)
		if err != nil {
			return nil, nil, err
		}

		// NOTE(review): deferred in a loop, so tx-follow resets only run when
		// deleteFiles returns, not per iteration. Harmless for the usual small
		// navigator count — confirm this is intended.
		defer reset()

		// List all files to be deleted
		toBeDeletedFiles := make([]*File, 0, len(files))
		if err := n.Walk(ctx, files, limit, intsets.MaxInt, func(targets []*File, level int) error {
			limit -= len(targets)
			toBeDeletedFiles = append(toBeDeletedFiles, targets...)
			return nil
		}); err != nil {
			return nil, nil, fmt.Errorf("failed to walk files: %w", err)
		}

		// Delete files
		staleEntities, diff, err := fc.Delete(ctx, lo.Map(toBeDeletedFiles, func(item *File, index int) *ent.File {
			return item.Model
		}), opt)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to delete files: %w", err)
		}
		// Accumulate quota changes and report stale entities for recycling.
		storageDiff.Merge(diff)
		allStaleEntities = append(allStaleEntities, lo.Map(staleEntities, func(item *ent.Entity, index int) fs.Entity {
			return fs.NewEntity(item)
		})...)
	}

	return allStaleEntities, storageDiff, nil
}
|
||||
|
||||
// copyFiles copies each navigator's target subtree level-by-level into
// destination, checking the destination owner's remaining capacity before
// every level. It returns a map from each first-level source file's ID to its
// newly created copy, plus the merged storage diff. Must run inside an
// inventory transaction (FollowTx requires one in ctx).
func (f *DBFS) copyFiles(ctx context.Context, targets map[Navigator][]*File, destination *File, fc inventory.FileClient) (map[int]*ent.File, inventory.StorageDiff, error) {
	if f.user.Edges.Group == nil {
		return nil, nil, fmt.Errorf("user group not loaded")
	}
	// Walk budget shared across all navigators; at least 1.
	limit := max(f.user.Edges.Group.Settings.MaxWalkedFiles, 1)
	capacity, err := f.Capacity(ctx, destination.Owner())
	if err != nil {
		return nil, nil, fmt.Errorf("copy files: failed to destination owner capacity: %w", err)
	}

	// Ancestor chain of the destination, used as the initial parent set.
	dstAncestors := lo.Map(destination.AncestorsChain(), func(item *File, index int) *ent.File {
		return item.Model
	})

	// newTargetsMap is the map of between new target files in first layer, and its src file ID.
	newTargetsMap := make(map[int]*ent.File)
	storageDiff := make(inventory.StorageDiff)
	var diff inventory.StorageDiff
	for n, files := range targets {
		// Seed each top-level source with the destination's ancestor chain so
		// fc.Copy knows where the first layer lands.
		initialDstMap := make(map[int][]*ent.File)
		for _, file := range files {
			initialDstMap[file.Model.FileChildren] = dstAncestors
		}

		firstLayer := true
		// Let navigator use tx
		reset, err := n.FollowTx(ctx)
		if err != nil {
			return nil, nil, err
		}

		// NOTE(review): deferred in a loop — resets run when copyFiles
		// returns, not per navigator iteration. Confirm intended.
		defer reset()

		if err := n.Walk(ctx, files, limit, intsets.MaxInt, func(targets []*File, level int) error {
			// check capacity for each file
			sizeTotal := int64(0)
			for _, file := range targets {
				sizeTotal += file.SizeUsed()
			}

			if err := f.validateUserCapacityRaw(ctx, sizeTotal, capacity); err != nil {
				return fs.ErrInsufficientCapacity
			}

			limit -= len(targets)
			// Copy this level; fc.Copy returns the src->dst mapping used as
			// the parent mapping for the next level. Note: assigns the
			// closure-captured initialDstMap/diff, carrying state across levels.
			initialDstMap, diff, err = fc.Copy(ctx, lo.Map(targets, func(item *File, index int) *ent.File {
				return item.Model
			}), initialDstMap)
			if err != nil {
				if ent.IsConstraintError(err) {
					return fs.ErrFileExisted.WithError(err)
				}

				return serializer.NewError(serializer.CodeDBError, "Failed to copy files", err)
			}

			storageDiff.Merge(diff)

			// Record the first-level copies for the caller.
			if firstLayer {
				for k, v := range initialDstMap {
					newTargetsMap[k] = v[0]
				}
			}

			// Track usage locally so subsequent levels see the updated quota.
			capacity.Used += sizeTotal
			firstLayer = false

			return nil
		}); err != nil {
			return nil, nil, fmt.Errorf("failed to walk files: %w", err)
		}
	}

	return newTargetsMap, storageDiff, nil
}
|
||||
|
||||
func (f *DBFS) moveFiles(ctx context.Context, targets []*File, destination *File, fc inventory.FileClient, n Navigator) (inventory.StorageDiff, error) {
|
||||
models := lo.Map(targets, func(value *File, key int) *ent.File {
|
||||
return value.Model
|
||||
})
|
||||
|
||||
// Change targets' parent
|
||||
if err := fc.SetParent(ctx, models, destination.Model); err != nil {
|
||||
if ent.IsConstraintError(err) {
|
||||
return nil, fs.ErrFileExisted.WithError(err)
|
||||
}
|
||||
|
||||
return nil, serializer.NewError(serializer.CodeDBError, "Failed to move file", err)
|
||||
}
|
||||
|
||||
var (
|
||||
storageDiff inventory.StorageDiff
|
||||
)
|
||||
|
||||
// For files moved out from trash bin
|
||||
for _, file := range targets {
|
||||
if _, ok := file.Metadata()[MetadataRestoreUri]; !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// renaming it to its original name
|
||||
if _, err := fc.Rename(ctx, file.Model, file.DisplayName()); err != nil {
|
||||
if ent.IsConstraintError(err) {
|
||||
return nil, fs.ErrFileExisted.WithError(err)
|
||||
}
|
||||
|
||||
return storageDiff, serializer.NewError(serializer.CodeDBError, "Failed to rename file from trash bin to its original name", err)
|
||||
}
|
||||
|
||||
// Remove trash bin metadata
|
||||
if err := fc.RemoveMetadata(ctx, file.Model, MetadataRestoreUri, MetadataExpectedCollectTime); err != nil {
|
||||
return storageDiff, serializer.NewError(serializer.CodeDBError, "Failed to remove trash related metadata", err)
|
||||
}
|
||||
}
|
||||
|
||||
return storageDiff, nil
|
||||
}
|
||||
172
pkg/filemanager/fs/dbfs/my_navigator.go
Normal file
172
pkg/filemanager/fs/dbfs/my_navigator.go
Normal file
@@ -0,0 +1,172 @@
|
||||
package dbfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
)
|
||||
|
||||
// myNavigatorCapability is the capability bitset advertised by myNavigator;
// the zero-value BooleanSet leaves all capability bits at their defaults.
var myNavigatorCapability = &boolset.BooleanSet{}
|
||||
|
||||
// NewMyNavigator creates a navigator for user's "my" file system.
|
||||
func NewMyNavigator(u *ent.User, fileClient inventory.FileClient, userClient inventory.UserClient, l logging.Logger,
|
||||
config *setting.DBFS, hasher hashid.Encoder) Navigator {
|
||||
return &myNavigator{
|
||||
user: u,
|
||||
l: l,
|
||||
fileClient: fileClient,
|
||||
userClient: userClient,
|
||||
config: config,
|
||||
baseNavigator: newBaseNavigator(fileClient, defaultFilter, u, hasher, config),
|
||||
}
|
||||
}
|
||||
|
||||
// myNavigator navigates a single user's personal ("my") file system.
type myNavigator struct {
	l          logging.Logger       // logger
	user       *ent.User            // the user whose tree is navigated
	fileClient inventory.FileClient // file inventory access
	userClient inventory.UserClient // user inventory access

	config *setting.DBFS // dbfs settings (e.g. page size limits)
	*baseNavigator
	root           *File  // lazily-initialized cached root of the user's tree
	disableRecycle bool   // when true, Recycle() skips recycling root (state is persisted/restored)
	persist        func() // optional one-shot callback to persist state on Recycle
}
|
||||
|
||||
func (n *myNavigator) Recycle() {
|
||||
if n.persist != nil {
|
||||
n.persist()
|
||||
n.persist = nil
|
||||
}
|
||||
if n.root != nil && !n.disableRecycle {
|
||||
n.root.Recycle()
|
||||
}
|
||||
}
|
||||
|
||||
func (n *myNavigator) PersistState(kv cache.Driver, key string) {
|
||||
n.disableRecycle = true
|
||||
n.persist = func() {
|
||||
kv.Set(key, n.root, ContextHintTTL)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *myNavigator) RestoreState(s State) error {
|
||||
n.disableRecycle = true
|
||||
if state, ok := s.(*File); ok {
|
||||
n.root = state
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("invalid state type: %T", s)
|
||||
}
|
||||
|
||||
// To resolves path to a *File, lazily initializing and caching the user's root
// on first use. On a walk failure it returns the most-recent ancestor reached
// together with the error (callers rely on this partial result).
func (n *myNavigator) To(ctx context.Context, path *fs.URI) (*File, error) {
	if n.root == nil {
		// Anonymous user does not have a root folder.
		if inventory.IsAnonymousUser(n.user) {
			return nil, ErrLoginRequired
		}

		// Decode the user hashid embedded in the URI and make sure it refers
		// to the navigator's own user.
		fsUid, err := n.hasher.Decode(path.ID(hashid.EncodeUserID(n.hasher, n.user.ID)), hashid.UserID)
		if err != nil {
			return nil, fs.ErrPathNotExist.WithError(fmt.Errorf("invalid user id"))
		}
		if fsUid != n.user.ID {
			return nil, ErrPermissionDenied
		}

		targetUser, err := n.userClient.GetLoginUserByID(ctx, fsUid)
		if err != nil {
			return nil, fs.ErrPathNotExist.WithError(fmt.Errorf("user not found: %w", err))
		}

		rootFile, err := n.fileClient.Root(ctx, targetUser)
		if err != nil {
			// Caller is expected to initialize the fs on ErrFsNotInitialized.
			n.l.Info("User's root folder not found: %s, will initialize it.", err)
			return nil, ErrFsNotInitialized
		}

		// Cache the root with its path, owner and capability info filled in.
		n.root = newFile(nil, rootFile)
		rootPath := path.Root()
		n.root.Path[pathIndexRoot], n.root.Path[pathIndexUser] = rootPath, rootPath
		n.root.OwnerModel = targetUser
		n.root.IsUserRoot = true
		n.root.CapabilitiesBs = n.Capabilities(false).Capability
	}

	// Walk element by element from the cached root.
	current, lastAncestor := n.root, n.root
	elements := path.Elements()
	var err error
	for index, element := range elements {
		lastAncestor = current
		current, err = n.walkNext(ctx, current, element, index == len(elements)-1)
		if err != nil {
			// Return the deepest ancestor successfully reached.
			return lastAncestor, fmt.Errorf("failed to walk into %q: %w", element, err)
		}
	}

	return current, nil
}
|
||||
|
||||
// Children lists the children of parent, delegating to the base navigator.
func (n *myNavigator) Children(ctx context.Context, parent *File, args *ListArgs) (*ListResult, error) {
	return n.baseNavigator.children(ctx, parent, args)
}
|
||||
|
||||
// walkNext descends one path element from root, delegating to the base navigator.
func (n *myNavigator) walkNext(ctx context.Context, root *File, next string, isLeaf bool) (*File, error) {
	return n.baseNavigator.walkNext(ctx, root, next, isLeaf)
}
|
||||
|
||||
func (n *myNavigator) Capabilities(isSearching bool) *fs.NavigatorProps {
|
||||
res := &fs.NavigatorProps{
|
||||
Capability: myNavigatorCapability,
|
||||
OrderDirectionOptions: fullOrderDirectionOption,
|
||||
OrderByOptions: fullOrderByOption,
|
||||
MaxPageSize: n.config.MaxPageSize,
|
||||
}
|
||||
if isSearching {
|
||||
res.OrderByOptions = nil
|
||||
res.OrderDirectionOptions = nil
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// Walk traverses up to limit files starting from levelFiles down to depth,
// invoking f per level; delegates to the base navigator.
func (n *myNavigator) Walk(ctx context.Context, levelFiles []*File, limit, depth int, f WalkFunc) error {
	return n.baseNavigator.walk(ctx, levelFiles, limit, depth, f)
}
|
||||
|
||||
func (n *myNavigator) FollowTx(ctx context.Context) (func(), error) {
|
||||
if _, ok := ctx.Value(inventory.TxCtx{}).(*inventory.Tx); !ok {
|
||||
return nil, fmt.Errorf("navigator: no inherited transaction found in context")
|
||||
}
|
||||
newFileClient, _, _, err := inventory.WithTx(ctx, n.fileClient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newUserClient, _, _, err := inventory.WithTx(ctx, n.userClient)
|
||||
|
||||
oldFileClient, oldUserClient := n.fileClient, n.userClient
|
||||
revert := func() {
|
||||
n.fileClient = oldFileClient
|
||||
n.userClient = oldUserClient
|
||||
n.baseNavigator.fileClient = oldFileClient
|
||||
}
|
||||
|
||||
n.fileClient = newFileClient
|
||||
n.userClient = newUserClient
|
||||
n.baseNavigator.fileClient = newFileClient
|
||||
return revert, nil
|
||||
}
|
||||
|
||||
// ExecuteHook satisfies Navigator; the "my" file system has no hook actions,
// so it is a deliberate no-op.
func (n *myNavigator) ExecuteHook(ctx context.Context, hookType fs.HookType, file *File) error {
	return nil
}
|
||||
536
pkg/filemanager/fs/dbfs/navigator.go
Normal file
536
pkg/filemanager/fs/dbfs/navigator.go
Normal file
@@ -0,0 +1,536 @@
|
||||
package dbfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/constants"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
var (
	// Sentinel errors shared by the dbfs navigators.
	ErrFsNotInitialized = fmt.Errorf("fs not initialized")
	ErrPermissionDenied = serializer.NewError(serializer.CodeNoPermissionErr, "Permission denied", nil)

	ErrShareIncorrectPassword  = serializer.NewError(serializer.CodeIncorrectPassword, "Incorrect share password", nil)
	ErrFileCountLimitedReached = serializer.NewError(serializer.CodeFileCountLimitedReached, "Walked file count reached limit", nil)
	ErrSymbolicFolderFound     = serializer.NewError(serializer.CodeNoPermissionErr, "Symbolic folder cannot be walked into", nil)
	ErrLoginRequired           = serializer.NewError(serializer.CodeCheckLogin, "Login required", nil)

	// Ordering options advertised through Capabilities. Search mode uses a
	// reduced set (searchLimitedOrderByOption) or none at all.
	fullOrderByOption          = []string{"name", "size", "updated_at", "created_at"}
	searchLimitedOrderByOption = []string{"created_at"}
	fullOrderDirectionOption   = []string{"asc", "desc"}
)
|
||||
|
||||
type (
	// Navigator is a navigator for database file system.
	Navigator interface {
		// Recycle releases resources cached by the navigator.
		Recycle()
		// To returns the file by path. If given path is not exist, returns ErrFileNotFound and most-recent ancestor.
		To(ctx context.Context, path *fs.URI) (*File, error)
		// Children returns the children of the parent file.
		Children(ctx context.Context, parent *File, args *ListArgs) (*ListResult, error)
		// Capabilities returns the capabilities of the navigator.
		Capabilities(isSearching bool) *fs.NavigatorProps
		// Walk walks the file tree until limit is reached.
		Walk(ctx context.Context, levelFiles []*File, limit, depth int, f WalkFunc) error
		// PersistState tells navigator to persist the state of the navigator before recycle.
		PersistState(kv cache.Driver, key string)
		// RestoreState restores the state of the navigator.
		RestoreState(s State) error
		// FollowTx let the navigator inherit the transaction. Return a function to reset back to previous DB client.
		FollowTx(ctx context.Context) (func(), error)
		// ExecuteHook performs custom operations before or after certain actions.
		ExecuteHook(ctx context.Context, hookType fs.HookType, file *File) error
	}

	// State is an opaque snapshot produced by PersistState and consumed by
	// RestoreState; its concrete type is navigator-specific.
	State interface{}

	// NavigatorCapability enumerates operations a navigator may support.
	NavigatorCapability int
	// ListArgs carries the parameters of a Children call.
	ListArgs struct {
		// Page controls pagination (offset- or cursor-based).
		Page *inventory.PaginationArgs
		// Search, when non-nil, switches listing into search mode.
		Search *inventory.SearchFileParameters
		// SharedWithMe limits results to files shared with the current user.
		SharedWithMe bool
		// StreamCallback, when set, receives results incrementally; the
		// final ListResult.Files is then cleared (see baseNavigator.search).
		StreamCallback func([]*File)
	}
	// ListResult is the result of a list operation.
	ListResult struct {
		Files []*File
		// MixedType mirrors the flag from the inventory listing result —
		// presumably results span folder types; confirm in inventory package.
		MixedType  bool
		Pagination *inventory.PaginationResults
		// RecursionLimitReached is set when a recursive search stopped early
		// after visiting the configured maximum number of folders.
		RecursionLimitReached bool
		// SingleFileView indicates the listing is a single shared file.
		SingleFileView bool
	}
	// WalkFunc is invoked once per tree level with that level's files and
	// the zero-based level index.
	WalkFunc func([]*File, int) error
)
|
||||
|
||||
const (
	// NavigatorCapability values are bit positions in a boolset.BooleanSet
	// (see init below), so the iota order is significant. The community
	// placeholders reserve positions — do not remove or reorder entries.
	NavigatorCapabilityCreateFile NavigatorCapability = iota
	NavigatorCapabilityRenameFile
	NavigatorCapability_CommunityPlacehodler1
	NavigatorCapability_CommunityPlacehodler2
	NavigatorCapability_CommunityPlacehodler3
	NavigatorCapability_CommunityPlacehodler4
	NavigatorCapabilityUploadFile
	NavigatorCapabilityDownloadFile
	NavigatorCapabilityUpdateMetadata
	NavigatorCapabilityListChildren
	NavigatorCapabilityGenerateThumb
	NavigatorCapability_CommunityPlacehodler5
	NavigatorCapability_CommunityPlacehodler6
	NavigatorCapability_CommunityPlacehodler7
	NavigatorCapabilityDeleteFile
	NavigatorCapabilityLockFile
	NavigatorCapabilitySoftDelete
	NavigatorCapabilityRestore
	NavigatorCapabilityShare
	NavigatorCapabilityInfo
	NavigatorCapabilityVersionControl
	NavigatorCapability_CommunityPlacehodler8
	NavigatorCapability_CommunityPlacehodler9
	NavigatorCapabilityEnterFolder

	// searchTokenSeparator joins "<level>|<innerToken>" in recursive-search
	// page tokens (see parseSearchPageToken).
	searchTokenSeparator = "|"
)
|
||||
|
||||
// init precomputes the capability bitsets advertised by each navigator kind.
func init() {
	// "My" file system: the full read/write capability set.
	boolset.Sets(map[NavigatorCapability]bool{
		NavigatorCapabilityCreateFile:     true,
		NavigatorCapabilityRenameFile:     true,
		NavigatorCapabilityUploadFile:     true,
		NavigatorCapabilityDownloadFile:   true,
		NavigatorCapabilityUpdateMetadata: true,
		NavigatorCapabilityListChildren:   true,
		NavigatorCapabilityGenerateThumb:  true,
		NavigatorCapabilityDeleteFile:     true,
		NavigatorCapabilityLockFile:       true,
		NavigatorCapabilitySoftDelete:     true,
		NavigatorCapabilityShare:          true,
		NavigatorCapabilityInfo:           true,
		NavigatorCapabilityVersionControl: true,
		NavigatorCapabilityEnterFolder:    true,
	}, myNavigatorCapability)
	// Share links: a read-oriented subset (no create/rename/delete).
	boolset.Sets(map[NavigatorCapability]bool{
		NavigatorCapabilityDownloadFile:   true,
		NavigatorCapabilityListChildren:   true,
		NavigatorCapabilityGenerateThumb:  true,
		NavigatorCapabilityLockFile:       true,
		NavigatorCapabilityInfo:           true,
		NavigatorCapabilityVersionControl: true,
		NavigatorCapabilityEnterFolder:    true,
	}, shareNavigatorCapability)
	// Trash: list, delete permanently, restore, inspect.
	boolset.Sets(map[NavigatorCapability]bool{
		NavigatorCapabilityListChildren: true,
		NavigatorCapabilityDeleteFile:   true,
		NavigatorCapabilityLockFile:     true,
		NavigatorCapabilityRestore:      true,
		NavigatorCapabilityInfo:         true,
	}, trashNavigatorCapability)
	// Shared-with-me: browse and download only.
	boolset.Sets(map[NavigatorCapability]bool{
		NavigatorCapabilityListChildren: true,
		NavigatorCapabilityDownloadFile: true,
		NavigatorCapabilityEnterFolder:  true,
	}, sharedWithMeNavigatorCapability)
}
|
||||
|
||||
// ==================== Base Navigator ====================
|
||||
type (
|
||||
fileFilter func(ctx context.Context, f *File) (*File, bool)
|
||||
baseNavigator struct {
|
||||
fileClient inventory.FileClient
|
||||
listFilter fileFilter
|
||||
user *ent.User
|
||||
hasher hashid.Encoder
|
||||
config *setting.DBFS
|
||||
}
|
||||
)
|
||||
|
||||
var defaultFilter = func(ctx context.Context, f *File) (*File, bool) { return f, true }
|
||||
|
||||
func newBaseNavigator(fileClient inventory.FileClient, filterFunc fileFilter, user *ent.User,
|
||||
hasher hashid.Encoder, config *setting.DBFS) *baseNavigator {
|
||||
return &baseNavigator{
|
||||
fileClient: fileClient,
|
||||
listFilter: filterFunc,
|
||||
user: user,
|
||||
hasher: hasher,
|
||||
config: config,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *baseNavigator) walkNext(ctx context.Context, root *File, next string, isLeaf bool) (*File, error) {
|
||||
var model *ent.File
|
||||
if root != nil {
|
||||
model = root.Model
|
||||
if root.IsSymbolic() {
|
||||
return nil, ErrSymbolicFolderFound
|
||||
}
|
||||
|
||||
root.mu.Lock()
|
||||
if child, ok := root.Children[next]; ok && !isLeaf {
|
||||
root.mu.Unlock()
|
||||
return child, nil
|
||||
}
|
||||
root.mu.Unlock()
|
||||
}
|
||||
|
||||
child, err := b.fileClient.GetChildFile(ctx, model, b.user.ID, next, isLeaf)
|
||||
if err != nil {
|
||||
if ent.IsNotFound(err) {
|
||||
return nil, fs.ErrPathNotExist.WithError(err)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("faield to get child %q: %w", next, err)
|
||||
}
|
||||
|
||||
return newFile(root, child), nil
|
||||
}
|
||||
|
||||
func (b *baseNavigator) walkUp(ctx context.Context, child *File) (*File, error) {
|
||||
parent, err := b.fileClient.GetParentFile(ctx, child.Model, false)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("faield to get Parent for %q: %w", child.Name(), err)
|
||||
}
|
||||
|
||||
return newParentFile(parent, child), nil
|
||||
}
|
||||
|
||||
// children lists the direct children of parent, or dispatches to search when
// args.Search is set. Each result passes through the navigator's listFilter,
// which may drop or replace entries. A nil parent lists the user's top level.
func (b *baseNavigator) children(ctx context.Context, parent *File, args *ListArgs) (*ListResult, error) {
	var model *ent.File
	if parent != nil {
		model = parent.Model
		// Only folders can be listed.
		if parent.Model.Type != int(types.FileTypeFolder) {
			return nil, fs.ErrPathNotExist
		}

		if parent.IsSymbolic() {
			return nil, ErrSymbolicFolderFound
		}

		// Refresh the parent's user-path entry; child URIs are derived from
		// it (see search, which joins child names onto parent.Uri(false)).
		parent.Path[pathIndexUser] = parent.Uri(false)
	}

	if args.Search != nil {
		return b.search(ctx, parent, args)
	}

	children, err := b.fileClient.GetChildFiles(ctx, &inventory.ListFileParameters{
		PaginationArgs: args.Page,
		SharedWithMe:   args.SharedWithMe,
	}, b.user.ID, model)
	if err != nil {
		return nil, fmt.Errorf("failed to get children: %w", err)
	}

	return &ListResult{
		Files: lo.FilterMap(children.Files, func(model *ent.File, index int) (*File, bool) {
			f := newFile(parent, model)
			return b.listFilter(ctx, f)
		}),
		MixedType:  children.MixedType,
		Pagination: children.PaginationResults,
	}, nil
}
|
||||
|
||||
func (b *baseNavigator) walk(ctx context.Context, levelFiles []*File, limit, depth int, f WalkFunc) error {
|
||||
walked := 0
|
||||
if len(levelFiles) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
owner := levelFiles[0].Owner()
|
||||
|
||||
level := 0
|
||||
for walked <= limit && depth >= 0 {
|
||||
if len(levelFiles) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
stop := false
|
||||
depth--
|
||||
if len(levelFiles) > limit-walked {
|
||||
levelFiles = levelFiles[:limit-walked]
|
||||
stop = true
|
||||
}
|
||||
if err := f(levelFiles, level); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if stop {
|
||||
return ErrFileCountLimitedReached
|
||||
}
|
||||
|
||||
walked += len(levelFiles)
|
||||
folders := lo.Filter(levelFiles, func(f *File, index int) bool {
|
||||
return f.Model.Type == int(types.FileTypeFolder) && !f.IsSymbolic()
|
||||
})
|
||||
|
||||
if walked >= limit || len(folders) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
levelFiles = levelFiles[:0]
|
||||
leftCredit := limit - walked
|
||||
parents := lo.SliceToMap(folders, func(file *File) (int, *File) {
|
||||
return file.Model.ID, file
|
||||
})
|
||||
for leftCredit > 0 {
|
||||
token := ""
|
||||
res, err := b.fileClient.GetChildFiles(ctx,
|
||||
&inventory.ListFileParameters{
|
||||
PaginationArgs: &inventory.PaginationArgs{
|
||||
UseCursorPagination: true,
|
||||
PageToken: token,
|
||||
PageSize: leftCredit,
|
||||
},
|
||||
MixedType: true,
|
||||
},
|
||||
owner.ID,
|
||||
lo.Map(folders, func(item *File, index int) *ent.File {
|
||||
return item.Model
|
||||
})...)
|
||||
if err != nil {
|
||||
return serializer.NewError(serializer.CodeDBError, "Failed to list children", err)
|
||||
}
|
||||
|
||||
leftCredit -= len(res.Files)
|
||||
|
||||
levelFiles = append(levelFiles, lo.Map(res.Files, func(model *ent.File, index int) *File {
|
||||
p := parents[model.FileChildren]
|
||||
return newFile(p, model)
|
||||
})...)
|
||||
|
||||
// All files listed
|
||||
if res.NextPageToken == "" {
|
||||
break
|
||||
}
|
||||
|
||||
token = res.NextPageToken
|
||||
}
|
||||
level++
|
||||
}
|
||||
|
||||
if walked >= limit {
|
||||
return ErrFileCountLimitedReached
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// search performs a file search. With a nil parent it issues a single global
// ("mega") search query for the user; otherwise it searches recursively under
// parent: it walks folder levels breadth-first (bounded by
// config.MaxRecursiveSearchedFolder) and queries each level's folders until a
// page of results is filled. The page token encodes "<level>|<innerToken>" so
// a follow-up request can resume at the same level and cursor.
func (b *baseNavigator) search(ctx context.Context, parent *File, args *ListArgs) (*ListResult, error) {
	if parent == nil {
		// Performs mega search for all files in trash fs.
		children, err := b.fileClient.GetChildFiles(ctx, &inventory.ListFileParameters{
			PaginationArgs: args.Page,
			MixedType:      true,
			Search:         args.Search,
			SharedWithMe:   args.SharedWithMe,
		}, b.user.ID, nil)
		if err != nil {
			return nil, fmt.Errorf("failed to get children: %w", err)
		}

		return &ListResult{
			Files: lo.FilterMap(children.Files, func(model *ent.File, index int) (*File, bool) {
				f := newFile(parent, model)
				return b.listFilter(ctx, f)
			}),
			MixedType:  children.MixedType,
			Pagination: children.PaginationResults,
		}, nil
	}
	// Performs recursive search for all files under the given folder.
	// parents[i] maps folder ID -> *File for folders at depth i below parent.
	walkedFolder := 1
	parents := []map[int]*File{{parent.Model.ID: parent}}
	startLevel, innerPageToken, err := parseSearchPageToken(args.Page.PageToken)
	if err != nil {
		return nil, err
	}
	args.Page.PageToken = innerPageToken

	// stepLevel discovers the folders one level below parents[level],
	// appending the new level map to parents. It returns true when there are
	// no further folders to descend into.
	stepLevel := func(level int) (bool, error) {
		token := ""
		// We don't need metadata in level search.
		listCtx := context.WithValue(ctx, inventory.LoadFilePublicMetadata{}, nil)
		for walkedFolder <= b.config.MaxRecursiveSearchedFolder {
			// TODO: chunk parents into 30000 per group
			res, err := b.fileClient.GetChildFiles(listCtx,
				&inventory.ListFileParameters{
					PaginationArgs: &inventory.PaginationArgs{
						UseCursorPagination: true,
						PageToken:           token,
					},
					FolderOnly: true,
				},
				parent.Model.OwnerID,
				lo.MapToSlice(parents[level], func(k int, f *File) *ent.File {
					return f.Model
				})...)
			if err != nil {
				return false, serializer.NewError(serializer.CodeDBError, "Failed to list children", err)
			}

			// NOTE(review): this append runs once per fetched page, so if a
			// level requires multiple pages, multiple maps are appended for
			// the same level and parents[level+1] only sees the first page —
			// confirm whether multi-page levels are expected here.
			parents = append(parents, lo.SliceToMap(
				lo.FilterMap(res.Files, func(model *ent.File, index int) (*File, bool) {
					p := parents[level][model.FileChildren]
					f := newFile(p, model)
					f.Path[pathIndexUser] = p.Uri(false).Join(model.Name)
					return f, true
				}),
				func(f *File) (int, *File) {
					return f.Model.ID, f
				}))

			walkedFolder += len(parents[level+1])
			if res.NextPageToken == "" {
				break
			}

			token = res.NextPageToken
		}

		if len(parents) <= level+1 || len(parents[level+1]) == 0 {
			// All possible folders is searched
			return true, nil
		}

		return false, nil
	}

	// We need to walk from root folder to get the correct level.
	for level := 0; level < startLevel; level++ {
		stop, err := stepLevel(level)
		if err != nil {
			return nil, err
		}

		if stop {
			return &ListResult{}, nil
		}
	}

	// Search files starting from current level
	res := make([]*File, 0, args.Page.PageSize)
	args.Page.UseCursorPagination = true
	originalPageSize := args.Page.PageSize
	stop := false
	for len(res) < originalPageSize && walkedFolder <= b.config.MaxRecursiveSearchedFolder {
		// Only requires minimum number of files
		args.Page.PageSize = min(originalPageSize, originalPageSize-len(res))
		searchRes, err := b.fileClient.GetChildFiles(ctx,
			&inventory.ListFileParameters{
				PaginationArgs: args.Page,
				MixedType:      true,
				Search:         args.Search,
			},
			parent.Model.OwnerID,
			lo.MapToSlice(parents[startLevel], func(k int, f *File) *ent.File {
				return f.Model
			})...)

		if err != nil {
			return nil, serializer.NewError(serializer.CodeDBError, "Failed to search files", err)
		}

		newRes := lo.FilterMap(searchRes.Files, func(model *ent.File, index int) (*File, bool) {
			p := parents[startLevel][model.FileChildren]
			f := newFile(p, model)
			f.Path[pathIndexUser] = p.Uri(false).Join(model.Name)
			return b.listFilter(ctx, f)
		})
		res = append(res, newRes...)
		if args.StreamCallback != nil {
			args.StreamCallback(newRes)
		}

		args.Page.PageToken = searchRes.NextPageToken
		// If no more results under current level, move to next level
		if args.Page.PageToken == "" {
			if len(res) == originalPageSize {
				// Current page is full, no need to search more
				startLevel++
				break
			}

			finished, err := stepLevel(startLevel)
			if err != nil {
				return nil, err
			}

			if finished {
				stop = true
				// No more folders under next level, all result is presented
				break
			}

			startLevel++
		}
	}

	if args.StreamCallback != nil {
		// Clear res if it's streamed
		res = res[:0]
	}

	searchRes := &ListResult{
		Files:                 res,
		MixedType:             true,
		Pagination:            &inventory.PaginationResults{IsCursor: true},
		RecursionLimitReached: walkedFolder > b.config.MaxRecursiveSearchedFolder,
	}

	// Emit a resume token only when the search was neither limit-capped nor
	// fully exhausted.
	if walkedFolder <= b.config.MaxRecursiveSearchedFolder && !stop {
		searchRes.Pagination.NextPageToken = fmt.Sprintf("%d%s%s", startLevel, searchTokenSeparator, args.Page.PageToken)
	}

	return searchRes, nil
}
|
||||
|
||||
func parseSearchPageToken(token string) (int, string, error) {
|
||||
if token == "" {
|
||||
return 0, "", nil
|
||||
}
|
||||
|
||||
tokens := strings.Split(token, searchTokenSeparator)
|
||||
if len(tokens) != 2 {
|
||||
return 0, "", fmt.Errorf("invalid page token")
|
||||
}
|
||||
|
||||
level, err := strconv.Atoi(tokens[0])
|
||||
if err != nil || level < 0 {
|
||||
return 0, "", fmt.Errorf("invalid page token level")
|
||||
}
|
||||
|
||||
return level, tokens[1], nil
|
||||
}
|
||||
|
||||
// newMyUri returns the URI of the current user's "my" file system root.
// The parse error is ignored: the input is built from constants known to parse.
func newMyUri() *fs.URI {
	res, _ := fs.NewUriFromString(constants.CloudreveScheme + "://" + string(constants.FileSystemMy))
	return res
}

// newMyIDUri returns the "my" root URI of the user identified by hash ID uid.
func newMyIDUri(uid string) *fs.URI {
	res, _ := fs.NewUriFromString(fmt.Sprintf("%s://%s@%s", constants.CloudreveScheme, uid, constants.FileSystemMy))
	return res
}

// newTrashUri returns the trash URI for the entry named name.
func newTrashUri(name string) *fs.URI {
	res, _ := fs.NewUriFromString(fmt.Sprintf("%s://%s", constants.CloudreveScheme, constants.FileSystemTrash))
	return res.Join(name)
}

// newSharedWithMeUri returns the shared-with-me URI for the share id.
func newSharedWithMeUri(id string) *fs.URI {
	res, _ := fs.NewUriFromString(fmt.Sprintf("%s://%s", constants.CloudreveScheme, constants.FileSystemSharedWithMe))
	return res.Join(id)
}
|
||||
171
pkg/filemanager/fs/dbfs/options.go
Normal file
171
pkg/filemanager/fs/dbfs/options.go
Normal file
@@ -0,0 +1,171 @@
|
||||
package dbfs
|
||||
|
||||
import (
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
)
|
||||
|
||||
// dbfsOption aggregates the generic fs options with dbfs-specific switches.
// Fields are toggled through the With* functional options in this file.
type dbfsOption struct {
	*fs.FsOption
	loadFolderSummary          bool
	extendedInfo               bool
	loadFilePublicMetadata     bool
	loadFileShareIfOwned       bool
	loadEntityUser             bool
	loadFileEntities           bool
	useCursorPagination        bool
	pageToken                  string
	preferredStoragePolicy     *ent.StoragePolicy
	errOnConflict              bool
	previousVersion            string
	removeStaleEntities        bool
	requiredCapabilities       []NavigatorCapability
	generateContextHint        bool
	isSymbolicLink             bool
	noChainedCreation          bool
	streamListResponseCallback func(parent fs.File, file []fs.File)
	ancestor                   *File
}
|
||||
|
||||
// newDbfsOption returns an option holder with an empty embedded fs.FsOption,
// ready to be populated via apply.
func newDbfsOption() *dbfsOption {
	return &dbfsOption{
		FsOption: &fs.FsOption{},
	}
}
|
||||
|
||||
// apply dispatches opt to the right target: generic fs options mutate the
// embedded FsOption; dbfs-specific options mutate the dbfsOption itself.
// Options of any other type are silently ignored.
func (o *dbfsOption) apply(opt fs.Option) {
	if fsOpt, ok := opt.(fs.OptionFunc); ok {
		fsOpt.Apply(o.FsOption)
	} else if dbfsOpt, ok := opt.(optionFunc); ok {
		dbfsOpt.Apply(o)
	}
}

// optionFunc is a dbfs-specific functional option.
type optionFunc func(*dbfsOption)

// Apply implements fs.Option; it only acts on *dbfsOption targets.
func (f optionFunc) Apply(o any) {
	if dbfsO, ok := o.(*dbfsOption); ok {
		f(dbfsO)
	}
}
|
||||
|
||||
// ---- dbfs functional options ----
// Each With* below returns an fs.Option that flips one switch on dbfsOption.

// WithFilePublicMetadata enables loading file public metadata.
func WithFilePublicMetadata() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.loadFilePublicMetadata = true
	})
}

// WithContextHint enables generating context hint for the list operation.
func WithContextHint() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.generateContextHint = true
	})
}

// WithFileEntities enables loading file entities.
func WithFileEntities() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.loadFileEntities = true
	})
}

// WithCursorPagination enables cursor pagination for the list operation,
// resuming from the given pageToken (empty for the first page).
func WithCursorPagination(pageToken string) fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.useCursorPagination = true
		o.pageToken = pageToken
	})
}

// WithPreferredStoragePolicy sets the preferred storage policy for the upload operation.
func WithPreferredStoragePolicy(policy *ent.StoragePolicy) fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.preferredStoragePolicy = policy
	})
}

// WithErrorOnConflict sets to throw error on conflict for the create operation.
func WithErrorOnConflict() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.errOnConflict = true
	})
}

// WithPreviousVersion sets the previous version for the update operation.
func WithPreviousVersion(version string) fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.previousVersion = version
	})
}

// WithRemoveStaleEntities sets to remove stale entities for the update operation.
func WithRemoveStaleEntities() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.removeStaleEntities = true
	})
}

// WithRequiredCapabilities sets the navigator capabilities required for
// the requested operations.
func WithRequiredCapabilities(capabilities ...NavigatorCapability) fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.requiredCapabilities = capabilities
	})
}

// WithNoChainedCreation sets to disable chained creation for the create operation. This
// will require parent folder existed before creating new files under it.
func WithNoChainedCreation() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.noChainedCreation = true
	})
}

// WithFileShareIfOwned enables loading file share link if the file is owned by the user.
func WithFileShareIfOwned() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.loadFileShareIfOwned = true
	})
}

// WithStreamListResponseCallback sets the callback for handling stream list response.
func WithStreamListResponseCallback(callback func(parent fs.File, file []fs.File)) fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.streamListResponseCallback = callback
	})
}

// WithSymbolicLink sets the file is a symbolic link.
func WithSymbolicLink() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.isSymbolicLink = true
	})
}

// WithExtendedInfo enables loading extended info for the file.
func WithExtendedInfo() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.extendedInfo = true
	})
}

// WithLoadFolderSummary enables loading folder summary.
func WithLoadFolderSummary() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.loadFolderSummary = true
	})
}

// WithEntityUser enables loading entity user.
func WithEntityUser() fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.loadEntityUser = true
	})
}

// WithAncestor sets most recent ancestor for creating files.
func WithAncestor(f *File) fs.Option {
	return optionFunc(func(o *dbfsOption) {
		o.ancestor = f
	})
}
|
||||
324
pkg/filemanager/fs/dbfs/share_navigator.go
Normal file
324
pkg/filemanager/fs/dbfs/share_navigator.go
Normal file
@@ -0,0 +1,324 @@
|
||||
package dbfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/constants"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
)
|
||||
|
||||
var (
	// ErrShareNotFound is returned for missing, invalid or deleted shares.
	ErrShareNotFound = serializer.NewError(serializer.CodeNotFound, "Shared file does not exist", nil)
	// ErrNotPurchased is returned when a paid share has not been purchased.
	ErrNotPurchased = serializer.NewError(serializer.CodePurchaseRequired, "You need to purchased this share", nil)
)

const (
	// PurchaseTicketHeader is the request header carrying a purchase ticket.
	PurchaseTicketHeader = constants.CrHeaderPrefix + "Purchase-Ticket"
)

// shareNavigatorCapability is the capability bitset for share navigators,
// populated by init() in navigator.go.
var shareNavigatorCapability = &boolset.BooleanSet{}
|
||||
|
||||
// NewShareNavigator creates a navigator for user's "shared" file system.
|
||||
func NewShareNavigator(u *ent.User, fileClient inventory.FileClient, shareClient inventory.ShareClient,
|
||||
l logging.Logger, config *setting.DBFS, hasher hashid.Encoder) Navigator {
|
||||
n := &shareNavigator{
|
||||
user: u,
|
||||
l: l,
|
||||
fileClient: fileClient,
|
||||
shareClient: shareClient,
|
||||
config: config,
|
||||
}
|
||||
n.baseNavigator = newBaseNavigator(fileClient, defaultFilter, u, hasher, config)
|
||||
return n
|
||||
}
|
||||
|
||||
type (
	// shareNavigator implements Navigator for share-link file systems.
	shareNavigator struct {
		l           logging.Logger
		user        *ent.User
		fileClient  inventory.FileClient
		shareClient inventory.ShareClient
		config      *setting.DBFS

		*baseNavigator
		// shareRoot is the root exposed to the viewer; for a single-file
		// share it is reassigned to the shared file's parent (see Root).
		shareRoot *File
		// singleFileShare is true when the share points at a file, not a folder.
		singleFileShare bool
		// ownerRoot is the owner-side root resolved above the shared file.
		ownerRoot *File
		share     *ent.Share
		owner     *ent.User
		// disableRecycle prevents Recycle from releasing the cached tree,
		// set when state is persisted or restored across requests.
		disableRecycle bool
		// persist, when non-nil, writes the navigator state to cache; it is
		// invoked (once) by Recycle.
		persist func()
	}

	// shareNavigatorState is the snapshot written by PersistState and
	// consumed by RestoreState.
	shareNavigatorState struct {
		ShareRoot       *File
		OwnerRoot       *File
		SingleFileShare bool
		Share           *ent.Share
		Owner           *ent.User
	}
)
|
||||
|
||||
// PersistState arranges for the navigator's share state to be written to kv
// under key when Recycle is called, and disables tree recycling so the
// persisted *File graph stays usable after a later RestoreState.
func (n *shareNavigator) PersistState(kv cache.Driver, key string) {
	n.disableRecycle = true
	n.persist = func() {
		// Best-effort persistence; any failure from kv.Set is not surfaced here.
		kv.Set(key, shareNavigatorState{
			ShareRoot:       n.shareRoot,
			OwnerRoot:       n.ownerRoot,
			SingleFileShare: n.singleFileShare,
			Share:           n.share,
			Owner:           n.owner,
		}, ContextHintTTL)
	}
}
|
||||
|
||||
func (n *shareNavigator) RestoreState(s State) error {
|
||||
n.disableRecycle = true
|
||||
if state, ok := s.(shareNavigatorState); ok {
|
||||
n.shareRoot = state.ShareRoot
|
||||
n.ownerRoot = state.OwnerRoot
|
||||
n.singleFileShare = state.SingleFileShare
|
||||
n.share = state.Share
|
||||
n.owner = state.Owner
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("invalid state type: %T", s)
|
||||
}
|
||||
|
||||
// Recycle flushes any pending state persistence, then releases the cached
// file tree unless recycling was disabled by PersistState/RestoreState.
func (n *shareNavigator) Recycle() {
	if n.persist != nil {
		n.persist()
		n.persist = nil
	}

	if !n.disableRecycle {
		// Prefer the owner-side root when it was resolved; otherwise fall
		// back to the share-side root.
		if n.ownerRoot != nil {
			n.ownerRoot.Recycle()
		} else if n.shareRoot != nil {
			n.shareRoot.Recycle()
		}
	}
}
|
||||
|
||||
func (n *shareNavigator) Root(ctx context.Context, path *fs.URI) (*File, error) {
|
||||
ctx = context.WithValue(ctx, inventory.LoadShareUser{}, true)
|
||||
ctx = context.WithValue(ctx, inventory.LoadUserGroup{}, true)
|
||||
ctx = context.WithValue(ctx, inventory.LoadShareFile{}, true)
|
||||
share, err := n.shareClient.GetByHashID(ctx, path.ID(hashid.EncodeUserID(n.hasher, n.user.ID)))
|
||||
if err != nil {
|
||||
return nil, ErrShareNotFound.WithError(err)
|
||||
}
|
||||
|
||||
if err := inventory.IsValidShare(share); err != nil {
|
||||
return nil, ErrShareNotFound.WithError(err)
|
||||
}
|
||||
|
||||
n.owner = share.Edges.User
|
||||
|
||||
// Check password
|
||||
if share.Password != "" && share.Password != path.Password() {
|
||||
return nil, ErrShareIncorrectPassword
|
||||
}
|
||||
|
||||
// Share permission setting should overwrite root folder's permission
|
||||
n.shareRoot = newFile(nil, share.Edges.File)
|
||||
|
||||
// Find the user side root of the file.
|
||||
ownerRoot, err := n.findRoot(ctx, n.shareRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if n.shareRoot.Type() == types.FileTypeFile {
|
||||
n.singleFileShare = true
|
||||
n.shareRoot = n.shareRoot.Parent
|
||||
}
|
||||
|
||||
n.shareRoot.Path[pathIndexUser] = path.Root()
|
||||
n.shareRoot.OwnerModel = n.owner
|
||||
n.shareRoot.IsUserRoot = true
|
||||
n.shareRoot.CapabilitiesBs = n.Capabilities(false).Capability
|
||||
|
||||
// Check if any ancestors is deleted
|
||||
if ownerRoot.Name() != inventory.RootFolderName {
|
||||
return nil, ErrShareNotFound
|
||||
}
|
||||
|
||||
if n.user.ID != n.owner.ID && !n.user.Edges.Group.Permissions.Enabled(int(types.GroupPermissionShareDownload)) {
|
||||
return nil, serializer.NewError(
|
||||
serializer.CodeNoPermissionErr,
|
||||
fmt.Sprintf("You don't have permission to access share links"),
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
n.ownerRoot = ownerRoot
|
||||
n.ownerRoot.Path[pathIndexRoot] = newMyIDUri(hashid.EncodeUserID(n.hasher, n.owner.ID))
|
||||
n.share = share
|
||||
return n.shareRoot, nil
|
||||
}
|
||||
|
||||
// To resolves path inside the share, lazily initializing the share root on
// the first call. On a walk failure it returns the most-recent ancestor
// together with the wrapped error, matching the Navigator contract.
func (n *shareNavigator) To(ctx context.Context, path *fs.URI) (*File, error) {
	if n.shareRoot == nil {
		root, err := n.Root(ctx, path)
		if err != nil {
			return nil, err
		}

		n.shareRoot = root
	}

	current, lastAncestor := n.shareRoot, n.shareRoot
	elements := path.Elements()

	// If target is root of single file share, the root itself is the target.
	if len(elements) <= 1 && n.singleFileShare {
		file, err := n.latestSharedSingleFile(ctx)
		if err != nil {
			return nil, err
		}

		// A one-element path must name the shared file itself.
		if len(elements) == 1 && file.Name() != elements[0] {
			return nil, fs.ErrPathNotExist
		}

		return file, nil
	}

	var err error
	for index, element := range elements {
		lastAncestor = current
		current, err = n.walkNext(ctx, current, element, index == len(elements)-1)
		if err != nil {
			return lastAncestor, fmt.Errorf("failed to walk into %q: %w", element, err)
		}
	}

	return current, nil
}
|
||||
|
||||
func (n *shareNavigator) walkNext(ctx context.Context, root *File, next string, isLeaf bool) (*File, error) {
|
||||
nextFile, err := n.baseNavigator.walkNext(ctx, root, next, isLeaf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nextFile, nil
|
||||
}
|
||||
|
||||
// Children lists the children of parent. For single-file shares there is no
// real folder listing: the result is the (re-fetched) shared file itself,
// flagged as a single-file view.
func (n *shareNavigator) Children(ctx context.Context, parent *File, args *ListArgs) (*ListResult, error) {
	if n.singleFileShare {
		file, err := n.latestSharedSingleFile(ctx)
		if err != nil {
			return nil, err
		}

		return &ListResult{
			Files:          []*File{file},
			Pagination:     &inventory.PaginationResults{},
			SingleFileView: true,
		}, nil
	}

	return n.baseNavigator.children(ctx, parent, args)
}
|
||||
|
||||
func (n *shareNavigator) latestSharedSingleFile(ctx context.Context) (*File, error) {
|
||||
if n.singleFileShare {
|
||||
file, err := n.fileClient.GetByID(ctx, n.share.Edges.File.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f := newFile(n.shareRoot, file)
|
||||
f.OwnerModel = n.shareRoot.OwnerModel
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
return nil, fs.ErrPathNotExist
|
||||
}
|
||||
|
||||
func (n *shareNavigator) Capabilities(isSearching bool) *fs.NavigatorProps {
|
||||
res := &fs.NavigatorProps{
|
||||
Capability: shareNavigatorCapability,
|
||||
OrderDirectionOptions: fullOrderDirectionOption,
|
||||
OrderByOptions: fullOrderByOption,
|
||||
MaxPageSize: n.config.MaxPageSize,
|
||||
}
|
||||
|
||||
if isSearching {
|
||||
res.OrderByOptions = nil
|
||||
res.OrderDirectionOptions = nil
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (n *shareNavigator) FollowTx(ctx context.Context) (func(), error) {
|
||||
if _, ok := ctx.Value(inventory.TxCtx{}).(*inventory.Tx); !ok {
|
||||
return nil, fmt.Errorf("navigator: no inherited transaction found in context")
|
||||
}
|
||||
newFileClient, _, _, err := inventory.WithTx(ctx, n.fileClient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newSharClient, _, _, err := inventory.WithTx(ctx, n.shareClient)
|
||||
|
||||
oldFileClient, oldShareClient := n.fileClient, n.shareClient
|
||||
revert := func() {
|
||||
n.fileClient = oldFileClient
|
||||
n.shareClient = oldShareClient
|
||||
n.baseNavigator.fileClient = oldFileClient
|
||||
}
|
||||
|
||||
n.fileClient = newFileClient
|
||||
n.shareClient = newSharClient
|
||||
n.baseNavigator.fileClient = newFileClient
|
||||
return revert, nil
|
||||
}
|
||||
|
||||
func (n *shareNavigator) ExecuteHook(ctx context.Context, hookType fs.HookType, file *File) error {
|
||||
switch hookType {
|
||||
case fs.HookTypeBeforeDownload:
|
||||
if n.singleFileShare {
|
||||
return n.shareClient.Downloaded(ctx, n.share)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// findRoot finds the root folder of the given child.
|
||||
func (n *shareNavigator) findRoot(ctx context.Context, child *File) (*File, error) {
|
||||
root := child
|
||||
for {
|
||||
newRoot, err := n.baseNavigator.walkUp(ctx, root)
|
||||
if err != nil {
|
||||
if !ent.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
root = newRoot
|
||||
}
|
||||
|
||||
return root, nil
|
||||
}
|
||||
|
||||
// Walk delegates depth-limited tree traversal to the shared base navigator.
func (n *shareNavigator) Walk(ctx context.Context, levelFiles []*File, limit, depth int, f WalkFunc) error {
	return n.baseNavigator.walk(ctx, levelFiles, limit, depth, f)
}
|
||||
141
pkg/filemanager/fs/dbfs/sharewithme_navigator.go
Normal file
141
pkg/filemanager/fs/dbfs/sharewithme_navigator.go
Normal file
@@ -0,0 +1,141 @@
|
||||
package dbfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
)
|
||||
|
||||
var sharedWithMeNavigatorCapability = &boolset.BooleanSet{}
|
||||
|
||||
// NewSharedWithMeNavigator creates a navigator for user's "shared with me" file system.
|
||||
func NewSharedWithMeNavigator(u *ent.User, fileClient inventory.FileClient, l logging.Logger,
|
||||
config *setting.DBFS, hasher hashid.Encoder) Navigator {
|
||||
n := &sharedWithMeNavigator{
|
||||
user: u,
|
||||
l: l,
|
||||
fileClient: fileClient,
|
||||
config: config,
|
||||
hasher: hasher,
|
||||
}
|
||||
n.baseNavigator = newBaseNavigator(fileClient, defaultFilter, u, hasher, config)
|
||||
return n
|
||||
}
|
||||
|
||||
// sharedWithMeNavigator exposes files shared with the current user as a
// flat, single-level virtual file system.
type sharedWithMeNavigator struct {
	l          logging.Logger
	user       *ent.User
	fileClient inventory.FileClient
	config     *setting.DBFS
	hasher     hashid.Encoder

	// root caches the lazily-initialized virtual root file.
	root *File
	*baseNavigator
}
|
||||
|
||||
// Recycle releases navigator-held resources; this navigator holds none.
func (t *sharedWithMeNavigator) Recycle() {

}
|
||||
|
||||
// PersistState is a no-op: this navigator keeps no state worth persisting.
func (n *sharedWithMeNavigator) PersistState(kv cache.Driver, key string) {
}
|
||||
|
||||
// RestoreState is a no-op counterpart of PersistState.
func (n *sharedWithMeNavigator) RestoreState(s State) error {
	return nil
}
|
||||
|
||||
func (t *sharedWithMeNavigator) To(ctx context.Context, path *fs.URI) (*File, error) {
|
||||
// Anonymous user does not have a trash folder.
|
||||
if inventory.IsAnonymousUser(t.user) {
|
||||
return nil, ErrLoginRequired
|
||||
}
|
||||
|
||||
elements := path.Elements()
|
||||
if len(elements) > 0 {
|
||||
// Shared with me folder is a flatten tree, only root can be accessed.
|
||||
return nil, fs.ErrPathNotExist.WithError(fmt.Errorf("invalid Path %q", path))
|
||||
}
|
||||
|
||||
if t.root == nil {
|
||||
rootFile, err := t.fileClient.Root(ctx, t.user)
|
||||
if err != nil {
|
||||
t.l.Info("User's root folder not found: %s, will initialize it.", err)
|
||||
return nil, ErrFsNotInitialized
|
||||
}
|
||||
|
||||
t.root = newFile(nil, rootFile)
|
||||
rootPath := newSharedWithMeUri("")
|
||||
t.root.Path[pathIndexRoot], t.root.Path[pathIndexUser] = rootPath, rootPath
|
||||
t.root.OwnerModel = t.user
|
||||
t.root.IsUserRoot = true
|
||||
t.root.CapabilitiesBs = t.Capabilities(false).Capability
|
||||
}
|
||||
|
||||
return t.root, nil
|
||||
}
|
||||
|
||||
func (t *sharedWithMeNavigator) Children(ctx context.Context, parent *File, args *ListArgs) (*ListResult, error) {
|
||||
args.SharedWithMe = true
|
||||
res, err := t.baseNavigator.children(ctx, nil, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Adding user uri for each file.
|
||||
for i := 0; i < len(res.Files); i++ {
|
||||
res.Files[i].Path[pathIndexUser] = newSharedWithMeUri(hashid.EncodeFileID(t.hasher, res.Files[i].Model.ID))
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (t *sharedWithMeNavigator) Capabilities(isSearching bool) *fs.NavigatorProps {
|
||||
res := &fs.NavigatorProps{
|
||||
Capability: sharedWithMeNavigatorCapability,
|
||||
OrderDirectionOptions: fullOrderDirectionOption,
|
||||
OrderByOptions: fullOrderByOption,
|
||||
MaxPageSize: t.config.MaxPageSize,
|
||||
}
|
||||
|
||||
if isSearching {
|
||||
res.OrderByOptions = searchLimitedOrderByOption
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// Walk is not supported for the flat "shared with me" view.
func (t *sharedWithMeNavigator) Walk(ctx context.Context, levelFiles []*File, limit, depth int, f WalkFunc) error {
	return errors.New("not implemented")
}
|
||||
|
||||
func (n *sharedWithMeNavigator) FollowTx(ctx context.Context) (func(), error) {
|
||||
if _, ok := ctx.Value(inventory.TxCtx{}).(*inventory.Tx); !ok {
|
||||
return nil, fmt.Errorf("navigator: no inherited transaction found in context")
|
||||
}
|
||||
newFileClient, _, _, err := inventory.WithTx(ctx, n.fileClient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
oldFileClient := n.fileClient
|
||||
revert := func() {
|
||||
n.fileClient = oldFileClient
|
||||
n.baseNavigator.fileClient = oldFileClient
|
||||
}
|
||||
|
||||
n.fileClient = newFileClient
|
||||
n.baseNavigator.fileClient = newFileClient
|
||||
return revert, nil
|
||||
}
|
||||
|
||||
// ExecuteHook is a no-op: this navigator defines no custom hooks.
func (n *sharedWithMeNavigator) ExecuteHook(ctx context.Context, hookType fs.HookType, file *File) error {
	return nil
}
|
||||
137
pkg/filemanager/fs/dbfs/trash_navigator.go
Normal file
137
pkg/filemanager/fs/dbfs/trash_navigator.go
Normal file
@@ -0,0 +1,137 @@
|
||||
package dbfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
)
|
||||
|
||||
var trashNavigatorCapability = &boolset.BooleanSet{}
|
||||
|
||||
// NewTrashNavigator creates a navigator for user's "trash" file system.
|
||||
func NewTrashNavigator(u *ent.User, fileClient inventory.FileClient, l logging.Logger, config *setting.DBFS,
|
||||
hasher hashid.Encoder) Navigator {
|
||||
return &trashNavigator{
|
||||
user: u,
|
||||
l: l,
|
||||
fileClient: fileClient,
|
||||
config: config,
|
||||
baseNavigator: newBaseNavigator(fileClient, defaultFilter, u, hasher, config),
|
||||
}
|
||||
}
|
||||
|
||||
// trashNavigator exposes a user's trashed files as a flat, single-level
// virtual file system.
type trashNavigator struct {
	l          logging.Logger
	user       *ent.User
	fileClient inventory.FileClient
	config     *setting.DBFS

	*baseNavigator
}
|
||||
|
||||
// Recycle releases navigator-held resources; this navigator holds none.
func (t *trashNavigator) Recycle() {

}
|
||||
|
||||
// PersistState is a no-op: this navigator keeps no state worth persisting.
func (n *trashNavigator) PersistState(kv cache.Driver, key string) {
}
|
||||
|
||||
// RestoreState is a no-op counterpart of PersistState.
func (n *trashNavigator) RestoreState(s State) error {
	return nil
}
|
||||
|
||||
func (t *trashNavigator) To(ctx context.Context, path *fs.URI) (*File, error) {
|
||||
// Anonymous user does not have a trash folder.
|
||||
if inventory.IsAnonymousUser(t.user) {
|
||||
return nil, ErrLoginRequired
|
||||
}
|
||||
|
||||
elements := path.Elements()
|
||||
if len(elements) > 1 {
|
||||
// Trash folder is a flatten tree, only 1 layer is supported.
|
||||
return nil, fs.ErrPathNotExist.WithError(fmt.Errorf("invalid Path %q", path))
|
||||
}
|
||||
|
||||
if len(elements) == 0 {
|
||||
// Trash folder has no root.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
current, err := t.walkNext(ctx, nil, elements[0], true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to walk into %q: %w", elements[0], err)
|
||||
}
|
||||
|
||||
current.Path[pathIndexUser] = newTrashUri(current.Model.Name)
|
||||
current.Path[pathIndexRoot] = current.Path[pathIndexUser]
|
||||
current.OwnerModel = t.user
|
||||
return current, nil
|
||||
}
|
||||
|
||||
func (t *trashNavigator) Children(ctx context.Context, parent *File, args *ListArgs) (*ListResult, error) {
|
||||
if parent != nil {
|
||||
return nil, fs.ErrPathNotExist
|
||||
}
|
||||
|
||||
res, err := t.baseNavigator.children(ctx, nil, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Adding user uri for each file.
|
||||
for i := 0; i < len(res.Files); i++ {
|
||||
res.Files[i].Path[pathIndexUser] = newTrashUri(res.Files[i].Model.Name)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (t *trashNavigator) Capabilities(isSearching bool) *fs.NavigatorProps {
|
||||
res := &fs.NavigatorProps{
|
||||
Capability: trashNavigatorCapability,
|
||||
OrderDirectionOptions: fullOrderDirectionOption,
|
||||
OrderByOptions: fullOrderByOption,
|
||||
MaxPageSize: t.config.MaxPageSize,
|
||||
}
|
||||
|
||||
if isSearching {
|
||||
res.OrderByOptions = searchLimitedOrderByOption
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// Walk delegates depth-limited tree traversal to the shared base navigator.
func (t *trashNavigator) Walk(ctx context.Context, levelFiles []*File, limit, depth int, f WalkFunc) error {
	return t.baseNavigator.walk(ctx, levelFiles, limit, depth, f)
}
|
||||
|
||||
func (n *trashNavigator) FollowTx(ctx context.Context) (func(), error) {
|
||||
if _, ok := ctx.Value(inventory.TxCtx{}).(*inventory.Tx); !ok {
|
||||
return nil, fmt.Errorf("navigator: no inherited transaction found in context")
|
||||
}
|
||||
newFileClient, _, _, err := inventory.WithTx(ctx, n.fileClient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
oldFileClient := n.fileClient
|
||||
revert := func() {
|
||||
n.fileClient = oldFileClient
|
||||
n.baseNavigator.fileClient = oldFileClient
|
||||
}
|
||||
|
||||
n.fileClient = newFileClient
|
||||
n.baseNavigator.fileClient = newFileClient
|
||||
return revert, nil
|
||||
}
|
||||
|
||||
// ExecuteHook is a no-op: this navigator defines no custom hooks.
func (n *trashNavigator) ExecuteHook(ctx context.Context, hookType fs.HookType, file *File) error {
	return nil
}
|
||||
364
pkg/filemanager/fs/dbfs/upload.go
Normal file
364
pkg/filemanager/fs/dbfs/upload.go
Normal file
@@ -0,0 +1,364 @@
|
||||
package dbfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
)
|
||||
|
||||
// PrepareUpload validates an upload request, locks the target path, creates
// (or versions) the placeholder file inside a transaction, and returns an
// upload session describing where and how the client should upload.
// The acquired lock is intentionally kept alive via the session's LockToken.
func (f *DBFS) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ...fs.Option) (*fs.UploadSession, error) {
	// Get navigator for the target URI; it must support upload and lock.
	navigator, err := f.getNavigator(ctx, req.Props.Uri, NavigatorCapabilityUploadFile, NavigatorCapabilityLockFile)
	if err != nil {
		return nil, err
	}

	// Get most recent ancestor or the target file itself (entities loaded).
	ctx = context.WithValue(ctx, inventory.LoadFileEntity{}, true)
	ancestor, err := f.getFileByPath(ctx, navigator, req.Props.Uri)
	if err != nil && !ent.IsNotFound(err) {
		return nil, fmt.Errorf("failed to get ancestor: %w", err)
	}

	// Uploading through a symbolic folder is not allowed.
	if ancestor.IsSymbolic() {
		return nil, ErrSymbolicFolderFound
	}

	// The target already exists iff the resolved ancestor IS the target URI.
	fileExisted := false
	if ancestor.Uri(false).IsSame(req.Props.Uri, hashid.EncodeUserID(f.hasher, f.user.ID)) {
		fileExisted = true
	}

	// If file already exist, and update operation is suspended or existing file is not a file
	if fileExisted && (req.Props.EntityType == nil || ancestor.Type() != types.FileTypeFile) {
		return nil, fs.ErrFileExisted
	}

	// If file not exist, only empty entity / version entity is allowed
	if !fileExisted && (req.Props.EntityType != nil && *req.Props.EntityType != types.EntityTypeVersion) {
		return nil, fs.ErrPathNotExist
	}

	// Only the owner may upload, unless the bypass flag is set in ctx.
	if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && ancestor.OwnerID() != f.user.ID {
		return nil, fs.ErrOwnerOnly
	}

	// Lock the target path for the lifetime of the upload session.
	lockedPath := ancestor.RootUri().JoinRaw(req.Props.Uri.PathTrimmed())
	lr := &LockByPath{lockedPath, ancestor, types.FileTypeFile, ""}
	ls, err := f.acquireByPath(ctx, time.Until(req.Props.ExpireAt), f.user, false, fs.LockApp(fs.ApplicationUpload), lr)
	defer func() { _ = f.Release(ctx, ls) }()
	ctx = fs.LockSessionToContext(ctx, ls)
	if err != nil {
		return nil, err
	}

	// Get parent folder storage policy and performs validation
	policy, err := f.getPreferredPolicy(ctx, ancestor)
	if err != nil {
		return nil, err
	}

	// Validate name, extension and size against the policy.
	if err := validateNewFile(req.Props.Uri.Name(), req.Props.Size, policy); err != nil {
		return nil, err
	}

	// Validate available capacity
	if err := f.validateUserCapacity(ctx, req.Props.Size, ancestor.Owner()); err != nil {
		return nil, err
	}

	// Generate save path by storage policy. Thumbnails whose policy differs
	// from the file's policy get a randomized, suffixed path of their own.
	isThumbnailAndPolicyNotAvailable := policy.ID != ancestor.Model.StoragePolicyFiles && (req.Props.EntityType != nil && *req.Props.EntityType == types.EntityTypeThumbnail)
	if req.Props.SavePath == "" || isThumbnailAndPolicyNotAvailable {
		req.Props.SavePath = generateSavePath(policy, req, f.user)
		if isThumbnailAndPolicyNotAvailable {
			req.Props.SavePath = fmt.Sprintf(
				"%s.%s%s",
				req.Props.SavePath,
				util.RandStringRunes(16),
				f.settingClient.ThumbEntitySuffix(ctx))
		}
	}

	// Create upload placeholder inside a transaction.
	var (
		fileId     int
		entityId   int
		lockToken  string
		targetFile *ent.File
	)
	fc, dbTx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	if fileExisted {
		// Existing file: add a new entity (version by default).
		entityType := types.EntityTypeVersion
		if req.Props.EntityType != nil {
			entityType = *req.Props.EntityType
		}
		entity, err := f.CreateEntity(ctx, ancestor, policy, entityType, req,
			WithPreviousVersion(req.Props.PreviousVersion),
			fs.WithUploadRequest(req),
			WithRemoveStaleEntities(),
		)
		if err != nil {
			_ = inventory.Rollback(dbTx)
			return nil, fmt.Errorf("failed to create new entity: %w", err)
		}
		fileId = ancestor.ID()
		entityId = entity.ID()
		targetFile = ancestor.Model
		// Exclude the lock from the deferred Release so it survives this call.
		lockToken = ls.Exclude(lr, f.user, f.hasher)
	} else {
		// New file: create a placeholder file with an initial entity.
		uploadPlaceholder, err := f.Create(ctx, req.Props.Uri, types.FileTypeFile,
			fs.WithUploadRequest(req),
			WithPreferredStoragePolicy(policy),
			WithErrorOnConflict(),
			WithAncestor(ancestor),
		)
		if err != nil {
			_ = inventory.Rollback(dbTx)
			return nil, fmt.Errorf("failed to create upload placeholder: %w", err)
		}

		fileId = uploadPlaceholder.ID()
		entityId = uploadPlaceholder.Entities()[0].ID()
		targetFile = uploadPlaceholder.(*File).Model
		// Exclude the lock from the deferred Release so it survives this call.
		lockToken = ls.Exclude(lr, f.user, f.hasher)
	}

	// create metadata to record uploading entity id
	if err := fc.UpsertMetadata(ctx, targetFile, map[string]string{
		MetadataUploadSessionID: req.Props.UploadSessionID,
	}, nil); err != nil {
		_ = inventory.Rollback(dbTx)
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to update upload session metadata", err)
	}

	if err := inventory.CommitWithStorageDiff(ctx, dbTx, f.l, f.userClient); err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit file upload preparation", err)
	}

	session := &fs.UploadSession{
		Props: &fs.UploadProps{
			Uri:             req.Props.Uri,
			Size:            req.Props.Size,
			SavePath:        req.Props.SavePath,
			LastModified:    req.Props.LastModified,
			UploadSessionID: req.Props.UploadSessionID,
			ExpireAt:        req.Props.ExpireAt,
			EntityType:      req.Props.EntityType,
		},
		FileID:         fileId,
		NewFileCreated: !fileExisted,
		EntityID:       entityId,
		UID:            f.user.ID,
		Policy:         policy,
		CallbackSecret: util.RandStringRunes(32),
		LockToken:      lockToken, // Prevent lock being released.
	}

	// TODO: frontend should create new upload session if resumed session does not exist.
	return session, nil
}
|
||||
|
||||
// CompleteUpload finalizes an upload session: it upgrades the placeholder
// entity to its real type, applies the owner's version-retention policy,
// commits the change, releases the session lock, and returns the updated
// file.
func (f *DBFS) CompleteUpload(ctx context.Context, session *fs.UploadSession) (fs.File, error) {
	// Get placeholder file
	file, err := f.Get(ctx, session.Props.Uri, WithFileEntities())
	if err != nil {
		return nil, fmt.Errorf("failed to get placeholder file: %w", err)
	}

	filePrivate := file.(*File)

	// Confirm locks on placeholder file
	if session.LockToken != "" {
		release, ls, err := f.ConfirmLock(ctx, file, file.Uri(false), session.LockToken)
		if err != nil {
			return nil, fs.ErrLockExpired.WithError(err)
		}

		release()
		ctx = fs.LockSessionToContext(ctx, ls)
	}

	// Update placeholder entity to actual desired entity
	entityType := types.EntityTypeVersion
	if session.Props.EntityType != nil {
		entityType = *session.Props.EntityType
	}

	// Check version retention policy
	owner := filePrivate.Owner()
	// Max allowed versions; 1 unless retention applies to this file.
	maxVersions := 1
	if entityType == types.EntityTypeVersion &&
		owner.Settings.VersionRetention &&
		(len(owner.Settings.VersionRetentionExt) == 0 || util.IsInExtensionList(owner.Settings.VersionRetentionExt, file.Name())) {
		// Retention is enabled for this file
		maxVersions = owner.Settings.VersionRetentionMax
		if maxVersions == 0 {
			// Unlimited versions
			maxVersions = math.MaxInt32
		}
	}

	// Start transaction to update file
	fc, tx, ctx, err := inventory.WithTx(ctx, f.fileClient)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to start transaction", err)
	}

	err = fc.UpgradePlaceholder(ctx, filePrivate.Model, session.Props.LastModified, session.EntityID, entityType)
	if err != nil {
		_ = inventory.Rollback(tx)
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to update placeholder file", err)
	}

	// Remove metadata that are defined in upload session
	err = fc.RemoveMetadata(ctx, filePrivate.Model, MetadataUploadSessionID, ThumbDisabledKey)
	if err != nil {
		_ = inventory.Rollback(tx)
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to update placeholder metadata", err)
	}

	if len(session.Props.Metadata) > 0 {
		if err := fc.UpsertMetadata(ctx, filePrivate.Model, session.Props.Metadata, nil); err != nil {
			_ = inventory.Rollback(tx)
			return nil, serializer.NewError(serializer.CodeDBError, "Failed to upsert placeholder metadata", err)
		}
	}

	// Trim old version entities beyond the retention limit.
	diff, err := fc.CapEntities(ctx, filePrivate.Model, owner, maxVersions, entityType)
	if err != nil {
		_ = inventory.Rollback(tx)
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to cap version entities", err)
	}
	tx.AppendStorageDiff(diff)

	if entityType == types.EntityTypeVersion {
		// If updating version entity, we need to cap all existing thumbnail entity to let it re-generate.
		diff, err = fc.CapEntities(ctx, filePrivate.Model, owner, 0, types.EntityTypeThumbnail)
		if err != nil {
			_ = inventory.Rollback(tx)
			return nil, serializer.NewError(serializer.CodeDBError, "Failed to cap thumbnail entities", err)
		}

		tx.AppendStorageDiff(diff)
	}

	if err := inventory.CommitWithStorageDiff(ctx, tx, f.l, f.userClient); err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to commit file change", err)
	}

	// Unlock file
	if session.LockToken != "" {
		if err := f.ls.Unlock(time.Now(), session.LockToken); err != nil {
			return nil, serializer.NewError(serializer.CodeLockConflict, "Failed to unlock file", err)
		}
	}

	// Re-read the file so the caller sees the committed state.
	file, err = f.Get(ctx, session.Props.Uri, WithFileEntities())
	if err != nil {
		return nil, fmt.Errorf("failed to get updated file: %w", err)
	}

	return file, nil
}
|
||||
|
||||
// CancelUploadSession aborts an upload in progress and cleans up its
// placeholder entity (or the whole placeholder file if it has no other
// entities). It tolerates all of these states:
//   - File still locked by upload session
//   - File unlocked, upload session valid
//   - File unlocked, upload session not valid
func (f *DBFS) CancelUploadSession(ctx context.Context, path *fs.URI, sessionID string, session *fs.UploadSession) ([]fs.Entity, error) {
	// Get placeholder file
	file, err := f.Get(ctx, path, WithFileEntities())
	if err != nil {
		return nil, fmt.Errorf("failed to get placeholder file: %w", err)
	}

	filePrivate := file.(*File)

	// Make sure presented upload session is valid
	if session != nil && (session.UID != f.user.ID || session.FileID != file.ID()) {
		return nil, serializer.NewError(serializer.CodeNotFound, "Upload session not found", nil)
	}

	// Confirm locks on placeholder file. Failure is tolerated here: the
	// session's lock may have already expired.
	if session != nil && session.LockToken != "" {
		release, ls, err := f.ConfirmLock(ctx, file, file.Uri(false), session.LockToken)
		if err == nil {
			release()
			ctx = fs.LockSessionToContext(ctx, ls)
		}
	}

	// Only the owner may cancel, unless the bypass flag is set in ctx.
	if _, ok := ctx.Value(ByPassOwnerCheckCtxKey{}).(bool); !ok && filePrivate.OwnerID() != f.user.ID {
		return nil, fs.ErrOwnerOnly
	}

	// Lock file for the duration of the cancellation.
	ls, err := f.acquireByPath(ctx, -1, f.user, true, fs.LockApp(fs.ApplicationUpload),
		&LockByPath{filePrivate.Uri(true), filePrivate, filePrivate.Type(), ""})
	defer func() { _ = f.Release(ctx, ls) }()
	ctx = fs.LockSessionToContext(ctx, ls)
	if err != nil {
		return nil, err
	}

	// Find the placeholder entity belonging to this upload session.
	var entity fs.Entity
	for _, e := range filePrivate.Entities() {
		if sid := e.UploadSessionID(); sid != nil && sid.String() == sessionID {
			entity = e
			break
		}
	}

	// Remove upload session metadata
	if err := f.fileClient.RemoveMetadata(ctx, filePrivate.Model, MetadataUploadSessionID, ThumbDisabledKey); err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to remove upload session metadata", err)
	}

	if entity == nil {
		// Given upload session does not exist
		return nil, nil
	}

	// Release the session's lock on the way out, after cleanup succeeds.
	if session != nil && session.LockToken != "" {
		defer func() {
			if err := f.ls.Unlock(time.Now(), session.LockToken); err != nil {
				f.l.Warning("Failed to unlock file %q: %s", filePrivate.Uri(true).String(), err)
			}
		}()
	}

	if len(filePrivate.Entities()) == 1 {
		// Only one placeholder entity, just delete this file
		return f.Delete(ctx, []*fs.URI{path})
	}

	// Delete place holder entity
	storageDiff, err := f.deleteEntity(ctx, filePrivate, entity.ID())
	if err != nil {
		return nil, fmt.Errorf("failed to delete placeholder entity: %w", err)
	}

	if err := f.userClient.ApplyStorageDiff(ctx, storageDiff); err != nil {
		return nil, fmt.Errorf("failed to apply storage diff: %w", err)
	}

	return nil, nil
}
|
||||
88
pkg/filemanager/fs/dbfs/validator.go
Normal file
88
pkg/filemanager/fs/dbfs/validator.go
Normal file
@@ -0,0 +1,88 @@
|
||||
package dbfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const MaxFileNameLength = 256
|
||||
|
||||
// validateFileName validates the file name.
|
||||
func validateFileName(name string) error {
|
||||
if len(name) >= MaxFileNameLength || len(name) == 0 {
|
||||
return fmt.Errorf("length of name must be between 1 and 255")
|
||||
}
|
||||
|
||||
if strings.ContainsAny(name, "\\/:*?\"<>|") {
|
||||
return fmt.Errorf("name contains illegal characters")
|
||||
}
|
||||
|
||||
if name == "." || name == ".." {
|
||||
return fmt.Errorf("name cannot be only dot")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateExtension validates the file extension.
|
||||
func validateExtension(name string, policy *ent.StoragePolicy) error {
|
||||
// 不需要验证
|
||||
if len(policy.Settings.FileType) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !util.IsInExtensionList(policy.Settings.FileType, name) {
|
||||
return fmt.Errorf("file extension is not allowed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateFileSize validates the file size.
|
||||
func validateFileSize(size int64, policy *ent.StoragePolicy) error {
|
||||
if policy.MaxSize == 0 {
|
||||
return nil
|
||||
} else if size > policy.MaxSize {
|
||||
return fs.ErrFileSizeTooBig
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateNewFile validates the upload request.
|
||||
func validateNewFile(fileName string, size int64, policy *ent.StoragePolicy) error {
|
||||
if err := validateFileName(fileName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := validateExtension(fileName, policy); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := validateFileSize(size, policy); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *DBFS) validateUserCapacity(ctx context.Context, size int64, u *ent.User) error {
|
||||
capacity, err := f.Capacity(ctx, u)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get user capacity: %s", err)
|
||||
}
|
||||
|
||||
return f.validateUserCapacityRaw(ctx, size, capacity)
|
||||
}
|
||||
|
||||
// validateUserCapacityRaw validates the user capacity, but does not fetch
// the capacity: it checks that size additional bytes fit into the already
// fetched capacity snapshot.
func (f *DBFS) validateUserCapacityRaw(ctx context.Context, size int64, capacity *fs.Capacity) error {
	if capacity.Used+size > capacity.Total {
		return fs.ErrInsufficientCapacity
	}
	return nil
}
|
||||
763
pkg/filemanager/fs/fs.go
Normal file
763
pkg/filemanager/fs/fs.go
Normal file
@@ -0,0 +1,763 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/gob"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/gofrs/uuid"
|
||||
)
|
||||
|
||||
// FsCapability enumerates coarse-grained capabilities a FileSystem may support.
type FsCapability int

const (
	// FsCapabilityList indicates the filesystem supports listing files.
	FsCapabilityList = FsCapability(iota)
)
|
||||
|
||||
// Common filesystem errors, wrapped as serializer errors so API layers can
// map them onto stable response codes.
var (
	ErrDirectLinkInvalid    = serializer.NewError(serializer.CodeNotFound, "Direct link invalid", nil)
	ErrUnknownPolicyType    = serializer.NewError(serializer.CodeInternalSetting, "Unknown policy type", nil)
	ErrPathNotExist         = serializer.NewError(serializer.CodeParentNotExist, "Path not exist", nil)
	ErrFileDeleted          = serializer.NewError(serializer.CodeFileDeleted, "File deleted", nil)
	ErrEntityNotExist       = serializer.NewError(serializer.CodeEntityNotExist, "Entity not exist", nil)
	ErrFileExisted          = serializer.NewError(serializer.CodeObjectExist, "Object existed", nil)
	ErrNotSupportedAction   = serializer.NewError(serializer.CodeNoPermissionErr, "Not supported action", nil)
	ErrLockConflict         = serializer.NewError(serializer.CodeLockConflict, "Lock conflict", nil)
	ErrLockExpired          = serializer.NewError(serializer.CodeLockConflict, "Lock expired", nil)
	ErrModified             = serializer.NewError(serializer.CodeConflict, "Object conflict", nil)
	ErrIllegalObjectName    = serializer.NewError(serializer.CodeIllegalObjectName, "Invalid object name", nil)
	ErrFileSizeTooBig       = serializer.NewError(serializer.CodeFileTooLarge, "File is too large", nil)
	ErrInsufficientCapacity = serializer.NewError(serializer.CodeInsufficientCapacity, "Insufficient capacity", nil)
	ErrStaleVersion         = serializer.NewError(serializer.CodeStaleVersion, "File is updated during your edit", nil)
	ErrOwnerOnly            = serializer.NewError(serializer.CodeOwnerOnly, "Only owner or administrator can perform this action", nil)
	ErrArchiveSrcSizeTooBig = ErrFileSizeTooBig.WithError(fmt.Errorf("total size of to-be compressed file exceed group limit (%w)", queue.CriticalErr))
)
|
||||
|
||||
type (
	// FileSystem is the full database-backed filesystem facade, composing
	// locking, upload, and file management plus filesystem-level queries.
	FileSystem interface {
		LockSystem
		UploadManager
		FileManager
		// Recycle recycles a DBFS and its generated resources.
		Recycle()
		// Capacity returns the storage capacity of the filesystem.
		Capacity(ctx context.Context, u *ent.User) (*Capacity, error)
		// CheckCapability checks if the filesystem supports given capability.
		CheckCapability(ctx context.Context, uri *URI, opts ...Option) error
		// StaleEntities returns all stale entities of given IDs. If no ID is given, all
		// potential stale entities will be returned.
		StaleEntities(ctx context.Context, entities ...int) ([]Entity, error)
		// AllFilesInTrashBin returns all files in trash bin, despite owner.
		AllFilesInTrashBin(ctx context.Context, opts ...Option) (*ListFileResult, error)
		// Walk walks through all files under given path with given depth limit.
		Walk(ctx context.Context, path *URI, depth int, walk WalkFunc, opts ...Option) error
		// SharedAddressTranslation translates a path that potentially contains a shared symbolic link to a real address.
		SharedAddressTranslation(ctx context.Context, path *URI, opts ...Option) (File, *URI, error)
		// ExecuteNavigatorHooks executes hooks of given type on a file for navigator based custom hooks.
		ExecuteNavigatorHooks(ctx context.Context, hookType HookType, file File) error
	}

	// FileManager groups CRUD-style operations on files and folders.
	FileManager interface {
		// Get returns a file by its path.
		Get(ctx context.Context, path *URI, opts ...Option) (File, error)
		// Create creates a file.
		Create(ctx context.Context, path *URI, fileType types.FileType, opts ...Option) (File, error)
		// List lists files under given path.
		List(ctx context.Context, path *URI, opts ...Option) (File, *ListFileResult, error)
		// Rename renames a file.
		Rename(ctx context.Context, path *URI, newName string) (File, error)
		// MoveOrCopy moves (or, when isCopy is true, copies) files to dst.
		MoveOrCopy(ctx context.Context, path []*URI, dst *URI, isCopy bool) error
		// Delete performs hard-delete for given paths, return newly generated stale entities in this delete operation.
		Delete(ctx context.Context, path []*URI, opts ...Option) ([]Entity, error)
		// GetEntity returns a single entity by its ID.
		GetEntity(ctx context.Context, entityID int) (Entity, error)
		// PatchMetadata updates, inserts, or removes metadata entries of the given files.
		PatchMetadata(ctx context.Context, path []*URI, metas ...MetadataPatch) error
		// SoftDelete moves given files to trash bin.
		SoftDelete(ctx context.Context, path ...*URI) error
		// Restore restores given files from trash bin to its original location.
		Restore(ctx context.Context, path ...*URI) error
		// VersionControl performs version control on given file.
		// - `delete` is false: set version as current version;
		// - `delete` is true: delete version.
		VersionControl(ctx context.Context, path *URI, versionId int, delete bool) error
	}

	// UploadManager manages the lifecycle of upload sessions.
	UploadManager interface {
		// PrepareUpload prepares an upload session. It performs validation on upload request and returns a placeholder
		// file if needed.
		PrepareUpload(ctx context.Context, req *UploadRequest, opts ...Option) (*UploadSession, error)
		// CompleteUpload completes an upload session.
		CompleteUpload(ctx context.Context, session *UploadSession) (File, error)
		// CancelUploadSession cancels an upload session. Delete the placeholder file if no other entity is created.
		CancelUploadSession(ctx context.Context, path *URI, sessionID string, session *UploadSession) ([]Entity, error)
	}

	// LockSystem provides WebDAV-style locking of files.
	LockSystem interface {
		// ConfirmLock confirms if a lock token is valid on given URI.
		ConfirmLock(ctx context.Context, ancestor File, uri *URI, token ...string) (func(), LockSession, error)
		// Lock locks a file. If zeroDepth is true, only the file itself will be locked. Ancestor is closest ancestor
		// of the file that will be locked, if the given uri is an existing file, ancestor will be itself.
		// `token` is optional and can be used if the requester need to explicitly specify a token.
		Lock(ctx context.Context, d time.Duration, requester *ent.User, zeroDepth bool, application lock.Application,
			uri *URI, token string) (LockSession, error)
		// Unlock unlocks files by given tokens.
		Unlock(ctx context.Context, tokens ...string) error
		// Refresh refreshes a lock.
		Refresh(ctx context.Context, d time.Duration, token string) (lock.LockDetails, error)
	}

	// StatelessUploadManager performs upload operations on a remote node
	// without holding local session state.
	StatelessUploadManager interface {
		// PrepareUpload prepares the upload on the node.
		PrepareUpload(ctx context.Context, args *StatelessPrepareUploadService) (*StatelessPrepareUploadResponse, error)
		// CompleteUpload completes the upload on the node.
		CompleteUpload(ctx context.Context, args *StatelessCompleteUploadService) error
		// OnUploadFailed handles the failed upload on the node.
		OnUploadFailed(ctx context.Context, args *StatelessOnUploadFailedService) error
		// CreateFile creates a file on the node.
		CreateFile(ctx context.Context, args *StatelessCreateFileService) error
	}

	// WalkFunc is invoked for each file visited by FileSystem.Walk, with
	// the file's depth level relative to the walk root.
	WalkFunc func(file File, level int) error

	// File is a read-only view of a file or folder in the filesystem.
	File interface {
		IsNil() bool
		ID() int
		Name() string
		DisplayName() string
		Ext() string
		Type() types.FileType
		Size() int64
		UpdatedAt() time.Time
		CreatedAt() time.Time
		Metadata() map[string]string
		// Uri returns the URI of the file.
		Uri(isRoot bool) *URI
		Owner() *ent.User
		OwnerID() int
		// RootUri return the URI of the user root file under owner's view.
		RootUri() *URI
		Entities() []Entity
		PrimaryEntity() Entity
		PrimaryEntityID() int
		Shared() bool
		IsSymbolic() bool
		PolicyID() (id int)
		ExtendedInfo() *FileExtendedInfo
		FolderSummary() *FolderSummary
		Capabilities() *boolset.BooleanSet
	}

	// Entities is a list of Entity.
	Entities []Entity
	// Entity is a read-only view of one stored blob/version of a file.
	Entity interface {
		ID() int
		Type() types.EntityType
		Size() int64
		UpdatedAt() time.Time
		CreatedAt() time.Time
		Source() string
		ReferenceCount() int
		PolicyID() int
		UploadSessionID() *uuid.UUID
		CreatedBy() *ent.User
		Model() *ent.Entity
	}

	// FileExtendedInfo carries additional, lazily computed details of a file.
	FileExtendedInfo struct {
		StoragePolicy         *ent.StoragePolicy
		StorageUsed           int64
		Shares                []*ent.Share
		EntityStoragePolicies map[int]*ent.StoragePolicy
	}

	// FolderSummary is a cached aggregate of a folder's contents.
	FolderSummary struct {
		Size         int64     `json:"size"`
		Files        int       `json:"files"`
		Folders      int       `json:"folders"`
		Completed    bool      `json:"completed"` // whether the size calculation is completed
		CalculatedAt time.Time `json:"calculated_at"`
	}

	// MetadataPatch describes one metadata mutation: set Key=Value, or
	// remove Key when Remove is true.
	MetadataPatch struct {
		Key     string `json:"key" binding:"required"`
		Value   string `json:"value"`
		Private bool   `json:"private" binding:"ne=true"`
		Remove  bool   `json:"remove"`
	}

	// ListFileResult result of listing files.
	ListFileResult struct {
		Files                 []File
		Parent                File
		Pagination            *inventory.PaginationResults
		Props                 *NavigatorProps
		ContextHint           *uuid.UUID
		RecursionLimitReached bool
		MixedType             bool
		SingleFileView        bool
		StoragePolicy         *ent.StoragePolicy
	}

	// NavigatorProps is the properties of current filesystem.
	NavigatorProps struct {
		// Supported capabilities of the navigator.
		Capability *boolset.BooleanSet `json:"capability"`
		// MaxPageSize is the maximum page size of the navigator.
		MaxPageSize int `json:"max_page_size"`
		// OrderByOptions is the supported order by options of the navigator.
		OrderByOptions []string `json:"order_by_options"`
		// OrderDirectionOptions is the supported order direction options of the navigator.
		OrderDirectionOptions []string `json:"order_direction_options"`
	}

	// UploadCredential for uploading files in client side.
	UploadCredential struct {
		SessionID      string   `json:"session_id"`
		ChunkSize      int64    `json:"chunk_size"` // Chunk size in bytes; 0 means the upload is not chunked
		Expires        int64    `json:"expires"`    // Unix timestamp when the upload credential expires
		UploadURLs     []string `json:"upload_urls,omitempty"`
		Credential     string   `json:"credential,omitempty"`
		UploadID       string   `json:"uploadID,omitempty"`
		Callback       string   `json:"callback,omitempty"` // Callback URL
		Uri            string   `json:"uri,omitempty"`      // Storage path
		AccessKey      string   `json:"ak,omitempty"`
		KeyTime        string   `json:"keyTime,omitempty"` // Validity period used by COS
		CompleteURL    string   `json:"completeURL,omitempty"`
		StoragePolicy  *ent.StoragePolicy
		CallbackSecret string `json:"callback_secret,omitempty"`
		MimeType       string `json:"mime_type,omitempty"`     // Expected mimetype
		UploadPolicy   string `json:"upload_policy,omitempty"` // Upyun upload policy
	}

	// UploadSession stores the information of an upload session, used in server side.
	UploadSession struct {
		UID            int // ID of the user who initiated the upload
		Policy         *ent.StoragePolicy
		FileID         int    // ID of the placeholder file
		EntityID       int    // ID of the new entity
		Callback       string // Callback URL
		CallbackSecret string // Callback secret
		UploadID       string // Multi-part upload ID
		UploadURL      string
		Credential     string
		ChunkSize      int64
		SentinelTaskID int
		NewFileCreated bool // If new file is created for this session

		LockToken string // Token of the locked placeholder file
		Props     *UploadProps
	}

	// UploadProps properties of an upload session/request.
	UploadProps struct {
		Uri                    *URI
		Size                   int64
		UploadSessionID        string
		PreferredStoragePolicy int
		SavePath               string
		LastModified           *time.Time
		MimeType               string
		Metadata               map[string]string
		PreviousVersion        string
		// EntityType is the type of the entity to be created. If not set, a new file will be created
		// with a default version entity. This will be set in update request for existing files.
		EntityType *types.EntityType
		ExpireAt   time.Time
	}

	// FsOption options for underlying file system.
	FsOption struct {
		Page               int // Page number when listing files.
		PageSize           int // Size of pages when listing files.
		OrderBy            string
		OrderDirection     string
		UploadRequest      *UploadRequest
		UnlinkOnly         bool
		UploadSession      *UploadSession
		DownloadSpeed      int64
		IsDownload         bool
		Expire             *time.Time
		Entity             Entity
		IsThumb            bool
		EntityType         *types.EntityType
		EntityTypeNil      bool
		SkipSoftDelete     bool
		SysSkipSoftDelete  bool
		Metadata           map[string]string
		ArchiveCompression bool
		ProgressFunc
		MaxArchiveSize  int64
		DryRun          CreateArchiveDryRunFunc
		Policy          *ent.StoragePolicy
		Node            StatelessUploadManager
		StatelessUserID int
		NoCache         bool
	}

	// Option is an extra setting applied to filesystem requests.
	Option interface {
		Apply(any)
	}

	// OptionFunc adapts a plain function over *FsOption into an Option.
	OptionFunc func(*FsOption)

	// Ctx keys used to detect user canceled operation.
	UserCancelCtx struct{}
	GinCtx        struct{}

	// Capacity describes the capacity of a filesystem.
	Capacity struct {
		Total int64 `json:"total"`
		Used  int64 `json:"used"`
	}

	// FileCapacity enumerates per-file capabilities (see the const block below).
	FileCapacity int

	// LockSession exposes the token of the most recently acquired lock.
	LockSession interface {
		LastToken() string
	}

	// HookType identifies a navigator hook point.
	HookType int

	// CreateArchiveDryRunFunc is invoked for each would-be archived entry
	// during an archive dry run.
	CreateArchiveDryRunFunc func(name string, e Entity)

	// StatelessPrepareUploadService is the request body for preparing an
	// upload on a remote node.
	StatelessPrepareUploadService struct {
		UploadRequest *UploadRequest `json:"upload_request" binding:"required"`
		UserID        int            `json:"user_id"`
	}
	// StatelessCompleteUploadService is the request body for completing an
	// upload on a remote node.
	StatelessCompleteUploadService struct {
		UploadSession *UploadSession `json:"upload_session" binding:"required"`
		UserID        int            `json:"user_id"`
	}
	// StatelessOnUploadFailedService is the request body for reporting a
	// failed upload to a remote node.
	StatelessOnUploadFailedService struct {
		UploadSession *UploadSession `json:"upload_session" binding:"required"`
		UserID        int            `json:"user_id"`
	}
	// StatelessCreateFileService is the request body for creating a file on
	// a remote node.
	StatelessCreateFileService struct {
		Path   string         `json:"path" binding:"required"`
		Type   types.FileType `json:"type" binding:"required"`
		UserID int            `json:"user_id"`
	}
	// StatelessPrepareUploadResponse is the node's reply to a stateless
	// upload preparation.
	StatelessPrepareUploadResponse struct {
		Session *UploadSession
		Req     *UploadRequest
	}

	// PrepareRelocateRes is the result of preparing an entity relocation.
	PrepareRelocateRes struct {
		Entities  map[int]*RelocateEntity `json:"entities,omitempty"`
		LockToken string                  `json:"lock_token,omitempty"`
		Policy    *ent.StoragePolicy      `json:"policy,omitempty"`
	}

	// RelocateEntity describes one entity to be moved to a new save path.
	RelocateEntity struct {
		SrcEntity                *ent.Entity `json:"src_entity"`
		FileUri                  *URI        `json:"file_uri,omitempty"`
		NewSavePath              string      `json:"new_save_path"`
		ParentFiles              []int       `json:"parent_files"`
		PrimaryEntityParentFiles []int       `json:"primary_entity_parent_files"`
	}
)
|
||||
|
||||
// FileCapacity values enumerate per-file actions that can be checked
// against a file's capability set.
const (
	FileCapacityPreview FileCapacity = iota
	FileCapacityEnter
	FileCapacityDownload
	FileCapacityRename
	FileCapacityCopy
	FileCapacityMove
)

// Hook types for navigator-based custom hooks; currently only the
// before-download hook is defined.
const (
	HookTypeBeforeDownload = HookType(iota)
)
|
||||
|
||||
func (p *UploadProps) Copy() *UploadProps {
|
||||
newProps := *p
|
||||
return &newProps
|
||||
}
|
||||
|
||||
func (f OptionFunc) Apply(o any) {
|
||||
f(o.(*FsOption))
|
||||
}
|
||||
|
||||
// ==================== FS Options ====================
|
||||
|
||||
// WithUploadSession sets upload session for manager.
|
||||
func WithUploadSession(s *UploadSession) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.UploadSession = s
|
||||
})
|
||||
}
|
||||
|
||||
// WithPageSize limit items in a page for listing files.
|
||||
func WithPageSize(s int) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.PageSize = s
|
||||
})
|
||||
}
|
||||
|
||||
// WithPage set page number for listing files.
|
||||
func WithPage(p int) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.Page = p
|
||||
})
|
||||
}
|
||||
|
||||
// WithOrderBy set order by for listing files.
|
||||
func WithOrderBy(p string) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.OrderBy = p
|
||||
})
|
||||
}
|
||||
|
||||
// WithOrderDirection set order direction for listing files.
|
||||
func WithOrderDirection(p string) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.OrderDirection = p
|
||||
})
|
||||
}
|
||||
|
||||
// WithUploadRequest set upload request for uploading files.
|
||||
func WithUploadRequest(p *UploadRequest) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.UploadRequest = p
|
||||
})
|
||||
}
|
||||
|
||||
// WithProgressFunc set progress function for manager.
|
||||
func WithProgressFunc(p ProgressFunc) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.ProgressFunc = p
|
||||
})
|
||||
}
|
||||
|
||||
// WithUnlinkOnly set unlink only for unlinking files.
|
||||
func WithUnlinkOnly(p bool) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.UnlinkOnly = p
|
||||
})
|
||||
}
|
||||
|
||||
// WithDownloadSpeed sets download speed limit for manager.
|
||||
func WithDownloadSpeed(speed int64) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.DownloadSpeed = speed
|
||||
})
|
||||
}
|
||||
|
||||
func WithIsDownload(b bool) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.IsDownload = b
|
||||
})
|
||||
}
|
||||
|
||||
// WithSysSkipSoftDelete sets whether to skip soft delete without checking
|
||||
// file ownership.
|
||||
func WithSysSkipSoftDelete(b bool) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.SysSkipSoftDelete = b
|
||||
})
|
||||
}
|
||||
|
||||
// WithNoCache sets whether to disable cache for entity's URL.
|
||||
func WithNoCache(b bool) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.NoCache = b
|
||||
})
|
||||
}
|
||||
|
||||
// WithUrlExpire sets expire time for entity's URL.
|
||||
func WithUrlExpire(t *time.Time) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.Expire = t
|
||||
})
|
||||
}
|
||||
|
||||
// WithEntity sets entity for manager.
|
||||
func WithEntity(e Entity) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.Entity = e
|
||||
})
|
||||
}
|
||||
|
||||
// WithPolicy sets storage policy overwrite for manager.
|
||||
func WithPolicy(p *ent.StoragePolicy) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.Policy = p
|
||||
})
|
||||
}
|
||||
|
||||
// WithUseThumb sets whether entity's URL is used for thumbnail.
|
||||
func WithUseThumb(b bool) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.IsThumb = b
|
||||
})
|
||||
}
|
||||
|
||||
// WithEntityType sets entity type for manager.
|
||||
func WithEntityType(t types.EntityType) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.EntityType = &t
|
||||
})
|
||||
}
|
||||
|
||||
// WithNoEntityType sets entity type to nil for manager.
|
||||
func WithNoEntityType() Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.EntityTypeNil = true
|
||||
})
|
||||
}
|
||||
|
||||
// WithSkipSoftDelete sets whether to skip soft delete.
|
||||
func WithSkipSoftDelete(b bool) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.SkipSoftDelete = b
|
||||
})
|
||||
}
|
||||
|
||||
// WithMetadata sets metadata for file creation.
|
||||
func WithMetadata(m map[string]string) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.Metadata = m
|
||||
})
|
||||
}
|
||||
|
||||
// WithArchiveCompression sets whether to compress files in archive.
|
||||
func WithArchiveCompression(b bool) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.ArchiveCompression = b
|
||||
})
|
||||
}
|
||||
|
||||
// WithMaxArchiveSize sets maximum size of to be archived file or to-be decompressed
|
||||
// size, 0 for unlimited.
|
||||
func WithMaxArchiveSize(s int64) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.MaxArchiveSize = s
|
||||
})
|
||||
}
|
||||
|
||||
// WithDryRun sets whether to perform dry run.
|
||||
func WithDryRun(b CreateArchiveDryRunFunc) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.DryRun = b
|
||||
})
|
||||
}
|
||||
|
||||
// WithNode sets node for stateless upload manager.
|
||||
func WithNode(n StatelessUploadManager) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.Node = n
|
||||
})
|
||||
}
|
||||
|
||||
// WithStatelessUserID sets stateless user ID for manager.
|
||||
func WithStatelessUserID(id int) Option {
|
||||
return OptionFunc(func(o *FsOption) {
|
||||
o.StatelessUserID = id
|
||||
})
|
||||
}
|
||||
|
||||
// WriteMode controls how an upload writes into an existing file.
type WriteMode int

const (
	// ModeNone is the zero value: no special write behavior.
	ModeNone WriteMode = 0x00000
	// ModeOverwrite allows overwriting an existing file.
	ModeOverwrite WriteMode = 0x00001
	// Deprecated
	ModeNop WriteMode = 0x00004
)

type (
	// ProgressFunc reports transfer progress: the absolute number of bytes
	// processed so far, the delta since the previous report, and the
	// expected total size.
	ProgressFunc func(current, diff int64, total int64)
	// UploadRequest pairs upload session properties with the request body
	// stream. It implements io.Reader/io.Closer (and io.Seeker when Seeker
	// is set), reporting progress through ProgressFunc as bytes are read.
	UploadRequest struct {
		Props *UploadProps

		Mode         WriteMode
		File         io.ReadCloser `json:"-"`
		Seeker       io.Seeker     `json:"-"`
		Offset       int64
		ProgressFunc `json:"-"`

		read int64 // bytes consumed from File so far
	}
)
|
||||
|
||||
func (file *UploadRequest) Read(p []byte) (n int, err error) {
|
||||
if file.File != nil {
|
||||
n, err = file.File.Read(p)
|
||||
file.read += int64(n)
|
||||
if file.ProgressFunc != nil {
|
||||
file.ProgressFunc(file.read, int64(n), file.Props.Size)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
func (file *UploadRequest) Close() error {
|
||||
if file.File != nil {
|
||||
return file.File.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (file *UploadRequest) Seek(offset int64, whence int) (int64, error) {
|
||||
if file.Seekable() {
|
||||
previous := file.read
|
||||
o, err := file.Seeker.Seek(offset, whence)
|
||||
file.read = o
|
||||
if file.ProgressFunc != nil {
|
||||
file.ProgressFunc(o, file.read-previous, file.Props.Size)
|
||||
}
|
||||
return o, err
|
||||
}
|
||||
|
||||
return 0, errors.New("no seeker")
|
||||
}
|
||||
|
||||
func (file *UploadRequest) Seekable() bool {
|
||||
return file.Seeker != nil
|
||||
}
|
||||
|
||||
// init registers session/summary types with encoding/gob; registration
// is required so values of these types can round-trip through
// gob-encoded storage.
func init() {
	gob.Register(UploadSession{})
	gob.Register(FolderSummary{})
}

// ApplicationType identifies which feature/operation acquired a lock.
type ApplicationType string

const (
	ApplicationCreate         ApplicationType = "create"
	ApplicationRename         ApplicationType = "rename"
	ApplicationSetPermission  ApplicationType = "setPermission"
	ApplicationMoveCopy       ApplicationType = "moveCopy"
	ApplicationUpload         ApplicationType = "upload"
	ApplicationUpdateMetadata ApplicationType = "updateMetadata"
	ApplicationDelete         ApplicationType = "delete"
	ApplicationSoftDelete     ApplicationType = "softDelete"
	ApplicationDAV            ApplicationType = "dav"
	ApplicationVersionControl ApplicationType = "versionControl"
	ApplicationViewer         ApplicationType = "viewer"
	ApplicationMount          ApplicationType = "mount"
	ApplicationRelocate       ApplicationType = "relocate"
)

// LockApp wraps an ApplicationType into a lock.Application.
func LockApp(a ApplicationType) lock.Application {
	return lock.Application{Type: string(a)}
}

// LockSessionCtxKey is the context key under which a LockSession is stored.
type LockSessionCtxKey struct{}

// LockSessionToContext stores lock session to context.
func LockSessionToContext(ctx context.Context, session LockSession) context.Context {
	return context.WithValue(ctx, LockSessionCtxKey{}, session)
}
|
||||
|
||||
func FindDesiredEntity(file File, version string, hasher hashid.Encoder, entityType *types.EntityType) (bool, Entity) {
|
||||
if version == "" {
|
||||
return true, file.PrimaryEntity()
|
||||
}
|
||||
|
||||
requestedVersion, err := hasher.Decode(version, hashid.EntityID)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
hasVersions := false
|
||||
for _, entity := range file.Entities() {
|
||||
if entity.Type() == types.EntityTypeVersion {
|
||||
hasVersions = true
|
||||
}
|
||||
|
||||
if entity.ID() == requestedVersion && (entityType == nil || *entityType == entity.Type()) {
|
||||
return true, entity
|
||||
}
|
||||
}
|
||||
|
||||
// Happy path for: File has no versions, requested version is empty entity
|
||||
if !hasVersions && requestedVersion == 0 {
|
||||
return true, file.PrimaryEntity()
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// DbEntity is the ent-backed implementation of the Entity interface,
// wrapping an *ent.Entity model.
type DbEntity struct {
	model *ent.Entity
}

// NewEntity wraps an ent entity model into an Entity.
func NewEntity(model *ent.Entity) Entity {
	return &DbEntity{model: model}
}

// ID returns the entity's primary key.
func (e *DbEntity) ID() int {
	return e.model.ID
}

// Type returns the entity type (e.g. version, thumbnail).
func (e *DbEntity) Type() types.EntityType {
	return types.EntityType(e.model.Type)
}

// Size returns the stored blob size in bytes.
func (e *DbEntity) Size() int64 {
	return e.model.Size
}

// UpdatedAt returns the last modification time of the entity record.
func (e *DbEntity) UpdatedAt() time.Time {
	return e.model.UpdatedAt
}

// CreatedAt returns the creation time of the entity record.
func (e *DbEntity) CreatedAt() time.Time {
	return e.model.CreatedAt
}

// CreatedBy returns the user edge of the entity; nil if the edge is not loaded.
func (e *DbEntity) CreatedBy() *ent.User {
	return e.model.Edges.User
}

// Source returns the storage source path of the entity.
func (e *DbEntity) Source() string {
	return e.model.Source
}

// ReferenceCount returns how many files reference this entity.
func (e *DbEntity) ReferenceCount() int {
	return e.model.ReferenceCount
}

// PolicyID returns the ID of the storage policy this entity belongs to.
func (e *DbEntity) PolicyID() int {
	return e.model.StoragePolicyEntities
}

// UploadSessionID returns the pending upload session ID, or nil when
// the entity is not part of an in-flight upload.
func (e *DbEntity) UploadSessionID() *uuid.UUID {
	return e.model.UploadSessionID
}

// Model exposes the underlying ent model.
func (e *DbEntity) Model() *ent.Entity {
	return e.model
}

// NewEmptyEntity returns a zero-size placeholder Entity (zero ID, empty
// source) attributed to u, with a reference count of 1.
func NewEmptyEntity(u *ent.User) Entity {
	return &DbEntity{
		model: &ent.Entity{
			UpdatedAt:      time.Now(),
			ReferenceCount: 1,
			CreatedAt:      time.Now(),
			Edges: ent.EntityEdges{
				User: u,
			},
		},
	}
}
|
||||
40
pkg/filemanager/fs/mime/mime.go
Normal file
40
pkg/filemanager/fs/mime/mime.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package mime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"mime"
|
||||
"path"
|
||||
)
|
||||
|
||||
// MimeDetector resolves a file's mime type from its name.
type MimeDetector interface {
	// TypeByName returns the mime type for the given file name or path;
	// the implementation extracts the extension itself.
	TypeByName(ext string) string
}

// mimeDetector implements MimeDetector using a configurable
// extension-to-mime mapping that takes precedence over the standard
// library's table.
type mimeDetector struct {
	mapping map[string]string
}
|
||||
|
||||
func NewMimeDetector(ctx context.Context, settings setting.Provider, l logging.Logger) MimeDetector {
|
||||
mappingStr := settings.MimeMapping(ctx)
|
||||
mapping := make(map[string]string)
|
||||
if err := json.Unmarshal([]byte(mappingStr), &mapping); err != nil {
|
||||
l.Error("Failed to unmarshal mime mapping: %s, fallback to empty mapping", err)
|
||||
}
|
||||
|
||||
return &mimeDetector{
|
||||
mapping: mapping,
|
||||
}
|
||||
}
|
||||
|
||||
func (d *mimeDetector) TypeByName(p string) string {
|
||||
ext := path.Ext(p)
|
||||
if m, ok := d.mapping[ext]; ok {
|
||||
return m
|
||||
}
|
||||
|
||||
return mime.TypeByExtension(ext)
|
||||
}
|
||||
421
pkg/filemanager/fs/uri.go
Normal file
421
pkg/filemanager/fs/uri.go
Normal file
@@ -0,0 +1,421 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/constants"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
// Separator is the path separator used inside Cloudreve URIs.
const (
	Separator = "/"
)

// Query-parameter names recognized when extracting search parameters
// from a URI (see SearchParameters).
const (
	QuerySearchName           = "name"
	QuerySearchNameOpOr       = "use_or"
	QuerySearchMetadataPrefix = "meta_"
	QuerySearchCaseFolding    = "case_folding"
	QuerySearchType           = "type"
	QuerySearchTypeCategory   = "category"
	QuerySearchSizeGte        = "size_gte"
	QuerySearchSizeLte        = "size_lte"
	QuerySearchCreatedGte     = "created_gte"
	QuerySearchCreatedLte     = "created_lte"
	QuerySearchUpdatedGte     = "updated_gte"
	QuerySearchUpdatedLte     = "updated_lte"
)

// URI is a thin wrapper over url.URL representing a Cloudreve file
// address; the host carries the filesystem type and the userinfo may
// carry a user ID and password.
type URI struct {
	U *url.URL
}
|
||||
|
||||
func NewUriFromString(u string) (*URI, error) {
|
||||
raw, err := url.Parse(u)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse uri: %w", err)
|
||||
}
|
||||
|
||||
if raw.Scheme != constants.CloudreveScheme {
|
||||
return nil, fmt.Errorf("unknown scheme: %s", raw.Scheme)
|
||||
}
|
||||
|
||||
if strings.HasSuffix(raw.Path, Separator) {
|
||||
raw.Path = strings.TrimSuffix(raw.Path, Separator)
|
||||
}
|
||||
|
||||
return &URI{U: raw}, nil
|
||||
}
|
||||
|
||||
func NewUriFromStrings(u ...string) ([]*URI, error) {
|
||||
res := make([]*URI, 0, len(u))
|
||||
for _, uri := range u {
|
||||
fsUri, err := NewUriFromString(uri)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res = append(res, fsUri)
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (u *URI) UnmarshalBinary(text []byte) error {
|
||||
raw, err := url.Parse(string(text))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse uri: %w", err)
|
||||
}
|
||||
|
||||
u.U = raw
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *URI) MarshalBinary() ([]byte, error) {
|
||||
return u.U.MarshalBinary()
|
||||
}
|
||||
|
||||
func (u *URI) MarshalJSON() ([]byte, error) {
|
||||
r := map[string]string{
|
||||
"uri": u.String(),
|
||||
}
|
||||
return json.Marshal(r)
|
||||
}
|
||||
|
||||
func (u *URI) UnmarshalJSON(text []byte) error {
|
||||
r := make(map[string]string)
|
||||
err := json.Unmarshal(text, &r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
u.U, err = url.Parse(r["uri"])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns the full textual form of the URI.
func (u *URI) String() string {
	return u.U.String()
}

// Name returns the last element of the URI's cleaned path.
func (u *URI) Name() string {
	return path.Base(u.Path())
}

// Dir returns the parent directory portion of the URI's cleaned path.
func (u *URI) Dir() string {
	return path.Dir(u.Path())
}

// Elements splits the path into its segments; nil for the root path.
func (u *URI) Elements() []string {
	res := strings.Split(u.PathTrimmed(), Separator)
	if len(res) == 1 && res[0] == "" {
		return nil
	}

	return res
}

// ID returns the user identifier embedded in the URI's userinfo. When
// absent, defaultUid is returned — except for the share filesystem,
// where an empty ID is returned instead.
func (u *URI) ID(defaultUid string) string {
	if u.U.User == nil {
		if u.FileSystem() != constants.FileSystemShare {
			return defaultUid
		}
		return ""
	}

	return u.U.User.Username()
}
|
||||
|
||||
func (u *URI) Path() string {
|
||||
p := u.U.Path
|
||||
if !strings.HasPrefix(u.U.Path, Separator) {
|
||||
p = Separator + u.U.Path
|
||||
}
|
||||
|
||||
return path.Clean(p)
|
||||
}
|
||||
|
||||
func (u *URI) PathTrimmed() string {
|
||||
return strings.TrimPrefix(u.Path(), Separator)
|
||||
}
|
||||
|
||||
func (u *URI) Password() string {
|
||||
if u.U.User == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
pwd, _ := u.U.User.Password()
|
||||
return pwd
|
||||
}
|
||||
|
||||
// Join returns a new URI with the given path elements appended. Each
// element is escaped via PathEscape before joining; the receiver is not
// modified. NOTE(review): PathEscape appears to be a package-local
// helper defined elsewhere in this file — confirm its escaping rules.
func (u *URI) Join(elem ...string) *URI {
	newUrl, _ := url.Parse(u.U.String())
	return &URI{U: newUrl.JoinPath(lo.Map(elem, func(s string, i int) string {
		return PathEscape(s)
	})...)}
}

// JoinRaw appends a raw separator-delimited path string by splitting it
// into elements and delegating to Join.
func (u *URI) JoinRaw(elem string) *URI {
	return u.Join(strings.Split(strings.TrimPrefix(elem, Separator), Separator)...)
}

// DirUri returns a new URI pointing at the parent directory.
func (u *URI) DirUri() *URI {
	newUrl, _ := url.Parse(u.U.String())
	newUrl.Path = path.Dir(newUrl.Path)

	return &URI{U: newUrl}
}

// Root returns a new URI pointing at the filesystem root, with any
// query string dropped.
func (u *URI) Root() *URI {
	newUrl, _ := url.Parse(u.U.String())
	newUrl.Path = Separator
	newUrl.RawQuery = ""

	return &URI{U: newUrl}
}

// SetQuery returns a copy of the URI with its raw query replaced by q.
func (u *URI) SetQuery(q string) *URI {
	newUrl, _ := url.Parse(u.U.String())
	newUrl.RawQuery = q
	return &URI{U: newUrl}
}
|
||||
|
||||
// IsSame reports whether p addresses the same file as u: same
// filesystem type, same resolved user ID (using uid as the default),
// and same cleaned path.
func (u *URI) IsSame(p *URI, uid string) bool {
	return p.FileSystem() == u.FileSystem() && p.ID(uid) == u.ID(uid) && u.Path() == p.Path()
}

// Rebase returns a new URI with target's path, taken relative to base,
// appended onto u's path. It is commonly used in WebDAV address
// translation with shared folder symlinks.
func (u *URI) Rebase(target, base *URI) *URI {
	targetPath := target.Path()
	basePath := base.Path()
	rebasedPath := strings.TrimPrefix(targetPath, basePath)

	newUrl, _ := url.Parse(u.U.String())
	newUrl.Path = path.Join(newUrl.Path, rebasedPath)
	return &URI{U: newUrl}
}

// FileSystem returns the filesystem type encoded in the URI host
// (lowercased).
func (u *URI) FileSystem() constants.FileSystemType {
	return constants.FileSystemType(strings.ToLower(u.U.Host))
}
|
||||
|
||||
// SearchParameters returns the search parameters from the URI. If no search parameters are present, nil is returned.
|
||||
func (u *URI) SearchParameters() *inventory.SearchFileParameters {
|
||||
q := u.U.Query()
|
||||
res := &inventory.SearchFileParameters{
|
||||
Metadata: make(map[string]string),
|
||||
}
|
||||
withSearch := false
|
||||
|
||||
if names, ok := q[QuerySearchName]; ok {
|
||||
withSearch = len(names) > 0
|
||||
res.Name = names
|
||||
}
|
||||
|
||||
if _, ok := q[QuerySearchNameOpOr]; ok {
|
||||
res.NameOperatorOr = true
|
||||
}
|
||||
|
||||
if _, ok := q[QuerySearchCaseFolding]; ok {
|
||||
res.CaseFolding = true
|
||||
}
|
||||
|
||||
if v, ok := q[QuerySearchTypeCategory]; ok {
|
||||
res.Category = v[0]
|
||||
withSearch = withSearch || len(res.Category) > 0
|
||||
}
|
||||
|
||||
if t, ok := q[QuerySearchType]; ok {
|
||||
fileType := types.FileTypeFromString(t[0])
|
||||
res.Type = &fileType
|
||||
withSearch = true
|
||||
}
|
||||
|
||||
for k, v := range q {
|
||||
if strings.HasPrefix(k, QuerySearchMetadataPrefix) {
|
||||
res.Metadata[strings.TrimPrefix(k, QuerySearchMetadataPrefix)] = v[0]
|
||||
withSearch = true
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := q[QuerySearchSizeGte]; ok {
|
||||
limit, err := strconv.ParseInt(v[0], 10, 64)
|
||||
if err == nil {
|
||||
res.SizeGte = limit
|
||||
withSearch = true
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := q[QuerySearchSizeLte]; ok {
|
||||
limit, err := strconv.ParseInt(v[0], 10, 64)
|
||||
if err == nil {
|
||||
res.SizeLte = limit
|
||||
withSearch = true
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := q[QuerySearchCreatedGte]; ok {
|
||||
limit, err := strconv.ParseInt(v[0], 10, 64)
|
||||
if err == nil {
|
||||
limit := time.Unix(limit, 0)
|
||||
res.CreatedAtGte = &limit
|
||||
withSearch = true
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := q[QuerySearchCreatedLte]; ok {
|
||||
limit, err := strconv.ParseInt(v[0], 10, 64)
|
||||
if err == nil {
|
||||
limit := time.Unix(limit, 0)
|
||||
res.CreatedAtLte = &limit
|
||||
withSearch = true
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := q[QuerySearchUpdatedGte]; ok {
|
||||
limit, err := strconv.ParseInt(v[0], 10, 64)
|
||||
if err == nil {
|
||||
limit := time.Unix(limit, 0)
|
||||
res.UpdatedAtGte = &limit
|
||||
withSearch = true
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := q[QuerySearchUpdatedLte]; ok {
|
||||
limit, err := strconv.ParseInt(v[0], 10, 64)
|
||||
if err == nil {
|
||||
limit := time.Unix(limit, 0)
|
||||
res.UpdatedAtLte = &limit
|
||||
withSearch = true
|
||||
}
|
||||
}
|
||||
|
||||
if withSearch {
|
||||
return res
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// EqualOrIsDescendantOf returns true if the URI is equal to the given URI or if it is a descendant of the given URI.
|
||||
func (u *URI) EqualOrIsDescendantOf(p *URI, uid string) bool {
|
||||
prefix := p.Path()
|
||||
if prefix[len(prefix)-1] != Separator[0] {
|
||||
prefix += Separator
|
||||
}
|
||||
|
||||
return p.FileSystem() == u.FileSystem() && p.ID(uid) == u.ID(uid) &&
|
||||
(strings.HasPrefix(u.Path(), prefix) || u.Path() == p.Path())
|
||||
}
|
||||
|
||||
// SearchCategoryFromString maps a user-supplied category name to its
// setting.SearchCategory constant, falling back to CategoryUnknown for
// anything unrecognized.
func SearchCategoryFromString(s string) setting.SearchCategory {
	switch s {
	case "image":
		return setting.CategoryImage
	case "video":
		return setting.CategoryVideo
	case "audio":
		return setting.CategoryAudio
	case "document":
		return setting.CategoryDocument
	default:
		return setting.CategoryUnknown
	}
}
|
||||
|
||||
func NewShareUri(id, password string) string {
|
||||
if password != "" {
|
||||
return fmt.Sprintf("%s://%s:%s@%s", constants.CloudreveScheme, id, password, constants.FileSystemShare)
|
||||
}
|
||||
return fmt.Sprintf("%s://%s@%s", constants.CloudreveScheme, id, constants.FileSystemShare)
|
||||
}
|
||||
|
||||
// PathEscape is same as url.PathEscape, with modifications to incoporate with JS encodeURI:
|
||||
// encodeURI() escapes all characters except:
|
||||
//
|
||||
// A–Z a–z 0–9 - _ . ! ~ * ' ( )
|
||||
// ; / ? : @ & = + $ , #
|
||||
func PathEscape(s string) string {
|
||||
hexCount := 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if shouldEscape(c) {
|
||||
hexCount++
|
||||
}
|
||||
}
|
||||
|
||||
if hexCount == 0 {
|
||||
return s
|
||||
}
|
||||
|
||||
var buf [64]byte
|
||||
var t []byte
|
||||
|
||||
required := len(s) + 2*hexCount
|
||||
if required <= len(buf) {
|
||||
t = buf[:required]
|
||||
} else {
|
||||
t = make([]byte, required)
|
||||
}
|
||||
|
||||
if hexCount == 0 {
|
||||
copy(t, s)
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] == ' ' {
|
||||
t[i] = '+'
|
||||
}
|
||||
}
|
||||
return string(t)
|
||||
}
|
||||
|
||||
j := 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch c := s[i]; {
|
||||
case shouldEscape(c):
|
||||
t[j] = '%'
|
||||
t[j+1] = upperhex[c>>4]
|
||||
t[j+2] = upperhex[c&15]
|
||||
j += 3
|
||||
default:
|
||||
t[j] = s[i]
|
||||
j++
|
||||
}
|
||||
}
|
||||
return string(t)
|
||||
}
|
||||
|
||||
// upperhex supplies the hexadecimal digits used by PathEscape when
// percent-encoding a byte.
const upperhex = "0123456789ABCDEF"
|
||||
|
||||
// shouldEscape reports whether byte c must be percent-encoded when it
// appears in a URL string, per RFC 3986 plus the extra characters that JS
// encodeURI leaves verbatim.
//
// Please be informed that for now shouldEscape does not check all
// reserved characters correctly. See golang.org/issue/5684.
func shouldEscape(c byte) bool {
	// §2.3 Unreserved characters (alphanum) are never escaped.
	isAlphaNum := ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9')
	if isAlphaNum {
		return false
	}

	// Marks and reserved characters kept verbatim by encodeURI.
	const unescaped = "-_.~!*'();/?:@&=+$,#"
	for i := 0; i < len(unescaped); i++ {
		if unescaped[i] == c {
			return false
		}
	}

	// Everything else must be escaped.
	return true
}
|
||||
467
pkg/filemanager/lock/memlock.go
Normal file
467
pkg/filemanager/lock/memlock.go
Normal file
@@ -0,0 +1,467 @@
|
||||
package lock
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"errors"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/gofrs/uuid"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
var (
	// ErrConfirmationFailed is returned by a LockSystem's Confirm method
	// when no provided token holds a lock covering the requested resource.
	ErrConfirmationFailed = errors.New("memlock: confirmation failed")
	// ErrNoSuchLock is returned when a token does not identify a known lock.
	ErrNoSuchLock = errors.New("memlock: no such lock")
	// ErrLocked is returned when an operation conflicts with a held lock.
	ErrLocked = errors.New("memlock: locked")
)
|
||||
|
||||
// LockSystem manages access to a collection of named resources. The elements
// in a lock name are separated by slash ('/', U+002F) characters, regardless
// of host operating system convention.
type LockSystem interface {
	// Create acquires the locks described by details, returning one token
	// per created lock. If any lock conflicts, none are kept.
	Create(now time.Time, details ...LockDetails) ([]string, error)
	// Unlock releases the locks identified by the given tokens.
	Unlock(now time.Time, tokens ...string) error
	// Confirm verifies one of the request's tokens covers the requested
	// resource; the returned func releases the hold, and the string is the
	// matched lock token.
	Confirm(now time.Time, requests LockInfo) (func(), string, error)
	// Refresh resets the timeout of the lock identified by token.
	Refresh(now time.Time, duration time.Duration, token string) (LockDetails, error)
}
|
||||
|
||||
// LockDetails are a lock's metadata.
type LockDetails struct {
	// Root is the root resource name being locked. For a zero-depth lock, the
	// root is the only resource being locked.
	Root string
	// Ns is the namespace of this lock.
	Ns string
	// Duration is the lock timeout. A negative duration means infinite.
	Duration time.Duration
	// Owner of this lock.
	Owner Owner
	// ZeroDepth is whether the lock has zero depth. If it does not have zero
	// depth, it has infinite depth.
	ZeroDepth bool
	// Type is the type of the file being locked. This is used to display a
	// user-friendly error message.
	Type types.FileType
	// Token optionally customizes the token of the lock. When empty, a
	// random UUID token is generated on creation.
	Token string
}
|
||||
|
||||
func (d *LockDetails) Key() string {
|
||||
return d.Ns + "/" + d.Root
|
||||
}
|
||||
|
||||
// Owner identifies who currently holds a lock.
type Owner struct {
	// Application is the application that currently holds this lock.
	Application Application `json:"application"`
}

// Application describes the client application holding a lock.
type Application struct {
	Type string `json:"type"`
	// InnerXML is opaque, client-supplied XML describing the owner.
	// NOTE(review): presumably the owner element of a WebDAV LOCK request —
	// confirm against callers.
	InnerXML string `json:"inner_xml,omitempty"`
	ViewerID string `json:"viewer_id,omitempty"`
}
|
||||
|
||||
// LockInfo is a lock confirmation request.
type LockInfo struct {
	// Ns is the namespace the resource lives in.
	Ns string
	// Root is the resource name to confirm a lock for.
	Root string
	// Token lists candidate lock tokens; any single match confirms.
	Token []string
}
|
||||
|
||||
// memLS is the in-memory LockSystem implementation. Exported methods lock mu
// and are safe for concurrent use; unexported helpers assume mu is held.
type memLS struct {
	l      logging.Logger
	hasher hashid.Encoder
	mu     sync.Mutex
	// byName maps namespace -> resource name -> node.
	byName map[string]map[string]*memLSNode
	// byToken maps lock token -> explicitly locked node.
	byToken map[string]*memLSNode
	// gen is not read or written anywhere in this file.
	// NOTE(review): looks like dead state — confirm before removing.
	gen uint64
	// byExpiry only contains those nodes whose LockDetails have a finite
	// Duration and are yet to expire.
	byExpiry byExpiry
}
|
||||
|
||||
// NewMemLS returns a new in-memory LockSystem.
|
||||
func NewMemLS(hasher hashid.Encoder, l logging.Logger) LockSystem {
|
||||
return &memLS{
|
||||
byName: make(map[string]map[string]*memLSNode),
|
||||
byToken: make(map[string]*memLSNode),
|
||||
hasher: hasher,
|
||||
l: l,
|
||||
}
|
||||
}
|
||||
|
||||
// Confirm verifies that one of the request's tokens holds a lock covering
// the requested namespace/root. On success the node is marked held (removing
// it from the expiry heap) and a release func plus the matched token are
// returned; otherwise ErrConfirmationFailed.
func (m *memLS) Confirm(now time.Time, request LockInfo) (func(), string, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.collectExpiredNodes(now)

	m.l.Debug("Memlock confirm: NS:%s, Root: %s, Token: %v", request.Ns, request.Root, request.Token)
	n := m.lookup(request.Ns, request.Root, request.Token...)
	if n == nil {
		return nil, "", ErrConfirmationFailed
	}

	m.hold(n)
	// The release closure re-acquires mu, so it must not be called while the
	// caller already holds the lock system's mutex.
	return func() {
		m.mu.Lock()
		defer m.mu.Unlock()
		m.unhold(n)
	}, n.token, nil
}
|
||||
|
||||
// Refresh resets the timeout of the lock identified by token to the given
// duration (negative means never expire). Fails with ErrNoSuchLock for
// unknown tokens and ErrLocked while the lock is held by a Confirm call.
func (m *memLS) Refresh(now time.Time, duration time.Duration, token string) (LockDetails, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.collectExpiredNodes(now)

	m.l.Debug("Memlock refresh: Token: %s, Duration: %v", token, duration)
	n := m.byToken[token]
	if n == nil {
		return LockDetails{}, ErrNoSuchLock
	}
	if n.held {
		return LockDetails{}, ErrLocked
	}
	// Drop the node from the expiry heap before rescheduling it with the
	// new duration.
	if n.byExpiryIndex >= 0 {
		heap.Remove(&m.byExpiry, n.byExpiryIndex)
	}
	n.details.Duration = duration
	if n.details.Duration >= 0 {
		n.expiry = now.Add(n.details.Duration)
		heap.Push(&m.byExpiry, n)
	}
	return n.details, nil
}
|
||||
|
||||
// Create acquires all requested locks atomically: each entry is checked for
// conflicts and then created; on the first conflict, processing stops, every
// lock created so far is rolled back, and a ConflictError is returned.
// On success the tokens of the created locks are returned in request order.
func (m *memLS) Create(now time.Time, details ...LockDetails) ([]string, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.collectExpiredNodes(now)

	conflicts := make([]*ConflictDetail, 0)
	locks := make([]*memLSNode, 0, len(details))
	for i, detail := range details {
		// TODO: remove in production
		// if !strings.Contains(detail.Ns, "my") && !strings.Contains(detail.Ns, "trash") {
		// 	panic("invalid namespace")
		// }
		// Check lock conflicts
		detail.Root = util.SlashClean(detail.Root)
		m.l.Debug("Memlock create: NS:%s, Root: %s, Duration: %v, ZeroDepth: %v", detail.Ns, detail.Root, detail.Duration, detail.ZeroDepth)
		conflict := m.canCreate(i, detail.Ns, detail.Root, detail.ZeroDepth)
		if len(conflict) > 0 {
			conflicts = append(conflicts, conflict...)
			// Stop processing more locks since there's already conflicts
			break
		} else {
			// Create locks
			n := m.create(detail.Ns, detail.Root, detail.Token)
			m.byToken[n.token] = n
			n.details = detail
			// Finite-duration locks are scheduled for expiry collection.
			if n.details.Duration >= 0 {
				n.expiry = now.Add(n.details.Duration)
				heap.Push(&m.byExpiry, n)
			}
			locks = append(locks, n)
		}
	}

	if len(conflicts) > 0 {
		// Roll back partially created locks so Create is all-or-nothing.
		for _, l := range locks {
			m.remove(l)
		}

		return nil, ConflictError(conflicts)
	}

	return lo.Map(locks, func(item *memLSNode, index int) string {
		return item.token
	}), nil
}
|
||||
|
||||
// canCreate checks whether a lock on ns/name with the given depth can be
// created. It returns the list of conflicting locks (index identifies the
// request within the caller's batch), or nil when creation is allowed.
// The caller must hold m.mu.
func (m *memLS) canCreate(index int, ns, name string, zeroDepth bool) []*ConflictDetail {
	n := m.byName[ns]
	if n == nil {
		// Namespace has no locks at all.
		return nil
	}

	conflicts := make([]*ConflictDetail, 0)
	canCreate := walkToRoot(name, func(name0 string, first bool) bool {
		n := m.byName[ns][name0]
		if n == nil {
			return true
		}

		if first {
			if n.token != "" {
				// The target node is already locked.
				conflicts = append(conflicts, n.toConflictDetail(index, m.hasher))
				return false
			}
			if !zeroDepth {
				// The requested lock depth is infinite, and the fact that n exists
				// (n != nil) means that a descendent of the target node is locked.
				conflicts = append(conflicts,
					lo.MapToSlice(n.childLocks, func(key string, value *memLSNode) *ConflictDetail {
						return value.toConflictDetail(index, m.hasher)
					},
					)...)
				return false
			}
		} else if n.token != "" && !n.details.ZeroDepth {
			// An ancestor of the target node is locked with infinite depth.
			conflicts = append(conflicts, n.toConflictDetail(index, m.hasher))
			return false
		}
		return true
	})

	if !canCreate {
		return conflicts
	}

	return nil
}
|
||||
|
||||
// Unlock releases the locks identified by the given tokens. An unknown token
// aborts immediately with ErrNoSuchLock; locks currently held by a Confirm
// call are reported as a ConflictError. Only when every token is releasable
// are the locks actually removed.
func (m *memLS) Unlock(now time.Time, tokens ...string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.collectExpiredNodes(now)
	conflicts := make([]*ConflictDetail, 0)
	toBeRemoved := make([]*memLSNode, 0, len(tokens))

	// First pass: validate every token before mutating any state.
	for i, token := range tokens {
		n := m.byToken[token]
		if n == nil {
			return ErrNoSuchLock
		}
		if n.held {
			conflicts = append(conflicts, n.toConflictDetail(i, m.hasher))
		} else {
			toBeRemoved = append(toBeRemoved, n)
		}
	}

	if len(conflicts) > 0 {
		return ConflictError(conflicts)
	}

	// Second pass: all tokens are releasable, remove them.
	for _, n := range toBeRemoved {
		m.remove(n)
	}

	return nil
}
|
||||
|
||||
func (m *memLS) collectExpiredNodes(now time.Time) {
|
||||
for len(m.byExpiry) > 0 {
|
||||
if now.Before(m.byExpiry[0].expiry) {
|
||||
break
|
||||
}
|
||||
m.remove(m.byExpiry[0])
|
||||
}
|
||||
}
|
||||
|
||||
// create materializes a lock node for ns/name, creating placeholder nodes
// for every ancestor up to the root and bumping their refCount. When token
// is empty a random UUID token is generated. The locked node itself receives
// the token; each ancestor records it in childLocks so conflicting locks can
// be attributed. Returns the locked node. The caller must hold m.mu.
func (m *memLS) create(ns, name, token string) (ret *memLSNode) {
	if _, ok := m.byName[ns]; !ok {
		m.byName[ns] = make(map[string]*memLSNode)
	}

	if token == "" {
		token = uuid.Must(uuid.NewV4()).String()
	}

	walkToRoot(name, func(name0 string, first bool) bool {
		n := m.byName[ns][name0]
		if n == nil {
			n = &memLSNode{
				details: LockDetails{
					Root: name0,
				},
				childLocks:    make(map[string]*memLSNode),
				byExpiryIndex: -1,
			}
			m.byName[ns][name0] = n
		}
		n.refCount++
		if first {
			// The first callback is the locked resource itself; ret is set
			// here and reused for the ancestor callbacks below.
			n.token = token
			ret = n
		} else {
			n.childLocks[token] = ret
		}
		return true
	})
	return ret
}
|
||||
|
||||
// lookup returns the first unheld lock among tokens that covers ns/name:
// either locking name exactly, or locking an ancestor with infinite depth.
// Returns nil when no token matches. The caller must hold m.mu.
func (m *memLS) lookup(ns, name string, tokens ...string) (n *memLSNode) {
	for _, token := range tokens {
		n = m.byToken[token]
		if n == nil || n.held {
			continue
		}
		if n.details.Ns != ns {
			continue
		}
		if name == n.details.Root {
			return n
		}
		// Zero-depth locks only cover their exact root.
		if n.details.ZeroDepth {
			continue
		}
		if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") {
			return n
		}
	}
	return nil
}
|
||||
|
||||
func (m *memLS) remove(n *memLSNode) {
|
||||
delete(m.byToken, n.token)
|
||||
token := n.token
|
||||
n.token = ""
|
||||
walkToRoot(n.details.Root, func(name0 string, first bool) bool {
|
||||
x := m.byName[n.details.Ns][name0]
|
||||
x.refCount--
|
||||
delete(x.childLocks, token)
|
||||
if x.refCount == 0 {
|
||||
delete(m.byName[n.details.Ns], name0)
|
||||
if len(m.byName[n.details.Ns]) == 0 {
|
||||
delete(m.byName, n.details.Root)
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
if n.byExpiryIndex >= 0 {
|
||||
heap.Remove(&m.byExpiry, n.byExpiryIndex)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *memLS) hold(n *memLSNode) {
|
||||
if n.held {
|
||||
panic("dbfs: memLS inconsistent held state")
|
||||
}
|
||||
n.held = true
|
||||
if n.details.Duration >= 0 && n.byExpiryIndex >= 0 {
|
||||
heap.Remove(&m.byExpiry, n.byExpiryIndex)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *memLS) unhold(n *memLSNode) {
|
||||
if !n.held {
|
||||
panic("dbfs: memLS inconsistent held state")
|
||||
}
|
||||
n.held = false
|
||||
if n.details.Duration >= 0 {
|
||||
heap.Push(&m.byExpiry, n)
|
||||
}
|
||||
}
|
||||
|
||||
func walkToRoot(name string, f func(name0 string, first bool) bool) bool {
|
||||
for first := true; ; first = false {
|
||||
if !f(name, first) {
|
||||
return false
|
||||
}
|
||||
if name == "/" {
|
||||
break
|
||||
}
|
||||
name = name[:strings.LastIndex(name, "/")]
|
||||
if name == "" {
|
||||
name = "/"
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// memLSNode is one entry in the per-namespace lock tree.
type memLSNode struct {
	// details are the lock metadata. Even if this node's name is not explicitly locked,
	// details.Root will still equal the node's name.
	details LockDetails
	// token is the unique identifier for this node's lock. An empty token means that
	// this node is not explicitly locked.
	token string
	// refCount is the number of self-or-descendent nodes that are explicitly locked.
	refCount int
	// expiry is when this node's lock expires.
	expiry time.Time
	// byExpiryIndex is the index of this node in memLS.byExpiry. It is -1
	// if this node does not expire, or has expired.
	byExpiryIndex int
	// held is whether this node's lock is actively held by a Confirm call.
	held bool
	// childLocks hold the relation between lock token and child locks.
	// This is used to find out who is locking this file.
	childLocks map[string]*memLSNode
}
|
||||
|
||||
func (n *memLSNode) toConflictDetail(index int, hasher hashid.Encoder) *ConflictDetail {
|
||||
return &ConflictDetail{
|
||||
Path: n.details.Root,
|
||||
Owner: Owner{
|
||||
Application: n.details.Owner.Application,
|
||||
},
|
||||
Token: n.token,
|
||||
Index: index,
|
||||
Type: n.details.Type,
|
||||
}
|
||||
}
|
||||
|
||||
type byExpiry []*memLSNode
|
||||
|
||||
func (b *byExpiry) Len() int {
|
||||
return len(*b)
|
||||
}
|
||||
|
||||
func (b *byExpiry) Less(i, j int) bool {
|
||||
return (*b)[i].expiry.Before((*b)[j].expiry)
|
||||
}
|
||||
|
||||
func (b *byExpiry) Swap(i, j int) {
|
||||
(*b)[i], (*b)[j] = (*b)[j], (*b)[i]
|
||||
(*b)[i].byExpiryIndex = i
|
||||
(*b)[j].byExpiryIndex = j
|
||||
}
|
||||
|
||||
func (b *byExpiry) Push(x interface{}) {
|
||||
n := x.(*memLSNode)
|
||||
n.byExpiryIndex = len(*b)
|
||||
*b = append(*b, n)
|
||||
}
|
||||
|
||||
func (b *byExpiry) Pop() interface{} {
|
||||
i := len(*b) - 1
|
||||
n := (*b)[i]
|
||||
(*b)[i] = nil
|
||||
n.byExpiryIndex = -1
|
||||
*b = (*b)[:i]
|
||||
return n
|
||||
}
|
||||
|
||||
// ConflictDetail represents a lock conflict that can be presented to end users.
type ConflictDetail struct {
	// Path is the root path of the conflicting lock.
	Path string `json:"path,omitempty"`
	// Token is the conflicting lock's token.
	Token string `json:"token,omitempty"`
	// Owner describes who holds the conflicting lock.
	Owner Owner `json:"owner,omitempty"`
	// Index is the position of the conflicting request within the caller's batch.
	Index int `json:"-"`
	// Type is the file type of the locked resource.
	Type types.FileType `json:"type"`
}
|
||||
|
||||
type ConflictError []*ConflictDetail
|
||||
|
||||
func (r ConflictError) Error() string {
|
||||
return "conflict with locked resource: " + strings.Join(
|
||||
lo.Map(r, func(item *ConflictDetail, index int) string {
|
||||
return "\"" + item.Path + "\""
|
||||
}), ",")
|
||||
}
|
||||
|
||||
func (r ConflictError) Unwrap() error {
|
||||
return ErrLocked
|
||||
}
|
||||
1
pkg/filemanager/lock/memlock_test.go
Normal file
1
pkg/filemanager/lock/memlock_test.go
Normal file
@@ -0,0 +1 @@
|
||||
package lock
|
||||
124
pkg/filemanager/manager/archive.go
Normal file
124
pkg/filemanager/manager/archive.go
Normal file
@@ -0,0 +1,124 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
|
||||
"golang.org/x/tools/container/intsets"
|
||||
)
|
||||
|
||||
// CreateArchive streams a zip archive of the given URIs into writer.
// Top-level files land at the archive root; folders are walked recursively
// with their relative paths preserved. Failures on individual entries are
// logged and counted instead of aborting; the failure count is returned.
// When o.MaxArchiveSize is positive, the archive aborts with
// fs.ErrArchiveSrcSizeTooBig once accumulated source size exceeds it.
func (m *manager) CreateArchive(ctx context.Context, uris []*fs.URI, writer io.Writer, opts ...fs.Option) (int, error) {
	o := newOption()
	for _, opt := range opts {
		opt.Apply(o)
	}

	failed := 0

	// List all top level files; any unresolvable URI fails the whole call.
	files := make([]fs.File, 0, len(uris))
	for _, uri := range uris {
		file, err := m.Get(ctx, uri, dbfs.WithFileEntities(), dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityDownloadFile))
		if err != nil {
			return 0, fmt.Errorf("failed to get file %s: %w", uri, err)
		}

		files = append(files, file)
	}

	zipWriter := zip.NewWriter(writer)
	defer zipWriter.Close()

	var compressed int64
	for _, file := range files {
		if file.Type() == types.FileTypeFile {
			// Single file: add it at the archive root.
			if err := m.compressFileToArchive(ctx, "/", file, zipWriter, o.ArchiveCompression, o.DryRun); err != nil {
				failed++
				m.l.Warning("Failed to compress file %s: %s, skipping it...", file.Uri(false), err)
			}

			compressed += file.Size()
			if o.ProgressFunc != nil {
				o.ProgressFunc(compressed, file.Size(), 0)
			}

			if o.MaxArchiveSize > 0 && compressed > o.MaxArchiveSize {
				return 0, fs.ErrArchiveSrcSizeTooBig
			}

		} else {
			// Folder: walk it to unlimited depth, skipping sub-folders and
			// symbolic links, and preserve paths relative to the folder.
			if err := m.Walk(ctx, file.Uri(false), intsets.MaxInt, func(f fs.File, level int) error {
				if f.Type() == types.FileTypeFolder || f.IsSymbolic() {
					return nil
				}
				if err := m.compressFileToArchive(ctx, strings.TrimPrefix(f.Uri(false).Dir(),
					file.Uri(false).Dir()), f, zipWriter, o.ArchiveCompression, o.DryRun); err != nil {
					failed++
					m.l.Warning("Failed to compress file %s: %s, skipping it...", f.Uri(false), err)
				}

				compressed += f.Size()
				if o.ProgressFunc != nil {
					o.ProgressFunc(compressed, f.Size(), 0)
				}

				if o.MaxArchiveSize > 0 && compressed > o.MaxArchiveSize {
					return fs.ErrArchiveSrcSizeTooBig
				}

				return nil
			}); err != nil {
				m.l.Warning("Failed to walk folder %s: %s, skipping it...", file.Uri(false), err)
				failed++
			}
		}
	}

	return failed, nil
}
|
||||
|
||||
func (m *manager) compressFileToArchive(ctx context.Context, parent string, file fs.File, zipWriter *zip.Writer,
|
||||
compression bool, dryrun fs.CreateArchiveDryRunFunc) error {
|
||||
es, err := m.GetEntitySource(ctx, file.PrimaryEntityID())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get entity source for file %s: %w", file.Uri(false), err)
|
||||
}
|
||||
|
||||
zipName := filepath.FromSlash(path.Join(parent, file.DisplayName()))
|
||||
if dryrun != nil {
|
||||
dryrun(zipName, es.Entity())
|
||||
return nil
|
||||
}
|
||||
|
||||
m.l.Debug("Compressing %s to archive...", file.Uri(false))
|
||||
header := &zip.FileHeader{
|
||||
Name: zipName,
|
||||
Modified: file.UpdatedAt(),
|
||||
UncompressedSize64: uint64(file.Size()),
|
||||
}
|
||||
|
||||
if !compression {
|
||||
header.Method = zip.Store
|
||||
} else {
|
||||
header.Method = zip.Deflate
|
||||
}
|
||||
|
||||
writer, err := zipWriter.CreateHeader(header)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create zip header for %s: %w", file.Uri(false), err)
|
||||
}
|
||||
|
||||
es.Apply(entitysource.WithContext(ctx))
|
||||
_, err = io.Copy(writer, es)
|
||||
return err
|
||||
|
||||
}
|
||||
365
pkg/filemanager/manager/entity.go
Normal file
365
pkg/filemanager/manager/entity.go
Normal file
@@ -0,0 +1,365 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/user"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
// EntityManagement exposes operations on file entities (stored blob
// versions): URL generation, entity sources, thumbnails, version management
// and recycling.
type EntityManagement interface {
	// GetEntityUrls gets download urls of given entities, return URLs and the earliest expiry time
	GetEntityUrls(ctx context.Context, args []GetEntityUrlArgs, opts ...fs.Option) ([]string, *time.Time, error)
	// GetUrlForRedirectedDirectLink gets redirected direct download link of given direct link
	GetUrlForRedirectedDirectLink(ctx context.Context, dl *ent.DirectLink, opts ...fs.Option) (string, *time.Time, error)
	// GetDirectLink gets permanent direct download link of given files
	GetDirectLink(ctx context.Context, urls ...*fs.URI) ([]DirectLink, error)
	// GetEntitySource gets source of given entity
	GetEntitySource(ctx context.Context, entityID int, opts ...fs.Option) (entitysource.EntitySource, error)
	// Thumbnail gets thumbnail entity of given file
	Thumbnail(ctx context.Context, uri *fs.URI) (entitysource.EntitySource, error)
	// SubmitAndAwaitThumbnailTask submits a thumbnail task and waits for result
	SubmitAndAwaitThumbnailTask(ctx context.Context, uri *fs.URI, ext string, entity fs.Entity) (fs.Entity, error)
	// SetCurrentVersion sets current version of given file
	SetCurrentVersion(ctx context.Context, path *fs.URI, version int) error
	// DeleteVersion deletes a version of given file
	DeleteVersion(ctx context.Context, path *fs.URI, version int) error
	// ExtractAndSaveMediaMeta extracts and saves media meta into file metadata of given file.
	ExtractAndSaveMediaMeta(ctx context.Context, uri *fs.URI, entityID int) error
	// RecycleEntities recycles a group of entities
	RecycleEntities(ctx context.Context, force bool, entityIDs ...int) error
}
|
||||
|
||||
// DirectLink pairs a file with its generated permanent direct download URL.
type DirectLink struct {
	File fs.File
	Url  string
}
|
||||
|
||||
// GetDirectLink generates permanent direct download links for the given
// URIs. Only the file owner may create a link. Depending on the group
// setting RedirectedSource, either a redirected master link or a direct
// storage source URL is produced. Per-URI failures are collected in an
// aggregate error returned alongside the successful results.
func (m *manager) GetDirectLink(ctx context.Context, urls ...*fs.URI) ([]DirectLink, error) {
	ae := serializer.NewAggregateError()
	res := make([]DirectLink, 0, len(urls))
	useRedirect := m.user.Edges.Group.Settings.RedirectedSource
	fileClient := m.dep.FileClient()
	siteUrl := m.settings.SiteURL(ctx)

	for _, url := range urls {
		file, err := m.fs.Get(
			ctx, url,
			dbfs.WithFileEntities(),
			dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityDownloadFile),
		)
		if err != nil {
			ae.Add(url.String(), err)
			continue
		}

		// Direct links can only be created by the file owner.
		if file.OwnerID() != m.user.ID {
			ae.Add(url.String(), fs.ErrOwnerOnly)
			continue
		}

		if file.Type() != types.FileTypeFile {
			ae.Add(url.String(), fs.ErrEntityNotExist)
			continue
		}

		target := file.PrimaryEntity()
		if target == nil {
			ae.Add(url.String(), fs.ErrEntityNotExist)
			continue
		}

		// Hooks for entity download; hook failures are logged, not fatal.
		if err := m.fs.ExecuteNavigatorHooks(ctx, fs.HookTypeBeforeDownload, file); err != nil {
			m.l.Warning("Failed to execute navigator hooks: %s", err)
		}

		if useRedirect {
			// Use redirect source: persist a direct-link record and return
			// the master-served URL for it.
			link, err := fileClient.CreateDirectLink(ctx, file.ID(), file.Name(), m.user.Edges.Group.SpeedLimit)
			if err != nil {
				ae.Add(url.String(), err)
				continue
			}

			linkHashID := hashid.EncodeSourceLinkID(m.hasher, link.ID)
			res = append(res, DirectLink{
				File: file,
				Url:  routes.MasterDirectLink(siteUrl, linkHashID, link.Name).String(),
			})
		} else {
			// Use direct source: build a storage-backed URL for the entity.
			policy, d, err := m.getEntityPolicyDriver(ctx, target, nil)
			if err != nil {
				ae.Add(url.String(), err)
				continue
			}

			source := entitysource.NewEntitySource(target, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
				m.l, m.config, m.dep.MimeDetector(ctx))
			sourceUrl, err := source.Url(ctx,
				entitysource.WithSpeedLimit(int64(m.user.Edges.Group.SpeedLimit)),
				entitysource.WithDisplayName(file.Name()),
			)
			if err != nil {
				ae.Add(url.String(), err)
				continue
			}

			res = append(res, DirectLink{
				File: file,
				Url:  sourceUrl.Url,
			})
		}

	}

	return res, ae.Aggregate()
}
|
||||
|
||||
// GetUrlForRedirectedDirectLink resolves a stored direct-link record into a
// concrete download URL for its file's primary entity, reading from the
// entity URL cache when possible. It returns the URL and its expiry time.
// The dl argument must have its File edge (with Owner and Entities edges)
// loaded, otherwise the *OrErr accessors fail.
func (m *manager) GetUrlForRedirectedDirectLink(ctx context.Context, dl *ent.DirectLink, opts ...fs.Option) (string, *time.Time, error) {
	o := newOption()
	for _, opt := range opts {
		opt.Apply(o)
	}

	file, err := dl.Edges.FileOrErr()
	if err != nil {
		return "", nil, err
	}

	owner, err := file.Edges.OwnerOrErr()
	if err != nil {
		return "", nil, err
	}

	entities, err := file.Edges.EntitiesOrErr()
	if err != nil {
		return "", nil, err
	}

	// File owner must be active
	if owner.Status != user.StatusActive {
		return "", nil, fs.ErrDirectLinkInvalid.WithError(fmt.Errorf("file owner is not active"))
	}

	// Find primary entity
	target, found := lo.Find(entities, func(entity *ent.Entity) bool {
		return entity.ID == file.PrimaryEntity
	})
	if !found {
		return "", nil, fs.ErrDirectLinkInvalid.WithError(fmt.Errorf("primary entity not found"))
	}
	primaryEntity := fs.NewEntity(target)

	// Generate url
	var (
		res    string
		expire *time.Time
	)

	// Try to read from cache.
	cacheKey := entityUrlCacheKey(primaryEntity.ID(), int64(dl.Speed), dl.Name, false,
		m.settings.SiteURL(ctx).String())
	if cached, ok := m.kv.Get(cacheKey); ok {
		cachedItem := cached.(EntityUrlCache)
		res = cachedItem.Url
		expire = cachedItem.ExpireAt
	} else {
		// Cache miss, Generate new url
		policy, d, err := m.getEntityPolicyDriver(ctx, primaryEntity, nil)
		if err != nil {
			return "", nil, err
		}

		source := entitysource.NewEntitySource(primaryEntity, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
			m.l, m.config, m.dep.MimeDetector(ctx))
		downloadUrl, err := source.Url(ctx,
			entitysource.WithExpire(o.Expire),
			entitysource.WithDownload(false),
			entitysource.WithSpeedLimit(int64(dl.Speed)),
			entitysource.WithDisplayName(dl.Name),
		)
		if err != nil {
			return "", nil, err
		}

		// Save into kv; the TTL is shortened by a safety margin so a cached
		// URL never outlives its own expiry.
		cacheValidDuration := expireTimeToTTL(o.Expire) - m.settings.EntityUrlCacheMargin(ctx)
		if cacheValidDuration > 0 {
			m.kv.Set(cacheKey, EntityUrlCache{
				Url:      downloadUrl.Url,
				ExpireAt: downloadUrl.ExpireAt,
			}, cacheValidDuration)
		}

		res = downloadUrl.Url
		expire = downloadUrl.ExpireAt
	}

	return res, expire, nil
}
|
||||
|
||||
// GetEntityUrls generates download URLs for a batch of files. Each result is
// served from the kv cache when possible; cache misses generate a fresh URL
// via the entity's storage driver and store it back (minus a safety margin).
// Per-file failures are collected into an aggregate error instead of aborting
// the whole batch; the corresponding result slot stays empty.
// Returns the URLs (index-aligned with args), the earliest expiry across all
// returned URLs (nil if none expire), and the aggregated error.
func (m *manager) GetEntityUrls(ctx context.Context, args []GetEntityUrlArgs, opts ...fs.Option) ([]string, *time.Time, error) {
	o := newOption()
	for _, opt := range opts {
		opt.Apply(o)
	}

	var earliestExpireAt *time.Time
	res := make([]string, len(args))
	ae := serializer.NewAggregateError()
	for i, arg := range args {
		file, err := m.fs.Get(
			ctx, arg.URI,
			dbfs.WithFileEntities(),
			dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityDownloadFile),
		)
		if err != nil {
			ae.Add(arg.URI.String(), err)
			continue
		}

		// Only regular files have downloadable entities.
		if file.Type() != types.FileTypeFile {
			ae.Add(arg.URI.String(), fs.ErrEntityNotExist)
			continue
		}

		var (
			target fs.Entity
			found  bool
		)
		if arg.PreferredEntityID != "" {
			found, target = fs.FindDesiredEntity(file, arg.PreferredEntityID, m.hasher, nil)
			if !found {
				ae.Add(arg.URI.String(), fs.ErrEntityNotExist)
				continue
			}
		} else {
			// No preferred entity ID, use the primary version entity
			target = file.PrimaryEntity()
			if target == nil {
				ae.Add(arg.URI.String(), fs.ErrEntityNotExist)
				continue
			}
		}

		// Hooks for entity download; hook failures are logged but do not block the download.
		if err := m.fs.ExecuteNavigatorHooks(ctx, fs.HookTypeBeforeDownload, file); err != nil {
			m.l.Warning("Failed to execute navigator hooks: %s", err)
		}

		// Try to read from cache.
		cacheKey := entityUrlCacheKey(target.ID(), o.DownloadSpeed, getEntityDisplayName(file, target), o.IsDownload,
			m.settings.SiteURL(ctx).String())
		if cached, ok := m.kv.Get(cacheKey); ok && !o.NoCache {
			cachedItem := cached.(EntityUrlCache)
			// Find the earliest expiry time
			if cachedItem.ExpireAt != nil && (earliestExpireAt == nil || cachedItem.ExpireAt.Before(*earliestExpireAt)) {
				earliestExpireAt = cachedItem.ExpireAt
			}
			res[i] = cachedItem.Url
			continue
		}

		// Cache miss, Generate new url
		policy, d, err := m.getEntityPolicyDriver(ctx, target, nil)
		if err != nil {
			ae.Add(arg.URI.String(), err)
			continue
		}

		source := entitysource.NewEntitySource(target, d, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(),
			m.l, m.config, m.dep.MimeDetector(ctx))
		downloadUrl, err := source.Url(ctx,
			entitysource.WithExpire(o.Expire),
			entitysource.WithDownload(o.IsDownload),
			entitysource.WithSpeedLimit(o.DownloadSpeed),
			entitysource.WithDisplayName(getEntityDisplayName(file, target)),
		)
		if err != nil {
			ae.Add(arg.URI.String(), err)
			continue
		}

		// Find the earliest expiry time
		if downloadUrl.ExpireAt != nil && (earliestExpireAt == nil || downloadUrl.ExpireAt.Before(*earliestExpireAt)) {
			earliestExpireAt = downloadUrl.ExpireAt
		}

		// Save into kv; TTL is shortened by the configured margin so cached
		// URLs are never handed out right at their expiry edge.
		cacheValidDuration := expireTimeToTTL(o.Expire) - m.settings.EntityUrlCacheMargin(ctx)
		if cacheValidDuration > 0 {
			m.kv.Set(cacheKey, EntityUrlCache{
				Url:      downloadUrl.Url,
				ExpireAt: downloadUrl.ExpireAt,
			}, cacheValidDuration)
		}

		res[i] = downloadUrl.Url
	}

	return res, earliestExpireAt, ae.Aggregate()
}
|
||||
|
||||
// GetEntitySource builds an EntitySource for the given entity ID. If an entity
// is supplied explicitly via options it is used as-is; otherwise the entity is
// loaded from the filesystem, and entities with a zero reference count are
// treated as nonexistent.
func (m *manager) GetEntitySource(ctx context.Context, entityID int, opts ...fs.Option) (entitysource.EntitySource, error) {
	o := newOption()
	for _, opt := range opts {
		opt.Apply(o)
	}

	var (
		entity fs.Entity
		err    error
	)

	if o.Entity != nil {
		// Caller already has the entity; skip the lookup.
		entity = o.Entity
	} else {
		entity, err = m.fs.GetEntity(ctx, entityID)
		if err != nil {
			return nil, err
		}

		// No file references this entity anymore, so it is not served.
		if entity.ReferenceCount() == 0 {
			return nil, fs.ErrEntityNotExist
		}
	}

	policy, handler, err := m.getEntityPolicyDriver(ctx, entity, o.Policy)
	if err != nil {
		return nil, err
	}

	return entitysource.NewEntitySource(entity, handler, policy, m.auth, m.settings, m.hasher, m.dep.RequestClient(), m.l,
		m.config, m.dep.MimeDetector(ctx), entitysource.WithContext(ctx), entitysource.WithThumb(o.IsThumb)), nil
}
|
||||
|
||||
func (l *manager) SetCurrentVersion(ctx context.Context, path *fs.URI, version int) error {
|
||||
return l.fs.VersionControl(ctx, path, version, false)
|
||||
}
|
||||
|
||||
func (l *manager) DeleteVersion(ctx context.Context, path *fs.URI, version int) error {
|
||||
return l.fs.VersionControl(ctx, path, version, true)
|
||||
}
|
||||
|
||||
func entityUrlCacheKey(id int, speed int64, displayName string, download bool, siteUrl string) string {
|
||||
hash := sha1.New()
|
||||
hash.Write([]byte(fmt.Sprintf("%d_%d_%s_%t_%s", id,
|
||||
speed, displayName, download, siteUrl)))
|
||||
hashRes := hex.EncodeToString(hash.Sum(nil))
|
||||
|
||||
return fmt.Sprintf("%s_%s", EntityUrlCacheKeyPrefix, hashRes)
|
||||
}
|
||||
958
pkg/filemanager/manager/entitysource/entitysource.go
Normal file
958
pkg/filemanager/manager/entitysource/entitysource.go
Normal file
@@ -0,0 +1,958 @@
|
||||
package entitysource
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/local"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/juju/ratelimit"
|
||||
)
|
||||
|
||||
const (
	// shortSeekBytes is the longest forward distance Seek will cover by
	// discarding bytes from the current reader instead of reopening it.
	shortSeekBytes = 1024
	// The algorithm uses at most sniffLen bytes to make its decision.
	sniffLen = 512
	// defaultUrlExpire is the validity window used for internally generated
	// download URLs (e.g. when serving or re-requesting a remote entity).
	defaultUrlExpire = time.Hour * 1
)

var (
	// ErrNoContentLength is returned by Seek when the initial http response did not include a Content-Length header
	ErrNoContentLength = errors.New("Content-Length was not set")

	// errNoOverlap is returned by serveContent's parseRange if first-byte-pos of
	// all of the byte-range-spec values is greater than the content size.
	errNoOverlap = errors.New("invalid range: failed to overlap")
)
|
||||
|
||||
// EntitySource is a readable, seekable view over a stored file entity that can
// also produce signed download URLs and serve itself over HTTP.
type EntitySource interface {
	io.ReadSeekCloser
	io.ReaderAt

	// Url generates a download url for the entity.
	Url(ctx context.Context, opts ...EntitySourceOption) (*EntityUrl, error)
	// Serve serves the entity to the client, with supports on Range header and If- cache control.
	Serve(w http.ResponseWriter, r *http.Request, opts ...EntitySourceOption)
	// Entity returns the entity of the source.
	Entity() fs.Entity
	// IsLocal returns true if the source is in local machine.
	IsLocal() bool
	// LocalPath returns the local path of the source file.
	LocalPath(ctx context.Context) string
	// Apply applies the options to the source.
	Apply(opts ...EntitySourceOption)
	// CloneToLocalSrc clones the source to a local file source.
	CloneToLocalSrc(t types.EntityType, src string) (EntitySource, error)
	// ShouldInternalProxy returns true if the source will/should be proxied by internal proxy.
	ShouldInternalProxy(opts ...EntitySourceOption) bool
}
|
||||
|
||||
// EntitySourceOption mutates an EntitySourceOptions holder.
type EntitySourceOption interface {
	Apply(any)
}

// EntitySourceOptions carries per-request settings applied when generating
// URLs for, or serving, an entity source.
type EntitySourceOptions struct {
	SpeedLimit         int64      // bytes/s; rate limiting is applied only when > 0
	Expire             *time.Time // requested URL expiry; nil means none requested
	IsDownload         bool       // serve/sign as an attachment download
	NoInternalProxy    bool       // overrides the policy's internal-proxy setting
	DisplayName        string     // file name presented to the client
	OneTimeDownloadKey string
	Ctx                context.Context
	IsThumb            bool // generate the thumbnail variant of the URL
}

// EntityUrl is a generated access URL plus its expiry time (nil = no expiry).
type EntityUrl struct {
	Url      string
	ExpireAt *time.Time
}

// EntitySourceOptionFunc adapts a plain function into an EntitySourceOption.
type EntitySourceOptionFunc func(any)
|
||||
|
||||
// WithSpeedLimit set speed limit for file source (if supported)
|
||||
func WithSpeedLimit(limit int64) EntitySourceOption {
|
||||
return EntitySourceOptionFunc(func(option any) {
|
||||
option.(*EntitySourceOptions).SpeedLimit = limit
|
||||
})
|
||||
}
|
||||
|
||||
// WithExpire set expire time for file source
|
||||
func WithExpire(expire *time.Time) EntitySourceOption {
|
||||
return EntitySourceOptionFunc(func(option any) {
|
||||
option.(*EntitySourceOptions).Expire = expire
|
||||
})
|
||||
}
|
||||
|
||||
// WithDownload set file URL as download
|
||||
func WithDownload(isDownload bool) EntitySourceOption {
|
||||
return EntitySourceOptionFunc(func(option any) {
|
||||
option.(*EntitySourceOptions).IsDownload = isDownload
|
||||
})
|
||||
}
|
||||
|
||||
// WithNoInternalProxy overwrite policy's internal proxy setting
|
||||
func WithNoInternalProxy() EntitySourceOption {
|
||||
return EntitySourceOptionFunc(func(option any) {
|
||||
option.(*EntitySourceOptions).NoInternalProxy = true
|
||||
})
|
||||
}
|
||||
|
||||
// WithDisplayName set display name for file source
|
||||
func WithDisplayName(name string) EntitySourceOption {
|
||||
return EntitySourceOptionFunc(func(option any) {
|
||||
option.(*EntitySourceOptions).DisplayName = name
|
||||
})
|
||||
}
|
||||
|
||||
// WithContext set context for file source
|
||||
func WithContext(ctx context.Context) EntitySourceOption {
|
||||
return EntitySourceOptionFunc(func(option any) {
|
||||
option.(*EntitySourceOptions).Ctx = ctx
|
||||
})
|
||||
}
|
||||
|
||||
// WithThumb set entity source as thumb. This will result in entity source URL
|
||||
// generated with thumbnail processing parameters. For sidecar thumb files,
|
||||
// this option will be ignored.
|
||||
func WithThumb(isThumb bool) EntitySourceOption {
|
||||
return EntitySourceOptionFunc(func(option any) {
|
||||
option.(*EntitySourceOptions).IsThumb = isThumb
|
||||
})
|
||||
}
|
||||
|
||||
// Apply invokes f on the given options holder, satisfying EntitySourceOption.
func (f EntitySourceOptionFunc) Apply(option any) {
	f(option)
}
|
||||
|
||||
type (
	// entitySource is the default EntitySource implementation. It lazily opens
	// the underlying data — a local file handle or a signed remote URL —
	// on first read, and tracks the current offset so Seek can decide between
	// discarding bytes and reopening the stream.
	entitySource struct {
		e           fs.Entity
		handler     driver.Handler
		policy      *ent.StoragePolicy
		generalAuth auth.Auth
		settings    setting.Provider
		hasher      hashid.Encoder
		c           request.Client
		l           logging.Logger
		config      conf.ConfigProvider
		mime        mime.MimeDetector

		rsc io.ReadCloser // current data stream; nil until opened by resetRequest
		pos int64         // current read offset within the entity
		o   *EntitySourceOptions
	}
)
|
||||
|
||||
// NewEntitySource creates a new EntitySource for entity e, backed by the given
// storage driver handler and policy. The remaining dependencies are used for
// URL signing, settings lookup, ID encoding, HTTP requests, logging, and MIME
// detection. Options are applied to a fresh EntitySourceOptions set.
func NewEntitySource(
	e fs.Entity,
	handler driver.Handler,
	policy *ent.StoragePolicy,
	generalAuth auth.Auth,
	settings setting.Provider,
	hasher hashid.Encoder,
	c request.Client,
	l logging.Logger,
	config conf.ConfigProvider,
	mime mime.MimeDetector,
	opts ...EntitySourceOption,
) EntitySource {
	s := &entitySource{
		e:           e,
		handler:     handler,
		policy:      policy,
		generalAuth: generalAuth,
		settings:    settings,
		hasher:      hasher,
		c:           c,
		config:      config,
		l:           l,
		mime:        mime,
		o:           &EntitySourceOptions{},
	}
	for _, opt := range opts {
		opt.Apply(s.o)
	}
	return s
}
|
||||
|
||||
func (f *entitySource) Apply(opts ...EntitySourceOption) {
|
||||
for _, opt := range opts {
|
||||
opt.Apply(f.o)
|
||||
}
|
||||
}
|
||||
|
||||
// CloneToLocalSrc builds a new EntitySource of entity type t backed by the
// local file at src, using a fresh local storage policy and driver while
// sharing this source's dependencies and its option set.
func (f *entitySource) CloneToLocalSrc(t types.EntityType, src string) (EntitySource, error) {
	e, err := local.NewLocalFileEntity(t, src)
	if err != nil {
		return nil, err
	}

	policy := &ent.StoragePolicy{Type: types.PolicyTypeLocal}
	handler := local.New(policy, f.l, f.config)

	// Reuse the same options pointer so limits/expiry settings carry over.
	newSrc := NewEntitySource(e, handler, policy, f.generalAuth, f.settings, f.hasher, f.c, f.l, f.config, f.mime).(*entitySource)
	newSrc.o = f.o
	return newSrc, nil
}
|
||||
|
||||
// Entity returns the underlying file entity backing this source.
func (f *entitySource) Entity() fs.Entity {
	return f.e
}
|
||||
|
||||
// IsLocal reports whether the storage driver can read entity data directly on
// this machine (the driver advertises the inbound-get capability).
func (f *entitySource) IsLocal() bool {
	return f.handler.Capabilities().StaticFeatures.Enabled(int(driver.HandlerCapabilityInboundGet))
}
|
||||
|
||||
// LocalPath resolves the entity's source reference to a path on the local
// filesystem via the storage driver.
func (f *entitySource) LocalPath(ctx context.Context) string {
	return f.handler.LocalPath(ctx, f.e.Source())
}
|
||||
|
||||
func (f *entitySource) Serve(w http.ResponseWriter, r *http.Request, opts ...EntitySourceOption) {
|
||||
for _, opt := range opts {
|
||||
opt.Apply(f.o)
|
||||
}
|
||||
|
||||
if f.IsLocal() {
|
||||
// For local files, validate file existence by resetting rsc
|
||||
if err := f.resetRequest(); err != nil {
|
||||
f.l.Warning("Failed to serve local entity %q: %s", err, f.e.Source())
|
||||
http.Error(w, "Entity data does not exist.", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
etag := "\"" + hashid.EncodeEntityID(f.hasher, f.e.ID()) + "\""
|
||||
w.Header().Set("Etag", "\""+hashid.EncodeEntityID(f.hasher, f.e.ID())+"\"")
|
||||
|
||||
if f.o.IsDownload {
|
||||
encodedFilename := url.PathEscape(f.o.DisplayName)
|
||||
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"; filename*=UTF-8''%s",
|
||||
f.o.DisplayName, encodedFilename))
|
||||
}
|
||||
|
||||
done, rangeReq := checkPreconditions(w, r, etag)
|
||||
if done {
|
||||
return
|
||||
}
|
||||
|
||||
if !f.IsLocal() {
|
||||
// for non-local file, reverse-proxy the request
|
||||
expire := time.Now().Add(defaultUrlExpire)
|
||||
u, err := f.Url(driver.WithForcePublicEndpoint(f.o.Ctx, false), WithNoInternalProxy(), WithExpire(&expire))
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
target, err := url.Parse(u.Url)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
proxy := &httputil.ReverseProxy{
|
||||
Director: func(request *http.Request) {
|
||||
request.URL.Scheme = target.Scheme
|
||||
request.URL.Host = target.Host
|
||||
request.URL.Path = target.Path
|
||||
request.URL.RawPath = target.RawPath
|
||||
request.URL.RawQuery = target.RawQuery
|
||||
request.Host = target.Host
|
||||
request.Header.Del("Authorization")
|
||||
},
|
||||
ModifyResponse: func(response *http.Response) error {
|
||||
response.Header.Del("ETag")
|
||||
response.Header.Del("Content-Disposition")
|
||||
response.Header.Del("Cache-Control")
|
||||
logging.Request(f.l,
|
||||
false,
|
||||
response.StatusCode,
|
||||
response.Request.Method,
|
||||
request.LocalIP,
|
||||
response.Request.URL.String(),
|
||||
"",
|
||||
start,
|
||||
)
|
||||
return nil
|
||||
},
|
||||
ErrorHandler: func(writer http.ResponseWriter, request *http.Request, err error) {
|
||||
f.l.Error("Reverse proxy error in %q: %s", request.URL.String(), err)
|
||||
writer.WriteHeader(http.StatusBadGateway)
|
||||
writer.Write([]byte("[Cloudreve] Bad Gateway"))
|
||||
},
|
||||
}
|
||||
|
||||
r = r.Clone(f.o.Ctx)
|
||||
defer func() {
|
||||
if err := recover(); err != nil && err != http.ErrAbortHandler {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
proxy.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
code := http.StatusOK
|
||||
// If Content-Type isn't set, use the file's extension to find it, but
|
||||
// if the Content-Type is unset explicitly, do not sniff the type.
|
||||
ctypes, haveType := w.Header()["Content-Type"]
|
||||
var ctype string
|
||||
if !haveType {
|
||||
ctype = f.mime.TypeByName(f.o.DisplayName)
|
||||
if ctype == "" {
|
||||
// read a chunk to decide between utf-8 text and binary
|
||||
var buf [sniffLen]byte
|
||||
n, _ := io.ReadFull(f, buf[:])
|
||||
ctype = http.DetectContentType(buf[:n])
|
||||
_, err := f.Seek(0, io.SeekStart) // rewind to output whole file
|
||||
if err != nil {
|
||||
http.Error(w, "seeker can't seek", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
w.Header().Set("Content-Type", ctype)
|
||||
} else if len(ctypes) > 0 {
|
||||
ctype = ctypes[0]
|
||||
}
|
||||
|
||||
size := f.e.Size()
|
||||
if size < 0 {
|
||||
// Should never happen but just to be sure
|
||||
http.Error(w, "negative content size computed", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// handle Content-Range header.
|
||||
sendSize := size
|
||||
var sendContent io.Reader = f
|
||||
ranges, err := parseRange(rangeReq, size)
|
||||
switch err {
|
||||
case nil:
|
||||
case errNoOverlap:
|
||||
if size == 0 {
|
||||
// Some clients add a Range header to all requests to
|
||||
// limit the size of the response. If the file is empty,
|
||||
// ignore the range header and respond with a 200 rather
|
||||
// than a 416.
|
||||
ranges = nil
|
||||
break
|
||||
}
|
||||
w.Header().Set("Content-Range", fmt.Sprintf("bytes */%d", size))
|
||||
fallthrough
|
||||
default:
|
||||
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
|
||||
return
|
||||
}
|
||||
|
||||
if sumRangesSize(ranges) > size {
|
||||
// The total number of bytes in all the ranges
|
||||
// is larger than the size of the file by
|
||||
// itself, so this is probably an attack, or a
|
||||
// dumb client. Ignore the range request.
|
||||
ranges = nil
|
||||
}
|
||||
switch {
|
||||
case len(ranges) == 1:
|
||||
// RFC 7233, Section 4.1:
|
||||
// "If a single part is being transferred, the server
|
||||
// generating the 206 response MUST generate a
|
||||
// Content-Range header field, describing what range
|
||||
// of the selected representation is enclosed, and a
|
||||
// payload consisting of the range.
|
||||
// ...
|
||||
// A server MUST NOT generate a multipart response to
|
||||
// a request for a single range, since a client that
|
||||
// does not request multiple parts might not support
|
||||
// multipart responses."
|
||||
ra := ranges[0]
|
||||
if _, err := f.Seek(ra.start, io.SeekStart); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusRequestedRangeNotSatisfiable)
|
||||
return
|
||||
}
|
||||
sendSize = ra.length
|
||||
code = http.StatusPartialContent
|
||||
w.Header().Set("Content-Range", ra.contentRange(size))
|
||||
case len(ranges) > 1:
|
||||
sendSize = rangesMIMESize(ranges, ctype, size)
|
||||
code = http.StatusPartialContent
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
mw := multipart.NewWriter(pw)
|
||||
w.Header().Set("Content-Type", "multipart/byteranges; boundary="+mw.Boundary())
|
||||
sendContent = pr
|
||||
defer pr.Close() // cause writing goroutine to fail and exit if CopyN doesn't finish.
|
||||
go func() {
|
||||
for _, ra := range ranges {
|
||||
part, err := mw.CreatePart(ra.mimeHeader(ctype, size))
|
||||
if err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if _, err := f.Seek(ra.start, io.SeekStart); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if _, err := io.CopyN(part, f, ra.length); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
mw.Close()
|
||||
pw.Close()
|
||||
}()
|
||||
}
|
||||
|
||||
w.Header().Set("Accept-Ranges", "bytes")
|
||||
if w.Header().Get("Content-Encoding") == "" {
|
||||
w.Header().Set("Content-Length", strconv.FormatInt(sendSize, 10))
|
||||
}
|
||||
|
||||
w.WriteHeader(code)
|
||||
|
||||
if r.Method != "HEAD" {
|
||||
io.CopyN(w, sendContent, sendSize)
|
||||
}
|
||||
}
|
||||
|
||||
// Read implements io.Reader. The underlying stream is opened lazily on first
// use; f.pos is advanced by the bytes read so Seek can track the offset.
func (f *entitySource) Read(p []byte) (n int, err error) {
	if f.rsc == nil {
		err = f.resetRequest()
	}
	if f.rsc != nil {
		n, err = f.rsc.Read(p)
		f.pos += int64(n)
	}
	return
}
|
||||
|
||||
func (f *entitySource) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
if f.IsLocal() {
|
||||
if f.rsc == nil {
|
||||
err = f.resetRequest()
|
||||
}
|
||||
if readAt, ok := f.rsc.(io.ReaderAt); ok {
|
||||
return readAt.ReadAt(p, off)
|
||||
}
|
||||
}
|
||||
|
||||
return 0, errors.New("source does not support ReadAt")
|
||||
}
|
||||
|
||||
// Seek implements io.Seeker. For short forward seeks on an open stream it
// discards bytes (cheaper than reopening, especially for remote sources);
// otherwise it closes the stream so the next Read reopens it at the new
// offset via resetRequest.
func (f *entitySource) Seek(offset int64, whence int) (int64, error) {
	var err error
	switch whence {
	case io.SeekStart:
	case io.SeekCurrent:
		offset += f.pos
	case io.SeekEnd:
		offset = f.e.Size() + offset
	}
	if f.rsc != nil {
		// Try to read, which is cheaper than doing a request
		// (reading through f advances f.pos as a side effect).
		if f.pos < offset && offset-f.pos <= shortSeekBytes {
			_, err := io.CopyN(io.Discard, f, offset-f.pos)
			if err != nil {
				return 0, err
			}
		}

		// Still not at the target: drop the stream; it will be reopened lazily.
		if f.pos != offset {
			err = f.rsc.Close()
			f.rsc = nil
		}
	}
	f.pos = offset
	return f.pos, err
}
|
||||
|
||||
func (f *entitySource) Close() error {
|
||||
if f.rsc != nil {
|
||||
return f.rsc.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ShouldInternalProxy reports whether entity content will be proxied through
// this instance rather than accessed via a backend URL. True when the entity
// is empty (ID 0), the driver requires proxying, or the policy enables
// internal proxy and the NoInternalProxy option is unset. Note && binds
// tighter than ||, so the policy flag is only consulted with the option.
func (f *entitySource) ShouldInternalProxy(opts ...EntitySourceOption) bool {
	for _, opt := range opts {
		opt.Apply(f.o)
	}
	handlerCapability := f.handler.Capabilities()
	return f.e.ID() == 0 || handlerCapability.StaticFeatures.Enabled(int(driver.HandlerCapabilityProxyRequired)) ||
		f.policy.Settings.InternalProxy && !f.o.NoInternalProxy
}
|
||||
|
||||
// Url generates an access URL for the entity. Internally proxied entities get
// a signed master-node content URL; otherwise the storage driver produces a
// source (or thumbnail) URL, with the expiry clamped to the driver's allowed
// range, and an optional policy-level proxy applied on top.
func (f *entitySource) Url(ctx context.Context, opts ...EntitySourceOption) (*EntityUrl, error) {
	for _, opt := range opts {
		opt.Apply(f.o)
	}

	var (
		srcUrl    *url.URL
		err       error
		srcUrlStr string
	)

	expire := f.o.Expire
	displayName := f.o.DisplayName
	if displayName == "" {
		// Fall back to the base name of the entity's source path.
		displayName = path.Base(util.FormSlash(f.e.Source()))
	}

	// Use internal proxy URL if:
	// 1. Internal proxy is required by driver's definition
	// 2. Internal proxy is enabled in Policy setting and not disabled by option
	// 3. It's an empty entity.
	handlerCapability := f.handler.Capabilities()
	if f.ShouldInternalProxy() {
		siteUrl := f.settings.SiteURL(ctx)
		base := routes.MasterFileContentUrl(
			siteUrl,
			hashid.EncodeEntityID(f.hasher, f.e.ID()),
			displayName,
			f.o.IsDownload,
			f.o.IsThumb,
			f.o.SpeedLimit,
		)

		srcUrl, err = auth.SignURI(ctx, f.generalAuth, base.String(), expire)
		if err != nil {
			return nil, fmt.Errorf("failed to sign internal proxy URL: %w", err)
		}

		if f.IsLocal() {
			// For local file, we need to apply proxy if needed
			srcUrl, err = driver.ApplyProxyIfNeeded(f.policy, srcUrl)
			if err != nil {
				return nil, fmt.Errorf("failed to apply proxy: %w", err)
			}
		}
	} else {
		// Clamp the requested expiry into the driver's supported window.
		expire = capExpireTime(expire, handlerCapability.MinSourceExpire, handlerCapability.MaxSourceExpire)
		if f.o.IsThumb {
			srcUrlStr, err = f.handler.Thumb(ctx, expire, util.Ext(f.o.DisplayName), f.e)
		} else {
			srcUrlStr, err = f.handler.Source(ctx, f.e, &driver.GetSourceArgs{
				Expire:      expire,
				IsDownload:  f.o.IsDownload,
				Speed:       f.o.SpeedLimit,
				DisplayName: displayName,
			})
		}
		if err != nil {
			return nil, fmt.Errorf("failed to get source URL: %w", err)
		}

		srcUrl, err = url.Parse(srcUrlStr)
		if err != nil {
			return nil, fmt.Errorf("failed to parse origin URL: %w", err)
		}

		srcUrl, err = driver.ApplyProxyIfNeeded(f.policy, srcUrl)
		if err != nil {
			return nil, fmt.Errorf("failed to apply proxy: %w", err)
		}
	}

	return &EntityUrl{
		Url:      srcUrl.String(),
		ExpireAt: expire,
	}, nil
}
|
||||
|
||||
// resetRequest (re)opens the underlying data stream at f.pos. Local entities
// are opened through the storage driver (with an optional rate limiter);
// remote entities are fetched over HTTP with a Range header starting at f.pos
// using a freshly signed URL.
func (f *entitySource) resetRequest() error {
	// For inbound files, we can use the handler to open the file directly
	if f.IsLocal() {
		if f.rsc == nil {
			file, err := f.handler.Open(f.o.Ctx, f.e.Source())
			if err != nil {
				return fmt.Errorf("failed to open inbound file: %w", err)
			}

			if f.pos > 0 {
				_, err = file.Seek(f.pos, io.SeekStart)
				if err != nil {
					return fmt.Errorf("failed to seek inbound file: %w", err)
				}
			}

			f.rsc = file

			// Wrap with a token-bucket reader when a speed limit is set.
			if f.o.SpeedLimit > 0 {
				bucket := ratelimit.NewBucketWithRate(float64(f.o.SpeedLimit), f.o.SpeedLimit)
				f.rsc = lrs{f.rsc, ratelimit.Reader(f.rsc, bucket)}
			}
		}

		return nil
	}

	// Remote entity: sign a short-lived direct URL and request from f.pos.
	expire := time.Now().Add(defaultUrlExpire)
	u, err := f.Url(driver.WithForcePublicEndpoint(f.o.Ctx, false), WithNoInternalProxy(), WithExpire(&expire))
	if err != nil {
		return fmt.Errorf("failed to generate download url: %w", err)
	}

	h := http.Header{}
	h.Set("Range", fmt.Sprintf("bytes=%d-", f.pos))
	resp := f.c.Request(http.MethodGet, u.Url, nil,
		request.WithContext(f.o.Ctx),
		request.WithLogger(f.l),
		request.WithHeader(h),
	).CheckHTTPResponse(http.StatusOK, http.StatusPartialContent)
	if resp.Err != nil {
		return fmt.Errorf("failed to request download url: %w", resp.Err)
	}

	f.rsc = resp.Response.Body
	return nil
}
|
||||
|
||||
// capExpireTime make sure expire time is not too long or too short (if min or max is set)
|
||||
func capExpireTime(expire *time.Time, min, max time.Duration) *time.Time {
|
||||
timeNow := time.Now()
|
||||
if expire == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cappedExpires := *expire
|
||||
// Make sure expire time is not too long or too short
|
||||
if min > 0 && expire.Before(timeNow.Add(min)) {
|
||||
cappedExpires = timeNow.Add(min)
|
||||
} else if max > 0 && expire.After(timeNow.Add(max)) {
|
||||
cappedExpires = timeNow.Add(max)
|
||||
}
|
||||
|
||||
return &cappedExpires
|
||||
}
|
||||
|
||||
// checkPreconditions evaluates request preconditions and reports whether a precondition
// resulted in sending StatusNotModified or StatusPreconditionFailed.
// On success it also returns the effective Range header (empty when If-Range
// invalidated it).
func checkPreconditions(w http.ResponseWriter, r *http.Request, etag string) (done bool, rangeHeader string) {
	// This function carefully follows RFC 7232 section 6.
	ch := checkIfMatch(r, etag)
	if ch == condFalse {
		w.WriteHeader(http.StatusPreconditionFailed)
		return true, ""
	}
	switch checkIfNoneMatch(r, etag) {
	case condFalse:
		// Safe methods get 304; everything else gets 412.
		if r.Method == "GET" || r.Method == "HEAD" {
			writeNotModified(w)
			return true, ""
		} else {
			w.WriteHeader(http.StatusPreconditionFailed)
			return true, ""
		}
	}

	rangeHeader = r.Header.Get("Range")
	if rangeHeader != "" && checkIfRange(r, etag) == condFalse {
		rangeHeader = ""
	}
	return false, rangeHeader
}
|
||||
|
||||
// condResult is the result of an HTTP request precondition check.
// See https://tools.ietf.org/html/rfc7232 section 3.
type condResult int

const (
	condNone  condResult = iota // header absent; precondition does not apply
	condTrue                    // precondition satisfied
	condFalse                   // precondition failed
)
|
||||
|
||||
// checkIfMatch evaluates the If-Match header (RFC 7232 §3.1) against the
// current ETag. Returns condNone when the header is absent, condTrue when the
// wildcard or a strongly matching tag is present, condFalse otherwise.
func checkIfMatch(r *http.Request, currentEtag string) condResult {
	im := r.Header.Get("If-Match")
	if im == "" {
		return condNone
	}
	// Walk the comma-separated list of entity tags.
	for {
		im = textproto.TrimString(im)
		if len(im) == 0 {
			break
		}
		if im[0] == ',' {
			im = im[1:]
			continue
		}
		if im[0] == '*' {
			return condTrue
		}
		etag, remain := scanETag(im)
		if etag == "" {
			// Malformed tag: stop scanning; precondition fails.
			break
		}
		if etagStrongMatch(etag, currentEtag) {
			return condTrue
		}
		im = remain
	}

	return condFalse
}
|
||||
|
||||
// scanETag determines if a syntactically valid ETag is present at s. If so,
// the ETag and remaining text after consuming ETag is returned. Otherwise,
// it returns "", "". Both strong ("text") and weak (W/"text") forms are
// accepted; see RFC 7232 section 2.3.
func scanETag(s string) (etag string, remain string) {
	s = textproto.TrimString(s)
	start := 0
	if strings.HasPrefix(s, "W/") {
		start = 2
	}
	if len(s[start:]) < 2 || s[start] != '"' {
		return "", ""
	}
	for i := start + 1; i < len(s); i++ {
		switch c := s[i]; {
		case c == 0x21 || c >= 0x23 && c <= 0x7E || c >= 0x80:
			// Character values allowed in ETags; keep scanning.
		case c == '"':
			// Closing quote: the tag spans up to and including it.
			return s[:i+1], s[i+1:]
		default:
			return "", ""
		}
	}
	return "", ""
}
|
||||
|
||||
// etagStrongMatch reports whether a and b match using strong ETag comparison
// (RFC 7232 section 2.3.2): both must be identical strong tags — weak tags
// (W/"...") never strongly match.
func etagStrongMatch(a, b string) bool {
	if a == "" || a[0] != '"' {
		return false
	}
	return a == b
}
|
||||
|
||||
// checkIfNoneMatch evaluates the If-None-Match header (RFC 7232 §3.2) against
// the current ETag using weak comparison. Returns condNone when absent,
// condFalse when the wildcard or a weakly matching tag is present (i.e. the
// client's cached copy is current), condTrue otherwise.
func checkIfNoneMatch(r *http.Request, currentEtag string) condResult {
	inm := r.Header.Get("If-None-Match")
	if inm == "" {
		return condNone
	}
	buf := inm
	// Walk the comma-separated list of entity tags.
	for {
		buf = textproto.TrimString(buf)
		if len(buf) == 0 {
			break
		}
		if buf[0] == ',' {
			buf = buf[1:]
			continue
		}
		if buf[0] == '*' {
			return condFalse
		}
		etag, remain := scanETag(buf)
		if etag == "" {
			// Malformed tag: stop scanning.
			break
		}
		if etagWeakMatch(etag, currentEtag) {
			return condFalse
		}
		buf = remain
	}
	return condTrue
}
|
||||
|
||||
// etagWeakMatch reports whether a and b match using weak ETag comparison
// (RFC 7232 section 2.3.2): the W/ prefix, if any, is ignored on both sides.
func etagWeakMatch(a, b string) bool {
	const weakPrefix = "W/"
	return strings.TrimPrefix(a, weakPrefix) == strings.TrimPrefix(b, weakPrefix)
}
|
||||
|
||||
// writeNotModified sends a 304 Not Modified response, stripping entity
// headers per RFC 7232 section 4.1: representation metadata should not
// accompany a 304 unless it guides cache updates (Last-Modified is kept only
// when there is no ETag).
func writeNotModified(w http.ResponseWriter) {
	h := w.Header()
	for _, key := range []string{"Content-Type", "Content-Length", "Content-Encoding"} {
		delete(h, key)
	}
	if h.Get("Etag") != "" {
		delete(h, "Last-Modified")
	}
	w.WriteHeader(http.StatusNotModified)
}
|
||||
|
||||
// checkIfRange evaluates the If-Range header (RFC 7233 §3.2) against the
// current ETag for GET/HEAD requests. condTrue means the Range header may be
// honored; condFalse means the full representation should be sent instead.
// Date-form If-Range values are not supported and evaluate to condFalse.
func checkIfRange(r *http.Request, currentEtag string) condResult {
	if r.Method != "GET" && r.Method != "HEAD" {
		return condNone
	}
	ir := r.Header.Get("If-Range")
	if ir == "" {
		return condNone
	}
	etag, _ := scanETag(ir)
	if etag != "" {
		if etagStrongMatch(etag, currentEtag) {
			return condTrue
		} else {
			return condFalse
		}
	}

	return condFalse
}
|
||||
|
||||
// httpRange specifies a single byte range to be sent to the client.
type httpRange struct {
	start, length int64
}

// contentRange renders the Content-Range header value for this range against
// a representation of the given total size, e.g. "bytes 10-14/100".
func (r httpRange) contentRange(size int64) string {
	end := r.start + r.length - 1
	return fmt.Sprintf("bytes %d-%d/%d", r.start, end, size)
}

// mimeHeader builds the per-part header used in multipart/byteranges bodies.
func (r httpRange) mimeHeader(contentType string, size int64) textproto.MIMEHeader {
	h := make(textproto.MIMEHeader, 2)
	h.Set("Content-Range", r.contentRange(size))
	h.Set("Content-Type", contentType)
	return h
}
|
||||
|
||||
// parseRange parses a Range header string as per RFC 7233.
//
// It returns (nil, nil) when the header is absent, an "invalid range" error
// on malformed input, and errNoOverlap when every requested range starts at
// or beyond size. Out-of-bounds but overlapping ranges are clamped to the
// content rather than rejected.
func parseRange(s string, size int64) ([]httpRange, error) {
	if s == "" {
		return nil, nil // header not present
	}
	const b = "bytes="
	if !strings.HasPrefix(s, b) {
		return nil, errors.New("invalid range")
	}
	var ranges []httpRange
	// noOverlap records that at least one syntactically valid range was
	// skipped for starting past the end of the content.
	noOverlap := false
	for _, ra := range strings.Split(s[len(b):], ",") {
		ra = textproto.TrimString(ra)
		if ra == "" {
			continue
		}
		start, end, ok := strings.Cut(ra, "-")
		if !ok {
			return nil, errors.New("invalid range")
		}
		start, end = textproto.TrimString(start), textproto.TrimString(end)
		var r httpRange
		if start == "" {
			// If no start is specified, end specifies the
			// range start relative to the end of the file,
			// and we are dealing with <suffix-length>
			// which has to be a non-negative integer as per
			// RFC 7233 Section 2.1 "Byte-Ranges".
			if end == "" || end[0] == '-' {
				return nil, errors.New("invalid range")
			}
			i, err := strconv.ParseInt(end, 10, 64)
			if i < 0 || err != nil {
				return nil, errors.New("invalid range")
			}
			// A suffix longer than the content means "the whole file".
			if i > size {
				i = size
			}
			r.start = size - i
			r.length = size - r.start
		} else {
			i, err := strconv.ParseInt(start, 10, 64)
			if err != nil || i < 0 {
				return nil, errors.New("invalid range")
			}
			if i >= size {
				// If the range begins after the size of the content,
				// then it does not overlap.
				noOverlap = true
				continue
			}
			r.start = i
			if end == "" {
				// If no end is specified, range extends to end of the file.
				r.length = size - r.start
			} else {
				i, err := strconv.ParseInt(end, 10, 64)
				if err != nil || r.start > i {
					return nil, errors.New("invalid range")
				}
				// Clamp an end past EOF to the last valid byte.
				if i >= size {
					i = size - 1
				}
				r.length = i - r.start + 1
			}
		}
		ranges = append(ranges, r)
	}
	if noOverlap && len(ranges) == 0 {
		// The specified ranges did not overlap with the content.
		return nil, errNoOverlap
	}
	return ranges, nil
}
|
||||
|
||||
func sumRangesSize(ranges []httpRange) (size int64) {
|
||||
for _, ra := range ranges {
|
||||
size += ra.length
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// countingWriter is an io.Writer that discards its input and only tracks the
// total number of bytes written to it.
type countingWriter int64

// Write records len(p), reports the full slice as written, and never fails.
func (w *countingWriter) Write(p []byte) (int, error) {
	n := len(p)
	*w += countingWriter(n)
	return n, nil
}
|
||||
|
||||
// rangesMIMESize returns the number of bytes it takes to encode the
|
||||
// provided ranges as a multipart response.
|
||||
func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (encSize int64) {
|
||||
var w countingWriter
|
||||
mw := multipart.NewWriter(&w)
|
||||
for _, ra := range ranges {
|
||||
mw.CreatePart(ra.mimeHeader(contentType, contentSize))
|
||||
encSize += ra.length
|
||||
}
|
||||
mw.Close()
|
||||
encSize += int64(w)
|
||||
return
|
||||
}
|
||||
|
||||
type lrs struct {
|
||||
c io.Closer
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
func (r lrs) Read(p []byte) (int, error) {
|
||||
return r.r.Read(p)
|
||||
}
|
||||
|
||||
func (r lrs) Close() error {
|
||||
return r.c.Close()
|
||||
}
|
||||
114
pkg/filemanager/manager/fs.go
Normal file
114
pkg/filemanager/manager/fs.go
Normal file
@@ -0,0 +1,114 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/cos"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/local"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/obs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/onedrive"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/oss"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/qiniu"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/remote"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/s3"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/upyun"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
)
|
||||
|
||||
func (m *manager) LocalDriver(policy *ent.StoragePolicy) driver.Handler {
|
||||
if policy == nil {
|
||||
policy = &ent.StoragePolicy{Type: types.PolicyTypeLocal, Settings: &types.PolicySetting{}}
|
||||
}
|
||||
return local.New(policy, m.l, m.config)
|
||||
}
|
||||
|
||||
func (m *manager) CastStoragePolicyOnSlave(ctx context.Context, policy *ent.StoragePolicy) *ent.StoragePolicy {
|
||||
if !m.stateless {
|
||||
return policy
|
||||
}
|
||||
|
||||
nodeId := cluster.NodeIdFromContext(ctx)
|
||||
if policy.Type == types.PolicyTypeRemote {
|
||||
if nodeId != policy.NodeID {
|
||||
return policy
|
||||
}
|
||||
|
||||
policyCopy := *policy
|
||||
policyCopy.Type = types.PolicyTypeLocal
|
||||
return &policyCopy
|
||||
} else if policy.Type == types.PolicyTypeLocal {
|
||||
policyCopy := *policy
|
||||
policyCopy.NodeID = nodeId
|
||||
policyCopy.Type = types.PolicyTypeRemote
|
||||
policyCopy.SetNode(&ent.Node{
|
||||
ID: nodeId,
|
||||
Server: cluster.MasterSiteUrlFromContext(ctx),
|
||||
SlaveKey: m.config.Slave().Secret,
|
||||
})
|
||||
return &policyCopy
|
||||
} else if policy.Type == types.PolicyTypeOss {
|
||||
policyCopy := *policy
|
||||
if policyCopy.Settings != nil {
|
||||
policyCopy.Settings.ServerSideEndpoint = ""
|
||||
}
|
||||
}
|
||||
|
||||
return policy
|
||||
}
|
||||
|
||||
// GetStorageDriver constructs a storage driver handler for the given policy,
// dispatching on the policy type. ErrUnknownPolicyType is returned for
// unrecognized types.
func (m *manager) GetStorageDriver(ctx context.Context, policy *ent.StoragePolicy) (driver.Handler, error) {
	switch policy.Type {
	case types.PolicyTypeLocal:
		return local.New(policy, m.l, m.config), nil
	case types.PolicyTypeRemote:
		return remote.New(ctx, policy, m.settings, m.config, m.l)
	case types.PolicyTypeOss:
		return oss.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
	case types.PolicyTypeCos:
		return cos.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
	case types.PolicyTypeS3:
		return s3.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
	case types.PolicyTypeObs:
		return obs.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
	case types.PolicyTypeQiniu:
		return qiniu.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
	case types.PolicyTypeUpyun:
		return upyun.New(ctx, policy, m.settings, m.config, m.l, m.dep.MimeDetector(ctx))
	case types.PolicyTypeOd:
		// OneDrive uses the credential manager instead of a MIME detector.
		return onedrive.New(ctx, policy, m.settings, m.config, m.l, m.dep.CredManager())
	default:
		return nil, ErrUnknownPolicyType
	}
}
|
||||
|
||||
func (m *manager) getEntityPolicyDriver(cxt context.Context, e fs.Entity, policyOverwrite *ent.StoragePolicy) (*ent.StoragePolicy, driver.Handler, error) {
|
||||
policyID := e.PolicyID()
|
||||
var (
|
||||
policy *ent.StoragePolicy
|
||||
err error
|
||||
)
|
||||
if policyID == 0 {
|
||||
policy = &ent.StoragePolicy{Type: types.PolicyTypeLocal, Settings: &types.PolicySetting{}}
|
||||
} else {
|
||||
if policyOverwrite != nil && policyOverwrite.ID == policyID {
|
||||
policy = policyOverwrite
|
||||
} else {
|
||||
policy, err = m.policyClient.GetPolicyByID(cxt, e.PolicyID())
|
||||
if err != nil {
|
||||
return nil, nil, serializer.NewError(serializer.CodeDBError, "failed to get policy", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
d, err := m.GetStorageDriver(cxt, policy)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return policy, d, nil
|
||||
}
|
||||
171
pkg/filemanager/manager/manager.go
Normal file
171
pkg/filemanager/manager/manager.go
Normal file
@@ -0,0 +1,171 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
)
|
||||
|
||||
var (
	// ErrUnknownPolicyType is returned when a storage policy's type does not
	// match any known driver.
	ErrUnknownPolicyType = serializer.NewError(serializer.CodeInternalSetting, "Unknown policy type", nil)
)

const (
	// UploadSessionCachePrefix is the KV key prefix for persisted upload
	// sessions awaiting callback.
	UploadSessionCachePrefix = "callback_"
	// UploadSessionCtx is the ctx key for the upload session.
	UploadSessionCtx = "uploadSession"
)
|
||||
|
||||
type (
	// FileOperation groups the per-file operations of a FileManager.
	FileOperation interface {
		// Get gets file object by given path
		Get(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.File, error)
		// List lists files under given path
		List(ctx context.Context, path *fs.URI, args *ListArgs) (fs.File, *fs.ListFileResult, error)
		// Create creates a file or directory
		Create(ctx context.Context, path *fs.URI, fileType types.FileType, opt ...fs.Option) (fs.File, error)
		// Rename renames a file or directory
		Rename(ctx context.Context, path *fs.URI, newName string) (fs.File, error)
		// Delete deletes a group of file or directory. UnlinkOnly indicates whether to delete file record in DB only.
		Delete(ctx context.Context, path []*fs.URI, opts ...fs.Option) error
		// Restore restores a group of files
		Restore(ctx context.Context, path ...*fs.URI) error
		// MoveOrCopy moves or copies a group of files
		MoveOrCopy(ctx context.Context, src []*fs.URI, dst *fs.URI, isCopy bool) error
		// Update puts file content. If given file does not exist, it will create a new one.
		Update(ctx context.Context, req *fs.UploadRequest, opts ...fs.Option) (fs.File, error)
		// Walk walks through given path
		Walk(ctx context.Context, path *fs.URI, depth int, f fs.WalkFunc, opts ...fs.Option) error
		// PatchMedata updates or inserts metadata entries on the given files.
		PatchMedata(ctx context.Context, path []*fs.URI, data ...fs.MetadataPatch) error
		// CreateViewerSession creates a viewer session for given file
		CreateViewerSession(ctx context.Context, uri *fs.URI, version string, viewer *setting.Viewer) (*ViewerSession, error)
	}

	// FsManagement groups file-system-level (as opposed to per-file)
	// operations.
	FsManagement interface {
		// SharedAddressTranslation translates shared symbolic address to real address. If path does not exist,
		// most recent existing parent directory will be returned.
		SharedAddressTranslation(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.File, *fs.URI, error)
		// Capacity gets capacity of current file system
		Capacity(ctx context.Context) (*fs.Capacity, error)
		// CheckIfCapacityExceeded checks if given user's capacity exceeded, and send notification email
		CheckIfCapacityExceeded(ctx context.Context) error
		// LocalDriver gets local driver for operating local files.
		LocalDriver(policy *ent.StoragePolicy) driver.Handler
		// CastStoragePolicyOnSlave check if given storage policy need to be casted to another.
		// It is used on slave node, when local policy need to cast to remote policy;
		// Remote policy with same node ID can be casted to local policy.
		CastStoragePolicyOnSlave(ctx context.Context, policy *ent.StoragePolicy) *ent.StoragePolicy
		// GetStorageDriver gets storage driver for given policy
		GetStorageDriver(ctx context.Context, policy *ent.StoragePolicy) (driver.Handler, error)
	}

	// ShareManagement groups share-link operations.
	ShareManagement interface {
		// CreateOrUpdateShare creates or updates a share link for given path
		CreateOrUpdateShare(ctx context.Context, path *fs.URI, args *CreateShareArgs) (*ent.Share, error)
	}

	// Archiver creates archives from groups of files.
	Archiver interface {
		CreateArchive(ctx context.Context, uris []*fs.URI, writer io.Writer, opts ...fs.Option) (int, error)
	}

	// FileManager is the full file-management facade composed of all the
	// capability interfaces above plus locking, entity and upload management.
	FileManager interface {
		fs.LockSystem
		FileOperation
		EntityManagement
		UploadManagement
		FsManagement
		ShareManagement
		Archiver

		// Recycle reset current FileManager object and put back to resource pool
		Recycle()
	}

	// GetEntityUrlArgs single args to get entity url
	GetEntityUrlArgs struct {
		URI               *fs.URI
		PreferredEntityID string
	}

	// CreateShareArgs args to create share link
	CreateShareArgs struct {
		ExistedShareID  int
		IsPrivate       bool
		RemainDownloads int
		Expire          *time.Time
	}
)
|
||||
|
||||
// manager is the default FileManager implementation. It wires a user's
// database-backed virtual file system together with settings, cache, auth,
// config and storage-policy services resolved from the dependency container.
// A stateless manager (see newStatelessFileManager) has no user and no fs.
type manager struct {
	user         *ent.User
	l            logging.Logger
	fs           fs.FileSystem
	settings     setting.Provider
	kv           cache.Driver
	config       conf.ConfigProvider
	stateless    bool
	auth         auth.Auth
	hasher       hashid.Encoder
	policyClient inventory.StoragePolicyClient

	dep dependency.Dep
}
|
||||
|
||||
// NewFileManager creates a FileManager for the given user. On slave nodes, or
// when no user is supplied, a stateless manager without a database-backed
// file system is returned instead.
func NewFileManager(dep dependency.Dep, u *ent.User) FileManager {
	config := dep.ConfigProvider()
	if config.System().Mode == conf.SlaveMode || u == nil {
		return newStatelessFileManager(dep)
	}
	return &manager{
		l:        dep.Logger(),
		user:     u,
		settings: dep.SettingProvider(),
		// The DB-backed virtual file system scoped to this user.
		fs: dbfs.NewDatabaseFS(u, dep.FileClient(), dep.ShareClient(), dep.Logger(), dep.LockSystem(),
			dep.SettingProvider(), dep.StoragePolicyClient(), dep.HashIDEncoder(), dep.UserClient(), dep.KV(), dep.NavigatorStateKV()),
		kv:           dep.KV(),
		config:       config,
		auth:         dep.GeneralAuth(),
		hasher:       dep.HashIDEncoder(),
		policyClient: dep.StoragePolicyClient(),
		dep:          dep,
	}
}
|
||||
|
||||
func newStatelessFileManager(dep dependency.Dep) FileManager {
|
||||
return &manager{
|
||||
l: dep.Logger(),
|
||||
settings: dep.SettingProvider(),
|
||||
kv: dep.KV(),
|
||||
config: dep.ConfigProvider(),
|
||||
stateless: true,
|
||||
auth: dep.GeneralAuth(),
|
||||
dep: dep,
|
||||
hasher: dep.HashIDEncoder(),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *manager) Recycle() {
|
||||
if m.fs != nil {
|
||||
m.fs.Recycle()
|
||||
}
|
||||
}
|
||||
|
||||
// newOption returns an empty option struct ready to collect fs.Option values.
func newOption() *fs.FsOption {
	return &fs.FsOption{}
}
|
||||
193
pkg/filemanager/manager/mediameta.go
Normal file
193
pkg/filemanager/manager/mediameta.go
Normal file
@@ -0,0 +1,193 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/task"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type (
	// MediaMetaTask is a queued background task that extracts media metadata
	// for a file's version entity and saves it as file metadata.
	MediaMetaTask struct {
		*queue.DBTask
	}

	// MediaMetaTaskState is the JSON-serialized private state persisted with
	// a MediaMetaTask.
	MediaMetaTaskState struct {
		Uri      *fs.URI `json:"uri"`
		EntityID int     `json:"entity_id"`
	}
)

// init registers the factory so persisted media meta tasks can be resumed
// from their DB records.
func init() {
	queue.RegisterResumableTaskFactory(queue.MediaMetaTaskType, NewMediaMetaTaskFromModel)
}
|
||||
|
||||
// NewMediaMetaTask creates a new MediaMetaTask to extract media metadata for
// the entity with entityID of the file at uri, owned by creator. The task's
// state is serialized to JSON for persistence with the task record.
func NewMediaMetaTask(ctx context.Context, uri *fs.URI, entityID int, creator *ent.User) (*MediaMetaTask, error) {
	state := &MediaMetaTaskState{
		Uri:      uri,
		EntityID: entityID,
	}
	stateBytes, err := json.Marshal(state)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal state: %w", err)
	}

	return &MediaMetaTask{
		DBTask: &queue.DBTask{
			DirectOwner: creator,
			Task: &ent.Task{
				Type:          queue.MediaMetaTaskType,
				CorrelationID: logging.CorrelationID(ctx),
				PrivateState:  string(stateBytes),
				PublicState:   &types.TaskPublicState{},
			},
		},
	}, nil
}
|
||||
|
||||
func NewMediaMetaTaskFromModel(task *ent.Task) queue.Task {
|
||||
return &MediaMetaTask{
|
||||
DBTask: &queue.DBTask{
|
||||
Task: task,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Do executes the media meta task: it restores the task state from the
// persisted private state, then extracts and saves media metadata for the
// recorded entity. State unmarshal failures are wrapped with
// queue.CriticalErr so the task is not retried.
func (m *MediaMetaTask) Do(ctx context.Context) (task.Status, error) {
	dep := dependency.FromContext(ctx)
	fm := NewFileManager(dep, inventory.UserFromContext(ctx)).(*manager)

	// unmarshal state
	var state MediaMetaTaskState
	if err := json.Unmarshal([]byte(m.State()), &state); err != nil {
		return task.StatusError, fmt.Errorf("failed to unmarshal state: %s (%w)", err, queue.CriticalErr)
	}

	err := fm.ExtractAndSaveMediaMeta(ctx, state.Uri, state.EntityID)
	if err != nil {
		return task.StatusError, err
	}

	return task.StatusCompleted, nil
}
|
||||
|
||||
func (m *manager) ExtractAndSaveMediaMeta(ctx context.Context, uri *fs.URI, entityID int) error {
|
||||
// 1. retrieve file info
|
||||
file, err := m.fs.Get(ctx, uri, dbfs.WithFileEntities())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get file: %w", err)
|
||||
}
|
||||
|
||||
versions := lo.Filter(file.Entities(), func(i fs.Entity, index int) bool {
|
||||
return i.Type() == types.EntityTypeVersion
|
||||
})
|
||||
targetVersion, versionIndex, found := lo.FindIndexOf(versions, func(i fs.Entity) bool {
|
||||
return i.ID() == entityID
|
||||
})
|
||||
if !found {
|
||||
return fmt.Errorf("failed to find version: %s (%w)", err, queue.CriticalErr)
|
||||
}
|
||||
|
||||
if versionIndex != 0 {
|
||||
m.l.Debug("Skip media meta task for non-latest version.")
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
metas []driver.MediaMeta
|
||||
)
|
||||
// 2. try using native driver
|
||||
_, d, err := m.getEntityPolicyDriver(ctx, targetVersion, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get storage driver: %s (%w)", err, queue.CriticalErr)
|
||||
}
|
||||
driverCaps := d.Capabilities()
|
||||
if util.IsInExtensionList(driverCaps.MediaMetaSupportedExts, file.Name()) {
|
||||
m.l.Debug("Using native driver to generate media meta.")
|
||||
metas, err = d.MediaMeta(ctx, targetVersion.Source(), file.Ext())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get media meta using native driver: %w", err)
|
||||
}
|
||||
} else if driverCaps.MediaMetaProxy && util.IsInExtensionList(m.dep.MediaMetaExtractor(ctx).Exts(), file.Name()) {
|
||||
m.l.Debug("Using local extractor to generate media meta.")
|
||||
extractor := m.dep.MediaMetaExtractor(ctx)
|
||||
source, err := m.GetEntitySource(ctx, targetVersion.ID())
|
||||
defer source.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get entity source: %w", err)
|
||||
}
|
||||
|
||||
metas, err = extractor.Extract(ctx, file.Ext(), source)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to extract media meta using local extractor: %w", err)
|
||||
}
|
||||
|
||||
} else {
|
||||
m.l.Debug("No available generator for media meta.")
|
||||
return nil
|
||||
}
|
||||
|
||||
m.l.Debug("%d media meta generated.", len(metas))
|
||||
m.l.Debug("Media meta: %v", metas)
|
||||
|
||||
// 3. save meta
|
||||
if len(metas) > 0 {
|
||||
if err := m.fs.PatchMetadata(ctx, []*fs.URI{uri}, lo.Map(metas, func(i driver.MediaMeta, index int) fs.MetadataPatch {
|
||||
return fs.MetadataPatch{
|
||||
Key: fmt.Sprintf("%s:%s", i.Type, i.Key),
|
||||
Value: i.Value,
|
||||
}
|
||||
})...); err != nil {
|
||||
return fmt.Errorf("failed to save media meta: %s (%w)", err, queue.CriticalErr)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *manager) shouldGenerateMediaMeta(ctx context.Context, d driver.Handler, fileName string) bool {
|
||||
driverCaps := d.Capabilities()
|
||||
if util.IsInExtensionList(driverCaps.MediaMetaSupportedExts, fileName) {
|
||||
// Handler support it natively
|
||||
return true
|
||||
}
|
||||
|
||||
if driverCaps.MediaMetaProxy && util.IsInExtensionList(m.dep.MediaMetaExtractor(ctx).Exts(), fileName) {
|
||||
// Handler does not support. but proxy is enabled.
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *manager) mediaMetaForNewEntity(ctx context.Context, session *fs.UploadSession, d driver.Handler) {
|
||||
if session.Props.EntityType == nil || *session.Props.EntityType == types.EntityTypeVersion {
|
||||
if !m.shouldGenerateMediaMeta(ctx, d, session.Props.Uri.Name()) {
|
||||
return
|
||||
}
|
||||
|
||||
mediaMetaTask, err := NewMediaMetaTask(ctx, session.Props.Uri, session.EntityID, m.user)
|
||||
if err != nil {
|
||||
m.l.Warning("Failed to create media meta task: %s", err)
|
||||
return
|
||||
}
|
||||
if err := m.dep.MediaMetaQueue(ctx).QueueTask(ctx, mediaMetaTask); err != nil {
|
||||
m.l.Warning("Failed to queue media meta task: %s", err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
174
pkg/filemanager/manager/metadata.go
Normal file
174
pkg/filemanager/manager/metadata.go
Normal file
@@ -0,0 +1,174 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/constants"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/go-playground/validator/v10"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type (
	// metadataValidator validates a single metadata patch before it is
	// applied to files.
	metadataValidator func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error
)

const (
	// wildcardMetadataKey matches any key within its category.
	wildcardMetadataKey = "*"
	// Metadata key categories (the part before the first colon).
	customizeMetadataSuffix = "customize"
	tagMetadataSuffix       = "tag"
	// Well-known fully-qualified metadata keys.
	iconColorMetadataKey     = customizeMetadataSuffix + ":icon_color"
	emojiIconMetadataKey     = customizeMetadataSuffix + ":emoji"
	shareOwnerMetadataKey    = dbfs.MetadataSysPrefix + "shared_owner"
	shareRedirectMetadataKey = dbfs.MetadataSysPrefix + "shared_redirect"
)
|
||||
|
||||
var (
|
||||
validate = validator.New()
|
||||
|
||||
lastEmojiHash = ""
|
||||
emojiPresets = map[string]struct{}{}
|
||||
|
||||
// validateColor validates a color value
|
||||
validateColor = func(optional bool) metadataValidator {
|
||||
return func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {
|
||||
if patch.Remove {
|
||||
return nil
|
||||
}
|
||||
|
||||
tag := "omitempty,iscolor"
|
||||
if !optional {
|
||||
tag = "required,iscolor"
|
||||
}
|
||||
|
||||
res := validate.Var(patch.Value, tag)
|
||||
if res != nil {
|
||||
return fmt.Errorf("invalid color: %w", res)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
validators = map[string]map[string]metadataValidator{
|
||||
"sys": {
|
||||
wildcardMetadataKey: func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {
|
||||
if patch.Remove {
|
||||
return fmt.Errorf("cannot remove system metadata")
|
||||
}
|
||||
|
||||
dep := dependency.FromContext(ctx)
|
||||
// Validate share owner is valid hashid
|
||||
if patch.Key == shareOwnerMetadataKey {
|
||||
hasher := dep.HashIDEncoder()
|
||||
_, err := hasher.Decode(patch.Value, hashid.UserID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid share owner: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate share redirect uri is valid share uri
|
||||
if patch.Key == shareRedirectMetadataKey {
|
||||
uri, err := fs.NewUriFromString(patch.Value)
|
||||
if err != nil || uri.FileSystem() != constants.FileSystemShare {
|
||||
return fmt.Errorf("invalid redirect uri: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unsupported system metadata key: %s", patch.Key)
|
||||
},
|
||||
},
|
||||
"dav": {},
|
||||
customizeMetadataSuffix: {
|
||||
iconColorMetadataKey: validateColor(false),
|
||||
emojiIconMetadataKey: func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {
|
||||
if patch.Remove {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate if patched emoji is within preset list.
|
||||
emojis := m.settings.EmojiPresets(ctx)
|
||||
current := fmt.Sprintf("%x", (sha1.Sum([]byte(emojis))))
|
||||
if current != lastEmojiHash {
|
||||
presets := make(map[string][]string)
|
||||
if err := json.Unmarshal([]byte(emojis), &presets); err != nil {
|
||||
return fmt.Errorf("failed to read emoji setting: %w", err)
|
||||
}
|
||||
|
||||
emojiPresets = make(map[string]struct{})
|
||||
for _, v := range presets {
|
||||
for _, emoji := range v {
|
||||
emojiPresets[emoji] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := emojiPresets[patch.Value]; !ok {
|
||||
return fmt.Errorf("unsupported emoji")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
},
|
||||
tagMetadataSuffix: {
|
||||
wildcardMetadataKey: func(ctx context.Context, m *manager, patch *fs.MetadataPatch) error {
|
||||
if err := validateColor(true)(ctx, m, patch); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if patch.Key == tagMetadataSuffix+":" {
|
||||
return fmt.Errorf("invalid metadata key")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// PatchMedata validates the given metadata patches and then applies them to
// the files at path. (The method name's spelling follows the FileOperation
// interface it implements.)
func (m *manager) PatchMedata(ctx context.Context, path []*fs.URI, data ...fs.MetadataPatch) error {
	if err := m.validateMetadata(ctx, data...); err != nil {
		return err
	}

	return m.fs.PatchMetadata(ctx, path, data...)
}
|
||||
|
||||
func (m *manager) validateMetadata(ctx context.Context, data ...fs.MetadataPatch) error {
|
||||
for _, patch := range data {
|
||||
category := strings.Split(patch.Key, ":")
|
||||
if len(category) < 2 {
|
||||
return serializer.NewError(serializer.CodeParamErr, "Invalid metadata key", nil)
|
||||
}
|
||||
|
||||
categoryValidators, ok := validators[category[0]]
|
||||
if !ok {
|
||||
return serializer.NewError(serializer.CodeParamErr, "Invalid metadata key",
|
||||
fmt.Errorf("unknown category: %s", category[0]))
|
||||
}
|
||||
|
||||
// Explicit validators
|
||||
if v, ok := categoryValidators[patch.Key]; ok {
|
||||
if err := v(ctx, m, &patch); err != nil {
|
||||
return serializer.NewError(serializer.CodeParamErr, "Invalid metadata patch", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Wildcard validators
|
||||
if v, ok := categoryValidators[wildcardMetadataKey]; ok {
|
||||
if err := v(ctx, m, &patch); err != nil {
|
||||
return serializer.NewError(serializer.CodeParamErr, "Invalid metadata patch", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
296
pkg/filemanager/manager/operation.go
Normal file
296
pkg/filemanager/manager/operation.go
Normal file
@@ -0,0 +1,296 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
const (
	// EntityUrlCacheKeyPrefix prefixes KV cache keys for generated entity URLs.
	EntityUrlCacheKeyPrefix = "entity_url_"
	// DownloadSentinelCachePrefix prefixes KV keys used as download sentinels.
	DownloadSentinelCachePrefix = "download_sentinel_"
)

type (
	// ListArgs carries pagination and ordering parameters for List.
	ListArgs struct {
		Page           int
		PageSize       int
		PageToken      string
		Order          string
		OrderDirection string
		// StreamResponseCallback is used for streamed list operation, e.g. searching files.
		// Whenever a new item is found, this callback will be called with the current item and the parent item.
		StreamResponseCallback func(fs.File, []fs.File)
	}

	// EntityUrlCache is the gob-serialized cache record of a generated entity
	// URL and its expiration time.
	EntityUrlCache struct {
		Url      string
		ExpireAt *time.Time
	}
)

// init registers EntityUrlCache with gob so it can be stored in the KV cache.
func init() {
	gob.Register(EntityUrlCache{})
}
|
||||
|
||||
// Get retrieves the file object at path from the underlying file system.
func (m *manager) Get(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.File, error) {
	return m.fs.Get(ctx, path, opts...)
}
|
||||
|
||||
// List lists files under path using the pagination and ordering settings in
// args. When path carries search parameters, a predefined category (if any)
// overwrites the search query, streamed responses may be enabled, and cursor
// pagination is forced; otherwise offset pagination is used unless cursor
// pagination is globally enabled.
func (m *manager) List(ctx context.Context, path *fs.URI, args *ListArgs) (fs.File, *fs.ListFileResult, error) {
	dbfsSetting := m.settings.DBFS(ctx)
	opts := []fs.Option{
		fs.WithPageSize(args.PageSize),
		fs.WithOrderBy(args.Order),
		fs.WithOrderDirection(args.OrderDirection),
		dbfs.WithFilePublicMetadata(),
		dbfs.WithContextHint(),
		dbfs.WithFileShareIfOwned(),
	}

	searchParams := path.SearchParameters()
	if searchParams != nil {
		if dbfsSetting.UseSSEForSearch {
			// Stream results back as they are found.
			opts = append(opts, dbfs.WithStreamListResponseCallback(args.StreamResponseCallback))
		}

		if searchParams.Category != "" {
			// Overwrite search query with predefined category
			category := fs.SearchCategoryFromString(searchParams.Category)
			if category == setting.CategoryUnknown {
				return nil, nil, fmt.Errorf("unknown category: %s", searchParams.Category)
			}

			path = path.SetQuery(m.settings.SearchCategoryQuery(ctx, category))
			searchParams = path.SearchParameters()
		}
	}

	if dbfsSetting.UseCursorPagination || searchParams != nil {
		opts = append(opts, dbfs.WithCursorPagination(args.PageToken))
	} else {
		opts = append(opts, fs.WithPage(args.Page))
	}

	return m.fs.List(ctx, path, opts...)
}
|
||||
|
||||
// SharedAddressTranslation translates a shared symbolic address into its real
// address via the underlying file system.
//
// NOTE(review): the parsed options in o are currently unused and opts are not
// forwarded to m.fs.SharedAddressTranslation — confirm whether this is
// intentional or the options should be passed through.
func (m *manager) SharedAddressTranslation(ctx context.Context, path *fs.URI, opts ...fs.Option) (fs.File, *fs.URI, error) {
	o := newOption()
	for _, opt := range opts {
		opt.Apply(o)
	}

	return m.fs.SharedAddressTranslation(ctx, path)
}
|
||||
|
||||
// Create creates a new file or folder at path. On stateless (slave) nodes the
// creation is forwarded to the master node; otherwise metadata is validated
// first, and the presence of the share-redirect metadata key turns the new
// file into a symbolic link.
func (m *manager) Create(ctx context.Context, path *fs.URI, fileType types.FileType, opts ...fs.Option) (fs.File, error) {
	o := newOption()
	for _, opt := range opts {
		opt.Apply(o)
	}

	if m.stateless {
		// Stateless node: delegate creation to the configured node.
		return nil, o.Node.CreateFile(ctx, &fs.StatelessCreateFileService{
			Path:   path.String(),
			Type:   fileType,
			UserID: o.StatelessUserID,
		})
	}

	isSymbolic := false
	if o.Metadata != nil {
		// Validate all metadata patches; detect the share-redirect key as a
		// side effect of the mapping.
		if err := m.validateMetadata(ctx, lo.MapToSlice(o.Metadata, func(key string, value string) fs.MetadataPatch {
			if key == shareRedirectMetadataKey {
				isSymbolic = true
			}

			return fs.MetadataPatch{
				Key:   key,
				Value: value,
			}
		})...); err != nil {
			return nil, err
		}
	}

	if isSymbolic {
		opts = append(opts, dbfs.WithSymbolicLink())
	}

	return m.fs.Create(ctx, path, fileType, opts...)
}
|
||||
|
||||
// Rename renames the file at path to newName, delegating to the dbfs layer.
func (m *manager) Rename(ctx context.Context, path *fs.URI, newName string) (fs.File, error) {
	return m.fs.Rename(ctx, path, newName)
}
|
||||
|
||||
// MoveOrCopy moves (or, when isCopy is true, copies) src files into dst,
// delegating to the dbfs layer.
func (m *manager) MoveOrCopy(ctx context.Context, src []*fs.URI, dst *fs.URI, isCopy bool) error {
	return m.fs.MoveOrCopy(ctx, src, dst, isCopy)
}
|
||||
|
||||
// SoftDelete moves the given files into the trash bin, delegating to dbfs.
func (m *manager) SoftDelete(ctx context.Context, path ...*fs.URI) error {
	return m.fs.SoftDelete(ctx, path...)
}
|
||||
|
||||
// Delete removes the given files. Unless the caller opts out of soft
// deletion, this degrades to SoftDelete (trash bin). A hard delete collects
// the blob entities that became stale and queues an explicit recycle task to
// remove them from storage asynchronously.
func (m *manager) Delete(ctx context.Context, path []*fs.URI, opts ...fs.Option) error {
	o := newOption()
	for _, opt := range opts {
		opt.Apply(o)
	}

	// Default path: move to trash instead of destroying data.
	if !o.SkipSoftDelete && !o.SysSkipSoftDelete {
		return m.SoftDelete(ctx, path...)
	}

	staleEntities, err := m.fs.Delete(ctx, path, fs.WithUnlinkOnly(o.UnlinkOnly), fs.WithSysSkipSoftDelete(o.SysSkipSoftDelete))
	if err != nil {
		return err
	}

	m.l.Debug("New stale entities: %v", staleEntities)

	// Delete stale entities
	if len(staleEntities) > 0 {
		t, err := newExplicitEntityRecycleTask(ctx, lo.Map(staleEntities, func(entity fs.Entity, index int) int {
			return entity.ID()
		}))
		if err != nil {
			return fmt.Errorf("failed to create explicit entity recycle task: %w", err)
		}

		if err := m.dep.EntityRecycleQueue(ctx).QueueTask(ctx, t); err != nil {
			return fmt.Errorf("failed to queue explicit entity recycle task: %w", err)
		}
	}
	return nil
}
|
||||
|
||||
// Walk traverses the tree rooted at path up to depth levels, invoking f for
// each visited file; delegates to the dbfs layer.
func (m *manager) Walk(ctx context.Context, path *fs.URI, depth int, f fs.WalkFunc, opts ...fs.Option) error {
	return m.fs.Walk(ctx, path, depth, f, opts...)
}
|
||||
|
||||
func (m *manager) Capacity(ctx context.Context) (*fs.Capacity, error) {
|
||||
res, err := m.fs.Capacity(ctx, m.user)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// CheckIfCapacityExceeded loads the user's capacity (forcing the user group
// to be loaded via context) and is intended to report whether used storage
// exceeds the total quota.
func (m *manager) CheckIfCapacityExceeded(ctx context.Context) error {
	ctx = context.WithValue(ctx, inventory.LoadUserGroup{}, true)
	capacity, err := m.Capacity(ctx)
	if err != nil {
		return fmt.Errorf("failed to get user capacity: %w", err)
	}

	// Within quota: nothing to report.
	if capacity.Used <= capacity.Total {
		return nil
	}

	// NOTE(review): the exceeded case also returns nil, so this check can
	// never fail — possibly an intentional stub in this edition; confirm.
	return nil
}
|
||||
|
||||
// ConfirmLock verifies that the given lock tokens cover uri (with ancestor as
// the closest locked parent), delegating to the dbfs layer.
func (l *manager) ConfirmLock(ctx context.Context, ancestor fs.File, uri *fs.URI, token ...string) (func(), fs.LockSession, error) {
	return l.fs.ConfirmLock(ctx, ancestor, uri, token...)
}
|
||||
|
||||
// Lock acquires a lock on uri for duration d on behalf of requester,
// delegating to the dbfs layer.
func (l *manager) Lock(ctx context.Context, d time.Duration, requester *ent.User, zeroDepth bool,
	application lock.Application, uri *fs.URI, token string) (fs.LockSession, error) {
	return l.fs.Lock(ctx, d, requester, zeroDepth, application, uri, token)
}
|
||||
|
||||
// Unlock releases the locks identified by tokens, delegating to dbfs.
func (l *manager) Unlock(ctx context.Context, tokens ...string) error {
	return l.fs.Unlock(ctx, tokens...)
}
|
||||
|
||||
// Refresh extends the lock identified by token by duration d, delegating to
// the dbfs layer.
func (l *manager) Refresh(ctx context.Context, d time.Duration, token string) (lock.LockDetails, error) {
	return l.fs.Refresh(ctx, d, token)
}
|
||||
|
||||
// Restore moves the given trashed files back out of the trash bin,
// delegating to the dbfs layer.
func (l *manager) Restore(ctx context.Context, path ...*fs.URI) error {
	return l.fs.Restore(ctx, path...)
}
|
||||
|
||||
// CreateOrUpdateShare creates a share link for the file at path, or updates
// the existing share identified by args.ExistedShareID. Only the file owner
// may share, and symbolic files cannot be shared.
func (l *manager) CreateOrUpdateShare(ctx context.Context, path *fs.URI, args *CreateShareArgs) (*ent.Share, error) {
	file, err := l.fs.Get(ctx, path, dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityShare))
	if err != nil {
		return nil, serializer.NewError(serializer.CodeNotFound, "src file not found", err)
	}

	// Only file owner can share file
	if file.OwnerID() != l.user.ID {
		return nil, serializer.NewError(serializer.CodeNoPermissionErr, "permission denied", nil)
	}

	if file.IsSymbolic() {
		return nil, serializer.NewError(serializer.CodeNoPermissionErr, "cannot share symbolic file", nil)
	}

	var existed *ent.Share
	shareClient := l.dep.ShareClient()
	if args.ExistedShareID != 0 {
		// Load the share with its file edge so ownership can be checked.
		loadShareCtx := context.WithValue(ctx, inventory.LoadShareFile{}, true)
		existed, err = shareClient.GetByID(loadShareCtx, args.ExistedShareID)
		if err != nil {
			return nil, serializer.NewError(serializer.CodeNotFound, "failed to get existed share", err)
		}

		// The existing share must point at the same file being shared.
		if existed.Edges.File.ID != file.ID() {
			return nil, serializer.NewError(serializer.CodeNotFound, "share link not found", nil)
		}
	}

	password := ""
	if args.IsPrivate {
		// Private shares get a random 8-character lowercase password.
		password = util.RandString(8, util.RandomLowerCases)
	}

	share, err := shareClient.Upsert(ctx, &inventory.CreateShareParams{
		OwnerID:         file.OwnerID(),
		FileID:          file.ID(),
		Password:        password,
		Expires:         args.Expire,
		RemainDownloads: args.RemainDownloads,
		Existed:         existed,
	})

	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "failed to create share", err)
	}

	return share, nil
}
|
||||
|
||||
// getEntityDisplayName derives a user-facing name for entity e of file f:
// thumbnails and live photos get a suffixed variant of the display name;
// anything else falls back to the file's name.
func getEntityDisplayName(f fs.File, e fs.Entity) string {
	switch e.Type() {
	case types.EntityTypeThumbnail:
		return fmt.Sprintf("%s_thumbnail", f.DisplayName())
	case types.EntityTypeLivePhoto:
		return fmt.Sprintf("%s_live_photo.mov", f.DisplayName())
	default:
		return f.Name()
	}
}
|
||||
|
||||
func expireTimeToTTL(expireAt *time.Time) int {
|
||||
if expireAt == nil {
|
||||
return -1
|
||||
}
|
||||
|
||||
return int(time.Until(*expireAt).Seconds())
|
||||
}
|
||||
374
pkg/filemanager/manager/recycle.go
Normal file
374
pkg/filemanager/manager/recycle.go
Normal file
@@ -0,0 +1,374 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/task"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/crontab"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type (
	// ExplicitEntityRecycleTask recycles an explicit list of stale entities.
	ExplicitEntityRecycleTask struct {
		*queue.DBTask
	}

	// ExplicitEntityRecycleTaskState is the JSON-encoded private state of an
	// ExplicitEntityRecycleTask.
	ExplicitEntityRecycleTaskState struct {
		// EntityIDs lists the entities to recycle.
		EntityIDs []int `json:"entity_ids,omitempty"`
		// Errors accumulates one batch of per-entity errors per attempt.
		Errors [][]RecycleError `json:"errors,omitempty"`
	}

	// RecycleError records the failure of recycling one entity.
	RecycleError struct {
		ID    string `json:"id"`
		Error string `json:"error"`
	}
)
|
||||
|
||||
func init() {
|
||||
queue.RegisterResumableTaskFactory(queue.ExplicitEntityRecycleTaskType, NewExplicitEntityRecycleTaskFromModel)
|
||||
queue.RegisterResumableTaskFactory(queue.EntityRecycleRoutineTaskType, NewEntityRecycleRoutineTaskFromModel)
|
||||
crontab.Register(setting.CronTypeEntityCollect, func(ctx context.Context) {
|
||||
dep := dependency.FromContext(ctx)
|
||||
l := dep.Logger()
|
||||
t, err := NewEntityRecycleRoutineTask(ctx)
|
||||
if err != nil {
|
||||
l.Error("Failed to create entity recycle routine task: %s", err)
|
||||
}
|
||||
|
||||
if err := dep.EntityRecycleQueue(ctx).QueueTask(ctx, t); err != nil {
|
||||
l.Error("Failed to queue entity recycle routine task: %s", err)
|
||||
}
|
||||
})
|
||||
crontab.Register(setting.CronTypeTrashBinCollect, CronCollectTrashBin)
|
||||
}
|
||||
|
||||
// NewExplicitEntityRecycleTaskFromModel resumes an ExplicitEntityRecycleTask
// from its persisted DB model.
func NewExplicitEntityRecycleTaskFromModel(task *ent.Task) queue.Task {
	return &ExplicitEntityRecycleTask{
		DBTask: &queue.DBTask{
			Task: task,
		},
	}
}
|
||||
|
||||
// newExplicitEntityRecycleTask builds a queued task that recycles the given
// entity IDs, with its state JSON-encoded into the task's private state.
func newExplicitEntityRecycleTask(ctx context.Context, entities []int) (*ExplicitEntityRecycleTask, error) {
	state := &ExplicitEntityRecycleTaskState{
		EntityIDs: entities,
		Errors:    make([][]RecycleError, 0),
	}
	stateBytes, err := json.Marshal(state)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal state: %w", err)
	}

	t := &ExplicitEntityRecycleTask{
		DBTask: &queue.DBTask{
			Task: &ent.Task{
				Type:          queue.ExplicitEntityRecycleTaskType,
				CorrelationID: logging.CorrelationID(ctx),
				PrivateState:  string(stateBytes),
				PublicState: &types.TaskPublicState{
					// A resume time just in the past makes the task eligible
					// to run immediately.
					ResumeTime: time.Now().Unix() - 1,
				},
			},
			DirectOwner: inventory.UserFromContext(ctx),
		},
	}
	return t, nil
}
|
||||
|
||||
func (m *ExplicitEntityRecycleTask) Do(ctx context.Context) (task.Status, error) {
|
||||
dep := dependency.FromContext(ctx)
|
||||
fm := NewFileManager(dep, inventory.UserFromContext(ctx)).(*manager)
|
||||
|
||||
// unmarshal state
|
||||
state := &ExplicitEntityRecycleTaskState{}
|
||||
if err := json.Unmarshal([]byte(m.State()), state); err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to unmarshal state: %w", err)
|
||||
}
|
||||
|
||||
// recycle entities
|
||||
err := fm.RecycleEntities(ctx, false, state.EntityIDs...)
|
||||
if err != nil {
|
||||
appendAe(&state.Errors, err)
|
||||
privateState, err := json.Marshal(state)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to marshal state: %w", err)
|
||||
}
|
||||
m.Task.PrivateState = string(privateState)
|
||||
return task.StatusError, err
|
||||
}
|
||||
|
||||
return task.StatusCompleted, nil
|
||||
}
|
||||
|
||||
type (
	// EntityRecycleRoutineTask periodically recycles all stale entities.
	EntityRecycleRoutineTask struct {
		*queue.DBTask
	}

	// EntityRecycleRoutineTaskState is the JSON-encoded private state of an
	// EntityRecycleRoutineTask.
	EntityRecycleRoutineTaskState struct {
		// Errors accumulates one batch of per-entity errors per attempt.
		Errors [][]RecycleError `json:"errors,omitempty"`
	}
)
|
||||
|
||||
// NewEntityRecycleRoutineTaskFromModel resumes an EntityRecycleRoutineTask
// from its persisted DB model.
func NewEntityRecycleRoutineTaskFromModel(task *ent.Task) queue.Task {
	return &EntityRecycleRoutineTask{
		DBTask: &queue.DBTask{
			Task: task,
		},
	}
}
|
||||
|
||||
// NewEntityRecycleRoutineTask builds a queued routine task that recycles all
// stale entities found in the DB.
func NewEntityRecycleRoutineTask(ctx context.Context) (queue.Task, error) {
	state := &EntityRecycleRoutineTaskState{
		Errors: make([][]RecycleError, 0),
	}
	stateBytes, err := json.Marshal(state)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal state: %w", err)
	}

	t := &EntityRecycleRoutineTask{
		DBTask: &queue.DBTask{
			Task: &ent.Task{
				Type:          queue.EntityRecycleRoutineTaskType,
				CorrelationID: logging.CorrelationID(ctx),
				PrivateState:  string(stateBytes),
				PublicState: &types.TaskPublicState{
					// A resume time just in the past makes the task eligible
					// to run immediately.
					ResumeTime: time.Now().Unix() - 1,
				},
			},
			DirectOwner: inventory.UserFromContext(ctx),
		},
	}
	return t, nil
}
|
||||
|
||||
func (m *EntityRecycleRoutineTask) Do(ctx context.Context) (task.Status, error) {
|
||||
dep := dependency.FromContext(ctx)
|
||||
fm := NewFileManager(dep, inventory.UserFromContext(ctx)).(*manager)
|
||||
|
||||
// unmarshal state
|
||||
state := &EntityRecycleRoutineTaskState{}
|
||||
if err := json.Unmarshal([]byte(m.State()), state); err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to unmarshal state: %w", err)
|
||||
}
|
||||
|
||||
// recycle entities
|
||||
err := fm.RecycleEntities(ctx, false)
|
||||
if err != nil {
|
||||
appendAe(&state.Errors, err)
|
||||
|
||||
privateState, err := json.Marshal(state)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to marshal state: %w", err)
|
||||
}
|
||||
m.Task.PrivateState = string(privateState)
|
||||
return task.StatusError, err
|
||||
}
|
||||
|
||||
return task.StatusCompleted, nil
|
||||
}
|
||||
|
||||
// RecycleEntities delete given entities. If the ID list is empty, it will walk through
// all stale entities in DB. Entities are grouped by storage policy and
// deleted in chunks of 100; per-entity failures are collected into an
// aggregate error, and with force=true failed entities are still removed
// from the DB.
func (m *manager) RecycleEntities(ctx context.Context, force bool, entityIDs ...int) error {
	ae := serializer.NewAggregateError()
	entities, err := m.fs.StaleEntities(ctx, entityIDs...)
	if err != nil {
		return fmt.Errorf("failed to get entities: %w", err)
	}

	// Group entities by policy ID
	entityGroup := lo.GroupBy(entities, func(entity fs.Entity) int {
		return entity.PolicyID()
	})

	// Delete entity in each group in batch
	for _, entities := range entityGroup {
		entityChunk := lo.Chunk(entities, 100)
		m.l.Info("Recycling %d entities in %d batches", len(entities), len(entityChunk))

		for batch, chunk := range entityChunk {
			m.l.Info("Start to recycle batch #%d, %d entities", batch, len(chunk))
			mapSrcToId := make(map[string]int, len(chunk))
			// All entities in a chunk share one policy, so the driver
			// resolved from the first entity serves the whole chunk.
			_, d, err := m.getEntityPolicyDriver(ctx, chunk[0], nil)
			if err != nil {
				// Driver unavailable: record the error for every entity in
				// the chunk and move on.
				for _, entity := range chunk {
					ae.Add(strconv.Itoa(entity.ID()), err)
				}
				continue
			}

			for _, entity := range chunk {
				mapSrcToId[entity.Source()] = entity.ID()
			}

			res, err := d.Delete(ctx, lo.Map(chunk, func(entity fs.Entity, index int) string {
				return entity.Source()
			})...)
			if err != nil {
				// res holds the sources that failed to delete; map them back
				// to entity IDs for the aggregate error.
				for _, src := range res {
					ae.Add(strconv.Itoa(mapSrcToId[src]), err)
				}
			}

			// Delete upload session if it's still valid
			for _, entity := range chunk {
				sid := entity.UploadSessionID()
				if sid == nil {
					continue
				}

				if session, ok := m.kv.Get(UploadSessionCachePrefix + sid.String()); ok {
					session := session.(fs.UploadSession)
					if err := d.CancelToken(ctx, &session); err != nil {
						m.l.Warning("Failed to cancel upload session for %q: %s, this is expected if it's remote policy.", session.Props.Uri.String(), err)
					}
					_ = m.kv.Delete(UploadSessionCachePrefix, sid.String())
				}
			}

			// Filtering out entities that are successfully deleted
			rawAe := ae.Raw()
			successEntities := lo.FilterMap(chunk, func(entity fs.Entity, index int) (int, bool) {
				entityIdStr := fmt.Sprintf("%d", entity.ID())
				_, ok := rawAe[entityIdStr]
				if !ok {
					// No error, deleted
					return entity.ID(), true
				}

				// With force, drop the recorded error and remove the entity
				// from the DB anyway.
				if force {
					ae.Remove(entityIdStr)
				}
				return entity.ID(), force
			})

			// Remove entities from DB
			fc, tx, ctx, err := inventory.WithTx(ctx, m.dep.FileClient())
			if err != nil {
				return fmt.Errorf("failed to start transaction: %w", err)
			}
			storageReduced, err := fc.RemoveEntitiesByID(ctx, successEntities...)
			if err != nil {
				_ = inventory.Rollback(tx)
				return fmt.Errorf("failed to remove entities from DB: %w", err)
			}

			// Propagate the freed storage back to the owners' quotas.
			tx.AppendStorageDiff(storageReduced)
			if err := inventory.CommitWithStorageDiff(ctx, tx, m.l, m.dep.UserClient()); err != nil {
				return fmt.Errorf("failed to commit delete change: %w", err)
			}

		}
	}

	return ae.Aggregate()
}
|
||||
|
||||
const (
	// MinimumTrashCollectBatch is the number of expired trash-bin files
	// accumulated before a deletion batch is dispatched.
	MinimumTrashCollectBatch = 1000
)
|
||||
|
||||
// CronCollectTrashBin walks through all files in trash bin and delete them if they are expired.
|
||||
func CronCollectTrashBin(ctx context.Context) {
|
||||
dep := dependency.FromContext(ctx)
|
||||
l := dep.Logger()
|
||||
fm := NewFileManager(dep, inventory.UserFromContext(ctx)).(*manager)
|
||||
pageSize := dep.SettingProvider().DBFS(ctx).MaxPageSize
|
||||
batch := 0
|
||||
expiredFiles := make([]fs.File, 0)
|
||||
for {
|
||||
res, err := fm.fs.AllFilesInTrashBin(ctx, fs.WithPageSize(pageSize))
|
||||
if err != nil {
|
||||
l.Error("Failed to get files in trash bin: %s", err)
|
||||
}
|
||||
|
||||
expired := lo.Filter(res.Files, func(file fs.File, index int) bool {
|
||||
if expire, ok := file.Metadata()[dbfs.MetadataExpectedCollectTime]; ok {
|
||||
expireUnix, err := strconv.ParseInt(expire, 10, 64)
|
||||
if err != nil {
|
||||
l.Warning("Failed to parse expected collect time %q: %s, will treat as expired", expire, err)
|
||||
}
|
||||
|
||||
if expireUnix < time.Now().Unix() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
})
|
||||
l.Info("Found %d files in trash bin pending collect, in batch #%d", len(res.Files), batch)
|
||||
|
||||
expiredFiles = append(expiredFiles, expired...)
|
||||
if len(expiredFiles) >= MinimumTrashCollectBatch {
|
||||
collectTrashBin(ctx, expiredFiles, dep, l)
|
||||
expiredFiles = expiredFiles[:0]
|
||||
}
|
||||
|
||||
if res.Pagination.NextPageToken == "" {
|
||||
if len(expiredFiles) > 0 {
|
||||
collectTrashBin(ctx, expiredFiles, dep, l)
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
batch++
|
||||
}
|
||||
}
|
||||
|
||||
// collectTrashBin permanently deletes the given expired trash-bin files,
// grouped and executed per owning user.
func collectTrashBin(ctx context.Context, files []fs.File, dep dependency.Dep, l logging.Logger) {
	l.Info("Start to collect %d files in trash bin", len(files))
	uc := dep.UserClient()

	// Group files by Owners
	fileGroup := lo.GroupBy(files, func(file fs.File) int {
		return file.OwnerID()
	})

	for uid, expiredFiles := range fileGroup {
		ctx = context.WithValue(ctx, inventory.LoadUserGroup{}, true)
		user, err := uc.GetByID(ctx, uid)
		if err != nil {
			l.Error("Failed to get user %d: %s", uid, err)
			continue
		}

		// Delete on behalf of the owning user so ownership checks apply.
		ctx = context.WithValue(ctx, inventory.UserCtx{}, user)
		fm := NewFileManager(dep, user).(*manager)
		if err := fm.Delete(ctx, lo.Map(expiredFiles, func(file fs.File, index int) *fs.URI {
			return file.Uri(false)
		}), fs.WithSkipSoftDelete(true)); err != nil {
			l.Error("Failed to delete files for user %d: %s", uid, err)
		}
	}
}
|
||||
|
||||
// appendAe appends one error batch to errs. If err is (or wraps) an
// AggregateError, its per-ID errors are flattened into the new batch;
// otherwise the new batch stays empty.
func appendAe(errs *[][]RecycleError, err error) {
	var ae *serializer.AggregateError
	// Always open a new (possibly empty) batch for this invocation.
	*errs = append(*errs, make([]RecycleError, 0))
	if errors.As(err, &ae) {
		(*errs)[len(*errs)-1] = lo.MapToSlice(ae.Raw(), func(key string, value error) RecycleError {
			return RecycleError{
				ID:    key,
				Error: value.Error(),
			}
		})
	}
}
|
||||
294
pkg/filemanager/manager/thumbnail.go
Normal file
294
pkg/filemanager/manager/thumbnail.go
Normal file
@@ -0,0 +1,294 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/task"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/local"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
// Thumbnail returns the thumbnail entity of the file. Resolution order:
// existing thumbnail entity → native (storage-policy) generator → proxy
// generator task; if none applies, the file is marked thumb-disabled.
func (m *manager) Thumbnail(ctx context.Context, uri *fs.URI) (entitysource.EntitySource, error) {
	// retrieve file info
	file, err := m.fs.Get(ctx, uri, dbfs.WithFileEntities(), dbfs.WithFilePublicMetadata())
	if err != nil {
		return nil, fmt.Errorf("failed to get file: %w", err)
	}

	// 0. Check if thumb is disabled in this file.
	if _, ok := file.Metadata()[dbfs.ThumbDisabledKey]; ok || file.Type() != types.FileTypeFile {
		return nil, fs.ErrEntityNotExist
	}

	// 1. If thumbnail entity exist, use it.
	entities := file.Entities()
	thumbEntity, found := lo.Find(entities, func(e fs.Entity) bool {
		return e.Type() == types.EntityTypeThumbnail
	})
	if found {
		thumbSource, err := m.GetEntitySource(ctx, 0, fs.WithEntity(thumbEntity))
		if err != nil {
			return nil, fmt.Errorf("failed to get entity source: %w", err)
		}

		thumbSource.Apply(entitysource.WithDisplayName(file.DisplayName() + ".jpg"))
		return thumbSource, nil
	}

	latest := file.PrimaryEntity()
	// If primary entity not exist, or it's empty
	if latest == nil || latest.ID() == 0 {
		return nil, fmt.Errorf("failed to get latest version")
	}

	// 2. Thumb entity not exist, try native policy generator
	_, handler, err := m.getEntityPolicyDriver(ctx, latest, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to get entity policy driver: %w", err)
	}
	capabilities := handler.Capabilities()
	// Check if file extension and size is supported by native policy generator.
	// NOTE(review): && binds tighter than ||, so this parses as
	// A || (B && C) — when ThumbSupportAllExts is true the size limit is
	// bypassed entirely. Confirm (A || B) && C was not intended.
	if capabilities.ThumbSupportAllExts || util.IsInExtensionList(capabilities.ThumbSupportedExts, file.DisplayName()) &&
		(capabilities.ThumbMaxSize == 0 || latest.Size() <= capabilities.ThumbMaxSize) {
		thumbSource, err := m.GetEntitySource(ctx, 0, fs.WithEntity(latest), fs.WithUseThumb(true))
		if err != nil {
			return nil, fmt.Errorf("failed to get latest entity source: %w", err)
		}

		thumbSource.Apply(entitysource.WithDisplayName(file.DisplayName()))
		return thumbSource, nil
	} else if capabilities.ThumbProxy {
		if err := m.fs.CheckCapability(ctx, uri,
			dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityGenerateThumb)); err != nil {
			// Current FS does not support generate new thumb.
			return nil, fs.ErrEntityNotExist
		}

		// 3. Generate the thumbnail through the proxy task queue and wait.
		thumbEntity, err := m.SubmitAndAwaitThumbnailTask(ctx, uri, file.Ext(), latest)
		if err != nil {
			return nil, fmt.Errorf("failed to execute thumb task: %w", err)
		}

		thumbSource, err := m.GetEntitySource(ctx, 0, fs.WithEntity(thumbEntity))
		if err != nil {
			return nil, fmt.Errorf("failed to get entity source: %w", err)
		}

		return thumbSource, nil
	} else {
		// 4. If proxy generator not support, mark thumb as not available.
		_ = disableThumb(ctx, m, uri)
	}

	return nil, fs.ErrEntityNotExist
}
|
||||
|
||||
// SubmitAndAwaitThumbnailTask queues a thumbnail-generation task for entity
// and blocks until the task reports a result via its signal channel or ctx is
// canceled.
func (m *manager) SubmitAndAwaitThumbnailTask(ctx context.Context, uri *fs.URI, ext string, entity fs.Entity) (fs.Entity, error) {
	es, err := m.GetEntitySource(ctx, 0, fs.WithEntity(entity))
	if err != nil {
		return nil, fmt.Errorf("failed to get entity source: %w", err)
	}

	defer es.Close()
	t := newGenerateThumbTask(ctx, m, uri, ext, es)
	if err := m.dep.ThumbQueue(ctx).QueueTask(ctx, t); err != nil {
		return nil, fmt.Errorf("failed to queue task: %w", err)
	}

	// Wait for task to finish
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case res := <-t.sig:
		if res.err != nil {
			return nil, fmt.Errorf("failed to generate thumb: %w", res.err)
		}

		return res.thumbEntity, nil
	}

}
|
||||
|
||||
// generateThumb runs the thumbnail pipeline on es, persists the result as a
// thumbnail entity (a storage-driver sidecar on stateless nodes, an uploaded
// entity otherwise) and returns that entity. The temporary output file is
// always removed.
func (m *manager) generateThumb(ctx context.Context, uri *fs.URI, ext string, es entitysource.EntitySource) (fs.Entity, error) {
	// Generate thumb
	pipeline := m.dep.ThumbPipeline()
	res, err := pipeline.Generate(ctx, es, ext, nil)
	if err != nil {
		if res != nil && res.Path != "" {
			_ = os.Remove(res.Path)
		}

		// Persist "thumb unavailable" unless the request was canceled or
		// this is a stateless (slave) node.
		if !errors.Is(err, context.Canceled) && !m.stateless {
			if err := disableThumb(ctx, m, uri); err != nil {
				m.l.Warning("Failed to disable thumb: %v", err)
			}
		}

		return nil, fmt.Errorf("failed to generate thumb: %w", err)
	}

	defer os.Remove(res.Path)

	// Upload thumb entity
	thumbFile, err := os.Open(res.Path)
	if err != nil {
		return nil, fmt.Errorf("failed to open temp thumb %q: %w", res.Path, err)
	}

	defer thumbFile.Close()
	fileInfo, err := thumbFile.Stat()
	if err != nil {
		return nil, fmt.Errorf("failed to stat temp thumb %q: %w", res.Path, err)
	}

	var (
		thumbEntity fs.Entity
	)
	if m.stateless {
		// Stateless node: store the thumb as a sidecar file next to the
		// original entity through the storage driver.
		_, d, err := m.getEntityPolicyDriver(ctx, es.Entity(), nil)
		if err != nil {
			return nil, fmt.Errorf("failed to get storage driver: %w", err)
		}

		savePath := es.Entity().Source() + m.settings.ThumbSlaveSidecarSuffix(ctx)
		if err := d.Put(ctx, &fs.UploadRequest{
			File:   thumbFile,
			Seeker: thumbFile,
			Props:  &fs.UploadProps{SavePath: savePath},
		}); err != nil {
			return nil, fmt.Errorf("failed to save thumb sidecar: %w", err)
		}

		thumbEntity, err = local.NewLocalFileEntity(types.EntityTypeThumbnail, savePath)
		if err != nil {
			return nil, fmt.Errorf("failed to create local thumb entity: %w", err)
		}
	} else {
		// Master node: upload the thumb as a dedicated thumbnail entity with
		// a randomized save path derived from the source entity.
		entityType := types.EntityTypeThumbnail
		req := &fs.UploadRequest{
			Props: &fs.UploadProps{
				Uri:  uri,
				Size: fileInfo.Size(),
				SavePath: fmt.Sprintf(
					"%s.%s%s",
					es.Entity().Source(),
					util.RandStringRunes(16),
					m.settings.ThumbEntitySuffix(ctx),
				),
				MimeType:   m.dep.MimeDetector(ctx).TypeByName("thumb.jpg"),
				EntityType: &entityType,
			},
			File:   thumbFile,
			Seeker: thumbFile,
		}

		// Generating thumb can be triggered by users with read-only permission. We can bypass update permission check.
		ctx = dbfs.WithBypassOwnerCheck(ctx)

		file, err := m.Update(ctx, req, fs.WithEntityType(types.EntityTypeThumbnail))
		if err != nil {
			return nil, fmt.Errorf("failed to upload thumb entity: %w", err)
		}

		entities := file.Entities()
		found := false
		thumbEntity, found = lo.Find(entities, func(e fs.Entity) bool {
			return e.Type() == types.EntityTypeThumbnail
		})
		if !found {
			return nil, fmt.Errorf("failed to find thumb entity")
		}

	}

	if m.settings.ThumbGCAfterGen(ctx) {
		m.l.Debug("GC after thumb generation")
		runtime.GC()
	}

	return thumbEntity, nil
}
|
||||
|
||||
type (
	// GenerateThumbTask generates a thumbnail in the queue and reports the
	// outcome to the waiting caller through sig.
	GenerateThumbTask struct {
		*queue.InMemoryTask
		es  entitysource.EntitySource // source entity to generate the thumb from
		ext string                    // file extension hint for the generator
		m   *manager
		uri *fs.URI
		sig chan *generateRes // buffered; receives the generation outcome
	}
	// generateRes is the outcome delivered on GenerateThumbTask.sig.
	generateRes struct {
		thumbEntity fs.Entity
		err         error
	}
)
|
||||
|
||||
// newGenerateThumbTask builds an in-memory thumbnail task owned by m's user.
// sig is buffered with capacity 2 so deliveries from Do and OnError do not
// block the queue worker.
func newGenerateThumbTask(ctx context.Context, m *manager, uri *fs.URI, ext string, es entitysource.EntitySource) *GenerateThumbTask {
	t := &GenerateThumbTask{
		InMemoryTask: &queue.InMemoryTask{
			DBTask: &queue.DBTask{
				Task: &ent.Task{
					CorrelationID: logging.CorrelationID(ctx),
					PublicState:   &types.TaskPublicState{},
				},
			},
		},
		es:  es,
		ext: ext,
		m:   m,
		uri: uri,
		sig: make(chan *generateRes, 2),
	}

	t.InMemoryTask.DBTask.Task.SetUser(m.user)
	return t
}
|
||||
|
||||
// Do runs thumbnail generation; the outcome is always delivered to the
// waiting caller through m.sig via the deferred send.
func (m *GenerateThumbTask) Do(ctx context.Context) (task.Status, error) {
	var (
		res fs.Entity
		err error
	)
	defer func() { m.sig <- &generateRes{res, err} }()

	// Make sure user does not cancel request before we start generating thumb.
	select {
	case <-ctx.Done():
		err = ctx.Err()
		return task.StatusError, err
	default:
	}

	res, err = m.m.generateThumb(ctx, m.uri, m.ext, m.es)
	// NOTE(review): a generateThumb error is reported only through sig; the
	// task itself still returns completed. Confirm the queue should not see
	// the error here.
	return task.StatusCompleted, nil
}
|
||||
|
||||
// OnError forwards the failure to the base in-memory task and notifies the
// waiting caller through sig so it does not block forever.
func (m *GenerateThumbTask) OnError(err error, d time.Duration) {
	m.InMemoryTask.OnError(err, d)
	m.sig <- &generateRes{nil, err}
}
|
||||
|
||||
// disableThumb marks uri as having no generatable thumbnail by patching the
// ThumbDisabledKey metadata, bypassing the owner check so read-only viewers
// can trigger it.
func disableThumb(ctx context.Context, m *manager, uri *fs.URI) error {
	return m.fs.PatchMetadata(
		dbfs.WithBypassOwnerCheck(ctx),
		[]*fs.URI{uri}, fs.MetadataPatch{
			Key:     dbfs.ThumbDisabledKey,
			Value:   "",
			Private: false,
		})
}
|
||||
500
pkg/filemanager/manager/upload.go
Normal file
500
pkg/filemanager/manager/upload.go
Normal file
@@ -0,0 +1,500 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/task"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/gofrs/uuid"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
type (
	// UploadManagement groups the upload-lifecycle operations exposed by the
	// file manager.
	UploadManagement interface {
		// CreateUploadSession creates an upload session for the given upload request.
		CreateUploadSession(ctx context.Context, req *fs.UploadRequest, opts ...fs.Option) (*fs.UploadCredential, error)
		// ConfirmUploadSession confirms whether upload session is valid for upload.
		ConfirmUploadSession(ctx context.Context, session *fs.UploadSession, chunkIndex int) (fs.File, error)
		// Upload uploads file data to storage
		Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy) error
		// CompleteUpload completes upload session and returns file object
		CompleteUpload(ctx context.Context, session *fs.UploadSession) (fs.File, error)
		// CancelUploadSession cancels upload session
		CancelUploadSession(ctx context.Context, path *fs.URI, sessionID string) error
		// OnUploadFailed should be called when an unmanaged upload failed before complete.
		OnUploadFailed(ctx context.Context, session *fs.UploadSession)
		// PrepareUpload is similar to CreateUploadSession, but does not create an actual upload session in storage.
		PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ...fs.Option) (*fs.UploadSession, error)
	}
)
|
||||
|
||||
func (m *manager) CreateUploadSession(ctx context.Context, req *fs.UploadRequest, opts ...fs.Option) (*fs.UploadCredential, error) {
|
||||
o := newOption()
|
||||
for _, opt := range opts {
|
||||
opt.Apply(o)
|
||||
}
|
||||
|
||||
// Validate metadata
|
||||
if req.Props.Metadata != nil {
|
||||
if err := m.validateMetadata(ctx, lo.MapToSlice(req.Props.Metadata, func(key string, value string) fs.MetadataPatch {
|
||||
return fs.MetadataPatch{
|
||||
Key: key,
|
||||
Value: value,
|
||||
}
|
||||
})...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
uploadSession := o.UploadSession
|
||||
var (
|
||||
err error
|
||||
)
|
||||
|
||||
if uploadSession == nil {
|
||||
// If upload session not specified, invoke DBFS to create one
|
||||
sessionID := uuid.Must(uuid.NewV4()).String()
|
||||
req.Props.UploadSessionID = sessionID
|
||||
ttl := m.settings.UploadSessionTTL(ctx)
|
||||
req.Props.ExpireAt = time.Now().Add(ttl)
|
||||
|
||||
// Prepare for upload
|
||||
uploadSession, err = m.fs.PrepareUpload(ctx, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("faield to prepare uplaod: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
d, err := m.GetStorageDriver(ctx, m.CastStoragePolicyOnSlave(ctx, uploadSession.Policy))
|
||||
if err != nil {
|
||||
m.OnUploadFailed(ctx, uploadSession)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
uploadSession.ChunkSize = uploadSession.Policy.Settings.ChunkSize
|
||||
// Create upload credential for underlying storage driver
|
||||
credential := &fs.UploadCredential{}
|
||||
if !uploadSession.Policy.Settings.Relay || m.stateless {
|
||||
credential, err = d.Token(ctx, uploadSession, req)
|
||||
if err != nil {
|
||||
m.OnUploadFailed(ctx, uploadSession)
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// For relayed upload, we don't need to create credential
|
||||
uploadSession.ChunkSize = 0
|
||||
credential.ChunkSize = 0
|
||||
}
|
||||
credential.SessionID = uploadSession.Props.UploadSessionID
|
||||
credential.Expires = req.Props.ExpireAt.Unix()
|
||||
credential.StoragePolicy = uploadSession.Policy
|
||||
credential.CallbackSecret = uploadSession.CallbackSecret
|
||||
credential.Uri = uploadSession.Props.Uri.String()
|
||||
|
||||
// If upload sentinel check is required, queue a check task
|
||||
if d.Capabilities().StaticFeatures.Enabled(int(driver.HandlerCapabilityUploadSentinelRequired)) {
|
||||
t, err := newUploadSentinelCheckTask(ctx, uploadSession)
|
||||
if err != nil {
|
||||
m.OnUploadFailed(ctx, uploadSession)
|
||||
return nil, fmt.Errorf("failed to create upload sentinel check task: %w", err)
|
||||
}
|
||||
|
||||
if err := m.dep.EntityRecycleQueue(ctx).QueueTask(ctx, t); err != nil {
|
||||
m.OnUploadFailed(ctx, uploadSession)
|
||||
return nil, fmt.Errorf("failed to queue upload sentinel check task: %w", err)
|
||||
}
|
||||
|
||||
uploadSession.SentinelTaskID = t.ID()
|
||||
}
|
||||
|
||||
err = m.kv.Set(
|
||||
UploadSessionCachePrefix+req.Props.UploadSessionID,
|
||||
*uploadSession,
|
||||
max(1, int(req.Props.ExpireAt.Sub(time.Now()).Seconds())),
|
||||
)
|
||||
if err != nil {
|
||||
m.OnUploadFailed(ctx, uploadSession)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return credential, nil
|
||||
}
|
||||
|
||||
// ConfirmUploadSession validates that the given upload session may receive
// the chunk with the given index, and returns the placeholder file the data
// will be written into.
func (m *manager) ConfirmUploadSession(ctx context.Context, session *fs.UploadSession, chunkIndex int) (fs.File, error) {
	// Get placeholder file
	file, err := m.fs.Get(ctx, session.Props.Uri)
	if err != nil {
		return nil, fmt.Errorf("failed to get placeholder file: %w", err)
	}

	// Confirm locks on placeholder file.
	// NOTE(review): the lock is confirmed only when LockToken is EMPTY and the
	// empty token is then passed to ConfirmLock — this condition looks inverted
	// (expected `session.LockToken != ""`); confirm intended behavior.
	if session.LockToken == "" {
		release, ls, err := m.fs.ConfirmLock(ctx, file, file.Uri(false), session.LockToken)
		if err != nil {
			return nil, fs.ErrLockExpired.WithError(err)
		}

		defer release()
		ctx = fs.LockSessionToContext(ctx, ls)
	}

	// Make sure this storage policy is OK to receive data from clients to Cloudreve server.
	if session.Policy.Type != types.PolicyTypeLocal && !session.Policy.Settings.Relay {
		return nil, serializer.NewError(serializer.CodePolicyNotAllowed, "", nil)
	}

	// Byte offset where this chunk would start.
	actualSizeStart := int64(chunkIndex) * session.ChunkSize
	// Non-chunked policies accept only a single chunk (index 0).
	if session.Policy.Settings.ChunkSize == 0 && chunkIndex > 0 {
		return nil, serializer.NewError(serializer.CodeInvalidChunkIndex, "Chunk index cannot be greater than 0", nil)
	}

	// The chunk offset must fall inside the declared file size.
	if actualSizeStart > 0 && actualSizeStart >= session.Props.Size {
		return nil, serializer.NewError(serializer.CodeInvalidChunkIndex, "Chunk offset cannot be greater than file size", nil)
	}

	return file, nil
}
|
||||
|
||||
func (m *manager) PrepareUpload(ctx context.Context, req *fs.UploadRequest, opts ...fs.Option) (*fs.UploadSession, error) {
|
||||
return m.fs.PrepareUpload(ctx, req, opts...)
|
||||
}
|
||||
|
||||
func (m *manager) Upload(ctx context.Context, req *fs.UploadRequest, policy *ent.StoragePolicy) error {
|
||||
d, err := m.GetStorageDriver(ctx, m.CastStoragePolicyOnSlave(ctx, policy))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := d.Put(ctx, req); err != nil {
|
||||
return serializer.NewError(serializer.CodeIOFailed, "Failed to upload file", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *manager) CancelUploadSession(ctx context.Context, path *fs.URI, sessionID string) error {
|
||||
// Get upload session
|
||||
var session *fs.UploadSession
|
||||
sessionRaw, ok := m.kv.Get(UploadSessionCachePrefix + sessionID)
|
||||
if ok {
|
||||
sessionTyped := sessionRaw.(fs.UploadSession)
|
||||
session = &sessionTyped
|
||||
}
|
||||
|
||||
var (
|
||||
staleEntities []fs.Entity
|
||||
err error
|
||||
)
|
||||
|
||||
if !m.stateless {
|
||||
staleEntities, err = m.fs.CancelUploadSession(ctx, path, sessionID, session)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.l.Debug("New stale entities: %v", staleEntities)
|
||||
}
|
||||
|
||||
if session != nil {
|
||||
ctx = context.WithValue(ctx, cluster.SlaveNodeIDCtx{}, strconv.Itoa(session.Policy.NodeID))
|
||||
d, err := m.GetStorageDriver(ctx, m.CastStoragePolicyOnSlave(ctx, session.Policy))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get storage driver: %w", err)
|
||||
}
|
||||
|
||||
if m.stateless {
|
||||
if _, err = d.Delete(ctx, session.Props.SavePath); err != nil {
|
||||
return fmt.Errorf("failed to delete file: %w", err)
|
||||
}
|
||||
} else {
|
||||
if err = d.CancelToken(ctx, session); err != nil {
|
||||
return fmt.Errorf("failed to cancel upload session: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
m.kv.Delete(UploadSessionCachePrefix, session.Props.UploadSessionID)
|
||||
}
|
||||
|
||||
// Delete stale entities
|
||||
if len(staleEntities) > 0 {
|
||||
t, err := newExplicitEntityRecycleTask(ctx, lo.Map(staleEntities, func(entity fs.Entity, index int) int {
|
||||
return entity.ID()
|
||||
}))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create explicit entity recycle task: %w", err)
|
||||
}
|
||||
|
||||
if err := m.dep.EntityRecycleQueue(ctx).QueueTask(ctx, t); err != nil {
|
||||
return fmt.Errorf("failed to queue explicit entity recycle task: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CompleteUpload finalizes an upload session: it completes the upload in the
// storage backend, promotes the placeholder in DBFS, cancels any pending
// sentinel check task, and removes the cached session.
func (m *manager) CompleteUpload(ctx context.Context, session *fs.UploadSession) (fs.File, error) {
	d, err := m.GetStorageDriver(ctx, m.CastStoragePolicyOnSlave(ctx, session.Policy))
	if err != nil {
		return nil, err
	}

	if err := d.CompleteUpload(ctx, session); err != nil {
		return nil, err
	}

	var (
		file fs.File
	)
	// m.fs may be nil here — presumably on stateless (slave) managers where
	// DBFS completion is handled by the master; NOTE(review): confirm.
	if m.fs != nil {
		file, err = m.fs.CompleteUpload(ctx, session)
		if err != nil {
			return nil, fmt.Errorf("failed to complete upload: %w", err)
		}
	}

	if session.SentinelTaskID > 0 {
		// Cancel sentinel check task; the callback arrived, so the placeholder
		// must not be cleaned up. Failure here is only logged.
		m.l.Debug("Cancel upload sentinel check task [%d].", session.SentinelTaskID)
		if err := m.dep.TaskClient().SetCompleteByID(ctx, session.SentinelTaskID); err != nil {
			m.l.Warning("Failed to set upload sentinel check task [%d] to complete: %s", session.SentinelTaskID, err)
		}
	}

	m.onNewEntityUploaded(ctx, session, d)
	// Remove upload session from the KV cache (best-effort).
	_ = m.kv.Delete(UploadSessionCachePrefix, session.Props.UploadSessionID)
	return file, nil
}
|
||||
|
||||
func (m *manager) Update(ctx context.Context, req *fs.UploadRequest, opts ...fs.Option) (fs.File, error) {
|
||||
o := newOption()
|
||||
for _, opt := range opts {
|
||||
opt.Apply(o)
|
||||
}
|
||||
entityType := types.EntityTypeVersion
|
||||
if o.EntityType != nil {
|
||||
entityType = *o.EntityType
|
||||
}
|
||||
|
||||
req.Props.EntityType = &entityType
|
||||
if o.EntityTypeNil {
|
||||
req.Props.EntityType = nil
|
||||
}
|
||||
|
||||
req.Props.UploadSessionID = uuid.Must(uuid.NewV4()).String()
|
||||
|
||||
if m.stateless {
|
||||
return m.updateStateless(ctx, req, o)
|
||||
}
|
||||
|
||||
// Prepare for upload
|
||||
uploadSession, err := m.fs.PrepareUpload(ctx, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("faield to prepare uplaod: %w", err)
|
||||
}
|
||||
|
||||
if err := m.Upload(ctx, req, uploadSession.Policy); err != nil {
|
||||
m.OnUploadFailed(ctx, uploadSession)
|
||||
return nil, fmt.Errorf("failed to upload new entity: %w", err)
|
||||
}
|
||||
|
||||
file, err := m.CompleteUpload(ctx, uploadSession)
|
||||
if err != nil {
|
||||
m.OnUploadFailed(ctx, uploadSession)
|
||||
return nil, fmt.Errorf("failed to complete update: %w", err)
|
||||
}
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func (m *manager) OnUploadFailed(ctx context.Context, session *fs.UploadSession) {
|
||||
ctx = context.WithoutCancel(ctx)
|
||||
if !m.stateless {
|
||||
if session.LockToken != "" {
|
||||
if err := m.Unlock(ctx, session.LockToken); err != nil {
|
||||
m.l.Warning("OnUploadFailed hook failed to unlock: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if session.NewFileCreated {
|
||||
if err := m.Delete(ctx, []*fs.URI{session.Props.Uri}, fs.WithSysSkipSoftDelete(true)); err != nil {
|
||||
m.l.Warning("OnUploadFailed hook failed to delete file: %s", err)
|
||||
}
|
||||
} else {
|
||||
if err := m.fs.VersionControl(ctx, session.Props.Uri, session.EntityID, true); err != nil {
|
||||
m.l.Warning("OnUploadFailed hook failed to version control: %s", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
d, err := m.GetStorageDriver(ctx, m.CastStoragePolicyOnSlave(ctx, session.Policy))
|
||||
if err != nil {
|
||||
m.l.Warning("OnUploadFailed hook failed: %s", err)
|
||||
}
|
||||
|
||||
if failed, err := d.Delete(ctx, session.Props.SavePath); err != nil {
|
||||
m.l.Warning("OnUploadFailed hook failed to remove uploaded file: %s, failed file: %v", err, failed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// similar to Update, but expected to be executed on slave node.
|
||||
func (m *manager) updateStateless(ctx context.Context, req *fs.UploadRequest, o *fs.FsOption) (fs.File, error) {
|
||||
// Prepare for upload
|
||||
res, err := o.Node.PrepareUpload(ctx, &fs.StatelessPrepareUploadService{
|
||||
UploadRequest: req,
|
||||
UserID: o.StatelessUserID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("faield to prepare uplaod: %w", err)
|
||||
}
|
||||
|
||||
req.Props = res.Req.Props
|
||||
if err := m.Upload(ctx, req, res.Session.Policy); err != nil {
|
||||
if err := o.Node.OnUploadFailed(ctx, &fs.StatelessOnUploadFailedService{
|
||||
UploadSession: res.Session,
|
||||
UserID: o.StatelessUserID,
|
||||
}); err != nil {
|
||||
m.l.Warning("Failed to call stateless OnUploadFailed: %s", err)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to upload new entity: %w", err)
|
||||
}
|
||||
|
||||
err = o.Node.CompleteUpload(ctx, &fs.StatelessCompleteUploadService{
|
||||
UploadSession: res.Session,
|
||||
UserID: o.StatelessUserID,
|
||||
})
|
||||
if err != nil {
|
||||
if err := o.Node.OnUploadFailed(ctx, &fs.StatelessOnUploadFailedService{
|
||||
UploadSession: res.Session,
|
||||
UserID: o.StatelessUserID,
|
||||
}); err != nil {
|
||||
m.l.Warning("Failed to call stateless OnUploadFailed: %s", err)
|
||||
}
|
||||
return nil, fmt.Errorf("failed to complete update: %w", err)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *manager) onNewEntityUploaded(ctx context.Context, session *fs.UploadSession, d driver.Handler) {
|
||||
if !m.stateless {
|
||||
// Submit media meta task for new entity
|
||||
m.mediaMetaForNewEntity(ctx, session, d)
|
||||
}
|
||||
}
|
||||
|
||||
// Upload sentinel check task is used for compliant storage policy (COS, S3...), it will delete the marked entity.
// It is expected to be queued after upload session is created, and canceled after upload callback is completed.
// If this task is executed, it means the upload callback does not complete in time.
type (
	// UploadSentinelCheckTask removes a stale placeholder entity whose upload
	// callback never arrived in time.
	UploadSentinelCheckTask struct {
		*queue.DBTask
	}
	// UploadSentinelCheckTaskState is the JSON-persisted private state of the task.
	UploadSentinelCheckTaskState struct {
		Session *fs.UploadSession `json:"session"`
	}
)
|
||||
|
||||
const (
	// uploadSentinelCheckMargin is the grace period added past session
	// expiration before the sentinel check runs, giving the upload callback
	// extra time to arrive.
	uploadSentinelCheckMargin = 5 * time.Minute
)
|
||||
|
||||
// init registers the factory used by the queue to resume persisted sentinel
// check tasks from their database model.
func init() {
	queue.RegisterResumableTaskFactory(queue.UploadSentinelCheckTaskType, NewUploadSentinelCheckTaskFromModel)
}
|
||||
|
||||
func NewUploadSentinelCheckTaskFromModel(task *ent.Task) queue.Task {
|
||||
return &UploadSentinelCheckTask{
|
||||
DBTask: &queue.DBTask{
|
||||
Task: task,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func newUploadSentinelCheckTask(ctx context.Context, uploadSession *fs.UploadSession) (*ExplicitEntityRecycleTask, error) {
|
||||
state := &UploadSentinelCheckTaskState{
|
||||
Session: uploadSession,
|
||||
}
|
||||
stateBytes, err := json.Marshal(state)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal state: %w", err)
|
||||
}
|
||||
|
||||
resumeAfter := uploadSession.Props.ExpireAt.Add(uploadSentinelCheckMargin)
|
||||
t := &ExplicitEntityRecycleTask{
|
||||
DBTask: &queue.DBTask{
|
||||
Task: &ent.Task{
|
||||
Type: queue.UploadSentinelCheckTaskType,
|
||||
CorrelationID: logging.CorrelationID(ctx),
|
||||
PrivateState: string(stateBytes),
|
||||
PublicState: &types.TaskPublicState{
|
||||
ResumeTime: resumeAfter.Unix(),
|
||||
},
|
||||
},
|
||||
DirectOwner: inventory.UserFromContext(ctx),
|
||||
},
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// Do executes the sentinel check: if the upload callback never completed the
// task in time, the stale placeholder entity is deleted from the storage
// backend and the upload token is revoked.
func (m *UploadSentinelCheckTask) Do(ctx context.Context) (task.Status, error) {
	dep := dependency.FromContext(ctx)
	taskClient := dep.TaskClient()
	l := dep.Logger()
	fm := NewFileManager(dep, inventory.UserFromContext(ctx)).(*manager)

	// Check if sentinel is canceled due to callback complete (CompleteUpload
	// marks this task completed when the callback arrives).
	t, err := taskClient.GetTaskByID(ctx, m.ID())
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to get task by ID: %w", err)
	}

	if t.Status == task.StatusCompleted {
		l.Info("Upload sentinel check task [%d] is canceled due to callback complete.", m.ID())
		return task.StatusCompleted, nil
	}

	// unmarshal state
	state := &UploadSentinelCheckTaskState{}
	if err := json.Unmarshal([]byte(m.State()), state); err != nil {
		return task.StatusError, fmt.Errorf("failed to unmarshal state: %w", err)
	}

	l.Info("Upload sentinel check triggered, clean up stale place holder entity [%d].", state.Session.EntityID)
	entity, err := fm.fs.GetEntity(ctx, state.Session.EntityID)
	if err != nil {
		// Entity already gone; nothing left to clean up.
		l.Debug("Failed to get entity [%d]: %s, skip sentinel check.", state.Session.EntityID, err)
		return task.StatusCompleted, nil
	}

	_, d, err := fm.getEntityPolicyDriver(ctx, entity, nil)
	if err != nil {
		l.Debug("Failed to get storage driver for entity [%d]: %s", state.Session.EntityID, err)
		return task.StatusError, err
	}

	// Remove the stale placeholder data from the storage backend.
	_, err = d.Delete(ctx, entity.Source())
	if err != nil {
		l.Debug("Failed to delete entity source [%d]: %s", state.Session.EntityID, err)
		return task.StatusError, err
	}

	// Token cancellation is best-effort; a failure is only logged.
	if err := d.CancelToken(ctx, state.Session); err != nil {
		l.Debug("Failed to cancel token [%d]: %s", state.Session.EntityID, err)
	}

	return task.StatusCompleted, nil
}
|
||||
93
pkg/filemanager/manager/viewer.go
Normal file
93
pkg/filemanager/manager/viewer.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/gofrs/uuid"
|
||||
)
|
||||
|
||||
type (
	// ViewerSession is the response payload describing a newly created online
	// viewer/editor session.
	ViewerSession struct {
		ID          string  `json:"id"`
		AccessToken string  `json:"access_token"`
		Expires     int64   `json:"expires"`
		File        fs.File `json:"-"`
	}
	// ViewerSessionCache is the server-side state of a viewer session stored
	// in the KV store (gob-registered in init).
	ViewerSessionCache struct {
		ID       string
		Uri      string
		UserID   int
		FileID   int
		ViewerID string
		Version  string
		Token    string
	}
	// ViewerSessionCacheCtx is the context key under which a
	// *ViewerSessionCache is stored.
	ViewerSessionCacheCtx struct{}
	// ViewerCtx is the context key under which a *setting.Viewer is stored.
	ViewerCtx struct{}
)
|
||||
|
||||
const (
	// ViewerSessionCachePrefix prefixes viewer session keys in the KV store.
	ViewerSessionCachePrefix = "viewer_session_"

	// sessionExpiresPadding (seconds) is subtracted from the TTL reported to
	// clients so they refresh before the server-side session actually expires.
	sessionExpiresPadding = 10
)
|
||||
|
||||
// init registers ViewerSessionCache with gob so it can be serialized by
// gob-encoded KV backends.
func init() {
	gob.Register(ViewerSessionCache{})
}
|
||||
|
||||
// CreateViewerSession creates an online viewer/editor session for the given
// file version and caches it in KV so viewer endpoints can authenticate
// subsequent requests with the returned access token.
func (m *manager) CreateViewerSession(ctx context.Context, uri *fs.URI, version string, viewer *setting.Viewer) (*ViewerSession, error) {
	file, err := m.fs.Get(ctx, uri, dbfs.WithFileEntities())
	if err != nil {
		return nil, err
	}

	// Locate the requested version entity on the file.
	versionType := types.EntityTypeVersion
	found, desired := fs.FindDesiredEntity(file, version, m.hasher, &versionType)
	if !found {
		return nil, fs.ErrEntityNotExist
	}

	// Enforce the online-edit size limit.
	if desired.Size() > m.settings.MaxOnlineEditSize(ctx) {
		return nil, fs.ErrFileSizeTooBig
	}

	sessionID := uuid.Must(uuid.NewV4()).String()
	token := util.RandStringRunes(128)
	sessionCache := &ViewerSessionCache{
		ID:       sessionID,
		Uri:      file.Uri(false).String(),
		UserID:   m.user.ID,
		ViewerID: viewer.ID,
		FileID:   file.ID(),
		Version:  version,
		// The token embeds the session ID so the session can be located from
		// the token alone.
		Token: fmt.Sprintf("%s.%s", sessionID, token),
	}
	ttl := m.settings.ViewerSessionTTL(ctx)
	if err := m.kv.Set(ViewerSessionCachePrefix+sessionID, *sessionCache, ttl); err != nil {
		return nil, err
	}

	return &ViewerSession{
		File:        file,
		ID:          sessionID,
		AccessToken: sessionCache.Token,
		// Report a slightly earlier expiry so clients refresh in time.
		Expires: time.Now().Add(time.Duration(ttl-sessionExpiresPadding) * time.Second).UnixMilli(),
	}, nil
}
|
||||
|
||||
func ViewerSessionFromContext(ctx context.Context) *ViewerSessionCache {
|
||||
return ctx.Value(ViewerSessionCacheCtx{}).(*ViewerSessionCache)
|
||||
}
|
||||
|
||||
func ViewerFromContext(ctx context.Context) *setting.Viewer {
|
||||
return ctx.Value(ViewerCtx{}).(*setting.Viewer)
|
||||
}
|
||||
682
pkg/filemanager/workflows/archive.go
Normal file
682
pkg/filemanager/workflows/archive.go
Normal file
@@ -0,0 +1,682 @@
|
||||
package workflows
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/task"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/gofrs/uuid"
|
||||
)
|
||||
|
||||
type (
	// CreateArchiveTask is a resumable workflow that compresses a set of URIs
	// into a zip archive and uploads it to a destination URI, either locally
	// on the master node or delegated to a slave node.
	CreateArchiveTask struct {
		*queue.DBTask

		l        logging.Logger          // task-scoped logger, set in Do
		state    *CreateArchiveTaskState // unmarshaled private state
		progress queue.Progresses        // live progress, guarded by the embedded lock
		node     cluster.Node            // node allocated to run the work
	}

	// CreateArchiveTaskPhase identifies the current step of the state machine.
	CreateArchiveTaskPhase string

	// CreateArchiveTaskState is the JSON-persisted private state of the task.
	CreateArchiveTaskState struct {
		Uris               []string                     `json:"uris,omitempty"`
		Dst                string                       `json:"dst,omitempty"`
		TempPath           string                       `json:"temp_path,omitempty"`
		ArchiveFile        string                       `json:"archive_file,omitempty"`
		Phase              CreateArchiveTaskPhase       `json:"phase,omitempty"`
		SlaveUploadTaskID  int                          `json:"slave__upload_task_id,omitempty"`
		SlaveArchiveTaskID int                          `json:"slave__archive_task_id,omitempty"`
		SlaveCompressState *SlaveCreateArchiveTaskState `json:"slave_compress_state,omitempty"`
		Failed             int                          `json:"failed,omitempty"`
		NodeState          `json:",inline"`
	}
)
|
||||
|
||||
const (
	// Phases used when the archive is built on the master node (see Do).
	CreateArchiveTaskPhaseNotStarted    CreateArchiveTaskPhase = "not_started"
	CreateArchiveTaskPhaseCompressFiles CreateArchiveTaskPhase = "compress_files"
	CreateArchiveTaskPhaseUploadArchive CreateArchiveTaskPhase = "upload_archive"

	// Phases used when the work is delegated to a slave node (see Do).
	CreateArchiveTaskPhaseAwaitSlaveCompressing        CreateArchiveTaskPhase = "await_slave_compressing"
	CreateArchiveTaskPhaseCreateAndAwaitSlaveUploading CreateArchiveTaskPhase = "await_slave_uploading"
	CreateArchiveTaskPhaseCompleteUpload               CreateArchiveTaskPhase = "complete_upload"

	// Progress keys exposed to clients.
	ProgressTypeArchiveCount = "archive_count"
	ProgressTypeArchiveSize  = "archive_size"
	ProgressTypeUpload       = "upload"
	ProgressTypeUploadCount  = "upload_count"
)
|
||||
|
||||
// init registers the factory used by the queue to resume persisted
// create-archive tasks from their database model.
func init() {
	queue.RegisterResumableTaskFactory(queue.CreateArchiveTaskType, NewCreateArchiveTaskFromModel)
}
|
||||
|
||||
// NewCreateArchiveTask creates a new CreateArchiveTask
|
||||
func NewCreateArchiveTask(ctx context.Context, src []string, dst string) (queue.Task, error) {
|
||||
state := &CreateArchiveTaskState{
|
||||
Uris: src,
|
||||
Dst: dst,
|
||||
NodeState: NodeState{},
|
||||
}
|
||||
stateBytes, err := json.Marshal(state)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal state: %w", err)
|
||||
}
|
||||
|
||||
t := &CreateArchiveTask{
|
||||
DBTask: &queue.DBTask{
|
||||
Task: &ent.Task{
|
||||
Type: queue.CreateArchiveTaskType,
|
||||
CorrelationID: logging.CorrelationID(ctx),
|
||||
PrivateState: string(stateBytes),
|
||||
PublicState: &types.TaskPublicState{},
|
||||
},
|
||||
DirectOwner: inventory.UserFromContext(ctx),
|
||||
},
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func NewCreateArchiveTaskFromModel(task *ent.Task) queue.Task {
|
||||
return &CreateArchiveTask{
|
||||
DBTask: &queue.DBTask{
|
||||
Task: task,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Do drives the create-archive state machine. The task runs either entirely
// on the master node (compress locally, then upload) or via a slave node
// (send entity manifest to the slave, await compression, await upload, then
// finish). Mutated state is re-marshaled after every step so progress
// survives suspension/resumption.
func (m *CreateArchiveTask) Do(ctx context.Context) (task.Status, error) {
	dep := dependency.FromContext(ctx)
	m.l = dep.Logger()

	m.Lock()
	if m.progress == nil {
		m.progress = make(queue.Progresses)
	}
	m.Unlock()

	// unmarshal state
	state := &CreateArchiveTaskState{}
	if err := json.Unmarshal([]byte(m.State()), state); err != nil {
		return task.StatusError, fmt.Errorf("failed to unmarshal state: %w", err)
	}
	m.state = state

	// select node capable of creating archives
	node, err := allocateNode(ctx, dep, &m.state.NodeState, types.NodeCapabilityCreateArchive)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to allocate node: %w", err)
	}
	m.node = node

	next := task.StatusCompleted

	if m.node.IsMaster() {
		// Master flow:
		//   1. Initialize temp folder
		//   2. Compress files
		//   3. Upload archive to dst
		switch m.state.Phase {
		case CreateArchiveTaskPhaseNotStarted, "":
			next, err = m.initializeTempFolder(ctx, dep)
		case CreateArchiveTaskPhaseCompressFiles:
			next, err = m.createArchiveFile(ctx, dep)
		case CreateArchiveTaskPhaseUploadArchive:
			next, err = m.uploadArchive(ctx, dep)
		default:
			next, err = task.StatusError, fmt.Errorf("unknown phase %q: %w", m.state.Phase, queue.CriticalErr)
		}
	} else {
		// Slave flow:
		//   1. List all files and send to slave node for compressing
		//   2. Await compressing and send to slave for uploading
		//   3. Await uploading and complete upload
		switch m.state.Phase {
		case CreateArchiveTaskPhaseNotStarted, "":
			next, err = m.listEntitiesAndSendToSlave(ctx, dep)
		case CreateArchiveTaskPhaseAwaitSlaveCompressing:
			next, err = m.awaitSlaveCompressing(ctx, dep)
		case CreateArchiveTaskPhaseCreateAndAwaitSlaveUploading:
			next, err = m.createAndAwaitSlaveUploading(ctx, dep)
		case CreateArchiveTaskPhaseCompleteUpload:
			next, err = m.completeUpload(ctx, dep)
		default:
			next, err = task.StatusError, fmt.Errorf("unknown phase %q: %w", m.state.Phase, queue.CriticalErr)
		}
	}

	// Persist mutated state back to the task model before returning.
	newStateStr, marshalErr := json.Marshal(m.state)
	if marshalErr != nil {
		return task.StatusError, fmt.Errorf("failed to marshal state: %w", marshalErr)
	}

	m.Lock()
	m.Task.PrivateState = string(newStateStr)
	m.Unlock()
	return next, err
}
|
||||
|
||||
// Cleanup removes temp folders created for the archive: the slave node's temp
// folder (if the slave flow was used) and the local temp folder.
func (m *CreateArchiveTask) Cleanup(ctx context.Context) error {
	if m.state.SlaveCompressState != nil && m.state.SlaveCompressState.TempPath != "" && m.node != nil {
		// Best-effort remote cleanup; failures are only logged.
		// NOTE(review): context.Background() is used instead of ctx —
		// presumably so cleanup proceeds even if ctx is canceled; confirm.
		if err := m.node.CleanupFolders(context.Background(), m.state.SlaveCompressState.TempPath); err != nil {
			m.l.Warning("Failed to cleanup slave temp folder %s: %s", m.state.SlaveCompressState.TempPath, err)
		}
	}

	if m.state.TempPath != "" {
		// NOTE(review): brief delay before removal — presumably to let file
		// handles be released; confirm why this sleep is needed.
		time.Sleep(time.Duration(1) * time.Second)
		return os.RemoveAll(m.state.TempPath)
	}

	return nil
}
|
||||
|
||||
func (m *CreateArchiveTask) initializeTempFolder(ctx context.Context, dep dependency.Dep) (task.Status, error) {
|
||||
tempPath, err := prepareTempFolder(ctx, dep, m)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to prepare temp folder: %w", err)
|
||||
}
|
||||
|
||||
m.state.TempPath = tempPath
|
||||
m.state.Phase = CreateArchiveTaskPhaseCompressFiles
|
||||
m.ResumeAfter(0)
|
||||
return task.StatusSuspending, nil
|
||||
}
|
||||
|
||||
// listEntitiesAndSendToSlave performs a dry-run archive walk on the master to
// enumerate every entity (and resolve its storage policy), then ships the
// manifest to the selected slave node as a SlaveCreateArchive task.
func (m *CreateArchiveTask) listEntitiesAndSendToSlave(ctx context.Context, dep dependency.Dep) (task.Status, error) {
	uris, err := fs.NewUriFromStrings(m.state.Uris...)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to create uri from strings: %s (%w)", err, queue.CriticalErr)
	}

	payload := &SlaveCreateArchiveTaskState{
		Entities: make([]SlaveCreateArchiveEntity, 0, len(uris)),
		Policies: make(map[int]*ent.StoragePolicy),
	}

	user := inventory.UserFromContext(ctx)
	fm := manager.NewFileManager(dep, user)
	storagePolicyClient := dep.StoragePolicyClient()

	// Dry run: no bytes are written (io.Discard); the callback only records
	// each entity and resolves each referenced storage policy once.
	failed, err := fm.CreateArchive(ctx, uris, io.Discard,
		fs.WithDryRun(func(name string, e fs.Entity) {
			payload.Entities = append(payload.Entities, SlaveCreateArchiveEntity{
				Entity: e.Model(),
				Path:   name,
			})
			if _, ok := payload.Policies[e.PolicyID()]; !ok {
				policy, err := storagePolicyClient.GetPolicyByID(ctx, e.PolicyID())
				if err != nil {
					// Missing policy is logged but does not abort the walk.
					m.l.Warning("Failed to get policy %d: %s", e.PolicyID(), err)
				} else {
					payload.Policies[e.PolicyID()] = policy
				}
			}
		}),
		fs.WithMaxArchiveSize(user.Edges.Group.Settings.CompressSize),
	)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to compress files: %w", err)
	}

	m.state.Failed = failed
	payloadStr, err := json.Marshal(payload)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to marshal payload: %w", err)
	}

	taskId, err := m.node.CreateTask(ctx, queue.SlaveCreateArchiveTaskType, string(payloadStr))
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to create slave task: %w", err)
	}

	// Record the slave task and poll it after a short delay.
	m.state.Phase = CreateArchiveTaskPhaseAwaitSlaveCompressing
	m.state.SlaveArchiveTaskID = taskId
	m.ResumeAfter((10 * time.Second))
	return task.StatusSuspending, nil
}
|
||||
|
||||
// awaitSlaveCompressing polls the slave compression task, mirrors its
// progress and private state locally, and advances to the upload phase once
// it completes. While the slave is still running, the task suspends and polls
// again after 30 seconds.
func (m *CreateArchiveTask) awaitSlaveCompressing(ctx context.Context, dep dependency.Dep) (task.Status, error) {
	t, err := m.node.GetTask(ctx, m.state.SlaveArchiveTaskID, false)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to get slave task: %w", err)
	}

	m.Lock()
	m.state.NodeState.progress = t.Progress
	m.Unlock()

	// Mirror the slave's compress state (zip path, size, ...) for later phases.
	m.state.SlaveCompressState = &SlaveCreateArchiveTaskState{}
	if err := json.Unmarshal([]byte(t.PrivateState), m.state.SlaveCompressState); err != nil {
		return task.StatusError, fmt.Errorf("failed to unmarshal slave compress state: %s (%w)", err, queue.CriticalErr)
	}

	if t.Status == task.StatusError {
		return task.StatusError, fmt.Errorf("slave task failed: %s (%w)", t.Error, queue.CriticalErr)
	}

	if t.Status == task.StatusCanceled {
		return task.StatusError, fmt.Errorf("slave task canceled (%w)", queue.CriticalErr)
	}

	if t.Status == task.StatusCompleted {
		m.state.Phase = CreateArchiveTaskPhaseCreateAndAwaitSlaveUploading
		m.ResumeAfter(0)
		return task.StatusSuspending, nil
	}

	m.l.Info("Slave task %d is still compressing, resume after 30s.", m.state.SlaveArchiveTaskID)
	m.ResumeAfter((time.Second * 30))
	return task.StatusSuspending, nil
}
|
||||
|
||||
// createAndAwaitSlaveUploading creates the slave upload task for the
// compressed archive on the first call (SlaveUploadTaskID == 0), and on
// subsequent calls polls it, mirroring progress, until it completes, fails,
// or is canceled.
func (m *CreateArchiveTask) createAndAwaitSlaveUploading(ctx context.Context, dep dependency.Dep) (task.Status, error) {
	u := inventory.UserFromContext(ctx)

	if m.state.SlaveUploadTaskID == 0 {
		dst, err := fs.NewUriFromString(m.state.Dst)
		if err != nil {
			return task.StatusError, fmt.Errorf("failed to parse dst uri %q: %s (%w)", m.state.Dst, err, queue.CriticalErr)
		}

		// Create slave upload task for the single compressed archive file.
		payload := &SlaveUploadTaskState{
			Files: []SlaveUploadEntity{
				{
					Size: m.state.SlaveCompressState.CompressedSize,
					Uri:  dst,
					Src:  m.state.SlaveCompressState.ZipFilePath,
				},
			},
			MaxParallel: dep.SettingProvider().MaxParallelTransfer(ctx),
			UserID:      u.ID,
		}

		payloadStr, err := json.Marshal(payload)
		if err != nil {
			return task.StatusError, fmt.Errorf("failed to marshal payload: %w", err)
		}

		taskId, err := m.node.CreateTask(ctx, queue.SlaveUploadTaskType, string(payloadStr))
		if err != nil {
			return task.StatusError, fmt.Errorf("failed to create slave task: %w", err)
		}

		// Reset mirrored progress for the new (upload) stage.
		m.state.NodeState.progress = nil
		m.state.SlaveUploadTaskID = taskId
		m.ResumeAfter(0)
		return task.StatusSuspending, nil
	}

	m.l.Info("Checking slave upload task %d...", m.state.SlaveUploadTaskID)
	t, err := m.node.GetTask(ctx, m.state.SlaveUploadTaskID, true)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to get slave task: %w", err)
	}

	m.Lock()
	m.state.NodeState.progress = t.Progress
	m.Unlock()

	if t.Status == task.StatusError {
		return task.StatusError, fmt.Errorf("slave task failed: %s (%w)", t.Error, queue.CriticalErr)
	}

	if t.Status == task.StatusCanceled {
		return task.StatusError, fmt.Errorf("slave task canceled (%w)", queue.CriticalErr)
	}

	if t.Status == task.StatusCompleted {
		m.state.Phase = CreateArchiveTaskPhaseCompleteUpload
		m.ResumeAfter(0)
		return task.StatusSuspending, nil
	}

	m.l.Info("Slave task %d is still uploading, resume after 30s.", m.state.SlaveUploadTaskID)
	m.ResumeAfter(time.Second * 30)
	return task.StatusSuspending, nil
}
|
||||
|
||||
// completeUpload is the terminal phase of the archive workflow: all work has
// already been done in earlier phases, so it simply marks the task completed.
// Temp-file cleanup is not performed here.
func (m *CreateArchiveTask) completeUpload(ctx context.Context, dep dependency.Dep) (task.Status, error) {
	return task.StatusCompleted, nil
}
|
||||
|
||||
// createArchiveFile compresses all source URIs into a temporary zip file on the
// master node, records the count of failed entries, then advances the task to
// the upload phase and suspends so the scheduler can resume it.
func (m *CreateArchiveTask) createArchiveFile(ctx context.Context, dep dependency.Dep) (task.Status, error) {
	uris, err := fs.NewUriFromStrings(m.state.Uris...)
	if err != nil {
		// Malformed URIs cannot be retried; mark as critical.
		return task.StatusError, fmt.Errorf("failed to create uri from strings: %s (%w)", err, queue.CriticalErr)
	}

	user := inventory.UserFromContext(ctx)
	fm := manager.NewFileManager(dep, user)

	// Create temp zip file with a random name under the task's temp workspace.
	fileName := fmt.Sprintf("%s.zip", uuid.Must(uuid.NewV4()))
	zipFilePath := filepath.Join(
		m.state.TempPath,
		fileName,
	)
	zipFile, err := util.CreatNestedFile(zipFilePath)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to create zip file: %w", err)
	}

	defer zipFile.Close()

	// Start compressing; progress counters are published under the lock so
	// concurrent Progress() calls see a consistent map.
	m.Lock()
	m.progress[ProgressTypeArchiveCount] = &queue.Progress{}
	m.progress[ProgressTypeArchiveSize] = &queue.Progress{}
	m.Unlock()
	failed, err := fm.CreateArchive(ctx, uris, zipFile,
		fs.WithArchiveCompression(true),
		fs.WithMaxArchiveSize(user.Edges.Group.Settings.CompressSize),
		fs.WithProgressFunc(func(current, diff int64, total int64) {
			// Counters are updated atomically; the map entries themselves are
			// only replaced under the lock above.
			atomic.AddInt64(&m.progress[ProgressTypeArchiveSize].Current, diff)
			atomic.AddInt64(&m.progress[ProgressTypeArchiveCount].Current, 1)
		}),
	)
	if err != nil {
		// Remove the partial zip; the deferred Close is a harmless double close.
		zipFile.Close()
		_ = os.Remove(zipFilePath)
		return task.StatusError, fmt.Errorf("failed to compress files: %w", err)
	}

	m.state.Failed = failed
	m.Lock()
	delete(m.progress, ProgressTypeArchiveSize)
	delete(m.progress, ProgressTypeArchiveCount)
	m.Unlock()

	// Hand off to the upload phase; only the file name is persisted, the path
	// is re-derived from TempPath on resume.
	m.state.Phase = CreateArchiveTaskPhaseUploadArchive
	m.state.ArchiveFile = fileName
	m.ResumeAfter(0)
	return task.StatusSuspending, nil
}
|
||||
|
||||
// uploadArchive uploads the previously created temp zip file to the destination
// URI through the file manager, reporting upload progress, and completes the
// task on success. The temp file itself is not removed here.
func (m *CreateArchiveTask) uploadArchive(ctx context.Context, dep dependency.Dep) (task.Status, error) {
	fm := manager.NewFileManager(dep, inventory.UserFromContext(ctx))
	zipFilePath := filepath.Join(
		m.state.TempPath,
		m.state.ArchiveFile,
	)

	m.l.Info("Uploading archive file %s to %s...", zipFilePath, m.state.Dst)

	uri, err := fs.NewUriFromString(m.state.Dst)
	if err != nil {
		// A bad destination URI will never succeed on retry.
		return task.StatusError, fmt.Errorf(
			"failed to parse dst uri %q: %s (%w)",
			m.state.Dst,
			err,
			queue.CriticalErr,
		)
	}

	file, err := os.Open(zipFilePath)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to open compressed archive %q: %s", m.state.ArchiveFile, err)
	}
	defer file.Close()
	// Stat to learn the final compressed size for the upload request.
	fi, err := file.Stat()
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to get file info: %w", err)
	}
	size := fi.Size()

	m.Lock()
	m.progress[ProgressTypeUpload] = &queue.Progress{}
	m.Unlock()
	fileData := &fs.UploadRequest{
		Props: &fs.UploadProps{
			Uri:  uri,
			Size: size,
		},
		ProgressFunc: func(current, diff int64, total int64) {
			// Store (not add): the callback supplies absolute values.
			atomic.StoreInt64(&m.progress[ProgressTypeUpload].Current, current)
			atomic.StoreInt64(&m.progress[ProgressTypeUpload].Total, total)
		},
		// The same *os.File serves as both reader and seeker so the upload can
		// rewind on retry.
		File:   file,
		Seeker: file,
	}

	_, err = fm.Update(ctx, fileData)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to upload archive file: %w", err)
	}

	return task.StatusCompleted, nil
}
|
||||
|
||||
func (m *CreateArchiveTask) Progress(ctx context.Context) queue.Progresses {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
if m.state.NodeState.progress != nil {
|
||||
merged := make(queue.Progresses)
|
||||
for k, v := range m.progress {
|
||||
merged[k] = v
|
||||
}
|
||||
|
||||
for k, v := range m.state.NodeState.progress {
|
||||
merged[k] = v
|
||||
}
|
||||
|
||||
return merged
|
||||
}
|
||||
return m.progress
|
||||
}
|
||||
|
||||
// Summarize produces a user-facing summary of the task: node, phase, sources,
// destination and how many entries failed. It lazily unmarshals the persisted
// state when called on a task that has not run Do in this process, and returns
// nil if that state cannot be decoded.
func (m *CreateArchiveTask) Summarize(hasher hashid.Encoder) *queue.Summary {
	// unmarshal state
	if m.state == nil {
		if err := json.Unmarshal([]byte(m.State()), &m.state); err != nil {
			return nil
		}
	}

	// Prefer the slave-side failure count when the compression ran on a slave.
	failed := m.state.Failed
	if m.state.SlaveCompressState != nil {
		failed = m.state.SlaveCompressState.Failed
	}

	return &queue.Summary{
		NodeID: m.state.NodeID,
		Phase:  string(m.state.Phase),
		Props: map[string]any{
			SummaryKeySrcMultiple: m.state.Uris,
			SummaryKeyDst:         m.state.Dst,
			SummaryKeyFailed:      failed,
		},
	}
}
|
||||
|
||||
type (
	// SlaveCreateArchiveEntity pairs a file entity with the path it should
	// occupy inside the generated archive.
	SlaveCreateArchiveEntity struct {
		Entity *ent.Entity `json:"entity"`
		Path   string      `json:"path"`
	}
	// SlaveCreateArchiveTaskState is the JSON payload exchanged between master
	// and slave for archive creation. Entities/Policies are inputs from the
	// master; CompressedSize/ZipFilePath/Failed are filled in by the slave
	// (inputs are cleared afterwards to keep the persisted state small).
	SlaveCreateArchiveTaskState struct {
		Entities       []SlaveCreateArchiveEntity `json:"entities"`
		Policies       map[int]*ent.StoragePolicy `json:"policies"`
		CompressedSize int64                      `json:"compressed_size"`
		TempPath       string                     `json:"temp_path"`
		ZipFilePath    string                     `json:"zip_file_path"`
		Failed         int                        `json:"failed"`
	}
	// SlaveCreateArchiveTask compresses entities into a zip file on a slave
	// node, on behalf of a master-side CreateArchiveTask.
	SlaveCreateArchiveTask struct {
		*queue.InMemoryTask

		mu       sync.RWMutex
		progress queue.Progresses
		l        logging.Logger
		state    *SlaveCreateArchiveTaskState
	}
)
|
||||
|
||||
// NewSlaveCreateArchiveTask creates a new SlaveCreateArchiveTask from raw
// private state. The raw state string is stored verbatim and decoded later in
// Do; props carry the master's identity so the slave can call back.
func NewSlaveCreateArchiveTask(ctx context.Context, props *types.SlaveTaskProps, id int, state string) queue.Task {
	return &SlaveCreateArchiveTask{
		InMemoryTask: &queue.InMemoryTask{
			DBTask: &queue.DBTask{
				Task: &ent.Task{
					ID:            id,
					CorrelationID: logging.CorrelationID(ctx),
					PublicState: &types.TaskPublicState{
						SlaveTaskProps: props,
					},
					PrivateState: state,
				},
			},
		},

		progress: make(queue.Progresses),
	}
}
|
||||
|
||||
// Do performs slave-side archive creation: it streams every entity listed in
// the state from its storage backend into a local zip file under a temp
// workspace, then records the compressed size and file path back into the
// task's private state for the master to collect. Per-entity failures are
// counted and skipped rather than aborting the whole task.
func (t *SlaveCreateArchiveTask) Do(ctx context.Context) (task.Status, error) {
	ctx = prepareSlaveTaskCtx(ctx, t.Model().PublicState.SlaveTaskProps)
	dep := dependency.FromContext(ctx)
	t.l = dep.Logger()
	// Slave has no user context; file manager operates anonymously.
	fm := manager.NewFileManager(dep, nil)

	// unmarshal state
	state := &SlaveCreateArchiveTaskState{}
	if err := json.Unmarshal([]byte(t.State()), state); err != nil {
		return task.StatusError, fmt.Errorf("failed to unmarshal state: %w", err)
	}

	t.state = state

	// Pre-compute totals so progress can be reported against a known total.
	totalFiles := int64(0)
	totalFileSize := int64(0)
	for _, e := range t.state.Entities {
		totalFiles++
		totalFileSize += e.Entity.Size
	}

	t.Lock()
	t.progress[ProgressTypeArchiveCount] = &queue.Progress{Total: totalFiles}
	t.progress[ProgressTypeArchiveSize] = &queue.Progress{Total: totalFileSize}
	t.Unlock()

	// 3. Create temp workspace
	tempPath, err := prepareTempFolder(ctx, dep, t)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to prepare temp folder: %w", err)
	}
	t.state.TempPath = tempPath

	// 2. Create archive file
	fileName := fmt.Sprintf("%s.zip", uuid.Must(uuid.NewV4()))
	zipFilePath := filepath.Join(
		t.state.TempPath,
		fileName,
	)
	zipFile, err := util.CreatNestedFile(zipFilePath)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to create zip file: %w", err)
	}

	defer zipFile.Close()

	zipWriter := zip.NewWriter(zipFile)
	defer zipWriter.Close()

	// 3. Download each entity and write into zip file
	for _, e := range t.state.Entities {
		// Resolve the storage policy the entity was stored under; without it
		// the entity cannot be read, so count it as failed and move on.
		policy, ok := t.state.Policies[e.Entity.StoragePolicyEntities]
		if !ok {
			state.Failed++
			t.l.Warning("Policy not found for entity %d, skipping...", e.Entity.ID)
			continue
		}

		entity := fs.NewEntity(e.Entity)
		es, err := fm.GetEntitySource(ctx, 0,
			fs.WithEntity(entity),
			fs.WithPolicy(fm.CastStoragePolicyOnSlave(ctx, policy)),
		)
		if err != nil {
			state.Failed++
			t.l.Warning("Failed to get entity source for entity %d: %s, skipping...", e.Entity.ID, err)
			continue
		}

		// Write to zip file
		header := &zip.FileHeader{
			Name:               e.Path,
			Modified:           entity.UpdatedAt(),
			UncompressedSize64: uint64(entity.Size()),
			Method:             zip.Deflate,
		}

		writer, err := zipWriter.CreateHeader(header)
		if err != nil {
			es.Close()
			state.Failed++
			t.l.Warning("Failed to create zip header for %s: %s, skipping...", e.Path, err)
			continue
		}

		es.Apply(entitysource.WithContext(ctx))
		_, err = io.Copy(writer, es)
		es.Close()
		if err != nil {
			// Copy failure still advances progress below; the entry is counted
			// as failed but the loop continues with the next entity.
			state.Failed++
			t.l.Warning("Failed to write entity %d to zip file: %s, skipping...", e.Entity.ID, err)
		}

		atomic.AddInt64(&t.progress[ProgressTypeArchiveSize].Current, entity.Size())
		atomic.AddInt64(&t.progress[ProgressTypeArchiveCount].Current, 1)
	}

	// Close explicitly (before Stat) so the central directory is flushed; the
	// deferred Close becomes a no-op/duplicate.
	zipWriter.Close()
	stat, err := zipFile.Stat()
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to get compressed file info: %w", err)
	}

	t.state.CompressedSize = stat.Size()
	t.state.ZipFilePath = zipFilePath
	// Clear unused fields to save space
	t.state.Entities = nil
	t.state.Policies = nil

	newStateStr, marshalErr := json.Marshal(t.state)
	if marshalErr != nil {
		return task.StatusError, fmt.Errorf("failed to marshal state: %w", marshalErr)
	}

	t.Lock()
	t.Task.PrivateState = string(newStateStr)
	t.Unlock()
	return task.StatusCompleted, nil
}
|
||||
|
||||
// Progress returns the task's in-memory progress map under the task lock.
// Note the map itself is returned, not a copy; entry values are updated
// atomically by Do.
func (m *SlaveCreateArchiveTask) Progress(ctx context.Context) queue.Progresses {
	m.Lock()
	defer m.Unlock()

	return m.progress
}
|
||||
766
pkg/filemanager/workflows/extract.go
Normal file
766
pkg/filemanager/workflows/extract.go
Normal file
@@ -0,0 +1,766 @@
|
||||
package workflows
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/task"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/gofrs/uuid"
|
||||
"github.com/mholt/archiver/v4"
|
||||
)
|
||||
|
||||
type (
	// ExtractArchiveTask extracts an archive file into a destination folder.
	// It runs on the master directly, or delegates to a slave node when one is
	// allocated for the extract-archive capability.
	ExtractArchiveTask struct {
		*queue.DBTask

		l        logging.Logger
		state    *ExtractArchiveTaskState
		progress queue.Progresses
		node     cluster.Node
	}
	// ExtractArchiveTaskPhase names a step in the extract workflow.
	ExtractArchiveTaskPhase string
	// ExtractArchiveTaskState is the persisted private state of the task.
	// ProcessedCursor records the last archive entry successfully handled so a
	// resumed task can skip already-processed entries.
	ExtractArchiveTaskState struct {
		Uri             string                  `json:"uri,omitempty"`
		Encoding        string                  `json:"encoding,omitempty"`
		Dst             string                  `json:"dst,omitempty"`
		TempPath        string                  `json:"temp_path,omitempty"`
		TempZipFilePath string                  `json:"temp_zip_file_path,omitempty"`
		ProcessedCursor string                  `json:"processed_cursor,omitempty"`
		SlaveTaskID     int                     `json:"slave_task_id,omitempty"`
		NodeState       `json:",inline"`
		Phase           ExtractArchiveTaskPhase `json:"phase,omitempty"`
	}
)
|
||||
|
||||
const (
	// Phases of the extract-archive workflow.
	ExtractArchivePhaseNotStarted         ExtractArchiveTaskPhase = ""
	ExtractArchivePhaseDownloadZip        ExtractArchiveTaskPhase = "download_zip"
	ExtractArchivePhaseAwaitSlaveComplete ExtractArchiveTaskPhase = "await_slave_complete"

	// Progress map keys reported by extract tasks.
	ProgressTypeExtractCount = "extract_count"
	ProgressTypeExtractSize  = "extract_size"
	ProgressTypeDownload     = "download"

	// Summary property keys.
	SummaryKeySrc = "src"
	SummaryKeyDst = "dst"
)
|
||||
|
||||
// init registers the factory so persisted extract-archive tasks can be
// resumed from the database after a restart.
func init() {
	queue.RegisterResumableTaskFactory(queue.ExtractArchiveTaskType, NewExtractArchiveTaskFromModel)
}
|
||||
|
||||
// NewExtractArchiveTask creates a new ExtractArchiveTask
|
||||
func NewExtractArchiveTask(ctx context.Context, src, dst, encoding string) (queue.Task, error) {
|
||||
state := &ExtractArchiveTaskState{
|
||||
Uri: src,
|
||||
Dst: dst,
|
||||
Encoding: encoding,
|
||||
NodeState: NodeState{},
|
||||
}
|
||||
stateBytes, err := json.Marshal(state)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal state: %w", err)
|
||||
}
|
||||
|
||||
t := &ExtractArchiveTask{
|
||||
DBTask: &queue.DBTask{
|
||||
Task: &ent.Task{
|
||||
Type: queue.ExtractArchiveTaskType,
|
||||
CorrelationID: logging.CorrelationID(ctx),
|
||||
PrivateState: string(stateBytes),
|
||||
PublicState: &types.TaskPublicState{},
|
||||
},
|
||||
DirectOwner: inventory.UserFromContext(ctx),
|
||||
},
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func NewExtractArchiveTaskFromModel(task *ent.Task) queue.Task {
|
||||
return &ExtractArchiveTask{
|
||||
DBTask: &queue.DBTask{
|
||||
Task: task,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Do is the task entry point. It decodes the persisted state, allocates a node
// capable of extraction, dispatches to the phase handler for either the master
// or slave path, then persists the (possibly mutated) state before returning
// the next status. The phase handler's error is returned alongside it.
func (m *ExtractArchiveTask) Do(ctx context.Context) (task.Status, error) {
	dep := dependency.FromContext(ctx)
	m.l = dep.Logger()

	m.Lock()
	if m.progress == nil {
		m.progress = make(queue.Progresses)
	}
	m.Unlock()

	// unmarshal state
	state := &ExtractArchiveTaskState{}
	if err := json.Unmarshal([]byte(m.State()), state); err != nil {
		return task.StatusError, fmt.Errorf("failed to unmarshal state: %w", err)
	}
	m.state = state

	// select node
	node, err := allocateNode(ctx, dep, &m.state.NodeState, types.NodeCapabilityExtractArchive)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to allocate node: %w", err)
	}
	m.node = node

	next := task.StatusCompleted

	// Master extracts locally (with an optional zip-download detour); a slave
	// node is driven through create-then-poll phases.
	if node.IsMaster() {
		switch m.state.Phase {
		case ExtractArchivePhaseNotStarted:
			next, err = m.masterExtractArchive(ctx, dep)
		case ExtractArchivePhaseDownloadZip:
			next, err = m.masterDownloadZip(ctx, dep)
		default:
			next, err = task.StatusError, fmt.Errorf("unknown phase %q: %w", m.state.Phase, queue.CriticalErr)
		}
	} else {
		switch m.state.Phase {
		case ExtractArchivePhaseNotStarted:
			next, err = m.createSlaveExtractTask(ctx, dep)
		case ExtractArchivePhaseAwaitSlaveComplete:
			next, err = m.awaitSlaveExtractComplete(ctx, dep)
		default:
			next, err = task.StatusError, fmt.Errorf("unknown phase %q: %w", m.state.Phase, queue.CriticalErr)
		}
	}

	// Persist state even when the handler errored, so cursors survive retries.
	newStateStr, marshalErr := json.Marshal(m.state)
	if marshalErr != nil {
		return task.StatusError, fmt.Errorf("failed to marshal state: %w", marshalErr)
	}

	m.Lock()
	m.Task.PrivateState = string(newStateStr)
	m.Unlock()
	return next, err
}
|
||||
|
||||
func (m *ExtractArchiveTask) createSlaveExtractTask(ctx context.Context, dep dependency.Dep) (task.Status, error) {
|
||||
uri, err := fs.NewUriFromString(m.state.Uri)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to parse src uri: %s (%w)", err, queue.CriticalErr)
|
||||
}
|
||||
|
||||
user := inventory.UserFromContext(ctx)
|
||||
fm := manager.NewFileManager(dep, user)
|
||||
|
||||
// Get entity source to extract
|
||||
archiveFile, err := fm.Get(ctx, uri, dbfs.WithFileEntities(), dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityDownloadFile))
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to get archive file: %s (%w)", err, queue.CriticalErr)
|
||||
}
|
||||
|
||||
// Validate file size
|
||||
if user.Edges.Group.Settings.DecompressSize > 0 && archiveFile.Size() > user.Edges.Group.Settings.DecompressSize {
|
||||
return task.StatusError,
|
||||
fmt.Errorf("file size %d exceeds the limit %d (%w)", archiveFile.Size(), user.Edges.Group.Settings.DecompressSize, queue.CriticalErr)
|
||||
}
|
||||
|
||||
// Create slave task
|
||||
storagePolicyClient := dep.StoragePolicyClient()
|
||||
policy, err := storagePolicyClient.GetPolicyByID(ctx, archiveFile.PrimaryEntity().PolicyID())
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to get policy: %w", err)
|
||||
}
|
||||
|
||||
payload := &SlaveExtractArchiveTaskState{
|
||||
FileName: archiveFile.DisplayName(),
|
||||
Entity: archiveFile.PrimaryEntity().Model(),
|
||||
Policy: policy,
|
||||
Encoding: m.state.Encoding,
|
||||
Dst: m.state.Dst,
|
||||
UserID: user.ID,
|
||||
}
|
||||
|
||||
payloadStr, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to marshal payload: %w", err)
|
||||
}
|
||||
|
||||
taskId, err := m.node.CreateTask(ctx, queue.SlaveExtractArchiveType, string(payloadStr))
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to create slave task: %w", err)
|
||||
}
|
||||
|
||||
m.state.Phase = ExtractArchivePhaseAwaitSlaveComplete
|
||||
m.state.SlaveTaskID = taskId
|
||||
m.ResumeAfter((10 * time.Second))
|
||||
return task.StatusSuspending, nil
|
||||
}
|
||||
|
||||
func (m *ExtractArchiveTask) awaitSlaveExtractComplete(ctx context.Context, dep dependency.Dep) (task.Status, error) {
|
||||
t, err := m.node.GetTask(ctx, m.state.SlaveTaskID, true)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to get slave task: %w", err)
|
||||
}
|
||||
|
||||
m.Lock()
|
||||
m.state.NodeState.progress = t.Progress
|
||||
m.Unlock()
|
||||
|
||||
if t.Status == task.StatusError {
|
||||
return task.StatusError, fmt.Errorf("slave task failed: %s (%w)", t.Error, queue.CriticalErr)
|
||||
}
|
||||
|
||||
if t.Status == task.StatusCanceled {
|
||||
return task.StatusError, fmt.Errorf("slave task canceled (%w)", queue.CriticalErr)
|
||||
}
|
||||
|
||||
if t.Status == task.StatusCompleted {
|
||||
return task.StatusCompleted, nil
|
||||
}
|
||||
|
||||
m.l.Info("Slave task %d is still compressing, resume after 30s.", m.state.SlaveTaskID)
|
||||
m.ResumeAfter((time.Second * 30))
|
||||
return task.StatusSuspending, nil
|
||||
}
|
||||
|
||||
func (m *ExtractArchiveTask) masterExtractArchive(ctx context.Context, dep dependency.Dep) (task.Status, error) {
|
||||
uri, err := fs.NewUriFromString(m.state.Uri)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to parse src uri: %s (%w)", err, queue.CriticalErr)
|
||||
}
|
||||
|
||||
dst, err := fs.NewUriFromString(m.state.Dst)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to parse dst uri: %s (%w)", err, queue.CriticalErr)
|
||||
}
|
||||
|
||||
user := inventory.UserFromContext(ctx)
|
||||
fm := manager.NewFileManager(dep, user)
|
||||
|
||||
// Get entity source to extract
|
||||
archiveFile, err := fm.Get(ctx, uri, dbfs.WithFileEntities(), dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityDownloadFile))
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to get archive file: %s (%w)", err, queue.CriticalErr)
|
||||
}
|
||||
|
||||
// Validate file size
|
||||
if user.Edges.Group.Settings.DecompressSize > 0 && archiveFile.Size() > user.Edges.Group.Settings.DecompressSize {
|
||||
return task.StatusError,
|
||||
fmt.Errorf("file size %d exceeds the limit %d (%w)", archiveFile.Size(), user.Edges.Group.Settings.DecompressSize, queue.CriticalErr)
|
||||
}
|
||||
|
||||
es, err := fm.GetEntitySource(ctx, 0, fs.WithEntity(archiveFile.PrimaryEntity()))
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to get entity source: %w", err)
|
||||
}
|
||||
|
||||
defer es.Close()
|
||||
|
||||
m.l.Info("Extracting archive %q to %q", uri, m.state.Dst)
|
||||
// Identify file format
|
||||
format, readStream, err := archiver.Identify(archiveFile.DisplayName(), es)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to identify archive format: %w", err)
|
||||
}
|
||||
|
||||
m.l.Info("Archive file %q format identified as %q", uri, format.Name())
|
||||
|
||||
extractor, ok := format.(archiver.Extractor)
|
||||
if !ok {
|
||||
return task.StatusError, fmt.Errorf("format not an extractor %s")
|
||||
}
|
||||
|
||||
if format.Name() == ".zip" {
|
||||
// Zip extractor requires a Seeker+ReadAt
|
||||
if m.state.TempZipFilePath == "" && !es.IsLocal() {
|
||||
m.state.Phase = ExtractArchivePhaseDownloadZip
|
||||
m.ResumeAfter(0)
|
||||
return task.StatusSuspending, nil
|
||||
}
|
||||
|
||||
if m.state.TempZipFilePath != "" {
|
||||
// Use temp zip file path
|
||||
zipFile, err := os.Open(m.state.TempZipFilePath)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to open temp zip file: %w", err)
|
||||
}
|
||||
|
||||
defer zipFile.Close()
|
||||
readStream = zipFile
|
||||
}
|
||||
|
||||
if es.IsLocal() {
|
||||
if _, err = es.Seek(0, 0); err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to seek entity source: %w", err)
|
||||
}
|
||||
|
||||
readStream = es
|
||||
}
|
||||
|
||||
if m.state.Encoding != "" {
|
||||
m.l.Info("Using encoding %q for zip archive", m.state.Encoding)
|
||||
extractor = archiver.Zip{TextEncoding: m.state.Encoding}
|
||||
}
|
||||
}
|
||||
|
||||
needSkipToCursor := false
|
||||
if m.state.ProcessedCursor != "" {
|
||||
needSkipToCursor = true
|
||||
}
|
||||
m.Lock()
|
||||
m.progress[ProgressTypeExtractCount] = &queue.Progress{}
|
||||
m.progress[ProgressTypeExtractSize] = &queue.Progress{}
|
||||
m.Unlock()
|
||||
|
||||
// extract and upload
|
||||
err = extractor.Extract(ctx, readStream, nil, func(ctx context.Context, f archiver.File) error {
|
||||
if needSkipToCursor && f.NameInArchive != m.state.ProcessedCursor {
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, f.Size())
|
||||
m.l.Info("File %q already processed, skipping...", f.NameInArchive)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Found cursor, start from cursor +1
|
||||
if m.state.ProcessedCursor == f.NameInArchive {
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, f.Size())
|
||||
needSkipToCursor = false
|
||||
return nil
|
||||
}
|
||||
|
||||
rawPath := util.FormSlash(f.NameInArchive)
|
||||
savePath := dst.JoinRaw(rawPath)
|
||||
|
||||
// Check if path is legit
|
||||
if !strings.HasPrefix(savePath.Path(), util.FillSlash(path.Clean(dst.Path()))) {
|
||||
m.l.Warning("Path %q is not legit, skipping...", f.NameInArchive)
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, f.Size())
|
||||
return nil
|
||||
}
|
||||
|
||||
if f.FileInfo.IsDir() {
|
||||
_, err := fm.Create(ctx, savePath, types.FileTypeFolder)
|
||||
if err != nil {
|
||||
m.l.Warning("Failed to create directory %q: %s, skipping...", rawPath, err)
|
||||
}
|
||||
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
|
||||
m.state.ProcessedCursor = f.NameInArchive
|
||||
return nil
|
||||
}
|
||||
|
||||
fileStream, err := f.Open()
|
||||
if err != nil {
|
||||
m.l.Warning("Failed to open file %q in archive file: %s, skipping...", rawPath, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
fileData := &fs.UploadRequest{
|
||||
Props: &fs.UploadProps{
|
||||
Uri: savePath,
|
||||
Size: f.Size(),
|
||||
},
|
||||
ProgressFunc: func(current, diff int64, total int64) {
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, diff)
|
||||
},
|
||||
File: fileStream,
|
||||
}
|
||||
|
||||
_, err = fm.Update(ctx, fileData, fs.WithNoEntityType())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to upload file %q in archive file: %w", rawPath, err)
|
||||
}
|
||||
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
|
||||
m.state.ProcessedCursor = f.NameInArchive
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to extract archive: %w", err)
|
||||
}
|
||||
|
||||
return task.StatusCompleted, nil
|
||||
}
|
||||
|
||||
// masterDownloadZip downloads the whole zip entity into a local temp file so
// the zip extractor (which needs random access) can read it, then sends the
// task back to the not-started phase where the actual extraction happens.
func (m *ExtractArchiveTask) masterDownloadZip(ctx context.Context, dep dependency.Dep) (task.Status, error) {
	uri, err := fs.NewUriFromString(m.state.Uri)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to parse src uri: %s (%w)", err, queue.CriticalErr)
	}

	user := inventory.UserFromContext(ctx)
	fm := manager.NewFileManager(dep, user)

	// Get entity source to extract
	archiveFile, err := fm.Get(ctx, uri, dbfs.WithFileEntities(), dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityDownloadFile))
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to get archive file: %s (%w)", err, queue.CriticalErr)
	}

	es, err := fm.GetEntitySource(ctx, 0, fs.WithEntity(archiveFile.PrimaryEntity()))
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to get entity source: %w", err)
	}

	defer es.Close()

	// For non-local entity, we need to download the whole zip file first
	tempPath, err := prepareTempFolder(ctx, dep, m)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to prepare temp folder: %w", err)
	}
	m.state.TempPath = tempPath

	fileName := fmt.Sprintf("%s.zip", uuid.Must(uuid.NewV4()))
	zipFilePath := filepath.Join(
		m.state.TempPath,
		fileName,
	)

	zipFile, err := util.CreatNestedFile(zipFilePath)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to create zip file: %w", err)
	}

	m.Lock()
	m.progress[ProgressTypeDownload] = &queue.Progress{Total: es.Entity().Size()}
	m.Unlock()

	defer zipFile.Close()
	// Stream the entity to the temp file, counting downloaded bytes.
	if _, err := io.Copy(zipFile, util.NewCallbackReader(es, func(i int64) {
		atomic.AddInt64(&m.progress[ProgressTypeDownload].Current, i)
	})); err != nil {
		// Remove the partial download; the deferred Close is a harmless
		// duplicate after this explicit one.
		zipFile.Close()
		if err := os.Remove(zipFilePath); err != nil {
			m.l.Warning("Failed to remove temp zip file %q: %s", zipFilePath, err)
		}
		return task.StatusError, fmt.Errorf("failed to copy zip file to local temp: %w", err)
	}

	m.Lock()
	delete(m.progress, ProgressTypeDownload)
	m.Unlock()
	// Record the local copy and loop back to the extraction phase.
	m.state.TempZipFilePath = zipFilePath
	m.state.Phase = ExtractArchivePhaseNotStarted
	m.ResumeAfter(0)
	return task.StatusSuspending, nil
}
|
||||
|
||||
func (m *ExtractArchiveTask) Summarize(hasher hashid.Encoder) *queue.Summary {
|
||||
if m.state == nil {
|
||||
if err := json.Unmarshal([]byte(m.State()), &m.state); err != nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &queue.Summary{
|
||||
NodeID: m.state.NodeID,
|
||||
Phase: string(m.state.Phase),
|
||||
Props: map[string]any{
|
||||
SummaryKeySrc: m.state.Uri,
|
||||
SummaryKeyDst: m.state.Dst,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (m *ExtractArchiveTask) Progress(ctx context.Context) queue.Progresses {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
if m.state.NodeState.progress != nil {
|
||||
merged := make(queue.Progresses)
|
||||
for k, v := range m.progress {
|
||||
merged[k] = v
|
||||
}
|
||||
|
||||
for k, v := range m.state.NodeState.progress {
|
||||
merged[k] = v
|
||||
}
|
||||
|
||||
return merged
|
||||
}
|
||||
return m.progress
|
||||
}
|
||||
|
||||
func (m *ExtractArchiveTask) Cleanup(ctx context.Context) error {
|
||||
if m.state.TempPath != "" {
|
||||
time.Sleep(time.Duration(1) * time.Second)
|
||||
return os.RemoveAll(m.state.TempPath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type (
	// SlaveExtractArchiveTask performs archive extraction on a slave node,
	// uploading the extracted entries back through the master.
	SlaveExtractArchiveTask struct {
		*queue.InMemoryTask

		l        logging.Logger
		state    *SlaveExtractArchiveTaskState
		progress queue.Progresses
		node     cluster.Node
	}

	// SlaveExtractArchiveTaskState is the JSON payload the master sends to the
	// slave: the entity to read, its storage policy, the destination URI and
	// requesting user. Temp paths and the processed cursor are maintained by
	// the slave during extraction.
	SlaveExtractArchiveTaskState struct {
		FileName        string             `json:"file_name"`
		Entity          *ent.Entity        `json:"entity"`
		Policy          *ent.StoragePolicy `json:"policy"`
		Encoding        string             `json:"encoding,omitempty"`
		Dst             string             `json:"dst,omitempty"`
		UserID          int                `json:"user_id"`
		TempPath        string             `json:"temp_path,omitempty"`
		TempZipFilePath string             `json:"temp_zip_file_path,omitempty"`
		ProcessedCursor string             `json:"processed_cursor,omitempty"`
	}
)
|
||||
|
||||
// NewSlaveExtractArchiveTask creates a new SlaveExtractArchiveTask from raw
// private state. The raw state string is stored verbatim and decoded later in
// Do; props carry the master's identity so the slave can call back.
func NewSlaveExtractArchiveTask(ctx context.Context, props *types.SlaveTaskProps, id int, state string) queue.Task {
	return &SlaveExtractArchiveTask{
		InMemoryTask: &queue.InMemoryTask{
			DBTask: &queue.DBTask{
				Task: &ent.Task{
					ID:            id,
					CorrelationID: logging.CorrelationID(ctx),
					PublicState: &types.TaskPublicState{
						SlaveTaskProps: props,
					},
					PrivateState: state,
				},
			},
		},

		progress: make(queue.Progresses),
	}
}
|
||||
|
||||
func (m *SlaveExtractArchiveTask) Do(ctx context.Context) (task.Status, error) {
|
||||
ctx = prepareSlaveTaskCtx(ctx, m.Model().PublicState.SlaveTaskProps)
|
||||
dep := dependency.FromContext(ctx)
|
||||
m.l = dep.Logger()
|
||||
np, err := dep.NodePool(ctx)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to get node pool: %w", err)
|
||||
}
|
||||
|
||||
m.node, err = np.Get(ctx, types.NodeCapabilityNone, 0)
|
||||
if err != nil || !m.node.IsMaster() {
|
||||
return task.StatusError, fmt.Errorf("failed to get master node: %w", err)
|
||||
}
|
||||
|
||||
fm := manager.NewFileManager(dep, nil)
|
||||
|
||||
// unmarshal state
|
||||
state := &SlaveExtractArchiveTaskState{}
|
||||
if err := json.Unmarshal([]byte(m.State()), state); err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to unmarshal state: %w", err)
|
||||
}
|
||||
|
||||
m.state = state
|
||||
m.Lock()
|
||||
if m.progress == nil {
|
||||
m.progress = make(queue.Progresses)
|
||||
}
|
||||
m.progress[ProgressTypeExtractCount] = &queue.Progress{}
|
||||
m.progress[ProgressTypeExtractSize] = &queue.Progress{}
|
||||
m.Unlock()
|
||||
|
||||
dst, err := fs.NewUriFromString(m.state.Dst)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to parse dst uri: %s (%w)", err, queue.CriticalErr)
|
||||
}
|
||||
|
||||
// 1. Get entity source
|
||||
entity := fs.NewEntity(m.state.Entity)
|
||||
es, err := fm.GetEntitySource(ctx, 0, fs.WithEntity(entity), fs.WithPolicy(fm.CastStoragePolicyOnSlave(ctx, m.state.Policy)))
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to get entity source: %w", err)
|
||||
}
|
||||
|
||||
defer es.Close()
|
||||
|
||||
// 2. Identify file format
|
||||
format, readStream, err := archiver.Identify(m.state.FileName, es)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to identify archive format: %w", err)
|
||||
}
|
||||
m.l.Info("Archive file %q format identified as %q", m.state.FileName, format.Name())
|
||||
|
||||
extractor, ok := format.(archiver.Extractor)
|
||||
if !ok {
|
||||
return task.StatusError, fmt.Errorf("format not an extractor %s")
|
||||
}
|
||||
|
||||
if format.Name() == ".zip" {
|
||||
if _, err = es.Seek(0, 0); err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to seek entity source: %w", err)
|
||||
}
|
||||
|
||||
if m.state.TempZipFilePath == "" && !es.IsLocal() {
|
||||
tempPath, err := prepareTempFolder(ctx, dep, m)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to prepare temp folder: %w", err)
|
||||
}
|
||||
m.state.TempPath = tempPath
|
||||
|
||||
fileName := fmt.Sprintf("%s.zip", uuid.Must(uuid.NewV4()))
|
||||
zipFilePath := filepath.Join(
|
||||
m.state.TempPath,
|
||||
fileName,
|
||||
)
|
||||
zipFile, err := util.CreatNestedFile(zipFilePath)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to create zip file: %w", err)
|
||||
}
|
||||
|
||||
m.Lock()
|
||||
m.progress[ProgressTypeDownload] = &queue.Progress{Total: es.Entity().Size()}
|
||||
m.Unlock()
|
||||
|
||||
defer zipFile.Close()
|
||||
if _, err := io.Copy(zipFile, util.NewCallbackReader(es, func(i int64) {
|
||||
atomic.AddInt64(&m.progress[ProgressTypeDownload].Current, i)
|
||||
})); err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to copy zip file to local temp: %w", err)
|
||||
}
|
||||
|
||||
zipFile.Close()
|
||||
m.state.TempZipFilePath = zipFilePath
|
||||
}
|
||||
|
||||
if es.IsLocal() {
|
||||
readStream = es
|
||||
} else if m.state.TempZipFilePath != "" {
|
||||
// Use temp zip file path
|
||||
zipFile, err := os.Open(m.state.TempZipFilePath)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to open temp zip file: %w", err)
|
||||
}
|
||||
|
||||
defer zipFile.Close()
|
||||
readStream = zipFile
|
||||
}
|
||||
|
||||
if es.IsLocal() {
|
||||
readStream = es
|
||||
}
|
||||
|
||||
if m.state.Encoding != "" {
|
||||
m.l.Info("Using encoding %q for zip archive", m.state.Encoding)
|
||||
extractor = archiver.Zip{TextEncoding: m.state.Encoding}
|
||||
}
|
||||
}
|
||||
|
||||
needSkipToCursor := false
|
||||
if m.state.ProcessedCursor != "" {
|
||||
needSkipToCursor = true
|
||||
}
|
||||
|
||||
// 3. Extract and upload
|
||||
err = extractor.Extract(ctx, readStream, nil, func(ctx context.Context, f archiver.File) error {
|
||||
if needSkipToCursor && f.NameInArchive != m.state.ProcessedCursor {
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, f.Size())
|
||||
m.l.Info("File %q already processed, skipping...", f.NameInArchive)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Found cursor, start from cursor +1
|
||||
if m.state.ProcessedCursor == f.NameInArchive {
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, f.Size())
|
||||
needSkipToCursor = false
|
||||
return nil
|
||||
}
|
||||
|
||||
rawPath := util.FormSlash(f.NameInArchive)
|
||||
savePath := dst.JoinRaw(rawPath)
|
||||
|
||||
// Check if path is legit
|
||||
if !strings.HasPrefix(savePath.Path(), util.FillSlash(path.Clean(dst.Path()))) {
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, f.Size())
|
||||
m.l.Warning("Path %q is not legit, skipping...", f.NameInArchive)
|
||||
return nil
|
||||
}
|
||||
|
||||
if f.FileInfo.IsDir() {
|
||||
_, err := fm.Create(ctx, savePath, types.FileTypeFolder, fs.WithNode(m.node), fs.WithStatelessUserID(m.state.UserID))
|
||||
if err != nil {
|
||||
m.l.Warning("Failed to create directory %q: %s, skipping...", rawPath, err)
|
||||
}
|
||||
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
|
||||
m.state.ProcessedCursor = f.NameInArchive
|
||||
return nil
|
||||
}
|
||||
|
||||
fileStream, err := f.Open()
|
||||
if err != nil {
|
||||
m.l.Warning("Failed to open file %q in archive file: %s, skipping...", rawPath, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
fileData := &fs.UploadRequest{
|
||||
Props: &fs.UploadProps{
|
||||
Uri: savePath,
|
||||
Size: f.Size(),
|
||||
},
|
||||
ProgressFunc: func(current, diff int64, total int64) {
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractSize].Current, diff)
|
||||
},
|
||||
File: fileStream,
|
||||
}
|
||||
|
||||
_, err = fm.Update(ctx, fileData, fs.WithNode(m.node), fs.WithStatelessUserID(m.state.UserID), fs.WithNoEntityType())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to upload file %q in archive file: %w", rawPath, err)
|
||||
}
|
||||
|
||||
atomic.AddInt64(&m.progress[ProgressTypeExtractCount].Current, 1)
|
||||
m.state.ProcessedCursor = f.NameInArchive
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to extract archive: %w", err)
|
||||
}
|
||||
|
||||
return task.StatusCompleted, nil
|
||||
}
|
||||
|
||||
func (m *SlaveExtractArchiveTask) Cleanup(ctx context.Context) error {
|
||||
if m.state.TempPath != "" {
|
||||
time.Sleep(time.Duration(1) * time.Second)
|
||||
return os.RemoveAll(m.state.TempPath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SlaveExtractArchiveTask) Progress(ctx context.Context) queue.Progresses {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
return m.progress
|
||||
}
|
||||
657
pkg/filemanager/workflows/remote_download.go
Normal file
657
pkg/filemanager/workflows/remote_download.go
Normal file
@@ -0,0 +1,657 @@
|
||||
package workflows
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/task"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/downloader"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/gofrs/uuid"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
// Types driving the "remote download" workflow: create a task on a downloader
// node, poll it until finished, then transfer the result into user storage.
type (
	// RemoteDownloadTask is a resumable, DB-persisted task. The unexported
	// fields are runtime-only and are rebuilt on each Do invocation.
	RemoteDownloadTask struct {
		*queue.DBTask

		l        logging.Logger
		state    *RemoteDownloadTaskState
		node     cluster.Node
		d        downloader.Downloader
		progress queue.Progresses
	}
	// RemoteDownloadTaskPhase labels the current workflow phase.
	RemoteDownloadTaskPhase string
	// RemoteDownloadTaskState is the JSON-serialized private state.
	// Field semantics grounded in this file's code:
	RemoteDownloadTaskState struct {
		SrcFileUri string                  `json:"src_file_uri,omitempty"` // URI of a torrent file in user storage (see createDownloadTask)
		SrcUri     string                  `json:"src_uri,omitempty"`      // direct download URL / link
		Dst        string                  `json:"dst,omitempty"`          // destination folder URI
		Handle     *downloader.TaskHandle  `json:"handle,omitempty"`       // handle of the task on the downloader
		Status     *downloader.TaskStatus  `json:"status,omitempty"`       // last polled status snapshot
		NodeState  `json:",inline"`
		Phase      RemoteDownloadTaskPhase `json:"phase,omitempty"`
		// NOTE(review): this JSON key contains a double underscore
		// ("slave__upload_task_id") — likely a typo, but renaming it would
		// break already-persisted task state; confirm before changing.
		SlaveUploadTaskID  int                   `json:"slave__upload_task_id,omitempty"`
		SlaveUploadState   *SlaveUploadTaskState `json:"slave_upload_state,omitempty"`
		GetTaskStatusTried int                   `json:"get_task_status_tried,omitempty"` // consecutive status-poll failures (see monitor)
		Transferred        map[int]interface{}   `json:"transferred,omitempty"`           // set of file indexes already transferred
		Failed             int                   `json:"failed,omitempty"`                // count of files that failed to transfer
	}
)
|
||||
|
||||
// Workflow phases and keys used in the progress and summary maps.
const (
	RemoteDownloadTaskPhaseNotStarted   RemoteDownloadTaskPhase = ""         // task not yet submitted to the downloader
	RemoteDownloadTaskPhaseMonitor                              = "monitor"  // polling the downloader for status
	RemoteDownloadTaskPhaseTransfer                             = "transfer" // moving finished files into user storage
	RemoteDownloadTaskPhaseAwaitSeeding                         = "seeding"  // waiting for seeding after transfer

	// GetTaskStatusMaxTries bounds consecutive status-poll failures before
	// the task fails (see monitor).
	GetTaskStatusMaxTries = 5

	SummaryKeyDownloadStatus = "download"
	SummaryKeySrcStr         = "src_str"

	ProgressTypeRelocateTransferCount = "relocate"
	ProgressTypeUploadSinglePrefix    = "upload_single_" // per-worker progress key prefix, suffixed with the worker id

	SummaryKeySrcMultiple    = "src_multiple"
	SummaryKeySrcDstPolicyID = "dst_policy_id"
	SummaryKeyFailed         = "failed"
)
|
||||
|
||||
// init registers the factory so persisted remote-download tasks can be
// reconstructed from the database after a restart.
func init() {
	queue.RegisterResumableTaskFactory(queue.RemoteDownloadTaskType, NewRemoteDownloadTaskFromModel)
}
|
||||
|
||||
// NewRemoteDownloadTask creates a new RemoteDownloadTask
|
||||
func NewRemoteDownloadTask(ctx context.Context, src string, srcFile, dst string) (queue.Task, error) {
|
||||
state := &RemoteDownloadTaskState{
|
||||
SrcUri: src,
|
||||
SrcFileUri: srcFile,
|
||||
Dst: dst,
|
||||
NodeState: NodeState{},
|
||||
}
|
||||
stateBytes, err := json.Marshal(state)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal state: %w", err)
|
||||
}
|
||||
|
||||
t := &RemoteDownloadTask{
|
||||
DBTask: &queue.DBTask{
|
||||
Task: &ent.Task{
|
||||
Type: queue.RemoteDownloadTaskType,
|
||||
CorrelationID: logging.CorrelationID(ctx),
|
||||
PrivateState: string(stateBytes),
|
||||
PublicState: &types.TaskPublicState{},
|
||||
},
|
||||
DirectOwner: inventory.UserFromContext(ctx),
|
||||
},
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func NewRemoteDownloadTaskFromModel(task *ent.Task) queue.Task {
|
||||
return &RemoteDownloadTask{
|
||||
DBTask: &queue.DBTask{
|
||||
Task: task,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Do executes one step of the remote-download workflow. It rebuilds runtime
// dependencies (logger, node, downloader), dispatches on the persisted phase,
// and re-persists the (possibly mutated) state before returning. Phase
// handlers return StatusSuspending to be resumed later.
func (m *RemoteDownloadTask) Do(ctx context.Context) (task.Status, error) {
	dep := dependency.FromContext(ctx)
	m.l = dep.Logger()

	// unmarshal state
	state := &RemoteDownloadTaskState{}
	if err := json.Unmarshal([]byte(m.State()), state); err != nil {
		return task.StatusError, fmt.Errorf("failed to unmarshal state: %w", err)
	}
	m.state = state

	// select node
	node, err := allocateNode(ctx, dep, &m.state.NodeState, types.NodeCapabilityRemoteDownload)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to allocate node: %w", err)
	}
	m.node = node

	// create downloader instance (reused across resumed invocations within
	// the same process)
	if m.d == nil {
		d, err := node.CreateDownloader(ctx, dep.RequestClient(), dep.SettingProvider())
		if err != nil {
			return task.StatusError, fmt.Errorf("failed to create downloader: %w", err)
		}

		m.d = d
	}

	// Dispatch on the current phase; each handler returns the next queue
	// status and may advance m.state.Phase.
	next := task.StatusCompleted
	switch m.state.Phase {
	case RemoteDownloadTaskPhaseNotStarted:
		next, err = m.createDownloadTask(ctx, dep)
	case RemoteDownloadTaskPhaseMonitor, RemoteDownloadTaskPhaseAwaitSeeding:
		next, err = m.monitor(ctx, dep)
	case RemoteDownloadTaskPhaseTransfer:
		// Transfer runs locally on master, or via a slave upload task.
		if m.node.IsMaster() {
			next, err = m.masterTransfer(ctx, dep)
		} else {
			next, err = m.slaveTransfer(ctx, dep)
		}
	}

	// Persist state even when the phase handler returned an error, so retries
	// resume from the latest cursor/handle.
	newStateStr, marshalErr := json.Marshal(m.state)
	if marshalErr != nil {
		return task.StatusError, fmt.Errorf("failed to marshal state: %w", marshalErr)
	}

	m.Lock()
	m.Task.PrivateState = string(newStateStr)
	m.Unlock()
	return next, err
}
|
||||
|
||||
// createDownloadTask submits the download to the downloader and advances the
// phase to monitor. If the source is a torrent file stored in user storage,
// a temporary entity URL is generated for the downloader to fetch it.
// Idempotent: an existing handle skips straight to the monitor phase.
func (m *RemoteDownloadTask) createDownloadTask(ctx context.Context, dep dependency.Dep) (task.Status, error) {
	if m.state.Handle != nil {
		m.state.Phase = RemoteDownloadTaskPhaseMonitor
		return task.StatusSuspending, nil
	}

	user := inventory.UserFromContext(ctx)
	torrentUrl := m.state.SrcUri
	if m.state.SrcFileUri != "" {
		// Target is a torrent file
		uri, err := fs.NewUriFromString(m.state.SrcFileUri)
		if err != nil {
			return task.StatusError, fmt.Errorf("failed to parse src file uri: %s (%w)", err, queue.CriticalErr)
		}

		fm := manager.NewFileManager(dep, user)
		expire := time.Now().Add(dep.SettingProvider().EntityUrlValidDuration(ctx))
		torrentUrls, _, err := fm.GetEntityUrls(ctx, []manager.GetEntityUrlArgs{
			{URI: uri},
		}, fs.WithUrlExpire(&expire))
		if err != nil {
			return task.StatusError, fmt.Errorf("failed to get torrent entity urls: %w", err)
		}

		if len(torrentUrls) == 0 {
			return task.StatusError, fmt.Errorf("no torrent urls found")
		}

		torrentUrl = torrentUrls[0]
	}

	// Create download task
	handle, err := m.d.CreateTask(ctx, torrentUrl, user.Edges.Group.Settings.RemoteDownloadOptions)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to create download task: %w", err)
	}

	m.state.Handle = handle
	m.state.Phase = RemoteDownloadTaskPhaseMonitor
	return task.StatusSuspending, nil
}
|
||||
|
||||
// monitor polls the downloader for the task's status and advances the phase
// accordingly. It serves both the monitor phase (before transfer) and the
// await-seeding phase (after transfer); the phase value decides whether a
// seeding/completed status triggers the transfer phase or finishes the task.
// Transient poll failures are retried up to GetTaskStatusMaxTries times.
func (m *RemoteDownloadTask) monitor(ctx context.Context, dep dependency.Dep) (task.Status, error) {
	// Poll interval comes from the node settings.
	resumeAfter := time.Duration(m.node.Settings(ctx).Interval) * time.Second

	// Update task status
	status, err := m.d.Info(ctx, m.state.Handle)
	if err != nil {
		if errors.Is(err, downloader.ErrTaskNotFount) && m.state.Status != nil {
			// If task is not found, but it previously existed, consider it as canceled
			m.l.Warning("task not found, consider it as canceled")
			return task.StatusCanceled, nil
		}

		m.state.GetTaskStatusTried++
		if m.state.GetTaskStatusTried >= GetTaskStatusMaxTries {
			return task.StatusError, fmt.Errorf("failed to get task status after %d retry: %w", m.state.GetTaskStatusTried, err)
		}

		m.l.Warning("failed to get task info: %s, will retry.", err)
		m.ResumeAfter(resumeAfter)
		return task.StatusSuspending, nil
	}

	// Follow to new handle if needed (the downloader may replace the task,
	// e.g. after metadata resolution).
	if status.FollowedBy != nil {
		m.l.Info("Task handle updated to %v", status.FollowedBy)
		m.state.Handle = status.FollowedBy
		m.ResumeAfter(0)
		return task.StatusSuspending, nil
	}

	if m.state.Status == nil || m.state.Status.Total != status.Total {
		m.l.Info("download size changed, re-validate files.")
		// First time to get status / total size changed, check user capacity
		if err := m.validateFiles(ctx, dep, status); err != nil {
			m.state.Status = status
			return task.StatusError, fmt.Errorf("failed to validate files: %s (%w)", err, queue.CriticalErr)
		}
	}

	m.state.Status = status
	// Successful poll resets the transient-failure counter.
	m.state.GetTaskStatusTried = 0

	m.l.Debug("Monitor %q task state: %s", status.Name, status.State)
	switch status.State {
	case downloader.StatusSeeding:
		m.l.Info("Download task seeding")
		if m.state.Phase == RemoteDownloadTaskPhaseMonitor {
			// Not transferred
			m.state.Phase = RemoteDownloadTaskPhaseTransfer
			return task.StatusSuspending, nil
		} else if !m.node.Settings(ctx).WaitForSeeding {
			// Skip seeding
			m.l.Info("Download task seeding skipped.")
			return task.StatusCompleted, nil
		} else {
			// Still seeding
			m.ResumeAfter(resumeAfter)
			return task.StatusSuspending, nil
		}
	case downloader.StatusCompleted:
		m.l.Info("Download task completed")
		if m.state.Phase == RemoteDownloadTaskPhaseMonitor {
			// Not transferred
			m.state.Phase = RemoteDownloadTaskPhaseTransfer
			return task.StatusSuspending, nil
		}
		// Seeding complete
		m.l.Info("Download task seeding completed")
		return task.StatusCompleted, nil
	case downloader.StatusDownloading:
		m.ResumeAfter(resumeAfter)
		return task.StatusSuspending, nil
	case downloader.StatusUnknown, downloader.StatusError:
		return task.StatusError, fmt.Errorf("download task failed with state %q (%w)", status.State, queue.CriticalErr)
	}

	// Any other state: keep polling.
	m.ResumeAfter(resumeAfter)
	return task.StatusSuspending, nil
}
|
||||
|
||||
func (m *RemoteDownloadTask) slaveTransfer(ctx context.Context, dep dependency.Dep) (task.Status, error) {
|
||||
u := inventory.UserFromContext(ctx)
|
||||
if m.state.Transferred == nil {
|
||||
m.state.Transferred = make(map[int]interface{})
|
||||
}
|
||||
|
||||
if m.state.SlaveUploadTaskID == 0 {
|
||||
dstUri, err := fs.NewUriFromString(m.state.Dst)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to parse dst uri %q: %s (%w)", m.state.Dst, err, queue.CriticalErr)
|
||||
}
|
||||
|
||||
// Create slave upload task
|
||||
payload := &SlaveUploadTaskState{
|
||||
Files: []SlaveUploadEntity{},
|
||||
MaxParallel: dep.SettingProvider().MaxParallelTransfer(ctx),
|
||||
UserID: u.ID,
|
||||
}
|
||||
|
||||
// Construct files to be transferred
|
||||
for _, f := range m.state.Status.Files {
|
||||
if !f.Selected {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip already transferred
|
||||
if _, ok := m.state.Transferred[f.Index]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
dst := dstUri.JoinRaw(f.Name)
|
||||
src := filepath.FromSlash(path.Join(m.state.Status.SavePath, f.Name))
|
||||
payload.Files = append(payload.Files, SlaveUploadEntity{
|
||||
Src: src,
|
||||
Uri: dst,
|
||||
Size: f.Size,
|
||||
Index: f.Index,
|
||||
})
|
||||
}
|
||||
|
||||
payloadStr, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to marshal payload: %w", err)
|
||||
}
|
||||
|
||||
taskId, err := m.node.CreateTask(ctx, queue.SlaveUploadTaskType, string(payloadStr))
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to create slave task: %w", err)
|
||||
}
|
||||
|
||||
m.state.NodeState.progress = nil
|
||||
m.state.SlaveUploadTaskID = taskId
|
||||
m.ResumeAfter(0)
|
||||
return task.StatusSuspending, nil
|
||||
}
|
||||
|
||||
m.l.Info("Checking slave upload task %d...", m.state.SlaveUploadTaskID)
|
||||
t, err := m.node.GetTask(ctx, m.state.SlaveUploadTaskID, true)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to get slave task: %w", err)
|
||||
}
|
||||
|
||||
m.Lock()
|
||||
m.state.NodeState.progress = t.Progress
|
||||
m.Unlock()
|
||||
|
||||
m.state.SlaveUploadState = &SlaveUploadTaskState{}
|
||||
if err := json.Unmarshal([]byte(t.PrivateState), m.state.SlaveUploadState); err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to unmarshal slave compress state: %s (%w)", err, queue.CriticalErr)
|
||||
}
|
||||
|
||||
if t.Status == task.StatusError || t.Status == task.StatusCompleted {
|
||||
if len(m.state.SlaveUploadState.Transferred) < len(m.state.SlaveUploadState.Files) {
|
||||
// Not all files transferred, retry
|
||||
slaveTaskId := m.state.SlaveUploadTaskID
|
||||
m.state.SlaveUploadTaskID = 0
|
||||
for i, _ := range m.state.SlaveUploadState.Transferred {
|
||||
m.state.Transferred[m.state.SlaveUploadState.Files[i].Index] = struct{}{}
|
||||
}
|
||||
|
||||
m.l.Warning("Slave task %d failed to transfer %d files, retrying...", slaveTaskId, len(m.state.SlaveUploadState.Files)-len(m.state.SlaveUploadState.Transferred))
|
||||
return task.StatusError, fmt.Errorf(
|
||||
"slave task failed to transfer %d files, first 5 errors: %s",
|
||||
len(m.state.SlaveUploadState.Files)-len(m.state.SlaveUploadState.Transferred),
|
||||
m.state.SlaveUploadState.First5TransferErrors,
|
||||
)
|
||||
} else {
|
||||
m.state.Phase = RemoteDownloadTaskPhaseAwaitSeeding
|
||||
m.ResumeAfter(0)
|
||||
return task.StatusSuspending, nil
|
||||
}
|
||||
}
|
||||
|
||||
if t.Status == task.StatusCanceled {
|
||||
return task.StatusError, fmt.Errorf("slave task canceled (%w)", queue.CriticalErr)
|
||||
}
|
||||
|
||||
m.l.Info("Slave task %d is still uploading, resume after 30s.", m.state.SlaveUploadTaskID)
|
||||
m.ResumeAfter(time.Second * 30)
|
||||
return task.StatusSuspending, nil
|
||||
}
|
||||
|
||||
func (m *RemoteDownloadTask) masterTransfer(ctx context.Context, dep dependency.Dep) (task.Status, error) {
|
||||
if m.state.Transferred == nil {
|
||||
m.state.Transferred = make(map[int]interface{})
|
||||
}
|
||||
|
||||
maxParallel := dep.SettingProvider().MaxParallelTransfer(ctx)
|
||||
wg := sync.WaitGroup{}
|
||||
worker := make(chan int, maxParallel)
|
||||
for i := 0; i < maxParallel; i++ {
|
||||
worker <- i
|
||||
}
|
||||
|
||||
// Sum up total count and select files
|
||||
totalCount := 0
|
||||
totalSize := int64(0)
|
||||
allFiles := make([]downloader.TaskFile, 0, len(m.state.Status.Files))
|
||||
for _, f := range m.state.Status.Files {
|
||||
if f.Selected {
|
||||
allFiles = append(allFiles, f)
|
||||
totalSize += f.Size
|
||||
totalCount++
|
||||
}
|
||||
}
|
||||
|
||||
m.Lock()
|
||||
m.progress = make(queue.Progresses)
|
||||
m.progress[ProgressTypeUploadCount] = &queue.Progress{Total: int64(totalCount)}
|
||||
m.progress[ProgressTypeUpload] = &queue.Progress{Total: totalSize}
|
||||
m.Unlock()
|
||||
|
||||
dstUri, err := fs.NewUriFromString(m.state.Dst)
|
||||
if err != nil {
|
||||
return task.StatusError, fmt.Errorf("failed to parse dst uri: %s (%w)", err, queue.CriticalErr)
|
||||
}
|
||||
|
||||
user := inventory.UserFromContext(ctx)
|
||||
fm := manager.NewFileManager(dep, user)
|
||||
failed := int64(0)
|
||||
ae := serializer.NewAggregateError()
|
||||
|
||||
transferFunc := func(workerId int, file downloader.TaskFile) {
|
||||
defer func() {
|
||||
atomic.AddInt64(&m.progress[ProgressTypeUploadCount].Current, 1)
|
||||
worker <- workerId
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
dst := dstUri.JoinRaw(file.Name)
|
||||
src := filepath.FromSlash(path.Join(m.state.Status.SavePath, file.Name))
|
||||
m.l.Info("Uploading file %s to %s...", src, file.Name, dst)
|
||||
|
||||
progressKey := fmt.Sprintf("%s%d", ProgressTypeUploadSinglePrefix, workerId)
|
||||
m.Lock()
|
||||
m.progress[progressKey] = &queue.Progress{Identifier: dst.String(), Total: file.Size}
|
||||
m.Unlock()
|
||||
|
||||
fileStream, err := os.Open(src)
|
||||
if err != nil {
|
||||
m.l.Warning("Failed to open file %s: %s", src, err.Error())
|
||||
atomic.AddInt64(&m.progress[ProgressTypeUpload].Current, file.Size)
|
||||
atomic.AddInt64(&failed, 1)
|
||||
ae.Add(file.Name, fmt.Errorf("failed to open file: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
defer fileStream.Close()
|
||||
|
||||
fileData := &fs.UploadRequest{
|
||||
Props: &fs.UploadProps{
|
||||
Uri: dst,
|
||||
Size: file.Size,
|
||||
},
|
||||
ProgressFunc: func(current, diff int64, total int64) {
|
||||
atomic.AddInt64(&m.progress[progressKey].Current, diff)
|
||||
atomic.AddInt64(&m.progress[ProgressTypeUpload].Current, diff)
|
||||
},
|
||||
File: fileStream,
|
||||
}
|
||||
|
||||
_, err = fm.Update(ctx, fileData, fs.WithNoEntityType())
|
||||
if err != nil {
|
||||
m.l.Warning("Failed to upload file %s: %s", src, err.Error())
|
||||
atomic.AddInt64(&failed, 1)
|
||||
atomic.AddInt64(&m.progress[ProgressTypeUpload].Current, file.Size)
|
||||
ae.Add(file.Name, fmt.Errorf("failed to upload file: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
m.Lock()
|
||||
m.state.Transferred[file.Index] = nil
|
||||
m.Unlock()
|
||||
}
|
||||
|
||||
// Start upload files
|
||||
for _, file := range allFiles {
|
||||
// Check if file is already transferred
|
||||
if _, ok := m.state.Transferred[file.Index]; ok {
|
||||
m.l.Info("File %s already transferred, skipping...", file.Name)
|
||||
atomic.AddInt64(&m.progress[ProgressTypeUpload].Current, file.Size)
|
||||
atomic.AddInt64(&m.progress[ProgressTypeUploadCount].Current, 1)
|
||||
continue
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return task.StatusError, ctx.Err()
|
||||
case workerId := <-worker:
|
||||
wg.Add(1)
|
||||
|
||||
go transferFunc(workerId, file)
|
||||
}
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
if failed > 0 {
|
||||
m.state.Failed = int(failed)
|
||||
m.l.Error("Failed to transfer %d file(s).", failed)
|
||||
return task.StatusError, fmt.Errorf("failed to transfer %d file(s), first 5 errors: %s", failed, ae.FormatFirstN(5))
|
||||
}
|
||||
|
||||
m.l.Info("All files transferred.")
|
||||
m.state.Phase = RemoteDownloadTaskPhaseAwaitSeeding
|
||||
return task.StatusSuspending, nil
|
||||
}
|
||||
|
||||
// awaitSeeding is a stub for the seeding-wait phase.
// NOTE(review): Do's phase switch routes RemoteDownloadTaskPhaseAwaitSeeding
// to monitor, not here — confirm whether this method is still needed.
func (m *RemoteDownloadTask) awaitSeeding(ctx context.Context, dep dependency.Dep) (task.Status, error) {
	return task.StatusSuspending, nil
}
|
||||
|
||||
// validateFiles checks that the destination can accept the download (e.g.
// user capacity) by preparing — and then immediately discarding — a
// placeholder upload session of the task's total size at the destination URI.
func (m *RemoteDownloadTask) validateFiles(ctx context.Context, dep dependency.Dep, status *downloader.TaskStatus) error {
	// Validate files
	user := inventory.UserFromContext(ctx)
	fm := manager.NewFileManager(dep, user)

	dstUri, err := fs.NewUriFromString(m.state.Dst)
	if err != nil {
		return fmt.Errorf("failed to parse dst uri: %w", err)
	}

	selectedFiles := lo.Filter(status.Files, func(f downloader.TaskFile, _ int) bool {
		return f.Selected
	})
	if len(selectedFiles) == 0 {
		return fmt.Errorf("no selected file found in download task")
	}

	// find the first valid file (one with a known name) to use for the
	// placeholder session
	var placeholderFileName string
	for _, f := range selectedFiles {
		if f.Name != "" {
			placeholderFileName = f.Name
			break
		}
	}

	if placeholderFileName == "" {
		// File name not available yet, generate one
		m.l.Debug("File name not available yet, generate one to validate the destination")
		placeholderFileName = uuid.Must(uuid.NewV4()).String()
	}

	// Create a placeholder file then delete it to validate the destination
	session, err := fm.PrepareUpload(ctx, &fs.UploadRequest{
		Props: &fs.UploadProps{
			Uri:             dstUri.Join(path.Base(placeholderFileName)),
			Size:            status.Total,
			UploadSessionID: uuid.Must(uuid.NewV4()).String(),
			ExpireAt:        time.Now().Add(time.Second * 3600),
		},
	})
	if err != nil {
		return err
	}

	// Immediately discard the placeholder session; only the PrepareUpload
	// validation result matters.
	fm.OnUploadFailed(ctx, session)
	return nil
}
|
||||
|
||||
func (m *RemoteDownloadTask) Cleanup(ctx context.Context) error {
|
||||
if m.state.Handle != nil {
|
||||
if err := m.d.Cancel(ctx, m.state.Handle); err != nil {
|
||||
m.l.Warning("failed to cancel download task: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if m.state.Status != nil && m.node.IsMaster() && m.state.Status.SavePath != "" {
|
||||
if err := os.RemoveAll(m.state.Status.SavePath); err != nil {
|
||||
m.l.Warning("failed to remove download temp folder: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetDownloadTarget sets the files to download for the task
|
||||
func (m *RemoteDownloadTask) SetDownloadTarget(ctx context.Context, args ...*downloader.SetFileToDownloadArgs) error {
|
||||
if m.state.Handle == nil {
|
||||
return fmt.Errorf("download task not created")
|
||||
}
|
||||
|
||||
return m.d.SetFilesToDownload(ctx, m.state.Handle, args...)
|
||||
}
|
||||
|
||||
// CancelDownload cancels the download task
|
||||
func (m *RemoteDownloadTask) CancelDownload(ctx context.Context) error {
|
||||
if m.state.Handle == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return m.d.Cancel(ctx, m.state.Handle)
|
||||
}
|
||||
|
||||
func (m *RemoteDownloadTask) Summarize(hasher hashid.Encoder) *queue.Summary {
|
||||
// unmarshal state
|
||||
if m.state == nil {
|
||||
if err := json.Unmarshal([]byte(m.State()), &m.state); err != nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
var status *downloader.TaskStatus
|
||||
if m.state.Status != nil {
|
||||
status = &*m.state.Status
|
||||
|
||||
// Redact save path
|
||||
status.SavePath = ""
|
||||
}
|
||||
|
||||
failed := m.state.Failed
|
||||
if m.state.SlaveUploadState != nil && m.state.Phase != RemoteDownloadTaskPhaseTransfer {
|
||||
failed = len(m.state.SlaveUploadState.Files) - len(m.state.SlaveUploadState.Transferred)
|
||||
}
|
||||
|
||||
return &queue.Summary{
|
||||
Phase: string(m.state.Phase),
|
||||
NodeID: m.state.NodeID,
|
||||
Props: map[string]any{
|
||||
SummaryKeySrcStr: m.state.SrcUri,
|
||||
SummaryKeySrc: m.state.SrcFileUri,
|
||||
SummaryKeyDst: m.state.Dst,
|
||||
SummaryKeyFailed: failed,
|
||||
SummaryKeyDownloadStatus: status,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (m *RemoteDownloadTask) Progress(ctx context.Context) queue.Progresses {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
if m.state.NodeState.progress != nil {
|
||||
merged := make(queue.Progresses)
|
||||
for k, v := range m.progress {
|
||||
merged[k] = v
|
||||
}
|
||||
|
||||
for k, v := range m.state.NodeState.progress {
|
||||
merged[k] = v
|
||||
}
|
||||
|
||||
return merged
|
||||
}
|
||||
return m.progress
|
||||
}
|
||||
224
pkg/filemanager/workflows/upload.go
Normal file
224
pkg/filemanager/workflows/upload.go
Normal file
@@ -0,0 +1,224 @@
|
||||
package workflows
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent/task"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
)
|
||||
|
||||
// Types for the slave-side batch upload task used by remote download (and
// other workflows) to push local files from a slave node into user storage.
type (
	// SlaveUploadEntity describes one local file the slave should upload.
	SlaveUploadEntity struct {
		Uri   *fs.URI `json:"uri"`   // destination URI
		Src   string  `json:"src"`   // local source path on the slave
		Size  int64   `json:"size"`  // file size in bytes
		Index int     `json:"index"` // index within the originating task
	}
	// SlaveUploadTaskState is the JSON-serialized private state, also read
	// back by the master (see slaveTransfer) to learn what was transferred.
	SlaveUploadTaskState struct {
		MaxParallel          int                 `json:"max_parallel"` // concurrent upload workers
		Files                []SlaveUploadEntity `json:"files"`
		Transferred          map[int]interface{} `json:"transferred"` // indexes uploaded so far (set semantics)
		UserID               int                 `json:"user_id"`
		First5TransferErrors string              `json:"first_5_transfer_errors,omitempty"`
	}
	// SlaveUploadTask is the in-memory (non-persisted) task executed on the
	// slave; runtime fields are populated by Do.
	SlaveUploadTask struct {
		*queue.InMemoryTask

		progress queue.Progresses
		l        logging.Logger
		state    *SlaveUploadTaskState
		node     cluster.Node
	}
)
|
||||
|
||||
// NewSlaveUploadTask creates a new SlaveUploadTask from raw private state
|
||||
func NewSlaveUploadTask(ctx context.Context, props *types.SlaveTaskProps, id int, state string) queue.Task {
|
||||
return &SlaveUploadTask{
|
||||
InMemoryTask: &queue.InMemoryTask{
|
||||
DBTask: &queue.DBTask{
|
||||
Task: &ent.Task{
|
||||
ID: id,
|
||||
CorrelationID: logging.CorrelationID(ctx),
|
||||
PublicState: &types.TaskPublicState{
|
||||
SlaveTaskProps: props,
|
||||
},
|
||||
PrivateState: state,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
progress: make(queue.Progresses),
|
||||
}
|
||||
}
|
||||
|
||||
// Do uploads all files listed in the task's private state to the master node
// using up to MaxParallel concurrent workers. Already-transferred files
// (recorded in state.Transferred) are skipped, so the task is resumable.
// It returns StatusError only when the task cannot proceed at all or when
// every file failed; partial failures complete with errors summarized in
// state.First5TransferErrors.
func (t *SlaveUploadTask) Do(ctx context.Context) (task.Status, error) {
	// Inject master-site identity values required by slave-side components.
	ctx = prepareSlaveTaskCtx(ctx, t.Model().PublicState.SlaveTaskProps)
	dep := dependency.FromContext(ctx)
	t.l = dep.Logger()

	np, err := dep.NodePool(ctx)
	if err != nil {
		return task.StatusError, fmt.Errorf("failed to get node pool: %w", err)
	}

	// Node 0 with no capability requirement resolves to the master node;
	// the IsMaster check guards against a misconfigured pool.
	t.node, err = np.Get(ctx, types.NodeCapabilityNone, 0)
	if err != nil || !t.node.IsMaster() {
		return task.StatusError, fmt.Errorf("failed to get master node: %w", err)
	}

	fm := manager.NewFileManager(dep, nil)

	// unmarshal state
	state := &SlaveUploadTaskState{}
	if err := json.Unmarshal([]byte(t.State()), state); err != nil {
		return task.StatusError, fmt.Errorf("failed to unmarshal state: %w", err)
	}

	t.state = state
	if t.state.Transferred == nil {
		t.state.Transferred = make(map[int]interface{})
	}

	// Worker pool: the buffered channel holds the free worker IDs; a worker
	// ID must be drawn before spawning a transfer and is returned on finish.
	wg := sync.WaitGroup{}
	worker := make(chan int, t.state.MaxParallel)
	for i := 0; i < t.state.MaxParallel; i++ {
		worker <- i
	}

	// Sum up total count
	totalCount := 0
	totalSize := int64(0)
	for _, res := range state.Files {
		totalSize += res.Size
		totalCount++
	}
	// Map writes to t.progress are guarded by the task lock; the int64
	// fields inside each Progress are then updated with atomics.
	t.Lock()
	t.progress[ProgressTypeUploadCount] = &queue.Progress{}
	t.progress[ProgressTypeUpload] = &queue.Progress{}
	t.Unlock()
	atomic.StoreInt64(&t.progress[ProgressTypeUploadCount].Total, int64(totalCount))
	atomic.StoreInt64(&t.progress[ProgressTypeUpload].Total, totalSize)
	ae := serializer.NewAggregateError()
	// transferFunc uploads one file; it runs in its own goroutine and always
	// releases its worker ID and decrements the wait group on exit.
	transferFunc := func(workerId, fileId int, file SlaveUploadEntity) {
		defer func() {
			atomic.AddInt64(&t.progress[ProgressTypeUploadCount].Current, 1)
			worker <- workerId
			wg.Done()
		}()

		t.l.Info("Uploading file %s to %s...", file.Src, file.Uri.String())

		// Per-worker progress entry; keyed by worker ID so at most
		// MaxParallel single-file entries exist at a time.
		progressKey := fmt.Sprintf("%s%d", ProgressTypeUploadSinglePrefix, workerId)
		t.Lock()
		t.progress[progressKey] = &queue.Progress{Identifier: file.Uri.String(), Total: file.Size}
		t.Unlock()

		handle, err := os.Open(file.Src)
		if err != nil {
			t.l.Warning("Failed to open file %s: %s", file.Src, err.Error())
			// Count the whole file as "done" so the aggregate bar still reaches 100%.
			atomic.AddInt64(&t.progress[ProgressTypeUpload].Current, file.Size)
			ae.Add(filepath.Base(file.Src), fmt.Errorf("failed to open file: %w", err))
			return
		}

		stat, err := handle.Stat()
		if err != nil {
			t.l.Warning("Failed to get file stat for %s: %s", file.Src, err.Error())
			handle.Close()
			atomic.AddInt64(&t.progress[ProgressTypeUpload].Current, file.Size)
			ae.Add(filepath.Base(file.Src), fmt.Errorf("failed to get file stat: %w", err))
			return
		}

		// Use the fresh stat size rather than file.Size in case the file
		// changed since the state was created.
		fileData := &fs.UploadRequest{
			Props: &fs.UploadProps{
				Uri:  file.Uri,
				Size: stat.Size(),
			},
			ProgressFunc: func(current, diff int64, total int64) {
				atomic.AddInt64(&t.progress[progressKey].Current, diff)
				atomic.AddInt64(&t.progress[ProgressTypeUpload].Current, diff)
				atomic.StoreInt64(&t.progress[progressKey].Total, total)
			},
			File:   handle,
			Seeker: handle,
		}

		_, err = fm.Update(ctx, fileData, fs.WithNode(t.node), fs.WithStatelessUserID(t.state.UserID), fs.WithNoEntityType())
		if err != nil {
			handle.Close()
			t.l.Warning("Failed to upload file %s: %s", file.Src, err.Error())
			atomic.AddInt64(&t.progress[ProgressTypeUpload].Current, file.Size)
			ae.Add(filepath.Base(file.Src), fmt.Errorf("failed to upload file: %w", err))
			return
		}

		// Mark success under the lock; the value is unused, only the key matters.
		t.Lock()
		t.state.Transferred[fileId] = nil
		t.Unlock()
		handle.Close()
	}

	// Start upload files
	for fileId, file := range t.state.Files {
		// Check if file is already transferred
		// NOTE(review): this read of t.state.Transferred is done without
		// holding the lock while worker goroutines from earlier iterations
		// may be writing it under t.Lock — confirm whether this map access
		// is a data race under `go test -race`.
		if _, ok := t.state.Transferred[fileId]; ok {
			t.l.Info("File %s already transferred, skipping...", file.Src)
			atomic.AddInt64(&t.progress[ProgressTypeUpload].Current, file.Size)
			atomic.AddInt64(&t.progress[ProgressTypeUploadCount].Current, 1)
			continue
		}

		select {
		case <-ctx.Done():
			// NOTE(review): returning here does not wg.Wait() for in-flight
			// transfers, so their goroutines may outlive this call — verify
			// this is acceptable for task cancellation semantics.
			return task.StatusError, ctx.Err()
		case workerId := <-worker:
			wg.Add(1)

			go transferFunc(workerId, fileId, file)
		}
	}

	wg.Wait()

	// Persist the updated transfer record and error summary back into the
	// task's private state.
	t.state.First5TransferErrors = ae.FormatFirstN(5)
	newStateStr, marshalErr := json.Marshal(t.state)
	if marshalErr != nil {
		return task.StatusError, fmt.Errorf("failed to marshal state: %w", marshalErr)
	}
	t.Lock()
	t.Task.PrivateState = string(newStateStr)
	t.Unlock()

	// If all files are failed to transfer, return error
	if len(t.state.Transferred) != len(t.state.Files) {
		t.l.Warning("%d files not transferred", len(t.state.Files)-len(t.state.Transferred))
		if len(t.state.Transferred) == 0 {
			return task.StatusError, fmt.Errorf("all file failed to transfer")
		}

	}

	return task.StatusCompleted, nil
}
|
||||
|
||||
func (m *SlaveUploadTask) Progress(ctx context.Context) queue.Progresses {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
return m.progress
|
||||
}
|
||||
62
pkg/filemanager/workflows/worfklows.go
Normal file
62
pkg/filemanager/workflows/worfklows.go
Normal file
@@ -0,0 +1,62 @@
|
||||
package workflows
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
)
|
||||
|
||||
const (
	// TaskTempPath is the folder name, under the configured temp path, that
	// holds per-task working directories (see prepareTempFolder).
	TaskTempPath = "fm_workflows"
	// slaveProgressRefreshInterval is how often slave task progress is
	// refreshed — presumably polled from the slave node; confirm at call sites.
	slaveProgressRefreshInterval = 5 * time.Second
)
|
||||
|
||||
// NodeState records the node assigned to a task so repeated allocations
// (e.g. across retries) can prefer the same node.
type NodeState struct {
	NodeID int `json:"node_id"` // ID of the previously allocated node; passed back to the pool on re-allocation.

	// progress is unexported and therefore not serialized with the state.
	progress queue.Progresses
}
|
||||
|
||||
// allocateNode allocates a node for the task.
|
||||
func allocateNode(ctx context.Context, dep dependency.Dep, state *NodeState, capability types.NodeCapability) (cluster.Node, error) {
|
||||
np, err := dep.NodePool(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get node pool: %w", err)
|
||||
}
|
||||
|
||||
node, err := np.Get(ctx, capability, state.NodeID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get node: %w", err)
|
||||
}
|
||||
|
||||
state.NodeID = node.ID()
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// prepareSlaveTaskCtx prepares the context for the slave task.
|
||||
func prepareSlaveTaskCtx(ctx context.Context, props *types.SlaveTaskProps) context.Context {
|
||||
ctx = context.WithValue(ctx, cluster.SlaveNodeIDCtx{}, strconv.Itoa(props.NodeID))
|
||||
ctx = context.WithValue(ctx, cluster.MasterSiteUrlCtx{}, props.MasterSiteURl)
|
||||
ctx = context.WithValue(ctx, cluster.MasterSiteVersionCtx{}, props.MasterSiteVersion)
|
||||
ctx = context.WithValue(ctx, cluster.MasterSiteIDCtx{}, props.MasterSiteID)
|
||||
return ctx
|
||||
}
|
||||
|
||||
func prepareTempFolder(ctx context.Context, dep dependency.Dep, t queue.Task) (string, error) {
|
||||
settings := dep.SettingProvider()
|
||||
tempPath := util.DataPath(path.Join(settings.TempPath(ctx), TaskTempPath, strconv.Itoa(t.ID())))
|
||||
if err := util.CreatNestedFolder(tempPath); err != nil {
|
||||
return "", fmt.Errorf("failed to create temp folder: %w", err)
|
||||
}
|
||||
|
||||
dep.Logger().Info("Temp folder created: %s", tempPath)
|
||||
return tempPath, nil
|
||||
}
|
||||
Reference in New Issue
Block a user