Init V4 community edition (#2265)
* Init V4 community edition * Init V4 community edition
This commit is contained in:
588
pkg/filemanager/driver/cos/cos.go
Normal file
588
pkg/filemanager/driver/cos/cos.go
Normal file
@@ -0,0 +1,588 @@
|
||||
package cos
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/google/go-querystring/query"
|
||||
"github.com/samber/lo"
|
||||
cossdk "github.com/tencentyun/cos-go-sdk-v5"
|
||||
)
|
||||
|
||||
// UploadPolicy is the Tencent Cloud COS upload policy document.
type UploadPolicy struct {
	Expiration string        `json:"expiration"`
	Conditions []interface{} `json:"conditions"`
}
|
||||
|
||||
// MetaData holds object metadata read back from COS response headers.
type MetaData struct {
	Size        uint64 // object size in bytes
	CallbackKey string // value of the x-cos-meta-key header
	CallbackURL string // value of the x-cos-meta-callback header
}
|
||||
|
||||
// urlOption carries optional query parameters appended to object URLs
// (serialized via go-querystring `url` tags).
type urlOption struct {
	Speed              int64   `url:"x-cos-traffic-limit,omitempty"`
	ContentDescription string  `url:"response-content-disposition,omitempty"`
	Exif               *string `url:"exif,omitempty"`
	CiProcess          string  `url:"ci-process,omitempty"`
}
|
||||
|
||||
type (
	// CosParts records one completed part of a multipart upload, used to
	// assemble the CompleteMultipartUpload request.
	CosParts struct {
		ETag       string
		PartNumber int
	}
)
|
||||
|
||||
// Driver is the Tencent Cloud COS storage driver.
type Driver struct {
	policy     *ent.StoragePolicy
	client     *cossdk.Client
	settings   setting.Provider
	config     conf.ConfigProvider
	httpClient request.Client
	l          logging.Logger
	mime       mime.MimeDetector

	// chunkSize is the part size used for multipart uploads, resolved in New.
	chunkSize int64
}
|
||||
|
||||
const (
	// MultiPartUploadThreshold is the file size at or above which server-side
	// uploads switch to multipart upload.
	MultiPartUploadThreshold int64 = 5 * (1 << 30) // 5GB

	maxDeleteBatch        = 1000                           // max objects per DeleteMulti request
	chunkRetrySleep       = time.Duration(5) * time.Second // backoff between chunk upload retries
	overwriteOptionHeader = "x-cos-forbid-overwrite"
	partNumberParam       = "partNumber"
	uploadIdParam         = "uploadId"
	contentTypeHeader     = "Content-Type"
	contentLengthHeader   = "Content-Length"
)
|
||||
|
||||
var (
	// features holds this driver's static capability flags; populated in init.
	features = &boolset.BooleanSet{}
)
|
||||
|
||||
// init configures package-level COS SDK signing behavior and registers the
// driver's static capabilities.
func init() {
	// Exclude host/origin from signature calculation — presumably so signed
	// requests stay valid when forwarded through proxies; TODO confirm.
	cossdk.SetNeedSignHeaders("host", false)
	cossdk.SetNeedSignHeaders("origin", false)
	boolset.Sets(map[driver.HandlerCapability]bool{
		driver.HandlerCapabilityUploadSentinelRequired: true,
	}, features)
}
|
||||
|
||||
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
|
||||
config conf.ConfigProvider, l logging.Logger, mime mime.MimeDetector) (*Driver, error) {
|
||||
chunkSize := policy.Settings.ChunkSize
|
||||
if policy.Settings.ChunkSize == 0 {
|
||||
chunkSize = 25 << 20 // 25 MB
|
||||
}
|
||||
|
||||
driver := &Driver{
|
||||
policy: policy,
|
||||
settings: settings,
|
||||
chunkSize: chunkSize,
|
||||
config: config,
|
||||
l: l,
|
||||
mime: mime,
|
||||
httpClient: request.NewClient(config, request.WithLogger(l)),
|
||||
}
|
||||
|
||||
u, err := url.Parse(policy.Server)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse COS bucket server url: %w", err)
|
||||
}
|
||||
driver.client = cossdk.NewClient(&cossdk.BaseURL{BucketURL: u}, &http.Client{
|
||||
Transport: &cossdk.AuthorizationTransport{
|
||||
SecretID: policy.AccessKey,
|
||||
SecretKey: policy.SecretKey,
|
||||
},
|
||||
})
|
||||
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
//
|
||||
//// List 列出COS文件
|
||||
//func (handler Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) {
|
||||
// // 初始化列目录参数
|
||||
// opt := &cossdk.BucketGetOptions{
|
||||
// Prefix: strings.TrimPrefix(base, "/"),
|
||||
// EncodingType: "",
|
||||
// MaxKeys: 1000,
|
||||
// }
|
||||
// // 是否为递归列出
|
||||
// if !recursive {
|
||||
// opt.Delimiter = "/"
|
||||
// }
|
||||
// // 手动补齐结尾的slash
|
||||
// if opt.Prefix != "" {
|
||||
// opt.Prefix += "/"
|
||||
// }
|
||||
//
|
||||
// var (
|
||||
// marker string
|
||||
// objects []cossdk.Object
|
||||
// commons []string
|
||||
// )
|
||||
//
|
||||
// for {
|
||||
// res, _, err := handler.client.Bucket.Get(ctx, opt)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// objects = append(objects, res.Contents...)
|
||||
// commons = append(commons, res.CommonPrefixes...)
|
||||
// // 如果本次未列取完,则继续使用marker获取结果
|
||||
// marker = res.NextMarker
|
||||
// // marker 为空时结果列取完毕,跳出
|
||||
// if marker == "" {
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// // 处理列取结果
|
||||
// res := make([]response.Object, 0, len(objects)+len(commons))
|
||||
// // 处理目录
|
||||
// for _, object := range commons {
|
||||
// rel, err := filepath.Rel(opt.Prefix, object)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(object),
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: 0,
|
||||
// IsDir: true,
|
||||
// LastModify: time.Now(),
|
||||
// })
|
||||
// }
|
||||
// // 处理文件
|
||||
// for _, object := range objects {
|
||||
// rel, err := filepath.Rel(opt.Prefix, object.Key)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(object.Key),
|
||||
// Source: object.Key,
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: uint64(object.Size),
|
||||
// IsDir: false,
|
||||
// LastModify: time.Now(),
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// return res, nil
|
||||
//
|
||||
//}
|
||||
|
||||
// CORS 创建跨域策略
|
||||
func (handler Driver) CORS() error {
|
||||
_, err := handler.client.Bucket.PutCORS(context.Background(), &cossdk.BucketPutCORSOptions{
|
||||
Rules: []cossdk.BucketCORSRule{{
|
||||
AllowedMethods: []string{
|
||||
"GET",
|
||||
"POST",
|
||||
"PUT",
|
||||
"DELETE",
|
||||
"HEAD",
|
||||
},
|
||||
AllowedOrigins: []string{"*"},
|
||||
AllowedHeaders: []string{"*"},
|
||||
MaxAgeSeconds: 3600,
|
||||
ExposeHeaders: []string{"ETag"},
|
||||
}},
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Open is not supported by the COS driver: remote objects cannot be exposed
// as local *os.File handles.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	return nil, errors.New("not implemented")
}
|
||||
|
||||
// Put 将文件流保存到指定目录
|
||||
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
|
||||
defer file.Close()
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
// 是否允许覆盖
|
||||
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
|
||||
opt := &cossdk.ObjectPutHeaderOptions{
|
||||
ContentType: mimeType,
|
||||
XOptionHeader: &http.Header{
|
||||
overwriteOptionHeader: []string{fmt.Sprintf("%t", overwrite)},
|
||||
},
|
||||
}
|
||||
|
||||
// 小文件直接上传
|
||||
if file.Props.Size < MultiPartUploadThreshold {
|
||||
_, err := handler.client.Object.Put(ctx, file.Props.SavePath, file, &cossdk.ObjectPutOptions{
|
||||
ObjectPutHeaderOptions: opt,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
imur, _, err := handler.client.Object.InitiateMultipartUpload(ctx, file.Props.SavePath, &cossdk.InitiateMultipartUploadOptions{
|
||||
ObjectPutHeaderOptions: opt,
|
||||
})
|
||||
|
||||
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{
|
||||
Max: handler.settings.ChunkRetryLimit(ctx),
|
||||
Sleep: chunkRetrySleep,
|
||||
}, handler.settings.UseChunkBuffer(ctx), handler.l, handler.settings.TempPath(ctx))
|
||||
|
||||
parts := make([]CosParts, 0, chunks.Num())
|
||||
uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
|
||||
res, err := handler.client.Object.UploadPart(ctx, file.Props.SavePath, imur.UploadID, current.Index()+1, content, &cossdk.ObjectUploadPartOptions{
|
||||
ContentLength: current.Length(),
|
||||
})
|
||||
if err == nil {
|
||||
parts = append(parts, CosParts{
|
||||
ETag: res.Header.Get("ETag"),
|
||||
PartNumber: current.Index() + 1,
|
||||
})
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for chunks.Next() {
|
||||
if err := chunks.Process(uploadFunc); err != nil {
|
||||
handler.cancelUpload(file.Props.SavePath, imur.UploadID)
|
||||
return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
|
||||
}
|
||||
}
|
||||
|
||||
_, _, err = handler.client.Object.CompleteMultipartUpload(ctx, file.Props.SavePath, imur.UploadID, &cossdk.CompleteMultipartUploadOptions{
|
||||
Parts: lo.Map(parts, func(v CosParts, i int) cossdk.Object {
|
||||
return cossdk.Object{
|
||||
ETag: v.ETag,
|
||||
PartNumber: v.PartNumber,
|
||||
}
|
||||
}),
|
||||
XOptionHeader: &http.Header{
|
||||
overwriteOptionHeader: []string{fmt.Sprintf("%t", overwrite)},
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
handler.cancelUpload(file.Props.SavePath, imur.UploadID)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete 删除一个或多个文件,
|
||||
// 返回未删除的文件,及遇到的最后一个错误
|
||||
func (handler Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
|
||||
groups := lo.Chunk(files, maxDeleteBatch)
|
||||
failed := make([]string, 0)
|
||||
var lastError error
|
||||
for index, group := range groups {
|
||||
handler.l.Debug("Process delete group #%d: %v", index, group)
|
||||
res, _, err := handler.client.Object.DeleteMulti(ctx,
|
||||
&cossdk.ObjectDeleteMultiOptions{
|
||||
Objects: lo.Map(group, func(item string, index int) cossdk.Object {
|
||||
return cossdk.Object{Key: item}
|
||||
}),
|
||||
Quiet: true,
|
||||
})
|
||||
if err != nil {
|
||||
lastError = err
|
||||
failed = append(failed, group...)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, v := range res.Errors {
|
||||
handler.l.Debug("Failed to delete file: %s, Code:%s, Message:%s", v.Key, v.Code, v.Key)
|
||||
failed = append(failed, v.Key)
|
||||
}
|
||||
}
|
||||
|
||||
if len(failed) > 0 && lastError == nil {
|
||||
lastError = fmt.Errorf("failed to delete files: %v", failed)
|
||||
}
|
||||
|
||||
return failed, lastError
|
||||
}
|
||||
|
||||
// Thumb 获取文件缩略图
|
||||
func (handler Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
|
||||
w, h := handler.settings.ThumbSize(ctx)
|
||||
thumbParam := fmt.Sprintf("imageMogr2/thumbnail/%dx%d", w, h)
|
||||
|
||||
source, err := handler.signSourceURL(
|
||||
ctx,
|
||||
e.Source(),
|
||||
expire,
|
||||
&urlOption{},
|
||||
)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
thumbURL, _ := url.Parse(source)
|
||||
thumbQuery := thumbURL.Query()
|
||||
thumbQuery.Add(thumbParam, "")
|
||||
thumbURL.RawQuery = thumbQuery.Encode()
|
||||
|
||||
return thumbURL.String(), nil
|
||||
}
|
||||
|
||||
// Source 获取外链URL
|
||||
func (handler Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
|
||||
// 添加各项设置
|
||||
options := urlOption{}
|
||||
if args.Speed > 0 {
|
||||
if args.Speed < 819200 {
|
||||
args.Speed = 819200
|
||||
}
|
||||
if args.Speed > 838860800 {
|
||||
args.Speed = 838860800
|
||||
}
|
||||
options.Speed = args.Speed
|
||||
}
|
||||
if args.IsDownload {
|
||||
encodedFilename := url.PathEscape(args.DisplayName)
|
||||
options.ContentDescription = fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
|
||||
encodedFilename, encodedFilename)
|
||||
}
|
||||
|
||||
return handler.signSourceURL(ctx, e.Source(), args.Expire, &options)
|
||||
}
|
||||
|
||||
// signSourceURL builds a download URL for the object at path. Public buckets
// (or source-authed custom proxies) get a plain URL assembled from the policy
// server; private buckets get a presigned URL valid until expire, or roughly
// 20 years when expire is nil.
func (handler Driver) signSourceURL(ctx context.Context, path string, expire *time.Time, options *urlOption) (string, error) {
	// Public buckets do not require signing.
	if !handler.policy.IsPrivate || (handler.policy.Settings.SourceAuth && handler.policy.Settings.CustomProxy) {
		file, err := url.Parse(handler.policy.Server)
		if err != nil {
			return "", err
		}

		file.Path = path

		// Unsigned URLs do not support overriding response headers.
		options.ContentDescription = ""

		optionQuery, err := query.Values(*options)
		if err != nil {
			return "", err
		}
		file.RawQuery = optionQuery.Encode()

		return file.String(), nil
	}

	ttl := time.Duration(0)
	if expire != nil {
		ttl = time.Until(*expire)
	} else {
		// 20 years for permanent link
		ttl = time.Duration(24) * time.Hour * 365 * 20
	}

	presignedURL, err := handler.client.Object.GetPresignedURL(ctx, http.MethodGet, path,
		handler.policy.AccessKey, handler.policy.SecretKey, ttl, options)
	if err != nil {
		return "", err
	}

	return presignedURL.String(), nil
}
|
||||
|
||||
// Token 获取上传策略和认证Token
|
||||
func (handler Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
// 生成回调地址
|
||||
siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
|
||||
// 在从机端创建上传会话
|
||||
uploadSession.ChunkSize = handler.chunkSize
|
||||
uploadSession.Callback = routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeCos, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
// 初始化分片上传
|
||||
opt := &cossdk.ObjectPutHeaderOptions{
|
||||
ContentType: mimeType,
|
||||
XOptionHeader: &http.Header{
|
||||
overwriteOptionHeader: []string{"true"},
|
||||
},
|
||||
}
|
||||
|
||||
imur, _, err := handler.client.Object.InitiateMultipartUpload(ctx, file.Props.SavePath, &cossdk.InitiateMultipartUploadOptions{
|
||||
ObjectPutHeaderOptions: opt,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize multipart upload: %w", err)
|
||||
}
|
||||
uploadSession.UploadID = imur.UploadID
|
||||
|
||||
// 为每个分片签名上传 URL
|
||||
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{}, false, handler.l, "")
|
||||
urls := make([]string, chunks.Num())
|
||||
ttl := time.Until(uploadSession.Props.ExpireAt)
|
||||
for chunks.Next() {
|
||||
err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error {
|
||||
signedURL, err := handler.client.Object.GetPresignedURL(
|
||||
ctx,
|
||||
http.MethodPut,
|
||||
file.Props.SavePath,
|
||||
handler.policy.AccessKey,
|
||||
handler.policy.SecretKey,
|
||||
ttl,
|
||||
&cossdk.PresignedURLOptions{
|
||||
Query: &url.Values{
|
||||
partNumberParam: []string{fmt.Sprintf("%d", c.Index()+1)},
|
||||
uploadIdParam: []string{imur.UploadID},
|
||||
},
|
||||
Header: &http.Header{
|
||||
contentTypeHeader: []string{"application/octet-stream"},
|
||||
contentLengthHeader: []string{fmt.Sprintf("%d", c.Length())},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
urls[c.Index()] = signedURL.String()
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// 签名完成分片上传的URL
|
||||
completeURL, err := handler.client.Object.GetPresignedURL(
|
||||
ctx,
|
||||
http.MethodPost,
|
||||
file.Props.SavePath,
|
||||
handler.policy.AccessKey,
|
||||
handler.policy.SecretKey,
|
||||
time.Until(uploadSession.Props.ExpireAt),
|
||||
&cossdk.PresignedURLOptions{
|
||||
Query: &url.Values{
|
||||
uploadIdParam: []string{imur.UploadID},
|
||||
},
|
||||
Header: &http.Header{
|
||||
overwriteOptionHeader: []string{"true"},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &fs.UploadCredential{
|
||||
UploadID: imur.UploadID,
|
||||
UploadURLs: urls,
|
||||
CompleteURL: completeURL.String(),
|
||||
SessionID: uploadSession.Props.UploadSessionID,
|
||||
ChunkSize: handler.chunkSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CancelToken aborts the multipart upload tied to the session, invalidating
// previously issued upload credentials.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	_, err := handler.client.Object.AbortMultipartUpload(ctx, uploadSession.Props.SavePath, uploadSession.UploadID)
	return err
}
|
||||
|
||||
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
|
||||
if session.SentinelTaskID == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Make sure uploaded file size is correct
|
||||
res, err := handler.client.Object.Head(ctx, session.Props.SavePath, &cossdk.ObjectHeadOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get uploaded file size: %w", err)
|
||||
}
|
||||
|
||||
if res.ContentLength != session.Props.Size {
|
||||
return serializer.NewError(
|
||||
serializer.CodeMetaMismatch,
|
||||
fmt.Sprintf("File size not match, expected: %d, actual: %d", session.Props.Size, res.ContentLength),
|
||||
nil,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (handler *Driver) Capabilities() *driver.Capabilities {
|
||||
mediaMetaExts := handler.policy.Settings.MediaMetaExts
|
||||
if !handler.policy.Settings.NativeMediaProcessing {
|
||||
mediaMetaExts = nil
|
||||
}
|
||||
return &driver.Capabilities{
|
||||
StaticFeatures: features,
|
||||
MediaMetaSupportedExts: mediaMetaExts,
|
||||
MediaMetaProxy: handler.policy.Settings.MediaMetaGeneratorProxy,
|
||||
ThumbSupportedExts: handler.policy.Settings.ThumbExts,
|
||||
ThumbProxy: handler.policy.Settings.ThumbGeneratorProxy,
|
||||
ThumbMaxSize: handler.policy.Settings.ThumbMaxSize,
|
||||
ThumbSupportAllExts: handler.policy.Settings.ThumbSupportAllExts,
|
||||
}
|
||||
}
|
||||
|
||||
// Meta fetches object metadata via a HEAD request: size plus the callback
// key and URL stored in x-cos-meta-* headers.
func (handler Driver) Meta(ctx context.Context, path string) (*MetaData, error) {
	res, err := handler.client.Object.Head(ctx, path, &cossdk.ObjectHeadOptions{})
	if err != nil {
		return nil, err
	}
	return &MetaData{
		Size:        uint64(res.ContentLength),
		CallbackKey: res.Header.Get("x-cos-meta-key"),
		CallbackURL: res.Header.Get("x-cos-meta-callback"),
	}, nil
}
|
||||
|
||||
// MediaMeta extracts media metadata for the object: EXIF data for supported
// image extensions, stream/format info (CI videoinfo) for everything else.
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
	if util.ContainsString(supportedImageExt, ext) {
		return handler.extractImageMeta(ctx, path)
	}

	return handler.extractStreamMeta(ctx, path)
}
|
||||
|
||||
// LocalPath always returns "" — COS objects have no local filesystem path.
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	return ""
}
|
||||
|
||||
// cancelUpload best-effort aborts a multipart upload so COS releases stored
// parts; failures are only logged since this runs on cleanup paths.
func (handler *Driver) cancelUpload(path, uploadId string) {
	if _, err := handler.client.Object.AbortMultipartUpload(context.Background(), path, uploadId); err != nil {
		handler.l.Warning("failed to abort multipart upload: %s", err)
	}
}
|
||||
294
pkg/filemanager/driver/cos/media.go
Normal file
294
pkg/filemanager/driver/cos/media.go
Normal file
@@ -0,0 +1,294 @@
|
||||
package cos
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/samber/lo"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// mediaInfoTTL is how long signed media-info URLs remain valid.
	mediaInfoTTL = time.Duration(10) * time.Minute
	// videoInfo is the CI ci-process name for stream/format extraction.
	videoInfo = "videoinfo"
)
|
||||
|
||||
var (
	// supportedImageExt lists extensions routed to EXIF extraction rather
	// than CI videoinfo.
	supportedImageExt = []string{"jpg", "jpeg", "png", "gif", "bmp", "webp", "tiff", "heic", "heif"}
)
|
||||
|
||||
type (
	// ImageProp is one EXIF property value in a COS imageInfo response.
	ImageProp struct {
		Value string `json:"val"`
	}
	// ImageInfo maps EXIF field names to their values.
	ImageInfo map[string]ImageProp
	// Error is the standard COS XML error payload.
	Error struct {
		XMLName   xml.Name `xml:"Error"`
		Code      string   `xml:"Code"`
		Message   string   `xml:"Message"`
		RequestId string   `xml:"RequestId"`
	}
	// Video describes a video stream in a CI videoinfo response.
	Video struct {
		Index          int    `xml:"Index"`
		CodecName      string `xml:"CodecName"`
		CodecLongName  string `xml:"CodecLongName"`
		CodecTimeBase  string `xml:"CodecTimeBase"`
		CodecTagString string `xml:"CodecTagString"`
		CodecTag       string `xml:"CodecTag"`
		ColorPrimaries string `xml:"ColorPrimaries"`
		ColorRange     string `xml:"ColorRange"`
		ColorTransfer  string `xml:"ColorTransfer"`
		Profile        string `xml:"Profile"`
		Width          int    `xml:"Width"`
		Height         int    `xml:"Height"`
		HasBFrame      string `xml:"HasBFrame"`
		RefFrames      string `xml:"RefFrames"`
		Sar            string `xml:"Sar"`
		Dar            string `xml:"Dar"`
		PixFormat      string `xml:"PixFormat"`
		FieldOrder     string `xml:"FieldOrder"`
		Level          string `xml:"Level"`
		Fps            string `xml:"Fps"`
		AvgFps         string `xml:"AvgFps"`
		Timebase       string `xml:"Timebase"`
		StartTime      string `xml:"StartTime"`
		Duration       string `xml:"Duration"`
		Bitrate        string `xml:"Bitrate"`
		NumFrames      string `xml:"NumFrames"`
		Language       string `xml:"Language"`
	}
	// Audio describes an audio stream in a CI videoinfo response.
	Audio struct {
		Index          int    `xml:"Index"`
		CodecName      string `xml:"CodecName"`
		CodecLongName  string `xml:"CodecLongName"`
		CodecTimeBase  string `xml:"CodecTimeBase"`
		CodecTagString string `xml:"CodecTagString"`
		CodecTag       string `xml:"CodecTag"`
		SampleFmt      string `xml:"SampleFmt"`
		SampleRate     string `xml:"SampleRate"`
		Channel        string `xml:"Channel"`
		ChannelLayout  string `xml:"ChannelLayout"`
		Timebase       string `xml:"Timebase"`
		StartTime      string `xml:"StartTime"`
		Duration       string `xml:"Duration"`
		Bitrate        string `xml:"Bitrate"`
		Language       string `xml:"Language"`
	}
	// Subtitle describes a subtitle stream in a CI videoinfo response.
	Subtitle struct {
		Index    string `xml:"Index"`
		Language string `xml:"Language"`
	}
	// Response is the top-level CI videoinfo XML document.
	Response struct {
		XMLName   xml.Name `xml:"Response"`
		MediaInfo struct {
			Stream struct {
				Video    []Video    `xml:"Video"`
				Audio    []Audio    `xml:"Audio"`
				Subtitle []Subtitle `xml:"Subtitle"`
			} `xml:"Stream"`
			Format struct {
				NumStream      string `xml:"NumStream"`
				NumProgram     string `xml:"NumProgram"`
				FormatName     string `xml:"FormatName"`
				FormatLongName string `xml:"FormatLongName"`
				StartTime      string `xml:"StartTime"`
				Duration       string `xml:"Duration"`
				Bitrate        string `xml:"Bitrate"`
				Size           string `xml:"Size"`
			} `xml:"Format"`
		} `xml:"MediaInfo"`
	}
)
|
||||
|
||||
func (handler *Driver) extractStreamMeta(ctx context.Context, path string) ([]driver.MediaMeta, error) {
|
||||
resp, err := handler.extractMediaInfo(ctx, path, &urlOption{CiProcess: videoInfo})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var info Response
|
||||
if err := xml.Unmarshal([]byte(resp), &info); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal media info: %w", err)
|
||||
}
|
||||
|
||||
streams := lo.Map(info.MediaInfo.Stream.Video, func(stream Video, index int) mediameta.Stream {
|
||||
return mediameta.Stream{
|
||||
Index: stream.Index,
|
||||
CodecName: stream.CodecName,
|
||||
CodecLongName: stream.CodecLongName,
|
||||
CodecType: "video",
|
||||
Width: stream.Width,
|
||||
Height: stream.Height,
|
||||
Bitrate: stream.Bitrate,
|
||||
}
|
||||
})
|
||||
streams = append(streams, lo.Map(info.MediaInfo.Stream.Audio, func(stream Audio, index int) mediameta.Stream {
|
||||
return mediameta.Stream{
|
||||
Index: stream.Index,
|
||||
CodecName: stream.CodecName,
|
||||
CodecLongName: stream.CodecLongName,
|
||||
CodecType: "audio",
|
||||
Bitrate: stream.Bitrate,
|
||||
}
|
||||
})...)
|
||||
|
||||
metas := make([]driver.MediaMeta, 0)
|
||||
metas = append(metas, mediameta.ProbeMetaTransform(&mediameta.FFProbeMeta{
|
||||
Format: &mediameta.Format{
|
||||
FormatName: info.MediaInfo.Format.FormatName,
|
||||
FormatLongName: info.MediaInfo.Format.FormatLongName,
|
||||
Duration: info.MediaInfo.Format.Duration,
|
||||
Bitrate: info.MediaInfo.Format.Bitrate,
|
||||
},
|
||||
Streams: streams,
|
||||
})...)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (handler *Driver) extractImageMeta(ctx context.Context, path string) ([]driver.MediaMeta, error) {
|
||||
exif := ""
|
||||
resp, err := handler.extractMediaInfo(ctx, path, &urlOption{
|
||||
Exif: &exif,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var imageInfo ImageInfo
|
||||
if err := json.Unmarshal([]byte(resp), &imageInfo); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal media info: %w", err)
|
||||
}
|
||||
|
||||
metas := make([]driver.MediaMeta, 0)
|
||||
exifMap := lo.MapEntries(imageInfo, func(key string, value ImageProp) (string, string) {
|
||||
return key, value.Value
|
||||
})
|
||||
metas = append(metas, mediameta.ExtractExifMap(exifMap, time.Time{})...)
|
||||
metas = append(metas, parseGpsInfo(imageInfo)...)
|
||||
for i := 0; i < len(metas); i++ {
|
||||
metas[i].Type = driver.MetaTypeExif
|
||||
}
|
||||
|
||||
return metas, nil
|
||||
}
|
||||
|
||||
// extractMediaInfo sends an API call to the COS service by issuing a signed
// GET on the object with the given URL options, returning the raw response
// body as a string.
func (handler *Driver) extractMediaInfo(ctx context.Context, path string, opt *urlOption) (string, error) {
	mediaInfoExpire := time.Now().Add(mediaInfoTTL)
	thumbURL, err := handler.signSourceURL(
		ctx,
		path,
		&mediaInfoExpire,
		opt,
	)
	if err != nil {
		return "", fmt.Errorf("failed to sign media info url: %w", err)
	}

	resp, err := handler.httpClient.
		Request(http.MethodGet, thumbURL, nil, request.WithContext(ctx)).
		CheckHTTPResponse(http.StatusOK).
		GetResponseIgnoreErr()
	if err != nil {
		// The body may carry a COS XML error with more detail than err itself.
		return "", handleCosError(resp, err)
	}

	return resp, nil
}
|
||||
|
||||
// parseGpsInfo converts the four EXIF GPS fields into decimal lat/lng media
// metadata. Returns nil when any field is missing or a value fails to parse.
func parseGpsInfo(imageInfo ImageInfo) []driver.MediaMeta {
	latitude := imageInfo["GPSLatitude"]   // e.g. 31deg 16.26808'
	longitude := imageInfo["GPSLongitude"] // e.g. 120deg 42.91039'
	latRef := imageInfo["GPSLatitudeRef"]  // North
	lonRef := imageInfo["GPSLongitudeRef"] // East

	// Make sure all values exist in the map.
	if latitude.Value == "" || longitude.Value == "" || latRef.Value == "" || lonRef.Value == "" {
		return nil
	}

	lat := parseRawGPS(latitude.Value, latRef.Value)
	lon := parseRawGPS(longitude.Value, lonRef.Value)
	if !math.IsNaN(lat) && !math.IsNaN(lon) {
		lat, lng := mediameta.NormalizeGPS(lat, lon)
		return []driver.MediaMeta{{
			Key:   mediameta.GpsLat,
			Value: fmt.Sprintf("%f", lat),
		}, {
			Key:   mediameta.GpsLng,
			Value: fmt.Sprintf("%f", lng),
		}}
	}

	return nil
}
|
||||
|
||||
func parseRawGPS(gpsStr string, ref string) float64 {
|
||||
elem := strings.Split(gpsStr, " ")
|
||||
if len(elem) < 1 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var (
|
||||
deg float64
|
||||
minutes float64
|
||||
seconds float64
|
||||
)
|
||||
|
||||
deg = getGpsElemValue(elem[0])
|
||||
if len(elem) >= 2 {
|
||||
minutes = getGpsElemValue(elem[1])
|
||||
}
|
||||
if len(elem) >= 3 {
|
||||
seconds = getGpsElemValue(elem[2])
|
||||
}
|
||||
|
||||
decimal := deg + minutes/60.0 + seconds/3600.0
|
||||
|
||||
if ref == "S" || ref == "W" {
|
||||
return -decimal
|
||||
}
|
||||
|
||||
return decimal
|
||||
}
|
||||
|
||||
// getGpsElemValue parses a single "numerator/denominator" rational element
// into a float. Malformed input or a zero denominator yields 0.
func getGpsElemValue(elm string) float64 {
	numStr, denStr, ok := strings.Cut(elm, "/")
	if !ok {
		return 0
	}

	numerator, numErr := strconv.ParseFloat(numStr, 64)
	denominator, denErr := strconv.ParseFloat(denStr, 64)
	if numErr != nil || denErr != nil || denominator == 0 {
		return 0
	}

	return numerator / denominator
}
|
||||
|
||||
func handleCosError(resp string, originErr error) error {
|
||||
if resp == "" {
|
||||
return originErr
|
||||
}
|
||||
|
||||
var err Error
|
||||
if err := xml.Unmarshal([]byte(resp), &err); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal cos error: %w", err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("cos error: %s", err.Message)
|
||||
}
|
||||
118
pkg/filemanager/driver/cos/scf.go
Normal file
118
pkg/filemanager/driver/cos/scf.go
Normal file
@@ -0,0 +1,118 @@
|
||||
package cos
|
||||
|
||||
// TODO: revisit para error
//
// scfFunc is the Python source for an SCF (Serverless Cloud Function)
// deployed with a COS trigger: on object creation it reads the callback URL
// from the object's x-cos-meta-callback metadata and GETs it, notifying
// Cloudreve of the finished upload.
const scfFunc = `# -*- coding: utf8 -*-
# SCF配置COS触发,向 Cloudreve 发送回调
from qcloud_cos_v5 import CosConfig
from qcloud_cos_v5 import CosS3Client
from qcloud_cos_v5 import CosServiceError
from qcloud_cos_v5 import CosClientError
import sys
import logging
import requests

logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logging = logging.getLogger()


def main_handler(event, context):
    logging.info("start main handler")
    for record in event['Records']:
        try:
            if "x-cos-meta-callback" not in record['cos']['cosObject']['meta']:
                logging.info("Cannot find callback URL, skiped.")
                return 'Success'
            callback = record['cos']['cosObject']['meta']['x-cos-meta-callback']
            key = record['cos']['cosObject']['key']
            logging.info("Callback URL is " + callback)

            r = requests.get(callback)
            print(r.text)

        except Exception as e:
            print(e)
            print('Error getting object {} callback url. '.format(key))
            raise e
            return "Fail"

    return "Success"
`
|
||||
|
||||
//
|
||||
//// CreateSCF 创建回调云函数
|
||||
//func CreateSCF(policy *model.Policy, region string) error {
|
||||
// // 初始化客户端
|
||||
// credential := common.NewCredential(
|
||||
// policy.AccessKey,
|
||||
// policy.SecretKey,
|
||||
// )
|
||||
// cpf := profile.NewClientProfile()
|
||||
// client, err := scf.NewClient(credential, region, cpf)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// // 创建回调代码数据
|
||||
// buff := &bytes.Buffer{}
|
||||
// bs64 := base64.NewEncoder(base64.StdEncoding, buff)
|
||||
// zipWriter := zip.NewWriter(bs64)
|
||||
// header := zip.FileHeader{
|
||||
// Name: "callback.py",
|
||||
// Method: zip.Deflate,
|
||||
// }
|
||||
// writer, err := zipWriter.CreateHeader(&header)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// _, err = io.Copy(writer, strings.NewReader(scfFunc))
|
||||
// zipWriter.Close()
|
||||
//
|
||||
// // 创建云函数
|
||||
// req := scf.NewCreateFunctionRequest()
|
||||
// funcName := "cloudreve_" + hashid.HashID(policy.ID, hashid.PolicyID) + strconv.FormatInt(time.Now().Unix(), 10)
|
||||
// zipFileBytes, _ := ioutil.ReadAll(buff)
|
||||
// zipFileStr := string(zipFileBytes)
|
||||
// codeSource := "ZipFile"
|
||||
// handler := "callback.main_handler"
|
||||
// desc := "Cloudreve 用回调函数"
|
||||
// timeout := int64(60)
|
||||
// runtime := "Python3.6"
|
||||
// req.FunctionName = &funcName
|
||||
// req.Code = &scf.Code{
|
||||
// ZipFile: &zipFileStr,
|
||||
// }
|
||||
// req.Handler = &handler
|
||||
// req.Description = &desc
|
||||
// req.Timeout = &timeout
|
||||
// req.Runtime = &runtime
|
||||
// req.CodeSource = &codeSource
|
||||
//
|
||||
// _, err = client.CreateFunction(req)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// time.Sleep(time.Duration(5) * time.Second)
|
||||
//
|
||||
// // 创建触发器
|
||||
// server, _ := url.Parse(policy.Server)
|
||||
// triggerType := "cos"
|
||||
// triggerDesc := `{"event":"cos:ObjectCreated:Post","filter":{"Prefix":"","Suffix":""}}`
|
||||
// enable := "OPEN"
|
||||
//
|
||||
// trigger := scf.NewCreateTriggerRequest()
|
||||
// trigger.FunctionName = &funcName
|
||||
// trigger.TriggerName = &server.Host
|
||||
// trigger.Type = &triggerType
|
||||
// trigger.TriggerDesc = &triggerDesc
|
||||
// trigger.Enable = &enable
|
||||
//
|
||||
// _, err = client.CreateTrigger(trigger)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// return nil
|
||||
//}
|
||||
122
pkg/filemanager/driver/handler.go
Normal file
122
pkg/filemanager/driver/handler.go
Normal file
@@ -0,0 +1,122 @@
|
||||
package driver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
)
|
||||
|
||||
// Capability flags describing optional behaviors a storage Handler may
// support; they are stored in the boolset on Capabilities.StaticFeatures.
const (
	// HandlerCapabilityProxyRequired this handler requires Cloudreve's proxy to get file content
	HandlerCapabilityProxyRequired HandlerCapability = iota
	// HandlerCapabilityInboundGet this handler supports directly get file's RSCloser, usually
	// indicates that the file is stored in the same machine as Cloudreve
	HandlerCapabilityInboundGet
	// HandlerCapabilityUploadSentinelRequired this handler does not support compliance callback mechanism,
	// thus it requires Cloudreve's sentinel to guarantee the upload is under control. Cloudreve will try
	// to delete the placeholder file and cancel the upload session if upload callback is not made after upload
	// session expire.
	HandlerCapabilityUploadSentinelRequired
)
|
||||
|
||||
type (
	// MetaType classifies a media metadata entry (see the MetaType constants).
	MetaType string
	// MediaMeta is a single key/value media metadata entry extracted from a file.
	MediaMeta struct {
		Key   string   `json:"key"`
		Value string   `json:"value"`
		Type  MetaType `json:"type"`
	}

	// HandlerCapability is an index into the handler capability boolset; see the
	// HandlerCapability* constants for defined values.
	HandlerCapability int

	// GetSourceArgs carries options for generating a source/download URL.
	GetSourceArgs struct {
		// Expire is the optional expiration time of the generated URL.
		Expire *time.Time
		// IsDownload requests a URL that triggers a download rather than inline display.
		IsDownload bool
		// Speed limits the download bandwidth; 0 means unlimited.
		Speed int64
		// DisplayName is the file name presented to the downloader.
		DisplayName string
	}

	// Handler is the storage policy adapter interface implemented by each storage driver.
	Handler interface {
		// Put uploads the file to the save path given in the request properties.
		// When the context is canceled, the upload should be aborted and any
		// temporary files cleaned up.
		Put(ctx context.Context, file *fs.UploadRequest) error

		// Delete removes one or more files by path, returning the list of paths
		// that failed to delete together with an error.
		Delete(ctx context.Context, files ...string) ([]string, error)

		// Open physical files. Only implemented if HandlerCapabilityInboundGet capability is set.
		// Returns file path and an os.File object.
		Open(ctx context.Context, path string) (*os.File, error)

		// LocalPath returns the local path of a file.
		// Only implemented if HandlerCapabilityInboundGet capability is set.
		LocalPath(ctx context.Context, path string) string

		// Thumb returns the URL for a thumbnail of given entity.
		Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error)

		// Source returns an external/download URL for the entity; args controls
		// expiration, bandwidth limit and download behavior.
		Source(ctx context.Context, e fs.Entity, args *GetSourceArgs) (string, error)

		// Token issues an upload credential and signature for the session.
		Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error)

		// CancelToken cancels a previously created stateful upload credential.
		CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error

		// CompleteUpload completes a previously created upload session.
		CompleteUpload(ctx context.Context, session *fs.UploadSession) error

		// List recursively lists files and directories under path (excluding path
		// itself); returned object paths are rooted at path.
		// List(ctx context.Context, path string, recursive bool) ([]response.Object, error)

		// Capabilities returns the capabilities of this handler
		Capabilities() *Capabilities

		// MediaMeta extracts media metadata from the given file.
		MediaMeta(ctx context.Context, path, ext string) ([]MediaMeta, error)
	}

	// Capabilities describes the static feature set and limits of a Handler.
	Capabilities struct {
		StaticFeatures *boolset.BooleanSet
		// MaxSourceExpire indicates the maximum allowed expiration duration of a source URL
		MaxSourceExpire time.Duration
		// MinSourceExpire indicates the minimum allowed expiration duration of a source URL
		MinSourceExpire time.Duration
		// MediaMetaSupportedExts indicates the extensions of files that support media metadata. Empty list
		// indicates that no file supports extracting media metadata.
		MediaMetaSupportedExts []string
		// MediaMetaProxy indicates whether to generate media metadata using local generators.
		MediaMetaProxy bool
		// ThumbSupportedExts indicates the extensions of files that support thumbnail generation. Empty list
		// indicates that no file supports thumbnail generation.
		ThumbSupportedExts []string
		// ThumbSupportAllExts indicates whether to generate thumbnails for all files, regardless of their extensions.
		ThumbSupportAllExts bool
		// ThumbMaxSize indicates the maximum allowed size of a thumbnail. 0 indicates that no limit is set.
		ThumbMaxSize int64
		// ThumbProxy indicates whether to generate thumbnails using local generators.
		ThumbProxy bool
	}
)
|
||||
|
||||
// Defined media metadata types used on MediaMeta.Type.
const (
	MetaTypeExif MetaType = "exif"
	// NOTE(review): "MediaTypeMusic" breaks the MetaTypeXxx naming pattern of
	// its siblings; renaming would break the exported API, so it is only flagged here.
	MediaTypeMusic      MetaType = "music"
	MetaTypeStreamMedia MetaType = "stream"
)
|
||||
|
||||
// ForceUsePublicEndpointCtx is the context key under which the "force public
// endpoint" flag is stored; an empty struct is used so the key allocates nothing.
type ForceUsePublicEndpointCtx struct{}

// WithForcePublicEndpoint sets the context to force using public endpoint for supported storage policies.
func WithForcePublicEndpoint(ctx context.Context, value bool) context.Context {
	return context.WithValue(ctx, ForceUsePublicEndpointCtx{}, value)
}
|
||||
75
pkg/filemanager/driver/local/entity.go
Normal file
75
pkg/filemanager/driver/local/entity.go
Normal file
@@ -0,0 +1,75 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/gofrs/uuid"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewLocalFileEntity creates a new local file entity.
|
||||
func NewLocalFileEntity(t types.EntityType, src string) (fs.Entity, error) {
|
||||
info, err := os.Stat(util.RelativePath(src))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &localFileEntity{
|
||||
t: t,
|
||||
src: src,
|
||||
size: info.Size(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// localFileEntity is a minimal fs.Entity backed directly by a file on the
// local filesystem. It is detached from the database: identifiers, ownership
// and policy information all report zero values.
type localFileEntity struct {
	t    types.EntityType // entity type supplied at construction
	src  string           // raw source path supplied at construction
	size int64            // file size captured when the entity was created
}

// ID always returns 0; the entity is not persisted in the database.
func (l *localFileEntity) ID() int {
	return 0
}

// Type returns the entity type given at construction time.
func (l *localFileEntity) Type() types.EntityType {
	return l.t
}

// Size returns the file size captured when the entity was created.
func (l *localFileEntity) Size() int64 {
	return l.size
}

// UpdatedAt returns the current time on every call; no real modification
// timestamp is tracked for local entities.
func (l *localFileEntity) UpdatedAt() time.Time {
	return time.Now()
}

// CreatedAt returns the current time on every call; no real creation
// timestamp is tracked for local entities.
func (l *localFileEntity) CreatedAt() time.Time {
	return time.Now()
}

// CreatedBy returns nil; local entities have no owning user.
func (l *localFileEntity) CreatedBy() *ent.User {
	return nil
}

// Source returns the raw source path supplied at construction time.
func (l *localFileEntity) Source() string {
	return l.src
}

// ReferenceCount always reports a single reference.
func (l *localFileEntity) ReferenceCount() int {
	return 1
}

// PolicyID always returns 0; the entity is not bound to a storage policy.
func (l *localFileEntity) PolicyID() int {
	return 0
}

// UploadSessionID returns nil; local entities are never part of an upload session.
func (l *localFileEntity) UploadSessionID() *uuid.UUID {
	return nil
}

// Model returns nil; there is no backing database row.
func (l *localFileEntity) Model() *ent.Entity {
	return nil
}
|
||||
11
pkg/filemanager/driver/local/fallocate.go
Normal file
11
pkg/filemanager/driver/local/fallocate.go
Normal file
@@ -0,0 +1,11 @@
|
||||
//go:build !linux && !darwin
|
||||
// +build !linux,!darwin
|
||||
|
||||
package local
|
||||
|
||||
import "os"
|
||||
|
||||
// Fallocate is a no-op on non-Linux/Darwin platforms; preallocation is simply
// skipped there and files grow on demand.
func Fallocate(file *os.File, offset int64, length int64) error {
	return nil
}
|
||||
27
pkg/filemanager/driver/local/fallocate_darwin.go
Normal file
27
pkg/filemanager/driver/local/fallocate_darwin.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Fallocate preallocates length bytes starting at offset for file on Darwin.
// macOS has no fallocate(2); the F_PREALLOCATE fcntl is used instead, followed
// by ftruncate to extend the logical file size to the preallocated range.
func Fallocate(file *os.File, offset int64, length int64) error {
	var fst syscall.Fstore_t

	// First attempt: ask for a contiguous allocation covering [0, offset+length).
	fst.Flags = syscall.F_ALLOCATECONTIG
	fst.Posmode = syscall.F_PREALLOCATE
	fst.Offset = 0
	fst.Length = offset + length
	fst.Bytesalloc = 0

	// Check https://lists.apple.com/archives/darwin-dev/2007/Dec/msg00040.html
	_, _, err := syscall.Syscall(syscall.SYS_FCNTL, file.Fd(), syscall.F_PREALLOCATE, uintptr(unsafe.Pointer(&fst)))
	if err != syscall.Errno(0x0) {
		// Contiguous allocation failed; retry allowing non-contiguous blocks.
		// The result of the retry is deliberately ignored — the ftruncate
		// below still enforces the requested logical size either way.
		fst.Flags = syscall.F_ALLOCATEALL
		// Ignore the return value
		_, _, _ = syscall.Syscall(syscall.SYS_FCNTL, file.Fd(), syscall.F_PREALLOCATE, uintptr(unsafe.Pointer(&fst)))
	}

	return syscall.Ftruncate(int(file.Fd()), fst.Length)
}
|
||||
14
pkg/filemanager/driver/local/fallocate_linux.go
Normal file
14
pkg/filemanager/driver/local/fallocate_linux.go
Normal file
@@ -0,0 +1,14 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// Fallocate preallocates length bytes starting at offset for file using the
// Linux fallocate(2) syscall (mode 0: allocate blocks and extend file size).
func Fallocate(file *os.File, offset int64, length int64) error {
	// A zero length is treated as a no-op (fallocate(2) rejects non-positive lengths).
	if length == 0 {
		return nil
	}

	return syscall.Fallocate(int(file.Fd()), 0, offset, length)
}
|
||||
301
pkg/filemanager/driver/local/local.go
Normal file
301
pkg/filemanager/driver/local/local.go
Normal file
@@ -0,0 +1,301 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
)
|
||||
|
||||
const (
	// Perm is the default permission bits (rwxr--r--) for files and
	// directories created by the local driver.
	Perm = 0744
)

var (
	// capabilities is the static feature set of the local driver: content is
	// proxied through Cloudreve, and both media metadata and thumbnails are
	// produced by local (proxy-side) generators.
	capabilities = &driver.Capabilities{
		StaticFeatures: &boolset.BooleanSet{},
		MediaMetaProxy: true,
		ThumbProxy:     true,
	}
)

// init marks the boolean capability flags supported by the local driver.
func init() {
	boolset.Sets(map[driver.HandlerCapability]bool{
		driver.HandlerCapabilityProxyRequired: true,
		driver.HandlerCapabilityInboundGet:    true,
	}, capabilities.StaticFeatures)
}
|
||||
|
||||
// Driver is the storage adapter for the local storage policy.
type Driver struct {
	Policy     *ent.StoragePolicy
	httpClient request.Client
	l          logging.Logger
	config     conf.ConfigProvider
}

// New constructs a new local driver
func New(p *ent.StoragePolicy, l logging.Logger, config conf.ConfigProvider) *Driver {
	return &Driver{
		Policy:     p,
		l:          l,
		httpClient: request.NewClient(config, request.WithLogger(l)),
		config:     config,
	}
}
|
||||
|
||||
//// List 递归列取给定物理路径下所有文件
|
||||
//func (handler *Driver) List(ctx context.Context, path string, recursive bool) ([]response.Object, error) {
|
||||
// var res []response.Object
|
||||
//
|
||||
// // 取得起始路径
|
||||
// root := util.RelativePath(filepath.FromSlash(path))
|
||||
//
|
||||
// // 开始遍历路径下的文件、目录
|
||||
// err := filepath.Walk(root,
|
||||
// func(path string, info os.FileInfo, err error) error {
|
||||
// // 跳过根目录
|
||||
// if path == root {
|
||||
// return nil
|
||||
// }
|
||||
//
|
||||
// if err != nil {
|
||||
// util.Log().Warning("Failed to walk folder %q: %s", path, err)
|
||||
// return filepath.SkipDir
|
||||
// }
|
||||
//
|
||||
// // 将遍历对象的绝对路径转换为相对路径
|
||||
// rel, err := filepath.Rel(root, path)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
//
|
||||
// res = append(res, response.Object{
|
||||
// Name: info.Name(),
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Source: path,
|
||||
// Size: uint64(info.Size()),
|
||||
// IsDir: info.IsDir(),
|
||||
// LastModify: info.ModTime(),
|
||||
// })
|
||||
//
|
||||
// // 如果非递归,则不步入目录
|
||||
// if !recursive && info.IsDir() {
|
||||
// return filepath.SkipDir
|
||||
// }
|
||||
//
|
||||
// return nil
|
||||
// })
|
||||
//
|
||||
// return res, err
|
||||
//}
|
||||
|
||||
// Open returns an *os.File for the given logical path, resolved relative to
// the application root. Failures are logged at debug level and returned as-is.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	// Open the physical file.
	file, err := os.Open(handler.LocalPath(ctx, path))
	if err != nil {
		handler.l.Debug("Failed to open file: %s", err)
		return nil, err
	}

	return file, nil
}
|
||||
|
||||
// LocalPath converts a slash-separated logical path into an OS-specific path
// relative to the application root.
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	return util.RelativePath(filepath.FromSlash(path))
}
|
||||
|
||||
// Put saves the request body to file.Props.SavePath. It supports resuming at
// file.Offset and honors fs.ModeOverwrite; the body is always closed before
// returning.
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
	defer file.Close()
	dst := util.RelativePath(filepath.FromSlash(file.Props.SavePath))

	// Unless overwriting is requested, reject a destination that already exists.
	if file.Mode&fs.ModeOverwrite != fs.ModeOverwrite {
		if util.Exists(dst) {
			handler.l.Warning("File with the same name existed or unavailable: %s", dst)
			return errors.New("file with the same name existed or unavailable")
		}
	}

	if err := handler.prepareFileDirectory(dst); err != nil {
		return err
	}

	// Truncate only when overwriting from the beginning; resumed writes
	// (Offset > 0) must keep previously written chunks intact.
	openMode := os.O_CREATE | os.O_RDWR
	if file.Mode&fs.ModeOverwrite == fs.ModeOverwrite && file.Offset == 0 {
		openMode |= os.O_TRUNC
	}

	out, err := os.OpenFile(dst, openMode, Perm)
	if err != nil {
		handler.l.Warning("Failed to open or create file: %s", err)
		return err
	}
	defer out.Close()

	stat, err := out.Stat()
	if err != nil {
		handler.l.Warning("Failed to read file info: %s", err)
		return err
	}

	// The file on disk must already contain at least Offset bytes, otherwise
	// earlier chunks are missing and seeking would leave a hole.
	if stat.Size() < file.Offset {
		return errors.New("size of unfinished uploaded chunks is not as expected")
	}

	if _, err := out.Seek(file.Offset, io.SeekStart); err != nil {
		return fmt.Errorf("failed to seek to desired offset %d: %s", file.Offset, err)
	}

	// Stream the request body into the file.
	_, err = io.Copy(out, file)
	return err
}
|
||||
|
||||
// Delete removes one or more files by logical path. It returns the paths that
// failed to delete together with the last error encountered; paths that no
// longer exist are treated as already deleted.
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
	deleteFailed := make([]string, 0, len(files))
	var retErr error

	for _, value := range files {
		filePath := util.RelativePath(filepath.FromSlash(value))
		if util.Exists(filePath) {
			err := os.Remove(filePath)
			if err != nil {
				handler.l.Warning("Failed to delete file: %s", err)
				retErr = err
				deleteFailed = append(deleteFailed, value)
			}
		}

		//// Try to delete the file's thumbnail, if any (legacy v3 behavior).
		//_ = os.Remove(util.RelativePath(value + model.GetSettingByNameWithDefault("thumb_file_suffix", "._thumb")))
	}

	return deleteFailed, retErr
}
|
||||
|
||||
// Thumb is not supported by the local driver; thumbnails are produced by
// local generators instead (ThumbProxy is set in capabilities).
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
	return "", errors.New("not implemented")
}
|
||||
|
||||
// Source is not supported by the local driver; content is served through
// Cloudreve's own proxy (HandlerCapabilityProxyRequired is set in init).
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
	return "", errors.New("not implemented")
}
|
||||
|
||||
// Token 获取上传策略和认证Token,本地策略直接返回空值
|
||||
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
if file.Mode&fs.ModeOverwrite != fs.ModeOverwrite && util.Exists(uploadSession.Props.SavePath) {
|
||||
return nil, errors.New("placeholder file already exist")
|
||||
}
|
||||
|
||||
dst := util.RelativePath(filepath.FromSlash(uploadSession.Props.SavePath))
|
||||
if err := handler.prepareFileDirectory(dst); err != nil {
|
||||
return nil, fmt.Errorf("failed to prepare file directory: %w", err)
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, Perm)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create placeholder file: %w", err)
|
||||
}
|
||||
|
||||
// Preallocate disk space
|
||||
defer f.Close()
|
||||
if handler.Policy.Settings.PreAllocate {
|
||||
if err := Fallocate(f, 0, uploadSession.Props.Size); err != nil {
|
||||
handler.l.Warning("Failed to preallocate file: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &fs.UploadCredential{
|
||||
SessionID: uploadSession.Props.UploadSessionID,
|
||||
ChunkSize: handler.Policy.Settings.ChunkSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (h *Driver) prepareFileDirectory(dst string) error {
|
||||
basePath := filepath.Dir(dst)
|
||||
if !util.Exists(basePath) {
|
||||
err := os.MkdirAll(basePath, Perm)
|
||||
if err != nil {
|
||||
h.l.Warning("Failed to create directory: %s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CancelToken cancels a previously issued upload credential. The local driver
// keeps no server-side upload state, so there is nothing to clean up.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	return nil
}
|
||||
|
||||
// CompleteUpload finishes an upload session. For plain local uploads (no
// callback URL) it is a no-op. When a callback is set — i.e. this driver runs
// on a slave node shadowing a remote policy — it sends a signed callback
// request to the master node and propagates any error the master reports.
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	if session.Callback == "" {
		return nil
	}

	// The node edge carries the slave key needed to sign the callback.
	if session.Policy.Edges.Node == nil {
		return serializer.NewError(serializer.CodeCallbackError, "Node not found", nil)
	}

	// If callback is set, indicating this handler is used in slave node as a shadowed handler for remote policy,
	// we need to send callback request to master node.
	resp := handler.httpClient.Request(
		"POST",
		session.Callback,
		nil,
		request.WithTimeout(time.Duration(handler.config.Slave().CallbackTimeout)*time.Second),
		request.WithCredential(
			auth.HMACAuth{[]byte(session.Policy.Edges.Node.SlaveKey)},
			int64(handler.config.Slave().SignatureTTL),
		),
		request.WithContext(ctx),
		request.WithCorrelationID(),
	)

	if resp.Err != nil {
		return serializer.NewError(serializer.CodeCallbackError, "Slave cannot send callback request", resp.Err)
	}

	// Decode the master node's response envelope.
	res, err := resp.DecodeResponse()
	if err != nil {
		msg := fmt.Sprintf("Slave cannot parse callback response from master (StatusCode=%d).", resp.Response.StatusCode)
		return serializer.NewError(serializer.CodeCallbackError, msg, err)
	}

	// A non-zero code means the master rejected the callback.
	if res.Code != 0 {
		return serializer.NewError(res.Code, res.Msg, errors.New(res.Error))
	}

	return nil
}
|
||||
|
||||
// Capabilities returns the static capability set of the local driver.
func (handler *Driver) Capabilities() *driver.Capabilities {
	return capabilities
}
|
||||
|
||||
// MediaMeta is not implemented for the local driver; metadata is extracted by
// local generators instead (MediaMetaProxy is set in capabilities).
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
	return nil, errors.New("not implemented")
}
|
||||
137
pkg/filemanager/driver/obs/media.go
Normal file
137
pkg/filemanager/driver/obs/media.go
Normal file
@@ -0,0 +1,137 @@
|
||||
package obs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/huaweicloud/huaweicloud-sdk-go-obs/obs"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
// MediaMeta retrieves EXIF metadata for an image stored in OBS: it signs a
// short-lived URL with the image-info processor, fetches the JSON result and
// converts it (including GPS coordinates) into MediaMeta entries of type exif.
func (d *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
	thumbURL, err := d.signSourceURL(&obs.CreateSignedUrlInput{
		Method:  obs.HttpMethodGet,
		Bucket:  d.policy.BucketName,
		Key:     path,
		Expires: int(mediaInfoTTL.Seconds()),
		QueryParams: map[string]string{
			imageProcessHeader: imageInfoProcessor,
		},
	})

	if err != nil {
		return nil, fmt.Errorf("failed to sign media info url: %w", err)
	}

	resp, err := d.httpClient.
		Request(http.MethodGet, thumbURL, nil, request.WithContext(ctx)).
		CheckHTTPResponse(http.StatusOK).
		GetResponseIgnoreErr()
	if err != nil {
		return nil, handleJsonError(resp, err)
	}

	var imageInfo map[string]any
	if err := json.Unmarshal([]byte(resp), &imageInfo); err != nil {
		return nil, fmt.Errorf("failed to unmarshal media info: %w", err)
	}

	// Flatten all values to strings and strip the "exif:" key prefix.
	imageInfoMap := lo.MapEntries(imageInfo, func(k string, v any) (string, string) {
		if vStr, ok := v.(string); ok {
			return strings.TrimPrefix(k, "exif:"), vStr
		}

		return k, fmt.Sprintf("%v", v)
	})
	metas := make([]driver.MediaMeta, 0)
	metas = append(metas, mediameta.ExtractExifMap(imageInfoMap, time.Time{})...)
	metas = append(metas, parseGpsInfo(imageInfoMap)...)
	// Every entry produced here is EXIF-derived.
	for i := 0; i < len(metas); i++ {
		metas[i].Type = driver.MetaTypeExif
	}
	return metas, nil
}
|
||||
|
||||
// parseGpsInfo extracts GPS latitude/longitude from the flattened EXIF map
// returned by OBS and converts them into normalized decimal-degree MediaMeta
// entries. It returns nil when any of the four required EXIF fields is
// missing or when the coordinates cannot be parsed into finite numbers.
func parseGpsInfo(imageInfo map[string]string) []driver.MediaMeta {
	latitude := imageInfo["GPSLatitude"]   // 31/1, 162680820/10000000, 0/1
	longitude := imageInfo["GPSLongitude"] // 120/1, 429103939/10000000, 0/1
	latRef := imageInfo["GPSLatitudeRef"]  // N
	lonRef := imageInfo["GPSLongitudeRef"] // E

	// Make sure all value exist in map
	if latitude == "" || longitude == "" || latRef == "" || lonRef == "" {
		return nil
	}

	lat := parseRawGPS(latitude, latRef)
	lon := parseRawGPS(longitude, lonRef)
	if !math.IsNaN(lat) && !math.IsNaN(lon) {
		lat, lng := mediameta.NormalizeGPS(lat, lon)
		return []driver.MediaMeta{{
			Key:   mediameta.GpsLat,
			Value: fmt.Sprintf("%f", lat),
		}, {
			Key:   mediameta.GpsLng,
			Value: fmt.Sprintf("%f", lng),
		}}
	}

	return nil
}
|
||||
|
||||
// parseRawGPS converts a raw EXIF GPS coordinate such as
// "31/1, 162680820/10000000, 0/1" (degrees, minutes, seconds as rationals)
// into decimal degrees. The ref hemisphere ("N"/"S"/"E"/"W") determines the
// sign: southern and western coordinates are negative. Missing or malformed
// components evaluate to 0.
func parseRawGPS(gpsStr string, ref string) float64 {
	parts := strings.Split(gpsStr, ", ")
	if len(parts) < 1 {
		return 0
	}

	degrees := getGpsElemValue(parts[0])
	var minutes, seconds float64
	if len(parts) > 1 {
		minutes = getGpsElemValue(parts[1])
	}
	if len(parts) > 2 {
		seconds = getGpsElemValue(parts[2])
	}

	value := degrees + minutes/60.0 + seconds/3600.0
	// Southern/western hemispheres are represented as negative values.
	if ref == "S" || ref == "W" {
		value = -value
	}
	return value
}

// getGpsElemValue evaluates a single "numerator/denominator" rational string,
// returning 0 for malformed input or a zero denominator.
func getGpsElemValue(elm string) float64 {
	numStr, denStr, found := strings.Cut(elm, "/")
	if !found {
		return 0
	}

	numerator, err := strconv.ParseFloat(numStr, 64)
	if err != nil {
		return 0
	}

	denominator, err := strconv.ParseFloat(denStr, 64)
	if err != nil || denominator == 0 {
		return 0
	}

	return numerator / denominator
}
|
||||
513
pkg/filemanager/driver/obs/obs.go
Normal file
513
pkg/filemanager/driver/obs/obs.go
Normal file
@@ -0,0 +1,513 @@
|
||||
package obs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/huaweicloud/huaweicloud-sdk-go-obs/obs"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
const (
	// chunkRetrySleep is the fixed backoff between chunk upload retries.
	chunkRetrySleep = time.Duration(5) * time.Second
	// maxDeleteBatch is the maximum number of objects per DeleteObjects call.
	maxDeleteBatch = 1000
	// imageProcessHeader is the query parameter carrying image-processing directives.
	imageProcessHeader = "x-image-process"
	// trafficLimitHeader limits the bandwidth (bit/s) of a signed URL.
	trafficLimitHeader = "x-obs-traffic-limit"
	partNumberParam    = "partNumber"
	callbackParam      = "x-obs-callback"
	uploadIdParam      = "uploadId"
	// mediaInfoTTL is how long signed media-info URLs stay valid.
	mediaInfoTTL = time.Duration(10) * time.Minute
	// imageInfoProcessor asks OBS for image/EXIF information.
	imageInfoProcessor = "image/info"

	// MultiPartUploadThreshold is the server-side file size threshold above
	// which multipart upload is used instead of a single PutObject.
	MultiPartUploadThreshold int64 = 5 << 30 // 5GB
)

var (
	// features holds the boolean capability flags of the OBS driver.
	features = &boolset.BooleanSet{}
)
|
||||
|
||||
type (
	// CallbackPolicy is the JSON payload describing how OBS should call back
	// Cloudreve after a client-side upload completes.
	CallbackPolicy struct {
		CallbackURL      string `json:"callbackUrl"`
		CallbackBody     string `json:"callbackBody"`
		CallbackBodyType string `json:"callbackBodyType"`
	}
	// JsonError mirrors the JSON error body returned by OBS endpoints.
	JsonError struct {
		Message string `json:"message"`
		Code    string `json:"code"`
	}
)

// Driver Huawei Cloud OBS driver
type Driver struct {
	policy    *ent.StoragePolicy
	chunkSize int64 // effective chunk size for multipart uploads

	settings   setting.Provider
	l          logging.Logger
	config     conf.ConfigProvider
	mime       mime.MimeDetector
	httpClient request.Client
	obs        *obs.ObsClient
}
|
||||
|
||||
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
|
||||
config conf.ConfigProvider, l logging.Logger, mime mime.MimeDetector) (*Driver, error) {
|
||||
chunkSize := policy.Settings.ChunkSize
|
||||
if policy.Settings.ChunkSize == 0 {
|
||||
chunkSize = 25 << 20 // 25 MB
|
||||
}
|
||||
|
||||
driver := &Driver{
|
||||
policy: policy,
|
||||
settings: settings,
|
||||
chunkSize: chunkSize,
|
||||
config: config,
|
||||
l: l,
|
||||
mime: mime,
|
||||
httpClient: request.NewClient(config, request.WithLogger(l)),
|
||||
}
|
||||
|
||||
useCname := false
|
||||
if policy.Settings != nil && policy.Settings.UseCname {
|
||||
useCname = true
|
||||
}
|
||||
|
||||
obsClient, err := obs.New(policy.AccessKey, policy.SecretKey, policy.Server, obs.WithSignature(obs.SignatureObs), obs.WithCustomDomainName(useCname))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
driver.obs = obsClient
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
func (d *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
|
||||
defer file.Close()
|
||||
|
||||
// 是否允许覆盖
|
||||
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
|
||||
if !overwrite {
|
||||
// Check for duplicated file
|
||||
if _, err := d.obs.HeadObject(&obs.HeadObjectInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
}, obs.WithRequestContext(ctx)); err == nil {
|
||||
return fs.ErrFileExisted
|
||||
}
|
||||
}
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
d.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
// 小文件直接上传
|
||||
if file.Props.Size < MultiPartUploadThreshold {
|
||||
_, err := d.obs.PutObject(&obs.PutObjectInput{
|
||||
PutObjectBasicInput: obs.PutObjectBasicInput{
|
||||
ObjectOperationInput: obs.ObjectOperationInput{
|
||||
Key: file.Props.SavePath,
|
||||
Bucket: d.policy.BucketName,
|
||||
},
|
||||
HttpHeader: obs.HttpHeader{
|
||||
ContentType: mimeType,
|
||||
},
|
||||
ContentLength: file.Props.Size,
|
||||
},
|
||||
Body: file,
|
||||
}, obs.WithRequestContext(ctx))
|
||||
return err
|
||||
}
|
||||
|
||||
// 超过阈值时使用分片上传
|
||||
imur, err := d.obs.InitiateMultipartUpload(&obs.InitiateMultipartUploadInput{
|
||||
ObjectOperationInput: obs.ObjectOperationInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
},
|
||||
HttpHeader: obs.HttpHeader{
|
||||
ContentType: d.mime.TypeByName(file.Props.Uri.Name()),
|
||||
},
|
||||
}, obs.WithRequestContext(ctx))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initiate multipart upload: %w", err)
|
||||
}
|
||||
|
||||
chunks := chunk.NewChunkGroup(file, d.chunkSize, &backoff.ConstantBackoff{
|
||||
Max: d.settings.ChunkRetryLimit(ctx),
|
||||
Sleep: chunkRetrySleep,
|
||||
}, d.settings.UseChunkBuffer(ctx), d.l, d.settings.TempPath(ctx))
|
||||
|
||||
parts := make([]*obs.UploadPartOutput, 0, chunks.Num())
|
||||
|
||||
uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
|
||||
part, err := d.obs.UploadPart(&obs.UploadPartInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
PartNumber: current.Index() + 1,
|
||||
UploadId: imur.UploadId,
|
||||
Body: content,
|
||||
SourceFile: "",
|
||||
PartSize: current.Length(),
|
||||
}, obs.WithRequestContext(ctx))
|
||||
if err == nil {
|
||||
parts = append(parts, part)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for chunks.Next() {
|
||||
if err := chunks.Process(uploadFunc); err != nil {
|
||||
d.cancelUpload(file.Props.SavePath, imur)
|
||||
return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
|
||||
}
|
||||
}
|
||||
|
||||
_, err = d.obs.CompleteMultipartUpload(&obs.CompleteMultipartUploadInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
UploadId: imur.UploadId,
|
||||
Parts: lo.Map(parts, func(part *obs.UploadPartOutput, i int) obs.Part {
|
||||
return obs.Part{
|
||||
PartNumber: i + 1,
|
||||
ETag: part.ETag,
|
||||
}
|
||||
}),
|
||||
}, obs.WithRequestContext(ctx))
|
||||
if err != nil {
|
||||
d.cancelUpload(file.Props.SavePath, imur)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
|
||||
groups := lo.Chunk(files, maxDeleteBatch)
|
||||
failed := make([]string, 0)
|
||||
var lastError error
|
||||
for index, group := range groups {
|
||||
d.l.Debug("Process delete group #%d: %v", index, group)
|
||||
// 删除文件
|
||||
delRes, err := d.obs.DeleteObjects(&obs.DeleteObjectsInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Quiet: true,
|
||||
Objects: lo.Map(group, func(item string, index int) obs.ObjectToDelete {
|
||||
return obs.ObjectToDelete{
|
||||
Key: item,
|
||||
}
|
||||
}),
|
||||
}, obs.WithRequestContext(ctx))
|
||||
if err != nil {
|
||||
failed = append(failed, group...)
|
||||
lastError = err
|
||||
continue
|
||||
}
|
||||
|
||||
for _, v := range delRes.Errors {
|
||||
d.l.Debug("Failed to delete file: %s, Code:%s, Message:%s", v.Key, v.Code, v.Key)
|
||||
failed = append(failed, v.Key)
|
||||
}
|
||||
}
|
||||
|
||||
if len(failed) > 0 && lastError == nil {
|
||||
lastError = fmt.Errorf("failed to delete files: %v", failed)
|
||||
}
|
||||
|
||||
return failed, lastError
|
||||
}
|
||||
|
||||
// Open is not supported: OBS objects cannot be opened as local files
// (HandlerCapabilityInboundGet is not set for this driver).
func (d *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	return nil, errors.New("not implemented")
}

// LocalPath always returns an empty string; OBS objects have no local path.
func (d *Driver) LocalPath(ctx context.Context, path string) string {
	return ""
}
|
||||
|
||||
func (d *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
|
||||
w, h := d.settings.ThumbSize(ctx)
|
||||
thumbURL, err := d.signSourceURL(&obs.CreateSignedUrlInput{
|
||||
Method: obs.HttpMethodGet,
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: e.Source(),
|
||||
Expires: int(time.Until(*expire).Seconds()),
|
||||
QueryParams: map[string]string{
|
||||
imageProcessHeader: fmt.Sprintf("image/resize,m_lfit,w_%d,h_%d", w, h),
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return thumbURL, nil
|
||||
}
|
||||
|
||||
func (d *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
|
||||
params := make(map[string]string)
|
||||
if args.IsDownload {
|
||||
encodedFilename := url.PathEscape(args.DisplayName)
|
||||
params["response-content-disposition"] = fmt.Sprintf("attachment; filename=\"%s\"; filename*=UTF-8''%s",
|
||||
args.DisplayName, encodedFilename)
|
||||
}
|
||||
|
||||
expires := 86400 * 265 * 20
|
||||
if args.Expire != nil {
|
||||
expires = int(time.Until(*args.Expire).Seconds())
|
||||
}
|
||||
|
||||
if args.Speed > 0 {
|
||||
// Byte 转换为 bit
|
||||
args.Speed *= 8
|
||||
|
||||
// OSS对速度值有范围限制
|
||||
if args.Speed < 819200 {
|
||||
args.Speed = 819200
|
||||
}
|
||||
if args.Speed > 838860800 {
|
||||
args.Speed = 838860800
|
||||
}
|
||||
}
|
||||
|
||||
if args.Speed > 0 {
|
||||
params[trafficLimitHeader] = strconv.FormatInt(args.Speed, 10)
|
||||
}
|
||||
|
||||
return d.signSourceURL(&obs.CreateSignedUrlInput{
|
||||
Method: obs.HttpMethodGet,
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: e.Source(),
|
||||
Expires: expires,
|
||||
QueryParams: params,
|
||||
})
|
||||
}
|
||||
|
||||
func (d *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
// Check for duplicated file
|
||||
if _, err := d.obs.HeadObject(&obs.HeadObjectInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
}, obs.WithRequestContext(ctx)); err == nil {
|
||||
return nil, fs.ErrFileExisted
|
||||
}
|
||||
|
||||
// 生成回调地址
|
||||
siteURL := d.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
|
||||
// 在从机端创建上传会话
|
||||
uploadSession.ChunkSize = d.chunkSize
|
||||
uploadSession.Callback = routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeObs, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()
|
||||
// 回调策略
|
||||
callbackPolicy := CallbackPolicy{
|
||||
CallbackURL: uploadSession.Callback,
|
||||
CallbackBody: `{"name":${key},"source_name":${fname},"size":${size}}`,
|
||||
CallbackBodyType: "application/json",
|
||||
}
|
||||
|
||||
callbackPolicyJSON, err := json.Marshal(callbackPolicy)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode callback policy: %w", err)
|
||||
}
|
||||
callbackPolicyEncoded := base64.StdEncoding.EncodeToString(callbackPolicyJSON)
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
d.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
imur, err := d.obs.InitiateMultipartUpload(&obs.InitiateMultipartUploadInput{
|
||||
ObjectOperationInput: obs.ObjectOperationInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
},
|
||||
HttpHeader: obs.HttpHeader{
|
||||
ContentType: mimeType,
|
||||
},
|
||||
}, obs.WithRequestContext(ctx))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize multipart upload: %w", err)
|
||||
}
|
||||
uploadSession.UploadID = imur.UploadId
|
||||
|
||||
// 为每个分片签名上传 URL
|
||||
chunks := chunk.NewChunkGroup(file, d.chunkSize, &backoff.ConstantBackoff{}, false, d.l, "")
|
||||
urls := make([]string, chunks.Num())
|
||||
ttl := int64(time.Until(uploadSession.Props.ExpireAt).Seconds())
|
||||
for chunks.Next() {
|
||||
err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error {
|
||||
signedURL, err := d.obs.CreateSignedUrl(&obs.CreateSignedUrlInput{
|
||||
Method: obs.HttpMethodPut,
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
QueryParams: map[string]string{
|
||||
partNumberParam: strconv.Itoa(c.Index() + 1),
|
||||
uploadIdParam: uploadSession.UploadID,
|
||||
},
|
||||
Expires: int(ttl),
|
||||
Headers: map[string]string{
|
||||
"Content-Length": strconv.FormatInt(c.Length(), 10),
|
||||
"Content-Type": "application/octet-stream",
|
||||
}, //TODO: Validate +1
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
urls[c.Index()] = signedURL.SignedUrl
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// 签名完成分片上传的URL
|
||||
completeURL, err := d.obs.CreateSignedUrl(&obs.CreateSignedUrlInput{
|
||||
Method: obs.HttpMethodPost,
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: file.Props.SavePath,
|
||||
QueryParams: map[string]string{
|
||||
uploadIdParam: uploadSession.UploadID,
|
||||
callbackParam: callbackPolicyEncoded,
|
||||
},
|
||||
Headers: map[string]string{
|
||||
"Content-Type": "application/octet-stream",
|
||||
},
|
||||
Expires: int(ttl),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &fs.UploadCredential{
|
||||
UploadID: imur.UploadId,
|
||||
UploadURLs: urls,
|
||||
CompleteURL: completeURL.SignedUrl,
|
||||
SessionID: uploadSession.Props.UploadSessionID,
|
||||
ChunkSize: d.chunkSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
|
||||
_, err := d.obs.AbortMultipartUpload(&obs.AbortMultipartUploadInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: uploadSession.Props.SavePath,
|
||||
UploadId: uploadSession.UploadID,
|
||||
}, obs.WithRequestContext(ctx))
|
||||
return err
|
||||
}
|
||||
|
||||
// CompleteUpload is a no-op for this driver — presumably the upload is
// finalized client-side via the pre-signed complete URL issued by Token;
// confirm against the upload flow.
func (d *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	return nil
}
|
||||
|
||||
//func (d *Driver) List(ctx context.Context, path string, recursive bool) ([]response.Object, error) {
|
||||
// return nil, errors.New("not implemented")
|
||||
//}
|
||||
|
||||
// Capabilities reports this driver's static features together with the
// media-metadata and thumbnail settings configured on the storage policy.
func (d *Driver) Capabilities() *driver.Capabilities {
	// Media-meta extraction is only advertised when the policy enables the
	// provider's native media processing.
	mediaMetaExts := d.policy.Settings.MediaMetaExts
	if !d.policy.Settings.NativeMediaProcessing {
		mediaMetaExts = nil
	}
	return &driver.Capabilities{
		StaticFeatures:         features,
		MediaMetaSupportedExts: mediaMetaExts,
		MediaMetaProxy:         d.policy.Settings.MediaMetaGeneratorProxy,
		ThumbSupportedExts:     d.policy.Settings.ThumbExts,
		ThumbProxy:             d.policy.Settings.ThumbGeneratorProxy,
		ThumbSupportAllExts:    d.policy.Settings.ThumbSupportAllExts,
		ThumbMaxSize:           d.policy.Settings.ThumbMaxSize,
	}
}
|
||||
|
||||
// CORS 创建跨域策略
|
||||
func (d *Driver) CORS() error {
|
||||
_, err := d.obs.SetBucketCors(&obs.SetBucketCorsInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
BucketCors: obs.BucketCors{
|
||||
CorsRules: []obs.CorsRule{
|
||||
{
|
||||
AllowedOrigin: []string{"*"},
|
||||
AllowedMethod: []string{
|
||||
"GET",
|
||||
"POST",
|
||||
"PUT",
|
||||
"DELETE",
|
||||
"HEAD",
|
||||
},
|
||||
ExposeHeader: []string{"Etag"},
|
||||
AllowedHeader: []string{"*"},
|
||||
MaxAgeSeconds: 3600,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Driver) cancelUpload(path string, imur *obs.InitiateMultipartUploadOutput) {
|
||||
if _, err := d.obs.AbortMultipartUpload(&obs.AbortMultipartUploadInput{
|
||||
Bucket: d.policy.BucketName,
|
||||
Key: path,
|
||||
UploadId: imur.UploadId,
|
||||
}); err != nil {
|
||||
d.l.Warning("failed to abort multipart upload: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (handler *Driver) signSourceURL(input *obs.CreateSignedUrlInput) (string, error) {
|
||||
signedURL, err := handler.obs.CreateSignedUrl(input)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
finalURL, err := url.Parse(signedURL.SignedUrl)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// 公有空间替换掉Key及不支持的头
|
||||
if !handler.policy.IsPrivate {
|
||||
query := finalURL.Query()
|
||||
query.Del("AccessKeyId")
|
||||
query.Del("Signature")
|
||||
finalURL.RawQuery = query.Encode()
|
||||
}
|
||||
return finalURL.String(), nil
|
||||
}
|
||||
|
||||
func handleJsonError(resp string, originErr error) error {
|
||||
if resp == "" {
|
||||
return originErr
|
||||
}
|
||||
|
||||
var err JsonError
|
||||
if err := json.Unmarshal([]byte(resp), &err); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal cos error: %w", err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("obs error: %s", err.Message)
|
||||
}
|
||||
517
pkg/filemanager/driver/onedrive/api.go
Normal file
517
pkg/filemanager/driver/onedrive/api.go
Normal file
@@ -0,0 +1,517 @@
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// SmallFileSize 单文件上传接口最大尺寸
|
||||
SmallFileSize uint64 = 4 * 1024 * 1024
|
||||
// ChunkSize 服务端中转分片上传分片大小
|
||||
ChunkSize uint64 = 10 * 1024 * 1024
|
||||
// ListRetry 列取请求重试次数
|
||||
ListRetry = 1
|
||||
chunkRetrySleep = time.Second * 5
|
||||
|
||||
notFoundError = "itemNotFound"
|
||||
)
|
||||
|
||||
type RetryCtx struct{}
|
||||
|
||||
// GetSourcePath 获取文件的绝对路径
|
||||
func (info *FileInfo) GetSourcePath() string {
|
||||
res, err := url.PathUnescape(info.ParentReference.Path)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return strings.TrimPrefix(
|
||||
path.Join(
|
||||
strings.TrimPrefix(res, "/drive/root:"),
|
||||
info.Name,
|
||||
),
|
||||
"/",
|
||||
)
|
||||
}
|
||||
|
||||
func (client *client) getRequestURL(api string, opts ...Option) string {
|
||||
options := newDefaultOption()
|
||||
for _, o := range opts {
|
||||
o.apply(options)
|
||||
}
|
||||
|
||||
base, _ := url.Parse(client.endpoints.endpointURL)
|
||||
if base == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
if options.useDriverResource {
|
||||
base.Path = path.Join(base.Path, client.endpoints.driverResource, api)
|
||||
} else {
|
||||
base.Path = path.Join(base.Path, api)
|
||||
}
|
||||
|
||||
return base.String()
|
||||
}
|
||||
|
||||
// ListChildren lists the direct children of the given path ("" or "/" maps
// to the drive root). On request failure it retries up to ListRetry times,
// sleeping 5 seconds between attempts; the attempt count travels through ctx
// under the RetryCtx key.
func (client *client) ListChildren(ctx context.Context, path string) ([]FileInfo, error) {
	var requestURL string
	dst := strings.TrimPrefix(path, "/")
	if dst == "" {
		requestURL = client.getRequestURL("root/children")
	} else {
		requestURL = client.getRequestURL("root:/" + dst + ":/children")
	}

	// $top is set to a huge value so all children come back in one page.
	res, err := client.requestWithStr(ctx, "GET", requestURL+"?$top=999999999", "", 200)
	if err != nil {
		retried := 0
		if v, ok := ctx.Value(RetryCtx{}).(int); ok {
			retried = v
		}
		if retried < ListRetry {
			retried++
			client.l.Debug("Failed to list path %q: %s, will retry in 5 seconds.", path, err)
			// NOTE(review): this sleep ignores ctx cancellation — confirm
			// that is acceptable for callers.
			time.Sleep(time.Duration(5) * time.Second)
			return client.ListChildren(context.WithValue(ctx, RetryCtx{}, retried), path)
		}
		return nil, err
	}

	var (
		decodeErr error
		fileInfo  ListResponse
	)
	decodeErr = json.Unmarshal([]byte(res), &fileInfo)
	if decodeErr != nil {
		return nil, decodeErr
	}

	return fileInfo.Value, nil
}
|
||||
|
||||
// Meta 根据资源ID或文件路径获取文件元信息
|
||||
func (client *client) Meta(ctx context.Context, id string, path string) (*FileInfo, error) {
|
||||
var requestURL string
|
||||
if id != "" {
|
||||
requestURL = client.getRequestURL("items/" + id)
|
||||
} else {
|
||||
dst := strings.TrimPrefix(path, "/")
|
||||
requestURL = client.getRequestURL("root:/" + dst)
|
||||
}
|
||||
|
||||
res, err := client.requestWithStr(ctx, "GET", requestURL+"?expand=thumbnails", "", 200)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
decodeErr error
|
||||
fileInfo FileInfo
|
||||
)
|
||||
decodeErr = json.Unmarshal([]byte(res), &fileInfo)
|
||||
if decodeErr != nil {
|
||||
return nil, decodeErr
|
||||
}
|
||||
|
||||
return &fileInfo, nil
|
||||
|
||||
}
|
||||
|
||||
// CreateUploadSession 创建分片上传会话
|
||||
func (client *client) CreateUploadSession(ctx context.Context, dst string, opts ...Option) (string, error) {
|
||||
options := newDefaultOption()
|
||||
for _, o := range opts {
|
||||
o.apply(options)
|
||||
}
|
||||
|
||||
dst = strings.TrimPrefix(dst, "/")
|
||||
requestURL := client.getRequestURL("root:/" + dst + ":/createUploadSession")
|
||||
body := map[string]map[string]interface{}{
|
||||
"item": {
|
||||
"@microsoft.graph.conflictBehavior": options.conflictBehavior,
|
||||
},
|
||||
}
|
||||
bodyBytes, _ := json.Marshal(body)
|
||||
|
||||
res, err := client.requestWithStr(ctx, "POST", requestURL, string(bodyBytes), 200)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var (
|
||||
decodeErr error
|
||||
uploadSession UploadSessionResponse
|
||||
)
|
||||
decodeErr = json.Unmarshal([]byte(res), &uploadSession)
|
||||
if decodeErr != nil {
|
||||
return "", decodeErr
|
||||
}
|
||||
|
||||
return uploadSession.UploadURL, nil
|
||||
}
|
||||
|
||||
// GetSiteIDByURL 通过 SharePoint 站点 URL 获取站点ID
|
||||
func (client *client) GetSiteIDByURL(ctx context.Context, siteUrl string) (string, error) {
|
||||
siteUrlParsed, err := url.Parse(siteUrl)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
hostName := siteUrlParsed.Hostname()
|
||||
relativePath := strings.Trim(siteUrlParsed.Path, "/")
|
||||
requestURL := client.getRequestURL(fmt.Sprintf("sites/%s:/%s", hostName, relativePath), WithDriverResource(false))
|
||||
res, reqErr := client.requestWithStr(ctx, "GET", requestURL, "", 200)
|
||||
if reqErr != nil {
|
||||
return "", reqErr
|
||||
}
|
||||
|
||||
var (
|
||||
decodeErr error
|
||||
siteInfo Site
|
||||
)
|
||||
decodeErr = json.Unmarshal([]byte(res), &siteInfo)
|
||||
if decodeErr != nil {
|
||||
return "", decodeErr
|
||||
}
|
||||
|
||||
return siteInfo.ID, nil
|
||||
}
|
||||
|
||||
// GetUploadSessionStatus 查询上传会话状态
|
||||
func (client *client) GetUploadSessionStatus(ctx context.Context, uploadURL string) (*UploadSessionResponse, error) {
|
||||
res, err := client.requestWithStr(ctx, "GET", uploadURL, "", 200)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
decodeErr error
|
||||
uploadSession UploadSessionResponse
|
||||
)
|
||||
decodeErr = json.Unmarshal([]byte(res), &uploadSession)
|
||||
if decodeErr != nil {
|
||||
return nil, decodeErr
|
||||
}
|
||||
|
||||
return &uploadSession, nil
|
||||
}
|
||||
|
||||
// UploadChunk PUTs one chunk to the upload-session URL with the appropriate
// Content-Range header. For the last chunk the response body is not decoded
// and (nil, nil) is returned; for intermediate chunks the decoded session
// status is returned. The Authorization and Content-Type headers are
// stripped for the session URL.
func (client *client) UploadChunk(ctx context.Context, uploadURL string, content io.Reader, current *chunk.ChunkGroup) (*UploadSessionResponse, error) {
	res, err := client.request(
		ctx, "PUT", uploadURL, content,
		request.WithContentLength(current.Length()),
		request.WithHeader(http.Header{
			"Content-Range": {current.RangeHeader()},
		}),
		request.WithoutHeader([]string{"Authorization", "Content-Type"}),
		// No timeout: large chunks may legitimately take a long time.
		request.WithTimeout(0),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to upload OneDrive chunk #%d: %w", current.Index(), err)
	}

	// Last chunk: the response is the final item, not a session status.
	if current.IsLast() {
		return nil, nil
	}

	var (
		decodeErr error
		uploadRes UploadSessionResponse
	)
	decodeErr = json.Unmarshal([]byte(res), &uploadRes)
	if decodeErr != nil {
		return nil, decodeErr
	}

	return &uploadRes, nil
}
|
||||
|
||||
// Upload stores the file at file.Props.SavePath. Files no larger than
// SmallFileSize go through the single-request upload endpoint; larger files
// use a chunked upload session, which is best-effort deleted if any chunk
// ultimately fails.
func (client *client) Upload(ctx context.Context, file *fs.UploadRequest) error {
	// Decide conflict behavior: replace when the overwrite mode flag is set.
	overwrite := "fail"
	if file.Mode&fs.ModeOverwrite == fs.ModeOverwrite {
		overwrite = "replace"
	}

	size := int(file.Props.Size)
	dst := file.Props.SavePath

	// Small file: use the simple upload endpoint.
	if size <= int(SmallFileSize) {
		_, err := client.SimpleUpload(ctx, dst, file, int64(size), WithConflictBehavior(overwrite))
		return err
	}

	// Large file: create a resumable upload session.
	uploadURL, err := client.CreateUploadSession(ctx, dst, WithConflictBehavior(overwrite))
	if err != nil {
		return err
	}

	// Initialize chunk groups with retry/backoff and optional disk buffering.
	chunks := chunk.NewChunkGroup(file, client.chunkSize, &backoff.ConstantBackoff{
		Max:   client.settings.ChunkRetryLimit(ctx),
		Sleep: chunkRetrySleep,
	}, client.settings.UseChunkBuffer(ctx), client.l, client.settings.TempPath(ctx))

	uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
		_, err := client.UploadChunk(ctx, uploadURL, content, current)
		return err
	}

	// Upload chunks in order; on failure, best-effort delete the session.
	for chunks.Next() {
		if err := chunks.Process(uploadFunc); err != nil {
			if err := client.DeleteUploadSession(ctx, uploadURL); err != nil {
				client.l.Warning("Failed to delete upload session: %s", err)
			}
			return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
		}
	}

	return nil
}
|
||||
|
||||
// DeleteUploadSession 删除上传会话
|
||||
func (client *client) DeleteUploadSession(ctx context.Context, uploadURL string) error {
|
||||
_, err := client.requestWithStr(ctx, "DELETE", uploadURL, "", 204)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SimpleUpload 上传小文件到dst
|
||||
func (client *client) SimpleUpload(ctx context.Context, dst string, body io.Reader, size int64, opts ...Option) (*UploadResult, error) {
|
||||
options := newDefaultOption()
|
||||
for _, o := range opts {
|
||||
o.apply(options)
|
||||
}
|
||||
|
||||
dst = strings.TrimPrefix(dst, "/")
|
||||
requestURL := client.getRequestURL("root:/" + dst + ":/content")
|
||||
requestURL += ("?@microsoft.graph.conflictBehavior=" + options.conflictBehavior)
|
||||
|
||||
res, err := client.request(ctx, "PUT", requestURL, body, request.WithContentLength(int64(size)),
|
||||
request.WithTimeout(0),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
decodeErr error
|
||||
uploadRes UploadResult
|
||||
)
|
||||
decodeErr = json.Unmarshal([]byte(res), &uploadRes)
|
||||
if decodeErr != nil {
|
||||
return nil, decodeErr
|
||||
}
|
||||
|
||||
return &uploadRes, nil
|
||||
}
|
||||
|
||||
// BatchDelete 并行删除给出的文件,返回删除失败的文件,及第一个遇到的错误。此方法将文件分为
|
||||
// 20个一组,调用Delete并行删除
|
||||
func (client *client) BatchDelete(ctx context.Context, dst []string) ([]string, error) {
|
||||
groupNum := len(dst)/20 + 1
|
||||
finalRes := make([]string, 0, len(dst))
|
||||
res := make([]string, 0, 20)
|
||||
var err error
|
||||
|
||||
for i := 0; i < groupNum; i++ {
|
||||
end := 20*i + 20
|
||||
if i == groupNum-1 {
|
||||
end = len(dst)
|
||||
}
|
||||
|
||||
client.l.Debug("Delete file group: %v.", dst[20*i:end])
|
||||
res, err = client.Delete(ctx, dst[20*i:end])
|
||||
finalRes = append(finalRes, res...)
|
||||
}
|
||||
|
||||
return finalRes, err
|
||||
}
|
||||
|
||||
// Delete 并行删除文件,返回删除失败的文件,及第一个遇到的错误,
|
||||
// 由于API限制,最多删除20个
|
||||
func (client *client) Delete(ctx context.Context, dst []string) ([]string, error) {
|
||||
body := client.makeBatchDeleteRequestsBody(dst)
|
||||
res, err := client.requestWithStr(ctx, "POST", client.getRequestURL("$batch",
|
||||
WithDriverResource(false)), body, 200)
|
||||
if err != nil {
|
||||
return dst, err
|
||||
}
|
||||
|
||||
var (
|
||||
decodeErr error
|
||||
deleteRes BatchResponses
|
||||
)
|
||||
decodeErr = json.Unmarshal([]byte(res), &deleteRes)
|
||||
if decodeErr != nil {
|
||||
return dst, decodeErr
|
||||
}
|
||||
|
||||
// 取得删除失败的文件
|
||||
failed := getDeleteFailed(&deleteRes)
|
||||
if len(failed) != 0 {
|
||||
return failed, ErrDeleteFile
|
||||
}
|
||||
return failed, nil
|
||||
}
|
||||
|
||||
func getDeleteFailed(res *BatchResponses) []string {
|
||||
var failed = make([]string, 0, len(res.Responses))
|
||||
for _, v := range res.Responses {
|
||||
if v.Status != 204 && v.Status != 404 {
|
||||
failed = append(failed, v.ID)
|
||||
}
|
||||
}
|
||||
return failed
|
||||
}
|
||||
|
||||
// makeBatchDeleteRequestsBody 生成批量删除请求正文
|
||||
func (client *client) makeBatchDeleteRequestsBody(files []string) string {
|
||||
req := BatchRequests{
|
||||
Requests: make([]BatchRequest, len(files)),
|
||||
}
|
||||
for i, v := range files {
|
||||
v = strings.TrimPrefix(v, "/")
|
||||
filePath, _ := url.Parse("/" + client.endpoints.driverResource + "/root:/")
|
||||
filePath.Path = path.Join(filePath.Path, v)
|
||||
req.Requests[i] = BatchRequest{
|
||||
ID: v,
|
||||
Method: "DELETE",
|
||||
URL: filePath.EscapedPath(),
|
||||
}
|
||||
}
|
||||
|
||||
res, _ := json.Marshal(req)
|
||||
return string(res)
|
||||
}
|
||||
|
||||
// GetThumbURL 获取给定尺寸的缩略图URL
|
||||
func (client *client) GetThumbURL(ctx context.Context, dst string) (string, error) {
|
||||
dst = strings.TrimPrefix(dst, "/")
|
||||
requestURL := client.getRequestURL("root:/"+dst+":/thumbnails/0") + "/large"
|
||||
|
||||
res, err := client.requestWithStr(ctx, "GET", requestURL, "", 200)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var (
|
||||
decodeErr error
|
||||
thumbRes ThumbResponse
|
||||
)
|
||||
decodeErr = json.Unmarshal([]byte(res), &thumbRes)
|
||||
if decodeErr != nil {
|
||||
return "", decodeErr
|
||||
}
|
||||
|
||||
if thumbRes.URL != "" {
|
||||
return thumbRes.URL, nil
|
||||
}
|
||||
|
||||
if len(thumbRes.Value) == 1 {
|
||||
if res, ok := thumbRes.Value[0]["large"]; ok {
|
||||
return res.(map[string]interface{})["url"].(string), nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", ErrThumbSizeNotFound
|
||||
}
|
||||
|
||||
func sysError(err error) *RespError {
|
||||
return &RespError{APIError: APIError{
|
||||
Code: "system",
|
||||
Message: err.Error(),
|
||||
}}
|
||||
}
|
||||
|
||||
// request sends an authenticated Graph API request and returns the response
// body as a string. 2xx responses return the body directly; other statuses
// are decoded into a RespError, with 429 additionally wrapped into a
// retryable error derived from the response headers. Transport and decode
// failures are wrapped via sysError.
func (client *client) request(ctx context.Context, method string, url string, body io.Reader, option ...request.Option) (string, error) {
	// Refresh the access token before the call.
	err := client.UpdateCredential(ctx)
	if err != nil {
		return "", sysError(err)
	}

	opts := []request.Option{
		request.WithHeader(http.Header{
			"Authorization": {"Bearer " + client.credential.String()},
			"Content-Type":  {"application/json"},
		}),
		request.WithContext(ctx),
		// Throttle per storage policy to stay under the configured TPS limit.
		request.WithTPSLimit(
			fmt.Sprintf("policy_%d", client.policy.ID),
			client.policy.Settings.TPSLimit,
			client.policy.Settings.TPSLimitBurst,
		),
	}

	// Send the request; per-call options are appended last so they can
	// override the defaults above.
	res := client.httpClient.Request(
		method,
		url,
		body,
		append(opts, option...)...,
	)

	if res.Err != nil {
		return "", sysError(res.Err)
	}

	respBody, err := res.GetResponse()
	if err != nil {
		return "", sysError(err)
	}

	// Decode the error envelope for non-2xx responses.
	var (
		errResp   RespError
		decodeErr error
	)
	if res.Response.StatusCode < 200 || res.Response.StatusCode >= 300 {
		decodeErr = json.Unmarshal([]byte(respBody), &errResp)
		if decodeErr != nil {
			client.l.Debug("Onedrive returns unknown response: %s", respBody)
			return "", sysError(decodeErr)
		}

		// 429: surface as a retryable error so the backoff layer can retry.
		if res.Response.StatusCode == 429 {
			client.l.Warning("OneDrive request is throttled.")
			return "", backoff.NewRetryableErrorFromHeader(&errResp, res.Response.Header)
		}

		return "", &errResp
	}

	return respBody, nil
}
|
||||
|
||||
func (client *client) requestWithStr(ctx context.Context, method string, url string, body string, expectedCode int) (string, error) {
|
||||
// 发送请求
|
||||
bodyReader := io.NopCloser(strings.NewReader(body))
|
||||
return client.request(ctx, method, url, bodyReader,
|
||||
request.WithContentLength(int64(len(body))),
|
||||
)
|
||||
}
|
||||
90
pkg/filemanager/driver/onedrive/client.go
Normal file
90
pkg/filemanager/driver/onedrive/client.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/credmanager"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
)
|
||||
|
||||
var (
	// ErrAuthEndpoint indicates the OAuth endpoint URL could not be parsed.
	ErrAuthEndpoint = errors.New("failed to parse endpoint url")
	// ErrInvalidRefreshToken indicates the policy holds no valid refresh token.
	ErrInvalidRefreshToken = errors.New("no valid refresh token in this policy")
	// ErrDeleteFile indicates one or more files could not be deleted.
	ErrDeleteFile = errors.New("cannot delete file")
	// ErrClientCanceled indicates the client canceled the operation.
	ErrClientCanceled = errors.New("client canceled")
	// ErrThumbSizeNotFound indicates the desired thumb size is not available.
	ErrThumbSizeNotFound = errors.New("thumb size not found")
)
|
||||
|
||||
// Client abstracts the OneDrive (Microsoft Graph) operations used by the
// driver: listing, metadata, simple and session-based uploads, batched
// deletion, thumbnails, and the OAuth token flow.
type Client interface {
	ListChildren(ctx context.Context, path string) ([]FileInfo, error)
	Meta(ctx context.Context, id string, path string) (*FileInfo, error)
	CreateUploadSession(ctx context.Context, dst string, opts ...Option) (string, error)
	GetSiteIDByURL(ctx context.Context, siteUrl string) (string, error)
	GetUploadSessionStatus(ctx context.Context, uploadURL string) (*UploadSessionResponse, error)
	Upload(ctx context.Context, file *fs.UploadRequest) error
	SimpleUpload(ctx context.Context, dst string, body io.Reader, size int64, opts ...Option) (*UploadResult, error)
	DeleteUploadSession(ctx context.Context, uploadURL string) error
	BatchDelete(ctx context.Context, dst []string) ([]string, error)
	GetThumbURL(ctx context.Context, dst string) (string, error)
	OAuthURL(ctx context.Context, scopes []string) string
	ObtainToken(ctx context.Context, opts ...Option) (*Credential, error)
}
|
||||
|
||||
// client is the concrete OneDrive client backed by the Microsoft Graph API.
type client struct {
	endpoints  *endpoints
	policy     *ent.StoragePolicy
	credential credmanager.Credential // current access credential used for Authorization headers

	httpClient request.Client
	cred       credmanager.CredManager
	l          logging.Logger
	settings   setting.Provider

	chunkSize int64 // chunk size for session-based uploads
}
|
||||
|
||||
// endpoints groups the API and OAuth endpoints used by the client.
type endpoints struct {
	oAuthEndpoints *oauthEndpoint
	endpointURL    string // base URL for API requests
	driverResource string // drive resource to use, e.g. "me/drive"
}
|
||||
|
||||
// NewClient 根据存储策略获取新的client
|
||||
func NewClient(policy *ent.StoragePolicy, httpClient request.Client, cred credmanager.CredManager,
|
||||
l logging.Logger, settings setting.Provider, chunkSize int64) Client {
|
||||
client := &client{
|
||||
endpoints: &endpoints{
|
||||
endpointURL: policy.Server,
|
||||
driverResource: policy.Settings.OdDriver,
|
||||
},
|
||||
policy: policy,
|
||||
httpClient: httpClient,
|
||||
cred: cred,
|
||||
l: l,
|
||||
settings: settings,
|
||||
chunkSize: chunkSize,
|
||||
}
|
||||
|
||||
if client.endpoints.driverResource == "" {
|
||||
client.endpoints.driverResource = "me/drive"
|
||||
}
|
||||
|
||||
oauthBase := getOAuthEndpoint(policy.Server)
|
||||
client.endpoints.oAuthEndpoints = oauthBase
|
||||
|
||||
return client
|
||||
}
|
||||
271
pkg/filemanager/driver/onedrive/oauth.go
Normal file
271
pkg/filemanager/driver/onedrive/oauth.go
Normal file
@@ -0,0 +1,271 @@
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/gob"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cloudreve/Cloudreve/v4/application/dependency"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/credmanager"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
const (
	// AccessTokenExpiryMargin (seconds) is subtracted from the token expiry
	// in Credential.Expiry so tokens are refreshed before they actually lapse.
	AccessTokenExpiryMargin = 600 // 10 minutes
)
|
||||
|
||||
// Error implements the error interface, returning the OAuth error description.
func (err OAuthError) Error() string {
	return err.ErrorDescription
}
|
||||
|
||||
// OAuthURL builds the OAuth authorization-page URL for this policy. The
// OAuth client ID is stored in the policy's BucketName field, and the policy
// ID is carried through the state parameter.
func (client *client) OAuthURL(ctx context.Context, scope []string) string {
	query := url.Values{
		"client_id":     {client.policy.BucketName},
		"scope":         {strings.Join(scope, " ")},
		"response_type": {"code"},
		"redirect_uri":  {client.policy.Settings.OauthRedirect},
		"state":         {strconv.Itoa(client.policy.ID)},
	}
	// NOTE(review): this mutates the shared authorize endpoint URL in place —
	// confirm the client is never used concurrently.
	client.endpoints.oAuthEndpoints.authorize.RawQuery = query.Encode()
	return client.endpoints.oAuthEndpoints.authorize.String()
}
|
||||
|
||||
// getOAuthEndpoint gets OAuth endpoints from API endpoint
|
||||
func getOAuthEndpoint(apiEndpoint string) *oauthEndpoint {
|
||||
base, err := url.Parse(apiEndpoint)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
var (
|
||||
token *url.URL
|
||||
authorize *url.URL
|
||||
)
|
||||
switch base.Host {
|
||||
//case "login.live.com":
|
||||
// token, _ = url.Parse("https://login.live.com/oauth20_token.srf")
|
||||
// authorize, _ = url.Parse("https://login.live.com/oauth20_authorize.srf")
|
||||
case "microsoftgraph.chinacloudapi.cn":
|
||||
token, _ = url.Parse("https://login.chinacloudapi.cn/common/oauth2/v2.0/token")
|
||||
authorize, _ = url.Parse("https://login.chinacloudapi.cn/common/oauth2/v2.0/authorize")
|
||||
default:
|
||||
token, _ = url.Parse("https://login.microsoftonline.com/common/oauth2/v2.0/token")
|
||||
authorize, _ = url.Parse("https://login.microsoftonline.com/common/oauth2/v2.0/authorize")
|
||||
}
|
||||
|
||||
return &oauthEndpoint{
|
||||
token: *token,
|
||||
authorize: *authorize,
|
||||
}
|
||||
}
|
||||
|
||||
// Credential is the token payload returned by the OAuth token endpoint,
// persisted through the credential manager.
type Credential struct {
	// NOTE(review): the JSON tag maps this to the token endpoint's relative
	// "expires_in" seconds, yet Expiry treats it as an absolute Unix
	// timestamp — verify it is rewritten to an absolute time before storage.
	ExpiresIn    int64  `json:"expires_in"`
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`
	// RefreshedAtUnix is the Unix time of the last refresh (0 = never).
	RefreshedAtUnix int64 `json:"refreshed_at"`

	// PolicyID links the credential back to its storage policy.
	PolicyID int `json:"policy_id"`
}
|
||||
|
||||
// init registers Credential with gob so it can be serialized by the
// credential store.
func init() {
	gob.Register(Credential{})
}
|
||||
|
||||
// Refresh exchanges the stored refresh token for a fresh access token,
// persists the rotated refresh token back to the storage policy, and returns
// the updated credential (value receiver: the caller's copy is unchanged).
func (c Credential) Refresh(ctx context.Context) (credmanager.Credential, error) {
	if c.RefreshToken == "" {
		return nil, ErrInvalidRefreshToken
	}

	// Resolve the owning storage policy for client ID/secret and endpoints.
	dep := dependency.FromContext(ctx)
	storagePolicyClient := dep.StoragePolicyClient()
	policy, err := storagePolicyClient.GetPolicyByID(ctx, c.PolicyID)
	if err != nil {
		return nil, fmt.Errorf("failed to get storage policy: %w", err)
	}

	oauthBase := getOAuthEndpoint(policy.Server)

	newCredential, err := obtainToken(ctx, &obtainTokenArgs{
		clientId:      policy.BucketName,
		redirect:      policy.Settings.OauthRedirect,
		secret:        policy.SecretKey,
		refreshToken:  c.RefreshToken,
		client:        dep.RequestClient(request.WithLogger(dep.Logger())),
		tokenEndpoint: oauthBase.token.String(),
		policyID:      c.PolicyID,
	})

	if err != nil {
		return nil, err
	}

	c.RefreshToken = newCredential.RefreshToken
	c.AccessToken = newCredential.AccessToken
	c.ExpiresIn = newCredential.ExpiresIn
	c.RefreshedAtUnix = time.Now().Unix()

	// Persist the rotated refresh token to the database.
	if err := storagePolicyClient.UpdateAccessKey(ctx, policy, newCredential.RefreshToken); err != nil {
		return nil, err
	}

	return c, nil
}
|
||||
|
||||
func (c Credential) Key() string {
|
||||
return CredentialKey(c.PolicyID)
|
||||
}
|
||||
|
||||
func (c Credential) Expiry() time.Time {
|
||||
return time.Unix(c.ExpiresIn-AccessTokenExpiryMargin, 0)
|
||||
}
|
||||
|
||||
func (c Credential) String() string {
|
||||
return c.AccessToken
|
||||
}
|
||||
|
||||
func (c Credential) RefreshedAt() *time.Time {
|
||||
if c.RefreshedAtUnix == 0 {
|
||||
return nil
|
||||
}
|
||||
refreshedAt := time.Unix(c.RefreshedAtUnix, 0)
|
||||
return &refreshedAt
|
||||
}
|
||||
|
||||
// ObtainToken exchanges an authorization code or refresh token (selected via
// WithCode / WithRefreshToken options) for a new access token.
func (client *client) ObtainToken(ctx context.Context, opts ...Option) (*Credential, error) {
	options := newDefaultOption()
	for _, o := range opts {
		o.apply(options)
	}

	return obtainToken(ctx, &obtainTokenArgs{
		clientId:      client.policy.BucketName,
		redirect:      client.policy.Settings.OauthRedirect,
		secret:        client.policy.SecretKey,
		code:          options.code,
		refreshToken:  options.refreshToken,
		client:        client.httpClient,
		tokenEndpoint: client.endpoints.oAuthEndpoints.token.String(),
		policyID:      client.policy.ID,
	})
}
|
||||
|
||||
// obtainTokenArgs bundles the parameters of a token-endpoint exchange.
type obtainTokenArgs struct {
	clientId      string // OAuth application (client) ID; callers pass policy.BucketName
	redirect      string // registered redirect URI
	secret        string // client secret
	code          string // authorization code; empty when refreshing
	refreshToken  string // used when code is empty
	client        request.Client
	tokenEndpoint string // absolute token endpoint URL
	policyID      int    // storage policy the resulting credential belongs to
}
|
||||
|
||||
// obtainToken fetches a new access token from the Microsoft identity
// platform token endpoint, using the authorization-code grant when
// args.code is set and the refresh-token grant otherwise.
func obtainToken(ctx context.Context, args *obtainTokenArgs) (*Credential, error) {
	// Form-encoded body per the OAuth 2.0 token endpoint contract.
	body := url.Values{
		"client_id":     {args.clientId},
		"redirect_uri":  {args.redirect},
		"client_secret": {args.secret},
	}
	if args.code != "" {
		body.Add("grant_type", "authorization_code")
		body.Add("code", args.code)
	} else {
		body.Add("grant_type", "refresh_token")
		body.Add("refresh_token", args.refreshToken)
	}
	strBody := body.Encode()

	res := args.client.Request(
		"POST",
		args.tokenEndpoint,
		io.NopCloser(strings.NewReader(strBody)),
		request.WithHeader(http.Header{
			"Content-Type": {"application/x-www-form-urlencoded"}},
		),
		request.WithContentLength(int64(len(strBody))),
		request.WithContext(ctx),
	)
	if res.Err != nil {
		return nil, res.Err
	}

	respBody, err := res.GetResponse()
	if err != nil {
		return nil, err
	}

	var (
		errResp    OAuthError
		credential Credential
		decodeErr  error
	)

	// Non-200 responses carry an OAuth error document instead of a credential.
	if res.Response.StatusCode != 200 {
		decodeErr = json.Unmarshal([]byte(respBody), &errResp)
	} else {
		decodeErr = json.Unmarshal([]byte(respBody), &credential)
	}
	if decodeErr != nil {
		return nil, decodeErr
	}

	if errResp.ErrorType != "" {
		return nil, errResp
	}

	credential.PolicyID = args.policyID
	// Convert the endpoint's relative "expires_in" seconds to an absolute
	// Unix expiry timestamp.
	credential.ExpiresIn = time.Now().Unix() + credential.ExpiresIn
	if args.code != "" {
		// NOTE(review): code-grant credentials are marked as already expired
		// (now-10), presumably so the credential manager refreshes them on
		// first use — confirm intent against the credmanager flow.
		credential.ExpiresIn = time.Now().Unix() - 10
	}
	return &credential, nil
}
|
||||
|
||||
// UpdateCredential fetches a (possibly refreshed) credential from the
// credential manager and stores it on the client for subsequent requests.
func (client *client) UpdateCredential(ctx context.Context) error {
	newCred, err := client.cred.Obtain(ctx, CredentialKey(client.policy.ID))
	if err != nil {
		return fmt.Errorf("failed to obtain token from CredManager: %w", err)
	}

	client.credential = newCred
	return nil
}
|
||||
|
||||
// RetrieveOneDriveCredentials retrieves OneDrive credentials from DB inventory.
// Every OneDrive policy's stored refresh token is wrapped as a credential with
// ExpiresIn = 0, so each one is treated as expired and refreshed on first use.
func RetrieveOneDriveCredentials(ctx context.Context, storagePolicyClient inventory.StoragePolicyClient) ([]credmanager.Credential, error) {
	odPolicies, err := storagePolicyClient.ListPolicyByType(ctx, types.PolicyTypeOd)
	if err != nil {
		return nil, fmt.Errorf("failed to list OneDrive policies: %w", err)
	}

	return lo.Map(odPolicies, func(item *ent.StoragePolicy, index int) credmanager.Credential {
		return &Credential{
			PolicyID:     item.ID,
			ExpiresIn:    0,
			RefreshToken: item.AccessKey,
		}
	}), nil
}
|
||||
|
||||
// CredentialKey builds the cache key under which the OneDrive credential
// for the given storage policy is stored.
func CredentialKey(policyId int) string {
	const keyFormat = "cred_od_%d"
	return fmt.Sprintf(keyFormat, policyId)
}
|
||||
247
pkg/filemanager/driver/onedrive/onedrive.go
Normal file
247
pkg/filemanager/driver/onedrive/onedrive.go
Normal file
@@ -0,0 +1,247 @@
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/credmanager"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Driver is the OneDrive storage adapter.
type Driver struct {
	policy   *ent.StoragePolicy
	client   Client // OneDrive API client
	settings setting.Provider
	config   conf.ConfigProvider
	l        logging.Logger
	// chunkSize is the chunked-upload slice size in bytes.
	chunkSize int64
}
|
||||
|
||||
var (
	// features advertises the static capabilities of this driver.
	features = &boolset.BooleanSet{}
)

const (
	// streamSaverParam is appended to download URLs so the frontend can
	// route the download through its stream-saver path.
	streamSaverParam = "stream_saver"
)

// init marks the capabilities requiring special handling by the file
// manager, e.g. running a sentinel check after upload completion.
func init() {
	boolset.Sets(map[driver.HandlerCapability]bool{
		driver.HandlerCapabilityUploadSentinelRequired: true,
	}, features)
}
|
||||
|
||||
// New initializes a OneDrive Driver from the given storage policy.
// A default chunk size of 50 MB is used when the policy does not set one.
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
	config conf.ConfigProvider, l logging.Logger, cred credmanager.CredManager) (*Driver, error) {
	chunkSize := policy.Settings.ChunkSize
	if policy.Settings.ChunkSize == 0 {
		chunkSize = 50 << 20 // 50MB
	}

	c := NewClient(policy, request.NewClient(config, request.WithLogger(l)), cred, l, settings, chunkSize)

	return &Driver{
		policy:    policy,
		client:    c,
		settings:  settings,
		l:         l,
		config:    config,
		chunkSize: chunkSize,
	}, nil
}
|
||||
|
||||
//// List 列取项目
|
||||
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) {
|
||||
// base = strings.TrimPrefix(base, "/")
|
||||
// // 列取子项目
|
||||
// objects, _ := handler.client.ListChildren(ctx, base)
|
||||
//
|
||||
// // 获取真实的列取起始根目录
|
||||
// rootPath := base
|
||||
// if realBase, ok := ctx.Value(fsctx.PathCtx).(string); ok {
|
||||
// rootPath = realBase
|
||||
// } else {
|
||||
// ctx = context.WithValue(ctx, fsctx.PathCtx, base)
|
||||
// }
|
||||
//
|
||||
// // 整理结果
|
||||
// res := make([]response.Object, 0, len(objects))
|
||||
// for _, object := range objects {
|
||||
// source := path.Join(base, object.Name)
|
||||
// rel, err := filepath.Rel(rootPath, source)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: object.Name,
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Source: source,
|
||||
// Size: uint64(object.Size),
|
||||
// IsDir: object.Folder != nil,
|
||||
// LastModify: time.Now(),
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// // 递归列取子目录
|
||||
// if recursive {
|
||||
// for _, object := range objects {
|
||||
// if object.Folder != nil {
|
||||
// sub, _ := handler.List(ctx, path.Join(base, object.Name), recursive)
|
||||
// res = append(res, sub...)
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// return res, nil
|
||||
//}
|
||||
|
||||
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
// Put uploads the request's stream to its target path on OneDrive.
// The request body is always closed before returning.
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
	defer file.Close()

	return handler.client.Upload(ctx, file)
}
|
||||
|
||||
// Delete removes one or more files in a batch. It returns the paths that
// could not be deleted together with the last error encountered.
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
	return handler.client.BatchDelete(ctx, files)
}
|
||||
|
||||
// Thumb 获取文件缩略图
|
||||
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
|
||||
res, err := handler.client.GetThumbURL(ctx, e.Source())
|
||||
if err != nil {
|
||||
var apiErr *RespError
|
||||
if errors.As(err, &apiErr); err == ErrThumbSizeNotFound || (apiErr != nil && apiErr.APIError.Code == notFoundError) {
|
||||
// OneDrive cannot generate thumbnail for this file
|
||||
return "", fmt.Errorf("thumb not supported in OneDrive: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Source returns a direct download URL for the entity. When the policy's
// stream-saver option is enabled and this is a download request, the display
// name is appended so the frontend can route through its stream-saver path.
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
	// Fetch fresh metadata; it carries a pre-signed download URL.
	res, err := handler.client.Meta(ctx, "", e.Source())
	if err != nil {
		return "", err
	}

	if args.IsDownload && handler.policy.Settings.StreamSaver {
		downloadUrl := res.DownloadURL + "&" + streamSaverParam + "=" + url.QueryEscape(args.DisplayName)
		return downloadUrl, nil
	}

	return res.DownloadURL, nil
}
|
||||
|
||||
// Token creates a OneDrive upload session and returns the credential
// (upload URL + chunk size) the client uses for chunked upload. It also
// installs the master/slave callback URL on the session.
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
	// Build the callback URL invoked after the upload finishes.
	siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
	uploadSession.Callback = routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeOd, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()

	// "fail" conflict behavior: refuse to overwrite an existing item.
	uploadURL, err := handler.client.CreateUploadSession(ctx, file.Props.SavePath, WithConflictBehavior("fail"))
	if err != nil {
		return nil, err
	}

	// Legacy upload monitor, kept disabled for reference:
	//go handler.client.MonitorUpload(uploadURL, uploadSession.Key, fileInfo.SavePath, fileInfo.Size, ttl)

	uploadSession.ChunkSize = handler.chunkSize
	uploadSession.UploadURL = uploadURL
	return &fs.UploadCredential{
		ChunkSize:  handler.chunkSize,
		UploadURLs: []string{uploadURL},
	}, nil
}
|
||||
|
||||
// CancelToken aborts an in-progress upload session. After a successful
// session deletion it uploads an empty placeholder to the target path so a
// client still holding the session URL cannot complete the upload.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	err := handler.client.DeleteUploadSession(ctx, uploadSession.UploadURL)
	// Create empty placeholder file to stop upload
	if err == nil {
		_, err := handler.client.SimpleUpload(ctx, uploadSession.Props.SavePath, strings.NewReader(""), 0, WithConflictBehavior("replace"))
		if err != nil {
			// Best-effort: a placeholder failure is logged, not returned
			// (the inner err intentionally shadows the outer nil err).
			handler.l.Warning("Failed to create placeholder file %q:%s", uploadSession.Props.SavePath, err)
		}
	}

	return err
}
|
||||
|
||||
// CompleteUpload validates the uploaded file once the client reports
// completion; it only runs when a sentinel task was scheduled. The remote
// size must match the expected size, with up to 1 MiB of slack for
// SharePoint-hosted drives.
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	if session.SentinelTaskID == 0 {
		return nil
	}

	// Make sure uploaded file size is correct
	res, err := handler.client.Meta(ctx, "", session.Props.SavePath)
	if err != nil {
		return fmt.Errorf("failed to get uploaded file size: %w", err)
	}

	isSharePoint := strings.Contains(handler.policy.Settings.OdDriver, "sharepoint.com") ||
		strings.Contains(handler.policy.Settings.OdDriver, "sharepoint.cn")
	sizeMismatch := res.Size != session.Props.Size
	// SharePoint adds metadata to Office documents so the remote size may
	// exceed the uploaded size; tolerate up to 1 MiB of inflation.
	// See: https://github.com/OneDrive/onedrive-api-docs/issues/935
	if isSharePoint && sizeMismatch && (res.Size > session.Props.Size) && (res.Size-session.Props.Size <= 1048576) {
		sizeMismatch = false
	}

	if sizeMismatch {
		return serializer.NewError(
			serializer.CodeMetaMismatch,
			fmt.Sprintf("File size not match, expected: %d, actual: %d", session.Props.Size, res.Size),
			nil,
		)
	}

	return nil
}
|
||||
|
||||
// Capabilities reports the driver's static features plus the thumbnail and
// media-meta settings taken from the storage policy.
func (handler *Driver) Capabilities() *driver.Capabilities {
	return &driver.Capabilities{
		StaticFeatures:      features,
		ThumbSupportedExts:  handler.policy.Settings.ThumbExts,
		ThumbSupportAllExts: handler.policy.Settings.ThumbSupportAllExts,
		ThumbMaxSize:        handler.policy.Settings.ThumbMaxSize,
		ThumbProxy:          handler.policy.Settings.ThumbGeneratorProxy,
		MediaMetaProxy:      handler.policy.Settings.MediaMetaGeneratorProxy,
	}
}
|
||||
|
||||
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
|
||||
return ""
|
||||
}
|
||||
59
pkg/filemanager/driver/onedrive/options.go
Normal file
59
pkg/filemanager/driver/onedrive/options.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package onedrive
|
||||
|
||||
import "time"
|
||||
|
||||
// Option customizes a OneDrive API request or token exchange.
type Option interface {
	apply(*options)
}
|
||||
|
||||
// options collects the mutable settings applied by Option values.
type options struct {
	redirect          string
	code              string // authorization code for token exchange
	refreshToken      string // refresh token for token exchange
	conflictBehavior  string // OneDrive name-conflict behavior ("fail", "replace", ...)
	expires           time.Time
	useDriverResource bool // address items via the drive resource
}
|
||||
|
||||
// optionFunc adapts an ordinary function to the Option interface.
type optionFunc func(*options)
||||
|
||||
// WithCode 设置接口Code
|
||||
func WithCode(t string) Option {
|
||||
return optionFunc(func(o *options) {
|
||||
o.code = t
|
||||
})
|
||||
}
|
||||
|
||||
// WithRefreshToken 设置接口RefreshToken
|
||||
func WithRefreshToken(t string) Option {
|
||||
return optionFunc(func(o *options) {
|
||||
o.refreshToken = t
|
||||
})
|
||||
}
|
||||
|
||||
// WithConflictBehavior 设置文件重名后的处理方式
|
||||
func WithConflictBehavior(t string) Option {
|
||||
return optionFunc(func(o *options) {
|
||||
o.conflictBehavior = t
|
||||
})
|
||||
}
|
||||
|
||||
// WithDriverResource sets whether requests are addressed via the drive
// resource (the original comment here was a copy-paste of
// WithConflictBehavior's; presumably this toggles drive vs. site resource
// paths — confirm against client usage).
func WithDriverResource(t bool) Option {
	return optionFunc(func(o *options) {
		o.useDriverResource = t
	})
}
|
||||
|
||||
// apply implements Option by invoking the wrapped function.
func (f optionFunc) apply(o *options) {
	f(o)
}
|
||||
|
||||
// newDefaultOption returns the default request options: fail on name
// conflict, address items via the drive resource, one-hour expiry.
func newDefaultOption() *options {
	return &options{
		conflictBehavior:  "fail",
		useDriverResource: true,
		expires:           time.Now().UTC().Add(time.Duration(1) * time.Hour),
	}
}
|
||||
130
pkg/filemanager/driver/onedrive/types.go
Normal file
130
pkg/filemanager/driver/onedrive/types.go
Normal file
@@ -0,0 +1,130 @@
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// RespError is the error envelope returned by the OneDrive API.
type RespError struct {
	APIError APIError `json:"error"`
}

// APIError is the error payload inside a RespError.
type APIError struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}
|
||||
|
||||
// UploadSessionResponse describes a chunked upload session.
type UploadSessionResponse struct {
	DataContext        string   `json:"@odata.context"`
	ExpirationDateTime string   `json:"expirationDateTime"`
	NextExpectedRanges []string `json:"nextExpectedRanges"`
	UploadURL          string   `json:"uploadUrl"`
}
|
||||
|
||||
// FileInfo is the metadata of a OneDrive item. Exactly one of File/Folder is
// non-nil depending on the item kind.
type FileInfo struct {
	Name            string          `json:"name"`
	Size            int64           `json:"size"`
	Image           imageInfo       `json:"image"`
	ParentReference parentReference `json:"parentReference"`
	// DownloadURL is a short-lived pre-signed direct download link.
	DownloadURL string  `json:"@microsoft.graph.downloadUrl"`
	File        *file   `json:"file"`
	Folder      *folder `json:"folder"`
}

// file holds file-specific facets of an item.
type file struct {
	MimeType string `json:"mimeType"`
}

// folder holds folder-specific facets of an item.
type folder struct {
	ChildCount int `json:"childCount"`
}

// imageInfo holds the pixel dimensions of an image item.
type imageInfo struct {
	Height int `json:"height"`
	Width  int `json:"width"`
}

// parentReference locates the parent of an item.
type parentReference struct {
	Path string `json:"path"`
	Name string `json:"name"`
	ID   string `json:"id"`
}
|
||||
|
||||
// UploadResult is the response to a completed upload.
type UploadResult struct {
	ID   string `json:"id"`
	Name string `json:"name"`
	Size uint64 `json:"size"`
}

// BatchRequests is a Graph $batch request envelope.
type BatchRequests struct {
	Requests []BatchRequest `json:"requests"`
}

// BatchRequest is a single sub-request inside a $batch call.
type BatchRequest struct {
	ID      string            `json:"id"`
	Method  string            `json:"method"`
	URL     string            `json:"url"`
	Body    interface{}       `json:"body,omitempty"`
	Headers map[string]string `json:"headers,omitempty"`
}

// BatchResponses is a Graph $batch response envelope.
type BatchResponses struct {
	Responses []BatchResponse `json:"responses"`
}

// BatchResponse is a single sub-response inside a $batch result.
type BatchResponse struct {
	ID     string `json:"id"`
	Status int    `json:"status"`
}
|
||||
|
||||
// ThumbResponse is the response of a thumbnail query.
type ThumbResponse struct {
	Value []map[string]interface{} `json:"value"`
	URL   string                   `json:"url"`
}

// ListResponse is the response of a children listing.
type ListResponse struct {
	Value   []FileInfo `json:"value"`
	Context string     `json:"@odata.context"`
}

// oauthEndpoint holds the token and authorize endpoint URLs of an OAuth
// deployment (global or sovereign cloud).
type oauthEndpoint struct {
	token     url.URL
	authorize url.URL
}

// OAuthError is the error document returned by the OAuth endpoints.
type OAuthError struct {
	ErrorType        string `json:"error"`
	ErrorDescription string `json:"error_description"`
	CorrelationID    string `json:"correlation_id"`
}

// Site describes a SharePoint site.
type Site struct {
	Description string `json:"description"`
	ID          string `json:"id"`
	Name        string `json:"name"`
	DisplayName string `json:"displayName"`
	WebUrl      string `json:"webUrl"`
}
|
||||
|
||||
// init registers Credential for gob encoding. The same registration also
// appears in this package's OAuth code; gob accepts re-registering the
// identical type under the same name, so the duplicate is harmless.
func init() {
	gob.Register(Credential{})
}
|
||||
|
||||
// Error 实现error接口
|
||||
func (err RespError) Error() string {
|
||||
return err.APIError.Message
|
||||
}
|
||||
122
pkg/filemanager/driver/oss/callback.go
Normal file
122
pkg/filemanager/driver/oss/callback.go
Normal file
@@ -0,0 +1,122 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/md5"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cache"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// pubKeyHeader carries the base64-encoded URL of OSS's signing public key.
	pubKeyHeader = "x-oss-pub-key-url"
	// Only key URLs under gosspublic.alicdn.com are trusted.
	pubKeyPrefix      = "http://gosspublic.alicdn.com/"
	pubKeyPrefixHttps = "https://gosspublic.alicdn.com/"
	// pubKeyCacheKey is where the fetched PEM key is cached (7 days).
	pubKeyCacheKey = "oss_public_key"
)
|
||||
|
||||
// GetPublicKey returns the public key OSS uses to sign callback requests,
// preferring a cached copy. The key URL is taken from the request's
// x-oss-pub-key-url header and must point at gosspublic.alicdn.com.
func GetPublicKey(r *http.Request, kv cache.Driver, client request.Client) ([]byte, error) {
	var pubKey []byte

	// Try the cache first.
	pub, exist := kv.Get(pubKeyCacheKey)
	if exist {
		// NOTE(review): assumes the cached value is always []byte; a value
		// of another type would panic here.
		return pub.([]byte), nil
	}

	// Decode the key URL from the request header.
	pubURL, err := base64.StdEncoding.DecodeString(r.Header.Get(pubKeyHeader))
	if err != nil {
		return pubKey, err
	}

	// Make sure the public key really is issued by OSS.
	if !strings.HasPrefix(string(pubURL), pubKeyPrefix) &&
		!strings.HasPrefix(string(pubURL), pubKeyPrefixHttps) {
		return pubKey, errors.New("public key url invalid")
	}

	// Fetch the PEM-encoded key.
	body, err := client.Request("GET", string(pubURL), nil).
		CheckHTTPResponse(200).
		GetResponse()
	if err != nil {
		return pubKey, err
	}

	// Cache for 7 days; a cache failure is non-fatal.
	_ = kv.Set(pubKeyCacheKey, []byte(body), 86400*7)

	return []byte(body), nil
}
|
||||
|
||||
func getRequestMD5(r *http.Request) ([]byte, error) {
|
||||
var byteMD5 []byte
|
||||
|
||||
// 获取请求正文
|
||||
body, err := io.ReadAll(r.Body)
|
||||
r.Body.Close()
|
||||
if err != nil {
|
||||
return byteMD5, err
|
||||
}
|
||||
r.Body = io.NopCloser(bytes.NewReader(body))
|
||||
|
||||
strURLPathDecode, err := url.PathUnescape(r.URL.Path)
|
||||
if err != nil {
|
||||
return byteMD5, err
|
||||
}
|
||||
|
||||
strAuth := fmt.Sprintf("%s\n%s", strURLPathDecode, string(body))
|
||||
md5Ctx := md5.New()
|
||||
md5Ctx.Write([]byte(strAuth))
|
||||
byteMD5 = md5Ctx.Sum(nil)
|
||||
|
||||
return byteMD5, nil
|
||||
}
|
||||
|
||||
// VerifyCallbackSignature authenticates an OSS callback request by checking
// its RSA(MD5) signature in the authorization header against the OSS
// callback public key.
func VerifyCallbackSignature(r *http.Request, kv cache.Driver, client request.Client) error {
	bytePublicKey, err := GetPublicKey(r, kv, client)
	if err != nil {
		return err
	}

	byteMD5, err := getRequestMD5(r)
	if err != nil {
		return err
	}

	strAuthorizationBase64 := r.Header.Get("authorization")
	if strAuthorizationBase64 == "" {
		return errors.New("no authorization field in Request header")
	}
	// A decode failure yields an empty signature, which fails verification below.
	authorization, _ := base64.StdEncoding.DecodeString(strAuthorizationBase64)

	pubBlock, _ := pem.Decode(bytePublicKey)
	if pubBlock == nil {
		return errors.New("pubBlock not exist")
	}
	pubInterface, err := x509.ParsePKIXPublicKey(pubBlock.Bytes)
	if (pubInterface == nil) || (err != nil) {
		return err
	}
	// NOTE(review): this assertion panics if the parsed key is not RSA;
	// OSS callback keys are presumably always RSA — confirm.
	pub := pubInterface.(*rsa.PublicKey)

	errorVerifyPKCS1v15 := rsa.VerifyPKCS1v15(pub, crypto.MD5, byteMD5, authorization)
	if errorVerifyPKCS1v15 != nil {
		return errorVerifyPKCS1v15
	}

	return nil
}
|
||||
359
pkg/filemanager/driver/oss/media.go
Normal file
359
pkg/filemanager/driver/oss/media.go
Normal file
@@ -0,0 +1,359 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"github.com/aliyun/aliyun-oss-go-sdk/oss"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/samber/lo"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// x-oss-process values used to query media information from OSS.
	imageInfoProcess = "image/info"
	videoInfoProcess = "video/info"
	audioInfoProcess = "audio/info"
	// mediaInfoTTL bounds the validity of signed media-info URLs.
	mediaInfoTTL = time.Duration(10) * time.Minute
)
|
||||
|
||||
var (
	// Extensions whose media info can be extracted via OSS processing.
	supportedImageExt = []string{"jpg", "jpeg", "png", "gif", "bmp", "webp", "tiff", "heic", "heif"}
	supportedAudioExt = []string{"mp3", "wav", "flac", "aac", "m4a", "ogg", "wma", "ape", "alac", "amr", "opus"}
	// Fix: the original list contained duplicate entries ("avi" and "m4v"
	// each appeared twice); duplicates removed, membership unchanged.
	supportedVideoExt = []string{"mp4", "mkv", "avi", "mov", "flv", "wmv", "rmvb", "webm", "3gp", "mpg", "mpeg", "m4v", "ts", "m3u8", "vob", "f4v", "rm", "asf", "divx", "ogv", "dat", "mts", "m2ts", "swf", "3g2", "m2v", "m4p", "m4b", "m4r", "m4a"}
)
|
||||
|
||||
type (
	// ImageProp wraps a single string-valued EXIF property from image/info.
	ImageProp struct {
		Value string `json:"value"`
	}
	// ImageInfo maps EXIF property names to their values.
	ImageInfo map[string]ImageProp

	// Error is the XML error document returned by OSS.
	Error struct {
		XMLName      xml.Name `xml:"Error"`
		Text         string   `xml:",chardata"`
		Code         string   `xml:"Code"`
		Message      string   `xml:"Message"`
		RequestId    string   `xml:"RequestId"`
		HostId       string   `xml:"HostId"`
		EC           string   `xml:"EC"`
		RecommendDoc string   `xml:"RecommendDoc"`
	}

	// StreamMediaInfo is the video/audio info document from OSS IMM.
	StreamMediaInfo struct {
		RequestID      string        `json:"RequestId"`
		Language       string        `json:"Language"`
		Title          string        `json:"Title"`
		VideoStreams   []VideoStream `json:"VideoStreams"`
		AudioStreams   []AudioStream `json:"AudioStreams"`
		Subtitles      []Subtitle    `json:"Subtitles"`
		StreamCount    int64         `json:"StreamCount"`
		ProgramCount   int64         `json:"ProgramCount"`
		FormatName     string        `json:"FormatName"`
		FormatLongName string        `json:"FormatLongName"`
		Size           int64         `json:"Size"`
		StartTime      float64       `json:"StartTime"`
		Bitrate        int64         `json:"Bitrate"`
		Artist         string        `json:"Artist"`
		AlbumArtist    string        `json:"AlbumArtist"`
		Composer       string        `json:"Composer"`
		Performer      string        `json:"Performer"`
		Album          string        `json:"Album"`
		Duration       float64       `json:"Duration"`
		ProduceTime    string        `json:"ProduceTime"`
		LatLong        string        `json:"LatLong"`
		VideoWidth     int64         `json:"VideoWidth"`
		VideoHeight    int64         `json:"VideoHeight"`
		Addresses      []Address     `json:"Addresses"`
	}

	// Address is a reverse-geocoded location attached to the media.
	Address struct {
		Language    string `json:"Language"`
		AddressLine string `json:"AddressLine"`
		Country     string `json:"Country"`
		Province    string `json:"Province"`
		City        string `json:"City"`
		District    string `json:"District"`
		Township    string `json:"Township"`
	}

	// AudioStream describes one audio track.
	AudioStream struct {
		Index          int     `json:"Index"`
		Language       string  `json:"Language"`
		CodecName      string  `json:"CodecName"`
		CodecLongName  string  `json:"CodecLongName"`
		CodecTimeBase  string  `json:"CodecTimeBase"`
		CodecTagString string  `json:"CodecTagString"`
		CodecTag       string  `json:"CodecTag"`
		TimeBase       string  `json:"TimeBase"`
		StartTime      float64 `json:"StartTime"`
		Duration       float64 `json:"Duration"`
		Bitrate        int64   `json:"Bitrate"`
		FrameCount     int64   `json:"FrameCount"`
		Lyric          string  `json:"Lyric"`
		SampleFormat   string  `json:"SampleFormat"`
		SampleRate     int64   `json:"SampleRate"`
		Channels       int64   `json:"Channels"`
		ChannelLayout  string  `json:"ChannelLayout"`
	}

	// Subtitle describes one subtitle track.
	Subtitle struct {
		Index          int64   `json:"Index"`
		Language       string  `json:"Language"`
		CodecName      string  `json:"CodecName"`
		CodecLongName  string  `json:"CodecLongName"`
		CodecTagString string  `json:"CodecTagString"`
		CodecTag       string  `json:"CodecTag"`
		StartTime      float64 `json:"StartTime"`
		Duration       float64 `json:"Duration"`
		Bitrate        int64   `json:"Bitrate"`
		Content        string  `json:"Content"`
		Width          int64   `json:"Width"`
		Height         int64   `json:"Height"`
	}

	// VideoStream describes one video track.
	VideoStream struct {
		Index              int     `json:"Index"`
		Language           string  `json:"Language"`
		CodecName          string  `json:"CodecName"`
		CodecLongName      string  `json:"CodecLongName"`
		Profile            string  `json:"Profile"`
		CodecTimeBase      string  `json:"CodecTimeBase"`
		CodecTagString     string  `json:"CodecTagString"`
		CodecTag           string  `json:"CodecTag"`
		Width              int     `json:"Width"`
		Height             int     `json:"Height"`
		HasBFrames         int     `json:"HasBFrames"`
		SampleAspectRatio  string  `json:"SampleAspectRatio"`
		DisplayAspectRatio string  `json:"DisplayAspectRatio"`
		PixelFormat        string  `json:"PixelFormat"`
		Level              int     `json:"Level"`
		FrameRate          string  `json:"FrameRate"`
		AverageFrameRate   string  `json:"AverageFrameRate"`
		TimeBase           string  `json:"TimeBase"`
		StartTime          float64 `json:"StartTime"`
		Duration           float64 `json:"Duration"`
		Bitrate            int64   `json:"Bitrate"`
		FrameCount         int64   `json:"FrameCount"`
		Rotate             string  `json:"Rotate"`
		BitDepth           int     `json:"BitDepth"`
		ColorSpace         string  `json:"ColorSpace"`
		ColorRange         string  `json:"ColorRange"`
		ColorTransfer      string  `json:"ColorTransfer"`
		ColorPrimaries     string  `json:"ColorPrimaries"`
	}
)
|
||||
|
||||
// extractIMMMeta extracts stream/format metadata for audio or video objects
// via OSS processing (category is videoInfoProcess or audioInfoProcess) and
// converts it into generic driver.MediaMeta entries.
func (handler *Driver) extractIMMMeta(ctx context.Context, path, category string) ([]driver.MediaMeta, error) {
	resp, err := handler.extractMediaInfo(ctx, path, category, true)
	if err != nil {
		return nil, err
	}

	var info StreamMediaInfo
	if err := json.Unmarshal([]byte(resp), &info); err != nil {
		return nil, fmt.Errorf("failed to unmarshal media info: %w", err)
	}

	// Map video streams into the common stream representation.
	streams := lo.Map(info.VideoStreams, func(stream VideoStream, index int) mediameta.Stream {
		bitrate := ""
		if stream.Bitrate != 0 {
			bitrate = strconv.FormatInt(stream.Bitrate, 10)
		}
		return mediameta.Stream{
			Index:         stream.Index,
			CodecName:     stream.CodecName,
			CodecLongName: stream.CodecLongName,
			CodecType:     "video",
			Width:         stream.Width,
			Height:        stream.Height,
			Duration:      strconv.FormatFloat(stream.Duration, 'f', -1, 64),
			Bitrate:       bitrate,
		}
	})
	// Append audio streams after the video ones.
	streams = append(streams, lo.Map(info.AudioStreams, func(stream AudioStream, index int) mediameta.Stream {
		bitrate := ""
		if stream.Bitrate != 0 {
			bitrate = strconv.FormatInt(stream.Bitrate, 10)
		}
		return mediameta.Stream{
			Index:         stream.Index,
			CodecName:     stream.CodecName,
			CodecLongName: stream.CodecLongName,
			CodecType:     "audio",
			Duration:      strconv.FormatFloat(stream.Duration, 'f', -1, 64),
			Bitrate:       bitrate,
		}
	})...)

	metas := make([]driver.MediaMeta, 0)
	// Container-level format info goes through the ffprobe-style mapper.
	metas = append(metas, mediameta.ProbeMetaTransform(&mediameta.FFProbeMeta{
		Format: &mediameta.Format{
			FormatName:     info.FormatName,
			FormatLongName: info.FormatLongName,
			Duration:       strconv.FormatFloat(info.Duration, 'f', -1, 64),
			Bitrate:        strconv.FormatInt(info.Bitrate, 10),
		},
		Streams: streams,
	})...)

	// Music tags are attached individually when present.
	if info.Artist != "" {
		metas = append(metas, driver.MediaMeta{
			Key:   mediameta.MusicArtist,
			Value: info.Artist,
			Type:  driver.MediaTypeMusic,
		})
	}

	if info.AlbumArtist != "" {
		metas = append(metas, driver.MediaMeta{
			Key:   mediameta.MusicAlbumArtists,
			Value: info.AlbumArtist,
			Type:  driver.MediaTypeMusic,
		})
	}

	if info.Composer != "" {
		metas = append(metas, driver.MediaMeta{
			Key:   mediameta.MusicComposer,
			Value: info.Composer,
			Type:  driver.MediaTypeMusic,
		})
	}

	if info.Album != "" {
		metas = append(metas, driver.MediaMeta{
			Key:   mediameta.MusicAlbum,
			Value: info.Album,
			Type:  driver.MediaTypeMusic,
		})
	}

	return metas, nil
}
|
||||
|
||||
// extractImageMeta extracts EXIF and GPS metadata for an image object via
// the image/info process and tags every resulting entry as EXIF metadata.
func (handler *Driver) extractImageMeta(ctx context.Context, path string) ([]driver.MediaMeta, error) {
	resp, err := handler.extractMediaInfo(ctx, path, imageInfoProcess, false)
	if err != nil {
		return nil, err
	}

	var imageInfo ImageInfo
	if err := json.Unmarshal([]byte(resp), &imageInfo); err != nil {
		return nil, fmt.Errorf("failed to unmarshal media info: %w", err)
	}

	metas := make([]driver.MediaMeta, 0)
	// Flatten {"Key": {"value": "..."}} into a plain map for the EXIF parser.
	exifMap := lo.MapEntries(imageInfo, func(key string, value ImageProp) (string, string) {
		return key, value.Value
	})
	metas = append(metas, mediameta.ExtractExifMap(exifMap, time.Time{})...)
	metas = append(metas, parseGpsInfo(imageInfo)...)
	for i := 0; i < len(metas); i++ {
		metas[i].Type = driver.MetaTypeExif
	}

	return metas, nil
}
|
||||
|
||||
// extractMediaInfo sends API calls to the OSS IMM service to extract media
// info. category is the x-oss-process value (e.g. image/video/audio info
// process); forceSign forces a signed URL even for public buckets. The raw
// response body is returned as a string.
func (handler *Driver) extractMediaInfo(ctx context.Context, path string, category string, forceSign bool) (string, error) {
	mediaOption := []oss.Option{oss.Process(category)}
	// Short-lived signed URL; the request is issued immediately below.
	mediaInfoExpire := time.Now().Add(mediaInfoTTL)
	thumbURL, err := handler.signSourceURL(
		ctx,
		path,
		&mediaInfoExpire,
		mediaOption,
		forceSign,
	)
	if err != nil {
		return "", fmt.Errorf("failed to sign media info url: %w", err)
	}

	resp, err := handler.httpClient.
		Request(http.MethodGet, thumbURL, nil, request.WithContext(ctx)).
		CheckHTTPResponse(http.StatusOK).
		GetResponseIgnoreErr()
	if err != nil {
		// The body may carry an OSS XML error payload with details.
		return "", handleOssError(resp, err)
	}

	return resp, nil
}
|
||||
|
||||
func parseGpsInfo(imageInfo ImageInfo) []driver.MediaMeta {
|
||||
latitude := imageInfo["GPSLatitude"] // 31deg 16.26808'
|
||||
longitude := imageInfo["GPSLongitude"] // 120deg 42.91039'
|
||||
latRef := imageInfo["GPSLatitudeRef"] // North
|
||||
lonRef := imageInfo["GPSLongitudeRef"] // East
|
||||
|
||||
// Make sure all value exist in map
|
||||
if latitude.Value == "" || longitude.Value == "" || latRef.Value == "" || lonRef.Value == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
lat := parseRawGPS(latitude.Value, latRef.Value)
|
||||
lon := parseRawGPS(longitude.Value, lonRef.Value)
|
||||
if !math.IsNaN(lat) && !math.IsNaN(lon) {
|
||||
lat, lng := mediameta.NormalizeGPS(lat, lon)
|
||||
return []driver.MediaMeta{{
|
||||
Key: mediameta.GpsLat,
|
||||
Value: fmt.Sprintf("%f", lat),
|
||||
}, {
|
||||
Key: mediameta.GpsLng,
|
||||
Value: fmt.Sprintf("%f", lng),
|
||||
}}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseRawGPS parses a coordinate like "31deg 16.26808'" (optionally with a
// seconds component, e.g. `10deg 30' 36"`) into decimal degrees. The sign is
// negated for southern/western references; unparsable components count as 0.
func parseRawGPS(gpsStr string, ref string) float64 {
	fields := strings.Split(gpsStr, " ")
	if len(fields) < 1 {
		return 0
	}

	// component parses fields[idx] after stripping its unit suffix,
	// yielding 0 when the field is absent or malformed.
	component := func(idx int, suffix string) float64 {
		if idx >= len(fields) {
			return 0
		}
		v, _ := strconv.ParseFloat(strings.TrimSuffix(fields[idx], suffix), 64)
		return v
	}

	decimal := component(0, "deg") + component(1, "'")/60.0 + component(2, "\"")/3600.0

	if ref == "South" || ref == "West" {
		return -decimal
	}

	return decimal
}
|
||||
|
||||
func handleOssError(resp string, originErr error) error {
|
||||
if resp == "" {
|
||||
return originErr
|
||||
}
|
||||
|
||||
var err Error
|
||||
if err := xml.Unmarshal([]byte(resp), &err); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal oss error: %w", err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("oss error: %s", err.Message)
|
||||
}
|
||||
548
pkg/filemanager/driver/oss/oss.go
Normal file
548
pkg/filemanager/driver/oss/oss.go
Normal file
@@ -0,0 +1,548 @@
|
||||
package oss
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aliyun/aliyun-oss-go-sdk/oss"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/samber/lo"
|
||||
)
|
||||
|
||||
// UploadPolicy is the policy document used to authorize web-form
// (PostObject) uploads to Aliyun OSS.
type UploadPolicy struct {
	Expiration string        `json:"expiration"` // time after which the policy becomes invalid
	Conditions []interface{} `json:"conditions"` // restrictions applied to the upload
}
|
||||
|
||||
// CallbackPolicy describes the callback OSS performs against Cloudreve after
// a client-side upload completes.
type CallbackPolicy struct {
	CallbackURL      string `json:"callbackUrl"`      // endpoint OSS calls back
	CallbackBody     string `json:"callbackBody"`     // body template using OSS magic variables
	CallbackBodyType string `json:"callbackBodyType"` // MIME type of the callback body
	CallbackSNI      bool   `json:"callbackSNI"`      // send SNI when calling back over TLS
}
|
||||
|
||||
// Driver is the storage policy adapter for Aliyun OSS.
type Driver struct {
	policy *ent.StoragePolicy

	client     *oss.Client // account-level OSS client
	bucket     *oss.Bucket // handle bound to the policy's bucket
	settings   setting.Provider
	l          logging.Logger
	config     conf.ConfigProvider
	mime       mime.MimeDetector
	httpClient request.Client

	// chunkSize is the part size used for multipart uploads.
	chunkSize int64
}
|
||||
|
||||
// key is a private type for context keys defined in this package.
type key int

const (
	// chunkRetrySleep is the pause between retries of a failed chunk upload.
	chunkRetrySleep = time.Duration(5) * time.Second

	// Query parameter / header names used by the OSS multipart upload API.
	uploadIdParam     = "uploadId"
	partNumberParam   = "partNumber"
	callbackParam     = "callback"
	completeAllHeader = "x-oss-complete-all"

	// maxDeleteBatch is the maximum number of objects per DeleteObjects call.
	maxDeleteBatch = 1000

	// MultiPartUploadThreshold is the file size above which the server side
	// switches to multipart upload.
	MultiPartUploadThreshold int64 = 5 * (1 << 30) // 5GB
)

var (
	// features is the static feature set reported by Capabilities.
	features = &boolset.BooleanSet{}
)
|
||||
|
||||
// New constructs an OSS Driver from the given storage policy and initializes
// the underlying OSS client (preferring the server-side endpoint when one is
// configured).
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
	config conf.ConfigProvider, l logging.Logger, mime mime.MimeDetector) (*Driver, error) {
	// Fall back to a 25 MB chunk size when the policy does not specify one.
	chunkSize := policy.Settings.ChunkSize
	if policy.Settings.ChunkSize == 0 {
		chunkSize = 25 << 20 // 25 MB
	}

	driver := &Driver{
		policy:     policy,
		settings:   settings,
		chunkSize:  chunkSize,
		config:     config,
		l:          l,
		mime:       mime,
		httpClient: request.NewClient(config, request.WithLogger(l)),
	}

	return driver, driver.InitOSSClient(false)
}
|
||||
|
||||
// CORS applies a permissive cross-origin rule to the bucket so that browsers
// can upload directly with pre-signed requests.
func (handler *Driver) CORS() error {
	return handler.client.SetBucketCORS(handler.policy.BucketName, []oss.CORSRule{
		{
			AllowedOrigin: []string{"*"},
			AllowedMethod: []string{
				"GET",
				"POST",
				"PUT",
				"DELETE",
				"HEAD",
			},
			ExposeHeader:  []string{},
			AllowedHeader: []string{"*"},
			MaxAgeSeconds: 3600,
		},
	})
}
|
||||
|
||||
// InitOSSClient initializes the OSS client and bucket handles from the
// policy's credentials. When forceUsePublicEndpoint is true the public
// endpoint is used even if a server-side (internal) endpoint is configured.
func (handler *Driver) InitOSSClient(forceUsePublicEndpoint bool) error {
	if handler.policy == nil {
		return errors.New("empty policy")
	}

	opt := make([]oss.ClientOption, 0)

	// Decide whether to use the internal endpoint.
	endpoint := handler.policy.Server
	if handler.policy.Settings.ServerSideEndpoint != "" && !forceUsePublicEndpoint {
		endpoint = handler.policy.Settings.ServerSideEndpoint
	} else if handler.policy.Settings.UseCname {
		opt = append(opt, oss.UseCname(true))
	}

	// Default to HTTPS when no scheme is given.
	if !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
		endpoint = "https://" + endpoint
	}

	// Initialize the client.
	client, err := oss.New(endpoint, handler.policy.AccessKey, handler.policy.SecretKey, opt...)
	if err != nil {
		return err
	}
	handler.client = client

	// Initialize the bucket handle.
	bucket, err := client.Bucket(handler.policy.BucketName)
	if err != nil {
		return err
	}
	handler.bucket = bucket

	return nil
}
|
||||
|
||||
//// List 列出OSS上的文件
|
||||
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) {
|
||||
// // 列取文件
|
||||
// base = strings.TrimPrefix(base, "/")
|
||||
// if base != "" {
|
||||
// base += "/"
|
||||
// }
|
||||
//
|
||||
// var (
|
||||
// delimiter string
|
||||
// marker string
|
||||
// objects []oss.ObjectProperties
|
||||
// commons []string
|
||||
// )
|
||||
// if !recursive {
|
||||
// delimiter = "/"
|
||||
// }
|
||||
//
|
||||
// for {
|
||||
// subRes, err := handler.bucket.ListObjects(oss.Marker(marker), oss.Prefix(base),
|
||||
// oss.MaxKeys(1000), oss.Delimiter(delimiter))
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// objects = append(objects, subRes.Objects...)
|
||||
// commons = append(commons, subRes.CommonPrefixes...)
|
||||
// marker = subRes.NextMarker
|
||||
// if marker == "" {
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// // 处理列取结果
|
||||
// res := make([]response.Object, 0, len(objects)+len(commons))
|
||||
// // 处理目录
|
||||
// for _, object := range commons {
|
||||
// rel, err := filepath.Rel(base, object)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(object),
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: 0,
|
||||
// IsDir: true,
|
||||
// LastModify: time.Now(),
|
||||
// })
|
||||
// }
|
||||
// // 处理文件
|
||||
// for _, object := range objects {
|
||||
// rel, err := filepath.Rel(base, object.Key)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(object.Key),
|
||||
// Source: object.Key,
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: uint64(object.Size),
|
||||
// IsDir: false,
|
||||
// LastModify: object.LastModified,
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// return res, nil
|
||||
//}
|
||||
|
||||
// Open is not supported by the OSS driver; objects cannot be opened as
// local files.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	return nil, errors.New("not implemented")
}
|
||||
|
||||
// Put 将文件流保存到指定目录
|
||||
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
|
||||
defer file.Close()
|
||||
|
||||
// 凭证有效期
|
||||
credentialTTL := handler.settings.UploadSessionTTL(ctx)
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
// 是否允许覆盖
|
||||
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
|
||||
options := []oss.Option{
|
||||
oss.WithContext(ctx),
|
||||
oss.Expires(time.Now().Add(credentialTTL * time.Second)),
|
||||
oss.ForbidOverWrite(!overwrite),
|
||||
oss.ContentType(mimeType),
|
||||
}
|
||||
|
||||
// 小文件直接上传
|
||||
if file.Props.Size < MultiPartUploadThreshold {
|
||||
return handler.bucket.PutObject(file.Props.SavePath, file, options...)
|
||||
}
|
||||
|
||||
// 超过阈值时使用分片上传
|
||||
imur, err := handler.bucket.InitiateMultipartUpload(file.Props.SavePath, options...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initiate multipart upload: %w", err)
|
||||
}
|
||||
|
||||
parts := make([]oss.UploadPart, 0)
|
||||
|
||||
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{
|
||||
Max: handler.settings.ChunkRetryLimit(ctx),
|
||||
Sleep: chunkRetrySleep,
|
||||
}, handler.settings.UseChunkBuffer(ctx), handler.l, handler.settings.TempPath(ctx))
|
||||
|
||||
uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
|
||||
part, err := handler.bucket.UploadPart(imur, content, current.Length(), current.Index()+1, oss.WithContext(ctx))
|
||||
if err == nil {
|
||||
parts = append(parts, part)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for chunks.Next() {
|
||||
if err := chunks.Process(uploadFunc); err != nil {
|
||||
handler.cancelUpload(imur)
|
||||
return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
|
||||
}
|
||||
}
|
||||
|
||||
_, err = handler.bucket.CompleteMultipartUpload(imur, parts, oss.ForbidOverWrite(!overwrite), oss.WithContext(ctx))
|
||||
if err != nil {
|
||||
handler.cancelUpload(imur)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete 删除一个或多个文件,
|
||||
// 返回未删除的文件
|
||||
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
|
||||
groups := lo.Chunk(files, maxDeleteBatch)
|
||||
failed := make([]string, 0)
|
||||
var lastError error
|
||||
for index, group := range groups {
|
||||
handler.l.Debug("Process delete group #%d: %v", index, group)
|
||||
// 删除文件
|
||||
delRes, err := handler.bucket.DeleteObjects(group)
|
||||
if err != nil {
|
||||
failed = append(failed, group...)
|
||||
lastError = err
|
||||
continue
|
||||
}
|
||||
|
||||
// 统计未删除的文件
|
||||
failed = append(failed, util.SliceDifference(files, delRes.DeletedObjects)...)
|
||||
}
|
||||
|
||||
if len(failed) > 0 && lastError == nil {
|
||||
lastError = fmt.Errorf("failed to delete files: %v", failed)
|
||||
}
|
||||
|
||||
return failed, lastError
|
||||
}
|
||||
|
||||
// Thumb returns a signed URL that asks OSS to generate a resized thumbnail
// (image/resize processing) for the entity, valid until expire.
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
	usePublicEndpoint := true
	if forceUsePublicEndpoint, ok := ctx.Value(driver.ForceUsePublicEndpointCtx{}).(bool); ok {
		usePublicEndpoint = forceUsePublicEndpoint
	}

	// Initialize the client.
	if err := handler.InitOSSClient(usePublicEndpoint); err != nil {
		return "", err
	}

	// m_lfit scales the image to fit within the w x h bounding box while
	// keeping its aspect ratio.
	w, h := handler.settings.ThumbSize(ctx)
	thumbParam := fmt.Sprintf("image/resize,m_lfit,h_%d,w_%d", h, w)
	thumbOption := []oss.Option{oss.Process(thumbParam)}
	thumbURL, err := handler.signSourceURL(
		ctx,
		e.Source(),
		expire,
		thumbOption,
		false,
	)
	if err != nil {
		return "", err
	}

	return thumbURL, nil
}
|
||||
|
||||
// Source 获取外链URL
|
||||
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
|
||||
// 初始化客户端
|
||||
usePublicEndpoint := true
|
||||
if forceUsePublicEndpoint, ok := ctx.Value(driver.ForceUsePublicEndpointCtx{}).(bool); ok {
|
||||
usePublicEndpoint = forceUsePublicEndpoint
|
||||
}
|
||||
if err := handler.InitOSSClient(usePublicEndpoint); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// 添加各项设置
|
||||
var signOptions = make([]oss.Option, 0, 2)
|
||||
if args.IsDownload {
|
||||
encodedFilename := url.PathEscape(args.DisplayName)
|
||||
signOptions = append(signOptions, oss.ResponseContentDisposition(fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
|
||||
encodedFilename, encodedFilename)))
|
||||
}
|
||||
if args.Speed > 0 {
|
||||
// Byte 转换为 bit
|
||||
args.Speed *= 8
|
||||
|
||||
// OSS对速度值有范围限制
|
||||
if args.Speed < 819200 {
|
||||
args.Speed = 819200
|
||||
}
|
||||
if args.Speed > 838860800 {
|
||||
args.Speed = 838860800
|
||||
}
|
||||
signOptions = append(signOptions, oss.TrafficLimitParam(args.Speed))
|
||||
}
|
||||
|
||||
return handler.signSourceURL(ctx, e.Source(), args.Expire, signOptions, false)
|
||||
}
|
||||
|
||||
// signSourceURL signs a GET URL for path. A nil expire yields a very
// long-lived URL (~20 years). For public buckets (unless forceSign is set),
// the signature and signed-request-only query parameters are stripped from
// the result.
func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *time.Time, options []oss.Option, forceSign bool) (string, error) {
	ttl := int64(86400 * 365 * 20)
	if expire != nil {
		ttl = int64(time.Until(*expire).Seconds())
	}

	signedURL, err := handler.bucket.SignURL(path, oss.HTTPGet, ttl, options...)
	if err != nil {
		return "", err
	}

	// Re-parse so the host of the signed URL can be replaced with the
	// user's custom acceleration domain, if any.
	finalURL, err := url.Parse(signedURL)
	if err != nil {
		return "", err
	}

	// For public buckets, strip the signature and parameters that are only
	// honored on signed requests.
	if !handler.policy.IsPrivate && !forceSign {
		query := finalURL.Query()
		query.Del("OSSAccessKeyId")
		query.Del("Signature")
		query.Del("response-content-disposition")
		query.Del("x-oss-traffic-limit")
		finalURL.RawQuery = query.Encode()
	}
	return finalURL.String(), nil
}
|
||||
|
||||
// Token 获取上传策略和认证Token
|
||||
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
// 初始化客户端
|
||||
if err := handler.InitOSSClient(true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 生成回调地址
|
||||
siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
|
||||
// 在从机端创建上传会话
|
||||
uploadSession.ChunkSize = handler.chunkSize
|
||||
uploadSession.Callback = routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeOss, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()
|
||||
|
||||
// 回调策略
|
||||
callbackPolicy := CallbackPolicy{
|
||||
CallbackURL: uploadSession.Callback,
|
||||
CallbackBody: `{"name":${x:fname},"source_name":${object},"size":${size},"pic_info":"${imageInfo.width},${imageInfo.height}"}`,
|
||||
CallbackBodyType: "application/json",
|
||||
CallbackSNI: true,
|
||||
}
|
||||
callbackPolicyJSON, err := json.Marshal(callbackPolicy)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode callback policy: %w", err)
|
||||
}
|
||||
callbackPolicyEncoded := base64.StdEncoding.EncodeToString(callbackPolicyJSON)
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
// 初始化分片上传
|
||||
options := []oss.Option{
|
||||
oss.WithContext(ctx),
|
||||
oss.Expires(uploadSession.Props.ExpireAt),
|
||||
oss.ForbidOverWrite(true),
|
||||
oss.ContentType(mimeType),
|
||||
}
|
||||
imur, err := handler.bucket.InitiateMultipartUpload(file.Props.SavePath, options...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize multipart upload: %w", err)
|
||||
}
|
||||
uploadSession.UploadID = imur.UploadID
|
||||
|
||||
// 为每个分片签名上传 URL
|
||||
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{}, false, handler.l, "")
|
||||
urls := make([]string, chunks.Num())
|
||||
ttl := int64(time.Until(uploadSession.Props.ExpireAt).Seconds())
|
||||
for chunks.Next() {
|
||||
err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error {
|
||||
signedURL, err := handler.bucket.SignURL(file.Props.SavePath, oss.HTTPPut,
|
||||
ttl,
|
||||
oss.AddParam(partNumberParam, strconv.Itoa(c.Index()+1)),
|
||||
oss.AddParam(uploadIdParam, imur.UploadID),
|
||||
oss.ContentType("application/octet-stream"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
urls[c.Index()] = signedURL
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// 签名完成分片上传的URL
|
||||
completeURL, err := handler.bucket.SignURL(file.Props.SavePath, oss.HTTPPost, ttl,
|
||||
oss.ContentType("application/octet-stream"),
|
||||
oss.AddParam(uploadIdParam, imur.UploadID),
|
||||
oss.Expires(time.Now().Add(time.Duration(ttl)*time.Second)),
|
||||
oss.SetHeader(completeAllHeader, "yes"),
|
||||
oss.ForbidOverWrite(true),
|
||||
oss.AddParam(callbackParam, callbackPolicyEncoded))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &fs.UploadCredential{
|
||||
UploadID: imur.UploadID,
|
||||
UploadURLs: urls,
|
||||
CompleteURL: completeURL,
|
||||
SessionID: uploadSession.Props.UploadSessionID,
|
||||
ChunkSize: handler.chunkSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CancelToken aborts the multipart upload created by Token, invalidating the
// previously issued upload credential.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	return handler.bucket.AbortMultipartUpload(oss.InitiateMultipartUploadResult{UploadID: uploadSession.UploadID, Key: uploadSession.Props.SavePath}, oss.WithContext(ctx))
}
|
||||
|
||||
// CompleteUpload is a no-op for OSS: the upload is finalized client-side via
// the pre-signed completion URL issued by Token.
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	return nil
}
|
||||
|
||||
func (handler *Driver) Capabilities() *driver.Capabilities {
|
||||
mediaMetaExts := handler.policy.Settings.MediaMetaExts
|
||||
if !handler.policy.Settings.NativeMediaProcessing {
|
||||
mediaMetaExts = nil
|
||||
}
|
||||
return &driver.Capabilities{
|
||||
StaticFeatures: features,
|
||||
MediaMetaSupportedExts: mediaMetaExts,
|
||||
MediaMetaProxy: handler.policy.Settings.MediaMetaGeneratorProxy,
|
||||
ThumbSupportedExts: handler.policy.Settings.ThumbExts,
|
||||
ThumbProxy: handler.policy.Settings.ThumbGeneratorProxy,
|
||||
ThumbSupportAllExts: handler.policy.Settings.ThumbSupportAllExts,
|
||||
ThumbMaxSize: handler.policy.Settings.ThumbMaxSize,
|
||||
}
|
||||
}
|
||||
|
||||
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
|
||||
if util.ContainsString(supportedImageExt, ext) {
|
||||
return handler.extractImageMeta(ctx, path)
|
||||
}
|
||||
|
||||
if util.ContainsString(supportedVideoExt, ext) {
|
||||
return handler.extractIMMMeta(ctx, path, videoInfoProcess)
|
||||
}
|
||||
|
||||
if util.ContainsString(supportedAudioExt, ext) {
|
||||
return handler.extractIMMMeta(ctx, path, audioInfoProcess)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unsupported media type in oss: %s", ext)
|
||||
}
|
||||
|
||||
// LocalPath returns the local filesystem path of an object; OSS objects have
// none, so the result is always empty.
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	return ""
}
|
||||
|
||||
// cancelUpload aborts a multipart upload on a best-effort basis; failures
// are only logged since the caller is already on an error path.
func (handler *Driver) cancelUpload(imur oss.InitiateMultipartUploadResult) {
	if err := handler.bucket.AbortMultipartUpload(imur); err != nil {
		handler.l.Warning("failed to abort multipart upload: %s", err)
	}
}
|
||||
183
pkg/filemanager/driver/qiniu/media.go
Normal file
183
pkg/filemanager/driver/qiniu/media.go
Normal file
@@ -0,0 +1,183 @@
|
||||
package qiniu
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/samber/lo"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// Qiniu data-processing (fop) query parameters.
	exifParam   = "exif"   // EXIF extraction for images
	avInfoParam = "avinfo" // ffprobe-style info for audio/video

	// mediaInfoTTL is how long the signed media-info URL stays valid.
	mediaInfoTTL = time.Duration(10) * time.Minute
)
|
||||
|
||||
var (
	// supportedImageExt lists the image extensions eligible for EXIF extraction.
	supportedImageExt = []string{"jpg", "jpeg", "png", "gif", "bmp", "webp", "tiff"}
)
|
||||
|
||||
type (
	// ImageProp is a single EXIF property value as returned by Qiniu.
	ImageProp struct {
		Value string `json:"val"`
	}
	// ImageInfo maps EXIF tag names to their values.
	ImageInfo map[string]ImageProp
	// QiniuMediaError is the JSON error payload returned by Qiniu's
	// data-processing endpoints.
	QiniuMediaError struct {
		Error string `json:"error"`
		Code  int    `json:"code"`
	}
)
|
||||
|
||||
// extractAvMeta fetches ffprobe-style media info (avinfo) for path and
// converts it into media metadata, appending common music tags (artist,
// album, title) when present in the container's format tags.
func (handler *Driver) extractAvMeta(ctx context.Context, path string) ([]driver.MediaMeta, error) {
	resp, err := handler.extractMediaInfo(ctx, path, avInfoParam)
	if err != nil {
		return nil, err
	}

	var avInfo *mediameta.FFProbeMeta
	if err := json.Unmarshal([]byte(resp), &avInfo); err != nil {
		return nil, fmt.Errorf("failed to unmarshal media info: %w", err)
	}

	metas := mediameta.ProbeMetaTransform(avInfo)
	if artist, ok := avInfo.Format.Tags["artist"]; ok {
		metas = append(metas, driver.MediaMeta{
			Key:   mediameta.Artist,
			Value: artist,
			Type:  driver.MediaTypeMusic,
		})
	}

	if album, ok := avInfo.Format.Tags["album"]; ok {
		metas = append(metas, driver.MediaMeta{
			Key:   mediameta.MusicAlbum,
			Value: album,
			Type:  driver.MediaTypeMusic,
		})
	}

	if title, ok := avInfo.Format.Tags["title"]; ok {
		metas = append(metas, driver.MediaMeta{
			Key:   mediameta.MusicTitle,
			Value: title,
			Type:  driver.MediaTypeMusic,
		})
	}

	return metas, nil
}
|
||||
|
||||
// extractImageMeta fetches EXIF data for an image via Qiniu's exif fop and
// converts it (including GPS coordinates) into EXIF media metadata.
func (handler *Driver) extractImageMeta(ctx context.Context, path string) ([]driver.MediaMeta, error) {
	resp, err := handler.extractMediaInfo(ctx, path, exifParam)
	if err != nil {
		return nil, err
	}

	var imageInfo ImageInfo
	if err := json.Unmarshal([]byte(resp), &imageInfo); err != nil {
		return nil, fmt.Errorf("failed to unmarshal media info: %w", err)
	}

	metas := make([]driver.MediaMeta, 0)
	// Flatten {tag: {"val": ...}} into a plain tag -> value map.
	exifMap := lo.MapEntries(imageInfo, func(key string, value ImageProp) (string, string) {
		return key, value.Value
	})
	metas = append(metas, mediameta.ExtractExifMap(exifMap, time.Time{})...)
	metas = append(metas, parseGpsInfo(imageInfo)...)
	// Mark every extracted entry as EXIF metadata.
	for i := 0; i < len(metas); i++ {
		metas[i].Type = driver.MetaTypeExif
	}

	return metas, nil
}
|
||||
|
||||
func (handler *Driver) extractMediaInfo(ctx context.Context, path string, param string) (string, error) {
|
||||
mediaInfoExpire := time.Now().Add(mediaInfoTTL)
|
||||
ediaInfoUrl := handler.signSourceURL(fmt.Sprintf("%s?%s", path, param), &mediaInfoExpire)
|
||||
resp, err := handler.httpClient.
|
||||
Request(http.MethodGet, ediaInfoUrl, nil, request.WithContext(ctx)).
|
||||
CheckHTTPResponse(http.StatusOK).
|
||||
GetResponseIgnoreErr()
|
||||
if err != nil {
|
||||
return "", unmarshalError(resp, err)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func unmarshalError(resp string, originErr error) error {
|
||||
if resp == "" {
|
||||
return originErr
|
||||
}
|
||||
|
||||
var err QiniuMediaError
|
||||
if err := json.Unmarshal([]byte(resp), &err); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal qiniu error: %w", err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("qiniu error: %s", err.Error)
|
||||
}
|
||||
|
||||
func parseGpsInfo(imageInfo ImageInfo) []driver.MediaMeta {
|
||||
latitude := imageInfo["GPSLatitude"] // 31, 16.2680820, 0
|
||||
longitude := imageInfo["GPSLongitude"] // 120, 42.9103939, 0
|
||||
latRef := imageInfo["GPSLatitudeRef"] // N
|
||||
lonRef := imageInfo["GPSLongitudeRef"] // E
|
||||
|
||||
// Make sure all value exist in map
|
||||
if latitude.Value == "" || longitude.Value == "" || latRef.Value == "" || lonRef.Value == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
lat := parseRawGPS(latitude.Value, latRef.Value)
|
||||
lon := parseRawGPS(longitude.Value, lonRef.Value)
|
||||
if !math.IsNaN(lat) && !math.IsNaN(lon) {
|
||||
lat, lng := mediameta.NormalizeGPS(lat, lon)
|
||||
return []driver.MediaMeta{{
|
||||
Key: mediameta.GpsLat,
|
||||
Value: fmt.Sprintf("%f", lat),
|
||||
}, {
|
||||
Key: mediameta.GpsLng,
|
||||
Value: fmt.Sprintf("%f", lng),
|
||||
}}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseRawGPS parses a comma-separated coordinate like "31, 16.2680820, 0"
// (degrees, minutes, seconds) into decimal degrees. The sign is negated for
// southern/western references; unparsable components count as 0.
func parseRawGPS(gpsStr string, ref string) float64 {
	parts := strings.Split(gpsStr, ", ")
	if len(parts) < 1 {
		return 0
	}

	// component parses parts[idx], yielding 0 when the component is absent
	// or malformed.
	component := func(idx int) float64 {
		if idx >= len(parts) {
			return 0
		}
		v, _ := strconv.ParseFloat(parts[idx], 64)
		return v
	}

	decimal := component(0) + component(1)/60.0 + component(2)/3600.0

	if ref == "S" || ref == "W" {
		return -decimal
	}

	return decimal
}
|
||||
428
pkg/filemanager/driver/qiniu/qiniu.go
Normal file
428
pkg/filemanager/driver/qiniu/qiniu.go
Normal file
@@ -0,0 +1,428 @@
|
||||
package qiniu
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/util"
|
||||
"github.com/qiniu/go-sdk/v7/auth/qbox"
|
||||
"github.com/qiniu/go-sdk/v7/storage"
|
||||
"github.com/samber/lo"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// chunkRetrySleep is the pause between retries of a failed chunk upload.
	chunkRetrySleep = time.Duration(5) * time.Second
	// maxDeleteBatch is the maximum number of keys per batch delete request.
	maxDeleteBatch = 1000
	// trafficLimitParam is the query parameter Qiniu uses for download
	// speed limiting.
	trafficLimitParam = "X-Qiniu-Traffic-Limit"
)
|
||||
|
||||
var (
	// features is the static feature set reported by Capabilities.
	features = &boolset.BooleanSet{}
)
|
||||
|
||||
// Driver is the storage policy adapter for Qiniu object storage.
type Driver struct {
	policy *ent.StoragePolicy

	mac        *qbox.Mac              // credentials used to sign requests and upload tokens
	cfg        *storage.Config        // SDK configuration (HTTPS enforced)
	bucket     *storage.BucketManager // bucket-level management client
	settings   setting.Provider
	l          logging.Logger
	config     conf.ConfigProvider
	mime       mime.MimeDetector
	httpClient request.Client

	// chunkSize is the part size used for resumable (chunked) uploads.
	chunkSize int64
}
|
||||
|
||||
// New constructs a Qiniu Driver from the given storage policy. Clients are
// created eagerly from the policy's credentials; no remote calls are made.
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
	config conf.ConfigProvider, l logging.Logger, mime mime.MimeDetector) (*Driver, error) {
	// Fall back to a 25 MB chunk size when the policy does not specify one.
	chunkSize := policy.Settings.ChunkSize
	if policy.Settings.ChunkSize == 0 {
		chunkSize = 25 << 20 // 25 MB
	}

	mac := qbox.NewMac(policy.AccessKey, policy.SecretKey)
	cfg := &storage.Config{UseHTTPS: true}

	driver := &Driver{
		policy:     policy,
		settings:   settings,
		chunkSize:  chunkSize,
		config:     config,
		l:          l,
		mime:       mime,
		mac:        mac,
		cfg:        cfg,
		bucket:     storage.NewBucketManager(mac, cfg),
		httpClient: request.NewClient(config, request.WithLogger(l)),
	}

	return driver, nil
}
|
||||
|
||||
//
|
||||
//// List 列出给定路径下的文件
|
||||
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) {
|
||||
// base = strings.TrimPrefix(base, "/")
|
||||
// if base != "" {
|
||||
// base += "/"
|
||||
// }
|
||||
//
|
||||
// var (
|
||||
// delimiter string
|
||||
// marker string
|
||||
// objects []storage.ListItem
|
||||
// commons []string
|
||||
// )
|
||||
// if !recursive {
|
||||
// delimiter = "/"
|
||||
// }
|
||||
//
|
||||
// for {
|
||||
// entries, folders, nextMarker, hashNext, err := handler.bucket.ListFiles(
|
||||
// handler.policy.BucketName,
|
||||
// base, delimiter, marker, 1000)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// objects = append(objects, entries...)
|
||||
// commons = append(commons, folders...)
|
||||
// if !hashNext {
|
||||
// break
|
||||
// }
|
||||
// marker = nextMarker
|
||||
// }
|
||||
//
|
||||
// // 处理列取结果
|
||||
// res := make([]response.Object, 0, len(objects)+len(commons))
|
||||
// // 处理目录
|
||||
// for _, object := range commons {
|
||||
// rel, err := filepath.Rel(base, object)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(object),
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: 0,
|
||||
// IsDir: true,
|
||||
// LastModify: time.Now(),
|
||||
// })
|
||||
// }
|
||||
// // 处理文件
|
||||
// for _, object := range objects {
|
||||
// rel, err := filepath.Rel(base, object.Key)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(object.Key),
|
||||
// Source: object.Key,
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: uint64(object.Fsize),
|
||||
// IsDir: false,
|
||||
// LastModify: time.Unix(object.PutTime/10000000, 0),
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// return res, nil
|
||||
//}
|
||||
|
||||
// Put 将文件流保存到指定目录
|
||||
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
|
||||
defer file.Close()
|
||||
|
||||
// 凭证有效期
|
||||
credentialTTL := handler.settings.UploadSessionTTL(ctx)
|
||||
|
||||
// 是否允许覆盖
|
||||
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
|
||||
|
||||
// 生成上传策略
|
||||
scope := handler.policy.BucketName
|
||||
if overwrite {
|
||||
scope = fmt.Sprintf("%s:%s", handler.policy.BucketName, file.Props.SavePath)
|
||||
}
|
||||
putPolicy := storage.PutPolicy{
|
||||
// 指定为覆盖策略
|
||||
Scope: scope,
|
||||
SaveKey: file.Props.SavePath,
|
||||
ForceSaveKey: true,
|
||||
FsizeLimit: file.Props.Size,
|
||||
Expires: uint64(time.Now().Add(credentialTTL).Unix()),
|
||||
}
|
||||
upToken := putPolicy.UploadToken(handler.mac)
|
||||
|
||||
// 初始化分片上传
|
||||
resumeUploader := storage.NewResumeUploaderV2(handler.cfg)
|
||||
upHost, err := resumeUploader.UpHost(handler.policy.AccessKey, handler.policy.BucketName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get upload host: %w", err)
|
||||
}
|
||||
|
||||
ret := &storage.InitPartsRet{}
|
||||
err = resumeUploader.InitParts(ctx, upToken, upHost, handler.policy.BucketName, file.Props.SavePath, true, ret)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initiate multipart upload: %w", err)
|
||||
}
|
||||
|
||||
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{
|
||||
Max: handler.settings.ChunkRetryLimit(ctx),
|
||||
Sleep: chunkRetrySleep,
|
||||
}, handler.settings.UseChunkBuffer(ctx), handler.l, handler.settings.TempPath(ctx))
|
||||
|
||||
parts := make([]*storage.UploadPartsRet, 0, chunks.Num())
|
||||
|
||||
uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
|
||||
partRet := &storage.UploadPartsRet{}
|
||||
err := resumeUploader.UploadParts(
|
||||
ctx, upToken, upHost, handler.policy.BucketName, file.Props.SavePath, true, ret.UploadID,
|
||||
int64(current.Index()+1), "", partRet, content, int(current.Length()))
|
||||
if err == nil {
|
||||
parts = append(parts, partRet)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for chunks.Next() {
|
||||
if err := chunks.Process(uploadFunc); err != nil {
|
||||
_ = handler.cancelUpload(upHost, file.Props.SavePath, ret.UploadID, upToken)
|
||||
return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
|
||||
}
|
||||
}
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
err = resumeUploader.CompleteParts(ctx, upToken, upHost, nil, handler.policy.BucketName,
|
||||
file.Props.SavePath, true, ret.UploadID, &storage.RputV2Extra{
|
||||
MimeType: mimeType,
|
||||
Progresses: lo.Map(parts, func(part *storage.UploadPartsRet, i int) storage.UploadPartInfo {
|
||||
return storage.UploadPartInfo{
|
||||
Etag: part.Etag,
|
||||
PartNumber: int64(i) + 1,
|
||||
}
|
||||
}),
|
||||
})
|
||||
if err != nil {
|
||||
_ = handler.cancelUpload(upHost, file.Props.SavePath, ret.UploadID, upToken)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete 删除一个或多个文件,
|
||||
// 返回未删除的文件
|
||||
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
|
||||
groups := lo.Chunk(files, maxDeleteBatch)
|
||||
failed := make([]string, 0)
|
||||
var lastError error
|
||||
|
||||
for index, group := range groups {
|
||||
handler.l.Debug("Process delete group #%d: %v", index, group)
|
||||
// 删除文件
|
||||
rets, err := handler.bucket.BatchWithContext(ctx, handler.policy.BucketName, lo.Map(group, func(key string, index int) string {
|
||||
return storage.URIDelete(handler.policy.BucketName, key)
|
||||
}))
|
||||
|
||||
// 处理删除结果
|
||||
if err != nil {
|
||||
for k, ret := range rets {
|
||||
if ret.Code != 200 && ret.Code != 612 {
|
||||
failed = append(failed, group[k])
|
||||
lastError = err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(failed) > 0 && lastError == nil {
|
||||
lastError = fmt.Errorf("failed to delete files: %v", failed)
|
||||
}
|
||||
|
||||
return failed, lastError
|
||||
}
|
||||
|
||||
// Thumb 获取文件缩略图
|
||||
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
|
||||
w, h := handler.settings.ThumbSize(ctx)
|
||||
|
||||
thumb := fmt.Sprintf("%s?imageView2/1/w/%d/h/%d", e.Source(), w, h)
|
||||
return handler.signSourceURL(
|
||||
thumb,
|
||||
expire,
|
||||
), nil
|
||||
}
|
||||
|
||||
// Source 获取外链URL
|
||||
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
|
||||
path := e.Source()
|
||||
|
||||
query := url.Values{}
|
||||
|
||||
// 加入下载相关设置
|
||||
if args.IsDownload {
|
||||
query.Add("attname", args.DisplayName)
|
||||
}
|
||||
|
||||
if args.Speed > 0 {
|
||||
// Byte 转换为 bit
|
||||
args.Speed *= 8
|
||||
|
||||
// Qiniu 对速度值有范围限制
|
||||
if args.Speed < 819200 {
|
||||
args.Speed = 819200
|
||||
}
|
||||
if args.Speed > 838860800 {
|
||||
args.Speed = 838860800
|
||||
}
|
||||
query.Add(trafficLimitParam, fmt.Sprintf("%d", args.Speed))
|
||||
}
|
||||
|
||||
if len(query) > 0 {
|
||||
path = path + "?" + query.Encode()
|
||||
}
|
||||
|
||||
// 取得原始文件地址
|
||||
return handler.signSourceURL(path, args.Expire), nil
|
||||
}
|
||||
|
||||
func (handler *Driver) signSourceURL(path string, expire *time.Time) string {
|
||||
var sourceURL string
|
||||
if handler.policy.IsPrivate {
|
||||
deadline := time.Now().Add(time.Duration(24) * time.Hour * 365 * 20).Unix()
|
||||
if expire != nil {
|
||||
deadline = expire.Unix()
|
||||
}
|
||||
sourceURL = storage.MakePrivateURL(handler.mac, handler.policy.Settings.ProxyServer, path, deadline)
|
||||
} else {
|
||||
sourceURL = storage.MakePublicURL(handler.policy.Settings.ProxyServer, path)
|
||||
}
|
||||
return sourceURL
|
||||
}
|
||||
|
||||
// Token 获取上传策略和认证Token
|
||||
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
// 生成回调地址
|
||||
siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
|
||||
apiUrl := routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeQiniu, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()
|
||||
|
||||
// 创建上传策略
|
||||
putPolicy := storage.PutPolicy{
|
||||
Scope: fmt.Sprintf("%s:%s", handler.policy.BucketName, file.Props.SavePath),
|
||||
CallbackURL: apiUrl,
|
||||
CallbackBody: `{"size":$(fsize),"pic_info":"$(imageInfo.width),$(imageInfo.height)"}`,
|
||||
CallbackBodyType: "application/json",
|
||||
SaveKey: file.Props.SavePath,
|
||||
ForceSaveKey: true,
|
||||
FsizeLimit: file.Props.Size,
|
||||
Expires: uint64(file.Props.ExpireAt.Unix()),
|
||||
}
|
||||
|
||||
// 初始化分片上传
|
||||
upToken := putPolicy.UploadToken(handler.mac)
|
||||
resumeUploader := storage.NewResumeUploaderV2(handler.cfg)
|
||||
upHost, err := resumeUploader.UpHost(handler.policy.AccessKey, handler.policy.BucketName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get upload host: %w", err)
|
||||
}
|
||||
|
||||
ret := &storage.InitPartsRet{}
|
||||
err = resumeUploader.InitParts(ctx, upToken, upHost, handler.policy.BucketName, file.Props.SavePath, true, ret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initiate multipart upload: %w", err)
|
||||
}
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
uploadSession.UploadID = ret.UploadID
|
||||
return &fs.UploadCredential{
|
||||
UploadID: ret.UploadID,
|
||||
UploadURLs: []string{getUploadUrl(upHost, handler.policy.BucketName, file.Props.SavePath, ret.UploadID)},
|
||||
Credential: upToken,
|
||||
SessionID: uploadSession.Props.UploadSessionID,
|
||||
ChunkSize: handler.chunkSize,
|
||||
MimeType: mimeType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
// 取消上传凭证
|
||||
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
|
||||
resumeUploader := storage.NewResumeUploaderV2(handler.cfg)
|
||||
return resumeUploader.Client.CallWith(ctx, nil, "DELETE", uploadSession.UploadURL, http.Header{"Authorization": {"UpToken " + uploadSession.Credential}}, nil, 0)
|
||||
}
|
||||
|
||||
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (handler *Driver) Capabilities() *driver.Capabilities {
|
||||
mediaMetaExts := handler.policy.Settings.MediaMetaExts
|
||||
if !handler.policy.Settings.NativeMediaProcessing {
|
||||
mediaMetaExts = nil
|
||||
}
|
||||
return &driver.Capabilities{
|
||||
StaticFeatures: features,
|
||||
MediaMetaSupportedExts: mediaMetaExts,
|
||||
MediaMetaProxy: handler.policy.Settings.MediaMetaGeneratorProxy,
|
||||
ThumbSupportedExts: handler.policy.Settings.ThumbExts,
|
||||
ThumbProxy: handler.policy.Settings.ThumbGeneratorProxy,
|
||||
ThumbSupportAllExts: handler.policy.Settings.ThumbSupportAllExts,
|
||||
ThumbMaxSize: handler.policy.Settings.ThumbMaxSize,
|
||||
}
|
||||
}
|
||||
|
||||
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
|
||||
if util.ContainsString(supportedImageExt, ext) {
|
||||
return handler.extractImageMeta(ctx, path)
|
||||
}
|
||||
|
||||
return handler.extractAvMeta(ctx, path)
|
||||
}
|
||||
|
||||
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (handler *Driver) cancelUpload(upHost, savePath, uploadId, upToken string) error {
|
||||
resumeUploader := storage.NewResumeUploaderV2(handler.cfg)
|
||||
uploadUrl := getUploadUrl(upHost, handler.policy.BucketName, savePath, uploadId)
|
||||
err := resumeUploader.Client.CallWith(context.Background(), nil, "DELETE", uploadUrl, http.Header{"Authorization": {"UpToken " + upToken}}, nil, 0)
|
||||
if err != nil {
|
||||
handler.l.Error("Failed to cancel upload session for %q: %s", savePath, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// getUploadUrl assembles the multipart-upload endpoint for an object:
// {upHost}/buckets/{bucket}/objects/{base64url(key)}/uploads/{uploadId}.
func getUploadUrl(upHost, bucket, key, uploadId string) string {
	encodedKey := base64.URLEncoding.EncodeToString([]byte(key))
	return fmt.Sprintf("%s/buckets/%s/objects/%s/uploads/%s", upHost, bucket, encodedKey, uploadId)
}
|
||||
266
pkg/filemanager/driver/remote/client.go
Normal file
266
pkg/filemanager/driver/remote/client.go
Normal file
@@ -0,0 +1,266 @@
|
||||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/application/constants"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/gofrs/uuid"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// OverwriteHeader is the request header that tells the slave node whether
	// an existing file at the target path may be overwritten.
	OverwriteHeader = constants.CrHeaderPrefix + "Overwrite"
	// chunkRetrySleep is the fixed delay between chunk upload retries.
	chunkRetrySleep = time.Duration(5) * time.Second
)
|
||||
|
||||
// Client operates uploads and file management against a remote slave server.
type Client interface {
	// CreateUploadSession creates an upload session on the remote node;
	// overwrite controls whether an existing file may be replaced.
	CreateUploadSession(ctx context.Context, session *fs.UploadSession, overwrite bool) error
	// GetUploadURL signs a direct-upload URL for the given session, returning
	// the URL and the Authorization header value to send with it.
	GetUploadURL(ctx context.Context, expires time.Time, sessionID string) (string, string, error)
	// Upload uploads a file to the remote server, chunk by chunk.
	Upload(ctx context.Context, file *fs.UploadRequest) error
	// DeleteUploadSession deletes a remote upload session.
	DeleteUploadSession(ctx context.Context, sessionID string) error
	// MediaMeta gets media metadata for a file from the remote server.
	MediaMeta(ctx context.Context, src, ext string) ([]driver.MediaMeta, error)
	// DeleteFiles deletes files from the remote server, returning the subset
	// that could not be deleted.
	DeleteFiles(ctx context.Context, files ...string) ([]string, error)
}
|
||||
|
||||
// DeleteFileRequest is the JSON body sent to a slave node to delete files.
type DeleteFileRequest struct {
	// Files lists the source paths on the slave node to be deleted.
	Files []string `json:"files"`
}
|
||||
|
||||
// NewClient creates new Client from given policy
|
||||
func NewClient(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider, config conf.ConfigProvider, l logging.Logger) (Client, error) {
|
||||
if policy.Edges.Node == nil {
|
||||
return nil, fmt.Errorf("remote storage policy %d has no node", policy.ID)
|
||||
}
|
||||
|
||||
authInstance := auth.HMACAuth{[]byte(policy.Edges.Node.SlaveKey)}
|
||||
serverURL, err := url.Parse(policy.Edges.Node.Server)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
base, _ := url.Parse(constants.APIPrefixSlave)
|
||||
|
||||
return &remoteClient{
|
||||
policy: policy,
|
||||
authInstance: authInstance,
|
||||
httpClient: request.NewClient(
|
||||
config,
|
||||
request.WithEndpoint(serverURL.ResolveReference(base).String()),
|
||||
request.WithCredential(authInstance, int64(settings.SlaveRequestSignTTL(ctx))),
|
||||
request.WithSlaveMeta(policy.Edges.Node.ID),
|
||||
request.WithMasterMeta(settings.SiteBasic(ctx).ID, settings.SiteURL(setting.UseFirstSiteUrl(ctx)).String()),
|
||||
request.WithCorrelationID(),
|
||||
),
|
||||
settings: settings,
|
||||
l: l,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// remoteClient is the default Client implementation that talks to a slave
// node over its signed HTTP API.
type remoteClient struct {
	policy       *ent.StoragePolicy // storage policy bound to the slave node
	authInstance auth.Auth          // HMAC signer derived from the node's slave key
	httpClient   request.Client     // pre-configured client rooted at the slave API prefix
	settings     setting.Provider
	l            logging.Logger
}
|
||||
|
||||
func (c *remoteClient) Upload(ctx context.Context, file *fs.UploadRequest) error {
|
||||
ttl := c.settings.UploadSessionTTL(ctx)
|
||||
session := &fs.UploadSession{
|
||||
Props: file.Props.Copy(),
|
||||
Policy: c.policy,
|
||||
}
|
||||
session.Props.UploadSessionID = uuid.Must(uuid.NewV4()).String()
|
||||
session.Props.ExpireAt = time.Now().Add(ttl)
|
||||
|
||||
// Create upload session
|
||||
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
|
||||
if err := c.CreateUploadSession(ctx, session, overwrite); err != nil {
|
||||
return fmt.Errorf("failed to create upload session: %w", err)
|
||||
}
|
||||
|
||||
// Initial chunk groups
|
||||
chunks := chunk.NewChunkGroup(file, c.policy.Settings.ChunkSize, &backoff.ConstantBackoff{
|
||||
Max: c.settings.ChunkRetryLimit(ctx),
|
||||
Sleep: chunkRetrySleep,
|
||||
}, c.settings.UseChunkBuffer(ctx), c.l, c.settings.TempPath(ctx))
|
||||
|
||||
uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
|
||||
return c.uploadChunk(ctx, session.Props.UploadSessionID, current.Index(), content, overwrite, current.Length())
|
||||
}
|
||||
|
||||
// upload chunks
|
||||
for chunks.Next() {
|
||||
if err := chunks.Process(uploadFunc); err != nil {
|
||||
if err := c.DeleteUploadSession(ctx, session.Props.UploadSessionID); err != nil {
|
||||
c.l.Warning("failed to delete upload session: %s", err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) DeleteUploadSession(ctx context.Context, sessionID string) error {
|
||||
resp, err := c.httpClient.Request(
|
||||
"DELETE",
|
||||
"upload/"+sessionID,
|
||||
nil,
|
||||
request.WithContext(ctx),
|
||||
request.WithLogger(logging.FromContext(ctx)),
|
||||
).CheckHTTPResponse(200).DecodeResponse()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Code != 0 {
|
||||
return serializer.NewErrorFromResponse(resp)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) DeleteFiles(ctx context.Context, files ...string) ([]string, error) {
|
||||
req := &DeleteFileRequest{
|
||||
Files: files,
|
||||
}
|
||||
|
||||
reqStr, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return files, fmt.Errorf("failed to marshal delete request: %w", err)
|
||||
}
|
||||
|
||||
resp, err := c.httpClient.Request(
|
||||
"DELETE",
|
||||
"file",
|
||||
bytes.NewReader(reqStr),
|
||||
request.WithContext(ctx),
|
||||
request.WithLogger(logging.FromContext(ctx)),
|
||||
).CheckHTTPResponse(200).DecodeResponse()
|
||||
if err != nil {
|
||||
return files, err
|
||||
}
|
||||
|
||||
if resp.Code != 0 {
|
||||
var failed []string
|
||||
failed = files
|
||||
if resp.Code == serializer.CodeNotFullySuccess {
|
||||
resp.GobDecode(&failed)
|
||||
}
|
||||
return failed, fmt.Errorf(resp.Error)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) MediaMeta(ctx context.Context, src, ext string) ([]driver.MediaMeta, error) {
|
||||
resp, err := c.httpClient.Request(
|
||||
http.MethodGet,
|
||||
routes.SlaveMediaMetaRoute(src, ext),
|
||||
nil,
|
||||
request.WithContext(ctx),
|
||||
request.WithLogger(c.l),
|
||||
).CheckHTTPResponse(200).DecodeResponse()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.Code != 0 {
|
||||
return nil, fmt.Errorf(resp.Error)
|
||||
}
|
||||
|
||||
var metas []driver.MediaMeta
|
||||
resp.GobDecode(&metas)
|
||||
return metas, nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) CreateUploadSession(ctx context.Context, session *fs.UploadSession, overwrite bool) error {
|
||||
reqBodyEncoded, err := json.Marshal(map[string]interface{}{
|
||||
"session": session,
|
||||
"overwrite": overwrite,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bodyReader := strings.NewReader(string(reqBodyEncoded))
|
||||
resp, err := c.httpClient.Request(
|
||||
"PUT",
|
||||
"upload",
|
||||
bodyReader,
|
||||
request.WithContext(ctx),
|
||||
request.WithLogger(c.l),
|
||||
).CheckHTTPResponse(200).DecodeResponse()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Code != 0 {
|
||||
return serializer.NewErrorFromResponse(resp)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) GetUploadURL(ctx context.Context, expires time.Time, sessionID string) (string, string, error) {
|
||||
base, err := url.Parse(c.policy.Edges.Node.Server)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", routes.SlaveUploadUrl(base, sessionID).String(), nil)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
req = auth.SignRequest(ctx, c.authInstance, req, &expires)
|
||||
return req.URL.String(), req.Header["Authorization"][0], nil
|
||||
}
|
||||
|
||||
func (c *remoteClient) uploadChunk(ctx context.Context, sessionID string, index int, chunk io.Reader, overwrite bool, size int64) error {
|
||||
resp, err := c.httpClient.Request(
|
||||
"POST",
|
||||
fmt.Sprintf("upload/%s?chunk=%d", sessionID, index),
|
||||
chunk,
|
||||
request.WithContext(ctx),
|
||||
request.WithTimeout(time.Duration(0)),
|
||||
request.WithContentLength(size),
|
||||
request.WithHeader(map[string][]string{OverwriteHeader: {fmt.Sprintf("%t", overwrite)}}),
|
||||
).CheckHTTPResponse(200).DecodeResponse()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.Code != 0 {
|
||||
return serializer.NewErrorFromResponse(resp)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
273
pkg/filemanager/driver/remote/remote.go
Normal file
273
pkg/filemanager/driver/remote/remote.go
Normal file
@@ -0,0 +1,273 @@
|
||||
package remote
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
	// features is the static capability set of the remote driver; no extra
	// handler capabilities are enabled by default.
	features = &boolset.BooleanSet{}
)
|
||||
|
||||
// Driver is the storage adapter for "remote" (slave node) storage policies.
type Driver struct {
	Client       request.Client     // raw HTTP client for direct requests to the node
	Policy       *ent.StoragePolicy // the storage policy this driver serves
	AuthInstance auth.Auth          // HMAC signer derived from the node's slave key

	uploadClient Client // high-level client for uploads and file operations
	config       conf.ConfigProvider
	settings     setting.Provider
}
|
||||
|
||||
// New initializes a new Driver from policy
|
||||
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
|
||||
config conf.ConfigProvider, l logging.Logger) (*Driver, error) {
|
||||
client, err := NewClient(ctx, policy, settings, config, l)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Driver{
|
||||
Policy: policy,
|
||||
Client: request.NewClient(config),
|
||||
AuthInstance: auth.HMACAuth{[]byte(policy.Edges.Node.SlaveKey)},
|
||||
uploadClient: client,
|
||||
settings: settings,
|
||||
config: config,
|
||||
}, nil
|
||||
}
|
||||
|
||||
//// List 列取文件
|
||||
//func (handler *Driver) List(ctx context.Context, path string, recursive bool) ([]response.Object, error) {
|
||||
// var res []response.Object
|
||||
//
|
||||
// reqBody := serializer.ListRequest{
|
||||
// Path: path,
|
||||
// Recursive: recursive,
|
||||
// }
|
||||
// reqBodyEncoded, err := json.Marshal(reqBody)
|
||||
// if err != nil {
|
||||
// return res, err
|
||||
// }
|
||||
//
|
||||
// // 发送列表请求
|
||||
// bodyReader := strings.NewReader(string(reqBodyEncoded))
|
||||
// signTTL := model.GetIntSetting("slave_api_timeout", 60)
|
||||
// resp, err := handler.Client.Request(
|
||||
// "POST",
|
||||
// handler.getAPIUrl("list"),
|
||||
// bodyReader,
|
||||
// request.WithCredential(handler.AuthInstance, int64(signTTL)),
|
||||
// request.WithMasterMeta(handler.settings.SiteBasic(ctx).ID, handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx)).String()),
|
||||
// ).CheckHTTPResponse(200).DecodeResponse()
|
||||
// if err != nil {
|
||||
// return res, err
|
||||
// }
|
||||
//
|
||||
// // 处理列取结果
|
||||
// if resp.Code != 0 {
|
||||
// return res, errors.New(resp.Error)
|
||||
// }
|
||||
//
|
||||
// if resStr, ok := resp.Data.(string); ok {
|
||||
// err = json.Unmarshal([]byte(resStr), &res)
|
||||
// if err != nil {
|
||||
// return res, err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// return res, nil
|
||||
//}
|
||||
|
||||
// getAPIUrl 获取接口请求地址
|
||||
func (handler *Driver) getAPIUrl(scope string, routes ...string) string {
|
||||
serverURL, err := url.Parse(handler.Policy.Edges.Node.Server)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
var controller *url.URL
|
||||
|
||||
switch scope {
|
||||
case "delete":
|
||||
controller, _ = url.Parse("/api/v3/slave/delete")
|
||||
case "thumb":
|
||||
controller, _ = url.Parse("/api/v3/slave/thumb")
|
||||
case "list":
|
||||
controller, _ = url.Parse("/api/v3/slave/list")
|
||||
default:
|
||||
controller = serverURL
|
||||
}
|
||||
|
||||
for _, r := range routes {
|
||||
controller.Path = path.Join(controller.Path, r)
|
||||
}
|
||||
|
||||
return serverURL.ResolveReference(controller).String()
|
||||
}
|
||||
|
||||
// Open is not implemented for the remote driver; content is streamed from
// the slave node via signed URLs instead. The commented code below is the
// legacy v3 implementation, kept for reference.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	//// Try to fetch the user's speed limit (legacy v3 logic).
	//speedLimit := 0
	//if user, ok := ctx.Value(fsctx.UserCtx).(model.User); ok {
	//	speedLimit = user.Group.SpeedLimit
	//}
	//
	//// Get the file source URL.
	//downloadURL, err := handler.Source(ctx, path, nil, true, int64(speedLimit))
	//if err != nil {
	//	return nil, err
	//}
	//
	//// Fetch the file data stream.
	//resp, err := handler.Client.Request(
	//	"GET",
	//	downloadURL,
	//	nil,
	//	request.WithContext(ctx),
	//	request.WithTimeout(time.Duration(0)),
	//	request.WithMasterMeta(handler.settings.SiteBasic(ctx).ID, handler.settings.SiteURL(ctx).String()),
	//).CheckHTTPResponse(200).GetRSCloser()
	//if err != nil {
	//	return nil, err
	//}
	//
	//resp.SetFirstFakeChunk()
	//
	//// Try to determine the file size.
	//if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok {
	//	resp.SetContentLength(int64(file.Size))
	//}

	return nil, errors.New("not implemented")
}
|
||||
|
||||
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Put 将文件流保存到指定目录
|
||||
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
|
||||
defer file.Close()
|
||||
|
||||
return handler.uploadClient.Upload(ctx, file)
|
||||
}
|
||||
|
||||
// Delete 删除一个或多个文件,
|
||||
// 返回未删除的文件,及遇到的最后一个错误
|
||||
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
|
||||
failed, err := handler.uploadClient.DeleteFiles(ctx, files...)
|
||||
if err != nil {
|
||||
return failed, err
|
||||
}
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
// Thumb 获取文件缩略图
|
||||
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
|
||||
serverURL, err := url.Parse(handler.Policy.Edges.Node.Server)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parse server url failed: %w", err)
|
||||
}
|
||||
|
||||
thumbURL := routes.SlaveThumbUrl(serverURL, e.Source(), ext)
|
||||
signedThumbURL, err := auth.SignURI(ctx, handler.AuthInstance, thumbURL.String(), expire)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return signedThumbURL.String(), nil
|
||||
}
|
||||
|
||||
// Source 获取外链URL
|
||||
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
|
||||
server, err := url.Parse(handler.Policy.Edges.Node.Server)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
nodeId := 0
|
||||
if handler.config.System().Mode == conf.SlaveMode {
|
||||
nodeId = handler.Policy.NodeID
|
||||
}
|
||||
|
||||
base := routes.SlaveFileContentUrl(
|
||||
server,
|
||||
e.Source(),
|
||||
args.DisplayName,
|
||||
args.IsDownload,
|
||||
args.Speed,
|
||||
nodeId,
|
||||
)
|
||||
internalProxyed, err := auth.SignURI(ctx, handler.AuthInstance, base.String(), args.Expire)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to sign internal slave content URL: %w", err)
|
||||
}
|
||||
|
||||
return internalProxyed.String(), nil
|
||||
}
|
||||
|
||||
// Token 获取上传策略和认证Token
|
||||
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
|
||||
// 在从机端创建上传会话
|
||||
uploadSession.Callback = routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeRemote, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()
|
||||
if err := handler.uploadClient.CreateUploadSession(ctx, uploadSession, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 获取上传地址
|
||||
uploadURL, sign, err := handler.uploadClient.GetUploadURL(ctx, uploadSession.Props.ExpireAt, uploadSession.Props.UploadSessionID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to sign upload url: %w", err)
|
||||
}
|
||||
|
||||
return &fs.UploadCredential{
|
||||
SessionID: uploadSession.Props.UploadSessionID,
|
||||
ChunkSize: handler.Policy.Settings.ChunkSize,
|
||||
UploadURLs: []string{uploadURL},
|
||||
Credential: sign,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// 取消上传凭证
|
||||
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
|
||||
return handler.uploadClient.DeleteUploadSession(ctx, uploadSession.Props.UploadSessionID)
|
||||
}
|
||||
|
||||
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (handler *Driver) Capabilities() *driver.Capabilities {
|
||||
return &driver.Capabilities{
|
||||
StaticFeatures: features,
|
||||
MediaMetaSupportedExts: handler.Policy.Settings.MediaMetaExts,
|
||||
MediaMetaProxy: handler.Policy.Settings.MediaMetaGeneratorProxy,
|
||||
ThumbSupportedExts: handler.Policy.Settings.ThumbExts,
|
||||
ThumbProxy: handler.Policy.Settings.ThumbGeneratorProxy,
|
||||
ThumbMaxSize: handler.Policy.Settings.ThumbMaxSize,
|
||||
ThumbSupportAllExts: handler.Policy.Settings.ThumbSupportAllExts,
|
||||
}
|
||||
}
|
||||
|
||||
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
|
||||
return handler.uploadClient.MediaMeta(ctx, path, ext)
|
||||
}
|
||||
514
pkg/filemanager/driver/s3/s3.go
Normal file
514
pkg/filemanager/driver/s3/s3.go
Normal file
@@ -0,0 +1,514 @@
|
||||
package s3
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/chunk/backoff"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/samber/lo"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
)
|
||||
|
||||
// Driver S3 compatible driver
type Driver struct {
	policy    *ent.StoragePolicy // storage policy this driver serves
	chunkSize int64              // multipart chunk size in bytes (defaults to 25 MiB in New)

	settings setting.Provider
	l        logging.Logger
	config   conf.ConfigProvider
	mime     mime.MimeDetector // fallback MIME detection by file name

	sess *session.Session // shared AWS session created in New
	svc  *s3.S3           // S3 service client built from sess
}
|
||||
|
||||
// UploadPolicy is the POST-policy document for browser-based S3 uploads.
type UploadPolicy struct {
	Expiration string        `json:"expiration"` // expiration timestamp of the policy
	Conditions []interface{} `json:"conditions"` // conditions the upload request must satisfy
}
|
||||
|
||||
// MetaData describes a stored object.
type MetaData struct {
	Size int64  // object size in bytes
	Etag string // entity tag reported by the S3 endpoint
}
|
||||
|
||||
var (
	// features is the static capability set of the S3 driver; populated in init.
	features = &boolset.BooleanSet{}
)
|
||||
|
||||
// init records the capabilities this driver always provides.
func init() {
	boolset.Sets(map[driver.HandlerCapability]bool{
		// A sentinel request is required to finalize client-side uploads.
		driver.HandlerCapabilityUploadSentinelRequired: true,
	}, features)
}
|
||||
|
||||
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
|
||||
config conf.ConfigProvider, l logging.Logger, mime mime.MimeDetector) (*Driver, error) {
|
||||
chunkSize := policy.Settings.ChunkSize
|
||||
if policy.Settings.ChunkSize == 0 {
|
||||
chunkSize = 25 << 20 // 25 MB
|
||||
}
|
||||
|
||||
driver := &Driver{
|
||||
policy: policy,
|
||||
settings: settings,
|
||||
chunkSize: chunkSize,
|
||||
config: config,
|
||||
l: l,
|
||||
mime: mime,
|
||||
}
|
||||
|
||||
sess, err := session.NewSession(&aws.Config{
|
||||
Credentials: credentials.NewStaticCredentials(policy.AccessKey, policy.SecretKey, ""),
|
||||
Endpoint: &policy.Server,
|
||||
Region: &policy.Settings.Region,
|
||||
S3ForcePathStyle: &policy.Settings.S3ForcePathStyle,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
driver.sess = sess
|
||||
driver.svc = s3.New(sess)
|
||||
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
//// List 列出给定路径下的文件
|
||||
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) {
|
||||
// // 初始化列目录参数
|
||||
// base = strings.TrimPrefix(base, "/")
|
||||
// if base != "" {
|
||||
// base += "/"
|
||||
// }
|
||||
//
|
||||
// opt := &s3.ListObjectsInput{
|
||||
// Bucket: &handler.policy.BucketName,
|
||||
// Prefix: &base,
|
||||
// MaxKeys: aws.Int64(1000),
|
||||
// }
|
||||
//
|
||||
// // 是否为递归列出
|
||||
// if !recursive {
|
||||
// opt.Delimiter = aws.String("/")
|
||||
// }
|
||||
//
|
||||
// var (
|
||||
// objects []*s3.Object
|
||||
// commons []*s3.CommonPrefix
|
||||
// )
|
||||
//
|
||||
// for {
|
||||
// res, err := handler.svc.ListObjectsWithContext(ctx, opt)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// objects = append(objects, res.Contents...)
|
||||
// commons = append(commons, res.CommonPrefixes...)
|
||||
//
|
||||
// // 如果本次未列取完,则继续使用marker获取结果
|
||||
// if *res.IsTruncated {
|
||||
// opt.Marker = res.NextMarker
|
||||
// } else {
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// // 处理列取结果
|
||||
// res := make([]response.Object, 0, len(objects)+len(commons))
|
||||
//
|
||||
// // 处理目录
|
||||
// for _, object := range commons {
|
||||
// rel, err := filepath.Rel(*opt.Prefix, *object.Prefix)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(*object.Prefix),
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: 0,
|
||||
// IsDir: true,
|
||||
// LastModify: time.Now(),
|
||||
// })
|
||||
// }
|
||||
// // 处理文件
|
||||
// for _, object := range objects {
|
||||
// rel, err := filepath.Rel(*opt.Prefix, *object.Key)
|
||||
// if err != nil {
|
||||
// continue
|
||||
// }
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(*object.Key),
|
||||
// Source: *object.Key,
|
||||
// RelativePath: filepath.ToSlash(rel),
|
||||
// Size: uint64(*object.Size),
|
||||
// IsDir: false,
|
||||
// LastModify: time.Now(),
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// return res, nil
|
||||
//
|
||||
//}
|
||||
|
||||
// Open opens the file at the given path as a local file handle.
// Not supported by the S3 driver; objects are accessed via signed URLs.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	return nil, errors.New("not implemented")
}
|
||||
|
||||
// Put 将文件流保存到指定目录
|
||||
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
|
||||
defer file.Close()
|
||||
|
||||
// 是否允许覆盖
|
||||
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
|
||||
if !overwrite {
|
||||
// Check for duplicated file
|
||||
if _, err := handler.Meta(ctx, file.Props.SavePath); err == nil {
|
||||
return fs.ErrFileExisted
|
||||
}
|
||||
}
|
||||
|
||||
uploader := s3manager.NewUploader(handler.sess, func(u *s3manager.Uploader) {
|
||||
u.PartSize = handler.chunkSize
|
||||
})
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
_, err := uploader.UploadWithContext(ctx, &s3manager.UploadInput{
|
||||
Bucket: &handler.policy.BucketName,
|
||||
Key: &file.Props.SavePath,
|
||||
Body: io.LimitReader(file, file.Props.Size),
|
||||
ContentType: aws.String(mimeType),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete 删除一个或多个文件,
|
||||
// 返回未删除的文件,及遇到的最后一个错误
|
||||
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
|
||||
failed := make([]string, 0, len(files))
|
||||
batchSize := handler.policy.Settings.S3DeleteBatchSize
|
||||
if batchSize == 0 {
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html
|
||||
// The request can contain a list of up to 1000 keys that you want to delete.
|
||||
batchSize = 1000
|
||||
}
|
||||
|
||||
var lastErr error
|
||||
|
||||
groups := lo.Chunk(files, batchSize)
|
||||
for _, group := range groups {
|
||||
if len(group) == 1 {
|
||||
// Invoke single file delete API
|
||||
_, err := handler.svc.DeleteObjectWithContext(ctx, &s3.DeleteObjectInput{
|
||||
Bucket: &handler.policy.BucketName,
|
||||
Key: &group[0],
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
if aerr, ok := err.(awserr.Error); ok {
|
||||
// Ignore NoSuchKey error
|
||||
if aerr.Code() == s3.ErrCodeNoSuchKey {
|
||||
continue
|
||||
}
|
||||
}
|
||||
failed = append(failed, group[0])
|
||||
lastErr = err
|
||||
}
|
||||
} else {
|
||||
// Invoke batch delete API
|
||||
res, err := handler.svc.DeleteObjects(
|
||||
&s3.DeleteObjectsInput{
|
||||
Bucket: &handler.policy.BucketName,
|
||||
Delete: &s3.Delete{
|
||||
Objects: lo.Map(group, func(s string, i int) *s3.ObjectIdentifier {
|
||||
return &s3.ObjectIdentifier{Key: &s}
|
||||
}),
|
||||
},
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
failed = append(failed, group...)
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
|
||||
for _, v := range res.Errors {
|
||||
handler.l.Debug("Failed to delete file: %s, Code:%s, Message:%s", v.Key, v.Code, v.Key)
|
||||
failed = append(failed, *v.Key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return failed, lastErr
|
||||
|
||||
}
|
||||
|
||||
// Thumb returns a thumbnail URL for the entity. Not supported natively by
// the S3 driver; thumbnails go through the proxy pipeline instead (see the
// ThumbProxy flag in Capabilities).
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
	return "", errors.New("not implemented")
}
|
||||
|
||||
// Source returns an external URL for the entity, pre-signed for private
// buckets. For downloads, a Content-Disposition override forces an
// attachment with the requested display name.
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
	var contentDescription *string
	if args.IsDownload {
		// RFC 5987 style filename encoding for non-ASCII display names.
		encodedFilename := url.PathEscape(args.DisplayName)
		contentDescription = aws.String(fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
			encodedFilename, encodedFilename))
	}

	req, _ := handler.svc.GetObjectRequest(
		&s3.GetObjectInput{
			Bucket:                     &handler.policy.BucketName,
			Key:                        aws.String(e.Source()),
			ResponseContentDisposition: contentDescription,
		})

	ttl := time.Duration(604800) * time.Second // 7 days
	if args.Expire != nil {
		ttl = time.Until(*args.Expire)
	}
	signedURL, err := req.Presign(ttl)
	if err != nil {
		return "", err
	}

	// NOTE(review): the original comment says the signed URL's domain is
	// swapped for a user-defined CDN domain, but no replacement happens in
	// this function — presumably handled by the caller; verify.
	finalURL, err := url.Parse(signedURL)
	if err != nil {
		return "", err
	}

	// For public buckets, strip the signature query (and unsupported headers).
	if !handler.policy.IsPrivate {
		finalURL.RawQuery = ""
	}

	return finalURL.String(), nil
}
|
||||
|
||||
// Token 获取上传策略和认证Token
|
||||
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
// Check for duplicated file
|
||||
if _, err := handler.Meta(ctx, file.Props.SavePath); err == nil {
|
||||
return nil, fs.ErrFileExisted
|
||||
}
|
||||
|
||||
// 生成回调地址
|
||||
siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
|
||||
// 在从机端创建上传会话
|
||||
uploadSession.ChunkSize = handler.chunkSize
|
||||
uploadSession.Callback = routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeS3, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
// 创建分片上传
|
||||
res, err := handler.svc.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
|
||||
Bucket: &handler.policy.BucketName,
|
||||
Key: &uploadSession.Props.SavePath,
|
||||
Expires: &uploadSession.Props.ExpireAt,
|
||||
ContentType: aws.String(mimeType),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create multipart upload: %w", err)
|
||||
}
|
||||
|
||||
uploadSession.UploadID = *res.UploadId
|
||||
|
||||
// 为每个分片签名上传 URL
|
||||
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{}, false, handler.l, "")
|
||||
urls := make([]string, chunks.Num())
|
||||
for chunks.Next() {
|
||||
err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error {
|
||||
signedReq, _ := handler.svc.UploadPartRequest(&s3.UploadPartInput{
|
||||
Bucket: &handler.policy.BucketName,
|
||||
Key: &uploadSession.Props.SavePath,
|
||||
PartNumber: aws.Int64(int64(c.Index() + 1)),
|
||||
ContentLength: aws.Int64(c.Length()),
|
||||
UploadId: res.UploadId,
|
||||
})
|
||||
|
||||
signedURL, err := signedReq.Presign(time.Until(uploadSession.Props.ExpireAt))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
urls[c.Index()] = signedURL
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// 签名完成分片上传的请求URL
|
||||
signedReq, _ := handler.svc.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
|
||||
Bucket: &handler.policy.BucketName,
|
||||
Key: &file.Props.SavePath,
|
||||
UploadId: res.UploadId,
|
||||
})
|
||||
|
||||
signedURL, err := signedReq.Presign(time.Until(uploadSession.Props.ExpireAt))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 生成上传凭证
|
||||
return &fs.UploadCredential{
|
||||
UploadID: *res.UploadId,
|
||||
UploadURLs: urls,
|
||||
CompleteURL: signedURL,
|
||||
SessionID: uploadSession.Props.UploadSessionID,
|
||||
ChunkSize: handler.chunkSize,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Meta 获取文件信息
|
||||
func (handler *Driver) Meta(ctx context.Context, path string) (*MetaData, error) {
|
||||
res, err := handler.svc.HeadObjectWithContext(ctx,
|
||||
&s3.HeadObjectInput{
|
||||
Bucket: &handler.policy.BucketName,
|
||||
Key: &path,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &MetaData{
|
||||
Size: *res.ContentLength,
|
||||
Etag: *res.ETag,
|
||||
}, nil
|
||||
|
||||
}
|
||||
|
||||
// CORS 创建跨域策略
|
||||
func (handler *Driver) CORS() error {
|
||||
rule := s3.CORSRule{
|
||||
AllowedMethods: aws.StringSlice([]string{
|
||||
"GET",
|
||||
"POST",
|
||||
"PUT",
|
||||
"DELETE",
|
||||
"HEAD",
|
||||
}),
|
||||
AllowedOrigins: aws.StringSlice([]string{"*"}),
|
||||
AllowedHeaders: aws.StringSlice([]string{"*"}),
|
||||
ExposeHeaders: aws.StringSlice([]string{"ETag"}),
|
||||
MaxAgeSeconds: aws.Int64(3600),
|
||||
}
|
||||
|
||||
_, err := handler.svc.PutBucketCors(&s3.PutBucketCorsInput{
|
||||
Bucket: &handler.policy.BucketName,
|
||||
CORSConfiguration: &s3.CORSConfiguration{
|
||||
CORSRules: []*s3.CORSRule{&rule},
|
||||
},
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// CancelToken aborts the multipart upload associated with the given upload
// session, releasing any parts already stored on the S3 side.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	_, err := handler.svc.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
		UploadId: &uploadSession.UploadID,
		Bucket:   &handler.policy.BucketName,
		Key:      &uploadSession.Props.SavePath,
	})
	return err
}
|
||||
|
||||
// cancelUpload aborts a multipart upload best-effort: a failure is logged
// as a warning but not propagated.
func (handler *Driver) cancelUpload(key, id *string) {
	if _, err := handler.svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		Bucket:   &handler.policy.BucketName,
		UploadId: id,
		Key:      key,
	}); err != nil {
		handler.l.Warning("failed to abort multipart upload: %s", err)
	}
}
|
||||
|
||||
// Capabilities describes the static features and proxy settings of this
// driver, including the maximum lifetime of pre-signed source URLs.
func (handler *Driver) Capabilities() *driver.Capabilities {
	return &driver.Capabilities{
		StaticFeatures:  features,
		MediaMetaProxy:  handler.policy.Settings.MediaMetaGeneratorProxy,
		ThumbProxy:      handler.policy.Settings.ThumbGeneratorProxy,
		MaxSourceExpire: time.Duration(604800) * time.Second, // 7 days
	}
}
|
||||
|
||||
// MediaMeta extracts media metadata from the object. Not supported natively
// by the S3 driver.
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
	return nil, errors.New("not implemented")
}

// LocalPath returns the local filesystem path of the object; remote S3
// objects have none, so an empty string is returned.
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	return ""
}
|
||||
|
||||
// CompleteUpload validates a finished client-side upload. When a sentinel
// task is attached to the session, the actual object size on S3 is compared
// against the expected size; a deviation yields a CodeMetaMismatch error.
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	if session.SentinelTaskID == 0 {
		// No sentinel task; nothing to verify.
		return nil
	}

	// Make sure uploaded file size is correct
	res, err := handler.Meta(ctx, session.Props.SavePath)
	if err != nil {
		return fmt.Errorf("failed to get uploaded file size: %w", err)
	}

	if res.Size != session.Props.Size {
		return serializer.NewError(
			serializer.CodeMetaMismatch,
			fmt.Sprintf("File size not match, expected: %d, actual: %d", session.Props.Size, res.Size),
			nil,
		)
	}
	return nil
}
|
||||
|
||||
type Reader struct {
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
func (r Reader) Read(p []byte) (int, error) {
|
||||
return r.r.Read(p)
|
||||
}
|
||||
154
pkg/filemanager/driver/upyun/media.go
Normal file
154
pkg/filemanager/driver/upyun/media.go
Normal file
@@ -0,0 +1,154 @@
|
||||
package upyun
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/samber/lo"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
	// mediaInfoTTL bounds the lifetime of signed media-info URLs.
	mediaInfoTTL = time.Duration(10) * time.Minute
)

type (
	// ImageInfo models the JSON response of UpYun's "!/meta" image-processing
	// endpoint; only the EXIF map is consumed here.
	ImageInfo struct {
		Exif map[string]string `json:"EXIF"`
	}
)
|
||||
|
||||
func (handler *Driver) extractImageMeta(ctx context.Context, path string) ([]driver.MediaMeta, error) {
|
||||
resp, err := handler.extractMediaInfo(ctx, path, "!/meta")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fmt.Println(resp)
|
||||
|
||||
var imageInfo ImageInfo
|
||||
if err := json.Unmarshal([]byte(resp), &imageInfo); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal image info: %w", err)
|
||||
}
|
||||
|
||||
metas := make([]driver.MediaMeta, 0, len(imageInfo.Exif))
|
||||
exifMap := lo.MapEntries(imageInfo.Exif, func(key string, value string) (string, string) {
|
||||
switch key {
|
||||
case "0xA434":
|
||||
key = "LensModel"
|
||||
}
|
||||
return key, value
|
||||
})
|
||||
metas = append(metas, mediameta.ExtractExifMap(exifMap, time.Time{})...)
|
||||
metas = append(metas, parseGpsInfo(imageInfo.Exif)...)
|
||||
|
||||
for i := 0; i < len(metas); i++ {
|
||||
metas[i].Type = driver.MetaTypeExif
|
||||
}
|
||||
|
||||
return metas, nil
|
||||
}
|
||||
|
||||
// extractMediaInfo fetches the raw response body of an UpYun media-processing
// endpoint (param, e.g. "!/meta") for the given object path, via a
// short-lived signed URL.
func (handler *Driver) extractMediaInfo(ctx context.Context, path string, param string) (string, error) {
	mediaInfoExpire := time.Now().Add(mediaInfoTTL)
	mediaInfoUrl, err := handler.signURL(ctx, path+param, nil, &mediaInfoExpire)
	if err != nil {
		return "", err
	}

	// Body is fetched even on non-200 so the error can include it.
	resp, err := handler.httpClient.
		Request(http.MethodGet, mediaInfoUrl, nil, request.WithContext(ctx)).
		CheckHTTPResponse(http.StatusOK).
		GetResponseIgnoreErr()
	if err != nil {
		return "", unmarshalError(resp, err)
	}

	return resp, nil
}
|
||||
|
||||
func unmarshalError(resp string, err error) error {
|
||||
return fmt.Errorf("upyun error: %s", err)
|
||||
}
|
||||
|
||||
// parseGpsInfo extracts GPS coordinates from raw EXIF key/value pairs and
// converts them into latitude/longitude MediaMeta entries. Returns nil when
// any of the four GPS fields is missing.
func parseGpsInfo(imageInfo map[string]string) []driver.MediaMeta {
	latitude := imageInfo["GPSLatitude"]   // 31/1, 162680820/10000000, 0/1
	longitude := imageInfo["GPSLongitude"] // 120/1, 429103939/10000000, 0/1
	latRef := imageInfo["GPSLatitudeRef"]  // N
	lonRef := imageInfo["GPSLongitudeRef"] // E

	// Make sure all value exist in map
	if latitude == "" || longitude == "" || latRef == "" || lonRef == "" {
		return nil
	}

	lat := parseRawGPS(latitude, latRef)
	lon := parseRawGPS(longitude, lonRef)
	// NOTE(review): parseRawGPS returns 0 for malformed input and does not
	// appear to produce NaN — this guard looks purely defensive; confirm.
	if !math.IsNaN(lat) && !math.IsNaN(lon) {
		lat, lng := mediameta.NormalizeGPS(lat, lon)
		return []driver.MediaMeta{{
			Key:   mediameta.GpsLat,
			Value: fmt.Sprintf("%f", lat),
		}, {
			Key:   mediameta.GpsLng,
			Value: fmt.Sprintf("%f", lng),
		}}
	}

	return nil
}
|
||||
|
||||
func parseRawGPS(gpsStr string, ref string) float64 {
|
||||
elem := strings.Split(gpsStr, ",")
|
||||
if len(elem) < 1 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var (
|
||||
deg float64
|
||||
minutes float64
|
||||
seconds float64
|
||||
)
|
||||
|
||||
deg = getGpsElemValue(elem[0])
|
||||
if len(elem) >= 2 {
|
||||
minutes = getGpsElemValue(elem[1])
|
||||
}
|
||||
if len(elem) >= 3 {
|
||||
seconds = getGpsElemValue(elem[2])
|
||||
}
|
||||
|
||||
decimal := deg + minutes/60.0 + seconds/3600.0
|
||||
|
||||
if ref == "S" || ref == "W" {
|
||||
return -decimal
|
||||
}
|
||||
|
||||
return decimal
|
||||
}
|
||||
|
||||
func getGpsElemValue(elm string) float64 {
|
||||
elements := strings.Split(elm, "/")
|
||||
if len(elements) != 2 {
|
||||
return 0
|
||||
}
|
||||
|
||||
numerator, err := strconv.ParseFloat(elements[0], 64)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
denominator, err := strconv.ParseFloat(elements[1], 64)
|
||||
if err != nil || denominator == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
return numerator / denominator
|
||||
}
|
||||
382
pkg/filemanager/driver/upyun/upyun.go
Normal file
382
pkg/filemanager/driver/upyun/upyun.go
Normal file
@@ -0,0 +1,382 @@
|
||||
package upyun
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/hmac"
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"github.com/cloudreve/Cloudreve/v4/inventory/types"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/conf"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/mime"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/logging"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/request"
|
||||
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/upyun/go-sdk/upyun"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type (
	// UploadPolicy is the UpYun form-upload policy document that gets
	// base64-encoded, signed, and handed to the client.
	UploadPolicy struct {
		Bucket             string `json:"bucket"`
		SaveKey            string `json:"save-key"`
		Expiration         int64  `json:"expiration"`
		CallbackURL        string `json:"notify-url"`
		ContentLength      uint64 `json:"content-length"`
		ContentLengthRange string `json:"content-length-range,omitempty"`
		AllowFileType      string `json:"allow-file-type,omitempty"`
	}
	// Driver is the UpYun storage policy adapter.
	Driver struct {
		policy *ent.StoragePolicy // Storage policy entity this driver serves.

		up         *upyun.UpYun        // UpYun SDK client.
		settings   setting.Provider    // Site-wide settings provider.
		l          logging.Logger      // Logger.
		config     conf.ConfigProvider // Application configuration.
		mime       mime.MimeDetector   // Detects MIME type from file names.
		httpClient request.Client      // HTTP client for media-info requests.
	}
)

var (
	// features advertises the static capabilities of this driver (none set).
	features = &boolset.BooleanSet{}
)
|
||||
|
||||
func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provider,
|
||||
config conf.ConfigProvider, l logging.Logger, mime mime.MimeDetector) (*Driver, error) {
|
||||
driver := &Driver{
|
||||
policy: policy,
|
||||
settings: settings,
|
||||
config: config,
|
||||
l: l,
|
||||
mime: mime,
|
||||
httpClient: request.NewClient(config, request.WithLogger(l)),
|
||||
up: upyun.NewUpYun(&upyun.UpYunConfig{
|
||||
Bucket: policy.BucketName,
|
||||
Operator: policy.AccessKey,
|
||||
Password: policy.SecretKey,
|
||||
}),
|
||||
}
|
||||
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
//func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) {
|
||||
// base = strings.TrimPrefix(base, "/")
|
||||
//
|
||||
// // 用于接受SDK返回对象的chan
|
||||
// objChan := make(chan *upyun.FileInfo)
|
||||
// objects := []*upyun.FileInfo{}
|
||||
//
|
||||
// // 列取配置
|
||||
// listConf := &upyun.GetObjectsConfig{
|
||||
// Path: "/" + base,
|
||||
// ObjectsChan: objChan,
|
||||
// MaxListTries: 1,
|
||||
// }
|
||||
// // 递归列取时不限制递归次数
|
||||
// if recursive {
|
||||
// listConf.MaxListLevel = -1
|
||||
// }
|
||||
//
|
||||
// // 启动一个goroutine收集列取的对象信
|
||||
// wg := &sync.WaitGroup{}
|
||||
// wg.Add(1)
|
||||
// go func(input chan *upyun.FileInfo, output *[]*upyun.FileInfo, wg *sync.WaitGroup) {
|
||||
// defer wg.Done()
|
||||
// for {
|
||||
// file, ok := <-input
|
||||
// if !ok {
|
||||
// return
|
||||
// }
|
||||
// *output = append(*output, file)
|
||||
// }
|
||||
// }(objChan, &objects, wg)
|
||||
//
|
||||
// up := upyun.NewUpYun(&upyun.UpYunConfig{
|
||||
// Bucket: handler.policy.BucketName,
|
||||
// Operator: handler.policy.AccessKey,
|
||||
// Password: handler.policy.SecretKey,
|
||||
// })
|
||||
//
|
||||
// err := up.List(listConf)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
//
|
||||
// wg.Wait()
|
||||
//
|
||||
// // 汇总处理列取结果
|
||||
// res := make([]response.Object, 0, len(objects))
|
||||
// for _, object := range objects {
|
||||
// res = append(res, response.Object{
|
||||
// Name: path.Base(object.Name),
|
||||
// RelativePath: object.Name,
|
||||
// Source: path.Join(base, object.Name),
|
||||
// Size: uint64(object.Size),
|
||||
// IsDir: object.IsDir,
|
||||
// LastModify: object.Time,
|
||||
// })
|
||||
// }
|
||||
//
|
||||
// return res, nil
|
||||
//}
|
||||
|
||||
// Open opens the file at the given path as a local file handle.
// Not supported by the UpYun driver.
func (handler *Driver) Open(ctx context.Context, path string) (*os.File, error) {
	return nil, errors.New("not implemented")
}
|
||||
|
||||
// Put 将文件流保存到指定目录
|
||||
func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
|
||||
defer file.Close()
|
||||
|
||||
// 是否允许覆盖
|
||||
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
|
||||
if !overwrite {
|
||||
if _, err := handler.up.GetInfo(file.Props.SavePath); err == nil {
|
||||
return fs.ErrFileExisted
|
||||
}
|
||||
}
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
err := handler.up.Put(&upyun.PutObjectConfig{
|
||||
Path: file.Props.SavePath,
|
||||
Reader: file,
|
||||
Headers: map[string]string{
|
||||
"Content-Type": mimeType,
|
||||
},
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete removes one or more files (asynchronously on the UpYun side).
// It returns the files that could not be deleted together with the last
// error encountered; missing files are treated as already deleted.
func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, error) {
	failed := make([]string, 0)
	var lastErr error

	for _, file := range files {
		if err := handler.up.Delete(&upyun.DeleteObjectConfig{
			Path:  file,
			Async: true,
		}); err != nil {
			// Strip the file name from the message first so a path that
			// itself contains "Not found"/"NoSuchKey" cannot cause a false
			// not-found match.
			filteredErr := strings.ReplaceAll(err.Error(), file, "")
			if strings.Contains(filteredErr, "Not found") ||
				strings.Contains(filteredErr, "NoSuchKey") {
				continue
			}

			failed = append(failed, file)
			lastErr = err
		}
	}

	return failed, lastErr
}
|
||||
|
||||
// Thumb returns a signed URL of a thumbnail produced by UpYun's built-in
// image processing ("!/fwfh/WxH" — presumably fit-within scaling; confirm
// against the UpYun image processing docs).
func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string, e fs.Entity) (string, error) {
	w, h := handler.settings.ThumbSize(ctx)

	thumbParam := fmt.Sprintf("!/fwfh/%dx%d", w, h)
	thumbURL, err := handler.signURL(ctx, e.Source()+thumbParam, nil, expire)
	if err != nil {
		return "", err
	}

	return thumbURL, nil
}
|
||||
|
||||
// Source returns the external (optionally token-signed) URL of the entity.
func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.GetSourceArgs) (string, error) {
	query := url.Values{}

	// For download URLs, "_upd" presumably makes UpYun serve the file as an
	// attachment under the given display name — verify against UpYun docs.
	if args.IsDownload {
		query.Add("_upd", args.DisplayName)
	}

	return handler.signURL(ctx, e.Source(), &query, args.Expire)
}
|
||||
|
||||
// signURL builds the public URL of path under the policy's proxy/CDN server
// and, for private buckets, appends UpYun's "_upt" token-based anti-leech
// signature. A nil expire yields an effectively permanent URL (~20 years).
func (handler *Driver) signURL(ctx context.Context, path string, query *url.Values, expire *time.Time) (string, error) {
	sourceURL, err := url.Parse(handler.policy.Settings.ProxyServer)
	if err != nil {
		return "", err
	}

	fileKey, err := url.Parse(url.PathEscape(path))
	if err != nil {
		return "", err
	}

	sourceURL = sourceURL.ResolveReference(fileKey)
	if query != nil {
		sourceURL.RawQuery = query.Encode()
	}

	if !handler.policy.IsPrivate {
		// Token anti-leech disabled; return the plain URL.
		return sourceURL.String(), nil
	}

	// Default expiration: roughly 20 years from now.
	etime := time.Now().Add(time.Duration(24) * time.Hour * 365 * 20).Unix()
	if expire != nil {
		etime = expire.Unix()
	}
	// Signature input is "token&etime&URI" per UpYun's "_upt" scheme.
	signStr := fmt.Sprintf(
		"%s&%d&%s",
		handler.policy.Settings.Token,
		etime,
		sourceURL.Path,
	)
	signMd5 := fmt.Sprintf("%x", md5.Sum([]byte(signStr)))
	// Final token: hex chars 12..19 of the MD5 followed by the timestamp.
	finalSign := signMd5[12:20] + strconv.FormatInt(etime, 10)

	// Append the signature to the URL.
	q := sourceURL.Query()
	q.Add("_upt", finalSign)
	sourceURL.RawQuery = q.Encode()

	return sourceURL.String(), nil
}
|
||||
|
||||
// Token 获取上传策略和认证Token
|
||||
func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSession, file *fs.UploadRequest) (*fs.UploadCredential, error) {
|
||||
if _, err := handler.up.GetInfo(file.Props.SavePath); err == nil {
|
||||
return nil, fs.ErrFileExisted
|
||||
}
|
||||
|
||||
// 生成回调地址
|
||||
siteURL := handler.settings.SiteURL(setting.UseFirstSiteUrl(ctx))
|
||||
apiUrl := routes.MasterSlaveCallbackUrl(siteURL, types.PolicyTypeUpyun, uploadSession.Props.UploadSessionID, uploadSession.CallbackSecret).String()
|
||||
|
||||
// 上传策略
|
||||
putPolicy := UploadPolicy{
|
||||
Bucket: handler.policy.BucketName,
|
||||
SaveKey: file.Props.SavePath,
|
||||
Expiration: uploadSession.Props.ExpireAt.Unix(),
|
||||
CallbackURL: apiUrl,
|
||||
ContentLength: uint64(file.Props.Size),
|
||||
ContentLengthRange: fmt.Sprintf("0,%d", file.Props.Size),
|
||||
}
|
||||
|
||||
// 生成上传凭证
|
||||
policyJSON, err := json.Marshal(putPolicy)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
policyEncoded := base64.StdEncoding.EncodeToString(policyJSON)
|
||||
|
||||
// 生成签名
|
||||
elements := []string{"POST", "/" + handler.policy.BucketName, policyEncoded}
|
||||
signStr := sign(handler.policy.AccessKey, handler.policy.SecretKey, elements)
|
||||
|
||||
mimeType := file.Props.MimeType
|
||||
if mimeType == "" {
|
||||
handler.mime.TypeByName(file.Props.Uri.Name())
|
||||
}
|
||||
|
||||
return &fs.UploadCredential{
|
||||
UploadPolicy: policyEncoded,
|
||||
UploadURLs: []string{"https://v0.api.upyun.com/" + handler.policy.BucketName},
|
||||
Credential: signStr,
|
||||
MimeType: mimeType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CancelToken cancels an upload credential. UpYun form uploads keep no
// server-side session state, so there is nothing to clean up.
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
	return nil
}

// CompleteUpload finalizes an upload session; nothing to do for UpYun.
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
	return nil
}
|
||||
|
||||
// Capabilities describes the feature set of the UpYun driver. Native media
// metadata extraction is only advertised when enabled in the policy.
func (handler *Driver) Capabilities() *driver.Capabilities {
	mediaMetaExts := handler.policy.Settings.MediaMetaExts
	if !handler.policy.Settings.NativeMediaProcessing {
		mediaMetaExts = nil
	}
	return &driver.Capabilities{
		StaticFeatures:         features,
		MediaMetaSupportedExts: mediaMetaExts,
		MediaMetaProxy:         handler.policy.Settings.MediaMetaGeneratorProxy,
		ThumbSupportedExts:     handler.policy.Settings.ThumbExts,
		ThumbProxy:             handler.policy.Settings.ThumbGeneratorProxy,
		ThumbMaxSize:           handler.policy.Settings.ThumbMaxSize,
		ThumbSupportAllExts:    handler.policy.Settings.ThumbSupportAllExts,
	}
}
|
||||
|
||||
// MediaMeta extracts EXIF media metadata via UpYun's image-processing API.
func (handler *Driver) MediaMeta(ctx context.Context, path, ext string) ([]driver.MediaMeta, error) {
	return handler.extractImageMeta(ctx, path)
}

// LocalPath returns the local filesystem path of the object; remote UpYun
// objects have none, so an empty string is returned.
func (handler *Driver) LocalPath(ctx context.Context, path string) string {
	return ""
}
|
||||
|
||||
func ValidateCallback(c *gin.Context, session *fs.UploadSession) error {
|
||||
body, err := io.ReadAll(c.Request.Body)
|
||||
c.Request.Body.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read request body: %w", err)
|
||||
}
|
||||
|
||||
c.Request.Body = io.NopCloser(bytes.NewReader(body))
|
||||
contentMD5 := c.Request.Header.Get("Content-Md5")
|
||||
date := c.Request.Header.Get("Date")
|
||||
actualSignature := c.Request.Header.Get("Authorization")
|
||||
actualContentMD5 := fmt.Sprintf("%x", md5.Sum(body))
|
||||
if actualContentMD5 != contentMD5 {
|
||||
return errors.New("MD5 mismatch")
|
||||
}
|
||||
|
||||
// Compare signature
|
||||
signature := sign(session.Policy.AccessKey, session.Policy.SecretKey, []string{
|
||||
"POST",
|
||||
c.Request.URL.Path,
|
||||
date,
|
||||
contentMD5,
|
||||
})
|
||||
if signature != actualSignature {
|
||||
return errors.New("Signature not match")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sign 计算又拍云的签名头
|
||||
func sign(ak, sk string, elements []string) string {
|
||||
password := fmt.Sprintf("%x", md5.Sum([]byte(sk)))
|
||||
mac := hmac.New(sha1.New, []byte(password))
|
||||
value := strings.Join(elements, "&")
|
||||
mac.Write([]byte(value))
|
||||
signStr := base64.StdEncoding.EncodeToString((mac.Sum(nil)))
|
||||
return fmt.Sprintf("UPYUN %s:%s", ak, signStr)
|
||||
}
|
||||
37
pkg/filemanager/driver/util.go
Normal file
37
pkg/filemanager/driver/util.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package driver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/cloudreve/Cloudreve/v4/ent"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func ApplyProxyIfNeeded(policy *ent.StoragePolicy, srcUrl *url.URL) (*url.URL, error) {
|
||||
// For custom proxy, generate a new proxyed URL:
|
||||
// [Proxy Scheme][Proxy Host][Proxy Port][ProxyPath + OriginSrcPath][OriginSrcQuery + ProxyQuery]
|
||||
if policy.Settings.CustomProxy {
|
||||
proxy, err := url.Parse(policy.Settings.ProxyServer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse proxy URL: %w", err)
|
||||
}
|
||||
proxy.Path = path.Join(proxy.Path, strings.TrimPrefix(srcUrl.Path, "/"))
|
||||
q := proxy.Query()
|
||||
if len(q) == 0 {
|
||||
proxy.RawQuery = srcUrl.RawQuery
|
||||
} else {
|
||||
// Merge query parameters
|
||||
srcQ := srcUrl.Query()
|
||||
for k, _ := range srcQ {
|
||||
q.Set(k, srcQ.Get(k))
|
||||
}
|
||||
|
||||
proxy.RawQuery = q.Encode()
|
||||
}
|
||||
|
||||
srcUrl = proxy
|
||||
}
|
||||
|
||||
return srcUrl, nil
|
||||
}
|
||||
Reference in New Issue
Block a user