Compare commits

...

10 Commits

Author SHA1 Message Date
Leonmmcoset
6cf9d0a1ac upload 2025-10-21 20:27:45 +08:00
Leonmmcoset
fe76d14835 change 2025-10-19 22:28:45 +08:00
4f0511bdfe change "assets" folder to right url 2025-10-19 13:34:43 +00:00
Aaron Liu
21cdafb2af fix(oss): traffic limit should be in query instead of headers (#2977) 2025-10-16 07:46:22 +08:00
Aaron Liu
e29237d593 fix(webdav): error code for missing parent in mkcol should be 409 instead of 404 (#2953) 2025-10-15 10:28:31 +08:00
Aaron Liu
46897e2880 fix(oss): presigned multipart upload mismatch 2025-10-14 10:21:43 +08:00
Aaron Liu
213eaa54dd update submodule 2025-10-14 09:29:24 +08:00
Aaron Liu
e7d6fb25e4 feat(oss): upgrade to SDK v2 (#2963) 2025-10-14 08:49:45 +08:00
Darren Yu
e3e08a9b75 feat(share): adapt to keep specified path in V3 sharing link (#2958) 2025-10-12 10:28:40 +08:00
酸柠檬猹Char
78f7ec8b08 fix: Some containers won't auto restart in the current Docker Compose (#2932)
Add "restart: unless-stopped" to the database and redis container.
2025-09-27 22:04:38 +08:00
17 changed files with 303 additions and 127 deletions

2
.build/build-assets.sh Executable file → Normal file
View File

@@ -6,7 +6,7 @@ export NODE_OPTIONS="--max-old-space-size=8192"
cd assets cd assets
rm -rf build rm -rf build
yarn install --network-timeout 1000000 yarn install --network-timeout 1000000
yarn version --new-version $1 --no-git-tag-version # yarn version --new-version $1 --no-git-tag-version
yarn run build yarn run build
# Copy the build files to the application directory # Copy the build files to the application directory

0
.build/entrypoint.sh Executable file → Normal file
View File

2
.gitmodules vendored
View File

@@ -1,3 +1,3 @@
[submodule "assets"] [submodule "assets"]
path = assets path = assets
url = https://github.com/cloudreve/frontend.git url = http://leonmmcoset.jjxmm.win:2000/MiaoStars/cloudreve-assets.git

2
assets

Submodule assets updated: 0bf85fa0ab...921570f229

BIN
assets.zip Normal file

Binary file not shown.

54
build.sh Normal file
View File

@@ -0,0 +1,54 @@
#!/bin/bash
set -e

# build.sh — builds the frontend assets, then the Go backend binary,
# printing wall-clock timing for each phase and for the whole run.

# 1. Record start time (integer seconds via %s, so arithmetic needs no bc)
start_seconds=$(date +%s)
start_datetime=$(date +"%Y-%m-%d %H:%M:%S")
echo "========================================"
echo "Build started at: $start_datetime"
echo "========================================"

# 2. Frontend build (timed)
echo -e "\n[1/2] Starting frontend build..."
frontend_start=$(date +%s)  # frontend phase start (integer seconds)
chmod +x ./.build/build-assets.sh
./.build/build-assets.sh
# Phase duration via bash built-in integer arithmetic (no bc needed)
frontend_end=$(date +%s)
frontend_duration=$((frontend_end - frontend_start))
echo "[1/2] Frontend build completed! (Time: ${frontend_duration}s)"

# 3. Backend build (timed)
echo -e "\n[2/2] Starting backend build..."
backend_start=$(date +%s)  # backend phase start (integer seconds)

# Derive version metadata from Git. Assign before exporting (ShellCheck
# SC2155): `export VAR=$(cmd)` would mask cmd's exit status, silently
# producing empty values under `set -e`. Fall back to defaults when the
# repo has no tags or this is not a git checkout.
COMMIT_SHA=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
VERSION=$(git describe --tags 2>/dev/null || echo "dev")
export COMMIT_SHA VERSION
echo "Backend version: $VERSION, Commit: $COMMIT_SHA"

# Compile the Go backend, injecting version info via -ldflags -X
go build -a -o cloudreve \
    -ldflags "-s -w -X 'github.com/cloudreve/Cloudreve/v4/application/constants.BackendVersion=$VERSION' -X 'github.com/cloudreve/Cloudreve/v4/application/constants.LastCommit=$COMMIT_SHA'"

backend_end=$(date +%s)
backend_duration=$((backend_end - backend_start))
echo "[2/2] Backend build completed! (Time: ${backend_duration}s)"

# 4. Total duration summary (integer seconds, human-readable)
end_seconds=$(date +%s)
total_duration=$((end_seconds - start_seconds))
end_datetime=$(date +"%Y-%m-%d %H:%M:%S")
echo -e "\n========================================"
echo "Build successfully completed!"
echo "Total duration: ${total_duration}s"
echo "Started at: $start_datetime"
echo "Ended at: $end_datetime"
echo "========================================"

View File

@@ -5,7 +5,7 @@ services:
depends_on: depends_on:
- postgresql - postgresql
- redis - redis
restart: always restart: unless-stopped
ports: ports:
- 5212:5212 - 5212:5212
- 6888:6888 - 6888:6888
@@ -26,6 +26,7 @@ services:
# backup & consult https://www.postgresql.org/docs/current/pgupgrade.html # backup & consult https://www.postgresql.org/docs/current/pgupgrade.html
image: postgres:17 image: postgres:17
container_name: postgresql container_name: postgresql
restart: unless-stopped
environment: environment:
- POSTGRES_USER=cloudreve - POSTGRES_USER=cloudreve
- POSTGRES_DB=cloudreve - POSTGRES_DB=cloudreve
@@ -36,6 +37,7 @@ services:
redis: redis:
image: redis:latest image: redis:latest
container_name: redis container_name: redis
restart: unless-stopped
volumes: volumes:
- redis_data:/data - redis_data:/data

4
go.mod
View File

@@ -6,8 +6,9 @@ require (
entgo.io/ent v0.13.0 entgo.io/ent v0.13.0
github.com/Masterminds/semver/v3 v3.3.1 github.com/Masterminds/semver/v3 v3.3.1
github.com/abslant/gzip v0.0.9 github.com/abslant/gzip v0.0.9
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.3.0
github.com/aws/aws-sdk-go v1.31.5 github.com/aws/aws-sdk-go v1.31.5
github.com/bodgit/sevenzip v1.6.0
github.com/cloudflare/cfssl v1.6.1 github.com/cloudflare/cfssl v1.6.1
github.com/dhowden/tag v0.0.0-20230630033851-978a0926ee25 github.com/dhowden/tag v0.0.0-20230630033851-978a0926ee25
github.com/dsoprea/go-exif/v3 v3.0.1 github.com/dsoprea/go-exif/v3 v3.0.1
@@ -70,7 +71,6 @@ require (
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 // indirect github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/bodgit/plumbing v1.3.0 // indirect github.com/bodgit/plumbing v1.3.0 // indirect
github.com/bodgit/sevenzip v1.6.0 // indirect
github.com/bodgit/windows v1.0.1 // indirect github.com/bodgit/windows v1.0.1 // indirect
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
github.com/bytedance/sonic v1.11.6 // indirect github.com/bytedance/sonic v1.11.6 // indirect

4
go.sum
View File

@@ -100,8 +100,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g= github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.3.0 h1:wQlqotpyjYPjJz+Noh5bRu7Snmydk8SKC5Z6u1CR20Y=
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/aliyun/alibabacloud-oss-go-sdk-v2 v1.3.0/go.mod h1:FTzydeQVmR24FI0D6XWUOMKckjXehM/jgMn1xC+DA9M=
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 h1:8PmGpDEZl9yDpcdEr6Odf23feCxK3LNUNMxjXg41pZQ= github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3 h1:8PmGpDEZl9yDpcdEr6Odf23feCxK3LNUNMxjXg41pZQ=
github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/andybalholm/brotli v1.1.2-0.20250424173009-453214e765f3/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=

BIN
go1.25.0.linux-amd64.tar.gz Normal file

Binary file not shown.

10
node_modules/.yarn-integrity generated vendored Normal file
View File

@@ -0,0 +1,10 @@
{
"systemParams": "linux-x64-109",
"modulesFolders": [],
"flags": [],
"linkedModules": [],
"topLevelPatterns": [],
"lockfileEntries": {},
"files": [],
"artifacts": {}
}

View File

@@ -5,16 +5,17 @@ import (
"encoding/json" "encoding/json"
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
"github.com/samber/lo"
"math" "math"
"net/http" "net/http"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/mediameta"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
"github.com/samber/lo"
) )
const ( const (
@@ -265,13 +266,14 @@ func (handler *Driver) extractImageMeta(ctx context.Context, path string) ([]dri
// extractMediaInfo Sends API calls to OSS IMM service to extract media info. // extractMediaInfo Sends API calls to OSS IMM service to extract media info.
func (handler *Driver) extractMediaInfo(ctx context.Context, path string, category string, forceSign bool) (string, error) { func (handler *Driver) extractMediaInfo(ctx context.Context, path string, category string, forceSign bool) (string, error) {
mediaOption := []oss.Option{oss.Process(category)}
mediaInfoExpire := time.Now().Add(mediaInfoTTL) mediaInfoExpire := time.Now().Add(mediaInfoTTL)
thumbURL, err := handler.signSourceURL( thumbURL, err := handler.signSourceURL(
ctx, ctx,
path, path,
&mediaInfoExpire, &mediaInfoExpire,
mediaOption, &oss.GetObjectRequest{
Process: oss.Ptr(category),
},
forceSign, forceSign,
) )
if err != nil { if err != nil {

View File

@@ -15,7 +15,8 @@ import (
"strings" "strings"
"time" "time"
"github.com/aliyun/aliyun-oss-go-sdk/oss" "github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss"
"github.com/aliyun/alibabacloud-oss-go-sdk-v2/oss/credentials"
"github.com/cloudreve/Cloudreve/v4/ent" "github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory/types" "github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset" "github.com/cloudreve/Cloudreve/v4/pkg/boolset"
@@ -52,7 +53,6 @@ type Driver struct {
policy *ent.StoragePolicy policy *ent.StoragePolicy
client *oss.Client client *oss.Client
bucket *oss.Bucket
settings setting.Provider settings setting.Provider
l logging.Logger l logging.Logger
config conf.ConfigProvider config conf.ConfigProvider
@@ -65,12 +65,11 @@ type Driver struct {
type key int type key int
const ( const (
chunkRetrySleep = time.Duration(5) * time.Second chunkRetrySleep = time.Duration(5) * time.Second
uploadIdParam = "uploadId" maxDeleteBatch = 1000
partNumberParam = "partNumber" completeAllHeader = "x-oss-complete-all"
callbackParam = "callback" forbidOverwriteHeader = "x-oss-forbid-overwrite"
completeAllHeader = "x-oss-complete-all" trafficLimitHeader = "x-oss-traffic-limit"
maxDeleteBatch = 1000
// MultiPartUploadThreshold 服务端使用分片上传的阈值 // MultiPartUploadThreshold 服务端使用分片上传的阈值
MultiPartUploadThreshold int64 = 5 * (1 << 30) // 5GB MultiPartUploadThreshold int64 = 5 * (1 << 30) // 5GB
@@ -102,21 +101,27 @@ func New(ctx context.Context, policy *ent.StoragePolicy, settings setting.Provid
// CORS 创建跨域策略 // CORS 创建跨域策略
func (handler *Driver) CORS() error { func (handler *Driver) CORS() error {
return handler.client.SetBucketCORS(handler.policy.BucketName, []oss.CORSRule{ _, err := handler.client.PutBucketCors(context.Background(), &oss.PutBucketCorsRequest{
{ Bucket: &handler.policy.BucketName,
AllowedOrigin: []string{"*"}, CORSConfiguration: &oss.CORSConfiguration{
AllowedMethod: []string{ CORSRules: []oss.CORSRule{
"GET", {
"POST", AllowedOrigins: []string{"*"},
"PUT", AllowedMethods: []string{
"DELETE", "GET",
"HEAD", "POST",
"PUT",
"DELETE",
"HEAD",
},
ExposeHeaders: []string{},
AllowedHeaders: []string{"*"},
MaxAgeSeconds: oss.Ptr(int64(3600)),
},
}, },
ExposeHeader: []string{}, }})
AllowedHeader: []string{"*"},
MaxAgeSeconds: 3600, return err
},
})
} }
// InitOSSClient 初始化OSS鉴权客户端 // InitOSSClient 初始化OSS鉴权客户端
@@ -125,34 +130,28 @@ func (handler *Driver) InitOSSClient(forceUsePublicEndpoint bool) error {
return errors.New("empty policy") return errors.New("empty policy")
} }
opt := make([]oss.ClientOption, 0)
// 决定是否使用内网 Endpoint // 决定是否使用内网 Endpoint
endpoint := handler.policy.Server endpoint := handler.policy.Server
useCname := false
if handler.policy.Settings.ServerSideEndpoint != "" && !forceUsePublicEndpoint { if handler.policy.Settings.ServerSideEndpoint != "" && !forceUsePublicEndpoint {
endpoint = handler.policy.Settings.ServerSideEndpoint endpoint = handler.policy.Settings.ServerSideEndpoint
} else if handler.policy.Settings.UseCname { } else if handler.policy.Settings.UseCname {
opt = append(opt, oss.UseCname(true)) useCname = true
} }
if !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") { if !strings.HasPrefix(endpoint, "http://") && !strings.HasPrefix(endpoint, "https://") {
endpoint = "https://" + endpoint endpoint = "https://" + endpoint
} }
cfg := oss.LoadDefaultConfig().
WithCredentialsProvider(credentials.NewStaticCredentialsProvider(handler.policy.AccessKey, handler.policy.SecretKey, "")).
WithEndpoint(endpoint).
WithRegion(handler.policy.Settings.Region).
WithUseCName(useCname)
// 初始化客户端 // 初始化客户端
client, err := oss.New(endpoint, handler.policy.AccessKey, handler.policy.SecretKey, opt...) client := oss.NewClient(cfg)
if err != nil {
return err
}
handler.client = client handler.client = client
// 初始化存储桶
bucket, err := client.Bucket(handler.policy.BucketName)
if err != nil {
return err
}
handler.bucket = bucket
return nil return nil
} }
@@ -166,38 +165,40 @@ func (handler *Driver) List(ctx context.Context, base string, onProgress driver.
var ( var (
delimiter string delimiter string
marker string
objects []oss.ObjectProperties objects []oss.ObjectProperties
commons []string commons []oss.CommonPrefix
) )
if !recursive { if !recursive {
delimiter = "/" delimiter = "/"
} }
for { p := handler.client.NewListObjectsPaginator(&oss.ListObjectsRequest{
subRes, err := handler.bucket.ListObjects(oss.Marker(marker), oss.Prefix(base), Bucket: &handler.policy.BucketName,
oss.MaxKeys(1000), oss.Delimiter(delimiter)) Prefix: &base,
MaxKeys: 1000,
Delimiter: &delimiter,
})
for p.HasNext() {
page, err := p.NextPage(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
objects = append(objects, subRes.Objects...)
commons = append(commons, subRes.CommonPrefixes...) objects = append(objects, page.Contents...)
marker = subRes.NextMarker commons = append(commons, page.CommonPrefixes...)
if marker == "" {
break
}
} }
// 处理列取结果 // 处理列取结果
res := make([]fs.PhysicalObject, 0, len(objects)+len(commons)) res := make([]fs.PhysicalObject, 0, len(objects)+len(commons))
// 处理目录 // 处理目录
for _, object := range commons { for _, object := range commons {
rel, err := filepath.Rel(base, object) rel, err := filepath.Rel(base, *object.Prefix)
if err != nil { if err != nil {
continue continue
} }
res = append(res, fs.PhysicalObject{ res = append(res, fs.PhysicalObject{
Name: path.Base(object), Name: path.Base(*object.Prefix),
RelativePath: filepath.ToSlash(rel), RelativePath: filepath.ToSlash(rel),
Size: 0, Size: 0,
IsDir: true, IsDir: true,
@@ -208,17 +209,17 @@ func (handler *Driver) List(ctx context.Context, base string, onProgress driver.
// 处理文件 // 处理文件
for _, object := range objects { for _, object := range objects {
rel, err := filepath.Rel(base, object.Key) rel, err := filepath.Rel(base, *object.Key)
if err != nil { if err != nil {
continue continue
} }
res = append(res, fs.PhysicalObject{ res = append(res, fs.PhysicalObject{
Name: path.Base(object.Key), Name: path.Base(*object.Key),
Source: object.Key, Source: *object.Key,
RelativePath: filepath.ToSlash(rel), RelativePath: filepath.ToSlash(rel),
Size: object.Size, Size: object.Size,
IsDir: false, IsDir: false,
LastModify: object.LastModified, LastModify: *object.LastModified,
}) })
} }
onProgress(len(res)) onProgress(len(res))
@@ -245,25 +246,34 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
// 是否允许覆盖 // 是否允许覆盖
overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite overwrite := file.Mode&fs.ModeOverwrite == fs.ModeOverwrite
options := []oss.Option{ forbidOverwrite := oss.Ptr(strconv.FormatBool(!overwrite))
oss.WithContext(ctx), exipires := oss.Ptr(time.Now().Add(credentialTTL * time.Second).Format(time.RFC3339))
oss.Expires(time.Now().Add(credentialTTL * time.Second)),
oss.ForbidOverWrite(!overwrite),
oss.ContentType(mimeType),
}
// 小文件直接上传 // 小文件直接上传
if file.Props.Size < MultiPartUploadThreshold { if file.Props.Size < MultiPartUploadThreshold {
return handler.bucket.PutObject(file.Props.SavePath, file, options...) _, err := handler.client.PutObject(ctx, &oss.PutObjectRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
Body: file,
ForbidOverwrite: forbidOverwrite,
ContentType: oss.Ptr(mimeType),
})
return err
} }
// 超过阈值时使用分片上传 // 超过阈值时使用分片上传
imur, err := handler.bucket.InitiateMultipartUpload(file.Props.SavePath, options...) imur, err := handler.client.InitiateMultipartUpload(ctx, &oss.InitiateMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
ContentType: oss.Ptr(mimeType),
ForbidOverwrite: forbidOverwrite,
Expires: exipires,
})
if err != nil { if err != nil {
return fmt.Errorf("failed to initiate multipart upload: %w", err) return fmt.Errorf("failed to initiate multipart upload: %w", err)
} }
parts := make([]oss.UploadPart, 0) parts := make([]*oss.UploadPartResult, 0)
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{ chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{
Max: handler.settings.ChunkRetryLimit(ctx), Max: handler.settings.ChunkRetryLimit(ctx),
@@ -271,7 +281,13 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
}, handler.settings.UseChunkBuffer(ctx), handler.l, handler.settings.TempPath(ctx)) }, handler.settings.UseChunkBuffer(ctx), handler.l, handler.settings.TempPath(ctx))
uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error { uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
part, err := handler.bucket.UploadPart(imur, content, current.Length(), current.Index()+1, oss.WithContext(ctx)) part, err := handler.client.UploadPart(ctx, &oss.UploadPartRequest{
Bucket: &handler.policy.BucketName,
Key: &file.Props.SavePath,
UploadId: imur.UploadId,
PartNumber: int32(current.Index() + 1),
Body: content,
})
if err == nil { if err == nil {
parts = append(parts, part) parts = append(parts, part)
} }
@@ -280,14 +296,27 @@ func (handler *Driver) Put(ctx context.Context, file *fs.UploadRequest) error {
for chunks.Next() { for chunks.Next() {
if err := chunks.Process(uploadFunc); err != nil { if err := chunks.Process(uploadFunc); err != nil {
handler.cancelUpload(imur) handler.cancelUpload(*imur)
return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err) return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
} }
} }
_, err = handler.bucket.CompleteMultipartUpload(imur, parts, oss.ForbidOverWrite(!overwrite), oss.WithContext(ctx)) _, err = handler.client.CompleteMultipartUpload(ctx, &oss.CompleteMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: imur.Key,
UploadId: imur.UploadId,
CompleteMultipartUpload: &oss.CompleteMultipartUpload{
Parts: lo.Map(parts, func(part *oss.UploadPartResult, i int) oss.UploadPart {
return oss.UploadPart{
PartNumber: int32(i + 1),
ETag: part.ETag,
}
}),
},
ForbidOverwrite: oss.Ptr(strconv.FormatBool(!overwrite)),
})
if err != nil { if err != nil {
handler.cancelUpload(imur) handler.cancelUpload(*imur)
} }
return err return err
@@ -302,7 +331,12 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e
for index, group := range groups { for index, group := range groups {
handler.l.Debug("Process delete group #%d: %v", index, group) handler.l.Debug("Process delete group #%d: %v", index, group)
// 删除文件 // 删除文件
delRes, err := handler.bucket.DeleteObjects(group) delRes, err := handler.client.DeleteMultipleObjects(ctx, &oss.DeleteMultipleObjectsRequest{
Bucket: &handler.policy.BucketName,
Objects: lo.Map(group, func(v string, i int) oss.DeleteObject {
return oss.DeleteObject{Key: &v}
}),
})
if err != nil { if err != nil {
failed = append(failed, group...) failed = append(failed, group...)
lastError = err lastError = err
@@ -310,7 +344,14 @@ func (handler *Driver) Delete(ctx context.Context, files ...string) ([]string, e
} }
// 统计未删除的文件 // 统计未删除的文件
failed = append(failed, util.SliceDifference(files, delRes.DeletedObjects)...) failed = append(
failed,
util.SliceDifference(files,
lo.Map(delRes.DeletedObjects, func(v oss.DeletedInfo, i int) string {
return *v.Key
}),
)...,
)
} }
if len(failed) > 0 && lastError == nil { if len(failed) > 0 && lastError == nil {
@@ -343,12 +384,14 @@ func (handler *Driver) Thumb(ctx context.Context, expire *time.Time, ext string,
thumbParam += fmt.Sprintf("/format,%s", enco.Format) thumbParam += fmt.Sprintf("/format,%s", enco.Format)
} }
thumbOption := []oss.Option{oss.Process(thumbParam)} req := &oss.GetObjectRequest{
Process: oss.Ptr(thumbParam),
}
thumbURL, err := handler.signSourceURL( thumbURL, err := handler.signSourceURL(
ctx, ctx,
e.Source(), e.Source(),
expire, expire,
thumbOption, req,
false, false,
) )
if err != nil { if err != nil {
@@ -370,11 +413,11 @@ func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.Get
} }
// 添加各项设置 // 添加各项设置
var signOptions = make([]oss.Option, 0, 2) req := &oss.GetObjectRequest{}
if args.IsDownload { if args.IsDownload {
encodedFilename := url.PathEscape(args.DisplayName) encodedFilename := url.PathEscape(args.DisplayName)
signOptions = append(signOptions, oss.ResponseContentDisposition(fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, req.ResponseContentDisposition = oss.Ptr(fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`,
encodedFilename, encodedFilename))) encodedFilename, encodedFilename))
} }
if args.Speed > 0 { if args.Speed > 0 {
// Byte 转换为 bit // Byte 转换为 bit
@@ -387,25 +430,35 @@ func (handler *Driver) Source(ctx context.Context, e fs.Entity, args *driver.Get
if args.Speed > 838860800 { if args.Speed > 838860800 {
args.Speed = 838860800 args.Speed = 838860800
} }
signOptions = append(signOptions, oss.TrafficLimitParam(args.Speed)) req.Parameters = map[string]string{
trafficLimitHeader: strconv.FormatInt(args.Speed, 10),
}
} }
return handler.signSourceURL(ctx, e.Source(), args.Expire, signOptions, false) return handler.signSourceURL(ctx, e.Source(), args.Expire, req, false)
} }
func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *time.Time, options []oss.Option, forceSign bool) (string, error) { func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *time.Time, req *oss.GetObjectRequest, forceSign bool) (string, error) {
ttl := int64(86400 * 365 * 20) ttl := time.Duration(24) * time.Hour * 365 * 20
if expire != nil { if expire != nil {
ttl = int64(time.Until(*expire).Seconds()) ttl = time.Until(*expire)
} }
signedURL, err := handler.bucket.SignURL(path, oss.HTTPGet, ttl, options...) if req == nil {
req = &oss.GetObjectRequest{}
}
req.Bucket = &handler.policy.BucketName
req.Key = &path
// signedURL, err := handler.client.Presign(path, oss.HTTPGet, ttl, options...)
result, err := handler.client.Presign(ctx, req, oss.PresignExpires(ttl))
if err != nil { if err != nil {
return "", err return "", err
} }
// 将最终生成的签名URL域名换成用户自定义的加速域名如果有 // 将最终生成的签名URL域名换成用户自定义的加速域名如果有
finalURL, err := url.Parse(signedURL) finalURL, err := url.Parse(result.URL)
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -416,7 +469,7 @@ func (handler *Driver) signSourceURL(ctx context.Context, path string, expire *t
query.Del("OSSAccessKeyId") query.Del("OSSAccessKeyId")
query.Del("Signature") query.Del("Signature")
query.Del("response-content-disposition") query.Del("response-content-disposition")
query.Del("x-oss-traffic-limit") query.Del(trafficLimitHeader)
finalURL.RawQuery = query.Encode() finalURL.RawQuery = query.Encode()
} }
return finalURL.String(), nil return finalURL.String(), nil
@@ -454,34 +507,41 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
} }
// 初始化分片上传 // 初始化分片上传
options := []oss.Option{ imur, err := handler.client.InitiateMultipartUpload(ctx, &oss.InitiateMultipartUploadRequest{
oss.WithContext(ctx), Bucket: &handler.policy.BucketName,
oss.Expires(uploadSession.Props.ExpireAt), Key: &file.Props.SavePath,
oss.ForbidOverWrite(true), ContentType: oss.Ptr(mimeType),
oss.ContentType(mimeType), ForbidOverwrite: oss.Ptr(strconv.FormatBool(true)),
} Expires: oss.Ptr(uploadSession.Props.ExpireAt.Format(time.RFC3339)),
imur, err := handler.bucket.InitiateMultipartUpload(file.Props.SavePath, options...) })
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to initialize multipart upload: %w", err) return nil, fmt.Errorf("failed to initialize multipart upload: %w", err)
} }
uploadSession.UploadID = imur.UploadID uploadSession.UploadID = *imur.UploadId
// 为每个分片签名上传 URL // 为每个分片签名上传 URL
chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{}, false, handler.l, "") chunks := chunk.NewChunkGroup(file, handler.chunkSize, &backoff.ConstantBackoff{}, false, handler.l, "")
urls := make([]string, chunks.Num()) urls := make([]string, chunks.Num())
ttl := int64(time.Until(uploadSession.Props.ExpireAt).Seconds()) ttl := time.Until(uploadSession.Props.ExpireAt)
for chunks.Next() { for chunks.Next() {
err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error { err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error {
signedURL, err := handler.bucket.SignURL(file.Props.SavePath, oss.HTTPPut, signedURL, err := handler.client.Presign(ctx, &oss.UploadPartRequest{
ttl, Bucket: &handler.policy.BucketName,
oss.AddParam(partNumberParam, strconv.Itoa(c.Index()+1)), Key: &file.Props.SavePath,
oss.AddParam(uploadIdParam, imur.UploadID), UploadId: imur.UploadId,
oss.ContentType("application/octet-stream")) PartNumber: int32(c.Index() + 1),
Body: chunk,
RequestCommon: oss.RequestCommon{
Headers: map[string]string{
"Content-Type": "application/octet-stream",
},
},
}, oss.PresignExpires(ttl))
if err != nil { if err != nil {
return err return err
} }
urls[c.Index()] = signedURL urls[c.Index()] = signedURL.URL
return nil return nil
}) })
if err != nil { if err != nil {
@@ -490,29 +550,43 @@ func (handler *Driver) Token(ctx context.Context, uploadSession *fs.UploadSessio
} }
// 签名完成分片上传的URL // 签名完成分片上传的URL
completeURL, err := handler.bucket.SignURL(file.Props.SavePath, oss.HTTPPost, ttl, completeURL, err := handler.client.Presign(ctx, &oss.CompleteMultipartUploadRequest{
oss.ContentType("application/octet-stream"), Bucket: &handler.policy.BucketName,
oss.AddParam(uploadIdParam, imur.UploadID), Key: &file.Props.SavePath,
oss.Expires(time.Now().Add(time.Duration(ttl)*time.Second)), UploadId: imur.UploadId,
oss.SetHeader(completeAllHeader, "yes"), RequestCommon: oss.RequestCommon{
oss.ForbidOverWrite(true), Parameters: map[string]string{
oss.AddParam(callbackParam, callbackPolicyEncoded)) "callback": callbackPolicyEncoded,
},
Headers: map[string]string{
"Content-Type": "application/octet-stream",
completeAllHeader: "yes",
forbidOverwriteHeader: "true",
},
},
}, oss.PresignExpires(ttl))
if err != nil { if err != nil {
return nil, err return nil, err
} }
return &fs.UploadCredential{ return &fs.UploadCredential{
UploadID: imur.UploadID, UploadID: *imur.UploadId,
UploadURLs: urls, UploadURLs: urls,
CompleteURL: completeURL, CompleteURL: completeURL.URL,
SessionID: uploadSession.Props.UploadSessionID, SessionID: uploadSession.Props.UploadSessionID,
ChunkSize: handler.chunkSize, ChunkSize: handler.chunkSize,
Callback: callbackPolicyEncoded,
}, nil }, nil
} }
// 取消上传凭证 // 取消上传凭证
func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error { func (handler *Driver) CancelToken(ctx context.Context, uploadSession *fs.UploadSession) error {
return handler.bucket.AbortMultipartUpload(oss.InitiateMultipartUploadResult{UploadID: uploadSession.UploadID, Key: uploadSession.Props.SavePath}, oss.WithContext(ctx)) _, err := handler.client.AbortMultipartUpload(ctx, &oss.AbortMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: &uploadSession.Props.SavePath,
UploadId: &uploadSession.UploadID,
})
return err
} }
func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error { func (handler *Driver) CompleteUpload(ctx context.Context, session *fs.UploadSession) error {
@@ -556,7 +630,11 @@ func (handler *Driver) LocalPath(ctx context.Context, path string) string {
} }
func (handler *Driver) cancelUpload(imur oss.InitiateMultipartUploadResult) { func (handler *Driver) cancelUpload(imur oss.InitiateMultipartUploadResult) {
if err := handler.bucket.AbortMultipartUpload(imur); err != nil { if _, err := handler.client.AbortMultipartUpload(context.Background(), &oss.AbortMultipartUploadRequest{
Bucket: &handler.policy.BucketName,
Key: imur.Key,
UploadId: imur.UploadId,
}); err != nil {
handler.l.Warning("failed to abort multipart upload: %s", err) handler.l.Warning("failed to abort multipart upload: %s", err)
} }
} }

View File

@@ -244,8 +244,8 @@ type (
UploadURLs []string `json:"upload_urls,omitempty"` UploadURLs []string `json:"upload_urls,omitempty"`
Credential string `json:"credential,omitempty"` Credential string `json:"credential,omitempty"`
UploadID string `json:"uploadID,omitempty"` UploadID string `json:"uploadID,omitempty"`
Callback string `json:"callback,omitempty"` // 回调地址 Callback string `json:"callback,omitempty"`
Uri string `json:"uri,omitempty"` // 存储路径 Uri string `json:"uri,omitempty"` // 存储路径
AccessKey string `json:"ak,omitempty"` AccessKey string `json:"ak,omitempty"`
KeyTime string `json:"keyTime,omitempty"` // COS用有效期 KeyTime string `json:"keyTime,omitempty"` // COS用有效期
CompleteURL string `json:"completeURL,omitempty"` CompleteURL string `json:"completeURL,omitempty"`

View File

@@ -212,7 +212,13 @@ func handleMkcol(c *gin.Context, user *ent.User, fm manager.FileManager) (status
_, err = fm.Create(ctx, uri, types.FileTypeFolder, dbfs.WithNoChainedCreation(), dbfs.WithErrorOnConflict()) _, err = fm.Create(ctx, uri, types.FileTypeFolder, dbfs.WithNoChainedCreation(), dbfs.WithErrorOnConflict())
if err != nil { if err != nil {
return purposeStatusCodeFromError(err), err code := purposeStatusCodeFromError(err)
if code == http.StatusNotFound {
// When the MKCOL operation creates a new collection resource, all ancestors MUST already exist,
// or the method MUST fail with a 409 (Conflict) status code.
return http.StatusConflict, err
}
return code, err
} }
return http.StatusCreated, nil return http.StatusCreated, nil

View File

@@ -2,6 +2,7 @@ package share
import ( import (
"context" "context"
"strings"
"github.com/cloudreve/Cloudreve/v4/application/dependency" "github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/ent" "github.com/cloudreve/Cloudreve/v4/ent"
@@ -25,7 +26,26 @@ type (
) )
func (s *ShortLinkRedirectService) RedirectTo(c *gin.Context) string { func (s *ShortLinkRedirectService) RedirectTo(c *gin.Context) string {
return routes.MasterShareLongUrl(s.ID, s.Password).String() shareLongUrl := routes.MasterShareLongUrl(s.ID, s.Password)
shortLinkQuery := c.Request.URL.Query() // Query in ShortLink, adapt to Cloudreve V3
shareLongUrlQuery := shareLongUrl.Query()
userSpecifiedPath := shortLinkQuery.Get("path")
if userSpecifiedPath != "" {
masterPath := shareLongUrlQuery.Get("path")
masterPath += "/" + strings.TrimPrefix(userSpecifiedPath, "/")
shareLongUrlQuery.Set("path", masterPath)
}
shortLinkQuery.Del("path") // 防止用户指定的 Path 就是空字符串
for k, vals := range shortLinkQuery {
shareLongUrlQuery[k] = append(shareLongUrlQuery[k], vals...)
}
shareLongUrl.RawQuery = shareLongUrlQuery.Encode()
return shareLongUrl.String()
} }
type ( type (

4
yarn.lock Normal file
View File

@@ -0,0 +1,4 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1