Init V4 community edition (#2265)

* Init V4 community edition

* Init V4 community edition
This commit is contained in:
AaronLiu
2025-04-20 17:31:25 +08:00
committed by GitHub
parent da4e44b77a
commit 21d158db07
597 changed files with 119415 additions and 41692 deletions

View File

@@ -1,68 +0,0 @@
package explorer
import (
"context"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem"
"github.com/cloudreve/Cloudreve/v3/pkg/serializer"
"github.com/gin-gonic/gin"
)
// DirectoryService is the request binding for creating and listing directories.
type DirectoryService struct {
	// Path is the virtual directory path, taken from the request URI.
	Path string `uri:"path" json:"path" binding:"required,min=1,max=65535"`
}
// ListDirectory lists the objects contained in the requested directory.
func (service *DirectoryService) ListDirectory(c *gin.Context) serializer.Response {
	// Build a filesystem bound to the requesting user.
	fileSys, err := filesystem.NewFileSystemFromContext(c)
	if err != nil {
		return serializer.Err(serializer.CodeCreateFSError, "", err)
	}
	defer fileSys.Recycle()

	// Cancellable context for the listing operation.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Fetch the children of the target path.
	children, err := fileSys.List(ctx, service.Path, nil)
	if err != nil {
		return serializer.Err(serializer.CodeNotSet, err.Error(), err)
	}

	// Resolve the ID of the listed directory itself, if it was matched.
	var parentID uint
	if len(fileSys.DirTarget) > 0 {
		parentID = fileSys.DirTarget[0].ID
	}

	return serializer.Response{
		Code: 0,
		Data: serializer.BuildObjectList(parentID, children, fileSys.Policy),
	}
}
// CreateDirectory creates a new directory at the requested path.
func (service *DirectoryService) CreateDirectory(c *gin.Context) serializer.Response {
	fileSys, err := filesystem.NewFileSystemFromContext(c)
	if err != nil {
		return serializer.Err(serializer.CodeCreateFSError, "", err)
	}
	defer fileSys.Recycle()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The created folder record itself is not needed by the response.
	if _, err = fileSys.CreateDirectory(ctx, service.Path); err != nil {
		return serializer.Err(serializer.CodeCreateFolderFailed, err.Error(), err)
	}

	return serializer.Response{Code: 0}
}

118
service/explorer/entity.go Normal file
View File

@@ -0,0 +1,118 @@
package explorer
import (
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/gin-gonic/gin"
)
type (
	// EntityDownloadParameterCtx is the context key for EntityDownloadService.
	EntityDownloadParameterCtx struct{}
	// EntityDownloadService binds the URI parameters of an entity download request.
	EntityDownloadService struct {
		// Name is the display name to serve the file under.
		Name string `uri:"name" binding:"required"`
		// SpeedLimit caps the transfer rate; 0 presumably means unlimited — confirm with entitysource.
		SpeedLimit int64 `uri:"speed"`
		// Src is the source identifier from the URI.
		Src string `uri:"src"`
	}
)
// Serve streams the entity's content to the client, honoring download,
// thumbnail and speed-limit options from the request.
func (s *EntityDownloadService) Serve(c *gin.Context) error {
	dep := dependency.FromContext(c)
	u := inventory.UserFromContext(c)
	fm := manager.NewFileManager(dep, u)
	defer fm.Recycle()

	source, err := fm.GetEntitySource(c, hashid.FromContext(c))
	if err != nil {
		return fmt.Errorf("failed to get entity source: %w", err)
	}
	defer source.Close()

	// Public resources are cacheable; advertise the configured max-age.
	maxAge := dep.SettingProvider().PublicResourceMaxAge(c)
	c.Header("Cache-Control", fmt.Sprintf("public, max-age=%d", maxAge))

	download := c.Query(routes.IsDownloadQuery) != ""
	thumb := c.Query(routes.IsThumbQuery) != ""
	source.Serve(c.Writer, c.Request,
		entitysource.WithSpeedLimit(s.SpeedLimit),
		entitysource.WithDownload(download),
		entitysource.WithDisplayName(s.Name),
		entitysource.WithContext(c),
		entitysource.WithThumb(thumb),
	)
	return nil
}
type (
	// SetCurrentVersionParamCtx is the context key for SetCurrentVersionService.
	SetCurrentVersionParamCtx struct{}
	// SetCurrentVersionService binds the URI parameters for switching a file's current version.
	SetCurrentVersionService struct {
		Uri     string `uri:"uri" binding:"required"`
		Version string `uri:"version" binding:"required"`
	}
)
// Set marks the given entity version as the file's current version.
func (s *SetCurrentVersionService) Set(c *gin.Context) error {
	dep := dependency.FromContext(c)
	u := inventory.UserFromContext(c)
	fm := manager.NewFileManager(dep, u)
	defer fm.Recycle()

	// Parse the target file URI.
	target, err := fs.NewUriFromString(s.Uri)
	if err != nil {
		return serializer.NewError(serializer.CodeParamErr, "unknown uri", err)
	}

	// Decode the version hash into its numeric entity ID.
	versionID, err := dep.HashIDEncoder().Decode(s.Version, hashid.EntityID)
	if err != nil {
		return serializer.NewError(serializer.CodeParamErr, "unknown version id", err)
	}

	if err := fm.SetCurrentVersion(c, target, versionID); err != nil {
		return fmt.Errorf("failed to set current version: %w", err)
	}
	return nil
}
type (
	// DeleteVersionParamCtx is the context key for DeleteVersionService.
	DeleteVersionParamCtx struct{}
	// DeleteVersionService binds the URI parameters for deleting a file version.
	DeleteVersionService struct {
		Uri     string `uri:"uri" binding:"required"`
		Version string `uri:"version" binding:"required"`
	}
)
// Delete removes the given version of the file.
func (s *DeleteVersionService) Delete(c *gin.Context) error {
	dep := dependency.FromContext(c)
	u := inventory.UserFromContext(c)
	fm := manager.NewFileManager(dep, u)
	defer fm.Recycle()

	// Parse the target file URI.
	target, err := fs.NewUriFromString(s.Uri)
	if err != nil {
		return serializer.NewError(serializer.CodeParamErr, "unknown uri", err)
	}

	// Decode the version hash into its numeric entity ID.
	versionID, err := dep.HashIDEncoder().Decode(s.Version, hashid.EntityID)
	if err != nil {
		return serializer.NewError(serializer.CodeParamErr, "unknown version id", err)
	}

	if err := fm.DeleteVersion(c, target, versionID); err != nil {
		return fmt.Errorf("failed to delete version: %w", err)
	}
	return nil
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,37 @@
package explorer
import (
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/gin-gonic/gin"
)
type (
	// PatchMetadataService binds a batch metadata patch request.
	PatchMetadataService struct {
		// Uris are the target file URIs.
		Uris []string `json:"uris" binding:"required"`
		// Patches are the metadata mutations to apply to every target.
		Patches []fs.MetadataPatch `json:"patches" binding:"required,dive"`
	}
	// PatchMetadataParameterCtx is the context key for PatchMetadataService.
	PatchMetadataParameterCtx struct{}
)
// GetUris returns the target URIs of this metadata patch request.
func (s *PatchMetadataService) GetUris() []string {
	return s.Uris
}
// Patch applies the requested metadata patches to all target URIs.
func (s *PatchMetadataService) Patch(c *gin.Context) error {
	dep := dependency.FromContext(c)
	user := inventory.UserFromContext(c)
	m := manager.NewFileManager(dep, user)
	defer m.Recycle()
	uris, err := fs.NewUriFromStrings(s.Uris...)
	if err != nil {
		return serializer.NewError(serializer.CodeParamErr, "unknown uri", err)
	}
	// NOTE(review): "PatchMedata" looks like a typo of "PatchMetadata" on the
	// manager interface; renaming requires a coordinated change there.
	return m.PatchMedata(c, uris, s.Patches...)
}

View File

@@ -1,467 +0,0 @@
package explorer
import (
"context"
"encoding/gob"
"fmt"
"math"
"path"
"strings"
"time"
model "github.com/cloudreve/Cloudreve/v3/models"
"github.com/cloudreve/Cloudreve/v3/pkg/auth"
"github.com/cloudreve/Cloudreve/v3/pkg/cache"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem"
"github.com/cloudreve/Cloudreve/v3/pkg/hashid"
"github.com/cloudreve/Cloudreve/v3/pkg/serializer"
"github.com/cloudreve/Cloudreve/v3/pkg/task"
"github.com/cloudreve/Cloudreve/v3/pkg/util"
"github.com/gin-gonic/gin"
)
// ItemMoveService handles moving multiple files/directories.
type ItemMoveService struct {
	SrcDir string        `json:"src_dir" binding:"required,min=1,max=65535"`
	Src    ItemIDService `json:"src"`
	Dst    string        `json:"dst" binding:"required,min=1,max=65535"`
}

// ItemRenameService handles renaming of files/directories.
type ItemRenameService struct {
	Src     ItemIDService `json:"src"`
	NewName string        `json:"new_name" binding:"required,min=1,max=255"`
}

// ItemService carries raw numeric IDs of multiple files/directories.
type ItemService struct {
	Items []uint `json:"items"`
	Dirs  []uint `json:"dirs"`
}

// ItemIDService carries hash IDs of multiple files/directories; the raw
// numeric IDs can be obtained through the Raw() method.
type ItemIDService struct {
	Items []string `json:"items"`
	Dirs  []string `json:"dirs"`
	// Source memoizes the decoded result of Raw().
	Source     *ItemService
	Force      bool `json:"force"`
	UnlinkOnly bool `json:"unlink"`
}

// ItemCompressService describes a file compression task request.
type ItemCompressService struct {
	Src  ItemIDService `json:"src"`
	Dst  string        `json:"dst" binding:"required,min=1,max=65535"`
	Name string        `json:"name" binding:"required,min=1,max=255"`
}

// ItemDecompressService describes a file decompression task request.
type ItemDecompressService struct {
	Src      string `json:"src"`
	Dst      string `json:"dst" binding:"required,min=1,max=65535"`
	Encoding string `json:"encoding"`
}

// ItemPropertyService fetches the properties of a single object.
type ItemPropertyService struct {
	ID        string `binding:"required"`
	TraceRoot bool   `form:"trace_root"`
	IsFolder  bool   `form:"is_folder"`
}

func init() {
	// Register ItemIDService so it can be gob-encoded (see the archive
	// download session cached in Archive below).
	gob.Register(ItemIDService{})
}
// Raw decodes the batch of hash IDs into their original numeric IDs. The
// result is memoized in Source, so repeated calls do not decode twice.
// Entries that fail to decode are silently skipped.
func (service *ItemIDService) Raw() *ItemService {
	if service.Source != nil {
		return service.Source
	}

	decoded := &ItemService{
		Dirs:  make([]uint, 0, len(service.Dirs)),
		Items: make([]uint, 0, len(service.Items)),
	}
	for _, hash := range service.Dirs {
		if id, err := hashid.DecodeHashID(hash, hashid.FolderID); err == nil {
			decoded.Dirs = append(decoded.Dirs, id)
		}
	}
	for _, hash := range service.Items {
		if id, err := hashid.DecodeHashID(hash, hashid.FileID); err == nil {
			decoded.Items = append(decoded.Items, id)
		}
	}

	service.Source = decoded
	return decoded
}
// CreateDecompressTask validates and submits an asynchronous decompression task.
// Validation order: group permission, destination existence, source existence,
// size quota, then archive format by file extension.
func (service *ItemDecompressService) CreateDecompressTask(c *gin.Context) serializer.Response {
	// Build a filesystem bound to the requesting user.
	fs, err := filesystem.NewFileSystemFromContext(c)
	if err != nil {
		return serializer.Err(serializer.CodeCreateFSError, "", err)
	}
	defer fs.Recycle()
	// Check the user group's permission for archive tasks.
	if !fs.User.Group.OptionsSerialized.ArchiveTask {
		return serializer.Err(serializer.CodeGroupNotAllowed, "", nil)
	}
	// The destination directory must exist.
	if exist, _ := fs.IsPathExist(service.Dst); !exist {
		return serializer.Err(serializer.CodeParentNotExist, "", nil)
	}
	// The archive file must exist.
	exist, file := fs.IsFileExist(service.Src)
	if !exist {
		return serializer.Err(serializer.CodeFileNotFound, "", nil)
	}
	// Enforce the group's decompression size limit (0 means unlimited).
	if fs.User.Group.OptionsSerialized.DecompressSize != 0 && file.Size > fs.User.Group.OptionsSerialized.DecompressSize {
		return serializer.Err(serializer.CodeFileTooLarge, "", nil)
	}
	// Supported archive extensions.
	var (
		suffixes = []string{".zip", ".gz", ".xz", ".tar", ".rar"}
		matched  bool
	)
	for _, suffix := range suffixes {
		if strings.HasSuffix(file.Name, suffix) {
			matched = true
			break
		}
	}
	if !matched {
		return serializer.Err(serializer.CodeUnsupportedArchiveType, "", nil)
	}
	// Create and submit the background task.
	job, err := task.NewDecompressTask(fs.User, service.Src, service.Dst, service.Encoding)
	if err != nil {
		return serializer.Err(serializer.CodeCreateTaskError, "", err)
	}
	task.TaskPoll.Submit(job)
	return serializer.Response{}
}
// CreateCompressTask validates and submits an asynchronous compression task.
// It checks group permission, destination validity, name legality, the group's
// total-size quota, and estimates required capacity before enqueueing.
func (service *ItemCompressService) CreateCompressTask(c *gin.Context) serializer.Response {
	// Build a filesystem bound to the requesting user.
	fs, err := filesystem.NewFileSystemFromContext(c)
	if err != nil {
		return serializer.Err(serializer.CodeCreateFSError, "", err)
	}
	defer fs.Recycle()
	// Check the user group's permission for archive tasks.
	if !fs.User.Group.OptionsSerialized.ArchiveTask {
		return serializer.Err(serializer.CodeGroupNotAllowed, "", nil)
	}
	// Append the .zip extension if missing.
	if !strings.HasSuffix(service.Name, ".zip") {
		service.Name += ".zip"
	}
	// The destination directory must exist and the target name must be free.
	if exist, _ := fs.IsPathExist(service.Dst); !exist {
		return serializer.Err(serializer.CodeParentNotExist, "", nil)
	}
	if exist, _ := fs.IsFileExist(path.Join(service.Dst, service.Name)); exist {
		return serializer.ParamErr("File "+service.Name+" already exist", nil)
	}
	// Validate the target file name and extension.
	if !fs.ValidateLegalName(context.Background(), service.Name) {
		return serializer.Err(serializer.CodeIllegalObjectName, "", nil)
	}
	if !fs.ValidateExtension(context.Background(), service.Name) {
		return serializer.Err(serializer.CodeFileTypeNotAllowed, "", nil)
	}
	// Recursively list the sub-folders to be compressed.
	folders, err := model.GetRecursiveChildFolder(service.Src.Raw().Dirs, fs.User.ID, true)
	if err != nil {
		return serializer.DBErr("Failed to list folders", err)
	}
	// List all files to be compressed.
	files, err := model.GetChildFilesOfFolders(&folders)
	if err != nil {
		return serializer.DBErr("Failed to list files", err)
	}
	// Sum up the total size of the files.
	var totalSize uint64
	for i := 0; i < len(files); i++ {
		totalSize += files[i].Size
	}
	// Enforce the group's compression size limit (0 means unlimited).
	if fs.User.Group.OptionsSerialized.CompressSize != 0 && totalSize > fs.User.Group.OptionsSerialized.CompressSize {
		return serializer.Err(serializer.CodeFileTooLarge, "", nil)
	}
	// Estimate required capacity assuming an average 0.4 compression ratio.
	compressRatio := 0.4
	spaceNeeded := uint64(math.Round(float64(totalSize) * compressRatio))
	if fs.User.GetRemainingCapacity() < spaceNeeded {
		// NOTE(review): err is nil at this point; the error argument carries no info.
		return serializer.Err(serializer.CodeInsufficientCapacity, "", err)
	}
	// Create and submit the background task.
	job, err := task.NewCompressTask(fs.User, path.Join(service.Dst, service.Name), service.Src.Raw().Dirs,
		service.Src.Raw().Items)
	if err != nil {
		return serializer.Err(serializer.CodeCreateTaskError, "", err)
	}
	task.TaskPoll.Submit(job)
	return serializer.Response{}
}
// Archive creates a batch (zip) download session for the selected objects and
// returns a signed URL the client can fetch the archive from.
func (service *ItemIDService) Archive(ctx context.Context, c *gin.Context) serializer.Response {
	// Build a filesystem bound to the requesting user.
	fs, err := filesystem.NewFileSystemFromContext(c)
	if err != nil {
		return serializer.Err(serializer.CodeCreateFSError, "", err)
	}
	defer fs.Recycle()
	// Check the user group's permission for archive downloads.
	if !fs.User.Group.OptionsSerialized.ArchiveDownload {
		return serializer.Err(serializer.CodeGroupNotAllowed, "", nil)
	}
	// Create the download session: cache the selection and the user under a
	// random session ID for the configured TTL.
	ttl := model.GetIntSetting("archive_timeout", 30)
	downloadSessionID := util.RandStringRunes(16)
	cache.Set("archive_"+downloadSessionID, *service, ttl)
	cache.Set("archive_user_"+downloadSessionID, *fs.User, ttl)
	signURL, err := auth.SignURI(
		auth.General,
		fmt.Sprintf("/api/v3/file/archive/%s/archive.zip", downloadSessionID),
		int64(ttl),
	)
	// Bug fix: the signing error was previously ignored, so a failed signature
	// would dereference a nil signURL below.
	if err != nil {
		return serializer.Err(serializer.CodeNotSet, err.Error(), err)
	}
	return serializer.Response{
		Code: 0,
		Data: signURL.String(),
	}
}
// Delete removes the selected files and directories.
func (service *ItemIDService) Delete(ctx context.Context, c *gin.Context) serializer.Response {
	fileSys, err := filesystem.NewFileSystemFromContext(c)
	if err != nil {
		return serializer.Err(serializer.CodePolicyNotAllowed, err.Error(), err)
	}
	defer fileSys.Recycle()

	// Advanced deletion flags are honored only for privileged groups.
	var force, unlink bool
	if fileSys.User.Group.OptionsSerialized.AdvanceDelete {
		force, unlink = service.Force, service.UnlinkOnly
	}

	items := service.Raw()
	if err := fileSys.Delete(ctx, items.Dirs, items.Items, force, unlink); err != nil {
		return serializer.Err(serializer.CodeNotSet, err.Error(), err)
	}

	return serializer.Response{Code: 0}
}
// Move relocates the selected objects from SrcDir to Dst.
func (service *ItemMoveService) Move(ctx context.Context, c *gin.Context) serializer.Response {
	fileSys, err := filesystem.NewFileSystemFromContext(c)
	if err != nil {
		return serializer.Err(serializer.CodeCreateFSError, "", err)
	}
	defer fileSys.Recycle()

	src := service.Src.Raw()
	if err := fileSys.Move(ctx, src.Dirs, src.Items, service.SrcDir, service.Dst); err != nil {
		return serializer.Err(serializer.CodeNotSet, err.Error(), err)
	}

	return serializer.Response{Code: 0}
}
// Copy duplicates an object into the destination directory. Only a single
// file or directory may be copied per request.
func (service *ItemMoveService) Copy(ctx context.Context, c *gin.Context) serializer.Response {
	if len(service.Src.Items)+len(service.Src.Dirs) > 1 {
		return filesystem.ErrOneObjectOnly
	}

	fileSys, err := filesystem.NewFileSystemFromContext(c)
	if err != nil {
		return serializer.Err(serializer.CodeCreateFSError, "", err)
	}
	defer fileSys.Recycle()

	src := service.Src.Raw()
	if err := fileSys.Copy(ctx, src.Dirs, src.Items, service.SrcDir, service.Dst); err != nil {
		return serializer.Err(serializer.CodeNotSet, err.Error(), err)
	}

	return serializer.Response{Code: 0}
}
// Rename changes the name of an object. Only a single file or directory may
// be renamed per request.
func (service *ItemRenameService) Rename(ctx context.Context, c *gin.Context) serializer.Response {
	if len(service.Src.Items)+len(service.Src.Dirs) > 1 {
		return filesystem.ErrOneObjectOnly
	}

	fileSys, err := filesystem.NewFileSystemFromContext(c)
	if err != nil {
		return serializer.Err(serializer.CodeCreateFSError, "", err)
	}
	defer fileSys.Recycle()

	src := service.Src.Raw()
	if err := fileSys.Rename(ctx, src.Dirs, src.Items, service.NewName); err != nil {
		return serializer.Err(serializer.CodeNotSet, err.Error(), err)
	}

	return serializer.Response{Code: 0}
}
// GetProperty returns the properties of a file or folder. Folder results are
// expensive (recursive listing), so they are cached by folder ID; timestamps
// are refreshed on cache hits.
func (service *ItemPropertyService) GetProperty(ctx context.Context, c *gin.Context) serializer.Response {
	userCtx, _ := c.Get("user")
	user := userCtx.(*model.User)
	var props serializer.ObjectProps
	props.QueryDate = time.Now()
	// File branch.
	if !service.IsFolder {
		res, err := hashid.DecodeHashID(service.ID, hashid.FileID)
		if err != nil {
			return serializer.Err(serializer.CodeNotFound, "", err)
		}
		file, err := model.GetFilesByIDs([]uint{res}, user.ID)
		if err != nil {
			return serializer.DBErr("Failed to query file records", err)
		}
		props.CreatedAt = file[0].CreatedAt
		props.UpdatedAt = file[0].UpdatedAt
		props.Policy = file[0].GetPolicy().Name
		props.Size = file[0].Size
		// Optionally resolve the full path of the parent folder.
		if service.TraceRoot {
			parent, err := model.GetFoldersByIDs([]uint{file[0].FolderID}, user.ID)
			if err != nil {
				return serializer.DBErr("Parent folder record not exist", err)
			}
			if err := parent[0].TraceRoot(); err != nil {
				return serializer.DBErr("Failed to trace root folder", err)
			}
			props.Path = path.Join(parent[0].Position, parent[0].Name)
		}
	} else {
		res, err := hashid.DecodeHashID(service.ID, hashid.FolderID)
		if err != nil {
			return serializer.Err(serializer.CodeNotFound, "", err)
		}
		folder, err := model.GetFoldersByIDs([]uint{res}, user.ID)
		if err != nil {
			return serializer.DBErr("Failed to query folder records", err)
		}
		props.CreatedAt = folder[0].CreatedAt
		props.UpdatedAt = folder[0].UpdatedAt
		// For folders, try the cached result first; only timestamps are refreshed.
		if cacheRes, ok := cache.Get(fmt.Sprintf("folder_props_%d", res)); ok {
			res := cacheRes.(serializer.ObjectProps)
			res.CreatedAt = props.CreatedAt
			res.UpdatedAt = props.UpdatedAt
			return serializer.Response{Data: res}
		}
		// Count child folders (the listing includes the folder itself, hence -1).
		childFolders, err := model.GetRecursiveChildFolder([]uint{folder[0].ID},
			user.ID, true)
		if err != nil {
			return serializer.DBErr("Failed to list child folders", err)
		}
		props.ChildFolderNum = len(childFolders) - 1
		// List all descendant files.
		files, err := model.GetChildFilesOfFolders(&childFolders)
		if err != nil {
			return serializer.DBErr("Failed to list child files", err)
		}
		// Count files and sum their sizes.
		props.ChildFileNum = len(files)
		for i := 0; i < len(files); i++ {
			props.Size += files[i].Size
		}
		// Optionally resolve the folder's full path.
		if service.TraceRoot {
			if err := folder[0].TraceRoot(); err != nil {
				return serializer.DBErr("Failed to list child folders", err)
			}
			props.Path = folder[0].Position
		}
		// Cache the computed folder properties.
		cache.Set(fmt.Sprintf("folder_props_%d", res), props,
			model.GetIntSetting("folder_props_timeout", 300))
	}
	return serializer.Response{
		Code: 0,
		Data: props,
	}
}

75
service/explorer/pin.go Normal file
View File

@@ -0,0 +1,75 @@
package explorer
import (
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/gin-gonic/gin"
"github.com/samber/lo"
)
type (
	// PinFileService binds a request to pin or unpin a URI on the sidebar.
	PinFileService struct {
		Uri string `json:"uri" binding:"required"`
		// Name is an optional display name for the pinned entry.
		Name string `json:"name"`
	}
	// PinFileParameterCtx is the context key for PinFileService.
	PinFileParameterCtx struct{}
)
// PinFile pins a new URI to the user's sidebar. Re-pinning the same URI with
// the same name is a no-op; the same URI with a different name is rejected.
func (service *PinFileService) PinFile(c *gin.Context) error {
	dep := dependency.FromContext(c)
	u := inventory.UserFromContext(c)
	client := dep.UserClient()

	target, err := fs.NewUriFromString(service.Uri)
	if err != nil {
		return serializer.NewError(serializer.CodeParamErr, "unknown uri", err)
	}

	normalized := target.String()
	for _, pinned := range u.Settings.Pined {
		if pinned.Uri != normalized {
			continue
		}
		if pinned.Name != service.Name {
			return serializer.NewError(serializer.CodeObjectExist, "uri already pinned with different name", nil)
		}
		return nil
	}

	u.Settings.Pined = append(u.Settings.Pined, types.PinedFile{
		Uri:  normalized,
		Name: service.Name,
	})
	if err := client.SaveSettings(c, u); err != nil {
		return serializer.NewError(serializer.CodeDBError, "failed to save settings", err)
	}
	return nil
}
// UnpinFile removes a URI from the user's sidebar and persists the settings.
func (service *PinFileService) UnpinFile(c *gin.Context) error {
	dep := dependency.FromContext(c)
	u := inventory.UserFromContext(c)
	client := dep.UserClient()

	target, err := fs.NewUriFromString(service.Uri)
	if err != nil {
		return serializer.NewError(serializer.CodeParamErr, "unknown uri", err)
	}

	// Keep every pinned entry that does not match the normalized URI.
	normalized := target.String()
	kept := make([]types.PinedFile, 0, len(u.Settings.Pined))
	for _, pinned := range u.Settings.Pined {
		if pinned.Uri != normalized {
			kept = append(kept, pinned)
		}
	}
	u.Settings.Pined = kept

	if err := client.SaveSettings(c, u); err != nil {
		return serializer.NewError(serializer.CodeDBError, "failed to save settings", err)
	}
	return nil
}

View File

@@ -0,0 +1,436 @@
package explorer
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/url"
"time"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/auth"
"github.com/cloudreve/Cloudreve/v4/pkg/boolset"
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
"github.com/cloudreve/Cloudreve/v4/pkg/util"
"github.com/cloudreve/Cloudreve/v4/service/user"
"github.com/gin-gonic/gin"
"github.com/gofrs/uuid"
"github.com/samber/lo"
)
// DirectLinkResponse is the API representation of a generated direct link.
type DirectLinkResponse struct {
	// Link is the direct-access URL.
	Link string `json:"link"`
	// FileUrl is the URI of the source file.
	FileUrl string `json:"file_url"`
}
// BuildDirectLinkResponse converts generated direct links into API response
// objects. It returns nil when no link was generated, so the field can be
// omitted from the JSON payload.
func BuildDirectLinkResponse(links []manager.DirectLink) []DirectLinkResponse {
	if len(links) == 0 {
		return nil
	}

	// Pre-size the result to avoid repeated slice growth.
	res := make([]DirectLinkResponse, 0, len(links))
	for _, link := range links {
		res = append(res, DirectLinkResponse{
			Link:    link.Url,
			FileUrl: link.File.Uri(false).String(),
		})
	}
	return res
}
// PathMyRedacted replaces paths the requester is not permitted to see.
const PathMyRedacted = "redacted"

// TaskResponse is the API representation of a background task.
type TaskResponse struct {
	// Fixed the malformed tag `json:"created_at,"` (stray trailing comma
	// produced an empty, ignored tag option).
	CreatedAt    time.Time      `json:"created_at"`
	UpdatedAt    time.Time      `json:"updated_at"`
	ID           string         `json:"id"`
	Status       string         `json:"status"`
	Type         string         `json:"type"`
	Node         *user.Node     `json:"node,omitempty"`
	Summary      *queue.Summary `json:"summary,omitempty"`
	Error        string         `json:"error,omitempty"`
	ErrorHistory []string       `json:"error_history,omitempty"`
	Duration     int64          `json:"duration,omitempty"`
	ResumeTime   int64          `json:"resume_time,omitempty"`
	RetryCount   int            `json:"retry_count,omitempty"`
}

// TaskListResponse is a paginated list of tasks.
type TaskListResponse struct {
	Tasks      []TaskResponse               `json:"tasks"`
	Pagination *inventory.PaginationResults `json:"pagination"`
}
// BuildTaskListResponse assembles a paginated task list response, resolving
// each task's node (when it has one) from the supplied node map.
func BuildTaskListResponse(tasks []queue.Task, res *inventory.ListTaskResult, nodeMap map[int]*ent.Node, hasher hashid.Encoder) *TaskListResponse {
	responses := make([]TaskResponse, 0, len(tasks))
	for _, t := range tasks {
		summary := t.Summarize(hasher)
		var node *ent.Node
		if summary.NodeID > 0 {
			node = nodeMap[summary.NodeID]
		}
		responses = append(responses, *BuildTaskResponse(t, node, hasher))
	}
	return &TaskListResponse{
		Pagination: res.PaginationResults,
		Tasks:      responses,
	}
}
// BuildTaskResponse converts a queue task (and its optional executing node)
// into the API representation. Error strings are passed through
// auth.RedactSensitiveValues before exposure.
func BuildTaskResponse(task queue.Task, node *ent.Node, hasher hashid.Encoder) *TaskResponse {
	model := task.Model()
	t := &TaskResponse{
		Status:    string(task.Status()),
		CreatedAt: model.CreatedAt,
		UpdatedAt: model.UpdatedAt,
		ID:        hashid.EncodeTaskID(hasher, task.ID()),
		Type:      task.Type(),
		Summary:   task.Summarize(hasher),
		Error:     auth.RedactSensitiveValues(model.PublicState.Error),
		ErrorHistory: lo.Map(model.PublicState.ErrorHistory, func(s string, index int) string {
			return auth.RedactSensitiveValues(s)
		}),
		Duration:   model.PublicState.ExecutedDuration.Milliseconds(),
		ResumeTime: model.PublicState.ResumeTime,
		RetryCount: model.PublicState.RetryCount,
	}
	if node != nil {
		t.Node = user.BuildNode(node, hasher)
	}
	return t
}
// UploadSessionResponse is the API representation of a created upload session
// and its credentials.
type UploadSessionResponse struct {
	SessionID      string         `json:"session_id"`
	UploadID       string         `json:"upload_id"`
	ChunkSize      int64          `json:"chunk_size"` // Chunk size; 0 means no chunking
	Expires        int64          `json:"expires"`    // Upload credential expiration, Unix timestamp
	UploadURLs     []string       `json:"upload_urls,omitempty"`
	Credential     string         `json:"credential,omitempty"`
	AccessKey      string         `json:"ak,omitempty"`
	KeyTime        string         `json:"keyTime,omitempty"` // Validity period used by COS
	CompleteURL    string         `json:"completeURL,omitempty"`
	StoragePolicy  *StoragePolicy `json:"storage_policy,omitempty"`
	Uri            string         `json:"uri"`
	CallbackSecret string         `json:"callback_secret"`
	MimeType       string         `json:"mime_type,omitempty"`
	UploadPolicy   string         `json:"upload_policy,omitempty"`
}
// BuildUploadSessionResponse converts an upload credential into its API
// representation, encoding the storage policy ID along the way.
func BuildUploadSessionResponse(session *fs.UploadCredential, hasher hashid.Encoder) *UploadSessionResponse {
	return &UploadSessionResponse{
		SessionID:      session.SessionID,
		ChunkSize:      session.ChunkSize,
		Expires:        session.Expires,
		UploadURLs:     session.UploadURLs,
		Credential:     session.Credential,
		CompleteURL:    session.CompleteURL,
		Uri:            session.Uri,
		UploadID:       session.UploadID,
		StoragePolicy:  BuildStoragePolicy(session.StoragePolicy, hasher),
		CallbackSecret: session.CallbackSecret,
		MimeType:       session.MimeType,
		UploadPolicy:   session.UploadPolicy,
	}
}
// WopiFileInfo is the response body for the WOPI `CheckFileInfo` operation.
type WopiFileInfo struct {
	// Required
	BaseFileName string
	Version      string
	Size         int64

	// Breadcrumb
	BreadcrumbBrandName  string
	BreadcrumbBrandUrl   string
	BreadcrumbFolderName string
	BreadcrumbFolderUrl  string

	// Post Message
	FileSharingPostMessage bool
	FileVersionPostMessage bool
	ClosePostMessage       bool
	PostMessageOrigin      string

	// Other miscellaneous properties
	FileNameMaxLength int
	LastModifiedTime  string

	// User metadata
	IsAnonymousUser  bool
	UserFriendlyName string
	UserId           string
	OwnerId          string

	// Permission
	ReadOnly          bool
	UserCanRename     bool
	UserCanReview     bool
	UserCanWrite      bool
	SupportsRename    bool
	SupportsReviewing bool
	SupportsUpdate    bool
	SupportsLocks     bool
	EnableShare       bool
}

// ViewerSessionResponse wraps a created viewer session; WopiSrc is set for
// WOPI-based viewers.
type ViewerSessionResponse struct {
	Session *manager.ViewerSession `json:"session"`
	WopiSrc string                 `json:"wopi_src,omitempty"`
}
// ListResponse is the API representation of a directory listing.
type ListResponse struct {
	Files      []FileResponse               `json:"files"`
	Parent     FileResponse                 `json:"parent,omitempty"`
	Pagination *inventory.PaginationResults `json:"pagination"`
	Props      *fs.NavigatorProps           `json:"props"`
	// ContextHint is used to speed up following operations under this listed directory.
	// It persists some intermedia state so that the following request don't need to query database again.
	// All the operations under this directory that supports context hint should carry this value in header
	// as X-Cr-Context-Hint.
	ContextHint           *uuid.UUID     `json:"context_hint"`
	RecursionLimitReached bool           `json:"recursion_limit_reached,omitempty"`
	MixedType             bool           `json:"mixed_type"`
	SingleFileView        bool           `json:"single_file_view,omitempty"`
	StoragePolicy         *StoragePolicy `json:"storage_policy,omitempty"`
}

// FileResponse is the API representation of a single file or folder.
type FileResponse struct {
	Type          int                 `json:"type"`
	ID            string              `json:"id"`
	Name          string              `json:"name"`
	CreatedAt     time.Time           `json:"created_at"`
	UpdatedAt     time.Time           `json:"updated_at"`
	Size          int64               `json:"size"`
	Metadata      map[string]string   `json:"metadata"`
	Path          string              `json:"path,omitempty"`
	Shared        bool                `json:"shared,omitempty"`
	Capability    *boolset.BooleanSet `json:"capability,omitempty"`
	Owned         bool                `json:"owned,omitempty"`
	PrimaryEntity string              `json:"primary_entity,omitempty"`
	FolderSummary *fs.FolderSummary   `json:"folder_summary,omitempty"`
	ExtendedInfo  *ExtendedInfo       `json:"extended_info,omitempty"`
}

// ExtendedInfo carries optional detail fields of a file.
type ExtendedInfo struct {
	StoragePolicy *StoragePolicy `json:"storage_policy,omitempty"`
	StorageUsed   int64          `json:"storage_used"`
	Shares        []Share        `json:"shares,omitempty"`
	Entities      []Entity       `json:"entities,omitempty"`
}

// StoragePolicy is the API representation of a storage policy.
type StoragePolicy struct {
	ID            string           `json:"id"`
	Name          string           `json:"name"`
	AllowedSuffix []string         `json:"allowed_suffix,omitempty"`
	Type          types.PolicyType `json:"type"`
	MaxSize       int64            `json:"max_size"`
	Relay         bool             `json:"relay,omitempty"`
}

// Entity is the API representation of a file entity (a stored blob/version).
type Entity struct {
	ID            string           `json:"id"`
	Size          int64            `json:"size"`
	Type          types.EntityType `json:"type"`
	CreatedAt     time.Time        `json:"created_at"`
	StoragePolicy *StoragePolicy   `json:"storage_policy,omitempty"`
	CreatedBy     *user.User       `json:"created_by,omitempty"`
}

// Share is the API representation of a share link.
type Share struct {
	ID              string          `json:"id"`
	Name            string          `json:"name,omitempty"`
	RemainDownloads *int            `json:"remain_downloads,omitempty"`
	Visited         int             `json:"visited"`
	Downloaded      int             `json:"downloaded,omitempty"`
	Expires         *time.Time      `json:"expires,omitempty"`
	Unlocked        bool            `json:"unlocked"`
	SourceType      *types.FileType `json:"source_type,omitempty"`
	Owner           user.User       `json:"owner"`
	CreatedAt       time.Time       `json:"created_at,omitempty"`
	Expired         bool            `json:"expired"`
	Url             string          `json:"url"`
	// Only viewable by owner
	IsPrivate bool   `json:"is_private,omitempty"`
	Password  string `json:"password,omitempty"`
	// Only viewable if explicitly unlocked by owner
	SourceUri string `json:"source_uri,omitempty"`
}
// BuildShare assembles the API representation of a share link. Owner details
// are redacted harder for anonymous requesters; download counters, expiry and
// password are revealed only when the share is unlocked, and the private flag
// only to the owner.
func BuildShare(s *ent.Share, base *url.URL, hasher hashid.Encoder, requester *ent.User, owner *ent.User,
	name string, t types.FileType, unlocked bool) *Share {
	// Pick the owner-info redaction level based on who is asking.
	redactLevel := user.RedactLevelAnonymous
	if !inventory.IsAnonymousUser(requester) {
		redactLevel = user.RedactLevelUser
	}
	res := Share{
		Name:       name,
		ID:         hashid.EncodeShareID(hasher, s.ID),
		Unlocked:   unlocked,
		Owner:      user.BuildUserRedacted(owner, redactLevel, hasher),
		Expired:    inventory.IsShareExpired(s) != nil,
		Url:        BuildShareLink(s, hasher, base),
		CreatedAt:  s.CreatedAt,
		Visited:    s.Views,
		SourceType: util.ToPtr(t),
	}
	// Sensitive counters and the password are visible only once unlocked.
	if unlocked {
		res.RemainDownloads = s.RemainDownloads
		res.Downloaded = s.Downloads
		res.Expires = s.Expires
		res.Password = s.Password
	}
	// Only the owner learns whether the share is password-protected.
	if requester.ID == owner.ID {
		res.IsPrivate = s.Password != ""
	}
	return &res
}
// BuildListResponse assembles a directory listing response, converting each
// listed file (and the parent, when present) into its API representation.
func BuildListResponse(ctx context.Context, u *ent.User, parent fs.File, res *fs.ListFileResult, hasher hashid.Encoder) *ListResponse {
	files := make([]FileResponse, 0, len(res.Files))
	for _, f := range res.Files {
		files = append(files, *BuildFileResponse(ctx, u, f, hasher, res.Props.Capability))
	}

	r := &ListResponse{
		Files:                 files,
		Pagination:            res.Pagination,
		Props:                 res.Props,
		ContextHint:           res.ContextHint,
		RecursionLimitReached: res.RecursionLimitReached,
		MixedType:             res.MixedType,
		SingleFileView:        res.SingleFileView,
		StoragePolicy:         BuildStoragePolicy(res.StoragePolicy, hasher),
	}
	if !res.Parent.IsNil() {
		r.Parent = *BuildFileResponse(ctx, u, res.Parent, hasher, res.Props.Capability)
	}
	return r
}
// BuildFileResponse converts a file into its API representation. When cap is
// nil the file's own capability set is used.
//
// NOTE(review): the `f != nil` guard is inconsistent — f is dereferenced
// unconditionally below, so a nil f would still panic; confirm whether f can
// actually be nil here.
func BuildFileResponse(ctx context.Context, u *ent.User, f fs.File, hasher hashid.Encoder, cap *boolset.BooleanSet) *FileResponse {
	var owner *ent.User
	if f != nil {
		owner = f.Owner()
	}

	if cap == nil {
		cap = f.Capabilities()
	}

	res := &FileResponse{
		Type:       int(f.Type()),
		ID:         hashid.EncodeFileID(hasher, f.ID()),
		Name:       f.DisplayName(),
		CreatedAt:  f.CreatedAt(),
		UpdatedAt:  f.UpdatedAt(),
		Size:       f.Size(),
		Metadata:   f.Metadata(),
		Path:       f.Uri(false).String(),
		Shared:     f.Shared(),
		Capability: cap,
		// A file with no resolvable owner counts as owned by the requester.
		Owned:         owner == nil || owner.ID == u.ID,
		FolderSummary: f.FolderSummary(),
		ExtendedInfo:  BuildExtendedInfo(ctx, u, f, hasher),
		PrimaryEntity: hashid.EncodeEntityID(hasher, f.PrimaryEntityID()),
	}
	return res
}
// BuildExtendedInfo converts a file's extended info into its API
// representation; returns nil when the file carries no extended info. Share
// settings are included only for the file's owner.
func BuildExtendedInfo(ctx context.Context, u *ent.User, f fs.File, hasher hashid.Encoder) *ExtendedInfo {
	info := f.ExtendedInfo()
	if info == nil {
		return nil
	}

	entities := f.Entities()
	entityRes := make([]Entity, 0, len(entities))
	for _, e := range entities {
		entityRes = append(entityRes, BuildEntity(info, e, hasher))
	}

	ext := &ExtendedInfo{
		StoragePolicy: BuildStoragePolicy(info.StoragePolicy, hasher),
		StorageUsed:   info.StorageUsed,
		Entities:      entityRes,
	}

	dep := dependency.FromContext(ctx)
	base := dep.SettingProvider().SiteURL(ctx)
	if u.ID == f.OwnerID() {
		// Only the owner can see the share settings.
		shares := make([]Share, 0, len(info.Shares))
		for _, s := range info.Shares {
			shares = append(shares, *BuildShare(s, base, hasher, u, u, f.DisplayName(), f.Type(), true))
		}
		ext.Shares = shares
	}
	return ext
}
// BuildEntity converts a file entity into its API representation, resolving
// its storage policy from the extended info's policy map and redacting its
// creator to the anonymous level.
func BuildEntity(extendedInfo *fs.FileExtendedInfo, e fs.Entity, hasher hashid.Encoder) Entity {
	res := Entity{
		ID:            hashid.EncodeEntityID(hasher, e.ID()),
		Type:          e.Type(),
		CreatedAt:     e.CreatedAt(),
		StoragePolicy: BuildStoragePolicy(extendedInfo.EntityStoragePolicies[e.PolicyID()], hasher),
		Size:          e.Size(),
	}
	if creator := e.CreatedBy(); creator != nil {
		redacted := user.BuildUserRedacted(creator, user.RedactLevelAnonymous, hasher)
		res.CreatedBy = &redacted
	}
	return res
}
// BuildShareLink composes the public URL of a share on the master site.
func BuildShareLink(s *ent.Share, hasher hashid.Encoder, base *url.URL) string {
	return routes.MasterShareUrl(base, hashid.EncodeShareID(hasher, s.ID), s.Password).String()
}
// BuildStoragePolicy converts a storage policy record into its API
// representation; returns nil for a nil policy.
func BuildStoragePolicy(sp *ent.StoragePolicy, hasher hashid.Encoder) *StoragePolicy {
	if sp == nil {
		return nil
	}

	policy := StoragePolicy{
		ID:            hashid.EncodePolicyID(hasher, sp.ID),
		Name:          sp.Name,
		Type:          types.PolicyType(sp.Type),
		MaxSize:       sp.MaxSize,
		AllowedSuffix: sp.Settings.FileType,
		Relay:         sp.Settings.Relay,
	}
	return &policy
}
// WriteEventSourceHeader prepares the response headers for a Server-Sent
// Events stream: event-stream content type, no caching, and disabled proxy
// buffering (X-Accel-Buffering is honored by nginx).
func WriteEventSourceHeader(c *gin.Context) {
	headers := [][2]string{
		{"Content-Type", "text/event-stream"},
		{"Cache-Control", "no-cache"},
		{"X-Accel-Buffering", "no"},
	}
	for _, h := range headers {
		c.Header(h[0], h[1])
	}
}
// WriteEventSource writes a single Server-Sent Event to the client and
// flushes it immediately so it is delivered without buffering delay.
// The payload is JSON-encoded into the event's data field.
func WriteEventSource(c *gin.Context, event string, data any) {
	w := c.Writer
	fmt.Fprintf(w, "event: %s\n", event)
	fmt.Fprint(w, "data:")
	// json.Encoder terminates the document with "\n"; the extra newline
	// below adds the blank line that closes the SSE event frame.
	json.NewEncoder(w).Encode(data)
	fmt.Fprint(w, "\n")
	w.Flush()
}
// ErrSSETakeOver is a sentinel error signaling that the handler has taken
// over the response as a Server-Sent Events stream — presumably consumed by
// the routing layer to skip the normal JSON response; confirm at callers.
var ErrSSETakeOver = errors.New("SSE take over")

View File

@@ -1,88 +0,0 @@
package explorer
import (
"context"
"strings"
model "github.com/cloudreve/Cloudreve/v3/models"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem"
"github.com/cloudreve/Cloudreve/v3/pkg/hashid"
"github.com/cloudreve/Cloudreve/v3/pkg/serializer"
"github.com/gin-gonic/gin"
)
// ItemSearchService 文件搜索服务
type ItemSearchService struct {
Type string `uri:"type" binding:"required"`
Keywords string `uri:"keywords" binding:"required"`
Path string `form:"path"`
}
// Search dispatches a file search by service.Type: free-text keywords,
// predefined media-extension groups (image/video/audio/doc), or a stored
// tag expression. Unknown types yield a parameter error.
func (service *ItemSearchService) Search(c *gin.Context) serializer.Response {
	// Build a filesystem bound to the requesting user.
	fs, err := filesystem.NewFileSystemFromContext(c)
	if err != nil {
		return serializer.Err(serializer.CodeCreateFSError, "", err)
	}
	defer fs.Recycle()
	// Optionally scope the search to an existing sub-directory.
	if service.Path != "" {
		ok, parent := fs.IsPathExist(service.Path)
		if !ok {
			return serializer.Err(serializer.CodeParentNotExist, "", nil)
		}
		fs.Root = parent
	}
	switch service.Type {
	case "keywords":
		// Substring match via SQL LIKE wildcards.
		return service.SearchKeywords(c, fs, "%"+service.Keywords+"%")
	case "image":
		return service.SearchKeywords(c, fs, "%.bmp", "%.iff", "%.png", "%.gif", "%.jpg", "%.jpeg", "%.psd", "%.svg", "%.webp")
	case "video":
		return service.SearchKeywords(c, fs, "%.mp4", "%.flv", "%.avi", "%.wmv", "%.mkv", "%.rm", "%.rmvb", "%.mov", "%.ogv")
	case "audio":
		return service.SearchKeywords(c, fs, "%.mp3", "%.flac", "%.ape", "%.wav", "%.acc", "%.ogg", "%.midi", "%.mid")
	case "doc":
		return service.SearchKeywords(c, fs, "%.txt", "%.md", "%.pdf", "%.doc", "%.docx", "%.ppt", "%.pptx", "%.xls", "%.xlsx", "%.pub")
	case "tag":
		// Keywords holds a hashed tag ID; expand the tag's stored expression
		// (one LIKE pattern per line) into search terms.
		if tid, err := hashid.DecodeHashID(service.Keywords, hashid.TagID); err == nil {
			if tag, err := model.GetTagsByID(tid, fs.User.ID); err == nil {
				if tag.Type == model.FileTagType {
					exp := strings.Split(tag.Expression, "\n")
					expInput := make([]interface{}, len(exp))
					for i := 0; i < len(exp); i++ {
						expInput[i] = exp[i]
					}
					return service.SearchKeywords(c, fs, expInput...)
				}
			}
		}
		return serializer.Err(serializer.CodeNotFound, "", nil)
	default:
		return serializer.ParamErr("Unknown search type", nil)
	}
}
// SearchKeywords searches the filesystem for files matching any of the given
// keyword patterns and returns the matched objects with a zero parent ID.
func (service *ItemSearchService) SearchKeywords(c *gin.Context, fs *filesystem.FileSystem, keywords ...interface{}) serializer.Response {
	// Cancelable context tied to this request's lifetime.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Collect matching objects.
	objects, err := fs.Search(ctx, keywords...)
	if err != nil {
		return serializer.Err(serializer.CodeNotSet, err.Error(), err)
	}
	return serializer.Response{
		Code: 0,
		Data: map[string]interface{}{
			"parent":  0,
			"objects": objects,
		},
	}
}

View File

@@ -1,25 +1,20 @@
package explorer
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
"net/url"
"time"
model "github.com/cloudreve/Cloudreve/v3/models"
"github.com/cloudreve/Cloudreve/v3/pkg/cache"
"github.com/cloudreve/Cloudreve/v3/pkg/cluster"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx"
"github.com/cloudreve/Cloudreve/v3/pkg/serializer"
"github.com/cloudreve/Cloudreve/v3/pkg/task"
"github.com/cloudreve/Cloudreve/v3/pkg/task/slavetask"
"github.com/cloudreve/Cloudreve/v3/pkg/util"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/driver/local"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
"github.com/samber/lo"
"strings"
)
// SlaveDownloadService 从机文件下載服务
@@ -46,148 +41,211 @@ type SlaveListService struct {
Recursive bool `json:"recursive"`
}
// ServeFile 通过签名的URL下载从机文件
func (service *SlaveDownloadService) ServeFile(ctx context.Context, c *gin.Context, isDownload bool) serializer.Response {
// 创建文件系统
fs, err := filesystem.NewAnonymousFileSystem()
// SlaveServe serves file content
func (s *EntityDownloadService) SlaveServe(c *gin.Context) error {
dep := dependency.FromContext(c)
m := manager.NewFileManager(dep, nil)
defer m.Recycle()
src, err := base64.URLEncoding.DecodeString(s.Src)
if err != nil {
return serializer.Err(serializer.CodeCreateFSError, "", err)
return fmt.Errorf("failed to decode src: %w", err)
}
defer fs.Recycle()
// 解码文件路径
fileSource, err := base64.RawURLEncoding.DecodeString(service.PathEncoded)
entity, err := local.NewLocalFileEntity(types.EntityTypeVersion, string(src))
if err != nil {
return serializer.Err(serializer.CodeFileNotFound, "", err)
return fs.ErrPathNotExist.WithError(err)
}
// 根据URL里的信息创建一个文件对象和用户对象
file := model.File{
Name: service.Name,
SourceName: string(fileSource),
Policy: model.Policy{
Model: gorm.Model{ID: 1},
Type: "local",
},
}
fs.User = &model.User{
Group: model.Group{SpeedLimit: service.Speed},
}
fs.FileTarget = []model.File{file}
// 开始处理下载
ctx = context.WithValue(ctx, fsctx.GinCtx, c)
rs, err := fs.GetDownloadContent(ctx, 0)
entitySource, err := m.GetEntitySource(c, 0, fs.WithEntity(entity))
if err != nil {
return serializer.Err(serializer.CodeNotSet, err.Error(), err)
}
defer rs.Close()
// 设置下载文件名
if isDownload {
c.Header("Content-Disposition", "attachment; filename=\""+url.PathEscape(fs.FileTarget[0].Name)+"\"")
return fmt.Errorf("failed to get entity source: %w", err)
}
// 发送文件
http.ServeContent(c.Writer, c.Request, fs.FileTarget[0].Name, time.Now(), rs)
defer entitySource.Close()
return serializer.Response{}
// Set cache header for public resource
settings := dep.SettingProvider()
maxAge := settings.PublicResourceMaxAge(c)
c.Header("Cache-Control", fmt.Sprintf("public, max-age=%d", maxAge))
isDownload := c.Query(routes.IsDownloadQuery) != ""
entitySource.Serve(c.Writer, c.Request,
entitysource.WithSpeedLimit(s.SpeedLimit),
entitysource.WithDownload(isDownload),
entitysource.WithDisplayName(s.Name),
entitysource.WithContext(c),
)
return nil
}
// Delete 通过签名的URL删除从机文件
func (service *SlaveFilesService) Delete(ctx context.Context, c *gin.Context) serializer.Response {
// 创建文件系统
fs, err := filesystem.NewAnonymousFileSystem()
if err != nil {
return serializer.Err(serializer.CodeCreateFSError, "", err)
type (
SlaveCreateUploadSessionParamCtx struct{}
// SlaveCreateUploadSessionService 从机上传会话服务
SlaveCreateUploadSessionService struct {
Session fs.UploadSession `json:"session" binding:"required"`
Overwrite bool `json:"overwrite"`
}
defer fs.Recycle()
// 删除文件
failed, err := fs.Handler.Delete(ctx, service.Files)
if err != nil {
// 将Data字段写为字符串方便主控端解析
data, _ := json.Marshal(serializer.RemoteDeleteRequest{Files: failed})
return serializer.Response{
Code: serializer.CodeNotFullySuccess,
Data: string(data),
Msg: fmt.Sprintf("Failed to delete %d files(s)", len(failed)),
Error: err.Error(),
}
}
return serializer.Response{}
}
// Thumb 通过签名URL获取从机文件缩略图
func (service *SlaveFileService) Thumb(ctx context.Context, c *gin.Context) serializer.Response {
// 创建文件系统
fs, err := filesystem.NewAnonymousFileSystem()
if err != nil {
return serializer.Err(serializer.CodeCreateFSError, "", err)
}
defer fs.Recycle()
// 解码文件路径
fileSource, err := base64.RawURLEncoding.DecodeString(service.PathEncoded)
if err != nil {
return serializer.Err(serializer.CodeFileNotFound, "", err)
}
fs.FileTarget = []model.File{{SourceName: string(fileSource), Name: fmt.Sprintf("%s.%s", fileSource, service.Ext), PicInfo: "1,1"}}
// 获取缩略图
resp, err := fs.GetThumb(ctx, 0)
if err != nil {
return serializer.Err(serializer.CodeNotSet, "Failed to get thumb", err)
}
defer resp.Content.Close()
http.ServeContent(c.Writer, c.Request, "thumb.png", time.Now(), resp.Content)
return serializer.Response{}
}
// CreateTransferTask 创建从机文件转存任务
func CreateTransferTask(c *gin.Context, req *serializer.SlaveTransferReq) serializer.Response {
if id, ok := c.Get("MasterSiteID"); ok {
job := &slavetask.TransferTask{
Req: req,
MasterID: id.(string),
}
if err := cluster.DefaultController.SubmitTask(job.MasterID, job, req.Hash(job.MasterID), func(job interface{}) {
task.TaskPoll.Submit(job.(task.Job))
}); err != nil {
return serializer.Err(serializer.CodeCreateTaskError, "", err)
}
return serializer.Response{}
}
return serializer.ParamErr("未知的主机节点ID", nil)
}
// SlaveListService 从机上传会话服务
type SlaveCreateUploadSessionService struct {
Session serializer.UploadSession `json:"session" binding:"required"`
TTL int64 `json:"ttl"`
Overwrite bool `json:"overwrite"`
}
)
// Create 从机创建上传会话
func (service *SlaveCreateUploadSessionService) Create(ctx context.Context, c *gin.Context) serializer.Response {
if !service.Overwrite && util.Exists(service.Session.SavePath) {
return serializer.Err(serializer.CodeConflict, "placeholder file already exist", nil)
func (service *SlaveCreateUploadSessionService) Create(c *gin.Context) error {
mode := fs.ModeNone
if service.Overwrite {
mode = fs.ModeOverwrite
}
err := cache.Set(
filesystem.UploadSessionCachePrefix+service.Session.Key,
service.Session,
int(service.TTL),
)
req := &fs.UploadRequest{
Mode: mode,
Props: service.Session.Props.Copy(),
}
dep := dependency.FromContext(c)
m := manager.NewFileManager(dep, nil)
_, err := m.CreateUploadSession(c, req, fs.WithUploadSession(&service.Session))
if err != nil {
return serializer.Err(serializer.CodeCacheOperation, "Failed to create upload session in slave node", err)
return serializer.NewError(serializer.CodeCacheOperation, "Failed to create upload session in slave node", err)
}
return serializer.Response{}
return nil
}
type (
SlaveMetaParamCtx struct{}
SlaveMetaService struct {
Src string `uri:"src" binding:"required"`
Ext string `uri:"ext" binding:"required"`
}
)
// MediaMeta extracts media metadata (e.g. EXIF-style fields) from a local
// file on this slave node. Src is a base64url-encoded local path; Ext is the
// file extension used to select the extractor.
func (s *SlaveMetaService) MediaMeta(c *gin.Context) ([]driver.MediaMeta, error) {
	dep := dependency.FromContext(c)
	m := manager.NewFileManager(dep, nil)
	defer m.Recycle()
	// Decode the signed, base64url-encoded source path.
	src, err := base64.URLEncoding.DecodeString(s.Src)
	if err != nil {
		return nil, fmt.Errorf("failed to decode src: %w", err)
	}
	// Wrap the local path as a version entity so the manager can open it.
	entity, err := local.NewLocalFileEntity(types.EntityTypeVersion, string(src))
	if err != nil {
		return nil, fs.ErrPathNotExist.WithError(err)
	}
	entitySource, err := m.GetEntitySource(c, 0, fs.WithEntity(entity))
	if err != nil {
		return nil, fmt.Errorf("failed to get entity source: %w", err)
	}
	defer entitySource.Close()
	extractor := dep.MediaMetaExtractor(c)
	res, err := extractor.Extract(c, s.Ext, entitySource)
	if err != nil {
		return nil, fmt.Errorf("failed to extract media meta: %w", err)
	}
	return res, nil
}
type (
SlaveThumbParamCtx struct{}
SlaveThumbService struct {
Src string `uri:"src" binding:"required"`
Ext string `uri:"ext" binding:"required"`
}
)
// Thumb serves the thumbnail for a local file on this slave node. It first
// tries the pre-generated sidecar thumbnail next to the source file; if that
// does not exist, it generates one on the fly before serving.
func (s *SlaveThumbService) Thumb(c *gin.Context) error {
	dep := dependency.FromContext(c)
	m := manager.NewFileManager(dep, nil)
	defer m.Recycle()
	// Decode the signed, base64url-encoded source path.
	src, err := base64.URLEncoding.DecodeString(s.Src)
	if err != nil {
		return fmt.Errorf("failed to decode src: %w", err)
	}
	settings := dep.SettingProvider()
	var entity fs.Entity
	// Fast path: an existing thumbnail sidecar file ("<src><suffix>").
	entity, err = local.NewLocalFileEntity(types.EntityTypeThumbnail, string(src)+settings.ThumbSlaveSidecarSuffix(c))
	if err != nil {
		// Sidecar missing: generate the thumbnail from the source entity.
		// Note: err is intentionally shadowed inside this block; both inner
		// failures return immediately, so the outer err is never reused.
		srcEntity, err := local.NewLocalFileEntity(types.EntityTypeVersion, string(src))
		if err != nil {
			return fs.ErrPathNotExist.WithError(err)
		}
		entity, err = m.SubmitAndAwaitThumbnailTask(c, nil, s.Ext, srcEntity)
		if err != nil {
			return fmt.Errorf("failed to submit and await thumbnail task: %w", err)
		}
	}
	entitySource, err := m.GetEntitySource(c, 0, fs.WithEntity(entity))
	if err != nil {
		return fmt.Errorf("failed to get thumb entity source: %w", err)
	}
	defer entitySource.Close()
	// Thumbnails are public resources; allow client/proxy caching.
	maxAge := settings.PublicResourceMaxAge(c)
	c.Header("Cache-Control", fmt.Sprintf("public, max-age=%d", maxAge))
	entitySource.Serve(c.Writer, c.Request,
		entitysource.WithContext(c),
	)
	return nil
}
type (
SlaveDeleteUploadSessionParamCtx struct{}
SlaveDeleteUploadSessionService struct {
ID string `uri:"sessionId" binding:"required"`
}
)
// Delete cancels and removes the identified upload session on this slave
// node, including whatever partial state the manager tracks for it.
func (service *SlaveDeleteUploadSessionService) Delete(c *gin.Context) error {
	dep := dependency.FromContext(c)
	m := manager.NewFileManager(dep, nil)
	defer m.Recycle()
	err := m.CancelUploadSession(c, nil, service.ID)
	if err != nil {
		return fmt.Errorf("slave failed to delete upload session: %w", err)
	}
	return nil
}
type (
SlaveDeleteFileParamCtx struct{}
SlaveDeleteFileService struct {
Files []string `json:"files" binding:"required,gt=0"`
}
)
// Delete removes the given local files on this slave node. For each file it
// first attempts to delete the thumbnail sidecar ("<file><suffix>") on a
// best-effort basis — sidecar failures are only logged — then deletes the
// files themselves. On failure it returns the list of paths that could not
// be deleted; on success it returns (nil, nil).
func (service *SlaveDeleteFileService) Delete(c *gin.Context) ([]string, error) {
	dep := dependency.FromContext(c)
	m := manager.NewFileManager(dep, nil)
	defer m.Recycle()
	d := m.LocalDriver(nil)
	// Best-effort: remove thumbnail sidecars alongside the files.
	sidecarSuffix := dep.SettingProvider().ThumbSlaveSidecarSuffix(c)
	failed, err := d.Delete(c, lo.Map(service.Files, func(item string, index int) string {
		return item + sidecarSuffix
	})...)
	if err != nil {
		dep.Logger().Warning("Failed to delete thumbnail sidecar [%s]: %s", strings.Join(failed, ", "), err)
	}
	failed, err = d.Delete(c, service.Files...)
	if err != nil {
		return failed, fmt.Errorf("slave failed to delete file: %w", err)
	}
	return nil, nil
}

View File

@@ -1,88 +0,0 @@
package explorer
import (
"fmt"
"strings"
model "github.com/cloudreve/Cloudreve/v3/models"
"github.com/cloudreve/Cloudreve/v3/pkg/hashid"
"github.com/cloudreve/Cloudreve/v3/pkg/serializer"
"github.com/gin-gonic/gin"
)
// FilterTagCreateService 文件分类标签创建服务
type FilterTagCreateService struct {
Expression string `json:"expression" binding:"required,min=1,max=65535"`
Icon string `json:"icon" binding:"required,min=1,max=255"`
Name string `json:"name" binding:"required,min=1,max=255"`
Color string `json:"color" binding:"hexcolor|rgb|rgba|hsl"`
}
// LinkTagCreateService 目录快捷方式标签创建服务
type LinkTagCreateService struct {
Path string `json:"path" binding:"required,min=1,max=65535"`
Name string `json:"name" binding:"required,min=1,max=255"`
}
// TagService 标签服务
type TagService struct {
}
// Delete removes the tag identified by the "object_id" value on the gin
// context, scoped to the given user so users cannot delete others' tags.
func (service *TagService) Delete(c *gin.Context, user *model.User) serializer.Response {
	// object_id is set by upstream middleware; assumed to be a uint — the
	// type assertion panics otherwise.
	id, _ := c.Get("object_id")
	if err := model.DeleteTagByID(id.(uint), user.ID); err != nil {
		return serializer.DBErr("Failed to delete a tag", err)
	}
	return serializer.Response{}
}
// Create creates a directory-shortcut tag pointing at service.Path for the
// given user and returns the new tag's hashed ID.
func (service *LinkTagCreateService) Create(c *gin.Context, user *model.User) serializer.Response {
	// Build the tag record; directory links use a fixed icon.
	tag := model.Tag{
		Name:       service.Name,
		Icon:       "FolderHeartOutline",
		Type:       model.DirectoryLinkType,
		Expression: service.Path,
		UserID:     user.ID,
	}
	id, err := tag.Create()
	if err != nil {
		return serializer.DBErr("Failed to create a tag", err)
	}
	return serializer.Response{
		Data: hashid.HashID(id, hashid.TagID),
	}
}
// Create creates a file-filter tag from a newline-separated list of match
// expressions, converting "*" wildcards to SQL LIKE "%" patterns, and
// returns the new tag's hashed ID. Empty lines are rejected.
func (service *FilterTagCreateService) Create(c *gin.Context, user *model.User) serializer.Response {
	// Split the expression list and translate wildcards to SQL LIKE syntax.
	expressions := strings.Split(service.Expression, "\n")
	for i := 0; i < len(expressions); i++ {
		expressions[i] = strings.ReplaceAll(expressions[i], "*", "%")
		if expressions[i] == "" {
			return serializer.ParamErr(fmt.Sprintf("The %d line contains an empty match expression", i+1), nil)
		}
	}
	// Persist the tag with the normalized expressions.
	tag := model.Tag{
		Name:       service.Name,
		Icon:       service.Icon,
		Color:      service.Color,
		Type:       model.FileTagType,
		Expression: strings.Join(expressions, "\n"),
		UserID:     user.ID,
	}
	id, err := tag.Create()
	if err != nil {
		return serializer.DBErr("Failed to create a tag", err)
	}
	return serializer.Response{
		Data: hashid.HashID(id, hashid.TagID),
	}
}

View File

@@ -3,167 +3,158 @@ package explorer
import (
"context"
"fmt"
model "github.com/cloudreve/Cloudreve/v3/models"
"github.com/cloudreve/Cloudreve/v3/pkg/auth"
"github.com/cloudreve/Cloudreve/v3/pkg/cache"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/local"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx"
"github.com/cloudreve/Cloudreve/v3/pkg/hashid"
"github.com/cloudreve/Cloudreve/v3/pkg/serializer"
"github.com/cloudreve/Cloudreve/v3/pkg/util"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/cluster"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/request"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/gin-gonic/gin"
"io/ioutil"
"strconv"
"strings"
"time"
)
// CreateUploadSessionService 获取上传凭证服务
type CreateUploadSessionService struct {
Path string `json:"path" binding:"required"`
Size uint64 `json:"size" binding:"min=0"`
Name string `json:"name" binding:"required"`
PolicyID string `json:"policy_id" binding:"required"`
LastModified int64 `json:"last_modified"`
MimeType string `json:"mime_type"`
}
type (
CreateUploadSessionParameterCtx struct{}
CreateUploadSessionService struct {
Uri string `json:"uri" binding:"required"`
Size int64 `json:"size" binding:"min=0"`
LastModified int64 `json:"last_modified"`
MimeType string `json:"mime_type"`
PolicyID string `json:"policy_id"`
Metadata map[string]string `json:"metadata" binding:"max=256"`
EntityType string `json:"entity_type" binding:"eq=|eq=live_photo|eq=version"`
}
)
// Create 创建新的上传会话
func (service *CreateUploadSessionService) Create(ctx context.Context, c *gin.Context) serializer.Response {
// 创建文件系统
fs, err := filesystem.NewFileSystemFromContext(c)
func (service *CreateUploadSessionService) Create(c context.Context) (*UploadSessionResponse, error) {
dep := dependency.FromContext(c)
user := inventory.UserFromContext(c)
m := manager.NewFileManager(dep, user)
defer m.Recycle()
uri, err := fs.NewUriFromString(service.Uri)
if err != nil {
return serializer.Err(serializer.CodeCreateFSError, "", err)
return nil, serializer.NewError(serializer.CodeParamErr, "unknown uri", err)
}
// 取得存储策略的ID
rawID, err := hashid.DecodeHashID(service.PolicyID, hashid.PolicyID)
var entityType *types.EntityType
switch service.EntityType {
case "live_photo":
livePhoto := types.EntityTypeLivePhoto
entityType = &livePhoto
case "version":
version := types.EntityTypeVersion
entityType = &version
}
hasher := dep.HashIDEncoder()
policyId, err := hasher.Decode(service.PolicyID, hashid.PolicyID)
if err != nil {
return serializer.Err(serializer.CodePolicyNotExist, "", err)
return nil, serializer.NewError(serializer.CodeParamErr, "unknown policy id", err)
}
if fs.Policy.ID != rawID {
return serializer.Err(serializer.CodePolicyNotAllowed, "存储策略发生变化,请刷新文件列表并重新添加此任务", nil)
uploadRequest := &fs.UploadRequest{
Props: &fs.UploadProps{
Uri: uri,
Size: service.Size,
MimeType: service.MimeType,
Metadata: service.Metadata,
EntityType: entityType,
PreferredStoragePolicy: policyId,
},
}
file := &fsctx.FileStream{
Size: service.Size,
Name: service.Name,
VirtualPath: service.Path,
File: ioutil.NopCloser(strings.NewReader("")),
MimeType: service.MimeType,
}
if service.LastModified > 0 {
lastModified := time.UnixMilli(service.LastModified)
file.LastModified = &lastModified
uploadRequest.Props.LastModified = &lastModified
}
credential, err := fs.CreateUploadSession(ctx, file)
credential, err := m.CreateUploadSession(c, uploadRequest)
if err != nil {
return serializer.Err(serializer.CodeNotSet, err.Error(), err)
return nil, err
}
return serializer.Response{
Code: 0,
Data: credential,
}
return BuildUploadSessionResponse(credential, hasher), nil
}
// UploadService 本机及从机策略上传服务
type UploadService struct {
ID string `uri:"sessionId" binding:"required"`
Index int `uri:"index" form:"index" binding:"min=0"`
}
type (
UploadParameterCtx struct{}
// UploadService 本机及从机策略上传服务
UploadService struct {
ID string `uri:"sessionId" binding:"required"`
Index int `uri:"index" form:"index" binding:"min=0"`
}
)
// LocalUpload 处理本机文件分片上传
func (service *UploadService) LocalUpload(ctx context.Context, c *gin.Context) serializer.Response {
uploadSessionRaw, ok := cache.Get(filesystem.UploadSessionCachePrefix + service.ID)
func (service *UploadService) LocalUpload(c *gin.Context) error {
dep := dependency.FromContext(c)
kv := dep.KV()
uploadSessionRaw, ok := kv.Get(manager.UploadSessionCachePrefix + service.ID)
if !ok {
return serializer.Err(serializer.CodeUploadSessionExpired, "", nil)
return serializer.NewError(serializer.CodeUploadSessionExpired, "", nil)
}
uploadSession := uploadSessionRaw.(serializer.UploadSession)
uploadSession := uploadSessionRaw.(fs.UploadSession)
fs, err := filesystem.NewFileSystemFromContext(c)
user := inventory.UserFromContext(c)
m := manager.NewFileManager(dep, user)
defer m.Recycle()
if uploadSession.UID != user.ID {
return serializer.NewError(serializer.CodeUploadSessionExpired, "", nil)
}
// Confirm upload session and chunk index
placeholder, err := m.ConfirmUploadSession(c, &uploadSession, service.Index)
if err != nil {
return serializer.Err(serializer.CodePolicyNotAllowed, err.Error(), err)
return err
}
if uploadSession.UID != fs.User.ID {
return serializer.Err(serializer.CodeUploadSessionExpired, "", nil)
}
// 查找上传会话创建的占位文件
file, err := model.GetFilesByUploadSession(service.ID, fs.User.ID)
if err != nil {
return serializer.Err(serializer.CodeUploadSessionExpired, "", err)
}
// 重设 fs 存储策略
if !uploadSession.Policy.IsTransitUpload(uploadSession.Size) {
return serializer.Err(serializer.CodePolicyNotAllowed, "", err)
}
fs.Policy = &uploadSession.Policy
if err := fs.DispatchHandler(); err != nil {
return serializer.Err(serializer.CodePolicyNotExist, "", err)
}
expectedSizeStart := file.Size
actualSizeStart := uint64(service.Index) * uploadSession.Policy.OptionsSerialized.ChunkSize
if uploadSession.Policy.OptionsSerialized.ChunkSize == 0 && service.Index > 0 {
return serializer.Err(serializer.CodeInvalidChunkIndex, "Chunk index cannot be greater than 0", nil)
}
if expectedSizeStart < actualSizeStart {
return serializer.Err(serializer.CodeInvalidChunkIndex, "Chunk must be uploaded in order", nil)
}
if expectedSizeStart > actualSizeStart {
util.Log().Info("Trying to overwrite chunk[%d] Start=%d", service.Index, actualSizeStart)
}
return processChunkUpload(ctx, c, fs, &uploadSession, service.Index, file, fsctx.Append)
return processChunkUpload(c, m, &uploadSession, service.Index, placeholder, fs.ModeOverwrite)
}
// SlaveUpload 处理从机文件分片上传
func (service *UploadService) SlaveUpload(ctx context.Context, c *gin.Context) serializer.Response {
uploadSessionRaw, ok := cache.Get(filesystem.UploadSessionCachePrefix + service.ID)
func (service *UploadService) SlaveUpload(c *gin.Context) error {
dep := dependency.FromContext(c)
kv := dep.KV()
uploadSessionRaw, ok := kv.Get(manager.UploadSessionCachePrefix + service.ID)
if !ok {
return serializer.Err(serializer.CodeUploadSessionExpired, "", nil)
return serializer.NewError(serializer.CodeUploadSessionExpired, "", nil)
}
uploadSession := uploadSessionRaw.(serializer.UploadSession)
uploadSession := uploadSessionRaw.(fs.UploadSession)
fs, err := filesystem.NewAnonymousFileSystem()
if err != nil {
return serializer.Err(serializer.CodeCreateFSError, "", err)
}
fs.Handler = local.Driver{}
// 解析需要的参数
// Parse chunk index from query
service.Index, _ = strconv.Atoi(c.Query("chunk"))
mode := fsctx.Append
if c.GetHeader(auth.CrHeaderPrefix+"Overwrite") == "true" {
mode |= fsctx.Overwrite
}
return processChunkUpload(ctx, c, fs, &uploadSession, service.Index, nil, mode)
m := manager.NewFileManager(dep, nil)
defer m.Recycle()
return processChunkUpload(c, m, &uploadSession, service.Index, nil, fs.ModeOverwrite)
}
func processChunkUpload(ctx context.Context, c *gin.Context, fs *filesystem.FileSystem, session *serializer.UploadSession, index int, file *model.File, mode fsctx.WriteMode) serializer.Response {
func processChunkUpload(c *gin.Context, m manager.FileManager, session *fs.UploadSession, index int, file fs.File, mode fs.WriteMode) error {
// 取得并校验文件大小是否符合分片要求
chunkSize := session.Policy.OptionsSerialized.ChunkSize
isLastChunk := session.Policy.OptionsSerialized.ChunkSize == 0 || uint64(index+1)*chunkSize >= session.Size
chunkSize := session.ChunkSize
isLastChunk := session.ChunkSize == 0 || int64(index+1)*chunkSize >= session.Props.Size
expectedLength := chunkSize
if isLastChunk {
expectedLength = session.Size - uint64(index)*chunkSize
expectedLength = session.Props.Size - int64(index)*chunkSize
}
fileSize, err := strconv.ParseUint(c.Request.Header.Get("Content-Length"), 10, 64)
rc, fileSize, err := request.SniffContentLength(c.Request)
if err != nil || (expectedLength != fileSize) {
return serializer.Err(
return serializer.NewError(
serializer.CodeInvalidContentLength,
fmt.Sprintf("Invalid Content-Length (expected: %d)", expectedLength),
err,
@@ -172,121 +163,60 @@ func processChunkUpload(ctx context.Context, c *gin.Context, fs *filesystem.File
// 非首个分片时需要允许覆盖
if index > 0 {
mode |= fsctx.Overwrite
mode |= fs.ModeOverwrite
}
fileData := fsctx.FileStream{
MimeType: c.Request.Header.Get("Content-Type"),
File: c.Request.Body,
Size: fileSize,
Name: session.Name,
VirtualPath: session.VirtualPath,
SavePath: session.SavePath,
Mode: mode,
AppendStart: chunkSize * uint64(index),
Model: file,
LastModified: session.LastModified,
}
// 给文件系统分配钩子
fs.Use("AfterUploadCanceled", filesystem.HookTruncateFileTo(fileData.AppendStart))
fs.Use("AfterValidateFailed", filesystem.HookTruncateFileTo(fileData.AppendStart))
if file != nil {
fs.Use("BeforeUpload", filesystem.HookValidateCapacity)
fs.Use("AfterUpload", filesystem.HookChunkUploaded)
fs.Use("AfterValidateFailed", filesystem.HookChunkUploadFailed)
if isLastChunk {
fs.Use("AfterUpload", filesystem.HookPopPlaceholderToFile(""))
fs.Use("AfterUpload", filesystem.HookDeleteUploadSession(session.Key))
}
} else {
if isLastChunk {
fs.Use("AfterUpload", filesystem.SlaveAfterUpload(session))
fs.Use("AfterUpload", filesystem.HookDeleteUploadSession(session.Key))
}
req := &fs.UploadRequest{
File: rc,
Offset: chunkSize * int64(index),
Props: session.Props.Copy(),
Mode: mode,
}
// 执行上传
uploadCtx := context.WithValue(ctx, fsctx.GinCtx, c)
err = fs.Upload(uploadCtx, &fileData)
ctx := context.WithValue(c, cluster.SlaveNodeIDCtx{}, strconv.Itoa(session.Policy.NodeID))
err = m.Upload(ctx, req, session.Policy)
if err != nil {
return serializer.Err(serializer.CodeUploadFailed, err.Error(), err)
return err
}
return serializer.Response{}
if rc, ok := req.File.(request.LimitReaderCloser); ok {
if rc.Count() != expectedLength {
err := fmt.Errorf("uploaded data(%d) does not match purposed size(%d)", rc.Count(), req.Props.Size)
return serializer.NewError(serializer.CodeIOFailed, "Uploaded data does not match purposed size", err)
}
}
// Finish upload
if isLastChunk {
_, err := m.CompleteUpload(ctx, session)
if err != nil {
return fmt.Errorf("failed to complete upload: %w", err)
}
}
return nil
}
// UploadSessionService 上传会话服务
type UploadSessionService struct {
ID string `uri:"sessionId" binding:"required"`
}
type (
DeleteUploadSessionParameterCtx struct{}
DeleteUploadSessionService struct {
ID string `json:"id" binding:"required"`
Uri string `json:"uri" binding:"required"`
}
)
// Delete 删除指定上传会话
func (service *UploadSessionService) Delete(ctx context.Context, c *gin.Context) serializer.Response {
// 创建文件系统
fs, err := filesystem.NewFileSystemFromContext(c)
// Delete deletes the specified upload session
func (service *DeleteUploadSessionService) Delete(c *gin.Context) error {
dep := dependency.FromContext(c)
user := inventory.UserFromContext(c)
m := manager.NewFileManager(dep, user)
defer m.Recycle()
uri, err := fs.NewUriFromString(service.Uri)
if err != nil {
return serializer.Err(serializer.CodeCreateFSError, "", err)
}
defer fs.Recycle()
// 查找需要删除的上传会话的占位文件
file, err := model.GetFilesByUploadSession(service.ID, fs.User.ID)
if err != nil {
return serializer.Err(serializer.CodeUploadSessionExpired, "", err)
return serializer.NewError(serializer.CodeParamErr, "unknown uri", err)
}
// 删除文件
if err := fs.Delete(ctx, []uint{}, []uint{file.ID}, false, false); err != nil {
return serializer.Err(serializer.CodeInternalSetting, "Failed to delete upload session", err)
}
return serializer.Response{}
}
// SlaveDelete 从机删除指定上传会话
func (service *UploadSessionService) SlaveDelete(ctx context.Context, c *gin.Context) serializer.Response {
// 创建文件系统
fs, err := filesystem.NewAnonymousFileSystem()
if err != nil {
return serializer.Err(serializer.CodeCreateFSError, "", err)
}
defer fs.Recycle()
session, ok := cache.Get(filesystem.UploadSessionCachePrefix + service.ID)
if !ok {
return serializer.Err(serializer.CodeUploadSessionExpired, "", nil)
}
if _, err := fs.Handler.Delete(ctx, []string{session.(serializer.UploadSession).SavePath}); err != nil {
return serializer.Err(serializer.CodeInternalSetting, "Failed to delete temp file", err)
}
cache.Deletes([]string{service.ID}, filesystem.UploadSessionCachePrefix)
return serializer.Response{}
}
// DeleteAllUploadSession 删除当前用户的全部上传绘会话
func DeleteAllUploadSession(ctx context.Context, c *gin.Context) serializer.Response {
// 创建文件系统
fs, err := filesystem.NewFileSystemFromContext(c)
if err != nil {
return serializer.Err(serializer.CodeCreateFSError, "", err)
}
defer fs.Recycle()
// 查找需要删除的上传会话的占位文件
files := model.GetUploadPlaceholderFiles(fs.User.ID)
fileIDs := make([]uint, len(files))
for i, file := range files {
fileIDs[i] = file.ID
}
// 删除文件
if err := fs.Delete(ctx, []uint{}, fileIDs, false, false); err != nil {
return serializer.Err(serializer.CodeInternalSetting, "Failed to cleanup upload session", err)
}
return serializer.Response{}
return m.CancelUploadSession(c, uri, service.ID)
}

394
service/explorer/viewer.go Normal file
View File

@@ -0,0 +1,394 @@
package explorer
import (
"errors"
"fmt"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/cluster/routes"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/lock"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager/entitysource"
"github.com/cloudreve/Cloudreve/v4/pkg/hashid"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/cloudreve/Cloudreve/v4/pkg/setting"
"github.com/cloudreve/Cloudreve/v4/pkg/wopi"
"github.com/gin-gonic/gin"
"net/http"
"time"
)
type WopiService struct {
}
// prepareFs resolves the common WOPI request context: the viewer-session URI,
// a file manager, the requesting user, the viewer session cache and the
// dependency container. On success the caller owns the returned FileManager
// and is responsible for recycling it; on error the manager is recycled here.
func prepareFs(c *gin.Context) (*fs.URI, manager.FileManager, *ent.User, *manager.ViewerSessionCache, dependency.Dep, error) {
	dep := dependency.FromContext(c)
	u := inventory.UserFromContext(c)
	m := manager.NewFileManager(dep, u)
	viewerSession := manager.ViewerSessionFromContext(c)
	uri, err := fs.NewUriFromString(viewerSession.Uri)
	if err != nil {
		// Bug fix: the original `defer m.Recycle()` recycled the manager when
		// prepareFs returned — before callers used the returned m. Recycle
		// only on the error path; callers must recycle on success.
		m.Recycle()
		return nil, nil, nil, nil, nil, serializer.NewError(serializer.CodeParamErr, "unknown uri", err)
	}
	return uri, m, u, viewerSession, dep, nil
}
// Unlock releases the WOPI lock identified by the X-WOPI-Lock request
// header. Following WOPI semantics, a missing or mismatched lock responds
// 409 Conflict with an empty lock header rather than an error.
func (service *WopiService) Unlock(c *gin.Context) error {
	_, m, _, _, dep, err := prepareFs(c)
	if err != nil {
		return err
	}
	l := dep.Logger()
	lockToken := c.GetHeader(wopi.LockTokenHeader)
	if err = m.Unlock(c, lockToken); err != nil {
		// Not locked, or token mismatch: report conflict, not a failure.
		l.Debug("WOPI unlock, not locked or not match: %w", err)
		c.Status(http.StatusConflict)
		c.Header(wopi.LockTokenHeader, "")
		return nil
	}
	return nil
}
// RefreshLock extends an existing WOPI lock identified by the X-WOPI-Lock
// header. If the file is not locked or the token does not match, it responds
// 409 Conflict with an empty lock header, per the WOPI protocol.
func (service *WopiService) RefreshLock(c *gin.Context) error {
	uri, m, _, _, dep, err := prepareFs(c)
	if err != nil {
		return err
	}
	l := dep.Logger()
	// Make sure file exists and readable
	file, err := m.Get(c, uri, dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityLockFile))
	if err != nil {
		return fmt.Errorf("failed to get file: %w", err)
	}
	lockToken := c.GetHeader(wopi.LockTokenHeader)
	release, _, err := m.ConfirmLock(c, file, file.Uri(false), lockToken)
	if err != nil {
		// File not locked, or token mismatch: conflict, not an error.
		l.Debug("WOPI refresh lock, not locked or not match: %w", err)
		c.Status(http.StatusConflict)
		c.Header(wopi.LockTokenHeader, "")
		return nil
	}
	// Token confirmed; release the confirmation hold and extend the lock.
	release()
	_, err = m.Refresh(c, wopi.LockDuration, lockToken)
	if err != nil {
		return err
	}
	// Echo the still-valid token back to the WOPI client.
	c.Header(wopi.LockTokenHeader, lockToken)
	return nil
}
// Lock handles the WOPI Lock operation. If the file is already locked with
// a matching token the lock is refreshed; if it is unlocked a new lock is
// taken with the given token; a conflicting lock yields 409 Conflict with
// the current lock token in the X-WOPI-Lock response header.
func (service *WopiService) Lock(c *gin.Context) error {
	uri, m, user, viewerSession, dep, err := prepareFs(c)
	if err != nil {
		return err
	}
	l := dep.Logger()
	// Make sure the file exists and can be locked by this user.
	file, err := m.Get(c, uri, dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityLockFile))
	if err != nil {
		return fmt.Errorf("failed to get file: %w", err)
	}
	lockToken := c.GetHeader(wopi.LockTokenHeader)
	release, _, err := m.ConfirmLock(c, file, file.Uri(false), lockToken)
	if err != nil {
		// File not locked, or the token does not match:
		// try to lock it using the given token.
		app := lock.Application{
			Type:     string(fs.ApplicationViewer),
			ViewerID: viewerSession.ViewerID,
		}
		_, err = m.Lock(c, wopi.LockDuration, user, true, app, file.Uri(false), lockToken)
		if err != nil {
			// Another token holds the lock: report 409 Conflict and
			// surface the current lock token to the client.
			var lockConflict lock.ConflictError
			if errors.As(err, &lockConflict) {
				c.Status(http.StatusConflict)
				c.Header(wopi.LockTokenHeader, lockConflict[0].Token)
				// %s, not %w: the %w verb is only meaningful in fmt.Errorf.
				l.Debug("WOPI lock, lock conflict: %s", err)
				return nil
			}
			return fmt.Errorf("failed to lock file: %w", err)
		}
		// Lock success, return the token.
		c.Header(wopi.LockTokenHeader, lockToken)
		return nil
	}
	// Already locked with a matching token: refresh the existing lock.
	release()
	_, err = m.Refresh(c, wopi.LockDuration, lockToken)
	if err != nil {
		return err
	}
	c.Header(wopi.LockTokenHeader, lockToken)
	return nil
}
// PutContent handles the WOPI PutFile operation: it overwrites the file
// content with the request body, honoring WOPI locking semantics.
//
// Lock handling: when a lock token is presented it must match the current
// lock; if the file turns out not to be locked, the host locks it with the
// given token, immediately unlocks, and reports an empty X-WOPI-Lock
// header. A mismatched token yields 409 Conflict carrying the current
// lock token.
func (service *WopiService) PutContent(c *gin.Context) error {
	uri, m, user, viewerSession, _, err := prepareFs(c)
	if err != nil {
		return err
	}
	// Make sure the file exists and the user can upload to it.
	file, err := m.Get(c, uri, dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityUploadFile))
	if err != nil {
		return fmt.Errorf("failed to get file: %w", err)
	}
	var lockSession fs.LockSession
	lockToken := c.GetHeader(wopi.LockTokenHeader)
	if lockToken != "" {
		// Confirm the presented token matches the current lock.
		release, ls, err := m.ConfirmLock(c, file, file.Uri(false), lockToken)
		if err != nil {
			// File not locked, or the token does not match.
			// Try to lock using the given token.
			app := lock.Application{
				Type:     string(fs.ApplicationViewer),
				ViewerID: viewerSession.ViewerID,
			}
			ls, err := m.Lock(c, wopi.LockDuration, user, true, app, file.Uri(false), lockToken)
			if err != nil {
				// Token not match.
				// If the file is currently locked and the X-WOPI-Lock value doesn't match the lock currently on the file, the host must:
				// - Return a lock mismatch response (409 Conflict)
				// - Include an X-WOPI-Lock response header containing the value of the current lock on the file.
				var lockConflict lock.ConflictError
				if errors.As(err, &lockConflict) {
					c.Status(http.StatusConflict)
					c.Header(wopi.LockTokenHeader, lockConflict[0].Token)
					return nil
				}
				return fmt.Errorf("failed to lock file: %w", err)
			}
			// In cases where the file is unlocked, the host must set X-WOPI-Lock to the empty string.
			c.Header(wopi.LockTokenHeader, "")
			_ = m.Unlock(c, ls.LastToken())
		} else {
			defer release()
		}
		// NOTE(review): on the not-locked path above, the inner ls shadows
		// this one, so lockSession receives the zero value returned by the
		// failed ConfirmLock — confirm this is intended.
		lockSession = ls
	}
	subService := FileUpdateService{
		Uri: viewerSession.Uri,
	}
	res, err := subService.PutContent(c, lockSession)
	if err != nil {
		var appErr serializer.AppError
		if errors.As(err, &appErr) {
			// Map application error codes to WOPI HTTP statuses.
			switch appErr.Code {
			case serializer.CodeFileTooLarge:
				c.Status(http.StatusRequestEntityTooLarge)
				c.Header(wopi.ServerErrorHeader, err.Error())
			case serializer.CodeNotFound:
				c.Status(http.StatusNotFound)
				c.Header(wopi.ServerErrorHeader, err.Error())
			case 0:
				c.Status(http.StatusOK)
			default:
				return err
			}
			return nil
		}
		return err
	}
	c.Header(wopi.ItemVersionHeader, res.PrimaryEntity)
	return nil
}
// GetFile streams the content of the requested file version to the WOPI
// client. The desired version must exist and must not exceed the
// configured maximum online-edit size.
func (service *WopiService) GetFile(c *gin.Context) error {
	uri, m, _, viewerSession, dep, err := prepareFs(c)
	if err != nil {
		return err
	}
	// The file must exist and be downloadable by the session user.
	file, err := m.Get(c, uri, dbfs.WithExtendedInfo(), dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityDownloadFile))
	if err != nil {
		return fmt.Errorf("failed to get file: %w", err)
	}
	wanted := types.EntityTypeVersion
	found, entity := fs.FindDesiredEntity(file, viewerSession.Version, dep.HashIDEncoder(), &wanted)
	if !found {
		return serializer.NewError(serializer.CodeNotFound, "version not found", nil)
	}
	if entity.Size() > dep.SettingProvider().MaxOnlineEditSize(c) {
		return fs.ErrFileSizeTooBig
	}
	src, err := m.GetEntitySource(c, entity.ID(), fs.WithEntity(entity))
	if err != nil {
		return fmt.Errorf("failed to get entity source: %w", err)
	}
	defer src.Close()
	src.Serve(c.Writer, c.Request, entitysource.WithContext(c))
	return nil
}
// FileInfo implements the WOPI CheckFileInfo operation, returning metadata
// and capability flags for the file behind the current viewer session.
func (service *WopiService) FileInfo(c *gin.Context) (*WopiFileInfo, error) {
	uri, m, user, viewerSession, dep, err := prepareFs(c)
	if err != nil {
		return nil, err
	}
	idEnc := dep.HashIDEncoder()
	cfg := dep.SettingProvider()
	file, err := m.Get(c, uri,
		dbfs.WithFilePublicMetadata(),
		dbfs.WithExtendedInfo(),
		dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityDownloadFile, dbfs.NavigatorCapabilityInfo, dbfs.NavigatorCapabilityUploadFile),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to get file: %w", err)
	}
	if file == nil {
		return nil, serializer.NewError(serializer.CodeNotFound, "file not found", nil)
	}
	wanted := types.EntityTypeVersion
	found, entity := fs.FindDesiredEntity(file, viewerSession.Version, idEnc, &wanted)
	if !found {
		return nil, serializer.NewError(serializer.CodeNotFound, "version not found", nil)
	}
	// Editing is only possible on the primary (latest) entity, and only
	// for the file owner.
	isOwner := file.OwnerID() == user.ID
	editable := file.PrimaryEntityID() == entity.ID() && isOwner
	base := cfg.SiteURL(c)
	return &WopiFileInfo{
		BaseFileName:           file.DisplayName(),
		Version:                hashid.EncodeEntityID(idEnc, entity.ID()),
		BreadcrumbBrandName:    cfg.SiteBasic(c).Name,
		BreadcrumbBrandUrl:     base.String(),
		FileSharingPostMessage: isOwner,
		EnableShare:            isOwner,
		FileVersionPostMessage: true,
		ClosePostMessage:       true,
		PostMessageOrigin:      "*",
		FileNameMaxLength:      dbfs.MaxFileNameLength,
		LastModifiedTime:       file.UpdatedAt().Format(time.RFC3339),
		IsAnonymousUser:        inventory.IsAnonymousUser(user),
		UserFriendlyName:       user.Nick,
		UserId:                 hashid.EncodeUserID(idEnc, user.ID),
		ReadOnly:               !editable,
		Size:                   entity.Size(),
		OwnerId:                hashid.EncodeUserID(idEnc, file.OwnerID()),
		SupportsRename:         true,
		SupportsReviewing:      true,
		SupportsLocks:          true,
		UserCanReview:          editable,
		UserCanWrite:           editable,
		BreadcrumbFolderName:   uri.Dir(),
		BreadcrumbFolderUrl:    routes.FrontendHomeUrl(base, uri.DirUri().String()).String(),
	}, nil
}
type (
	// CreateViewerSessionService is the request payload for creating a new
	// viewer session on a file.
	CreateViewerSessionService struct {
		Uri             string               `json:"uri" form:"uri" binding:"required"`
		Version         string               `json:"version" form:"version"`
		ViewerID        string               `json:"viewer_id" form:"viewer_id" binding:"required"`
		PreferredAction setting.ViewerAction `json:"preferred_action" form:"preferred_action" binding:"required"`
	}
	// CreateViewerSessionParamCtx is the context key for the bound
	// CreateViewerSessionService parameters.
	CreateViewerSessionParamCtx struct{}
)
// Create opens a viewer session for the given file URI using the requested
// viewer, and, for WOPI viewers, also generates the WOPI source URL.
func (s *CreateViewerSessionService) Create(c *gin.Context) (*ViewerSessionResponse, error) {
	dep := dependency.FromContext(c)
	user := inventory.UserFromContext(c)
	m := manager.NewFileManager(dep, user)
	defer m.Recycle()
	uri, err := fs.NewUriFromString(s.Uri)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeParamErr, "unknown uri", err)
	}
	// Locate the requested, enabled viewer among all configured groups.
	var targetViewer *setting.Viewer
search:
	for _, group := range dep.SettingProvider().FileViewers(c) {
		for _, candidate := range group.Viewers {
			if candidate.ID == s.ViewerID && !candidate.Disabled {
				matched := candidate
				targetViewer = &matched
				break search
			}
		}
	}
	if targetViewer == nil {
		return nil, serializer.NewError(serializer.CodeParamErr, "unknown viewer id", err)
	}
	viewerSession, err := m.CreateViewerSession(c, uri, s.Version, targetViewer)
	if err != nil {
		return nil, err
	}
	res := &ViewerSessionResponse{Session: viewerSession}
	if targetViewer.Type == setting.ViewerTypeWopi {
		// WOPI viewers need a generated WOPI src pointing at this session.
		wopiSrc, err := wopi.GenerateWopiSrc(c, s.PreferredAction, targetViewer, viewerSession)
		if err != nil {
			return nil, serializer.NewError(serializer.CodeInternalSetting, "failed to generate wopi src", err)
		}
		res.WopiSrc = wopiSrc.String()
	}
	return res, nil
}

View File

@@ -1,138 +0,0 @@
package explorer
import (
"errors"
"fmt"
"github.com/cloudreve/Cloudreve/v3/middleware"
model "github.com/cloudreve/Cloudreve/v3/models"
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem"
"github.com/cloudreve/Cloudreve/v3/pkg/hashid"
"github.com/cloudreve/Cloudreve/v3/pkg/serializer"
"github.com/cloudreve/Cloudreve/v3/pkg/wopi"
"github.com/gin-gonic/gin"
"net/http"
"time"
)
// WopiService handles WOPI protocol requests (file info, file content and
// rename) against the v3 filesystem layer.
type WopiService struct {
}
// Rename renames the WOPI target file to the name carried in the WOPI
// rename request header.
func (service *WopiService) Rename(c *gin.Context) error {
	fileSystem, _, err := service.prepareFs(c)
	if err != nil {
		return err
	}
	defer fileSystem.Recycle()
	targetID := c.MustGet("object_id").(uint)
	newName := c.GetHeader(wopi.RenameRequestHeader)
	return fileSystem.Rename(c, []uint{}, []uint{targetID}, newName)
}
// GetFile writes the content of the WOPI target file to the response,
// serving it inline; redirects to a remote file source are rejected.
func (service *WopiService) GetFile(c *gin.Context) error {
	fileSystem, _, err := service.prepareFs(c)
	if err != nil {
		return err
	}
	defer fileSystem.Recycle()
	resp, err := fileSystem.Preview(c, fileSystem.FileTarget[0].ID, true)
	if err != nil {
		return fmt.Errorf("failed to pull file content: %w", err)
	}
	// WOPI clients cannot follow a redirect to the file source.
	if resp.Redirect {
		return fmt.Errorf("redirect not supported in WOPI")
	}
	// Stream the file content directly, uncached.
	defer resp.Content.Close()
	c.Header("Cache-Control", "no-cache")
	http.ServeContent(c.Writer, c.Request, fileSystem.FileTarget[0].Name, fileSystem.FileTarget[0].UpdatedAt, resp.Content)
	return nil
}
// FileInfo assembles the WOPI CheckFileInfo response for the target file.
// The response is read-only by default; edit-related capabilities are only
// enabled when the WOPI session was created with the edit action.
func (service *WopiService) FileInfo(c *gin.Context) (*serializer.WopiFileInfo, error) {
	fs, session, err := service.prepareFs(c)
	if err != nil {
		return nil, err
	}
	defer fs.Recycle()
	// Resolve the parent folder (used for the breadcrumb fields below).
	parent, err := model.GetFoldersByIDs([]uint{fs.FileTarget[0].FolderID}, fs.User.ID)
	if err != nil {
		return nil, err
	}
	if len(parent) == 0 {
		return nil, fmt.Errorf("failed to find parent folder")
	}
	parent[0].TraceRoot()
	siteUrl := model.GetSiteURL()
	// Generate the URL pointing at the parent folder in the web UI.
	parentUrl := model.GetSiteURL()
	parentUrl.Path = "/home"
	query := parentUrl.Query()
	query.Set("path", parent[0].Position)
	parentUrl.RawQuery = query.Encode()
	info := &serializer.WopiFileInfo{
		BaseFileName:           fs.FileTarget[0].Name,
		Version:                fs.FileTarget[0].Model.UpdatedAt.String(),
		BreadcrumbBrandName:    model.GetSettingByName("siteName"),
		BreadcrumbBrandUrl:     siteUrl.String(),
		FileSharingPostMessage: false,
		PostMessageOrigin:      "*",
		FileNameMaxLength:      256,
		LastModifiedTime:       fs.FileTarget[0].Model.UpdatedAt.Format(time.RFC3339),
		IsAnonymousUser:        true,
		ReadOnly:               true,
		ClosePostMessage:       true,
		Size:                   int64(fs.FileTarget[0].Size),
		OwnerId:                hashid.HashID(fs.FileTarget[0].UserID, hashid.UserID),
	}
	if session.Action == wopi.ActionEdit {
		// Edit sessions additionally get write/rename/review capabilities
		// and user identity fields.
		info.FileSharingPostMessage = true
		info.IsAnonymousUser = false
		info.SupportsRename = true
		info.SupportsReviewing = true
		info.SupportsUpdate = true
		info.UserFriendlyName = fs.User.Nick
		info.UserId = hashid.HashID(fs.User.ID, hashid.UserID)
		info.UserCanRename = true
		info.UserCanReview = true
		info.UserCanWrite = true
		info.ReadOnly = false
		info.BreadcrumbFolderName = parent[0].Name
		info.BreadcrumbFolderUrl = parentUrl.String()
	}
	return info, nil
}
// prepareFs builds a filesystem rooted at the WOPI session user and
// targets the session's file, enforcing the maxEditSize setting.
//
// On success the caller is responsible for calling fs.Recycle().
func (service *WopiService) prepareFs(c *gin.Context) (*filesystem.FileSystem, *wopi.SessionCache, error) {
	// Create the filesystem from the request context.
	fs, err := filesystem.NewFileSystemFromContext(c)
	if err != nil {
		return nil, nil, err
	}
	session := c.MustGet(middleware.WopiSessionCtx).(*wopi.SessionCache)
	if err := fs.SetTargetFileByIDs([]uint{session.FileID}); err != nil {
		fs.Recycle()
		return nil, nil, fmt.Errorf("failed to find file: %w", err)
	}
	maxSize := model.GetIntSetting("maxEditSize", 0)
	if maxSize > 0 && fs.FileTarget[0].Size > uint64(maxSize) {
		// Recycle on this error path too, so the filesystem is not leaked
		// (the SetTargetFileByIDs error path above already does this).
		fs.Recycle()
		return nil, nil, errors.New("file too large")
	}
	return fs, session, nil
}

View File

@@ -0,0 +1,398 @@
package explorer
import (
"encoding/gob"
"time"
"github.com/cloudreve/Cloudreve/v4/application/dependency"
"github.com/cloudreve/Cloudreve/v4/ent"
"github.com/cloudreve/Cloudreve/v4/ent/task"
"github.com/cloudreve/Cloudreve/v4/inventory"
"github.com/cloudreve/Cloudreve/v4/inventory/types"
"github.com/cloudreve/Cloudreve/v4/pkg/downloader"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/fs/dbfs"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/manager"
"github.com/cloudreve/Cloudreve/v4/pkg/filemanager/workflows"
"github.com/cloudreve/Cloudreve/v4/pkg/queue"
"github.com/cloudreve/Cloudreve/v4/pkg/serializer"
"github.com/gin-gonic/gin"
"github.com/gofrs/uuid"
"github.com/samber/lo"
"golang.org/x/tools/container/intsets"
)
// ItemMoveService handles moving multiple files/directories.
type ItemMoveService struct {
	SrcDir string        `json:"src_dir" binding:"required,min=1,max=65535"`
	Src    ItemIDService `json:"src"`
	Dst    string        `json:"dst" binding:"required,min=1,max=65535"`
}

// ItemRenameService handles renaming multiple files/directories.
type ItemRenameService struct {
	Src     ItemIDService `json:"src"`
	NewName string        `json:"new_name" binding:"required,min=1,max=255"`
}

// ItemService handles operations on multiple files/directories by raw ID.
type ItemService struct {
	Items []uint `json:"items"`
	Dirs  []uint `json:"dirs"`
}

// ItemIDService handles operations on multiple files/directories. Field
// values are hash IDs; raw IDs can be obtained via Raw().
type ItemIDService struct {
	Items      []string `json:"items"`
	Dirs       []string `json:"dirs"`
	Source     *ItemService
	Force      bool `json:"force"`
	UnlinkOnly bool `json:"unlink"`
}

// ItemDecompressService describes a file decompression task.
type ItemDecompressService struct {
	Src      string `json:"src"`
	Dst      string `json:"dst" binding:"required,min=1,max=65535"`
	Encoding string `json:"encoding"`
}

// ItemPropertyService fetches properties of a single object.
type ItemPropertyService struct {
	ID        string `binding:"required"`
	TraceRoot bool   `form:"trace_root"`
	IsFolder  bool   `form:"is_folder"`
}

func init() {
	// Allow ItemIDService values to be encoded/decoded by encoding/gob.
	gob.Register(ItemIDService{})
}
type (
	// DownloadWorkflowService is the request payload for creating remote
	// download tasks, from a list of source URLs (Src) or from a single
	// source file within the file system (SrcFile).
	DownloadWorkflowService struct {
		Src     []string `json:"src"`
		SrcFile string   `json:"src_file"`
		Dst     string   `json:"dst" binding:"required"`
	}
	// CreateDownloadParamCtx is the context key for the bound download
	// parameters.
	CreateDownloadParamCtx struct{}
)
// CreateDownloadTask creates remote download tasks from a list of source
// URLs (service.Src) or a single source file (service.SrcFile). It returns
// one task response per successfully created task, together with an
// aggregated error describing any sources that failed.
func (service *DownloadWorkflowService) CreateDownloadTask(c *gin.Context) ([]*TaskResponse, error) {
	dep := dependency.FromContext(c)
	user := inventory.UserFromContext(c)
	hasher := dep.HashIDEncoder()
	m := manager.NewFileManager(dep, user)
	defer m.Recycle()
	if !user.Edges.Group.Permissions.Enabled(int(types.GroupPermissionRemoteDownload)) {
		return nil, serializer.NewError(serializer.CodeGroupNotAllowed, "Group not allowed to download files", nil)
	}
	// Exactly one of Src and SrcFile must be provided.
	if service.SrcFile == "" && len(service.Src) == 0 {
		return nil, serializer.NewError(serializer.CodeParamErr, "No source files", nil)
	}
	if service.SrcFile != "" && len(service.Src) > 0 {
		return nil, serializer.NewError(serializer.CodeParamErr, "Invalid source files", nil)
	}
	dst, err := fs.NewUriFromString(service.Dst)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeParamErr, "Invalid destination", err)
	}
	// Destination must exist and allow file creation.
	_, err = m.Get(c, dst, dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityCreateFile))
	if err != nil {
		return nil, serializer.NewError(serializer.CodeParamErr, "Invalid destination", err)
	}
	// Enforce the group's batch size limit.
	limit := user.Edges.Group.Settings.Aria2BatchSize
	if limit > 0 && len(service.Src) > limit {
		return nil, serializer.NewError(serializer.CodeBatchAria2Size, "", nil)
	}
	// Validate the source file, if one is given.
	if service.SrcFile != "" {
		src, err := fs.NewUriFromString(service.SrcFile)
		if err != nil {
			return nil, serializer.NewError(serializer.CodeParamErr, "Invalid source file uri", err)
		}
		_, err = m.Get(c, src, dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityDownloadFile))
		if err != nil {
			return nil, serializer.NewError(serializer.CodeParamErr, "Invalid source file", err)
		}
	}
	// Batch-create the tasks, collecting per-source failures.
	ae := serializer.NewAggregateError()
	tasks := make([]queue.Task, 0, len(service.Src))
	for _, src := range service.Src {
		if src == "" {
			continue
		}
		t, err := workflows.NewRemoteDownloadTask(c, src, service.SrcFile, service.Dst)
		if err != nil {
			ae.Add(src, err)
			continue
		}
		if err := dep.RemoteDownloadQueue(c).QueueTask(c, t); err != nil {
			ae.Add(src, err)
		}
		tasks = append(tasks, t)
	}
	if service.SrcFile != "" {
		t, err := workflows.NewRemoteDownloadTask(c, "", service.SrcFile, service.Dst)
		if err != nil {
			// Do not queue or return a nil task when creation failed
			// (mirrors the `continue` in the loop above).
			ae.Add(service.SrcFile, err)
		} else {
			if err := dep.RemoteDownloadQueue(c).QueueTask(c, t); err != nil {
				ae.Add(service.SrcFile, err)
			}
			tasks = append(tasks, t)
		}
	}
	return lo.Map(tasks, func(item queue.Task, index int) *TaskResponse {
		return BuildTaskResponse(item, nil, hasher)
	}), ae.Aggregate()
}
type (
	// ArchiveWorkflowService is the request payload for archive-related
	// workflow tasks (creating archives and extracting them).
	ArchiveWorkflowService struct {
		Src      []string `json:"src" binding:"required"`
		Dst      string   `json:"dst" binding:"required"`
		Encoding string   `json:"encoding"`
	}
	// CreateArchiveParamCtx is the context key for the bound archive
	// parameters.
	CreateArchiveParamCtx struct{}
)
// CreateExtractTask queues a background task that extracts the archive at
// service.Src[0] into the destination folder.
func (service *ArchiveWorkflowService) CreateExtractTask(c *gin.Context) (*TaskResponse, error) {
	dep := dependency.FromContext(c)
	user := inventory.UserFromContext(c)
	m := manager.NewFileManager(dep, user)
	defer m.Recycle()
	if !user.Edges.Group.Permissions.Enabled(int(types.GroupPermissionArchiveTask)) {
		return nil, serializer.NewError(serializer.CodeGroupNotAllowed, "Group not allowed to compress files", nil)
	}
	dst, err := fs.NewUriFromString(service.Dst)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeParamErr, "Invalid destination", err)
	}
	if len(service.Src) == 0 {
		return nil, serializer.NewError(serializer.CodeParamErr, "No source files", nil)
	}
	// The destination must exist and allow creating new files.
	if _, err := m.Get(c, dst, dbfs.WithRequiredCapabilities(dbfs.NavigatorCapabilityCreateFile)); err != nil {
		return nil, serializer.NewError(serializer.CodeParamErr, "Invalid destination", err)
	}
	// Create and enqueue the extraction task.
	t, err := workflows.NewExtractArchiveTask(c, service.Src[0], service.Dst, service.Encoding)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeCreateTaskError, "Failed to create task", err)
	}
	if err := dep.IoIntenseQueue(c).QueueTask(c, t); err != nil {
		return nil, serializer.NewError(serializer.CodeCreateTaskError, "Failed to queue task", err)
	}
	return BuildTaskResponse(t, nil, dep.HashIDEncoder()), nil
}
// CreateCompressTask queues a background task that packs the given sources
// into a new archive file at service.Dst.
func (service *ArchiveWorkflowService) CreateCompressTask(c *gin.Context) (*TaskResponse, error) {
	dep := dependency.FromContext(c)
	user := inventory.UserFromContext(c)
	m := manager.NewFileManager(dep, user)
	defer m.Recycle()
	if !user.Edges.Group.Permissions.Enabled(int(types.GroupPermissionArchiveTask)) {
		return nil, serializer.NewError(serializer.CodeGroupNotAllowed, "Group not allowed to compress files", nil)
	}
	dst, err := fs.NewUriFromString(service.Dst)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeParamErr, "Invalid destination", err)
	}
	// Validate the destination by preparing — and immediately discarding —
	// a zero-byte placeholder upload session.
	session, err := m.PrepareUpload(c, &fs.UploadRequest{
		Props: &fs.UploadProps{
			Uri:             dst,
			Size:            0,
			UploadSessionID: uuid.Must(uuid.NewV4()).String(),
			ExpireAt:        time.Now().Add(time.Second * 3600),
		},
	})
	if err != nil {
		return nil, err
	}
	m.OnUploadFailed(c, session)
	// Create and enqueue the compression task.
	t, err := workflows.NewCreateArchiveTask(c, service.Src, service.Dst)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeCreateTaskError, "Failed to create task", err)
	}
	if err := dep.IoIntenseQueue(c).QueueTask(c, t); err != nil {
		return nil, serializer.NewError(serializer.CodeCreateTaskError, "Failed to queue task", err)
	}
	return BuildTaskResponse(t, nil, dep.HashIDEncoder()), nil
}
type (
	// ListTaskService is the request payload for listing background tasks
	// with cursor-based pagination.
	ListTaskService struct {
		PageSize      int    `form:"page_size" binding:"required,min=10,max=100"`
		Category      string `form:"category" binding:"required,eq=general|eq=downloading|eq=downloaded"`
		NextPageToken string `form:"next_page_token"`
	}
	// ListTaskParamCtx is the context key for the bound list-task
	// parameters.
	ListTaskParamCtx struct{}
)
// ListTasks returns a page of background tasks for the current user. The
// "general" category lists archive/extract/relocate tasks; "downloading"
// and "downloaded" list remote download tasks filtered by status.
func (service *ListTaskService) ListTasks(c *gin.Context) (*TaskListResponse, error) {
	dep := dependency.FromContext(c)
	user := inventory.UserFromContext(c)
	hasher := dep.HashIDEncoder()
	taskClient := dep.TaskClient()
	args := &inventory.ListTaskArgs{
		PaginationArgs: &inventory.PaginationArgs{
			UseCursorPagination: true,
			PageToken:           service.NextPageToken,
			PageSize:            service.PageSize,
		},
		Types:  []string{queue.CreateArchiveTaskType, queue.ExtractArchiveTaskType, queue.RelocateTaskType},
		UserID: user.ID,
	}
	if service.Category != "general" {
		args.Types = []string{queue.RemoteDownloadTaskType}
		if service.Category == "downloading" {
			// Active downloads are not paginated: return all of them.
			args.PageSize = intsets.MaxInt
			args.Status = []task.Status{task.StatusSuspending, task.StatusProcessing, task.StatusQueued}
		} else if service.Category == "downloaded" {
			args.Status = []task.Status{task.StatusCanceled, task.StatusError, task.StatusCompleted}
		}
	}
	// Query tasks.
	res, err := taskClient.List(c, args)
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to query tasks", err)
	}
	tasks := make([]queue.Task, 0, len(res.Tasks))
	nodeMap := make(map[int]*ent.Node)
	// Loop variable is named qt (not "task") so the imported task package
	// is not shadowed.
	for _, t := range res.Tasks {
		qt, err := queue.NewTaskFromModel(t)
		if err != nil {
			return nil, serializer.NewError(serializer.CodeDBError, "Failed to parse task", err)
		}
		// Collect IDs of nodes referenced by the task summaries.
		summary := qt.Summarize(hasher)
		if summary != nil && summary.NodeID > 0 {
			if _, ok := nodeMap[summary.NodeID]; !ok {
				nodeMap[summary.NodeID] = nil
			}
		}
		tasks = append(tasks, qt)
	}
	// Resolve all referenced nodes in one query.
	nodes, err := dep.NodeClient().ListActiveNodes(c, lo.Keys(nodeMap))
	if err != nil {
		return nil, serializer.NewError(serializer.CodeDBError, "Failed to query nodes", err)
	}
	for _, n := range nodes {
		nodeMap[n.ID] = n
	}
	// Build response
	return BuildTaskListResponse(tasks, res, nodeMap, hasher), nil
}
// TaskPhaseProgress returns live progress information for a task owned by
// the current user, or an empty progress set when the task is unknown or
// belongs to another user.
func TaskPhaseProgress(c *gin.Context, taskID int) (queue.Progresses, error) {
	dep := dependency.FromContext(c)
	user := inventory.UserFromContext(c)
	t, ok := dep.TaskRegistry().Get(taskID)
	if !ok || t.Owner().ID != user.ID {
		return queue.Progresses{}, nil
	}
	return t.Progress(c), nil
}
// CancelDownloadTask cancels an in-progress remote download task owned by
// the current user; non-download tasks are a no-op.
func CancelDownloadTask(c *gin.Context, taskID int) error {
	dep := dependency.FromContext(c)
	user := inventory.UserFromContext(c)
	t, ok := dep.TaskRegistry().Get(taskID)
	if !ok || t.Owner().ID != user.ID {
		return serializer.NewError(serializer.CodeNotFound, "Task not found", nil)
	}
	downloadTask, isDownload := t.(*workflows.RemoteDownloadTask)
	if !isDownload {
		return nil
	}
	if err := downloadTask.CancelDownload(c); err != nil {
		return serializer.NewError(serializer.CodeInternalSetting, "Failed to cancel download task", err)
	}
	return nil
}
type (
	// SetDownloadFilesService is the request payload selecting which files
	// inside a remote download task should be downloaded.
	SetDownloadFilesService struct {
		Files []*downloader.SetFileToDownloadArgs `json:"files" binding:"required"`
	}
	// SetDownloadFilesParamCtx is the context key for the bound
	// parameters.
	SetDownloadFilesParamCtx struct{}
)
// SetDownloadFiles selects which files inside a remote download task
// should be downloaded. The task must belong to the current user, be in a
// processing state, and be in its monitoring phase.
func (service *SetDownloadFilesService) SetDownloadFiles(c *gin.Context, taskID int) error {
	dep := dependency.FromContext(c)
	u := inventory.UserFromContext(c)
	r := dep.TaskRegistry()
	t, found := r.Get(taskID)
	if !found || t.Owner().ID != u.ID {
		return serializer.NewError(serializer.CodeNotFound, "Task not found", nil)
	}
	status := t.Status()
	summary := t.Summarize(dep.HashIDEncoder())
	// Task must be in processing state
	if status != task.StatusSuspending && status != task.StatusProcessing {
		return serializer.NewError(serializer.CodeNotFound, "Task not in processing state", nil)
	}
	// Summarize may return nil (ListTasks guards the same call); treat a
	// nil summary as the task not being in its monitoring loop.
	if summary == nil || summary.Phase != workflows.RemoteDownloadTaskPhaseMonitor {
		return serializer.NewError(serializer.CodeNotFound, "Task not in monitoring loop", nil)
	}
	if downloadTask, ok := t.(*workflows.RemoteDownloadTask); ok {
		if err := downloadTask.SetDownloadTarget(c, service.Files...); err != nil {
			return serializer.NewError(serializer.CodeInternalSetting, "Failed to set download files", err)
		}
	}
	return nil
}