diff --git a/drivers/alias/driver.go b/drivers/alias/driver.go index 07999e581..672bd00b1 100644 --- a/drivers/alias/driver.go +++ b/drivers/alias/driver.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "math/rand" "net/url" stdpath "path" "strings" @@ -16,6 +17,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/sign" "github.com/OpenListTeam/OpenList/v4/internal/stream" + "github.com/OpenListTeam/OpenList/v4/pkg/http_range" "github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/server/common" ) @@ -23,10 +25,9 @@ import ( type Alias struct { model.Storage Addition - rootOrder []string - pathMap map[string][]string - autoFlatten bool - oneKey string + rootOrder []string + pathMap map[string][]string + root model.Obj } func (d *Alias) Config() driver.Config { @@ -38,9 +39,6 @@ func (d *Alias) GetAddition() driver.Additional { } func (d *Alias) Init(ctx context.Context) error { - if d.Paths == "" { - return errors.New("paths is required") - } paths := strings.Split(d.Paths, "\n") d.rootOrder = make([]string, 0, len(paths)) d.pathMap = make(map[string][]string) @@ -50,19 +48,50 @@ func (d *Alias) Init(ctx context.Context) error { continue } k, v := getPair(path) - if _, ok := d.pathMap[k]; !ok { + temp, ok := d.pathMap[k] + if !ok { d.rootOrder = append(d.rootOrder, k) } - d.pathMap[k] = append(d.pathMap[k], v) + d.pathMap[k] = append(temp, v) } - if len(d.pathMap) == 1 { - for k := range d.pathMap { - d.oneKey = k + + switch len(d.rootOrder) { + case 0: + return errors.New("paths is required") + case 1: + paths := d.pathMap[d.rootOrder[0]] + roots := make(BalancedObjs, 0, len(paths)) + roots = append(roots, &model.Object{ + Name: "root", + Path: paths[0], + IsFolder: true, + Modified: d.Modified, + Mask: model.Locked, + }) + for _, path := range paths[1:] { + roots = append(roots, &model.Object{ + Path: path, + }) + } + d.root = roots + default: + d.root = &model.Object{ + Name: "root", + Path: "/", + IsFolder: true, + Modified: d.Modified, + Mask: model.ReadOnly, } - d.autoFlatten = true - } else { - d.oneKey = "" - d.autoFlatten = false + } + + if !utils.SliceContains(ValidReadConflictPolicy, d.ReadConflictPolicy) { + d.ReadConflictPolicy = FirstRWP + } + if !utils.SliceContains(ValidWriteConflictPolicy, d.WriteConflictPolicy) { + d.WriteConflictPolicy = DisabledWP + } + if !utils.SliceContains(ValidPutConflictPolicy, d.PutConflictPolicy) { + d.PutConflictPolicy = DisabledWP } return nil } @@ -70,305 +99,292 @@ func (d *Alias) Init(ctx context.Context) error { func (d *Alias) Drop(ctx context.Context) error { d.rootOrder = nil d.pathMap = nil + d.root = nil return nil } -func (Addition) GetRootPath() string { - return "/" +func (d *Alias) GetRoot(ctx context.Context) (model.Obj, error) { + if d.root == nil { + return nil, errs.StorageNotInit + } + return d.root, nil } +// When called via op.Get, path is always a sub-path (it starts with /) func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) { - root, sub := d.getRootAndPath(path) - dsts, ok := d.pathMap[root] - if !ok { + roots, sub := d.getRootsAndPath(path) + if len(roots) == 0 { return nil, errs.ObjectNotFound } - var ret *model.Object - provider := "" - var mask model.ObjMask - for _, dst := range dsts { - rawPath := stdpath.Join(dst, sub) + for idx, root := range roots { + rawPath := stdpath.Join(root, sub) obj, err := fs.Get(ctx, rawPath, &fs.GetArgs{NoLog: true}) if err != nil { continue } - storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{}) - if ret 
== nil { - mask = model.GetObjMask(obj) - mask &^= model.Temp - ret = &model.Object{ - Path: path, - Name: obj.GetName(), - Size: obj.GetSize(), - Modified: obj.ModTime(), - IsFolder: obj.IsDir(), - HashInfo: obj.GetHash(), - } - if !d.ProviderPassThrough || err != nil { - break + mask := model.GetObjMask(obj) &^ model.Temp + if sub == "" { + // root directory + mask |= model.Locked | model.Virtual + } + ret := model.Object{ + Path: rawPath, + Name: obj.GetName(), + Size: obj.GetSize(), + Modified: obj.ModTime(), + IsFolder: obj.IsDir(), + HashInfo: obj.GetHash(), + Mask: mask, + } + obj = &ret + if d.ProviderPassThrough && !obj.IsDir() { + if storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{}); err == nil { + obj = &model.ObjectProvider{ + Object: ret, + Provider: model.Provider{ + Provider: storage.Config().Name, + }, + } } - provider = storage.Config().Name - } else if err != nil || provider != storage.GetStorage().Driver { - provider = "" - break } + + roots = roots[idx+1:] + var objs BalancedObjs + if idx > 0 { + objs = make(BalancedObjs, 0, len(roots)+2) + } else { + objs = make(BalancedObjs, 0, len(roots)+1) + } + objs = append(objs, obj) + if idx > 0 { + objs = append(objs, nil) + } + for _, d := range roots { + objs = append(objs, &tempObj{model.Object{ + Path: stdpath.Join(d, sub), + }}) + } + return objs, nil } - if ret == nil { - return nil, errs.ObjectNotFound - } - if provider != "" { - return model.ObjAddMask(&model.ObjectProvider{ - Object: *ret, - Provider: model.Provider{ - Provider: provider, - }, - }, mask), nil - } - return model.ObjAddMask(ret, mask), nil + return nil, errs.ObjectNotFound } func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { - path := dir.GetPath() - if utils.PathEqual(path, "/") && !d.autoFlatten { - return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough, args.Refresh), nil - } - root, sub := d.getRootAndPath(path) - dsts, ok := d.pathMap[root] + dirs, ok := dir.(BalancedObjs) if !ok { - return nil, errs.ObjectNotFound + return d.listRoot(ctx, args.WithStorageDetails && d.DetailsPassThrough, args.Refresh), nil } - var objs []model.Obj - for _, dst := range dsts { - tmp, err := fs.List(ctx, stdpath.Join(dst, sub), &fs.ListArgs{ + + // Because alias is NoCache and the Get method never returns NotSupport or NotImplement errors, + // objects listed here are never passed back into alias, so there is no need to return BalancedObjs + objMap := make(map[string]model.Obj) + for _, dir := range dirs { + if dir == nil { + continue + } + dirPath := dir.GetPath() + tmp, err := fs.List(ctx, dirPath, &fs.ListArgs{ NoLog: true, Refresh: args.Refresh, WithStorageDetails: args.WithStorageDetails && d.DetailsPassThrough, }) - if err == nil { - tmp, err = utils.SliceConvert(tmp, func(obj model.Obj) (model.Obj, error) { - objRes := model.Object{ - Name: obj.GetName(), - Path: stdpath.Join(path, obj.GetName()), - Size: obj.GetSize(), - Modified: obj.ModTime(), - IsFolder: obj.IsDir(), - } - mask := model.GetObjMask(obj) - mask &^= model.Temp - if thumb, ok := model.GetThumb(obj); ok { - return model.ObjAddMask(&model.ObjThumb{ - Object: objRes, - Thumbnail: model.Thumbnail{ - Thumbnail: thumb, - }, - }, mask), nil + if err != nil { + continue + } + for _, obj := range tmp { + name := obj.GetName() + if _, exists := objMap[name]; exists { + continue + } + mask := model.GetObjMask(obj) &^ model.Temp + objRes := model.Object{ + Name: name, + Path: stdpath.Join(dirPath, name), + Size: obj.GetSize(), + Modified: obj.ModTime(), + IsFolder: obj.IsDir(), + Mask: mask, + } + var objRet model.Obj + if thumb, ok := 
model.GetThumb(obj); ok { + objRet = &model.ObjThumb{ + Object: objRes, + Thumbnail: model.Thumbnail{ + Thumbnail: thumb, + }, } - if details, ok := model.GetStorageDetails(obj); ok { - return model.ObjAddMask(&model.ObjStorageDetails{ - Obj: &objRes, - StorageDetailsWithName: *details, - }, mask), nil + } else { + objRet = &objRes + } + if details, ok := model.GetStorageDetails(obj); ok { + objRet = &model.ObjStorageDetails{ + Obj: objRet, + StorageDetailsWithName: *details, } - return model.ObjAddMask(&objRes, mask), nil - }) - } - if err == nil { - objs = append(objs, tmp...) + } + objMap[name] = objRet } } + objs := make([]model.Obj, 0, len(objMap)) + for _, obj := range objMap { + objs = append(objs, obj) + } return objs, nil } func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { - root, sub := d.getRootAndPath(file.GetPath()) - dsts, ok := d.pathMap[root] - if !ok { - return nil, errs.ObjectNotFound - } - // proxy || ftp,s3 - if common.GetApiUrl(ctx) == "" { - args.Redirect = false - } - for _, dst := range dsts { - reqPath := stdpath.Join(dst, sub) - link, fi, err := d.link(ctx, reqPath, args) + if d.ReadConflictPolicy == AllRWP && !args.Redirect { + files, err := d.getAllObjs(ctx, file, getWriteAndPutFilterFunc(AllRWP)) if err != nil { - continue + return nil, err } - if link == nil { - // 重定向且需要通过代理 - return &model.Link{ - URL: fmt.Sprintf("%s/p%s?sign=%s", - common.GetApiUrl(ctx), - utils.EncodePath(reqPath, true), - sign.Sign(reqPath)), - }, nil + linkClosers := make([]io.Closer, 0, len(files)) + rrf := make([]model.RangeReaderIF, 0, len(files)) + for _, f := range files { + link, fi, err := d.link(ctx, f.GetPath(), args) + if err != nil { + continue + } + if fi.GetSize() != files.GetSize() { + _ = link.Close() + continue + } + l := *link // make a copy to avoid modifying the original link + if l.ContentLength == 0 { + l.ContentLength = fi.GetSize() + } + if d.DownloadConcurrency > 0 { + l.Concurrency = d.DownloadConcurrency + } + if d.DownloadPartSize > 0 { + l.PartSize = d.DownloadPartSize * utils.KB + } + rr, err := stream.GetRangeReaderFromLink(l.ContentLength, &l) + if err != nil { + _ = link.Close() + continue + } + linkClosers = append(linkClosers, link) + rrf = append(rrf, rr) } - - resultLink := *link - resultLink.SyncClosers = utils.NewSyncClosers(link) - if args.Redirect { - return &resultLink, nil + rr := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { + return rrf[rand.Intn(len(rrf))].RangeRead(ctx, httpRange) } + return &model.Link{ + RangeReader: stream.RangeReaderFunc(rr), + SyncClosers: utils.NewSyncClosers(linkClosers...), + }, nil + } - if resultLink.ContentLength == 0 { - resultLink.ContentLength = fi.GetSize() - } - if d.DownloadConcurrency > 0 { - resultLink.Concurrency = d.DownloadConcurrency - } - if d.DownloadPartSize > 0 { - resultLink.PartSize = d.DownloadPartSize * utils.KB - } + reqPath := d.getBalancedPath(ctx, file) + link, fi, err := d.link(ctx, reqPath, args) + if err != nil { + return nil, err + } + if link == nil { + // redirect, but it has to go through the proxy + return &model.Link{ + URL: fmt.Sprintf("%s/p%s?sign=%s", + common.GetApiUrl(ctx), + utils.EncodePath(reqPath, true), + sign.Sign(reqPath)), + }, nil + } + resultLink := *link // make a copy to avoid modifying the original link + resultLink.SyncClosers = utils.NewSyncClosers(link) + if args.Redirect { return &resultLink, nil } - return nil, errs.ObjectNotFound + if resultLink.ContentLength == 0 { + resultLink.ContentLength = fi.GetSize() + } + if d.DownloadConcurrency > 0 { + resultLink.Concurrency = 
d.DownloadConcurrency + } + if d.DownloadPartSize > 0 { + resultLink.PartSize = d.DownloadPartSize * utils.KB + } + return &resultLink, nil } func (d *Alias) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { - root, sub := d.getRootAndPath(args.Obj.GetPath()) - dsts, ok := d.pathMap[root] - if !ok { - return nil, errs.ObjectNotFound - } - for _, dst := range dsts { - rawPath := stdpath.Join(dst, sub) - storage, actualPath, err := op.GetStorageAndActualPath(rawPath) - if err != nil { - continue - } - return op.Other(ctx, storage, model.FsOtherArgs{ - Path: actualPath, - Method: args.Method, - Data: args.Data, - }) + // Other must not be load-balanced: the frontend decides whether to call /fs/other based on the returned provider value, + // and with ProviderPassThrough enabled the returned provider is always the backend driver of the first obj + storage, actualPath, err := op.GetStorageAndActualPath(args.Obj.GetPath()) + if err != nil { + return nil, err } - return nil, errs.NotImplement + return op.Other(ctx, storage, model.FsOtherArgs{ + Path: actualPath, + Method: args.Method, + Data: args.Data, + }) } func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { - if !d.Writable { - return errs.PermissionDenied - } - reqPath, err := d.getReqPath(ctx, parentDir, true) + objs, err := d.getWriteObjs(ctx, parentDir) if err == nil { - for _, path := range reqPath { - err = errors.Join(err, fs.MakeDir(ctx, stdpath.Join(*path, dirName))) + for _, obj := range objs { + err = errors.Join(err, fs.MakeDir(ctx, stdpath.Join(obj.GetPath(), dirName))) } - return err - } - if errs.IsNotImplementError(err) { - return errors.New("same-name dirs cannot make sub-dir") } return err } func (d *Alias) Move(ctx context.Context, srcObj, dstDir model.Obj) error { - if !d.Writable { - return errs.PermissionDenied - } - srcPath, err := d.getReqPath(ctx, srcObj, false) - if errs.IsNotImplementError(err) { - return errors.New("same-name files cannot be moved") - } - if err != nil { - return err - } - dstPath, err := d.getReqPath(ctx, dstDir, true) - if errs.IsNotImplementError(err) { - return errors.New("same-name dirs cannot be moved to") - } - if err != nil { - return err - } - if len(srcPath) == len(dstPath) { - for i := range srcPath { - _, e := fs.Move(ctx, *srcPath[i], *dstPath[i]) + srcs, dsts, err := d.getMoveObjs(ctx, srcObj, dstDir) + if err == nil { + for i, dst := range dsts { + src := srcs[i] + _, e := fs.Move(ctx, src.GetPath(), dst.GetPath()) + err = errors.Join(err, e) + } + srcs = srcs[len(dsts):] + for _, src := range srcs { + e := fs.Remove(ctx, src.GetPath()) err = errors.Join(err, e) } - return err - } else { - return errors.New("parallel paths mismatch") } + return err } func (d *Alias) Rename(ctx context.Context, srcObj model.Obj, newName string) error { - if !d.Writable { - return errs.PermissionDenied - } - reqPath, err := d.getReqPath(ctx, srcObj, false) + objs, err := d.getWriteObjs(ctx, srcObj) if err == nil { - for _, path := range reqPath { - err = errors.Join(err, fs.Rename(ctx, *path, newName)) + for _, obj := range objs { + err = errors.Join(err, fs.Rename(ctx, obj.GetPath(), newName)) } - return err - } - if errs.IsNotImplementError(err) { - return errors.New("same-name files cannot be Rename") } return err } func (d *Alias) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { - if !d.Writable { - return errs.PermissionDenied - } - srcPath, err := d.getReqPath(ctx, srcObj, false) - if errs.IsNotImplementError(err) { - return errors.New("same-name files cannot be copied") - } - if err != nil { - return err - } - dstPath, err := 
d.getReqPath(ctx, dstDir, true) - if errs.IsNotImplementError(err) { - return errors.New("same-name dirs cannot be copied to") - } - if err != nil { - return err - } - if len(srcPath) == len(dstPath) { - for i := range srcPath { - _, e := fs.Copy(ctx, *srcPath[i], *dstPath[i]) - err = errors.Join(err, e) - } - return err - } else if len(srcPath) == 1 || !d.ProtectSameName { - for _, path := range dstPath { - _, e := fs.Copy(ctx, *srcPath[0], *path) + srcs, dsts, err := d.getCopyObjs(ctx, srcObj, dstDir) + if err == nil { + for i, src := range srcs { + dst := dsts[i] + _, e := fs.Copy(ctx, src.GetPath(), dst.GetPath()) err = errors.Join(err, e) } - return err - } else { - return errors.New("parallel paths mismatch") } + return err } func (d *Alias) Remove(ctx context.Context, obj model.Obj) error { - if !d.Writable { - return errs.PermissionDenied - } - reqPath, err := d.getReqPath(ctx, obj, false) + objs, err := d.getWriteObjs(ctx, obj) if err == nil { - for _, path := range reqPath { - err = errors.Join(err, fs.Remove(ctx, *path)) + for _, obj := range objs { + err = errors.Join(err, fs.Remove(ctx, obj.GetPath())) } - return err - } - if errs.IsNotImplementError(err) { - return errors.New("same-name files cannot be Delete") } return err } func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error { - if !d.Writable { - return errs.PermissionDenied - } - reqPath, err := d.getReqPath(ctx, dstDir, true) + objs, err := d.getPutObjs(ctx, dstDir) if err == nil { - if len(reqPath) == 1 { - storage, reqActualPath, err := op.GetStorageAndActualPath(*reqPath[0]) + if len(objs) == 1 { + storage, reqActualPath, err := op.GetStorageAndActualPath(objs.GetPath()) if err != nil { return err } @@ -382,10 +398,10 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, if err != nil { return err } - count := float64(len(reqPath) + 1) + count := float64(len(objs) + 1) up(100 / count) - for i, path := range reqPath { - err = errors.Join(err, fs.PutDirectly(ctx, *path, &stream.FileStream{ + for i, obj := range objs { + err = errors.Join(err, fs.PutDirectly(ctx, obj.GetPath(), &stream.FileStream{ Obj: s, Mimetype: s.GetMimetype(), Reader: file, @@ -399,55 +415,40 @@ func (d *Alias) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, return err } } - if errs.IsNotImplementError(err) { - return errors.New("same-name dirs cannot be Put") - } return err } func (d *Alias) PutURL(ctx context.Context, dstDir model.Obj, name, url string) error { - if !d.Writable { - return errs.PermissionDenied - } - reqPath, err := d.getReqPath(ctx, dstDir, true) + objs, err := d.getPutObjs(ctx, dstDir) if err == nil { - for _, path := range reqPath { - err = errors.Join(err, fs.PutURL(ctx, *path, name, url)) + for _, obj := range objs { + err = errors.Join(err, fs.PutURL(ctx, obj.GetPath(), name, url)) } return err } - if errs.IsNotImplementError(err) { - return errors.New("same-name files cannot offline download") - } return err } func (d *Alias) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { - root, sub := d.getRootAndPath(obj.GetPath()) - dsts, ok := d.pathMap[root] - if !ok { - return nil, errs.ObjectNotFound + reqPath := d.getBalancedPath(ctx, obj) + if reqPath == "" { + return nil, errs.NotFile } - for _, dst := range dsts { - meta, err := d.getArchiveMeta(ctx, dst, sub, args) - if err == nil { - return meta, nil - } + meta, err := d.getArchiveMeta(ctx, reqPath, args) + if err 
== nil { + return meta, nil } return nil, errs.NotImplement } func (d *Alias) ListArchive(ctx context.Context, obj model.Obj, args model.ArchiveInnerArgs) ([]model.Obj, error) { - root, sub := d.getRootAndPath(obj.GetPath()) - dsts, ok := d.pathMap[root] - if !ok { - return nil, errs.ObjectNotFound + reqPath := d.getBalancedPath(ctx, obj) + if reqPath == "" { + return nil, errs.NotFile } - for _, dst := range dsts { - l, err := d.listArchive(ctx, dst, sub, args) - if err == nil { - return l, nil - } + l, err := d.listArchive(ctx, reqPath, args) + if err == nil { + return l, nil } return nil, errs.NotImplement } @@ -456,77 +457,48 @@ func (d *Alias) Extract(ctx context.Context, obj model.Obj, args model.ArchiveIn // alias的两个驱动,一个支持驱动提取,一个不支持,如何兼容? // 如果访问的是不支持驱动提取的驱动内的压缩文件,GetArchiveMeta就会返回errs.NotImplement,提取URL前缀就会是/ae,Extract就不会被调用 // 如果访问的是支持驱动提取的驱动内的压缩文件,GetArchiveMeta就会返回有效值,提取URL前缀就会是/ad,Extract就会被调用 - root, sub := d.getRootAndPath(obj.GetPath()) - dsts, ok := d.pathMap[root] - if !ok { - return nil, errs.ObjectNotFound + reqPath := d.getBalancedPath(ctx, obj) + if reqPath == "" { + return nil, errs.NotFile } - for _, dst := range dsts { - reqPath := stdpath.Join(dst, sub) - link, err := d.extract(ctx, reqPath, args) - if err != nil { - continue - } - if link == nil { - return &model.Link{ - URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s", - common.GetApiUrl(ctx), - utils.EncodePath(reqPath, true), - utils.EncodePath(args.InnerPath, true), - url.QueryEscape(args.Password), - sign.SignArchive(reqPath)), - }, nil - } - resultLink := *link - resultLink.SyncClosers = utils.NewSyncClosers(link) - return &resultLink, nil - } - return nil, errs.NotImplement + link, err := d.extract(ctx, reqPath, args) + if err != nil { + return nil, errs.NotImplement + } + if link == nil { + return &model.Link{ + URL: fmt.Sprintf("%s/ap%s?inner=%s&pass=%s&sign=%s", + common.GetApiUrl(ctx), + utils.EncodePath(reqPath, true), + utils.EncodePath(args.InnerPath, true), + url.QueryEscape(args.Password), + sign.SignArchive(reqPath)), + }, nil + } + resultLink := *link + resultLink.SyncClosers = utils.NewSyncClosers(link) + return &resultLink, nil } func (d *Alias) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) error { - if !d.Writable { - return errs.PermissionDenied - } - srcPath, err := d.getReqPath(ctx, srcObj, false) - if errs.IsNotImplementError(err) { - return errors.New("same-name files cannot be decompressed") - } - if err != nil { - return err - } - dstPath, err := d.getReqPath(ctx, dstDir, true) - if errs.IsNotImplementError(err) { - return errors.New("same-name dirs cannot be decompressed to") - } - if err != nil { - return err - } - if len(srcPath) == len(dstPath) { - for i := range srcPath { - _, e := fs.ArchiveDecompress(ctx, *srcPath[i], *dstPath[i], args) - err = errors.Join(err, e) - } - return err - } else if len(srcPath) == 1 || !d.ProtectSameName { - for _, path := range dstPath { - _, e := fs.ArchiveDecompress(ctx, *srcPath[0], *path, args) + srcs, dsts, err := d.getCopyObjs(ctx, srcObj, dstDir) + if err == nil { + for i, src := range srcs { + dst := dsts[i] + _, e := fs.ArchiveDecompress(ctx, src.GetPath(), dst.GetPath(), args) err = errors.Join(err, e) } - return err - } else { - return errors.New("parallel paths mismatch") } + return err } func (d *Alias) ResolveLinkCacheMode(path string) driver.LinkCacheMode { - root, sub := d.getRootAndPath(path) - dsts, ok := d.pathMap[root] - if !ok { + roots, sub := d.getRootsAndPath(path) + if 
len(roots) == 0 { return 0 } - for _, dst := range dsts { - storage, actualPath, err := op.GetStorageAndActualPath(stdpath.Join(dst, sub)) + for _, root := range roots { + storage, actualPath, err := op.GetStorageAndActualPath(stdpath.Join(root, sub)) if err != nil { continue } diff --git a/drivers/alias/meta.go b/drivers/alias/meta.go index 763e66472..72eb3c877 100644 --- a/drivers/alias/meta.go +++ b/drivers/alias/meta.go @@ -6,17 +6,15 @@ import ( ) type Addition struct { - // Usually one of two - // driver.RootPath - // define other - Paths string `json:"paths" required:"true" type:"text"` - ProtectSameName bool `json:"protect_same_name" default:"true" required:"false" help:"Protects same-name files from Delete or Rename"` - ParallelWrite bool `json:"parallel_write" type:"bool" default:"false"` - DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"` - DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"` - Writable bool `json:"writable" type:"bool" default:"false"` - ProviderPassThrough bool `json:"provider_pass_through" type:"bool" default:"false"` - DetailsPassThrough bool `json:"details_pass_through" type:"bool" default:"false"` + Paths string `json:"paths" required:"true" type:"text"` + ReadConflictPolicy string `json:"read_conflict_policy" type:"select" options:"first,random,all" default:"first"` + WriteConflictPolicy string `json:"write_conflict_policy" type:"select" options:"disabled,first,deterministic,deterministic_or_all,all,all_strict" default:"disabled" help:"How the driver handles identical backend paths when renaming, removing, or making directories."` + PutConflictPolicy string `json:"put_conflict_policy" type:"select" options:"disabled,first,deterministic,deterministic_or_all,all,all_strict,random,quota,quota_strict" default:"disabled" help:"How the driver handles identical backend paths when uploading, copying, moving, or decompressing."` + FileConsistencyCheck bool `json:"file_consistency_check" type:"bool" default:"false"` + DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"` + DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. 
Unit: KB"` + ProviderPassThrough bool `json:"provider_pass_through" type:"bool" default:"false"` + DetailsPassThrough bool `json:"details_pass_through" type:"bool" default:"false"` } var config = driver.Config{ @@ -31,10 +29,6 @@ var config = driver.Config{ func init() { op.RegisterDriver(func() driver.Driver { - return &Alias{ - Addition: Addition{ - ProtectSameName: true, - }, - } + return &Alias{} }) } diff --git a/drivers/alias/types.go b/drivers/alias/types.go index e560393da..9fade0c5b 100644 --- a/drivers/alias/types.go +++ b/drivers/alias/types.go @@ -1 +1,78 @@ package alias + +import ( + "time" + + "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/pkg/utils" + "github.com/pkg/errors" +) + +const ( + DisabledWP = "disabled" + FirstRWP = "first" + DeterministicWP = "deterministic" + DeterministicOrAllWP = "deterministic_or_all" + AllRWP = "all" + AllStrictWP = "all_strict" + RandomBalancedRP = "random" + BalancedByQuotaP = "quota" + BalancedByQuotaStrictP = "quota_strict" +) + +var ( + ValidReadConflictPolicy = []string{FirstRWP, RandomBalancedRP, AllRWP} + ValidWriteConflictPolicy = []string{DisabledWP, FirstRWP, DeterministicWP, DeterministicOrAllWP, AllRWP, + AllStrictWP} + ValidPutConflictPolicy = []string{DisabledWP, FirstRWP, DeterministicWP, DeterministicOrAllWP, AllRWP, + AllStrictWP, RandomBalancedRP, BalancedByQuotaP, BalancedByQuotaStrictP} +) + +var ( + ErrPathConflict = errors.New("path conflict") + ErrSamePathLeak = errors.New("leak some of same-name dirs") + ErrNoEnoughSpace = errors.New("none of same-name dirs has enough space") + ErrNotEnoughSrcObjs = errors.New("cannot move fewer objs to more paths, please try copying") +) + +type BalancedObjs []model.Obj + +func (b BalancedObjs) GetSize() int64 { + return b[0].GetSize() +} + +func (b BalancedObjs) ModTime() time.Time { + return b[0].ModTime() +} + +func (b BalancedObjs) CreateTime() time.Time { + return b[0].CreateTime() +} + +func (b BalancedObjs) IsDir() bool { + return b[0].IsDir() +} + +func (b BalancedObjs) GetHash() utils.HashInfo { + return b[0].GetHash() +} + +func (b BalancedObjs) GetName() string { + return b[0].GetName() +} + +func (b BalancedObjs) GetPath() string { + return b[0].GetPath() +} + +func (b BalancedObjs) GetID() string { + return b[0].GetID() +} + +func (b BalancedObjs) Unwrap() model.Obj { + return b[0] +} + +var _ model.Obj = (BalancedObjs)(nil) + +type tempObj struct{ model.Object } diff --git a/drivers/alias/util.go b/drivers/alias/util.go index bc66b0d3f..23b10e994 100644 --- a/drivers/alias/util.go +++ b/drivers/alias/util.go @@ -2,7 +2,7 @@ package alias import ( "context" - "errors" + "math/rand" stdpath "path" "strings" "time" @@ -13,6 +13,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/server/common" + "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -26,14 +27,15 @@ func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model detailsChan := make(chan detailWithIndex, len(d.pathMap)) workerCount := 0 for _, k := range d.rootOrder { - obj := model.Object{ + obj := &model.Object{ Name: k, Path: "/" + k, IsFolder: true, Modified: d.Modified, + Mask: model.Locked | model.Virtual, } idx := len(objs) - objs = append(objs, model.ObjAddMask(&obj, model.Virtual)) + objs = append(objs, obj) v := d.pathMap[k] if !withDetails || len(v) != 1 { continue @@ -42,6 +44,7 @@ func (d *Alias) listRoot(ctx 
context.Context, withDetails, refresh bool) []model if err != nil { continue } + obj.Modified = remoteDriver.GetStorage().Modified _, ok := remoteDriver.(driver.WithDetails) if !ok { continue @@ -78,26 +81,22 @@ func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model // do others that not defined in Driver interface func getPair(path string) (string, string) { - // path = strings.TrimSpace(path) - if strings.Contains(path, ":") { - pair := strings.SplitN(path, ":", 2) - if !strings.Contains(pair[0], "/") { - return pair[0], pair[1] - } + if name, path, ok := strings.Cut(path, ":"); ok && !strings.Contains(name, "/") { + return name, path } return stdpath.Base(path), path } -func (d *Alias) getRootAndPath(path string) (string, string) { - if d.autoFlatten { - return d.oneKey, path +func (d *Alias) getRootsAndPath(path string) (roots []string, sub string) { + if len(d.rootOrder) == 1 { + return d.pathMap[d.rootOrder[0]], path } path = strings.TrimPrefix(path, "/") - parts := strings.SplitN(path, "/", 2) - if len(parts) == 1 { - return parts[0], "" + before, after, ok := strings.Cut(path, "/") + if !ok { + return d.pathMap[path], "" } - return parts[0], parts[1] + return d.pathMap[before], after } func (d *Alias) link(ctx context.Context, reqPath string, args model.LinkArgs) (*model.Link, model.Obj, error) { @@ -105,59 +104,350 @@ func (d *Alias) link(ctx context.Context, reqPath string, args model.LinkArgs) ( if err != nil { return nil, nil, err } - if !args.Redirect { - return op.Link(ctx, storage, reqActualPath, args) + if args.Redirect && common.ShouldProxy(storage, stdpath.Base(reqPath)) { + return nil, nil, nil } - obj, err := fs.Get(ctx, reqPath, &fs.GetArgs{NoLog: true}) - if err != nil { - return nil, nil, err + return op.Link(ctx, storage, reqActualPath, args) +} + +func isConsistent(a, b model.Obj) bool { + if a.GetSize() != b.GetSize() { + return false } - if common.ShouldProxy(storage, stdpath.Base(reqPath)) { - return nil, obj, nil + for ht, v := range a.GetHash().All() { + ah := b.GetHash().GetHash(ht) + if ah != "" && ah != v { + return false + } } - return op.Link(ctx, storage, reqActualPath, args) + return true } -func (d *Alias) getReqPath(ctx context.Context, obj model.Obj, isParent bool) ([]*string, error) { - root, sub := d.getRootAndPath(obj.GetPath()) - if sub == "" && !isParent { - return nil, errs.NotSupport +func (d *Alias) getAllObjs(ctx context.Context, bObj model.Obj, ifContinue func(err error) (bool, error)) (BalancedObjs, error) { + objs := bObj.(BalancedObjs) + length := 0 + for _, o := range objs { + var err error + var obj model.Obj + temp, isTemp := o.(*tempObj) + if isTemp { + obj, err = fs.Get(ctx, o.GetPath(), &fs.GetArgs{NoLog: true}) + if err == nil { + if !bObj.IsDir() { + if obj.IsDir() { + err = errs.NotFile + } else if d.FileConsistencyCheck && !isConsistent(bObj, obj) { + err = errs.ObjectNotFound + } + } else if !obj.IsDir() { + err = errs.NotFolder + } + } + } else if o == nil { + err = errs.ObjectNotFound + } + + cont, err := ifContinue(err) + if err != nil { + if cont { + continue + } + return nil, err + } + if isTemp { + objRes := temp.Object + // objRes.Name = obj.GetName() + // objRes.Size = obj.GetSize() + // objRes.Modified = obj.ModTime() + // objRes.HashInfo = obj.GetHash() + objs[length] = &objRes + } else { + objs[length] = o + } + length++ + if !cont { + break + } } - dsts, ok := d.pathMap[root] - all := true - if !ok { + if length == 0 { return nil, errs.ObjectNotFound } - var reqPath []*string - for _, dst 
:= range dsts { - path := stdpath.Join(dst, sub) - _, err := fs.Get(ctx, path, &fs.GetArgs{NoLog: true}) + return objs[:length], nil +} + +func (d *Alias) getBalancedPath(ctx context.Context, file model.Obj) string { + if d.ReadConflictPolicy == FirstRWP { + return file.GetPath() + } + files := file.(BalancedObjs) + if rand.Intn(len(files)) == 0 { + return file.GetPath() + } + files, _ = d.getAllObjs(ctx, file, getWriteAndPutFilterFunc(AllRWP)) + return files[rand.Intn(len(files))].GetPath() +} + +func getWriteAndPutFilterFunc(policy string) func(error) (bool, error) { + if policy == AllRWP { + return func(err error) (bool, error) { + return true, err + } + } + all := true + l := 0 + return func(err error) (bool, error) { if err != nil { + switch policy { + case AllStrictWP: + return false, ErrSamePathLeak + case DeterministicOrAllWP: + if l >= 2 { + return false, ErrSamePathLeak + } + } all = false - if d.ProtectSameName && d.ParallelWrite && len(reqPath) >= 2 { - return nil, errs.NotImplement + } else { + switch policy { + case FirstRWP: + return false, nil + case DeterministicWP: + if l > 0 { + return false, ErrPathConflict + } + case DeterministicOrAllWP: + if l > 0 && !all { + return false, ErrSamePathLeak + } } + l += 1 + } + return true, err + } +} + +func (d *Alias) getWriteObjs(ctx context.Context, obj model.Obj) (BalancedObjs, error) { + if d.WriteConflictPolicy == DisabledWP { + return nil, errs.PermissionDenied + } + return d.getAllObjs(ctx, obj, getWriteAndPutFilterFunc(d.WriteConflictPolicy)) +} + +func (d *Alias) getPutObjs(ctx context.Context, obj model.Obj) (BalancedObjs, error) { + if d.PutConflictPolicy == DisabledWP { + return nil, errs.PermissionDenied + } + objs, err := d.getAllObjs(ctx, obj, getWriteAndPutFilterFunc(d.PutConflictPolicy)) + if err != nil { + return nil, err + } + strict := false + switch d.PutConflictPolicy { + case RandomBalancedRP: + ri := rand.Intn(len(objs)) + return objs[ri : ri+1], nil + case BalancedByQuotaStrictP: + strict = true + fallthrough + case BalancedByQuotaP: + objs, ok := getRandomObjByQuotaBalanced(ctx, objs, strict, uint64(obj.GetSize())) + if !ok { + return nil, ErrNoEnoughSpace + } + return objs, nil + default: + return objs, nil + } +} + +func getRandomObjByQuotaBalanced(ctx context.Context, reqPath BalancedObjs, strict bool, objSize uint64) (BalancedObjs, bool) { + // Get all space + details := make([]*model.StorageDetails, len(reqPath)) + detailsChan := make(chan detailWithIndex, len(reqPath)) + workerCount := 0 + for i, p := range reqPath { + s, err := fs.GetStorage(p.GetPath(), &fs.GetStoragesArgs{}) + if err != nil { continue } - if !d.ProtectSameName && !d.ParallelWrite { - return []*string{&path}, nil + if _, ok := s.(driver.WithDetails); !ok { + continue } - reqPath = append(reqPath, &path) - if d.ProtectSameName && !d.ParallelWrite && len(reqPath) >= 2 { - return nil, errs.NotImplement + workerCount++ + go func(dri driver.Driver, i int) { + d, e := op.GetStorageDetails(ctx, dri) + if e != nil { + if !errors.Is(e, errs.NotImplement) && !errors.Is(e, errs.StorageNotInit) { + log.Errorf("failed get %s storage details: %+v", dri.GetStorage().MountPath, e) + } + } + detailsChan <- detailWithIndex{idx: i, val: d} + }(s, i) + } + for workerCount > 0 { + select { + case r := <-detailsChan: + details[r.idx] = r.val + workerCount-- + case <-time.After(time.Second): + workerCount = 0 } - if d.ProtectSameName && d.ParallelWrite && len(reqPath) >= 2 && !all { - return nil, errs.NotImplement + } + + // Try select one that has space 
info + selected, ok := selectRandom(details, func(d *model.StorageDetails) uint64 { + if d == nil || d.FreeSpace < objSize { + return 0 + } + return d.FreeSpace + }) + if !ok { + if strict { + return nil, false + } else { + // No strict mode, return any of non-details ones + noDetails := make([]int, 0, len(details)) + for i, d := range details { + if d == nil { + noDetails = append(noDetails, i) + } + } + if len(noDetails) == 0 { + return nil, false + } + selected = noDetails[rand.Intn(len(noDetails))] } } - if len(reqPath) == 0 { - return nil, errs.ObjectNotFound + return reqPath[selected : selected+1], true +} + +func selectRandom[Item any](arr []Item, getWeight func(Item) uint64) (int, bool) { + var totalWeight uint64 = 0 + for _, i := range arr { + totalWeight += getWeight(i) + } + if totalWeight == 0 { + return 0, false + } + r := rand.Uint64() % totalWeight + for i, item := range arr { + w := getWeight(item) + if r < w { + return i, true + } + r -= w + } + return 0, false +} + +func (d *Alias) getCopyObjs(ctx context.Context, srcObj, dstDir model.Obj) (BalancedObjs, BalancedObjs, error) { + if d.PutConflictPolicy == DisabledWP { + return nil, nil, errs.PermissionDenied + } + dstObjs, err := d.getAllObjs(ctx, dstDir, getWriteAndPutFilterFunc(d.PutConflictPolicy)) + if err != nil { + return nil, nil, err + } + dstStorageMap := make(map[string][]model.Obj) + allocatingDst := make(map[model.Obj]struct{}) + for _, o := range dstObjs { + storage, e := fs.GetStorage(o.GetPath(), &fs.GetStoragesArgs{}) + if e != nil { + return nil, nil, errors.WithMessagef(e, "cannot copy to virtual path [%s]", o.GetPath()) + } + mp := storage.GetStorage().MountPath + dstStorageMap[mp] = append(dstStorageMap[mp], o) + allocatingDst[o] = struct{}{} + } + tmpSrcObjs, err := d.getAllObjs(ctx, srcObj, getWriteAndPutFilterFunc(AllRWP)) + if err != nil { + return nil, nil, err + } + srcObjs := make(BalancedObjs, 0, len(dstObjs)) + for _, src := range tmpSrcObjs { + storage, e := fs.GetStorage(src.GetPath(), &fs.GetStoragesArgs{}) + if e != nil { + continue + } + mp := storage.GetStorage().MountPath + if tmp, ok := dstStorageMap[mp]; ok { + for _, dst := range tmp { + dstObjs[len(srcObjs)] = dst + srcObjs = append(srcObjs, src) + delete(allocatingDst, dst) + } + delete(dstStorageMap, mp) + } + } + dstObjs = dstObjs[:len(srcObjs)] + for dst := range allocatingDst { + src := tmpSrcObjs[0] + if d.ReadConflictPolicy == RandomBalancedRP || d.ReadConflictPolicy == AllRWP { + src = tmpSrcObjs[rand.Intn(len(tmpSrcObjs))] + } + srcObjs = append(srcObjs, src) + dstObjs = append(dstObjs, dst) + } + return srcObjs, dstObjs, nil +} + +func (d *Alias) getMoveObjs(ctx context.Context, srcObj, dstDir model.Obj) (BalancedObjs, BalancedObjs, error) { + if d.PutConflictPolicy == DisabledWP { + return nil, nil, errs.PermissionDenied + } + dstObjs, err := d.getAllObjs(ctx, dstDir, getWriteAndPutFilterFunc(d.PutConflictPolicy)) + if err != nil { + return nil, nil, err + } + tmpSrcObjs, err := d.getAllObjs(ctx, srcObj, getWriteAndPutFilterFunc(AllRWP)) + if err != nil { + return nil, nil, err + } + if len(tmpSrcObjs) < len(dstObjs) { + return nil, nil, ErrNotEnoughSrcObjs + } + dstStorageMap := make(map[string][]model.Obj) + allocatingDst := make(map[model.Obj]struct{}) + for _, o := range dstObjs { + storage, e := fs.GetStorage(o.GetPath(), &fs.GetStoragesArgs{}) + if e != nil { + return nil, nil, errors.WithMessagef(e, "cannot move to virtual path [%s]", o.GetPath()) + } + mp := storage.GetStorage().MountPath + dstStorageMap[mp] = 
append(dstStorageMap[mp], o) + allocatingDst[o] = struct{}{} + } + srcObjs := make(BalancedObjs, 0, len(tmpSrcObjs)) + restSrcObjs := make(BalancedObjs, 0, len(tmpSrcObjs)-len(dstObjs)) + for _, src := range tmpSrcObjs { + storage, e := fs.GetStorage(src.GetPath(), &fs.GetStoragesArgs{}) + if e != nil { + continue + } + mp := storage.GetStorage().MountPath + if tmp, ok := dstStorageMap[mp]; ok { + dst := tmp[0] + if len(tmp) == 1 { + delete(dstStorageMap, mp) + } else { + dstStorageMap[mp] = tmp[1:] + } + dstObjs[len(srcObjs)] = dst + srcObjs = append(srcObjs, src) + delete(allocatingDst, dst) + } else { + restSrcObjs = append(restSrcObjs, src) + } + } + dstObjs = dstObjs[:len(srcObjs)] + // len(restSrcObjs) >= len(allocatingDst) + srcObjs = append(srcObjs, restSrcObjs...) + for dst := range allocatingDst { + dstObjs = append(dstObjs, dst) } - return reqPath, nil + return srcObjs, dstObjs, nil } -func (d *Alias) getArchiveMeta(ctx context.Context, dst, sub string, args model.ArchiveArgs) (model.ArchiveMeta, error) { - reqPath := stdpath.Join(dst, sub) +func (d *Alias) getArchiveMeta(ctx context.Context, reqPath string, args model.ArchiveArgs) (model.ArchiveMeta, error) { storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath) if err != nil { return nil, err @@ -171,8 +461,7 @@ func (d *Alias) getArchiveMeta(ctx context.Context, dst, sub string, args model. return nil, errs.NotImplement } -func (d *Alias) listArchive(ctx context.Context, dst, sub string, args model.ArchiveInnerArgs) ([]model.Obj, error) { - reqPath := stdpath.Join(dst, sub) +func (d *Alias) listArchive(ctx context.Context, reqPath string, args model.ArchiveInnerArgs) ([]model.Obj, error) { storage, reqActualPath, err := op.GetStorageAndActualPath(reqPath) if err != nil { return nil, err diff --git a/drivers/chunk/driver.go b/drivers/chunk/driver.go index 60c5790fe..b544391dd 100644 --- a/drivers/chunk/driver.go +++ b/drivers/chunk/driver.go @@ -270,17 +270,13 @@ func (d *Chunk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( // 检查0号块不等于-1 以支持空文件 // 如果块数量大于1 最后一块不可能为0 // 只检查中间块是否有0 - for i, l := 0, len(chunkFile.chunkSizes)-2; ; i++ { - if i == 0 { - if chunkFile.chunkSizes[i] == -1 { - return nil, fmt.Errorf("chunk part[%d] are missing", i) - } - } else if chunkFile.chunkSizes[i] == 0 { + if chunkFile.chunkSizes[0] == -1 { + return nil, fmt.Errorf("chunk part[%d] are missing", 0) + } + for i, l := 1, len(chunkFile.chunkSizes)-1; i < l; i++ { + if chunkFile.chunkSizes[i] == 0 { return nil, fmt.Errorf("chunk part[%d] are missing", i) } - if i >= l { - break - } } fileSize := chunkFile.GetSize() mergedRrf := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { diff --git a/drivers/crypt/driver.go b/drivers/crypt/driver.go index 47ac0c672..9e8b5c5c7 100644 --- a/drivers/crypt/driver.go +++ b/drivers/crypt/driver.go @@ -136,7 +136,6 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([ if !d.ShowHidden && strings.HasPrefix(name, ".") { continue } - mask &^= model.Temp objRes := &model.Object{ Path: stdpath.Join(remoteFullPath, obj.GetName()), Name: name, @@ -144,10 +143,11 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([ Modified: obj.ModTime(), IsFolder: obj.IsDir(), Ctime: obj.CreateTime(), + Mask: mask &^ model.Temp, // discarding hash as it's encrypted } if !d.Thumbnail || !strings.HasPrefix(args.ReqPath, "/") { - result = append(result, model.ObjAddMask(objRes, mask)) + result = append(result, objRes) 
continue } thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp") @@ -155,12 +155,12 @@ func (d *Crypt) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([ common.GetApiUrl(ctx), utils.EncodePath(thumbPath, true), sign.Sign(thumbPath)) - result = append(result, model.ObjAddMask(&model.ObjThumb{ + result = append(result, &model.ObjThumb{ Object: *objRes, Thumbnail: model.Thumbnail{ Thumbnail: thumb, }, - }, mask)) + }) } return result, nil @@ -196,8 +196,7 @@ func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) { size := remoteObj.GetSize() name := remoteObj.GetName() - mask := model.GetObjMask(remoteObj) - mask &^= model.Temp + mask := model.GetObjMask(remoteObj) &^ model.Temp if mask&model.Virtual == 0 { if !remoteObj.IsDir() { decryptedSize, err := d.cipher.DecryptedSize(size) @@ -221,15 +220,15 @@ func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) { } } } - obj := &model.Object{ + return &model.Object{ Path: remoteFullPath, Name: name, Size: size, Modified: remoteObj.ModTime(), IsFolder: remoteObj.IsDir(), Ctime: remoteObj.CreateTime(), - } - return model.ObjAddMask(obj, mask), nil + Mask: mask, + }, nil } // https://github.com/rclone/rclone/blob/v1.67.0/backend/crypt/cipher.go#L37 diff --git a/drivers/strm/hook.go b/drivers/strm/hook.go index 8c69a35b0..8e6d1c43a 100644 --- a/drivers/strm/hook.go +++ b/drivers/strm/hook.go @@ -24,7 +24,7 @@ var strmTrie = patricia.NewTrie() func UpdateLocalStrm(ctx context.Context, path string, objs []model.Obj) { path = utils.FixAndCleanPath(path) updateLocal := func(driver *Strm, basePath string, objs []model.Obj) { - relParent := strings.TrimPrefix(basePath, driver.MountPath) + relParent := strings.TrimPrefix(basePath, utils.GetActualMountPath(driver.MountPath)) localParentPath := stdpath.Join(driver.SaveStrmLocalPath, relParent) for _, obj := range objs { localPath := stdpath.Join(localParentPath, obj.GetName()) diff --git a/internal/bootstrap/patch.go b/internal/bootstrap/patch.go index 1e76190c8..e8baeeac9 100644 --- a/internal/bootstrap/patch.go +++ b/internal/bootstrap/patch.go @@ -2,7 +2,6 @@ package bootstrap import ( "fmt" - "strings" "github.com/OpenListTeam/OpenList/v4/internal/bootstrap/patch" diff --git a/internal/bootstrap/patch/all.go b/internal/bootstrap/patch/all.go index e85dd55cc..d5b94cb4b 100644 --- a/internal/bootstrap/patch/all.go +++ b/internal/bootstrap/patch/all.go @@ -4,6 +4,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/bootstrap/patch/v3_24_0" "github.com/OpenListTeam/OpenList/v4/internal/bootstrap/patch/v3_32_0" "github.com/OpenListTeam/OpenList/v4/internal/bootstrap/patch/v3_41_0" + "github.com/OpenListTeam/OpenList/v4/internal/bootstrap/patch/v4_1_8" ) type VersionPatches struct { @@ -32,4 +33,10 @@ var UpgradePatches = []VersionPatches{ v3_41_0.GrantAdminPermissions, }, }, + { + Version: "v4.1.8", + Patches: []func(){ + v4_1_8.FixAliasConfig, + }, + }, } diff --git a/internal/bootstrap/patch/v4_1_8/alias.go b/internal/bootstrap/patch/v4_1_8/alias.go new file mode 100644 index 000000000..e41742654 --- /dev/null +++ b/internal/bootstrap/patch/v4_1_8/alias.go @@ -0,0 +1,81 @@ +package v4_1_8 + +import ( + "github.com/OpenListTeam/OpenList/v4/internal/db" + "github.com/OpenListTeam/OpenList/v4/pkg/utils" +) + +// FixAliasConfig upgrade the old version of the Addition of the Alias driver +func FixAliasConfig() { + storages, _, err := db.GetStorages(1, -1) + if err != nil { + utils.Log.Errorf("[FixAliasConfig] failed to get storages: %s", 
err.Error()) + return + } + for _, s := range storages { + if s.Driver != "Alias" { + continue + } + addition := make(map[string]any) + err = utils.Json.UnmarshalFromString(s.Addition, &addition) + if err != nil { + utils.Log.Errorf("[FixAliasConfig] failed to unmarshal addition of [%d]%s: %s", s.ID, s.MountPath, err.Error()) + continue + } + if _, ok := addition["read_conflict_policy"]; ok { + utils.Log.Infof("[FixAliasConfig] skip fixing [%d]%s because the addition already has \"read_conflict_policy\" key", s.ID, s.MountPath) + continue + } + var protectSameName, parallelWrite, writable bool + protectSameNameAny, ok := addition["protect_same_name"] + if ok { + delete(addition, "protect_same_name") + protectSameName, ok = protectSameNameAny.(bool) + } + if !ok { + protectSameName = false + } + parallelWriteAny, ok := addition["parallel_write"] + if ok { + delete(addition, "parallel_write") + parallelWrite, ok = parallelWriteAny.(bool) + } + if !ok { + parallelWrite = false + } + writableAny, ok := addition["writable"] + if ok { + delete(addition, "writable") + writable, ok = writableAny.(bool) + } + if !ok { + writable = false + } + if !writable { + addition["write_conflict_policy"] = "disabled" + addition["put_conflict_policy"] = "disabled" + } else if !protectSameName && !parallelWrite { + addition["write_conflict_policy"] = "first" + addition["put_conflict_policy"] = "first" + } else if protectSameName && !parallelWrite { + addition["write_conflict_policy"] = "deterministic" + addition["put_conflict_policy"] = "deterministic" + } else if !protectSameName && parallelWrite { + addition["write_conflict_policy"] = "all" + addition["put_conflict_policy"] = "all" + } else { + addition["write_conflict_policy"] = "deterministic_or_all" + addition["put_conflict_policy"] = "deterministic_or_all" + } + addition["read_conflict_policy"] = "first" + s.Addition, err = utils.Json.MarshalToString(addition) + if err != nil { + utils.Log.Errorf("[FixAliasConfig] failed to marshal addition of [%d]%s: %s", s.ID, s.MountPath, err.Error()) + continue + } + err = db.UpdateStorage(&s) + if err != nil { + utils.Log.Errorf("[FixAliasConfig] failed to update storage [%d]%s: %s", s.ID, s.MountPath, err.Error()) + } + } +} diff --git a/internal/fs/get.go b/internal/fs/get.go index 459282ce3..8a920065e 100644 --- a/internal/fs/get.go +++ b/internal/fs/get.go @@ -3,7 +3,6 @@ package fs import ( "context" stdpath "path" - "time" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/op" @@ -15,9 +14,10 @@ func get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) { path = utils.FixAndCleanPath(path) // maybe a virtual file if path != "/" { - virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails, false) + dir, name := stdpath.Split(path) + virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, dir, !args.WithStorageDetails, false, name) for _, f := range virtualFiles { - if f.GetName() == stdpath.Base(path) { + if f.GetName() == name { return f, nil } } @@ -28,9 +28,8 @@ func get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) { if path == "/" { return &model.Object{ Name: "root", - Size: 0, - Modified: time.Time{}, IsFolder: true, + Mask: model.ReadOnly | model.Virtual, }, nil } return nil, errors.WithMessage(err, "failed get storage") diff --git a/internal/fs/list.go b/internal/fs/list.go index fc3b25ab5..1f92c7d46 100644 --- a/internal/fs/list.go +++ b/internal/fs/list.go 
@@ -15,7 +15,7 @@ import ( func list(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) { meta, _ := ctx.Value(conf.MetaKey).(*model.Meta) user, _ := ctx.Value(conf.UserKey).(*model.User) - virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails, args.Refresh) + virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails, args.Refresh, "") storage, actualPath, err := op.GetStorageAndActualPath(path) if err != nil && len(virtualFiles) == 0 { return nil, errors.WithMessage(err, "failed get storage") diff --git a/internal/model/obj.go b/internal/model/obj.go index 74c140d9f..ba7467f74 100644 --- a/internal/model/obj.go +++ b/internal/model/obj.go @@ -238,51 +238,49 @@ func (om *ObjMerge) Reset() { type ObjMask uint8 +func (m ObjMask) GetObjMask() ObjMask { + return m +} + const ( Virtual ObjMask = 1 << iota + NoRename + NoRemove + NoMove + NoCopy + NoWrite Temp ) +const ( + Locked = NoRename | NoRemove | NoMove + ReadOnly = Locked | NoWrite // NoRename | NoDelete | NoMove | NoWrite +) -type maskObj struct { +type ObjWrapMask struct { Obj - mask ObjMask + Mask ObjMask } -func (m *maskObj) Unwrap() Obj { +func (m *ObjWrapMask) Unwrap() Obj { return m.Obj } +func (m *ObjWrapMask) GetObjMask() ObjMask { + return m.Mask +} -func getMaskObj(obj Obj) *maskObj { +func GetObjMask(obj Obj) ObjMask { for { switch o := obj.(type) { - case *maskObj: - return o + case interface{ GetObjMask() ObjMask }: + return o.GetObjMask() case ObjUnwrap: obj = o.Unwrap() default: - return nil + return 0 } } } + func ObjHasMask(obj Obj, mask ObjMask) bool { - if m := getMaskObj(obj); m != nil { - return m.mask&mask == mask - } - return false -} -func ObjAddMask(obj Obj, mask ObjMask) Obj { - if mask == 0 { - return obj - } - if m := getMaskObj(obj); m != nil { - m.mask |= mask - return obj - } - return &maskObj{Obj: obj, mask: mask} -} -func GetObjMask(obj Obj) ObjMask { - if m := getMaskObj(obj); m != nil { - return m.mask - } - return 0 + return GetObjMask(obj)&mask != 0 } diff --git a/internal/model/object.go b/internal/model/object.go index 8e5cdf047..b6cb0d7f7 100644 --- a/internal/model/object.go +++ b/internal/model/object.go @@ -28,6 +28,7 @@ type Object struct { Ctime time.Time // file create time IsFolder bool HashInfo utils.HashInfo + Mask ObjMask } func (o *Object) GetName() string { @@ -68,6 +69,10 @@ func (o *Object) GetHash() utils.HashInfo { return o.HashInfo } +func (o *Object) GetObjMask() ObjMask { + return o.Mask +} + type Thumbnail struct { Thumbnail string } diff --git a/internal/op/cache.go b/internal/op/cache.go index 75b3c60c0..d8d32a74b 100644 --- a/internal/op/cache.go +++ b/internal/op/cache.go @@ -8,6 +8,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/cache" "github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/pkg/utils" ) type CacheManager struct { @@ -32,28 +33,7 @@ func NewCacheManager() *CacheManager { var Cache = NewCacheManager() func Key(storage driver.Driver, path string) string { - return stdpath.Join(storage.GetStorage().MountPath, path) -} - -// update object in dirCache. -// if it's a directory, remove all its children from dirCache too. -// if it's a file, remove its link from linkCache. 
-func (cm *CacheManager) updateDirectoryObject(storage driver.Driver, dirPath string, oldObj model.Obj, newObj model.Obj) { - key := Key(storage, dirPath) - if !oldObj.IsDir() { - cm.linkCache.DeleteKey(stdpath.Join(key, oldObj.GetName())) - cm.linkCache.DeleteKey(stdpath.Join(key, newObj.GetName())) - } - if storage.Config().NoCache { - return - } - - if cache, exist := cm.dirCache.Get(key); exist { - if oldObj.IsDir() { - cm.deleteDirectoryTree(stdpath.Join(key, oldObj.GetName())) - } - cache.UpdateObject(oldObj.GetName(), newObj) - } + return utils.GetFullPath(storage.GetStorage().MountPath, path) } // recursively delete directory and its children from dirCache @@ -153,15 +133,15 @@ func (cm *CacheManager) SetStorageDetails(storage driver.Driver, details *model. return } expiration := time.Minute * time.Duration(storage.GetStorage().CacheExpiration) - cm.detailCache.SetWithTTL(storage.GetStorage().MountPath, details, expiration) + cm.detailCache.SetWithTTL(utils.GetActualMountPath(storage.GetStorage().MountPath), details, expiration) } func (cm *CacheManager) GetStorageDetails(storage driver.Driver) (*model.StorageDetails, bool) { - return cm.detailCache.Get(storage.GetStorage().MountPath) + return cm.detailCache.Get(utils.GetActualMountPath(storage.GetStorage().MountPath)) } func (cm *CacheManager) InvalidateStorageDetails(storage driver.Driver) { - cm.detailCache.Delete(storage.GetStorage().MountPath) + cm.detailCache.Delete(utils.GetActualMountPath(storage.GetStorage().MountPath)) } // clears all caches diff --git a/internal/op/fs.go b/internal/op/fs.go index ddfedccb8..5116bbef5 100644 --- a/internal/op/fs.go +++ b/internal/op/fs.go @@ -86,19 +86,19 @@ func list(ctx context.Context, storage driver.Driver, path string, args model.Li if len(customCachePolicies) > 0 { configPolicies := strings.Split(customCachePolicies, "\n") for _, configPolicy := range configPolicies { - policy := strings.Split(strings.TrimSpace(configPolicy), ":") - if len(policy) != 2 { + pattern, ttlstr, ok := strings.Cut(strings.TrimSpace(configPolicy), ":") + if !ok { log.Warnf("Malformed custom cache policy entry: %s in storage %s for path %s. 
Expected format: pattern:ttl", configPolicy, storage.GetStorage().MountPath, path) continue } - if match, err1 := doublestar.Match(policy[0], path); err1 != nil { - log.Warnf("Invalid glob pattern in custom cache policy: %s, error: %v", policy[0], err1) + if match, err1 := doublestar.Match(pattern, path); err1 != nil { + log.Warnf("Invalid glob pattern in custom cache policy: %s, error: %v", pattern, err1) continue } else if !match { continue } - if configTtl, err1 := strconv.ParseInt(policy[1], 10, 64); err1 == nil { + if configTtl, err1 := strconv.ParseInt(ttlstr, 10, 64); err1 == nil { ttl = int(configTtl) break } @@ -142,19 +142,21 @@ func Get(ctx context.Context, storage driver.Driver, path string, excludeTempObj } return rootObj, nil } - switch r := storage.GetAddition().(type) { + switch r := storage.(type) { case driver.IRootId: return &model.Object{ ID: r.GetRootId(), Name: RootName, Modified: storage.GetStorage().Modified, IsFolder: true, + Mask: model.Locked, }, nil case driver.IRootPath: return &model.Object{ Path: r.GetRootPath(), Name: RootName, Modified: storage.GetStorage().Modified, + Mask: model.Locked, IsFolder: true, }, nil } @@ -233,10 +235,10 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li mode = storage.(driver.LinkCacheModeResolver).ResolveLinkCacheMode(path) } typeKey := args.Type - if mode&driver.LinkCacheIP == driver.LinkCacheIP { + if mode&driver.LinkCacheIP != 0 { typeKey += "/" + args.IP } - if mode&driver.LinkCacheUA == driver.LinkCacheUA { + if mode&driver.LinkCacheUA != 0 { typeKey += "/" + args.Header.Get("User-Agent") } key := Key(storage, path) @@ -328,6 +330,9 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string) error { if err != nil { return nil, errors.WithMessagef(err, "failed to get parent dir [%s]", parentPath) } + if model.ObjHasMask(parentDir, model.NoWrite) { + return nil, errors.WithStack(errs.PermissionDenied) + } var newObj model.Obj switch s := storage.(type) { @@ -347,12 +352,13 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string) error { if dirCache, exist := Cache.dirCache.Get(Key(storage, parentPath)); exist { if newObj == nil { t := time.Now() - newObj = model.ObjAddMask(&model.Object{ + newObj = &model.Object{ Name: dirName, IsFolder: true, Modified: t, Ctime: t, - }, model.Temp) + Mask: model.Temp, + } } dirCache.UpdateObject("", wrapObjName(storage, newObj)) } @@ -378,11 +384,17 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string if err != nil { return errors.WithMessage(err, "failed to get src object") } + if model.ObjHasMask(srcRawObj, model.NoMove) { + return errors.WithStack(errs.PermissionDenied) + } srcObj := model.UnwrapObjName(srcRawObj) dstDir, err := GetUnwrap(ctx, storage, dstDirPath) if err != nil { return errors.WithMessage(err, "failed to get dst dir") } + if model.ObjHasMask(dstDir, model.NoWrite) { + return errors.WithStack(errs.PermissionDenied) + } var newObj model.Obj switch s := storage.(type) { @@ -412,9 +424,11 @@ func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string } if cache, exist := Cache.dirCache.Get(dstKey); exist { if newObj == nil { - newObj = model.ObjAddMask(srcObj, model.Temp) + newObj = &model.ObjWrapMask{Obj: srcRawObj, Mask: model.Temp} + } else { + newObj = wrapObjName(storage, newObj) } - cache.UpdateObject(srcRawObj.GetName(), wrapObjName(storage, newObj)) + cache.UpdateObject(srcRawObj.GetName(), newObj) } } @@ -441,6 +455,9 @@ func Rename(ctx context.Context, 
@@ -441,6 +455,9 @@ func Rename(ctx context.Context, storage driver.Driver, srcPath, dstName string)
 	if err != nil {
 		return errors.WithMessage(err, "failed to get src object")
 	}
+	if model.ObjHasMask(srcRawObj, model.NoRename) {
+		return errors.WithStack(errs.PermissionDenied)
+	}
 	srcObj := model.UnwrapObjName(srcRawObj)
 	var newObj model.Obj
@@ -455,10 +472,24 @@ func Rename(ctx context.Context, storage driver.Driver, srcPath, dstName string)
 	if err != nil {
 		return errors.WithStack(err)
 	}
-	if newObj == nil {
-		newObj = model.ObjAddMask(&model.ObjWrapName{Name: dstName, Obj: srcObj}, model.Temp)
+
+	dirKey := Key(storage, stdpath.Dir(srcPath))
+	if !srcRawObj.IsDir() {
+		Cache.linkCache.DeleteKey(stdpath.Join(dirKey, srcRawObj.GetName()))
+		Cache.linkCache.DeleteKey(stdpath.Join(dirKey, dstName))
+	}
+	if !storage.Config().NoCache {
+		if cache, exist := Cache.dirCache.Get(dirKey); exist {
+			if srcRawObj.IsDir() {
+				Cache.deleteDirectoryTree(stdpath.Join(dirKey, srcRawObj.GetName()))
+			}
+			if newObj == nil {
+				newObj = &model.ObjWrapMask{Obj: &model.ObjWrapName{Name: dstName, Obj: srcObj}, Mask: model.Temp}
+			}
+			newObj = wrapObjName(storage, newObj)
+			cache.UpdateObject(srcRawObj.GetName(), newObj)
+		}
 	}
-	Cache.updateDirectoryObject(storage, stdpath.Dir(srcPath), srcRawObj, wrapObjName(storage, newObj))
 
 	if ctx.Value(conf.SkipHookKey) != nil || !needHandleObjsUpdateHook() {
 		return nil
@@ -486,11 +517,17 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
 	if err != nil {
 		return errors.WithMessage(err, "failed to get src object")
 	}
+	// if model.ObjHasMask(srcRawObj, model.NoCopy) {
+	// 	return errors.WithStack(errs.PermissionDenied)
+	// }
 	srcObj := model.UnwrapObjName(srcRawObj)
 	dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
 	if err != nil {
 		return errors.WithMessage(err, "failed to get dst dir")
 	}
+	if model.ObjHasMask(dstDir, model.NoWrite) {
+		return errors.WithStack(errs.PermissionDenied)
+	}
 	var newObj model.Obj
 	switch s := storage.(type) {
@@ -512,9 +549,11 @@ func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string
 	if !storage.Config().NoCache {
 		if cache, exist := Cache.dirCache.Get(dstKey); exist {
 			if newObj == nil {
-				newObj = model.ObjAddMask(srcObj, model.Temp)
+				newObj = &model.ObjWrapMask{Obj: srcRawObj, Mask: model.Temp}
+			} else {
+				newObj = wrapObjName(storage, newObj)
 			}
-			cache.UpdateObject(srcRawObj.GetName(), wrapObjName(storage, newObj))
+			cache.UpdateObject(srcRawObj.GetName(), newObj)
 		}
 	}
@@ -546,6 +585,9 @@ func Remove(ctx context.Context, storage driver.Driver, path string) error {
 		}
 		return errors.WithMessage(err, "failed to get object")
 	}
+	if model.ObjHasMask(rawObj, model.NoRemove) {
+		return errors.WithStack(errs.PermissionDenied)
+	}
 	dirPath := stdpath.Dir(path)
 	switch s := storage.(type) {
@@ -606,6 +648,9 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
 	if err != nil {
 		return errors.WithMessagef(err, "failed to get dir [%s]", dstDirPath)
 	}
+	if model.ObjHasMask(parentDir, model.NoWrite) {
+		return errors.WithStack(errs.PermissionDenied)
+	}
 	// if up is nil, set a default to prevent panic
 	if up == nil {
 		up = func(p float64) {}
 	}
@@ -631,12 +676,13 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
 	if !storage.Config().NoCache {
 		if cache, exist := Cache.dirCache.Get(Key(storage, dstDirPath)); exist {
 			if newObj == nil {
-				newObj = model.ObjAddMask(&model.Object{
+				newObj = &model.Object{
 					Name:     file.GetName(),
 					Size:     file.GetSize(),
 					Modified: file.ModTime(),
 					Ctime:    file.CreateTime(),
-				}, model.Temp)
+					Mask:     model.Temp,
+				}
 			}
 			newObj = wrapObjName(storage, newObj)
 			cache.UpdateObject(newObj.GetName(), newObj)
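The Rename, Copy and Put hunks above all end by seeding the cached directory listing with a Temp-masked placeholder via cache.UpdateObject, so later reads see the new entry before the next real List. A rough sketch of that replace-by-old-name pattern, using simplified stand-in types rather than the real cache implementation:

```go
// Rough sketch of the cache-seeding pattern in the Rename/Copy/Put hunks.
// The obj and dirCache types below are made-up simplifications; the Temp
// field stands in for the model.Temp mask bit.
package main

import "fmt"

type obj struct {
	Name string
	Temp bool // placeholder flag, analogous to the model.Temp mask bit
}

type dirCache struct{ objs []obj }

// updateObject replaces the entry named oldName (or appends if absent),
// mirroring cache.UpdateObject(srcRawObj.GetName(), newObj).
func (c *dirCache) updateObject(oldName string, newObj obj) {
	for i, o := range c.objs {
		if o.Name == oldName {
			c.objs[i] = newObj
			return
		}
	}
	c.objs = append(c.objs, newObj)
}

func main() {
	cache := &dirCache{objs: []obj{{Name: "a.txt"}, {Name: "b.txt"}}}
	// Rename a.txt -> c.txt: the driver returned no object, so seed a placeholder.
	cache.updateObject("a.txt", obj{Name: "c.txt", Temp: true})
	fmt.Println(cache.objs) // [{c.txt true} {b.txt false}]
}
```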
@@ -681,6 +727,9 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
 	if err != nil {
 		return errors.WithMessagef(err, "failed to get dir [%s]", dstDirPath)
 	}
+	if model.ObjHasMask(dstDir, model.NoWrite) {
+		return errors.WithStack(errs.PermissionDenied)
+	}
 	var newObj model.Obj
 	switch s := storage.(type) {
 	case driver.PutURLResult:
@@ -696,11 +745,12 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
 	if cache, exist := Cache.dirCache.Get(Key(storage, dstDirPath)); exist {
 		if newObj == nil {
 			t := time.Now()
-			newObj = model.ObjAddMask(&model.Object{
+			newObj = &model.Object{
 				Name:     dstName,
 				Modified: t,
 				Ctime:    t,
-			}, model.Temp)
+				Mask:     model.Temp,
+			}
 		}
 		newObj = wrapObjName(storage, newObj)
 		cache.UpdateObject(newObj.GetName(), newObj)
diff --git a/internal/op/storage.go b/internal/op/storage.go
index 453fa11f6..20abfa49d 100644
--- a/internal/op/storage.go
+++ b/internal/op/storage.go
@@ -341,16 +341,17 @@ func getStoragesByPath(path string) []driver.Driver {
 // for example, there are: /a/b,/a/c,/a/d/e,/a/b.balance1,/av
 // GetStorageVirtualFilesByPath(/a) => b,c,d
 func GetStorageVirtualFilesByPath(prefix string) []model.Obj {
-	return getStorageVirtualFilesByPath(prefix, func(_ driver.Driver, obj model.Obj) model.Obj {
-		return obj
-	})
+	return getStorageVirtualFilesByPath(prefix, nil, "")
 }
 
-func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails, refresh bool) []model.Obj {
+func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails, refresh bool, filterByName string) []model.Obj {
 	if hideDetails {
-		return GetStorageVirtualFilesByPath(prefix)
+		return getStorageVirtualFilesByPath(prefix, nil, filterByName)
 	}
 	return getStorageVirtualFilesByPath(prefix, func(d driver.Driver, obj model.Obj) model.Obj {
+		if _, ok := obj.(*model.ObjStorageDetails); ok {
+			return obj
+		}
 		ret := &model.ObjStorageDetails{
 			Obj: obj,
 			StorageDetailsWithName: model.StorageDetailsWithName{
@@ -374,10 +375,10 @@ func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string,
 		case <-time.After(time.Second):
 		}
 		return ret
-	})
+	}, filterByName)
 }
 
-func getStorageVirtualFilesByPath(prefix string, rootCallback func(driver.Driver, model.Obj) model.Obj) []model.Obj {
+func getStorageVirtualFilesByPath(prefix string, rootCallback func(driver.Driver, model.Obj) model.Obj, filterByName string) []model.Obj {
 	files := make([]model.Obj, 0)
 	storages := storagesMap.Values()
 	sort.Slice(storages, func(i, j int) bool {
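The storage.go hunks above thread a new filterByName argument through the virtual-file helpers, and the hunk that follows uses it to skip every virtual directory except the requested name. A tiny illustrative sketch of that filtering rule, with plain strings standing in for the real object types:

```go
// Sketch of the name filtering that filterByName enables: when non-empty,
// only the virtual directory with exactly that name is kept. This mirrors the
// `name != filterByName` guard in the following hunk; inputs are simplified.
package main

import "fmt"

func filterVirtualNames(names []string, filterByName string) []string {
	out := make([]string, 0, len(names))
	for _, name := range names {
		if filterByName != "" && name != filterByName {
			continue
		}
		out = append(out, name)
	}
	return out
}

func main() {
	names := []string{"b", "c", "d"}
	fmt.Println(filterVirtualNames(names, ""))  // [b c d]
	fmt.Println(filterVirtualNames(names, "c")) // [c]
}
```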
@@ -387,45 +388,60 @@ func getStorageVirtualFilesByPath(prefix string, rootCallback func(driver.Driver
 		return storages[i].GetStorage().Order < storages[j].GetStorage().Order
 	})
-	prefix = utils.FixAndCleanPath(prefix)
+	if !strings.HasSuffix(prefix, "/") {
+		prefix += "/"
+	}
 	set := make(map[string]int)
 	var wg sync.WaitGroup
 	for _, v := range storages {
-		mountPath := utils.GetActualMountPath(v.GetStorage().MountPath)
 		// Exclude prefix itself and non prefix
-		if len(prefix) >= len(mountPath) || !utils.IsSubPath(prefix, mountPath) {
+		p, found := strings.CutPrefix(utils.GetActualMountPath(v.GetStorage().MountPath), prefix)
+		if !found || p == "" {
+			continue
+		}
+		name, _, found := strings.Cut(p, "/")
+		if filterByName != "" && name != filterByName {
+			continue
+		}
+
+		if idx, ok := set[name]; ok {
+			if !found {
+				files[idx].(*model.Object).Mask = model.Locked | model.Virtual
+				if rootCallback != nil {
+					wg.Add(1)
+					go func() {
+						defer wg.Done()
+						files[idx] = rootCallback(v, files[idx])
+					}()
+				}
+			}
 			continue
 		}
-		names := strings.SplitN(strings.TrimPrefix(mountPath[len(prefix):], "/"), "/", 2)
-		idx, ok := set[names[0]]
-		if !ok {
-			set[names[0]] = len(files)
-			obj := model.ObjAddMask(&model.Object{
-				Name:     names[0],
-				Size:     0,
-				Modified: v.GetStorage().Modified,
-				IsFolder: true,
-			}, model.Virtual)
-			if len(names) == 1 {
-				idx = len(files)
-				files = append(files, obj)
+		set[name] = len(files)
+		obj := &model.Object{
+			Name:     name,
+			Modified: v.GetStorage().Modified,
+			IsFolder: true,
+		}
+		if !found {
+			idx := len(files)
+			obj.Mask = model.Locked | model.Virtual
+			files = append(files, obj)
+			if rootCallback != nil {
 				wg.Add(1)
 				go func() {
 					defer wg.Done()
 					files[idx] = rootCallback(v, files[idx])
 				}()
-			} else {
-				files = append(files, obj)
 			}
-		} else if len(names) == 1 {
-			wg.Add(1)
-			go func() {
-				defer wg.Done()
-				files[idx] = rootCallback(v, files[idx])
-			}()
+		} else {
+			obj.Mask = model.ReadOnly | model.Virtual
+			files = append(files, obj)
 		}
 	}
-	wg.Wait()
+	if rootCallback != nil {
+		wg.Wait()
+	}
 	return files
 }
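For reference, a standalone sketch of the prefix and name extraction the rewritten loop performs with strings.CutPrefix and strings.Cut. The mount paths below are made up, and the helper only mirrors the parsing step, not the mask or callback handling:

```go
// Standalone sketch of the prefix/name extraction in the rewritten loop:
// cut the listing prefix off each mount path, then take the first path
// segment as the virtual directory name; `nested` (the found result of
// strings.Cut) reports whether the mount point sits deeper than one level.
package main

import (
	"fmt"
	"strings"
)

func virtualName(mountPath, prefix string) (name string, nested, ok bool) {
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}
	p, found := strings.CutPrefix(mountPath, prefix)
	if !found || p == "" {
		return "", false, false // the prefix itself, or not under the prefix
	}
	name, _, nested = strings.Cut(p, "/")
	return name, nested, true
}

func main() {
	for _, mp := range []string{"/a/b", "/a/d/e", "/a", "/av"} {
		name, nested, ok := virtualName(mp, "/a")
		fmt.Printf("%-6s -> name=%q nested=%v ok=%v\n", mp, name, nested, ok)
	}
}
```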