From 63a46fe1709639a62769f6cb9429869069621d9e Mon Sep 17 00:00:00 2001 From: YangSen-qn Date: Mon, 18 Nov 2024 17:11:56 +0800 Subject: [PATCH 1/3] add some log --- iqshell/common/flow/flow.go | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/iqshell/common/flow/flow.go b/iqshell/common/flow/flow.go index 558e109a..aa78a88c 100644 --- a/iqshell/common/flow/flow.go +++ b/iqshell/common/flow/flow.go @@ -106,12 +106,16 @@ func (f *Flow) Start() { hasMore, workInfo, err := f.WorkProvider.Provide() log.DebugF("work producer get work, hasMore:%v, workInfo: %+v, err: %+v", hasMore, workInfo, err) if err != nil { + workInfoData := "" + if workInfo != nil { + workInfoData = workInfo.Data + } if err.Code == data.ErrorCodeParamMissing || err.Code == data.ErrorCodeLineHeader { - log.DebugF("work producer get work, skip:%s because:%s", workInfo, err) + log.DebugF("work producer get work, skip:%s because:%s", workInfoData, err) f.notifyWorkSkip(workInfo, nil, err) } else { - log.DebugF("work producer get work fail, error:%s info:%s", err, workInfo) + log.DebugF("work producer get work fail, error:%s info:%s", err, workInfoData) f.notifyWorkFail(workInfo, err) } continue @@ -119,7 +123,7 @@ func (f *Flow) Start() { if workInfo == nil || workInfo.Work == nil { if !hasMore { - log.Debug("work producer get work completed") + log.Info("work producer get work completed") break } else { log.Info("work producer get work fail: work in empty") @@ -197,15 +201,13 @@ func (f *Flow) Start() { } workCount := len(workList) - - log.DebugF("work consumer get works, count:%d", workCount) + log.InfoF("work consumer get works, count:%s", workCount) _ = f.limitAcquire(workCount) // workRecordList 有数据则长度和 workList 长度相同 workRecordList, workErr := worker.DoWork(workList) f.limitRelease(workCount) - - log.DebugF("work consumer handle works, count:%d error:%+v", workCount, workErr) + log.InfoF("work consumer handle works, count:%s error:%+v", workCount, workErr) if 
len(workRecordList) == 0 && workErr != nil { log.ErrorF("Do Worker Error:%+v", workErr) From f1658b380e0d6f85b1ce79f34b47caa2cf5d39a9 Mon Sep 17 00:00:00 2001 From: YangSen-qn Date: Tue, 17 Jun 2025 14:18:16 +0800 Subject: [PATCH 2/3] chore: optimize log --- iqshell/common/flow/flow.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/iqshell/common/flow/flow.go b/iqshell/common/flow/flow.go index aa78a88c..b4f17280 100644 --- a/iqshell/common/flow/flow.go +++ b/iqshell/common/flow/flow.go @@ -123,7 +123,7 @@ func (f *Flow) Start() { if workInfo == nil || workInfo.Work == nil { if !hasMore { - log.Info("work producer get work completed") + log.Debug("work producer get work completed") break } else { log.Info("work producer get work fail: work in empty") @@ -201,13 +201,13 @@ func (f *Flow) Start() { } workCount := len(workList) - log.InfoF("work consumer get works, count:%s", workCount) + log.DebugF("work consumer get works, count:%d", workCount) _ = f.limitAcquire(workCount) // workRecordList 有数据则长度和 workList 长度相同 workRecordList, workErr := worker.DoWork(workList) f.limitRelease(workCount) - log.InfoF("work consumer handle works, count:%s error:%+v", workCount, workErr) + log.DebugF("work consumer handle works, count:%d error:%+v", workCount, workErr) if len(workRecordList) == 0 && workErr != nil { log.ErrorF("Do Worker Error:%+v", workErr) From bbc72643a5a3984e6f6d30329dade6ce9b3ce00c Mon Sep 17 00:00:00 2001 From: YangSen-qn Date: Fri, 21 Nov 2025 13:53:42 +0800 Subject: [PATCH 3/3] download: cdn friendly & get support sign & optimize listbucket2 & support intelligent tiering --- cmd/asyncfetch.go | 2 +- cmd/bucket.go | 2 +- cmd/download.go | 7 + cmd/rs.go | 6 +- cmd/rsbatch.go | 1 + cmd/upload.go | 8 +- cmd_test/bucket_domain_test.go | 5 +- cmd_test/change_type_test.go | 57 ++++---- cmd_test/copy_test.go | 4 +- cmd_test/download_get_test.go | 9 +- cmd_test/download_test.go | 21 --- cmd_test/fetch_test.go | 3 +- docs/abfetch.md | 2 +- 
docs/batchchlifecycle.md | 1 + docs/batchchtype.md | 4 +- docs/chlifecycle.md | 1 + docs/chtype.md | 4 +- docs/fput.md | 2 +- docs/get.md | 2 +- docs/listbucket2.md | 2 +- docs/qdownload.md | 3 +- docs/qdownload2.md | 1 - docs/qupload.md | 2 +- docs/qupload2.md | 2 +- docs/rput.md | 4 +- docs/sync.md | 2 +- go.mod | 2 +- go.sum | 4 +- iqshell/common/flow/flow.go | 14 +- iqshell/common/flow/work_provider_reader.go | 8 +- iqshell/common/utils/file.go | 40 +++-- iqshell/storage/bucket/list.go | 2 +- iqshell/storage/bucket/operations/list.go | 2 +- iqshell/storage/object/download/downloader.go | 1 + .../object/download/operations/batch.go | 13 ++ .../object/download/operations/download.go | 18 ++- iqshell/storage/object/download/url.go | 11 ++ iqshell/storage/object/lifecycle.go | 18 ++- .../storage/object/operations/lifecycle.go | 40 ++--- iqshell/storage/object/operations/status.go | 7 +- iqshell/storage/object/status.go | 2 + iqshell/storage/object/upload/uploader.go | 3 +- iqshell/storage/servers/all_buckets.go | 138 +++++++++++++++--- .../storage/servers/operations/bucket_list.go | 43 +++--- 44 files changed, 339 insertions(+), 184 deletions(-) diff --git a/cmd/asyncfetch.go b/cmd/asyncfetch.go index 52205341..ab13df2d 100644 --- a/cmd/asyncfetch.go +++ b/cmd/asyncfetch.go @@ -33,7 +33,7 @@ func asyncFetchCmdBuilder(cfg *iqshell.Config) *cobra.Command { cmd.Flags().StringVarP(&info.CallbackBody, "callback-body", "b", "", "callback body") cmd.Flags().StringVarP(&info.CallbackHost, "callback-host", "T", "", "callback HOST") - cmd.Flags().IntVarP(&info.FileType, "file-type", "", 0, "storage type, 0:STANDARD storage, 1:IA storage, 2:ARCHIVE storage, 3:DEEP_ARCHIVE storage, 4:ARCHIVE_IR storage") + cmd.Flags().IntVarP(&info.FileType, "file-type", "", 0, "storage type, 0:STANDARD storage, 1:IA storage, 2:ARCHIVE storage, 3:DEEP_ARCHIVE storage, 4:ARCHIVE_IR storage, 5:INTELLIGENT_TIERING") cmd.Flags().IntVarP(&info.FileType, "storage-type", "g", 0, "storage type, same 
to --file-type") _ = cmd.Flags().MarkDeprecated("storage-type", "use --file-type instead") // 废弃 storage-type diff --git a/cmd/bucket.go b/cmd/bucket.go index 9d985895..a82bb681 100644 --- a/cmd/bucket.go +++ b/cmd/bucket.go @@ -137,7 +137,7 @@ var listBucketCmd2Builder = func(cfg *iqshell.Config) *cobra.Command { cmd.Flags().StringVarP(&info.StartDate, "start", "s", "", "start date with format yyyy-mm-dd-hh-MM-ss, all files will be listed according to the prefix and then filtered.") cmd.Flags().StringVarP(&info.EndDate, "end", "e", "", "end date with format yyyy-mm-dd-hh-MM-ss, all files will be listed according to the prefix and then filtered.") - cmd.Flags().StringVarP(&info.FileTypes, "file-types", "", "", "Specify storage type, separated by comma, all files will be listed according to the prefix and then filtered. 0:STANDARD storage, 1:IA storage, 2:ARCHIVE storage, 3:DEEP_ARCHIVE storage 4:ARCHIVE_IR storage") + cmd.Flags().StringVarP(&info.FileTypes, "file-types", "", "", "Specify storage type, separated by comma, all files will be listed according to the prefix and then filtered. 
0:STANDARD storage, 1:IA storage, 2:ARCHIVE storage, 3:DEEP_ARCHIVE storage, 4:ARCHIVE_IR storage, 5:INTELLIGENT_TIERING") cmd.Flags().StringVarP(&info.FileTypes, "storages", "", "", "Specify storage type, same to --file-types") _ = cmd.Flags().MarkDeprecated("storages", "use --file-types instead") // 废弃 storages diff --git a/cmd/download.go b/cmd/download.go index 2d2987b2..24912ecc 100644 --- a/cmd/download.go +++ b/cmd/download.go @@ -99,6 +99,9 @@ have already in local disk and need to skip download or not.`, cmd.Flags().StringVarP(&LogFile, "log-file", "", "", "the output file of the download log is output to the file specified by record_root by default, and the specific file path can be seen in the terminal output") cmd.Flags().IntVarP(&LogRotate, "log-rotate", "", 7, "the switching period of the download log file, the unit is day,") + // 标记废弃 + _ = cmd.Flags().MarkDeprecated("public", "deprecated") + return cmd } @@ -130,6 +133,10 @@ var getCmdBuilder = func(cfg *iqshell.Config) *cobra.Command { cmd.Flags().Int64VarP(&info.SliceSize, "slice-size", "", 4*utils.MB, "slice size that download using slices. when you use --enable-slice option, the file will be cut into data blocks according to the slice size, then the data blocks will be downloaded concurrently, and finally these data blocks will be spliced into a file. 
Unit: B") cmd.Flags().IntVarP(&info.SliceConcurrentCount, "slice-concurrent-count", "", 10, "the count of concurrently downloaded slices.") cmd.Flags().BoolVarP(&info.RemoveTempWhileError, "remove-temp-while-error", "", false, "remove download temp file while error happened, default is false") + + // 标记废弃 + _ = cmd.Flags().MarkDeprecated("public", "deprecated") + return cmd } diff --git a/cmd/rs.go b/cmd/rs.go index fcd9205d..7ff90fba 100644 --- a/cmd/rs.go +++ b/cmd/rs.go @@ -70,7 +70,7 @@ var deleteCmdBuilder = func(cfg *iqshell.Config) *cobra.Command { var changeLifecycleCmdBuilder = func(cfg *iqshell.Config) *cobra.Command { var info = &operations.ChangeLifecycleInfo{} var cmd = &cobra.Command{ - Use: "chlifecycle [--to-ia-after-days ] [--to-archive-after-days ] [--to-deep-archive-after-days ] [--delete-after-days ]", + Use: "chlifecycle [--to-ia-after-days ] [--to-archive-after-days ] [--to-deep-archive-after-days ] [--to-intelligent-tiering-after-days ] [--delete-after-days ]", Short: "Set the lifecycle of a file.", Long: `Set the lifecycle of a file. Lifecycle value must great than or equal to -1, unit: day. * less than -1: there's no point and it won't trigger any effect @@ -92,6 +92,7 @@ var changeLifecycleCmdBuilder = func(cfg *iqshell.Config) *cobra.Command { cmd.Flags().IntVarP(&info.ToArchiveIRAfterDays, "to-archive-ir-after-days", "", 0, "to ARCHIVE_IR storage after some days. the range is -1 or bigger than 0. -1 means cancel to ARCHIVE_IR storage") cmd.Flags().IntVarP(&info.ToArchiveAfterDays, "to-archive-after-days", "", 0, "to ARCHIVE storage after some days. the range is -1 or bigger than 0. -1 means cancel to ARCHIVE storage") cmd.Flags().IntVarP(&info.ToDeepArchiveAfterDays, "to-deep-archive-after-days", "", 0, "to DEEP_ARCHIVE storage after some days. the range is -1 or bigger than 0. 
-1 means cancel to DEEP_ARCHIVE storage") + cmd.Flags().IntVarP(&info.ToIntelligentTieringAfterDays, "to-intelligent-tiering-after-days", "", 0, "to INTELLIGENT_TIERING storage after some days. the range is -1 or bigger than 0. -1 means cancel to INTELLIGENT_TIERING storage") cmd.Flags().IntVarP(&info.DeleteAfterDays, "delete-after-days", "", 0, "delete after some days. the range is -1 or bigger than 0. -1 means cancel to delete") return cmd } @@ -245,7 +246,8 @@ And 0 means STANDARD storage, while 1 means IA storage, while 2 means ARCHIVE storage. while 3 means DEEP_ARCHIVE storage. -while 4 means ARCHIVE_IR storage.`, +while 4 means ARCHIVE_IR storage. +while 5 means INTELLIGENT_TIERING storage`, Example: `change storage type of A.png(bucket:bucketA key:A.png) to ARCHIVE storage qshell chtype bucketA A.png 2 and you can check result by command: diff --git a/cmd/rsbatch.go b/cmd/rsbatch.go index 56a9cd28..0a86d7b5 100644 --- a/cmd/rsbatch.go +++ b/cmd/rsbatch.go @@ -179,6 +179,7 @@ Lifecycle value must great than or equal to -1, unit: day. cmd.Flags().IntVarP(&info.ToArchiveIRAfterDays, "to-archive-ir-after-days", "", 0, "to ARCHIVE_IR storage after some days. the range is -1 or bigger than 0. -1 means cancel to ARCHIVE_IR storage") cmd.Flags().IntVarP(&info.ToArchiveAfterDays, "to-archive-after-days", "", 0, "to ARCHIVE storage after some days. the range is -1 or bigger than 0. -1 means cancel to ARCHIVE storage") cmd.Flags().IntVarP(&info.ToDeepArchiveAfterDays, "to-deep-archive-after-days", "", 0, "to DEEP_ARCHIVE storage after some days. the range is -1 or bigger than 0. -1 means cancel to DEEP_ARCHIVE storage") + cmd.Flags().IntVarP(&info.ToIntelligentTieringAfterDays, "to-intelligent-tiering-after-days", "", 0, "to INTELLIGENT_TIERING storage after some days. the range is -1 or bigger than 0. -1 means cancel to INTELLIGENT_TIERING storage") cmd.Flags().IntVarP(&info.DeleteAfterDays, "delete-after-days", "", 0, "delete after some days. 
the range is -1 or bigger than 0. -1 means cancel to delete") setBatchCmdDefaultFlags(cmd, &info.BatchInfo) return cmd diff --git a/cmd/upload.go b/cmd/upload.go index 38515d63..b3c8c303 100644 --- a/cmd/upload.go +++ b/cmd/upload.go @@ -96,7 +96,7 @@ var upload2CmdBuilder = func(cfg *iqshell.Config) *cobra.Command { cmd.Flags().StringVar(&LogLevel, "log-level", "debug", "log level") cmd.Flags().IntVar(&LogRotate, "log-rotate", 7, "log rotate days") - cmd.Flags().IntVarP(&info.FileType, "file-type", "", 0, "set storage type of file, 0:STANDARD storage, 1:IA storage, 2:ARCHIVE storage, 3:DEEP_ARCHIVE storage, 4:ARCHIVE_IR storage") + cmd.Flags().IntVarP(&info.FileType, "file-type", "", 0, "set storage type of file, 0:STANDARD storage, 1:IA storage, 2:ARCHIVE storage, 3:DEEP_ARCHIVE storage, 4:ARCHIVE_IR storage, 5:INTELLIGENT_TIERING") cmd.Flags().IntVarP(&info.FileType, "storage", "", 0, "set storage type of file, same to --file-type") _ = cmd.Flags().MarkDeprecated("storage", "use --file-type instead") // 废弃 storage @@ -149,7 +149,7 @@ var syncCmdBuilder = func(cfg *iqshell.Config) *cobra.Command { cmd.Flags().StringVarP(&info.UpHost, "up-host", "u", "", "upload host") cmd.Flags().BoolVarP(&info.Accelerate, "accelerate", "", false, "enable uploading acceleration") - cmd.Flags().IntVarP(&info.FileType, "file-type", "", 0, "set storage type of file, 0:STANDARD storage, 1:IA storage, 2:ARCHIVE storage, 3:DEEP_ARCHIVE storage, 4:ARCHIVE_IR storage") + cmd.Flags().IntVarP(&info.FileType, "file-type", "", 0, "set storage type of file, 0:STANDARD storage, 1:IA storage, 2:ARCHIVE storage, 3:DEEP_ARCHIVE storage, 4:ARCHIVE_IR storage, 5:INTELLIGENT_TIERING") cmd.Flags().IntVarP(&info.FileType, "storage", "s", 0, "set storage type of file, same to --file-type") _ = cmd.Flags().MarkDeprecated("storage", "use --file-type instead") // 废弃 storage @@ -200,7 +200,7 @@ var formUploadCmdBuilder = func(cfg *iqshell.Config) *cobra.Command { cmd.Flags().BoolVarP(&info.Overwrite, 
"overwrite", "", false, "overwrite the file of same key in bucket") cmd.Flags().StringVarP(&info.MimeType, "mimetype", "t", "", "file mime type") - cmd.Flags().IntVarP(&info.FileType, "file-type", "", 0, "set storage type of file, 0:STANDARD storage, 1:IA storage, 2:ARCHIVE storage, 3:DEEP_ARCHIVE storage, 4:ARCHIVE_IR storage") + cmd.Flags().IntVarP(&info.FileType, "file-type", "", 0, "set storage type of file, 0:STANDARD storage, 1:IA storage, 2:ARCHIVE storage, 3:DEEP_ARCHIVE storage, 4:ARCHIVE_IR storage, 5:INTELLIGENT_TIERING") cmd.Flags().IntVarP(&info.FileType, "storage", "s", 0, "set storage type of file, same to --file-type") _ = cmd.Flags().MarkDeprecated("storage", "use --file-type instead") // 废弃 storage @@ -259,7 +259,7 @@ var resumeUploadCmdBuilder = func(cfg *iqshell.Config) *cobra.Command { cmd.Flags().Int64VarP(&info.ChunkSize, "v2-part-size", "", data.BLOCK_SIZE, "the part size when use resumable upload v2 APIs to upload, same to --resumable-api-v2-part-size") _ = cmd.Flags().MarkDeprecated("v2-part-size", "use --resumable-api-v2-part-size instead") - cmd.Flags().IntVarP(&info.FileType, "file-type", "", 0, "set storage type of file, 0:STANDARD storage, 1:IA storage, 2:ARCHIVE storage, 3:DEEP_ARCHIVE storage, 4:ARCHIVE_IR storage") + cmd.Flags().IntVarP(&info.FileType, "file-type", "", 0, "set storage type of file, 0:STANDARD storage, 1:IA storage, 2:ARCHIVE storage, 3:DEEP_ARCHIVE storage, 4:ARCHIVE_IR storage, 5:INTELLIGENT_TIERING") cmd.Flags().IntVarP(&info.FileType, "storage", "s", 0, "set storage type of file, same to --file-type") _ = cmd.Flags().MarkDeprecated("storage", "use --file-type instead") // 废弃 storage diff --git a/cmd_test/bucket_domain_test.go b/cmd_test/bucket_domain_test.go index 1cdd0afd..a0a65b29 100644 --- a/cmd_test/bucket_domain_test.go +++ b/cmd_test/bucket_domain_test.go @@ -3,9 +3,10 @@ package cmd import ( - "github.com/qiniu/qshell/v2/cmd_test/test" "strings" "testing" + + "github.com/qiniu/qshell/v2/cmd_test/test" ) 
func TestBucketDomain(t *testing.T) { @@ -15,7 +16,7 @@ func TestBucketDomain(t *testing.T) { } if !strings.Contains(result, test.BucketDomain) { - t.Fatal("no expected domain:%", test.BucketDomain) + t.Fatal("no expected domain:", result) } return diff --git a/cmd_test/change_type_test.go b/cmd_test/change_type_test.go index 7ab29c36..82162bd3 100644 --- a/cmd_test/change_type_test.go +++ b/cmd_test/change_type_test.go @@ -4,10 +4,11 @@ package cmd import ( "fmt" - "github.com/qiniu/qshell/v2/cmd_test/test" "path/filepath" "strings" "testing" + + "github.com/qiniu/qshell/v2/cmd_test/test" ) func TestChangeType(t *testing.T) { @@ -100,6 +101,19 @@ func TestBatchChangeType(t *testing.T) { defer func() { test.RemoveFile(successLogPath) test.RemoveFile(failLogPath) + + //back + batchConfig = "" + for _, key := range test.Keys { + batchConfig += key + "\t" + "1" + "\n" + } + + path, err = test.CreateFileWithContent("batch_chtype.txt", batchConfig) + if err != nil { + t.Fatal("create chtype config file error:", err) + } + + test.RunCmdWithError("batchchtype", test.Bucket, "-i", path, "-y") }() if !test.IsFileHasContent(successLogPath) { @@ -109,19 +123,6 @@ func TestBatchChangeType(t *testing.T) { if !test.IsFileHasContent(failLogPath) { t.Fatal("batch result: fail log to file error: file empty") } - - //back - batchConfig = "" - for _, key := range test.Keys { - batchConfig += key + "\t" + "1" + "\n" - } - - path, err = test.CreateFileWithContent("batch_chtype.txt", batchConfig) - if err != nil { - t.Fatal("create chtype config file error:", err) - } - - test.RunCmdWithError("batchchtype", test.Bucket, "-i", path, "-y") } func TestBatchChangeTypeRecord(t *testing.T) { @@ -139,6 +140,21 @@ func TestBatchChangeTypeRecord(t *testing.T) { t.Fatal("create batch move config file error:", err) } + defer func() { + //back + batchConfig = "" + for _, key := range test.Keys { + batchConfig += key + "\t" + "1" + "\n" + } + + path, err = 
test.CreateFileWithContent("batch_chtype.txt", batchConfig) + if err != nil { + t.Fatal("create chtype config file error:", err) + } + + test.RunCmdWithError("batchchtype", test.Bucket, "-i", path, "-y") + }() + test.RunCmdWithError("batchchtype", test.Bucket, "-i", path, "--enable-record", @@ -183,19 +199,6 @@ func TestBatchChangeTypeRecord(t *testing.T) { fmt.Println("=========================== result end ===========================") t.Fatal("batch result: should redo because set --record-redo-while-error") } - - //back - batchConfig = "" - for _, key := range test.Keys { - batchConfig += key + "\t" + "1" + "\n" - } - - path, err = test.CreateFileWithContent("batch_chtype.txt", batchConfig) - if err != nil { - t.Fatal("create chtype config file error:", err) - } - - test.RunCmdWithError("batchchtype", test.Bucket, "-i", path, "-y") } func TestBatchChangeTypeDocument(t *testing.T) { diff --git a/cmd_test/copy_test.go b/cmd_test/copy_test.go index 70c98223..39d7c82e 100644 --- a/cmd_test/copy_test.go +++ b/cmd_test/copy_test.go @@ -4,10 +4,11 @@ package cmd import ( "fmt" - "github.com/qiniu/qshell/v2/cmd_test/test" "path/filepath" "strings" "testing" + + "github.com/qiniu/qshell/v2/cmd_test/test" ) func TestCopy(t *testing.T) { @@ -96,7 +97,6 @@ func TestBatchCopy(t *testing.T) { "--worker", "4", "--min-worker", "10", "--worker-count-increase-period", "50", - "-w", "-y") defer func() { test.RemoveFile(successLogPath) diff --git a/cmd_test/download_get_test.go b/cmd_test/download_get_test.go index 2b31c71e..05215048 100644 --- a/cmd_test/download_get_test.go +++ b/cmd_test/download_get_test.go @@ -17,8 +17,7 @@ func TestGetImage(t *testing.T) { t.Fatal("get result path error:", err) } path := filepath.Join(resultPath, test.ImageKey) - ret, errs := test.RunCmdWithError("get", test.Bucket, test.ImageKey, - "--public", + _, errs := test.RunCmdWithError("get", test.Bucket, test.ImageKey, "-o", path, "-d") defer test.RemoveFile(path) @@ -26,9 +25,6 @@ func 
TestGetImage(t *testing.T) { if len(errs) > 0 { t.Fail() } - if !strings.Contains(ret, ".qiniucs.com") { - t.Fatal("get file: should get io src domain") - } if !test.IsFileHasContent(path) { t.Fatal("get file content can't be empty") } @@ -44,7 +40,6 @@ func TestGetImageAndCheck(t *testing.T) { // 因为有源站域名,所以经过重试下载会成功 result, errs := test.RunCmdWithError("get", test.Bucket, test.ImageKey, "--check-size", - "--public", "-d", "-o", path) defer test.RemoveFile(path) @@ -186,7 +181,7 @@ func TestGetNoExistDomain(t *testing.T) { func TestGetNoExistBucket(t *testing.T) { _, errs := test.RunCmdWithError("get", test.BucketNotExist, test.Key) - if !strings.Contains(errs, "no such bucket") { + if !(strings.Contains(errs, "no such bucket") || strings.Contains(errs, "no such entry")) { t.Fail() } } diff --git a/cmd_test/download_test.go b/cmd_test/download_test.go index c9025f88..94c9d461 100644 --- a/cmd_test/download_test.go +++ b/cmd_test/download_test.go @@ -33,7 +33,6 @@ func TestDownloadWithKeyFile(t *testing.T) { Prefix: "hell", Suffixes: ".json", IoHost: "", - Public: true, CheckHash: true, Referer: "", CdnDomain: "", @@ -73,7 +72,6 @@ func TestDownloadFromBucket(t *testing.T) { Prefix: "hello3,hello5,hello7", Suffixes: "", IoHost: utils.Endpoint(false, test.BucketDomain), - Public: true, CheckSize: true, Referer: "", CdnDomain: "", @@ -114,7 +112,6 @@ func TestDownloadWithDomain(t *testing.T) { Prefix: "hello3,hello5,hello7", Suffixes: "", IoHost: utils.Endpoint(false, test.BucketDomain), - Public: true, CheckSize: true, Referer: "", Domain: utils.Endpoint(false, test.BucketDomain), @@ -186,7 +183,6 @@ func TestDownloadNoDomain(t *testing.T) { Bucket: test.Bucket, Prefix: "hello3,hello5,hello7", Suffixes: "", - Public: true, CheckHash: true, Referer: "", CdnDomain: "", @@ -283,7 +279,6 @@ func TestDownload2AllFilesFromBucket(t *testing.T) { "--bucket", test.Bucket, "--dest-dir", destDir, "--suffixes", ".json", - "--public", "--log-file", logPath, "--log-level", "info", 
"-c", "4", @@ -355,10 +350,6 @@ func TestDownload2WithKeyFile(t *testing.T) { } logContent := test.FileContent(logPath) - if !strings.Contains(logContent, "?e=") { - t.Fatal("download url should private") - } - if !strings.Contains(logContent, "work consumer 3 start") { t.Fatal("download should have consumer 3") } @@ -397,7 +388,6 @@ func TestDownload2PublicWithKeyFile(t *testing.T) { "--key-file", keysFilePath, "--log-file", logPath, "--log-level", "debug", - "--public", "-c", "4", "-d") if test.FileCountInDir(destDir) < 2 { @@ -408,11 +398,6 @@ func TestDownload2PublicWithKeyFile(t *testing.T) { t.Fatal("log file should has content") } - logContent := test.FileContent(logPath) - if strings.Contains(logContent, "?e=") { - t.Fatal("download url should public") - } - return } @@ -445,7 +430,6 @@ func TestDownload2PublicWithDomain(t *testing.T) { "--key-file", keysFilePath, "--log-file", logPath, "--log-level", "debug", - "--public", "-c", "4", "-d") if test.FileCountInDir(destDir) < 2 { @@ -456,11 +440,6 @@ func TestDownload2PublicWithDomain(t *testing.T) { t.Fatal("log file should has content") } - logContent := test.FileContent(logPath) - if strings.Contains(logContent, "?e=") { - t.Fatal("download url should public") - } - return } diff --git a/cmd_test/fetch_test.go b/cmd_test/fetch_test.go index 420d17f8..8fcc331a 100644 --- a/cmd_test/fetch_test.go +++ b/cmd_test/fetch_test.go @@ -3,10 +3,11 @@ package cmd import ( - "github.com/qiniu/qshell/v2/cmd_test/test" "path/filepath" "strings" "testing" + + "github.com/qiniu/qshell/v2/cmd_test/test" ) func TestFetch(t *testing.T) { diff --git a/docs/abfetch.md b/docs/abfetch.md index 1c567e30..17b61137 100644 --- a/docs/abfetch.md +++ b/docs/abfetch.md @@ -37,7 +37,7 @@ $ qshell abfetch --doc - -b/--callback-body:回调的 http Body。 【可选】 - -T/--callback-host:回调时的 HOST 头。 【可选】 - -a/--callback-url:回调的请求地址。 【可选】 -- --file-type:抓取的资源存储在七牛存储空间的类型,0:普通存储 1:低频存储 2:归档存储 3:深度归档 4:归档直读存储, 默认为: 0。 【可选】 +- 
--file-type:抓取的资源存储在七牛存储空间的类型,0:标准存储 1:低频存储 2:归档存储 3:深度归档 4:归档直读存储 5:智能分层存储, 默认为: 0。 【可选】 - -c/--thread-count:指定抓取时使用的线程数目,默认:20。 【可选】 - --overwrite:是否覆盖空间已有文件,默认为 `false`。 【可选】 - -s/--success-list:指定一个文件的路径,如果资源抓取成功,则将资源信息写入此文件;默认不导出。 【可选】 diff --git a/docs/batchchlifecycle.md b/docs/batchchlifecycle.md index 2038774e..02b8bd8b 100644 --- a/docs/batchchlifecycle.md +++ b/docs/batchchlifecycle.md @@ -47,6 +47,7 @@ $ qshell batchchlifecycle --doc - --to-archive-ir-after-days:指定文件上传后并在设置的时间后转换到 `归档直读存储类型`;值范围为 -1 或者大于 0,设置为 -1 表示取消已设置的转 `归档直读存储` 的生命周期规则,单位:天【可选】 - --to-archive-after-days:指定文件上传后并在设置的时间后转换到 `归档存储类型`;值范围为 -1 或者大于 0,设置为 -1 表示取消已设置的转 `归档存储` 的生命周期规则,单位:天【可选】 - --to-deep-archive-after-days:指定文件上传后并在设置的时间后转换到 `深度归档存储类型`;值范围为 -1 或者大于 0,设置为 -1 表示取消已设置的转 `深度归档存储` 的生命周期规则,单位:天【可选】 +- --to-intelligent-tiering-after-days:指定文件上传后并在设置的时间后转换到 `智能归档存储类型`;值范围为 -1 或者大于 0,设置为 -1 表示取消已设置的转 `智能归档存储` 的生命周期规则,单位:天【可选】 - --delete-after-days:指定文件上传后并在设置的时间后进行 `过期删除`,删除后不可恢复;值范围为 -1 或者大于 0,设置为 -1 表示取消已设置的 `过期删除` 的生命周期规则,单位:天【可选】 - -y/--force:该选项控制工具的默认行为。默认情况下,对于批量操作,工具会要求使用者输入一个验证码,确认下要进行批量文件操作了,避免操作失误的发生。如果不需要这个验证码的提示过程可以使用此选项。【可选】 - -s/--success-list:该选项指定一个文件,程序会把操作成功的资源信息导入到该文件;默认不导出。【可选】 diff --git a/docs/batchchtype.md b/docs/batchchtype.md index 1bd509a5..ae769fbc 100644 --- a/docs/batchchtype.md +++ b/docs/batchchtype.md @@ -1,5 +1,5 @@ # 简介 -`batchchtype` 命令用来为空间中的文件设置存储类型。该操作发生在同一个空间中。(将文件设置为 **深度归档存储** 或者 **归档存储** 或者 **归档直读存储** 或者 **低频存储** 或者 **普通存储**,默认:文件为 **普通存储**) +`batchchtype` 命令用来为空间中的文件设置存储类型。该操作发生在同一个空间中。(将文件设置为 **深度归档存储** 或者 **归档存储** 或者 **归档直读存储** 或者 **低频存储** 或者 **标准存储** 或者 **智能分层存储**,默认:文件为 **标准存储**) # 格式 ``` @@ -23,7 +23,7 @@ $ qshell batchchtype --doc - Bucket:空间名,可以为公开空间或私有空间。【必选】 # 选项 -- -i/--input-file:接受一个文件, 文件内容每行包含 `原文件名` 和 `存储类型`,存储类型用数字表示,0 为普通存储,1 为低频存储,2 为归档存储,3 为深度归档存储,4 为归档直读存储。每行多个元素名之间用分割符分隔(默认 tab 制表符); 如果需要自定义分割符,可以使用 `-F` 或 `--sep` 选项指定自定义的分隔符。如果没有通过该选项指定该文件参数, 从标准输入读取内容。每行包含 `文件名` 和 `存储类型`;具体格式如下:(【可选】) +- -i/--input-file:接受一个文件, 
文件内容每行包含 `原文件名` 和 `存储类型`,存储类型用数字表示,0:标准存储 1:低频存储 2:归档存储 3:深度归档 4:归档直读存储 5:智能分层存储。每行多个元素名之间用分割符分隔(默认 tab 制表符); 如果需要自定义分割符,可以使用 `-F` 或 `--sep` 选项指定自定义的分隔符。如果没有通过该选项指定该文件参数, 从标准输入读取内容。每行包含 `文件名` 和 `存储类型`;具体格式如下:(【可选】) ``` 1 // :文件名,:分割符,1:低频存储。 ``` diff --git a/docs/chlifecycle.md b/docs/chlifecycle.md index 6446f7b8..2d4bd975 100644 --- a/docs/chlifecycle.md +++ b/docs/chlifecycle.md @@ -39,6 +39,7 @@ $ qshell chlifecycle --doc - --to-archive-ir-after-days:指定文件上传后并在设置的时间后转换到 `归档直读存储类型`;值范围为 -1 或者大于 0,设置为 -1 表示取消已设置的转 `归档直读存储` 的生命周期规则,单位:天【可选】 - --to-archive-after-days:指定文件上传后并在设置的时间后转换到 `归档存储类型`;值范围为 -1 或者大于 0,设置为 -1 表示取消已设置的转 `归档存储` 的生命周期规则,单位:天【可选】 - --to-deep-archive-after-days:指定文件上传后并在设置的时间后转换到 `深度归档存储类型`;值范围为 -1 或者大于 0,设置为 -1 表示取消已设置的转 `深度归档存储` 的生命周期规则,单位:天【可选】 +- --to-intelligent-tiering-after-days:指定文件上传后并在设置的时间后转换到 `智能归档存储类型`;值范围为 -1 或者大于 0,设置为 -1 表示取消已设置的转 `智能归档存储` 的生命周期规则,单位:天【可选】 - --delete-after-days:指定文件上传后并在设置的时间后进行 `过期删除`,删除后不可恢复;值范围为 -1 或者大于 0,设置为 -1 表示取消已设置的 `过期删除` 的生命周期规则,单位:天【可选】 diff --git a/docs/chtype.md b/docs/chtype.md index 170c098e..2b916498 100644 --- a/docs/chtype.md +++ b/docs/chtype.md @@ -22,10 +22,10 @@ $ qshell chtype --doc # 参数 - Bucket:空间名,可以为公开空间或私有空间。【必选】 - Key:空间中的文件名。【必选】 -- FileType:给文件指定的新的存储类型,其中可选值为 `0` 代表 `普通存储`,`1` 代表 `低频存储`,`2` 代表 `归档存储`,`3` 代表 `深度归档存储`,`4` 代表 `归档直读存储`。【必选】 +- FileType:给文件指定的新的存储类型,其中可选值为 0:标准存储 1:低频存储 2:归档存储 3:深度归档 4:归档直读存储 5:智能分层存储。【必选】 注: -`归档存储` 或 `深度归档存储` 直接转 `普通存储` 或 `低频存储` 会失败,需要先通过 restorear 命令恢复后再转。 +`归档存储` 或 `深度归档存储` 直接转 `标准存储` 或 `低频存储` 会失败,需要先通过 restorear 命令恢复后再转。 # 示例 修改 `if-pbl` 空间中 `qiniu.png` 图片的存储类型为 `低频存储` diff --git a/docs/fput.md b/docs/fput.md index a19a47a3..c0043a88 100644 --- a/docs/fput.md +++ b/docs/fput.md @@ -32,7 +32,7 @@ $ qshell fput --doc - --accelerate:启用上传加速。【可选】 - --overwrite:是否覆盖空间已有文件,默认为 `false`。 【可选】 - -t/--mimetype:指定文件的 MimeType。 【可选】 -- --file-type:文件存储类型,默认为 `0`(标准存储),`1` 为低频存储,`2` 为归档存储,`3` 为深度归档存储,`4` 为归档直读存储。 【可选】 +- --file-type:文件存储类型,0:标准存储 1:低频存储 2:归档存储 
3:深度归档 4:归档直读存储 5:智能分层存储,默认为 0。【可选】 - -u/--up-host: 指定上传域名。 【可选】 - -l/--callback-urls:上传回调地址, 可以指定多个地址,以逗号分隔。 【可选】 - -T/--callback-host:上传回调的 HOST, 必须和 CallbackUrls 一起指定。 【可选】 diff --git a/docs/get.md b/docs/get.md index 35b5b359..f07f4d89 100644 --- a/docs/get.md +++ b/docs/get.md @@ -30,7 +30,7 @@ $ qshell get --doc - -o/--outfile:保存在本地的文件路径;不指定,保存在当前文件夹,文件名使用存储空间中的名字【可选】 - --domain:指定下载请求的域名,当指定了下载域名则仅使用此下载域名进行下载;默认为空,此时 qshell 下载使用域名的优先级:1.bucket 绑定的 CDN 域名(qshell 内部查询,无需配置) 2.bucket 绑定的源站域名(qshell 内部查询,无需配置) 3. 七牛源站域名(qshell 内部查询,无需配置),当优先级高的域名下载失败后会尝试使用优先级低的域名进行下载。【可选】 - --get-file-api: 当存储服务端支持 getfile 接口时才有效。【可选】 -- --public:空间是否为公开空间;为 `true` 时为公有空间,公有空间下载时不会对下载 URL 进行签名,可以提升 CDN 域名性能,默认为 `false`(私有空间)【可选】 +- --public:空间是否为公开空间;为 `true` 时为公有空间,公有空间下载时不会对下载 URL 进行签名,可以提升 CDN 域名性能,默认为 `false`(私有空间),已废弃【可选】 - --check-size: 下载后检测本地文件和服务端文件 size 的一致性。【可选】 - --check-hash: 下载后检测本地文件和服务端文件 hash 的一致性。【可选】 - --enable-slice: 是否开启切片下载,需要注意 `--slice-file-size-threshold` 切片阈值选项的配置,只有开启切片下载,并且下载的文件大小大于切片阈值方会启动切片下载。默认不开启。【可选】 diff --git a/docs/listbucket2.md b/docs/listbucket2.md index 9c1b7fc9..d17fd59f 100644 --- a/docs/listbucket2.md +++ b/docs/listbucket2.md @@ -40,7 +40,7 @@ $ qshell listbucket2 --doc - --output-file-max-size:每个输出文件的最大尺寸,大于此值会自动创建新的文件(新文件的文件名规律示例,源文件:/x/x/a.txt,新建文件为:/x/x/a-${index}.txt,index 为创建文件的序列号,从 0 开始),0:不限制单个输出文件的尺寸,单位:B,默认:0。 【可选】 - --start:根据列举前缀列举整个空间,然后从中筛选出文件上传日期在 之后的文件;格式:yyyy-mm-dd-hh-MM-ss eg:2022-01-10-08-30-20 。【可选】 - --end:根据列举前缀列举整个空间, 然后从中筛选出文件上传日期在之前的文件;格式:yyyy-mm-dd-hh-MM-ss eg:2022-01-10-08-30-20 。【可选】 -- --file-types:根据列举前缀列举整个空间,然后从中筛选出满足七牛存储类型的文件;配置多个存储类型时中间用逗号隔开(eg: 1,2,3);`0`:`普通存储`,`1`:`低频存储`,`2`:`归档存储`,`3`:`深度归档存储`,`4`:`归档直读存储`。 +- --file-types:根据列举前缀列举整个空间,然后从中筛选出满足七牛存储类型的文件;配置多个存储类型时中间用逗号隔开(eg: 1,2,3);0:标准存储 1:低频存储 2:归档存储 3:深度归档 4:归档直读存储 5:智能分层存储。 - --mimetypes:根据列举前缀列举整个空间,然后从中筛选出满足 MimeType 的文件;配置多个 MimeType 时中间用逗号隔开(eg: image/*,video/)。 - --min-file-size:根据列举前缀列举整个空间,然后从中筛选出文件大小大于该值的文件;单位:B 。 - 
--max-file-size:根据列举前缀列举整个空间,然后从中筛选出文件大小小于该值的文件;单位:B 。 diff --git a/docs/qdownload.md b/docs/qdownload.md index 8680a16e..920614f8 100644 --- a/docs/qdownload.md +++ b/docs/qdownload.md @@ -49,7 +49,6 @@ $ qshell qdownload --doc "check_hash" : false, "domain" : "down.example.com", "referer" : "http://www.example.com", - "public" : true, "remove_temp_while_error": false, "log_file" : "download.log", "log_level" : "info", @@ -70,7 +69,7 @@ $ qshell qdownload --doc - check_hash:是否验证 hash,如果开启可能会耗费较长时间,默认为 `false` 【可选】 - domain:指定下载请求的域名,当指定了下载域名则仅使用此下载域名进行下载;默认为空,此时 qshell 下载使用域名的优先级:1.bucket 绑定的 CDN 域名(qshell 内部查询,无需配置) 2.bucket 绑定的源站域名(qshell 内部查询,无需配置) 3. 七牛源站域名(qshell 内部查询,无需配置),当优先级高的域名下载失败后会尝试使用优先级低的域名进行下载。【可选】 - referer:如果下载请求域名配置了域名白名单防盗链,需要指定一个允许访问的 referer 地址;默认为空 【可选】 -- public:空间是否为公开空间;为 `true` 时为公有空间,公有空间下载时不会对下载 URL 进行签名,可以提升 CDN 域名性能,默认为 `false`(私有空间)【可选】 +- public:空间是否为公开空间;为 `true` 时为公有空间,公有空间下载时不会对下载 URL 进行签名,可以提升 CDN 域名性能,默认为 `false`(私有空间),已废弃【可选】 - enable_slice: 是否开启切片下载,需要注意 `slice_file_size_threshold` 切片阈值选项的配置,只有开启切片下载,并且下载的文件大小大于切片阈值方会启动切片下载。默认不开启。【可选】 - slice_size: 切片大小;当使用切片下载时,每个切片的大小;单位:B。默认为 4194304,也即 4MB。【可选】 - slice_concurrent_count: 切片下载的并发度;默认为 10 【可选】 diff --git a/docs/qdownload2.md b/docs/qdownload2.md index 3a2a3bf1..b2e63684 100644 --- a/docs/qdownload2.md +++ b/docs/qdownload2.md @@ -38,7 +38,6 @@ Flags: --log-level string download log output level, optional values are debug,info,warn and error (default "debug") --log-rotate int the switching period of the download log file, the unit is day, (default 7) --prefix string only download files with the specified prefix - --public whether the space is a public space --record-root string path to save download record information, including log files and download progress files; the default is download directory --referer string if the CDN domain name is configured with domain name whitelist anti-leech, you need to specify a referer address that allows access --remove-temp-while-error 
when the download encounters an error, delete the previously downloaded part of the file cache diff --git a/docs/qupload.md b/docs/qupload.md index f2b08cc2..9bc465da 100644 --- a/docs/qupload.md +++ b/docs/qupload.md @@ -81,7 +81,7 @@ $ qshell qupload --doc - log_file:上传日志的输出文件,默认为输出到 `record_root` 指定的文件中,具体文件路径可以在终端输出看到。 【可选】 - log_rotate:上传日志文件的切换周期,单位为天,默认为 7 天即切换到新的上传日志文件。 【可选】 - log_stdout:上传日志是否同时输出一份到标准终端,默认为 `true`。 【可选】 -- file_type:文件存储类型;`0`:标准存储,`1`:低频存储,`2`:归档存储,`3`:深度归档存储,`4`:归档直读存储;默认为 `0`(标准存储)。 【可选】 +- file_type:文件存储类型;0:标准存储 1:低频存储 2:归档存储 3:深度归档 4:归档直读存储 5:智能分层存储;默认为 0。 【可选】 - delete_on_success:上传成功的文件,同时删除本地文件,以达到节约磁盘的目的,比如日志归档的场景,默认为 `false`,如果需要开启功能,设置为 `true` 即可。【可选】 - resumable_api_v2:使用分片 V2 进行上传,默认为 `false` 使用分片 V1 。【可选】 - resumable_api_v2_part_size:使用分片 V2 进行上传时定制分片大小,默认 4194304(4M) 。【可选】 diff --git a/docs/qupload2.md b/docs/qupload2.md index 48890084..7a136a23 100644 --- a/docs/qupload2.md +++ b/docs/qupload2.md @@ -41,7 +41,7 @@ Flags: --end-user string Owner identification -e, --failure-list string upload failure file list --file-list string file list to upload - --file-type int set storage type of file, 0:STANDARD storage, 1:IA storage, 2:ARCHIVE storage, 3:DEEP_ARCHIVE storage, 4:ARCHIVE_IR storage + --file-type int set storage type of file, 0:STANDARD storage, 1:IA storage, 2:ARCHIVE storage, 3:DEEP_ARCHIVE storage, 4:ARCHIVE_IR storage, 5:INTELLIGENT_TIERING -h, --help help for qupload2 --ignore-dir ignore the dir in the dest file key --key-prefix string key prefix prepended to dest file key diff --git a/docs/rput.md b/docs/rput.md index 9d05b66e..11967200 100644 --- a/docs/rput.md +++ b/docs/rput.md @@ -10,7 +10,7 @@ qshell rput [--overwrite] [--v2] [--mimetype ] [--callback-urls ] [--callback-host ] [--file-type ] ``` -其中 `Overwrite`,`MimeType`,`FileType` (0: 标准存储, 1: 低频存储, 2: 归档存储, 3: 深度归档存储, 4: 归档直读存储)参数可根据需要指定一个或者多个,参数顺序随意,程序会自动识别。 +其中 `Overwrite`,`MimeType`,`FileType` 参数可根据需要指定一个或者多个,参数顺序随意,程序会自动识别。 # 帮助文档 可以在命令行输入如下命令获取帮助文档: 
@@ -34,7 +34,7 @@ $ qshell rput --doc - --accelerate:启用上传加速。【可选】 - --overwrite:是否覆盖空间已有文件,默认为 `false`。 【可选】 - -t/--mimetype:指定文件的 MimeType 。【可选】 -- --file-type:文件存储类型;0: 标准存储, 1: 低频存储, 2: 归档存储, 3: 深度归档存储, 4: 归档直读存储;默认为`0`(标准存储)。 【可选】 +- --file-type:文件存储类型;0:标准存储 1:低频存储 2:归档存储 3:深度归档 4:归档直读存储 5:智能分层存储;默认为 0。 【可选】 - --resumable-api-v2:使用分片上传 API V2 进行上传,默认为 `false`, 使用 V1 上传。【可选】 - --resumable-api-v2-part-size:使用分片上传 API V2 进行上传时的分片大小,默认为 4M 。【可选】 - --sequential-read-file: 文件读为顺序读,不涉及跳读;开启后,上传中的分片数据会被加载至内存。此选项可能会增加挂载网络文件系统的文件上传速度。默认是:false。【可选】 diff --git a/docs/sync.md b/docs/sync.md index 8603ba22..b61765c0 100644 --- a/docs/sync.md +++ b/docs/sync.md @@ -33,7 +33,7 @@ $ qshell sync --doc - --accelerate:启用上传加速。【可选】 - -k/--key:该资源保存在空间中的 key,不配置时使用资源 Url 中文件名作为存储的 key。 【可选】 - -u/--uphost:上传入口的 IP 地址,一般在大文件的情况下,可以指定上传入口的 IP 来减少 DNS 环节,提升同步速度。 【可选】 -- --file-type:文件存储类型,默认为 `0` (标准存储),`1` 为低频存储,`2` 为归档存储,`3` 为深度归档存储,`4` 为归档直读存储【可选】 +- --file-type:文件存储类型,0:标准存储 1:低频存储 2:归档存储 3:深度归档 4:归档直读存储 5:智能分层存储;默认为 0【可选】 - --resumable-api-v2:使用分片 v2 进行上传;默认使用 v1。 【可选】 - --resumable-api-v2-part-size:使用分片上传 API V2 进行上传时的分片大小,默认为 4M 。【可选】 - --overwrite:是否覆盖空间已有文件,默认为 `false`。 【可选】 diff --git a/go.mod b/go.mod index db07f4e7..56efa263 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ require ( github.com/astaxie/beego v1.12.3 github.com/aws/aws-sdk-go v1.37.31 github.com/mitchellh/go-homedir v1.1.0 - github.com/qiniu/go-sdk/v7 v7.25.4 + github.com/qiniu/go-sdk/v7 v7.25.5 github.com/schollz/progressbar/v3 v3.8.6 github.com/spf13/cast v1.3.1 github.com/spf13/cobra v1.1.3 diff --git a/go.sum b/go.sum index 75156ae2..8dbb5441 100644 --- a/go.sum +++ b/go.sum @@ -277,8 +277,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/qiniu/dyn v1.3.0/go.mod 
h1:E8oERcm8TtwJiZvkQPbcAh0RL8jO1G0VXJMW3FAWdkk= -github.com/qiniu/go-sdk/v7 v7.25.4 h1:ulCKlTEyrZzmNytXweOrnva49+Q4+ASjYBCSXhkRWTo= -github.com/qiniu/go-sdk/v7 v7.25.4/go.mod h1:dmKtJ2ahhPWFVi9o1D5GemmWoh/ctuB9peqTowyTO8o= +github.com/qiniu/go-sdk/v7 v7.25.5 h1:BZAZhrYC7vrw9NPnNbFi1K8xucoz29s7sEGWap77i2Q= +github.com/qiniu/go-sdk/v7 v7.25.5/go.mod h1:dmKtJ2ahhPWFVi9o1D5GemmWoh/ctuB9peqTowyTO8o= github.com/qiniu/x v1.10.5/go.mod h1:03Ni9tj+N2h2aKnAz+6N0Xfl8FwMEDRC2PAlxekASDs= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= diff --git a/iqshell/common/flow/flow.go b/iqshell/common/flow/flow.go index b4f17280..55b03b32 100644 --- a/iqshell/common/flow/flow.go +++ b/iqshell/common/flow/flow.go @@ -106,16 +106,18 @@ func (f *Flow) Start() { hasMore, workInfo, err := f.WorkProvider.Provide() log.DebugF("work producer get work, hasMore:%v, workInfo: %+v, err: %+v", hasMore, workInfo, err) if err != nil { - workInfoData := "" - if workInfo != nil { - workInfoData = workInfo.Data - } if err.Code == data.ErrorCodeParamMissing || err.Code == data.ErrorCodeLineHeader { - log.DebugF("work producer get work, skip:%s because:%s", workInfoData, err) + log.DebugF("work producer get work, skip:%s because:%s", workInfo, err) f.notifyWorkSkip(workInfo, nil, err) } else { - log.DebugF("work producer get work fail, error:%s info:%s", err, workInfoData) + // 没有读到任何数据 + if workInfo == nil || len(workInfo.Data) == 0 { + log.ErrorF("work producer get work fail: %s", err) + break + } + + log.DebugF("work producer get work fail, error:%s info:%s", err, workInfo) f.notifyWorkFail(workInfo, err) } continue diff --git a/iqshell/common/flow/work_provider_reader.go b/iqshell/common/flow/work_provider_reader.go index 6ba2e562..d5232c3c 100644 --- a/iqshell/common/flow/work_provider_reader.go +++ b/iqshell/common/flow/work_provider_reader.go @@ -2,11 +2,12 @@ package flow import ( 
"bufio" - "github.com/qiniu/qshell/v2/iqshell/common/alert" - "github.com/qiniu/qshell/v2/iqshell/common/data" "io" "strings" "sync" + + "github.com/qiniu/qshell/v2/iqshell/common/alert" + "github.com/qiniu/qshell/v2/iqshell/common/data" ) func NewReaderWorkProvider(reader io.Reader, creator WorkCreator) (WorkProvider, *data.CodeError) { @@ -52,5 +53,8 @@ func (p *readerWorkProvider) provide() (hasMore bool, work *WorkInfo, err *data. Work: w, }, e } + if sErr := p.scanner.Err(); sErr != nil { + return true, &WorkInfo{}, data.ConvertError(sErr) + } return false, &WorkInfo{}, nil } diff --git a/iqshell/common/utils/file.go b/iqshell/common/utils/file.go index c989db09..5068fb11 100644 --- a/iqshell/common/utils/file.go +++ b/iqshell/common/utils/file.go @@ -2,10 +2,12 @@ package utils import ( "bufio" + "context" "encoding/json" "fmt" "io" "io/ioutil" + "net/http" "os" "path/filepath" "strconv" @@ -129,8 +131,13 @@ func NetworkFileLength(srcResUrl string) (fileSize int64, err *data.CodeError) { } func GetNetworkFileInfo(srcResUrl string) (*NetworkFileInfo, *data.CodeError) { - - resp, respErr := client.DefaultStorageClient().Head(srcResUrl) + // 为了对 CDN 友好,此处使用 GET 方法获取文件大小,可以进行缓存,防止有些链接反复回源(比如:图片瘦身) + request, err := http.NewRequest("GET", srcResUrl, nil) + if err != nil { + return nil, data.NewEmptyError().AppendDescF("create request error:%v", err) + } + request.Header.Set("Range", "bytes=0-0") + resp, respErr := client.DefaultStorageClient().Do(context.Background(), request) if respErr != nil { return nil, data.NewEmptyError().AppendDescF("New head request failed, %s", respErr.Error()) } @@ -141,23 +148,34 @@ func GetNetworkFileInfo(srcResUrl string) (*NetworkFileInfo, *data.CodeError) { } }() + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusRequestedRangeNotSatisfiable { + return nil, data.NewError(resp.StatusCode, fmt.Sprintf("unexpected status code %d for get file info %s", resp.StatusCode, 
srcResUrl)) + } + file := &NetworkFileInfo{ Size: -1, Hash: "", } - contentLength := resp.Header.Get("Content-Length") - if contentLength != "" { - if size, pErr := strconv.ParseInt(contentLength, 10, 64); pErr != nil { - return nil, data.NewEmptyError().AppendDescF("parse network file(%s) size error:%v", srcResUrl, pErr) - } else { - file.Size = size - } + + if resp.StatusCode == http.StatusRequestedRangeNotSatisfiable { + // 文件 Range 超限 + file.Size = 0 } else { - return file, data.NewEmptyError().AppendDescF("network file(%s) hasn't Content-Length", srcResUrl) + // 从Content-Range获取文件总大小 + contentRange := resp.Header.Get("Content-Range") + if contentRange != "" { + // 解析Content-Range格式: bytes start-end/total + parts := strings.Split(contentRange, "/") + if len(parts) == 2 { + if total, pErr := strconv.ParseInt(parts[1], 10, 64); pErr == nil { + file.Size = total + } + } + } } etag := resp.Header.Get("ETag") - if contentLength != "" { + if etag != "" { file.Hash = ParseEtag(etag) } else { return nil, data.NewEmptyError().AppendDescF("network file(%s) hasn't Etag", srcResUrl) diff --git a/iqshell/storage/bucket/list.go b/iqshell/storage/bucket/list.go index df376158..80c1a90e 100644 --- a/iqshell/storage/bucket/list.go +++ b/iqshell/storage/bucket/list.go @@ -26,7 +26,7 @@ type ListApiInfo struct { StartTime time.Time // list item 的 put time 区间的开始时间 【闭区间】 EndTime time.Time // list item 的 put time 区间的终止时间 【闭区间】 Suffixes []string // list item 必须包含后缀 - FileTypes []int // list item 存储类型,多个使用逗号隔开, 0:普通存储 1:低频存储 2:归档存储 3:深度归档存储 4:归档直读存储 + FileTypes []int // list item 存储类型,多个使用逗号隔开, 0:标准存储 1:低频存储 2:归档存储 3:深度归档存储 4:归档直读存储 5:智能分层存储 MimeTypes []string // list item Mimetype类型,多个使用逗号隔开 MinFileSize int64 // 文件最小值,单位: B MaxFileSize int64 // 文件最大值,单位: B diff --git a/iqshell/storage/bucket/operations/list.go b/iqshell/storage/bucket/operations/list.go index 624542e5..6f0a42e3 100644 --- a/iqshell/storage/bucket/operations/list.go +++ b/iqshell/storage/bucket/operations/list.go @@ 
-25,7 +25,7 @@ type ListInfo struct { StartDate string // list item 的 put time 区间的开始时间 【闭区间】 【可选】 EndDate string // list item 的 put time 区间的终止时间 【闭区间】 【可选】 Suffixes string // list item 必须包含后缀 【可选】 - FileTypes string // list item 存储类型,多个使用逗号隔开, 0:普通存储 1:低频存储 2:归档存储 3:深度归档存储 4:归档直读存储【可选】 + FileTypes string // list item 存储类型,多个使用逗号隔开, 0:标准存储 1:低频存储 2:归档存储 3:深度归档存储 4:归档直读存储 5:智能分层存储【可选】 MimeTypes string // list item Mimetype类型,多个使用逗号隔开 【可选】 MinFileSize string // 文件最小值,单位: B 【可选】 MaxFileSize string // 文件最大值,单位: B 【可选】 diff --git a/iqshell/storage/object/download/downloader.go b/iqshell/storage/object/download/downloader.go index 107416f3..07c4dedd 100644 --- a/iqshell/storage/object/download/downloader.go +++ b/iqshell/storage/object/download/downloader.go @@ -281,6 +281,7 @@ func downloadTempFileWithDownloader(dl downloader, fInfo *fileInfo, info *Downlo if cErr != nil { return cErr } + log.DebugF("get file size: %s", downloadUrl) if file, err := utils.GetNetworkFileInfo(downloadUrl); err != nil { return err } else if info.CheckHash && info.FileHash != file.Hash { diff --git a/iqshell/storage/object/download/operations/batch.go b/iqshell/storage/object/download/operations/batch.go index 94d0b8cf..10d802b6 100644 --- a/iqshell/storage/object/download/operations/batch.go +++ b/iqshell/storage/object/download/operations/batch.go @@ -15,6 +15,7 @@ import ( "github.com/qiniu/qshell/v2/iqshell/common/log" "github.com/qiniu/qshell/v2/iqshell/common/utils" "github.com/qiniu/qshell/v2/iqshell/common/workspace" + "github.com/qiniu/qshell/v2/iqshell/storage/bucket" "github.com/qiniu/qshell/v2/iqshell/storage/object/download" ) @@ -109,6 +110,18 @@ func BatchDownload(cfg *iqshell.Config, info BatchDownloadInfo) { return } + if !info.Public { + bucketInfo, err := bucket.GetBucketInfo(bucket.GetBucketApiInfo{ + Bucket: info.Bucket, + }) + if err != nil { + data.SetCmdStatusError() + log.ErrorF("get bucket info error:%v", err) + return + } + info.Public = bucketInfo.Private == 0 + } 
+ // 配置 locker if e := locker.TryLock(); e != nil { data.SetCmdStatusError() diff --git a/iqshell/storage/object/download/operations/download.go b/iqshell/storage/object/download/operations/download.go index 1bac05c2..cf5de35f 100644 --- a/iqshell/storage/object/download/operations/download.go +++ b/iqshell/storage/object/download/operations/download.go @@ -2,16 +2,18 @@ package operations import ( "fmt" + "os" + "time" + "github.com/qiniu/qshell/v2/iqshell" "github.com/qiniu/qshell/v2/iqshell/common/alert" "github.com/qiniu/qshell/v2/iqshell/common/data" "github.com/qiniu/qshell/v2/iqshell/common/log" "github.com/qiniu/qshell/v2/iqshell/common/progress" "github.com/qiniu/qshell/v2/iqshell/common/workspace" + "github.com/qiniu/qshell/v2/iqshell/storage/bucket" "github.com/qiniu/qshell/v2/iqshell/storage/object" "github.com/qiniu/qshell/v2/iqshell/storage/object/download" - "os" - "time" ) type DownloadInfo struct { @@ -52,6 +54,18 @@ func DownloadFile(cfg *iqshell.Config, info DownloadInfo) { info.ToFile = info.Key } + if !info.IsPublic { + bucketInfo, err := bucket.GetBucketInfo(bucket.GetBucketApiInfo{ + Bucket: info.Bucket, + }) + if err != nil { + data.SetCmdStatusError() + log.ErrorF("get bucket info error:%v", err) + return + } + info.IsPublic = bucketInfo.Private == 0 + } + fileStatus, err := object.Status(object.StatusApiInfo{ Bucket: info.Bucket, Key: info.Key, diff --git a/iqshell/storage/object/download/url.go b/iqshell/storage/object/download/url.go index bfbe9d31..4f18b71e 100644 --- a/iqshell/storage/object/download/url.go +++ b/iqshell/storage/object/download/url.go @@ -111,6 +111,17 @@ func createDownloadUrl(info *DownloadApiInfo) (string, *data.CodeError) { } urlString = utils.Endpoint(useHttps, info.Host) urlString = strings.Join([]string{urlString, "getfile", mac.AccessKey, info.Bucket, url.PathEscape(info.Key)}, "/") + // 源站域名需要签名 + if !info.IsPublicBucket { + if u, e := PublicUrlToPrivate(PublicUrlToPrivateApiInfo{ + PublicUrl: urlString, + 
Deadline: time.Now().Add(60 * time.Minute).Unix(), + }); e != nil { + return "", e + } else { + urlString = u.Url + } + } } else { urlString = PublicUrl(UrlApiInfo{ BucketDomain: info.Host, diff --git a/iqshell/storage/object/lifecycle.go b/iqshell/storage/object/lifecycle.go index 4aa52101..bfc57708 100644 --- a/iqshell/storage/object/lifecycle.go +++ b/iqshell/storage/object/lifecycle.go @@ -13,13 +13,14 @@ import ( // ChangeLifecycleApiInfo // 相关链接:https://developer.qiniu.com/kodo/8062/modify-object-life-cycle type ChangeLifecycleApiInfo struct { - Bucket string `json:"bucket"` - Key string `json:"key"` - ToIAAfterDays int `json:"to_ia_after_days"` // 转换到 低频存储类型,设置为 -1 表示取消 - ToArchiveIRAfterDays int `json:"to_archive_ir_after_days"` // 转换到 归档直读存储类型, 设置为 -1 表示取消 - ToArchiveAfterDays int `json:"to_archive_after_days"` // 转换到 归档存储类型, 设置为 -1 表示取消 - ToDeepArchiveAfterDays int `json:"to_deep_archive_after_days"` // 转换到 深度归档存储类型, 设置为 -1 表示取消 - DeleteAfterDays int `json:"delete_after_days"` // 过期删除,删除后不可恢复,设置为 -1 表示取消 + Bucket string `json:"bucket"` + Key string `json:"key"` + ToIAAfterDays int `json:"to_ia_after_days"` // 转换到 低频存储类型,设置为 -1 表示取消 + ToArchiveIRAfterDays int `json:"to_archive_ir_after_days"` // 转换到 归档直读存储类型, 设置为 -1 表示取消 + ToArchiveAfterDays int `json:"to_archive_after_days"` // 转换到 归档存储类型, 设置为 -1 表示取消 + ToDeepArchiveAfterDays int `json:"to_deep_archive_after_days"` // 转换到 深度归档存储类型, 设置为 -1 表示取消 + ToIntelligentTieringAfterDays int `json:"to_intelligent_tiering_after_days"` // 转换到 智能分层存储类型, 设置为 -1 表示取消 + DeleteAfterDays int `json:"delete_after_days"` // 过期删除,删除后不可恢复,设置为 -1 表示取消 } func (l *ChangeLifecycleApiInfo) GetBucket() string { @@ -44,6 +45,9 @@ func (l *ChangeLifecycleApiInfo) ToOperation() (string, *data.CodeError) { if l.ToDeepArchiveAfterDays != 0 { lifecycleSetting += fmt.Sprintf("/toDeepArchiveAfterDays/%d", l.ToDeepArchiveAfterDays) } + if l.ToIntelligentTieringAfterDays != 0 { + lifecycleSetting += 
fmt.Sprintf("/toIntelligentTieringAfterDays/%d", l.ToIntelligentTieringAfterDays) + } if l.DeleteAfterDays != 0 { lifecycleSetting += fmt.Sprintf("/deleteAfterDays/%d", l.DeleteAfterDays) } diff --git a/iqshell/storage/object/operations/lifecycle.go b/iqshell/storage/object/operations/lifecycle.go index fc48ddc8..d8210b42 100644 --- a/iqshell/storage/object/operations/lifecycle.go +++ b/iqshell/storage/object/operations/lifecycle.go @@ -53,9 +53,9 @@ func ChangeLifecycle(cfg *iqshell.Config, info *ChangeLifecycleInfo) { } if result.IsSuccess() { - lifecycleValues := []int{info.ToIAAfterDays, info.ToArchiveIRAfterDays, info.ToArchiveAfterDays, + lifecycleValues := []int{info.ToIAAfterDays, info.ToIntelligentTieringAfterDays, info.ToArchiveIRAfterDays, info.ToArchiveAfterDays, info.ToDeepArchiveAfterDays, info.DeleteAfterDays} - lifecycleDescs := []string{"to IA storage", "to ARCHIVE_IR storage", "to ARCHIVE storage", + lifecycleDescs := []string{"to IA storage", "to IntelligentTiering storage", "to ARCHIVE_IR storage", "to ARCHIVE storage", "to DEEP_ARCHIVE storage", "delete"} log.InfoF("Change lifecycle Success, [%s:%s]", info.Bucket, info.Key) for i := 0; i < len(lifecycleValues); i++ { @@ -78,13 +78,14 @@ func ChangeLifecycle(cfg *iqshell.Config, info *ChangeLifecycleInfo) { } type BatchChangeLifecycleInfo struct { - BatchInfo batch.Info // - Bucket string // - ToIAAfterDays int // 转换到 低频存储类型,设置为 -1 表示取消 - ToArchiveIRAfterDays int // 转换到 归档直读存储类型, 设置为 -1 表示取消 - ToArchiveAfterDays int // 转换到 归档存储类型, 设置为 -1 表示取消 - ToDeepArchiveAfterDays int // 转换到 深度归档存储类型, 设置为 -1 表示取消 - DeleteAfterDays int // 过期删除,删除后不可恢复,设置为 -1 表示取消 + BatchInfo batch.Info // + Bucket string // + ToIAAfterDays int // 转换到 低频存储类型,设置为 -1 表示取消 + ToArchiveIRAfterDays int // 转换到 归档直读存储类型, 设置为 -1 表示取消 + ToArchiveAfterDays int // 转换到 归档存储类型, 设置为 -1 表示取消 + ToDeepArchiveAfterDays int // 转换到 深度归档存储类型, 设置为 -1 表示取消 + ToIntelligentTieringAfterDays int // 转换到 智能分层存储类型, 设置为 -1 表示取消 + DeleteAfterDays int // 
过期删除,删除后不可恢复,设置为 -1 表示取消 } func (info *BatchChangeLifecycleInfo) Check() *data.CodeError { @@ -100,6 +101,7 @@ func (info *BatchChangeLifecycleInfo) Check() *data.CodeError { info.ToArchiveIRAfterDays == 0 && info.ToArchiveAfterDays == 0 && info.ToDeepArchiveAfterDays == 0 && + info.ToIntelligentTieringAfterDays == 0 && info.DeleteAfterDays == 0 { return data.NewEmptyError().AppendDesc("must set at least one value of lifecycle") } @@ -109,8 +111,9 @@ func (info *BatchChangeLifecycleInfo) Check() *data.CodeError { func BatchChangeLifecycle(cfg *iqshell.Config, info BatchChangeLifecycleInfo) { cfg.JobPathBuilder = func(cmdPath string) string { - jobId := utils.Md5Hex(fmt.Sprintf("%s:%s:%d:%d:%d:%d:%s", cfg.CmdCfg.CmdId, info.Bucket, - info.ToIAAfterDays, info.ToArchiveAfterDays, info.ToDeepArchiveAfterDays, info.DeleteAfterDays, + jobId := utils.Md5Hex(fmt.Sprintf("%s:%s:%d:%d:%d:%d:%d:%d:%s", cfg.CmdCfg.CmdId, info.Bucket, + info.ToIAAfterDays, info.ToArchiveIRAfterDays, info.ToArchiveAfterDays, + info.ToDeepArchiveAfterDays, info.ToIntelligentTieringAfterDays, info.DeleteAfterDays, info.BatchInfo.InputFile)) return filepath.Join(cmdPath, jobId) } @@ -142,13 +145,14 @@ func BatchChangeLifecycle(cfg *iqshell.Config, info BatchChangeLifecycleInfo) { return nil, alert.Error("key invalid", "") } return &object.ChangeLifecycleApiInfo{ - Bucket: info.Bucket, - Key: listObject.Key, - ToIAAfterDays: info.ToIAAfterDays, - ToArchiveIRAfterDays: info.ToArchiveIRAfterDays, - ToArchiveAfterDays: info.ToArchiveAfterDays, - ToDeepArchiveAfterDays: info.ToDeepArchiveAfterDays, - DeleteAfterDays: info.DeleteAfterDays, + Bucket: info.Bucket, + Key: listObject.Key, + ToIAAfterDays: info.ToIAAfterDays, + ToArchiveIRAfterDays: info.ToArchiveIRAfterDays, + ToArchiveAfterDays: info.ToArchiveAfterDays, + ToDeepArchiveAfterDays: info.ToDeepArchiveAfterDays, + ToIntelligentTieringAfterDays: info.ToIntelligentTieringAfterDays, + DeleteAfterDays: info.DeleteAfterDays, }, nil }). 
OnResult(func(operationInfo string, operation batch.Operation, result *batch.OperationResult) { diff --git a/iqshell/storage/object/operations/status.go b/iqshell/storage/object/operations/status.go index b08d59f0..1070817e 100644 --- a/iqshell/storage/object/operations/status.go +++ b/iqshell/storage/object/operations/status.go @@ -125,9 +125,9 @@ func getResultInfo(bucket, key string, status object.StatusResult) string { statInfo := "" fieldAdder := func(name string, value interface{}, desc string) { if len(desc) == 0 { - statInfo += fmt.Sprintf("%-25s%v\r\n", name+":", value) + statInfo += fmt.Sprintf("%-35s%v\r\n", name+":", value) } else { - statInfo += fmt.Sprintf("%-25s%v -> %s\r\n", name+":", value, desc) + statInfo += fmt.Sprintf("%-35s%v -> %s\r\n", name+":", value, desc) } } @@ -167,6 +167,7 @@ func getResultInfo(bucket, key string, status object.StatusResult) string { } lifecycleFieldAdder("Expiration", status.Expiration) lifecycleFieldAdder("TransitionToIA", status.TransitionToIA) + lifecycleFieldAdder("TransitionToIntelligentTiering", status.TransitionToIntelligentTiering) lifecycleFieldAdder("TransitionToArchiveIR", status.TransitionToArchiveIR) lifecycleFieldAdder("TransitionToArchive", status.TransitionToARCHIVE) lifecycleFieldAdder("TransitionToDeepArchive", status.TransitionToDeepArchive) @@ -176,7 +177,7 @@ func getResultInfo(bucket, key string, status object.StatusResult) string { return statInfo } -var objectTypes = []string{"标准存储", "低频存储", "归档存储", "深度归档存储", "归档直读存储"} +var objectTypes = []string{"标准存储", "低频存储", "归档存储", "深度归档存储", "归档直读存储", "智能分层存储"} func getFileTypeDescription(fileTypes int) string { typeString := "未知类型" diff --git a/iqshell/storage/object/status.go b/iqshell/storage/object/status.go index 7e579583..bc69edfc 100644 --- a/iqshell/storage/object/status.go +++ b/iqshell/storage/object/status.go @@ -47,6 +47,8 @@ type StatusResult struct { Expiration int64 `json:"expiration"` // 文件生命周期中转为低频存储的日期,int64 类型,Unix 时间戳格式 TransitionToIA 
int64 `json:"transitionToIA"` + // 文件生命周期中转为智能分层存储的日期,int64 类型,Unix 时间戳格式 + TransitionToIntelligentTiering int64 `json:"transitionToIntelligentTiering"` // 文件生命周期中转为归档存储的日期,int64 类型,Unix 时间戳格式 TransitionToARCHIVE int64 `json:"transitionToARCHIVE"` // 文件生命周期中转为归档存储的日期,int64 类型,Unix 时间戳格式 diff --git a/iqshell/storage/object/upload/uploader.go b/iqshell/storage/object/upload/uploader.go index b49e7139..8db4d113 100644 --- a/iqshell/storage/object/upload/uploader.go +++ b/iqshell/storage/object/upload/uploader.go @@ -117,7 +117,8 @@ type Uploader interface { func Upload(info *ApiInfo) (res *ApiResult, err *data.CodeError) { err = info.Check() if err != nil { - log.WarningF("upload: info init error:%v", err) + log.ErrorF("upload: info init error:%v", err) + return nil, err } exist := false diff --git a/iqshell/storage/servers/all_buckets.go b/iqshell/storage/servers/all_buckets.go index bff5cd9e..7e33d145 100644 --- a/iqshell/storage/servers/all_buckets.go +++ b/iqshell/storage/servers/all_buckets.go @@ -21,48 +21,80 @@ type BucketQuota struct { } type BucketInfo struct { - Id *data.String `json:"id"` - Tbl *data.String `json:"tbl"` - CTime *data.Int `json:"ctime"` // 返回为 0 - FileNum *data.Int64 `json:"file_num"` - StorageSize *data.Int64 `json:"storage_size"` - Region *data.String `json:"region"` - Global *data.Bool `json:"global"` - Perm *data.Int `json:"perm"` - ShareUsers []*UserInfo `json:"share_users"` - Versioning *data.Bool `json:"versioning"` - AllowNullKey *data.Bool `json:"allow_nullkey"` - EncryptionEnabled *data.Bool `json:"encryption_enabled"` - NotAllowAccessByTbl *data.Bool `json:"not_allow_access_by_tbl"` + Name *data.String `json:"name"` + Tbl *data.String `json:"tbl"` + FileNum *data.Int64 `json:"file_num"` + StorageSize *data.Int64 `json:"storage_size"` + Region *data.String `json:"region"` + + //CTime *data.String `json:"ctime"` + //Global *data.Bool `json:"global"` + //Perm *data.Int `json:"perm"` + //ShareUsers []*UserInfo `json:"share_users"` + 
//Versioning *data.Bool `json:"versioning"` + //AllowNullKey *data.Bool `json:"allow_nullkey"` + //EncryptionEnabled *data.Bool `json:"encryption_enabled"` + //NotAllowAccessByTbl *data.Bool `json:"not_allow_access_by_tbl"` } +func (i *BucketInfo) BucketName() string { + if i.Name != nil { + return i.Name.Value() + } + + if i.Tbl != nil { + return i.Tbl.Value() + } + + return "" +} func (i *BucketInfo) DescriptionString() string { - return fmt.Sprintf("%s", i.Tbl.Value()) + return fmt.Sprintf("%s", i.BucketName()) } func (i *BucketInfo) DetailDescriptionString() string { sizeString := utils.FormatFileSize(i.StorageSize.Value()) - return fmt.Sprintf("%-20s\t%-10d\t%-10s\t%s", i.Region.Value(), i.FileNum.Value(), sizeString, i.Tbl.Value()) + return fmt.Sprintf("%-20s\t%-10d\t%-10s\t%s", i.Region.Value(), i.FileNum.Value(), sizeString, i.BucketName()) } func BucketInfoDetailDescriptionStringFormat() string { return fmt.Sprintf("%-20s\t%-10s\t%-10s\t%s", "Region", "FileNum", "StorageSize", "Bucket") } +type BucketsResponse struct { + NextMarker string `json:"next_marker"` + IsTruncated bool `json:"is_truncated"` + Buckets []BucketInfo `json:"buckets"` +} + type ListApiInfo struct { Region string + Marker string + Limit int + Detail bool } -// AllBuckets List list 所有 bucket -func AllBuckets(info ListApiInfo) (buckets []BucketInfo, err *data.CodeError) { - return allBuckets(workspace.GetConfig(), info) +type BucketHandler func(bucket *BucketInfo, err *data.CodeError) + +// AllBuckets List 所有 bucket +func AllBuckets(info ListApiInfo, handler BucketHandler) { + // 分页获取没有更详细的信息,所以不能使用分页 + if info.Detail { + allBuckets(workspace.GetConfig(), info, handler) + return + } + + // 对于只需要 bucket 名的情况,走分页获取 + allBucketsByPage(workspace.GetConfig(), info, handler) } -func allBuckets(cfg *config.Config, info ListApiInfo) ([]BucketInfo, *data.CodeError) { +// allBuckets 一次获取获取所有 Bucket +func allBuckets(cfg *config.Config, info ListApiInfo, handler BucketHandler) { // 
https://github.com/qbox/product/blob/master/kodo/bucket/tblmgr.md#v3buckets%E8%8E%B7%E5%8F%96%E7%94%A8%E6%88%B7%E7%AC%A6%E5%90%88%E6%9D%A1%E4%BB%B6%E7%9A%84%E7%A9%BA%E9%97%B4%E4%BF%A1%E6%81%AF%E5%8C%85%E6%8B%AC%E7%A9%BA%E9%97%B4%E6%96%87%E4%BB%B6%E4%BF%A1%E6%81%AF bucketManager, err := bucket.GetBucketManager() if err != nil { - return nil, err + handler(nil, err) + return } ucHost := cfg.Hosts.GetOneUc() @@ -71,6 +103,68 @@ func allBuckets(cfg *config.Config, info ListApiInfo) ([]BucketInfo, *data.CodeE reqURL = fmt.Sprintf("%s&region=%s", reqURL, info.Region) } var buckets []BucketInfo - e := bucketManager.Client.CredentialedCall(workspace.GetContext(), bucketManager.Mac, auth.TokenQiniu, &buckets, "POST", reqURL, nil) - return buckets, data.ConvertError(e) + rErr := bucketManager.Client.CredentialedCall(workspace.GetContext(), bucketManager.Mac, auth.TokenQiniu, &buckets, "POST", reqURL, nil) + if rErr != nil { + handler(nil, data.ConvertError(rErr)) + return + } + + for _, b := range buckets { + handler(&b, nil) + } +} + +// allBucketsByPage 分页获取 Bucket +func allBucketsByPage(cfg *config.Config, info ListApiInfo, handler BucketHandler) { + if handler == nil { + return + } + + marker := info.Marker + for { + info.Marker = marker + resp, err := allBucketsOnePage(cfg, info) + if err != nil { + handler(nil, err) + break + } + + for _, b := range resp.Buckets { + handler(&b, nil) + } + + if !resp.IsTruncated || len(resp.NextMarker) == 0 { + break + } + + marker = resp.NextMarker + } +} + +func allBucketsOnePage(cfg *config.Config, info ListApiInfo) (*BucketsResponse, *data.CodeError) { + // 支持分页:https://github.com/qbox/product/blob/master/kodo/bucket/tblmgr.md#bucketsapiversionv4-%E6%94%AF%E6%8C%81%E5%88%86%E9%A1%B5%E8%BF%94%E5%9B%9E%E8%A1%A8%E4%BF%A1%E6%81%AF + bucketManager, err := bucket.GetBucketManager() + if err != nil { + return nil, err + } + + if info.Limit <= 0 { + info.Limit = 50 + } + + ucHost := cfg.Hosts.GetOneUc() + reqURL =
fmt.Sprintf("%s/buckets?apiVersion=v4&limit=%d", utils.Endpoint(cfg.UseHttps.Value(), ucHost), info.Limit) + if len(info.Region) > 0 { + reqURL = fmt.Sprintf("%s&region=%s", reqURL, info.Region) + } + if len(info.Marker) > 0 { + reqURL = fmt.Sprintf("%s&marker=%s", reqURL, info.Marker) + } + + var resp BucketsResponse + e := bucketManager.Client.CredentialedCall(workspace.GetContext(), bucketManager.Mac, auth.TokenQiniu, &resp, "GET", reqURL, nil) + if e != nil { + return nil, data.ConvertError(e) + } + return &resp, nil } diff --git a/iqshell/storage/servers/operations/bucket_list.go b/iqshell/storage/servers/operations/bucket_list.go index e5a9c398..badb46ce 100644 --- a/iqshell/storage/servers/operations/bucket_list.go +++ b/iqshell/storage/servers/operations/bucket_list.go @@ -7,17 +7,16 @@ import ( "github.com/qiniu/qshell/v2/iqshell/storage/servers" ) -type ListInfo struct { - servers.ListApiInfo - - Detail bool -} +type ListInfo servers.ListApiInfo func (info *ListInfo) Check() *data.CodeError { + if info.Limit <= 0 { + info.Limit = 50 + } return nil } -// List list 所有 bucket +// List 列举所有 bucket func List(cfg *iqshell.Config, info ListInfo) { if shouldContinue := iqshell.CheckAndLoad(cfg, iqshell.CheckAndLoadInfo{ Checker: &info, @@ -25,24 +24,22 @@ func List(cfg *iqshell.Config, info ListInfo) { return } - buckets, err := servers.AllBuckets(info.ListApiInfo) - if err != nil { - data.SetCmdStatusError() - log.ErrorF("Get buckets error: %v", err) - return - } else if len(buckets) == 0 { - log.Warning("No buckets found") - return - } + log.AlertF("%s", servers.BucketInfoDetailDescriptionStringFormat()) + servers.AllBuckets(servers.ListApiInfo(info), func(bucket *servers.BucketInfo, err *data.CodeError) { + if err != nil { + data.SetCmdStatusError() + log.ErrorF("Get buckets error: %v", err) + return + } - if info.Detail { - log.AlertF("%s", servers.BucketInfoDetailDescriptionStringFormat()) - for _, b := range buckets { - log.AlertF("%s", 
b.DetailDescriptionString()) + if bucket == nil { + return } - } else { - for _, b := range buckets { - log.AlertF("%s", b.DescriptionString()) + + if info.Detail { + log.AlertF("%s", bucket.DetailDescriptionString()) + } else { + log.AlertF("%s", bucket.DescriptionString()) } - } + }) }