From 4947f39e746b0aa7f9c6c8a3d4afc1168ad2eaa2 Mon Sep 17 00:00:00 2001 From: Zichao Lin Date: Sun, 25 Feb 2024 08:30:34 +0800 Subject: [PATCH] init --- .github/FUNDING.yml | 1 + .github/ISSUE_TEMPLATE/bug_report.md | 38 + .github/ISSUE_TEMPLATE/feature_request.md | 20 + .github/stale.yml | 61 + .github/workflows/build.yml | 31 + .github/workflows/docker-release.yml | 57 + .github/workflows/test.yml | 35 + .gitignore | 33 + .gitmodules | 3 + .goreleaser.yaml | 121 + Dockerfile | 17 + LICENSE | 674 ++++++ README.md | 37 + README_zh-CN.md | 104 + assets | 1 + bootstrap/app.go | 125 ++ bootstrap/constant/constant.go | 3 + bootstrap/embed.go | 432 ++++ bootstrap/fs.go | 75 + bootstrap/init.go | 133 ++ bootstrap/script.go | 18 + bootstrap/static.go | 136 ++ docker-compose.yml | 45 + go.mod | 180 ++ go.sum | 1494 ++++++++++++ main.go | 162 ++ middleware/auth.go | 323 +++ middleware/captcha.go | 127 ++ middleware/cluster.go | 62 + middleware/common.go | 77 + middleware/file.go | 30 + middleware/frontend.go | 84 + middleware/mock.go | 24 + middleware/session.go | 68 + middleware/share.go | 139 ++ middleware/wopi.go | 70 + models/defaults.go | 186 ++ models/dialects/dialect_sqlite.go | 288 +++ models/download.go | 128 ++ models/file.go | 525 +++++ models/folder.go | 365 +++ models/group.go | 89 + models/init.go | 106 + models/migration.go | 221 ++ models/node.go | 91 + models/order.go | 59 + models/policy.go | 267 +++ models/redeem.go | 27 + models/report.go | 21 + models/scripts/init.go | 10 + models/scripts/invoker/invoker.go | 38 + models/scripts/reset.go | 31 + models/scripts/storage.go | 33 + models/scripts/upgrade-pro.go | 22 + models/scripts/upgrade.go | 43 + models/setting.go | 110 + models/share.go | 280 +++ models/source_link.go | 47 + models/storage_pack.go | 91 + models/tag.go | 53 + models/task.go | 73 + models/user.go | 429 ++++ models/user_authn.go | 79 + models/webdav.go | 53 + paksource.sh | 4 + pkg/aria2/aria2.go | 67 + pkg/aria2/common/common.go | 119 + 
pkg/aria2/monitor/monitor.go | 320 +++ pkg/aria2/rpc/README.md | 257 +++ pkg/aria2/rpc/call.go | 274 +++ pkg/aria2/rpc/client.go | 656 ++++++ pkg/aria2/rpc/const.go | 39 + pkg/aria2/rpc/json2.go | 116 + pkg/aria2/rpc/notification.go | 44 + pkg/aria2/rpc/proc.go | 42 + pkg/aria2/rpc/proto.go | 40 + pkg/aria2/rpc/resp.go | 104 + pkg/auth/auth.go | 145 ++ pkg/auth/hmac.go | 54 + pkg/authn/auth.go | 16 + pkg/balancer/balancer.go | 15 + pkg/balancer/errors.go | 8 + pkg/balancer/roundrobin.go | 30 + pkg/cache/driver.go | 104 + pkg/cache/memo.go | 181 ++ pkg/cache/redis.go | 227 ++ pkg/cluster/controller.go | 210 ++ pkg/cluster/errors.go | 12 + pkg/cluster/master.go | 272 +++ pkg/cluster/node.go | 60 + pkg/cluster/pool.go | 213 ++ pkg/cluster/slave.go | 451 ++++ pkg/conf/conf.go | 156 ++ pkg/conf/defaults.go | 55 + pkg/conf/version.go | 22 + pkg/crontab/collect.go | 99 + pkg/crontab/init.go | 53 + pkg/crontab/vas.go | 83 + pkg/email/init.go | 52 + pkg/email/mail.go | 38 + pkg/email/smtp.go | 122 + pkg/email/template.go | 50 + pkg/filesystem/archive.go | 309 +++ pkg/filesystem/chunk/backoff/backoff.go | 74 + pkg/filesystem/chunk/chunk.go | 167 ++ pkg/filesystem/driver/cos/handler.go | 427 ++++ pkg/filesystem/driver/cos/scf.go | 134 ++ pkg/filesystem/driver/googledrive/client.go | 73 + pkg/filesystem/driver/googledrive/handler.go | 65 + pkg/filesystem/driver/googledrive/oauth.go | 154 ++ pkg/filesystem/driver/googledrive/types.go | 43 + pkg/filesystem/driver/handler.go | 52 + pkg/filesystem/driver/local/handler.go | 292 +++ pkg/filesystem/driver/onedrive/api.go | 595 +++++ pkg/filesystem/driver/onedrive/client.go | 78 + pkg/filesystem/driver/onedrive/handler.go | 238 ++ pkg/filesystem/driver/onedrive/lock.go | 25 + pkg/filesystem/driver/onedrive/oauth.go | 192 ++ pkg/filesystem/driver/onedrive/options.go | 59 + pkg/filesystem/driver/onedrive/types.go | 140 ++ pkg/filesystem/driver/oss/callback.go | 117 + pkg/filesystem/driver/oss/handler.go | 501 +++++ 
pkg/filesystem/driver/qiniu/handler.go | 354 +++ pkg/filesystem/driver/remote/client.go | 195 ++ pkg/filesystem/driver/remote/handler.go | 311 +++ pkg/filesystem/driver/s3/handler.go | 440 ++++ .../driver/shadow/masterinslave/errors.go | 7 + .../driver/shadow/masterinslave/handler.go | 60 + .../driver/shadow/slaveinmaster/errors.go | 9 + .../driver/shadow/slaveinmaster/handler.go | 124 + pkg/filesystem/driver/upyun/handler.go | 358 +++ pkg/filesystem/errors.go | 25 + pkg/filesystem/file.go | 387 ++++ pkg/filesystem/filesystem.go | 295 +++ pkg/filesystem/fsctx/context.go | 44 + pkg/filesystem/fsctx/stream.go | 123 + pkg/filesystem/hooks.go | 320 +++ pkg/filesystem/image.go | 218 ++ pkg/filesystem/manage.go | 479 ++++ pkg/filesystem/oauth/mutex.go | 25 + pkg/filesystem/oauth/token.go | 8 + pkg/filesystem/path.go | 84 + pkg/filesystem/relocate.go | 102 + pkg/filesystem/response/common.go | 32 + pkg/filesystem/tests/file1.txt | 0 pkg/filesystem/tests/file2.txt | 0 pkg/filesystem/tests/test.zip | Bin 0 -> 154 bytes pkg/filesystem/upload.go | 243 ++ pkg/filesystem/validator.go | 66 + pkg/hashid/hash.go | 70 + pkg/mocks/cachemock/mock.go | 37 + pkg/mocks/controllermock/c.go | 43 + pkg/mocks/mocks.go | 151 ++ pkg/mocks/remoteclientmock/mock.go | 33 + pkg/mocks/requestmock/request.go | 15 + pkg/mocks/thumbmock/thumb.go | 25 + pkg/mocks/wopimock/mock.go | 21 + pkg/mq/mq.go | 160 ++ pkg/payment/alipay.go | 43 + pkg/payment/custom.go | 93 + pkg/payment/order.go | 171 ++ pkg/payment/payjs.go | 31 + pkg/payment/purchase.go | 137 ++ pkg/payment/score.go | 45 + pkg/payment/wechat.go | 88 + pkg/qq/connect.go | 211 ++ pkg/recaptcha/recaptcha.go | 183 ++ pkg/request/options.go | 137 ++ pkg/request/request.go | 263 +++ pkg/request/tpslimiter.go | 39 + pkg/serializer/aria2.go | 117 + pkg/serializer/auth.go | 21 + pkg/serializer/error.go | 262 +++ pkg/serializer/explorer.go | 132 ++ pkg/serializer/response.go | 35 + pkg/serializer/setting.go | 113 + pkg/serializer/share.go | 139 ++ 
pkg/serializer/slave.go | 68 + pkg/serializer/upload.go | 64 + pkg/serializer/user.go | 172 ++ pkg/serializer/vas.go | 158 ++ pkg/sessionstore/kv.go | 136 ++ pkg/sessionstore/sessionstore.go | 22 + pkg/task/compress.go | 175 ++ pkg/task/decompress.go | 131 ++ pkg/task/errors.go | 8 + pkg/task/import.go | 220 ++ pkg/task/job.go | 127 ++ pkg/task/pool.go | 68 + pkg/task/recycle.go | 130 ++ pkg/task/relocate.go | 176 ++ pkg/task/slavetask/transfer.go | 138 ++ pkg/task/tranfer.go | 192 ++ pkg/task/worker.go | 44 + pkg/thumb/builtin.go | 193 ++ pkg/thumb/ffmpeg.go | 93 + pkg/thumb/libreoffice.go | 99 + pkg/thumb/pipeline.go | 122 + pkg/thumb/tester.go | 74 + pkg/thumb/vips.go | 78 + pkg/util/common.go | 124 + pkg/util/io.go | 46 + pkg/util/logger.go | 150 ++ pkg/util/path.go | 58 + pkg/util/session.go | 39 + pkg/util/ztool.go | 35 + pkg/vol/vol.go | 46 + pkg/webdav/file.go | 206 ++ pkg/webdav/if.go | 173 ++ pkg/webdav/internal/xml/README | 11 + pkg/webdav/internal/xml/marshal.go | 1223 ++++++++++ pkg/webdav/internal/xml/read.go | 692 ++++++ pkg/webdav/internal/xml/typeinfo.go | 371 +++ pkg/webdav/internal/xml/xml.go | 1998 +++++++++++++++++ pkg/webdav/lock.go | 445 ++++ pkg/webdav/prop.go | 530 +++++ pkg/webdav/webdav.go | 905 ++++++++ pkg/webdav/xml.go | 519 +++++ pkg/wopi/discovery.go | 101 + pkg/wopi/types.go | 70 + pkg/wopi/wopi.go | 224 ++ routers/controllers/admin.go | 596 +++++ routers/controllers/aria2.go | 97 + routers/controllers/callback.go | 140 ++ routers/controllers/directory.go | 28 + routers/controllers/file.go | 387 ++++ routers/controllers/main.go | 69 + routers/controllers/objects.go | 84 + routers/controllers/share.go | 259 +++ routers/controllers/site.go | 167 ++ routers/controllers/slave.go | 246 ++ routers/controllers/tag.go | 39 + routers/controllers/user.go | 445 ++++ routers/controllers/vas.go | 214 ++ routers/controllers/webdav.go | 138 ++ routers/controllers/wopi.go | 77 + routers/router.go | 864 +++++++ service/admin/aria2.go | 71 + 
service/admin/file.go | 208 ++ service/admin/group.go | 117 + service/admin/list.go | 22 + service/admin/node.go | 142 ++ service/admin/order.go | 75 + service/admin/policy.go | 360 +++ service/admin/report.go | 72 + service/admin/share.go | 80 + service/admin/site.go | 199 ++ service/admin/task.go | 159 ++ service/admin/user.go | 179 ++ service/admin/vas.go | 98 + service/aria2/add.go | 157 ++ service/aria2/manage.go | 172 ++ service/callback/oauth.go | 132 ++ service/callback/upload.go | 261 +++ service/explorer/directory.go | 73 + service/explorer/file.go | 537 +++++ service/explorer/objects.go | 529 +++++ service/explorer/search.go | 88 + service/explorer/slave.go | 193 ++ service/explorer/tag.go | 88 + service/explorer/upload.go | 298 +++ service/explorer/wopi.go | 138 ++ service/node/fabric.go | 77 + service/setting/webdav.go | 159 ++ service/share/manage.go | 152 ++ service/share/visit.go | 477 ++++ service/user/login.go | 205 ++ service/user/register.go | 129 ++ service/user/setting.go | 396 ++++ service/vas/purchase.go | 235 ++ service/vas/qq.go | 113 + service/vas/quota.go | 17 + update.md | 10 + 273 files changed, 45396 insertions(+) create mode 100644 .github/FUNDING.yml create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/stale.yml create mode 100644 .github/workflows/build.yml create mode 100644 .github/workflows/docker-release.yml create mode 100644 .github/workflows/test.yml create mode 100644 .gitignore create mode 100644 .gitmodules create mode 100644 .goreleaser.yaml create mode 100644 Dockerfile create mode 100644 LICENSE create mode 100644 README.md create mode 100644 README_zh-CN.md create mode 160000 assets create mode 100644 bootstrap/app.go create mode 100644 bootstrap/constant/constant.go create mode 100644 bootstrap/embed.go create mode 100644 bootstrap/fs.go create mode 100644 bootstrap/init.go create mode 100644 bootstrap/script.go create mode 
100644 bootstrap/static.go create mode 100644 docker-compose.yml create mode 100644 go.mod create mode 100644 go.sum create mode 100644 main.go create mode 100644 middleware/auth.go create mode 100644 middleware/captcha.go create mode 100644 middleware/cluster.go create mode 100644 middleware/common.go create mode 100644 middleware/file.go create mode 100644 middleware/frontend.go create mode 100644 middleware/mock.go create mode 100644 middleware/session.go create mode 100644 middleware/share.go create mode 100644 middleware/wopi.go create mode 100644 models/defaults.go create mode 100644 models/dialects/dialect_sqlite.go create mode 100644 models/download.go create mode 100644 models/file.go create mode 100644 models/folder.go create mode 100644 models/group.go create mode 100644 models/init.go create mode 100644 models/migration.go create mode 100644 models/node.go create mode 100644 models/order.go create mode 100644 models/policy.go create mode 100644 models/redeem.go create mode 100644 models/report.go create mode 100644 models/scripts/init.go create mode 100644 models/scripts/invoker/invoker.go create mode 100644 models/scripts/reset.go create mode 100644 models/scripts/storage.go create mode 100644 models/scripts/upgrade-pro.go create mode 100644 models/scripts/upgrade.go create mode 100644 models/setting.go create mode 100644 models/share.go create mode 100644 models/source_link.go create mode 100644 models/storage_pack.go create mode 100644 models/tag.go create mode 100644 models/task.go create mode 100644 models/user.go create mode 100644 models/user_authn.go create mode 100644 models/webdav.go create mode 100644 paksource.sh create mode 100644 pkg/aria2/aria2.go create mode 100644 pkg/aria2/common/common.go create mode 100644 pkg/aria2/monitor/monitor.go create mode 100644 pkg/aria2/rpc/README.md create mode 100644 pkg/aria2/rpc/call.go create mode 100644 pkg/aria2/rpc/client.go create mode 100644 pkg/aria2/rpc/const.go create mode 100644 
pkg/aria2/rpc/json2.go create mode 100644 pkg/aria2/rpc/notification.go create mode 100644 pkg/aria2/rpc/proc.go create mode 100644 pkg/aria2/rpc/proto.go create mode 100644 pkg/aria2/rpc/resp.go create mode 100644 pkg/auth/auth.go create mode 100644 pkg/auth/hmac.go create mode 100644 pkg/authn/auth.go create mode 100644 pkg/balancer/balancer.go create mode 100644 pkg/balancer/errors.go create mode 100644 pkg/balancer/roundrobin.go create mode 100644 pkg/cache/driver.go create mode 100644 pkg/cache/memo.go create mode 100644 pkg/cache/redis.go create mode 100644 pkg/cluster/controller.go create mode 100644 pkg/cluster/errors.go create mode 100644 pkg/cluster/master.go create mode 100644 pkg/cluster/node.go create mode 100644 pkg/cluster/pool.go create mode 100644 pkg/cluster/slave.go create mode 100644 pkg/conf/conf.go create mode 100644 pkg/conf/defaults.go create mode 100644 pkg/conf/version.go create mode 100644 pkg/crontab/collect.go create mode 100644 pkg/crontab/init.go create mode 100644 pkg/crontab/vas.go create mode 100644 pkg/email/init.go create mode 100644 pkg/email/mail.go create mode 100644 pkg/email/smtp.go create mode 100644 pkg/email/template.go create mode 100644 pkg/filesystem/archive.go create mode 100644 pkg/filesystem/chunk/backoff/backoff.go create mode 100644 pkg/filesystem/chunk/chunk.go create mode 100644 pkg/filesystem/driver/cos/handler.go create mode 100644 pkg/filesystem/driver/cos/scf.go create mode 100644 pkg/filesystem/driver/googledrive/client.go create mode 100644 pkg/filesystem/driver/googledrive/handler.go create mode 100644 pkg/filesystem/driver/googledrive/oauth.go create mode 100644 pkg/filesystem/driver/googledrive/types.go create mode 100644 pkg/filesystem/driver/handler.go create mode 100644 pkg/filesystem/driver/local/handler.go create mode 100644 pkg/filesystem/driver/onedrive/api.go create mode 100644 pkg/filesystem/driver/onedrive/client.go create mode 100644 pkg/filesystem/driver/onedrive/handler.go create mode 
100644 pkg/filesystem/driver/onedrive/lock.go create mode 100644 pkg/filesystem/driver/onedrive/oauth.go create mode 100644 pkg/filesystem/driver/onedrive/options.go create mode 100644 pkg/filesystem/driver/onedrive/types.go create mode 100644 pkg/filesystem/driver/oss/callback.go create mode 100644 pkg/filesystem/driver/oss/handler.go create mode 100644 pkg/filesystem/driver/qiniu/handler.go create mode 100644 pkg/filesystem/driver/remote/client.go create mode 100644 pkg/filesystem/driver/remote/handler.go create mode 100644 pkg/filesystem/driver/s3/handler.go create mode 100644 pkg/filesystem/driver/shadow/masterinslave/errors.go create mode 100644 pkg/filesystem/driver/shadow/masterinslave/handler.go create mode 100644 pkg/filesystem/driver/shadow/slaveinmaster/errors.go create mode 100644 pkg/filesystem/driver/shadow/slaveinmaster/handler.go create mode 100644 pkg/filesystem/driver/upyun/handler.go create mode 100644 pkg/filesystem/errors.go create mode 100644 pkg/filesystem/file.go create mode 100644 pkg/filesystem/filesystem.go create mode 100644 pkg/filesystem/fsctx/context.go create mode 100644 pkg/filesystem/fsctx/stream.go create mode 100644 pkg/filesystem/hooks.go create mode 100644 pkg/filesystem/image.go create mode 100644 pkg/filesystem/manage.go create mode 100644 pkg/filesystem/oauth/mutex.go create mode 100644 pkg/filesystem/oauth/token.go create mode 100644 pkg/filesystem/path.go create mode 100644 pkg/filesystem/relocate.go create mode 100644 pkg/filesystem/response/common.go create mode 100644 pkg/filesystem/tests/file1.txt create mode 100644 pkg/filesystem/tests/file2.txt create mode 100644 pkg/filesystem/tests/test.zip create mode 100644 pkg/filesystem/upload.go create mode 100644 pkg/filesystem/validator.go create mode 100644 pkg/hashid/hash.go create mode 100644 pkg/mocks/cachemock/mock.go create mode 100644 pkg/mocks/controllermock/c.go create mode 100644 pkg/mocks/mocks.go create mode 100644 pkg/mocks/remoteclientmock/mock.go create mode 
100644 pkg/mocks/requestmock/request.go create mode 100644 pkg/mocks/thumbmock/thumb.go create mode 100644 pkg/mocks/wopimock/mock.go create mode 100644 pkg/mq/mq.go create mode 100644 pkg/payment/alipay.go create mode 100644 pkg/payment/custom.go create mode 100644 pkg/payment/order.go create mode 100644 pkg/payment/payjs.go create mode 100644 pkg/payment/purchase.go create mode 100644 pkg/payment/score.go create mode 100644 pkg/payment/wechat.go create mode 100644 pkg/qq/connect.go create mode 100644 pkg/recaptcha/recaptcha.go create mode 100644 pkg/request/options.go create mode 100644 pkg/request/request.go create mode 100644 pkg/request/tpslimiter.go create mode 100644 pkg/serializer/aria2.go create mode 100644 pkg/serializer/auth.go create mode 100644 pkg/serializer/error.go create mode 100644 pkg/serializer/explorer.go create mode 100644 pkg/serializer/response.go create mode 100644 pkg/serializer/setting.go create mode 100644 pkg/serializer/share.go create mode 100644 pkg/serializer/slave.go create mode 100644 pkg/serializer/upload.go create mode 100644 pkg/serializer/user.go create mode 100644 pkg/serializer/vas.go create mode 100644 pkg/sessionstore/kv.go create mode 100644 pkg/sessionstore/sessionstore.go create mode 100644 pkg/task/compress.go create mode 100644 pkg/task/decompress.go create mode 100644 pkg/task/errors.go create mode 100644 pkg/task/import.go create mode 100644 pkg/task/job.go create mode 100644 pkg/task/pool.go create mode 100644 pkg/task/recycle.go create mode 100644 pkg/task/relocate.go create mode 100644 pkg/task/slavetask/transfer.go create mode 100644 pkg/task/tranfer.go create mode 100644 pkg/task/worker.go create mode 100644 pkg/thumb/builtin.go create mode 100644 pkg/thumb/ffmpeg.go create mode 100644 pkg/thumb/libreoffice.go create mode 100644 pkg/thumb/pipeline.go create mode 100644 pkg/thumb/tester.go create mode 100644 pkg/thumb/vips.go create mode 100644 pkg/util/common.go create mode 100644 pkg/util/io.go create mode 
100644 pkg/util/logger.go create mode 100644 pkg/util/path.go create mode 100644 pkg/util/session.go create mode 100644 pkg/util/ztool.go create mode 100644 pkg/vol/vol.go create mode 100644 pkg/webdav/file.go create mode 100644 pkg/webdav/if.go create mode 100644 pkg/webdav/internal/xml/README create mode 100644 pkg/webdav/internal/xml/marshal.go create mode 100644 pkg/webdav/internal/xml/read.go create mode 100644 pkg/webdav/internal/xml/typeinfo.go create mode 100644 pkg/webdav/internal/xml/xml.go create mode 100644 pkg/webdav/lock.go create mode 100644 pkg/webdav/prop.go create mode 100644 pkg/webdav/webdav.go create mode 100644 pkg/webdav/xml.go create mode 100644 pkg/wopi/discovery.go create mode 100644 pkg/wopi/types.go create mode 100644 pkg/wopi/wopi.go create mode 100644 routers/controllers/admin.go create mode 100644 routers/controllers/aria2.go create mode 100644 routers/controllers/callback.go create mode 100644 routers/controllers/directory.go create mode 100644 routers/controllers/file.go create mode 100644 routers/controllers/main.go create mode 100644 routers/controllers/objects.go create mode 100644 routers/controllers/share.go create mode 100644 routers/controllers/site.go create mode 100644 routers/controllers/slave.go create mode 100644 routers/controllers/tag.go create mode 100644 routers/controllers/user.go create mode 100644 routers/controllers/vas.go create mode 100644 routers/controllers/webdav.go create mode 100644 routers/controllers/wopi.go create mode 100644 routers/router.go create mode 100644 service/admin/aria2.go create mode 100644 service/admin/file.go create mode 100644 service/admin/group.go create mode 100644 service/admin/list.go create mode 100644 service/admin/node.go create mode 100644 service/admin/order.go create mode 100644 service/admin/policy.go create mode 100644 service/admin/report.go create mode 100644 service/admin/share.go create mode 100644 service/admin/site.go create mode 100644 service/admin/task.go create 
mode 100644 service/admin/user.go create mode 100644 service/admin/vas.go create mode 100644 service/aria2/add.go create mode 100644 service/aria2/manage.go create mode 100644 service/callback/oauth.go create mode 100644 service/callback/upload.go create mode 100644 service/explorer/directory.go create mode 100644 service/explorer/file.go create mode 100644 service/explorer/objects.go create mode 100644 service/explorer/search.go create mode 100644 service/explorer/slave.go create mode 100644 service/explorer/tag.go create mode 100644 service/explorer/upload.go create mode 100644 service/explorer/wopi.go create mode 100644 service/node/fabric.go create mode 100644 service/setting/webdav.go create mode 100644 service/share/manage.go create mode 100644 service/share/visit.go create mode 100644 service/user/login.go create mode 100644 service/user/register.go create mode 100644 service/user/setting.go create mode 100644 service/vas/purchase.go create mode 100644 service/vas/qq.go create mode 100644 service/vas/quota.go create mode 100644 update.md diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 0000000..f6be209 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1 @@ +custom: ["https://cloudreve.org/pricing"] diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..dd84ea7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,38 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. 
+ +**Desktop (please complete the following information):** + - OS: [e.g. iOS] + - Browser [e.g. chrome, safari] + - Version [e.g. 22] + +**Smartphone (please complete the following information):** + - Device: [e.g. iPhone6] + - OS: [e.g. iOS8.1] + - Browser [e.g. stock browser, safari] + - Version [e.g. 22] + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..bbcbbe7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/stale.yml b/.github/stale.yml new file mode 100644 index 0000000..e998aaa --- /dev/null +++ b/.github/stale.yml @@ -0,0 +1,61 @@ +# Configuration for probot-stale - https://github.com/probot/stale + +# Number of days of inactivity before an Issue or Pull Request becomes stale +daysUntilStale: 360 + +# Number of days of inactivity before an Issue or Pull Request with the stale label is closed. +# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. +daysUntilClose: 30 + +# Only issues or pull requests with all of these labels are check if stale. Defaults to `[]` (disabled) +onlyLabels: [] + +# Issues or Pull Requests with these labels will never be considered stale. 
Set to `[]` to disable +exemptLabels: + - pinned + - security + - "[Status] Maybe Later" + +# Set to true to ignore issues in a project (defaults to false) +exemptProjects: true + +# Set to true to ignore issues in a milestone (defaults to false) +exemptMilestones: true + +# Set to true to ignore issues with an assignee (defaults to false) +exemptAssignees: true + +# Label to use when marking as stale +staleLabel: wontfix + +# Comment to post when marking as stale. Set to `false` to disable +markComment: > + This issue has been automatically marked as stale because it has not had + recent activity. It will be closed if no further activity occurs. Thank you + for your contributions. + +# Comment to post when removing the stale label. +# unmarkComment: > +# Your comment here. + +# Comment to post when closing a stale Issue or Pull Request. +# closeComment: > +# Your comment here. + +# Limit the number of actions per hour, from 1-30. Default is 30 +limitPerRun: 30 + +# Limit to only `issues` or `pulls` +# only: issues + +# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls': +# pulls: +# daysUntilStale: 30 +# markComment: > +# This pull request has been automatically marked as stale because it has not had +# recent activity. It will be closed if no further activity occurs. Thank you +# for your contributions. 
+ +# issues: +# exemptLabels: +# - confirmed diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..21a187c --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,31 @@ +name: Build + +on: workflow_dispatch + +jobs: + build: + name: Build + runs-on: ubuntu-latest + steps: + - name: Set up Go 1.20 + uses: actions/setup-go@v2 + with: + go-version: "1.20" + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + with: + clean: false + submodules: "recursive" + - run: | + git fetch --prune --unshallow --tags + + - name: Build and Release + uses: goreleaser/goreleaser-action@v4 + with: + distribution: goreleaser + version: latest + args: release --clean --skip-validate + env: + GITHUB_TOKEN: ${{ secrets.RELEASE_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml new file mode 100644 index 0000000..6788ea1 --- /dev/null +++ b/.github/workflows/docker-release.yml @@ -0,0 +1,57 @@ +name: Build and push docker image + +on: + push: + tags: + - 3.* # triggered on every push with tag 3.* + workflow_dispatch: # or just on button clicked + +jobs: + docker-build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - run: git fetch --prune --unshallow + - name: Setup Environments + id: envs + run: | + CLOUDREVE_LATEST_TAG=$(git describe --tags --abbrev=0) + DOCKER_IMAGE="cloudreve/cloudreve" + + echo "RELEASE_VERSION=${GITHUB_REF#refs}" + TAGS="${DOCKER_IMAGE}:latest,${DOCKER_IMAGE}:${CLOUDREVE_LATEST_TAG}" + + echo "CLOUDREVE_LATEST_TAG:${CLOUDREVE_LATEST_TAG}" + echo ::set-output name=tags::${TAGS} + - name: Setup QEMU Emulator + uses: docker/setup-qemu-action@master + with: + platforms: all + - name: Setup Docker Buildx Command + id: buildx + uses: docker/setup-buildx-action@master + - name: Login to Dockerhub + uses: docker/login-action@v1 + with: + username: ${{ 
secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + - name: Build Docker Image and Push + id: docker_build + uses: docker/build-push-action@v2 + with: + push: true + builder: ${{ steps.buildx.outputs.name }} + context: . + file: ./Dockerfile + platforms: linux/amd64,linux/arm64,linux/arm/v7 + tags: ${{ steps.envs.outputs.tags }} + - name: Update Docker Hub Description + uses: peter-evans/dockerhub-description@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + repository: cloudreve/cloudreve + short-description: ${{ github.event.repository.description }} + - name: Image Digest + run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..17a6ecf --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,35 @@ +name: Test + +on: + pull_request: + branches: + - master + push: + branches: [master] + +jobs: + test: + name: Test + runs-on: ubuntu-latest + steps: + - name: Set up Go 1.20 + uses: actions/setup-go@v2 + with: + go-version: "1.20" + id: go + + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + with: + submodules: "recursive" + + - name: Build static files + run: | + mkdir assets/build + touch assets/build/test.html + + - name: Test + run: go test -coverprofile=coverage.txt -covermode=atomic ./... 
+ + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v2 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e99f29b --- /dev/null +++ b/.gitignore @@ -0,0 +1,33 @@ +# Binaries for programs and plugins +cloudreve +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.db +*.bin +/release/ +assets.zip + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Development enviroment +.idea/* +uploads/* +temp + +# Version control +version.lock + +# Config file +*.ini +conf/conf.ini +/statik/ +.vscode/ + +dist/ diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..5254225 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "assets"] + path = assets + url = https://git.loliquq.cn/earthjasonlin/CloudrevePlus-frontend.git diff --git a/.goreleaser.yaml b/.goreleaser.yaml new file mode 100644 index 0000000..8631ace --- /dev/null +++ b/.goreleaser.yaml @@ -0,0 +1,121 @@ +env: + - CI=false + - GENERATE_SOURCEMAP=false +before: + hooks: + - go mod tidy + - sh -c "cd assets && rm -rf build && yarn install --network-timeout 1000000 && yarn run build && cd ../ && zip -r - assets/build >assets.zip" +builds: + - + env: + - CGO_ENABLED=0 + + binary: cloudreve + + ldflags: + - -X 'github.com/cloudreve/Cloudreve/v3/pkg/conf.BackendVersion={{.Tag}}' -X 'github.com/cloudreve/Cloudreve/v3/pkg/conf.LastCommit={{.ShortCommit}}' + + goos: + - linux + - windows + - darwin + + goarch: + - amd64 + - arm + - arm64 + + goarm: + - 5 + - 6 + - 7 + + ignore: + - goos: windows + goarm: 5 + - goos: windows + goarm: 6 + - goos: windows + goarm: 7 + +archives: + - format: tar.gz + # this name template makes the OS and Arch compatible with the results of uname. 
+ name_template: >- + cloudreve_{{.Tag}}_{{- .Os }}_{{ .Arch }} + {{- if .Arm }}v{{ .Arm }}{{ end }} + # use zip for windows archives + format_overrides: + - goos: windows + format: zip +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' + +release: + draft: true + prerelease: auto + target_commitish: '{{ .Commit }}' + name_template: "{{.Version}}" + +dockers: + - + dockerfile: Dockerfile + use: buildx + build_flag_templates: + - "--platform=linux/amd64" + goos: linux + goarch: amd64 + goamd64: v1 + image_templates: + - "cloudreve/cloudreve:{{ .Tag }}-amd64" + - + dockerfile: Dockerfile + use: buildx + build_flag_templates: + - "--platform=linux/arm64" + goos: linux + goarch: arm64 + image_templates: + - "cloudreve/cloudreve:{{ .Tag }}-arm64" + - + dockerfile: Dockerfile + use: buildx + build_flag_templates: + - "--platform=linux/arm/v6" + goos: linux + goarch: arm + goarm: '6' + image_templates: + - "cloudreve/cloudreve:{{ .Tag }}-armv6" + - + dockerfile: Dockerfile + use: buildx + build_flag_templates: + - "--platform=linux/arm/v7" + goos: linux + goarch: arm + goarm: '7' + image_templates: + - "cloudreve/cloudreve:{{ .Tag }}-armv7" + +docker_manifests: + - name_template: "cloudreve/cloudreve:latest" + image_templates: + - "cloudreve/cloudreve:{{ .Tag }}-amd64" + - "cloudreve/cloudreve:{{ .Tag }}-arm64" + - "cloudreve/cloudreve:{{ .Tag }}-armv6" + - "cloudreve/cloudreve:{{ .Tag }}-armv7" + - name_template: "cloudreve/cloudreve:{{ .Tag }}" + image_templates: + - "cloudreve/cloudreve:{{ .Tag }}-amd64" + - "cloudreve/cloudreve:{{ .Tag }}-arm64" + - "cloudreve/cloudreve:{{ .Tag }}-armv6" + - "cloudreve/cloudreve:{{ .Tag }}-armv7" \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..7b2f5ae --- /dev/null +++ b/Dockerfile @@ -0,0 +1,17 @@ +FROM alpine:latest + +WORKDIR /cloudreve +COPY cloudreve 
./cloudreve + +RUN apk update \ + && apk add --no-cache tzdata \ + && cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime \ + && echo "Asia/Shanghai" > /etc/timezone \ + && chmod +x ./cloudreve \ + && mkdir -p /data/aria2 \ + && chmod -R 766 /data/aria2 + +EXPOSE 5212 +VOLUME ["/cloudreve/uploads", "/cloudreve/avatar", "/data"] + +ENTRYPOINT ["./cloudreve"] diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..20d40b6 --- /dev/null +++ b/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. 
Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. 
To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. 
If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. 
This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
\ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..d0aa22e --- /dev/null +++ b/README.md @@ -0,0 +1,37 @@ +## CloudrevePlus +### 简介 ++ 🌩 支持多家云存储的云盘系统 ++ 基于 [3.8.3开源版本](https://github.com/cloudreve/Cloudreve/releases/tag/3.8.3) 二次开发 ++ 拉取主线最新版源码 ++ 更新依赖至较新版本 ++ 合并部分pr + - [frontend#167](https://github.com/cloudreve/frontend/pull/167) + - [backend#1911](https://github.com/cloudreve/Cloudreve/pull/1911) + - [backend#1949](https://github.com/cloudreve/Cloudreve/pull/1949) ++ 修复部分已知Bug ++ 添加一些实用功能 + +### 使用 ++ 无需修改启动脚本,正常运行即可 ++ 使用原有社区版数据库需备份后执行以下命令: + ``` + ./cloudreveplus --database-script OSSToPlus + ``` + +### 编译 ++ 还是如果不需要修改前端,直接构建后端即可,前端包已预置 ++ 前端 + - 环境:NodeJS v16.20 * + - 进入 assets 目录:`cd assets` + - 安装依赖:`yarn install` * + - 构建静态:`yarn build` * + - 打包文件:`bash pakstatics.sh` + - (注:包管理器一定要用yarn,否则会报错) ++ 后端 + - 环境:Golang >= 1.18,越新越好 + - 进入源码目录 + - 构建程序:`go build -ldflags "-s -w" -tags "go_json" .` + +### 其它 ++ 未经完整测试,建议不要用于生产环境 ++ “仅供交流学习使用,严禁用于非法目的,否则造成一切后果自负” diff --git a/README_zh-CN.md b/README_zh-CN.md new file mode 100644 index 0000000..2e91895 --- /dev/null +++ b/README_zh-CN.md @@ -0,0 +1,104 @@ +[English Version](https://github.com/cloudreve/Cloudreve/blob/master/README.md) + +

+
+ +
+ Cloudreve +
+

+ +

支持多家云存储驱动的公有云文件系统。

+ +

+ + GitHub Test Workflow + + + + + + + + + + + +

+

+ 主页 • + 演示站 • + 讨论社区 • + 文档 • + 下载 • + Telegram 群组 • + 许可证 +

+ + +![Screenshot](https://raw.githubusercontent.com/cloudreve/docs/master/images/homepage.png) + +## :sparkles: 特性 + +* :cloud: 支持本机、从机、七牛、阿里云 OSS、腾讯云 COS、又拍云、OneDrive (包括世纪互联版) 、S3兼容协议 作为存储端 +* :outbox_tray: 上传/下载 支持客户端直传,支持下载限速 +* 💾 可对接 Aria2 离线下载,可使用多个从机节点分担下载任务 +* 📚 在线 压缩/解压缩、多文件打包下载 +* 💻 覆盖全部存储策略的 WebDAV 协议支持 +* :zap: 拖拽上传、目录上传、流式上传处理 +* :card_file_box: 文件拖拽管理 +* :family_woman_girl_boy: 多用户、用户组、多存储策略 +* :link: 创建文件、目录的分享链接,可设定自动过期 +* :eye_speech_bubble: 视频、图像、音频、 ePub 在线预览,文本、Office 文档在线编辑 +* :art: 自定义配色、黑暗模式、PWA 应用、全站单页应用、国际化支持 +* :rocket: All-In-One 打包,开箱即用 +* 🌈 ... ... + +## :hammer_and_wrench: 部署 + +下载适用于您目标机器操作系统、CPU架构的主程序,直接运行即可。 + +```shell +# 解压程序包 +tar -zxvf cloudreve_VERSION_OS_ARCH.tar.gz + +# 赋予执行权限 +chmod +x ./cloudreve + +# 启动 Cloudreve +./cloudreve +``` + +以上为最简单的部署示例,您可以参考 [文档 - 起步](https://docs.cloudreve.org/) 进行更为完善的部署。 + +## :gear: 构建 + +自行构建前需要拥有 `Go >= 1.18`、`node.js`、`yarn`、`zip`, [goreleaser](https://goreleaser.com/intro/) 等必要依赖。 + +#### 安装 goreleaser + +```shell +go install github.com/goreleaser/goreleaser@latest +``` + +#### 克隆代码 + +```shell +git clone --recurse-submodules https://github.com/cloudreve/Cloudreve.git +``` + +#### 编译项目 + +```shell +goreleaser build --clean --single-target --snapshot +``` + +## :alembic: 技术栈 + +* [Go](https://golang.org/) + [Gin](https://github.com/gin-gonic/gin) +* [React](https://github.com/facebook/react) + [Redux](https://github.com/reduxjs/redux) + [Material-UI](https://github.com/mui-org/material-ui) + +## :scroll: 许可证 + +GPL V3 diff --git a/assets b/assets new file mode 160000 index 0000000..20c1fa0 --- /dev/null +++ b/assets @@ -0,0 +1 @@ +Subproject commit 20c1fa08dc78024a19242ca67431aae61cb4eebd diff --git a/bootstrap/app.go b/bootstrap/app.go new file mode 100644 index 0000000..d9d1470 --- /dev/null +++ b/bootstrap/app.go @@ -0,0 +1,125 @@ +package bootstrap + +import ( + // "bytes" + // "crypto/aes" + // "crypto/cipher" + // "encoding/gob" + // "encoding/json" + "fmt" + // "io/ioutil" + // 
"os" + // "strconv" + + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/vol" + + // "github.com/cloudreve/Cloudreve/v3/pkg/request" + // "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gin-gonic/gin" +) + +var matrix []byte +var APPID string + +// InitApplication 初始化应用常量 +func InitApplication() { + fmt.Print(` + ___ _ _ + / __\ | ___ _ _ __| |_ __ _____ _____ + / / | |/ _ \| | | |/ _ | '__/ _ \ \ / / _ \ +/ /___| | (_) | |_| | (_| | | | __/\ V / __/ +\____/|_|\___/ \__,_|\__,_|_| \___| \_/ \___| + + V` + conf.BackendVersion + ` Commit #` + conf.LastCommit + ` Plus=` + conf.IsPlus + ` +================================================ + +`) + // data, err := ioutil.ReadFile(util.RelativePath(string([]byte{107, 101, 121, 46, 98, 105, 110}))) + // if err != nil { + // util.Log().Panic("%s", err) + // } + + //table := deSign(data) + //constant.HashIDTable = table["table"].([]int) + //APPID = table["id"].(string) + //matrix = table["pic"].([]byte) + APPID = `2018110303550773058` + matrix = []byte{137, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13, 73, 72, 68, 82, 0, 0, 2, 10, 0, 0, 1, 16, 8, 6, 0, 0, 0, 209, 100, 92, 53, 0, 0, 6, 112, 73, 68, 65, 84, 120, 156, 237, 214, 209, 142, 212, 54, 0, 64, 81, 187, 223, 4, 69, 162, 255, 255, 93, 230, 97, 50, 187, 147, 196, 119, 89, 84, 169, 165, 234, 57, 66, 66, 12, 153, 141, 99, 59, 222, 59, 191, 126, 251, 107, 141, 15, 173, 177, 214, 28, 99, 172, 49, 231, 124, 255, 116, 141, 49, 142, 127, 206, 211, 135, 99, 172, 57, 199, 124, 253, 254, 241, 175, 247, 207, 158, 151, 174, 49, 94, 175, 93, 107, 60, 110, 53, 199, 203, 173, 198, 26, 235, 254, 217, 26, 99, 30, 223, 63, 127, 56, 199, 120, 25, 219, 219, 253, 47, 151, 62, 199, 186, 249, 161, 99, 141, 251, 248, 231, 186, 223, 107, 141, 57, 142, 63, 207, 75, 199, 115, 50, 207, 227, 31, 99, 158, 198, 244, 28, 234, 58, 223, 107, 55, 166, 199, 211, 31, 227, 127, 157, 255, 199, 55, 215, 188, 204, 255, 28, 227, 50, 211, 99, 173, 121, 251, 248, 121, 233, 
245, 251, 235, 184, 240, 190, 126, 231, 89, 121, 60, 235, 121, 79, 188, 173, 255, 120, 93, 171, 143, 238, 191, 110, 123, 229, 88, 213, 235, 19, 92, 198, 52, 114, 82, 31, 207, 243, 153, 231, 127, 95, 207, 211, 253, 215, 126, 255, 94, 215, 127, 191, 84, 253, 252, 99, 94, 118, 213, 118, 169, 30, 243, 113, 29, 211, 110, 254, 31, 235, 127, 25, 192, 118, 81, 251, 253, 217, 191, 127, 113, 255, 117, 93, 235, 113, 159, 255, 231, 11, 112, 154, 148, 15, 246, 255, 230, 249, 55, 155, 242, 56, 127, 110, 175, 197, 126, 255, 205, 57, 206, 75, 117, 188, 63, 215, 93, 241, 193, 249, 51, 175, 247, 223, 158, 95, 235, 184, 199, 79, 95, 170, 237, 248, 247, 75, 85, 251, 239, 57, 212, 203, 250, 199, 253, 215, 237, 252, 252, 236, 82, 237, 222, 191, 62, 191, 119, 231, 199, 120, 94, 121, 157, 255, 203, 248, 173, 223, 63, 181, 126, 239, 191, 107, 62, 241, 2, 30, 99, 157, 183, 115, 109, 126, 249, 246, 125, 237, 254, 227, 250, 178, 239, 39, 225, 163, 75, 47, 155, 224, 237, 192, 189, 140, 247, 111, 77, 194, 175, 109, 162, 253, 38, 156, 247, 159, 154, 191, 4, 206, 7, 240, 207, 54, 193, 53, 2, 238, 191, 196, 226, 16, 61, 14, 246, 243, 237, 118, 27, 246, 119, 142, 184, 205, 33, 178, 221, 152, 247, 251, 111, 131, 173, 94, 226, 205, 47, 241, 109, 196, 125, 244, 18, 237, 38, 96, 179, 169, 183, 251, 247, 250, 11, 244, 229, 251, 187, 3, 119, 31, 140, 215, 91, 109, 158, 255, 109, 175, 238, 46, 189, 63, 255, 24, 243, 118, 136, 61, 246, 218, 252, 220, 253, 71, 29, 120, 191, 16, 236, 231, 31, 26, 247, 223, 205, 223, 216, 30, 10, 255, 233, 253, 191, 157, 191, 62, 127, 106, 78, 119, 231, 215, 245, 210, 218, 255, 189, 127, 199, 245, 195, 199, 223, 187, 243, 123, 156, 173, 99, 173, 230, 237, 251, 251, 253, 119, 63, 171, 227, 253, 25, 251, 181, 218, 142, 127, 140, 177, 198, 206, 203, 92, 175, 245, 54, 203, 251, 107, 199, 237, 217, 120, 55, 191, 254, 249, 189, 230, 13, 0, 248, 159, 251, 227, 223, 30, 0, 0, 240, 251, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 
0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 
64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 18, 10, 0, 64, 250, 1, 122, 83, 2, 40, 214, 38, 234, 140, 0, 0, 0, 0, 73, 69, 78, 68, 174, 
66, 96, 130} + vol.ClientSecret = APPID +} + +// InitCustomRoute 初始化自定义路由 +func InitCustomRoute(group *gin.RouterGroup) { + group.GET(string([]byte{98, 103}), func(c *gin.Context) { + c.Header("content-type", "image/png") + c.Writer.Write(matrix) + }) + group.GET("id", func(c *gin.Context) { + c.String(200, APPID) + }) +} + +// func deSign(data []byte) map[string]interface{} { +// res := decode(data, seed()) +// dec := gob.NewDecoder(bytes.NewReader(res)) +// obj := map[string]interface{}{} +// err := dec.Decode(&obj) +// if err != nil { +// util.Log().Panic("您仍在使用旧版的授权文件,请前往 https://pro.cloudreve.org/ 登录下载最新的授权文件") +// os.Exit(-1) +// } +// return checkKeyUpdate(obj) +// } + +// func checkKeyUpdate(table map[string]interface{}) map[string]interface{} { +// if table["version"].(string) != conf.KeyVersion { +// util.Log().Info("正在自动更新授权文件...") +// reqBody := map[string]string{ +// "secret": table["secret"].(string), +// "id": table["id"].(string), +// } +// reqBodyString, _ := json.Marshal(reqBody) +// client := request.NewClient() +// resp := client.Request("POST", "https://pro.cloudreve.org/Api/UpdateKey", +// bytes.NewReader(reqBodyString)).CheckHTTPResponse(200) +// if resp.Err != nil { +// util.Log().Panic("授权文件更新失败, %s", resp.Err) +// } +// keyContent, _ := ioutil.ReadAll(resp.Response.Body) +// ioutil.WriteFile(util.RelativePath(string([]byte{107, 101, 121, 46, 98, 105, 110})), keyContent, os.ModePerm) + +// return deSign(keyContent) +// } + +// return table +// } + +// func seed() []byte { +// res := []int{8} +// s := "20210323" +// m := 1 << 20 +// a := 9 +// b := 7 +// for i := 1; i < 23; i++ { +// res = append(res, (a*res[i-1]+b)%m) +// s += strconv.Itoa(res[i]) +// } +// return []byte(s) +// } + +// func decode(cryted []byte, key []byte) []byte { +// block, _ := aes.NewCipher(key[:32]) +// blockSize := block.BlockSize() +// blockMode := cipher.NewCBCDecrypter(block, key[:blockSize]) +// orig := make([]byte, len(cryted)) +// blockMode.CryptBlocks(orig, 
cryted) +// orig = pKCS7UnPadding(orig) +// return orig +// } + +// func pKCS7UnPadding(origData []byte) []byte { +// length := len(origData) +// unpadding := int(origData[length-1]) +// return origData[:(length - unpadding)] +// } diff --git a/bootstrap/constant/constant.go b/bootstrap/constant/constant.go new file mode 100644 index 0000000..0d78500 --- /dev/null +++ b/bootstrap/constant/constant.go @@ -0,0 +1,3 @@ +package constant + +// var HashIDTable = []int{0, 1, 2, 3, 4, 5} diff --git a/bootstrap/embed.go b/bootstrap/embed.go new file mode 100644 index 0000000..71f7567 --- /dev/null +++ b/bootstrap/embed.go @@ -0,0 +1,432 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package embed provides access to files embedded in the running Go program. +// +// Go source files that import "embed" can use the //go:embed directive +// to initialize a variable of type string, []byte, or FS with the contents of +// files read from the package directory or subdirectories at compile time. +// +// For example, here are three ways to embed a file named hello.txt +// and then print its contents at run time. +// +// Embedding one file into a string: +// +// import _ "embed" +// +// //go:embed hello.txt +// var s string +// print(s) +// +// Embedding one file into a slice of bytes: +// +// import _ "embed" +// +// //go:embed hello.txt +// var b []byte +// print(string(b)) +// +// Embedded one or more files into a file system: +// +// import "embed" +// +// //go:embed hello.txt +// var f embed.FS +// data, _ := f.ReadFile("hello.txt") +// print(string(data)) +// +// # Directives +// +// A //go:embed directive above a variable declaration specifies which files to embed, +// using one or more path.Match patterns. +// +// The directive must immediately precede a line containing the declaration of a single variable. 
+// Only blank lines and ‘//’ line comments are permitted between the directive and the declaration. +// +// The type of the variable must be a string type, or a slice of a byte type, +// or FS (or an alias of FS). +// +// For example: +// +// package server +// +// import "embed" +// +// // content holds our static web server content. +// //go:embed image/* template/* +// //go:embed html/index.html +// var content embed.FS +// +// The Go build system will recognize the directives and arrange for the declared variable +// (in the example above, content) to be populated with the matching files from the file system. +// +// The //go:embed directive accepts multiple space-separated patterns for +// brevity, but it can also be repeated, to avoid very long lines when there are +// many patterns. The patterns are interpreted relative to the package directory +// containing the source file. The path separator is a forward slash, even on +// Windows systems. Patterns may not contain ‘.’ or ‘..’ or empty path elements, +// nor may they begin or end with a slash. To match everything in the current +// directory, use ‘*’ instead of ‘.’. To allow for naming files with spaces in +// their names, patterns can be written as Go double-quoted or back-quoted +// string literals. +// +// If a pattern names a directory, all files in the subtree rooted at that directory are +// embedded (recursively), except that files with names beginning with ‘.’ or ‘_’ +// are excluded. So the variable in the above example is almost equivalent to: +// +// // content is our static web server content. +// //go:embed image template html/index.html +// var content embed.FS +// +// The difference is that ‘image/*’ embeds ‘image/.tempfile’ while ‘image’ does not. +// Neither embeds ‘image/dir/.tempfile’. +// +// If a pattern begins with the prefix ‘all:’, then the rule for walking directories is changed +// to include those files beginning with ‘.’ or ‘_’. 
For example, ‘all:image’ embeds +// both ‘image/.tempfile’ and ‘image/dir/.tempfile’. +// +// The //go:embed directive can be used with both exported and unexported variables, +// depending on whether the package wants to make the data available to other packages. +// It can only be used with variables at package scope, not with local variables. +// +// Patterns must not match files outside the package's module, such as ‘.git/*’ or symbolic links. +// Patterns must not match files whose names include the special punctuation characters " * < > ? ` ' | / \ and :. +// Matches for empty directories are ignored. After that, each pattern in a //go:embed line +// must match at least one file or non-empty directory. +// +// If any patterns are invalid or have invalid matches, the build will fail. +// +// # Strings and Bytes +// +// The //go:embed line for a variable of type string or []byte can have only a single pattern, +// and that pattern can match only a single file. The string or []byte is initialized with +// the contents of that file. +// +// The //go:embed directive requires importing "embed", even when using a string or []byte. +// In source files that don't refer to embed.FS, use a blank import (import _ "embed"). +// +// # File Systems +// +// For embedding a single file, a variable of type string or []byte is often best. +// The FS type enables embedding a tree of files, such as a directory of static +// web server content, as in the example above. +// +// FS implements the io/fs package's FS interface, so it can be used with any package that +// understands file systems, including net/http, text/template, and html/template. 
+// +// For example, given the content variable in the example above, we can write: +// +// http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.FS(content)))) +// +// template.ParseFS(content, "*.tmpl") +// +// # Tools +// +// To support tools that analyze Go packages, the patterns found in //go:embed lines +// are available in “go list” output. See the EmbedPatterns, TestEmbedPatterns, +// and XTestEmbedPatterns fields in the “go help list” output. +package bootstrap + +import ( + "errors" + "io" + "io/fs" + "time" +) + +// An FS is a read-only collection of files, usually initialized with a //go:embed directive. +// When declared without a //go:embed directive, an FS is an empty file system. +// +// An FS is a read-only value, so it is safe to use from multiple goroutines +// simultaneously and also safe to assign values of type FS to each other. +// +// FS implements fs.FS, so it can be used with any package that understands +// file system interfaces, including net/http, text/template, and html/template. +// +// See the package documentation for more details about initializing an FS. +type FS struct { + // The compiler knows the layout of this struct. + // See cmd/compile/internal/staticdata's WriteEmbed. + // + // The files list is sorted by name but not by simple string comparison. + // Instead, each file's name takes the form "dir/elem" or "dir/elem/". + // The optional trailing slash indicates that the file is itself a directory. + // The files list is sorted first by dir (if dir is missing, it is taken to be ".") + // and then by base, so this list of files: + // + // p + // q/ + // q/r + // q/s/ + // q/s/t + // q/s/u + // q/v + // w + // + // is actually sorted as: + // + // p # dir=. elem=p + // q/ # dir=. elem=q + // w/ # dir=. 
elem=w + // q/r # dir=q elem=r + // q/s/ # dir=q elem=s + // q/v # dir=q elem=v + // q/s/t # dir=q/s elem=t + // q/s/u # dir=q/s elem=u + // + // This order brings directory contents together in contiguous sections + // of the list, allowing a directory read to use binary search to find + // the relevant sequence of entries. + files *[]file +} + +// split splits the name into dir and elem as described in the +// comment in the FS struct above. isDir reports whether the +// final trailing slash was present, indicating that name is a directory. +func split(name string) (dir, elem string, isDir bool) { + if name[len(name)-1] == '/' { + isDir = true + name = name[:len(name)-1] + } + i := len(name) - 1 + for i >= 0 && name[i] != '/' { + i-- + } + if i < 0 { + return ".", name, isDir + } + return name[:i], name[i+1:], isDir +} + +// trimSlash trims a trailing slash from name, if present, +// returning the possibly shortened name. +func trimSlash(name string) string { + if len(name) > 0 && name[len(name)-1] == '/' { + return name[:len(name)-1] + } + return name +} + +var ( + _ fs.ReadDirFS = FS{} + _ fs.ReadFileFS = FS{} +) + +// A file is a single file in the FS. +// It implements fs.FileInfo and fs.DirEntry. +type file struct { + // The compiler knows the layout of this struct. + // See cmd/compile/internal/staticdata's WriteEmbed. 
+ name string + data string + hash [16]byte // truncated SHA256 hash +} + +var ( + _ fs.FileInfo = (*file)(nil) + _ fs.DirEntry = (*file)(nil) +) + +func (f *file) Name() string { _, elem, _ := split(f.name); return elem } +func (f *file) Size() int64 { return int64(len(f.data)) } +func (f *file) ModTime() time.Time { return time.Time{} } +func (f *file) IsDir() bool { _, _, isDir := split(f.name); return isDir } +func (f *file) Sys() any { return nil } +func (f *file) Type() fs.FileMode { return f.Mode().Type() } +func (f *file) Info() (fs.FileInfo, error) { return f, nil } + +func (f *file) Mode() fs.FileMode { + if f.IsDir() { + return fs.ModeDir | 0555 + } + return 0444 +} + +// dotFile is a file for the root directory, +// which is omitted from the files list in a FS. +var dotFile = &file{name: "./"} + +// lookup returns the named file, or nil if it is not present. +func (f FS) lookup(name string) *file { + if !fs.ValidPath(name) { + // The compiler should never emit a file with an invalid name, + // so this check is not strictly necessary (if name is invalid, + // we shouldn't find a match below), but it's a good backstop anyway. + return nil + } + if name == "." { + return dotFile + } + if f.files == nil { + return nil + } + + // Binary search to find where name would be in the list, + // and then check if name is at that position. + dir, elem, _ := split(name) + files := *f.files + i := sortSearch(len(files), func(i int) bool { + idir, ielem, _ := split(files[i].name) + return idir > dir || idir == dir && ielem >= elem + }) + if i < len(files) && trimSlash(files[i].name) == name { + return &files[i] + } + return nil +} + +// readDir returns the list of files corresponding to the directory dir. +func (f FS) readDir(dir string) []file { + if f.files == nil { + return nil + } + // Binary search to find where dir starts and ends in the list + // and then return that slice of the list. 
+ files := *f.files + i := sortSearch(len(files), func(i int) bool { + idir, _, _ := split(files[i].name) + return idir >= dir + }) + j := sortSearch(len(files), func(j int) bool { + jdir, _, _ := split(files[j].name) + return jdir > dir + }) + return files[i:j] +} + +// Open opens the named file for reading and returns it as an fs.File. +// +// The returned file implements io.Seeker when the file is not a directory. +func (f FS) Open(name string) (fs.File, error) { + file := f.lookup(name) + if file == nil { + return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist} + } + if file.IsDir() { + return &openDir{file, f.readDir(name), 0}, nil + } + return &openFile{file, 0}, nil +} + +// ReadDir reads and returns the entire named directory. +func (f FS) ReadDir(name string) ([]fs.DirEntry, error) { + file, err := f.Open(name) + if err != nil { + return nil, err + } + dir, ok := file.(*openDir) + if !ok { + return nil, &fs.PathError{Op: "read", Path: name, Err: errors.New("not a directory")} + } + list := make([]fs.DirEntry, len(dir.files)) + for i := range list { + list[i] = &dir.files[i] + } + return list, nil +} + +// ReadFile reads and returns the content of the named file. +func (f FS) ReadFile(name string) ([]byte, error) { + file, err := f.Open(name) + if err != nil { + return nil, err + } + ofile, ok := file.(*openFile) + if !ok { + return nil, &fs.PathError{Op: "read", Path: name, Err: errors.New("is a directory")} + } + return []byte(ofile.f.data), nil +} + +// An openFile is a regular file open for reading. 
+type openFile struct { + f *file // the file itself + offset int64 // current read offset +} + +var ( + _ io.Seeker = (*openFile)(nil) +) + +func (f *openFile) Close() error { return nil } +func (f *openFile) Stat() (fs.FileInfo, error) { return f.f, nil } + +func (f *openFile) Read(b []byte) (int, error) { + if f.offset >= int64(len(f.f.data)) { + return 0, io.EOF + } + if f.offset < 0 { + return 0, &fs.PathError{Op: "read", Path: f.f.name, Err: fs.ErrInvalid} + } + n := copy(b, f.f.data[f.offset:]) + f.offset += int64(n) + return n, nil +} + +func (f *openFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case 0: + // offset += 0 + case 1: + offset += f.offset + case 2: + offset += int64(len(f.f.data)) + } + if offset < 0 || offset > int64(len(f.f.data)) { + return 0, &fs.PathError{Op: "seek", Path: f.f.name, Err: fs.ErrInvalid} + } + f.offset = offset + return offset, nil +} + +// An openDir is a directory open for reading. +type openDir struct { + f *file // the directory file itself + files []file // the directory contents + offset int // the read offset, an index into the files slice +} + +func (d *openDir) Close() error { return nil } +func (d *openDir) Stat() (fs.FileInfo, error) { return d.f, nil } + +func (d *openDir) Read([]byte) (int, error) { + return 0, &fs.PathError{Op: "read", Path: d.f.name, Err: errors.New("is a directory")} +} + +func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) { + n := len(d.files) - d.offset + if n == 0 { + if count <= 0 { + return nil, nil + } + return nil, io.EOF + } + if count > 0 && n > count { + n = count + } + list := make([]fs.DirEntry, n) + for i := range list { + list[i] = &d.files[d.offset+i] + } + d.offset += n + return list, nil +} + +// sortSearch is like sort.Search, avoiding an import. +func sortSearch(n int, f func(int) bool) int { + // Define f(-1) == false and f(n) == true. + // Invariant: f(i-1) == false, f(j) == true. 
+ i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if !f(h) { + i = h + 1 // preserves f(i-1) == false + } else { + j = h // preserves f(j) == true + } + } + // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. + return i +} diff --git a/bootstrap/fs.go b/bootstrap/fs.go new file mode 100644 index 0000000..a82396c --- /dev/null +++ b/bootstrap/fs.go @@ -0,0 +1,75 @@ +package bootstrap + +import ( + "archive/zip" + "crypto/sha256" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/pkg/errors" + "io" + "io/fs" + "sort" + "strings" +) + +func NewFS(zipContent string) fs.FS { + zipReader, err := zip.NewReader(strings.NewReader(zipContent), int64(len(zipContent))) + if err != nil { + util.Log().Panic("Static resource is not a valid zip file: %s", err) + } + + var files []file + err = fs.WalkDir(zipReader, ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return errors.Errorf("无法获取[%s]的信息, %s, 跳过...", path, err) + } + + if path == "." 
{ + return nil + } + + var f file + if d.IsDir() { + f.name = path + "/" + } else { + f.name = path + + rc, err := zipReader.Open(path) + if err != nil { + return errors.Errorf("无法打开文件[%s], %s, 跳过...", path, err) + } + defer rc.Close() + + data, err := io.ReadAll(rc) + if err != nil { + return errors.Errorf("无法读取文件[%s], %s, 跳过...", path, err) + } + + f.data = string(data) + + hash := sha256.Sum256(data) + for i := range f.hash { + f.hash[i] = ^hash[i] + } + } + files = append(files, f) + return nil + }) + if err != nil { + util.Log().Panic("初始化静态资源失败: %s", err) + } + + sort.Slice(files, func(i, j int) bool { + fi, fj := files[i], files[j] + di, ei, _ := split(fi.name) + dj, ej, _ := split(fj.name) + + if di != dj { + return di < dj + } + return ei < ej + }) + + var embedFS FS + embedFS.files = &files + return embedFS +} diff --git a/bootstrap/init.go b/bootstrap/init.go new file mode 100644 index 0000000..2718ccb --- /dev/null +++ b/bootstrap/init.go @@ -0,0 +1,133 @@ +package bootstrap + +import ( + "io/fs" + "path/filepath" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/models/scripts" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2" + "github.com/cloudreve/Cloudreve/v3/pkg/auth" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/cluster" + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/crontab" + "github.com/cloudreve/Cloudreve/v3/pkg/email" + "github.com/cloudreve/Cloudreve/v3/pkg/mq" + "github.com/cloudreve/Cloudreve/v3/pkg/task" + "github.com/cloudreve/Cloudreve/v3/pkg/wopi" + "github.com/gin-gonic/gin" +) + +// Init 初始化启动 +func Init(path string, statics fs.FS) { + InitApplication() + conf.Init(path) + // Debug 关闭时,切换为生产模式 + if !conf.SystemConfig.Debug { + gin.SetMode(gin.ReleaseMode) + } + + dependencies := []struct { + mode string + factory func() + }{ + { + "both", + func() { + scripts.Init() + }, + }, + { + "both", + func() { + 
cache.Init() + }, + }, + { + "slave", + func() { + model.InitSlaveDefaults() + }, + }, + { + "slave", + func() { + cache.InitSlaveOverwrites() + }, + }, + { + "master", + func() { + model.Init() + }, + }, + { + "both", + func() { + cache.Restore(filepath.Join(model.GetSettingByName("temp_path"), cache.DefaultCacheFile)) + }, + }, + { + "both", + func() { + task.Init() + }, + }, + { + "master", + func() { + cluster.Init() + }, + }, + { + "master", + func() { + aria2.Init(false, cluster.Default, mq.GlobalMQ) + }, + }, + { + "master", + func() { + email.Init() + }, + }, + { + "master", + func() { + crontab.Init() + }, + }, + { + "master", + func() { + InitStatic(statics) + }, + }, + { + "slave", + func() { + cluster.InitController() + }, + }, + { + "both", + func() { + auth.Init() + }, + }, + { + "master", + func() { + wopi.Init() + }, + }, + } + + for _, dependency := range dependencies { + if dependency.mode == conf.SystemConfig.Mode || dependency.mode == "both" { + dependency.factory() + } + } + +} diff --git a/bootstrap/script.go b/bootstrap/script.go new file mode 100644 index 0000000..6f0ac92 --- /dev/null +++ b/bootstrap/script.go @@ -0,0 +1,18 @@ +package bootstrap + +import ( + "context" + "github.com/cloudreve/Cloudreve/v3/models/scripts/invoker" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +func RunScript(name string) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if err := invoker.RunDBScript(name, ctx); err != nil { + util.Log().Error("Failed to execute database script: %s", err) + return + } + + util.Log().Info("Finish executing database script %q.", name) +} diff --git a/bootstrap/static.go b/bootstrap/static.go new file mode 100644 index 0000000..4989b97 --- /dev/null +++ b/bootstrap/static.go @@ -0,0 +1,136 @@ +package bootstrap + +import ( + "bufio" + "encoding/json" + "io" + "io/fs" + "net/http" + "path/filepath" + + "github.com/pkg/errors" + + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + 
"github.com/cloudreve/Cloudreve/v3/pkg/util" + + "github.com/gin-contrib/static" +) + +const StaticFolder = "statics" + +type GinFS struct { + FS http.FileSystem +} + +type staticVersion struct { + Name string `json:"name"` + Version string `json:"version"` +} + +// StaticFS 内置静态文件资源 +var StaticFS static.ServeFileSystem + +// Open 打开文件 +func (b *GinFS) Open(name string) (http.File, error) { + return b.FS.Open(name) +} + +// Exists 文件是否存在 +func (b *GinFS) Exists(prefix string, filepath string) bool { + if _, err := b.FS.Open(filepath); err != nil { + return false + } + return true +} + +// InitStatic 初始化静态资源文件 +func InitStatic(statics fs.FS) { + if util.Exists(util.RelativePath(StaticFolder)) { + util.Log().Info("Folder with name \"statics\" already exists, it will be used to serve static files.") + StaticFS = static.LocalFile(util.RelativePath("statics"), false) + } else { + // 初始化静态资源 + embedFS, err := fs.Sub(statics, "assets/build") + if err != nil { + util.Log().Panic("Failed to initialize static resources: %s", err) + } + + StaticFS = &GinFS{ + FS: http.FS(embedFS), + } + } + // 检查静态资源的版本 + f, err := StaticFS.Open("version.json") + if err != nil { + util.Log().Warning("Missing version identifier file in static resources, please delete \"statics\" folder and rebuild it.") + return + } + + b, err := io.ReadAll(f) + if err != nil { + util.Log().Warning("Failed to read version identifier file in static resources, please delete \"statics\" folder and rebuild it.") + return + } + + var v staticVersion + if err := json.Unmarshal(b, &v); err != nil { + util.Log().Warning("Failed to parse version identifier file in static resources: %s", err) + return + } + + staticName := "cloudreve-frontend" + if conf.IsPlus == "true" { + staticName += "-plus" + } + + if v.Name != staticName { + util.Log().Warning("Static resource version mismatch, please delete \"statics\" folder and rebuild it.") + return + } + + if v.Version != conf.RequiredStaticVersion { + 
util.Log().Warning("Static resource version mismatch [Current %s, Desired: %s],please delete \"statics\" folder and rebuild it.", v.Version, conf.RequiredStaticVersion) + return + } +} + +// Eject 抽离内置静态资源 +func Eject(statics fs.FS) { + // 初始化静态资源 + embedFS, err := fs.Sub(statics, "assets/build") + if err != nil { + util.Log().Panic("Failed to initialize static resources: %s", err) + } + + // var walk func(relPath string, d fs.DirEntry, err error) error + walk := func(relPath string, d fs.DirEntry, err error) error { + if err != nil { + return errors.Errorf("Failed to read info of %q: %s, skipping...", relPath, err) + } + + if !d.IsDir() { + // 写入文件 + out, err := util.CreatNestedFile(filepath.Join(util.RelativePath(""), StaticFolder, relPath)) + + if err != nil { + return errors.Errorf("Failed to create file %q: %s, skipping...", relPath, err) + } + defer out.Close() + + util.Log().Info("Ejecting %q...", relPath) + obj, _ := embedFS.Open(relPath) + if _, err := io.Copy(out, bufio.NewReader(obj)); err != nil { + return errors.Errorf("Cannot write file %q: %s, skipping...", relPath, err) + } + } + return nil + } + + // util.Log().Info("开始导出内置静态资源...") + err = fs.WalkDir(embedFS, ".", walk) + if err != nil { + util.Log().Error("Error occurs while ejecting static resources: %s", err) + return + } + util.Log().Info("Finish ejecting static resources.") +} diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..cc94bef --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,45 @@ +version: "3.8" +services: + redis: + container_name: redis + image: bitnami/redis:latest + restart: unless-stopped + environment: + - ALLOW_EMPTY_PASSWORD=yes + volumes: + - redis_data:/bitnami/redis/data + + cloudreve: + container_name: cloudreve + image: cloudreve/cloudreve:latest + restart: unless-stopped + ports: + - "5212:5212" + volumes: + - temp_data:/data + - ./cloudreve/uploads:/cloudreve/uploads + - ./cloudreve/conf.ini:/cloudreve/conf.ini + - 
./cloudreve/cloudreve.db:/cloudreve/cloudreve.db + - ./cloudreve/avatar:/cloudreve/avatar + depends_on: + - aria2 + + aria2: + container_name: aria2 + image: p3terx/aria2-pro # third party image, please keep notice what you are doing + restart: unless-stopped + environment: + - RPC_SECRET=your_aria_rpc_token # aria rpc token, customize your own + - RPC_PORT=6800 + volumes: + - ./aria2/config:/config + - temp_data:/data +volumes: + redis_data: + driver: local + temp_data: + driver: local + driver_opts: + type: none + device: $PWD/data + o: bind diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..ece85eb --- /dev/null +++ b/go.mod @@ -0,0 +1,180 @@ +module github.com/cloudreve/Cloudreve/v3 + +go 1.18 + +require ( + github.com/HFO4/aliyun-oss-go-sdk v2.2.3+incompatible + github.com/aws/aws-sdk-go v1.31.5 + github.com/duo-labs/webauthn v0.0.0-20220330035159-03696f3d4499 + github.com/fatih/color v1.16.0 + github.com/gin-contrib/cors v1.3.0 + github.com/gin-contrib/gzip v0.0.2-0.20200226035851-25bef2ef21e8 + github.com/gin-contrib/sessions v0.0.5 + github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2 + github.com/gin-gonic/gin v1.8.1 + github.com/glebarez/go-sqlite v1.20.3 + github.com/go-ini/ini v1.50.0 + github.com/go-mail/mail v2.3.1+incompatible + github.com/go-playground/validator/v10 v10.11.0 + github.com/gofrs/uuid v4.0.0+incompatible + github.com/gomodule/redigo v2.0.0+incompatible + github.com/google/go-querystring v1.0.0 + github.com/google/uuid v1.3.0 + github.com/gorilla/securecookie v1.1.1 + github.com/gorilla/sessions v1.2.1 + github.com/gorilla/websocket v1.4.2 + github.com/hashicorp/go-version v1.3.0 + github.com/iGoogle-ink/gopay v1.5.36 + github.com/jinzhu/gorm v1.9.11 + github.com/juju/ratelimit v1.0.1 + github.com/mholt/archiver/v4 v4.0.0-alpha.6 + github.com/mojocn/base64Captcha v0.0.0-20190801020520-752b1cd608b2 + github.com/pkg/errors v0.9.1 + github.com/pquerna/otp v1.2.0 + github.com/qingwg/payjs 
v0.0.0-20190928033402-c53dbe16b371 + github.com/qiniu/go-sdk/v7 v7.11.1 + github.com/robfig/cron/v3 v3.0.1 + github.com/samber/lo v1.38.1 + github.com/smartwalle/alipay/v3 v3.2.20 + github.com/speps/go-hashids v2.0.0+incompatible + github.com/stretchr/testify v1.8.3 + github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/captcha v1.0.393 + github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.393 + github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/scf v1.0.393 + github.com/tencentyun/cos-go-sdk-v5 v0.0.0-20200120023323-87ff3bc489ac + github.com/upyun/go-sdk v2.1.0+incompatible + golang.org/x/image v0.0.0-20211028202545-6944b10bf410 + golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba + google.golang.org/api v0.45.0 +) + +require ( + cloud.google.com/go v0.81.0 // indirect + github.com/andybalholm/brotli v1.0.4 // indirect + github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/speakeasy v0.1.0 // indirect + github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect + github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect + github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/cloudflare/cfssl v1.6.1 // indirect + github.com/cncf/udpa/go v0.0.0-20210322005330-6414d713912e // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3 // indirect + github.com/dsnet/compress v0.0.1 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d // indirect + github.com/envoyproxy/protoc-gen-validate v0.6.1 // indirect + github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect + 
github.com/fullstorydev/grpcurl v1.8.1 // indirect + github.com/fxamacker/cbor/v2 v2.4.0 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-sql-driver/mysql v1.6.0 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.1.0 // indirect + github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/mock v1.5.0 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/btree v1.0.1 // indirect + github.com/google/certificate-transparency-go v1.1.2-0.20210511102531-373a877eec92 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/googleapis/gax-go/v2 v2.0.5 // indirect + github.com/gorilla/context v1.1.1 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jhump/protoreflect v1.8.2 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect + github.com/jmespath/go-jmespath v0.3.0 // indirect + github.com/jonboulle/clockwork v0.2.2 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.15.1 // indirect + github.com/klauspost/pgzip v1.2.5 // indirect + github.com/leodido/go-urn v1.2.4 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.12 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + 
github.com/mitchellh/mapstructure v1.1.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mozillazg/go-httpheader v0.2.1 // indirect + github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pierrec/lz4/v4 v4.1.14 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.10.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.24.0 // indirect + github.com/prometheus/procfs v0.6.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230126093431-47fa9a501578 // indirect + github.com/rivo/uniseg v0.2.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/satori/go.uuid v1.2.0 // indirect + github.com/sirupsen/logrus v1.8.1 // indirect + github.com/smartwalle/ncrypto v1.0.4 // indirect + github.com/smartwalle/ngx v1.0.9 // indirect + github.com/smartwalle/nsign v1.0.9 // indirect + github.com/soheilhy/cmux v0.1.5 // indirect + github.com/spf13/cobra v1.1.3 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/therootcompany/xz v1.0.1 // indirect + github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 // indirect + github.com/ugorji/go/codec v1.2.11 // indirect + github.com/ulikunitz/xz v0.5.10 // indirect + github.com/urfave/cli v1.22.5 // indirect + github.com/x448/float16 v0.8.4 // indirect + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect + go.etcd.io/bbolt v1.3.5 // indirect + go.etcd.io/etcd/api/v3 v3.5.0-alpha.0 // indirect + go.etcd.io/etcd/client/v2 v2.305.0-alpha.0 // indirect + go.etcd.io/etcd/client/v3 v3.5.0-alpha.0 // indirect + go.etcd.io/etcd/etcdctl/v3 v3.5.0-alpha.0 // indirect + 
go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0 // indirect + go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0 // indirect + go.etcd.io/etcd/server/v3 v3.5.0-alpha.0 // indirect + go.etcd.io/etcd/tests/v3 v3.5.0-alpha.0 // indirect + go.etcd.io/etcd/v3 v3.5.0-alpha.0 // indirect + go.opencensus.io v0.23.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.7.0 // indirect + go.uber.org/zap v1.16.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 // indirect + golang.org/x/mod v0.8.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/text v0.9.0 // indirect + golang.org/x/tools v0.6.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20210510173355-fb37daa5cd7a // indirect + google.golang.org/grpc v1.37.0 // indirect + google.golang.org/protobuf v1.30.0 // indirect + gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect + gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect + gopkg.in/mail.v2 v2.3.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + modernc.org/libc v1.22.2 // indirect + modernc.org/mathutil v1.5.0 // indirect + modernc.org/memory v1.5.0 // indirect + modernc.org/sqlite v1.20.3 // indirect + sigs.k8s.io/yaml v1.2.0 // indirect + +) + +replace github.com/gomodule/redigo v2.0.0+incompatible => github.com/gomodule/redigo v1.8.9 diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..ca78810 --- /dev/null +++ b/go.sum @@ -0,0 +1,1494 @@ +bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= +bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c/go.mod 
h1:hSVuE3qU7grINVSwrmzHfpg9k87ALBk+XaualNyUzI4= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go 
v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/spanner v1.17.0/go.mod h1:+17t2ixFwRG4lWRwE+5kipDR9Ef07Jkmc8z0IbMDKUs= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +code.gitea.io/sdk/gitea v0.11.3/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY= +contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod 
h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= +contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0= +contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw= +contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= +contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= +contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU= +github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= +github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0= +github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= +github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= +github.com/GeertJohan/go.rice v1.0.2/go.mod h1:af5vUNlDNkCjOZeSGFgIJxDje9qdjsO6hshx0gTmZt4= +github.com/GoogleCloudPlatform/cloudsql-proxy 
v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo= +github.com/HFO4/aliyun-oss-go-sdk v2.2.3+incompatible h1:aX/+gJM2dAMDDy3JqWS0DJn3JfOUchf4k37P5TbBKU8= +github.com/HFO4/aliyun-oss-go-sdk v2.2.3+incompatible/go.mod h1:8KDiKVrHK/UbXAhj+iQGp1m40rQa+UAvzBi7m22KywI= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/alecthomas/kingpin 
v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= +github.com/apache/beam v2.28.0+incompatible/go.mod h1:/8NX3Qi8vGstDLLaeaU7+lzVEu/ACaQhYjeefzQ0y1o= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apex/log v1.1.4/go.mod h1:AlpoD9aScyQfJDVHmLMEcx4oU6LqzkWp4Mg9GdAcEvQ= +github.com/apex/logs v0.0.4/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= +github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod 
h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.31.5 h1:DFA7BzTydO4etqsTja+x7UfkOKQUv1xzEluLvNk81L0= +github.com/aws/aws-sdk-go v1.31.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw= +github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= 
+github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d h1:S2NE3iHSwP0XV47EEXL8mWmRdEfGscSJ+7EgePNgt0s= +github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY= +github.com/cloudflare/cfssl v1.6.1 h1:aIOUjpeuDJOpWjVJFP2ByplF53OgqG8I1S40Ggdlk3g= +github.com/cloudflare/cfssl v1.6.1/go.mod h1:ENhCj4Z17+bY2XikpxVmTHDg/C2IsG2Q0ZBeXpAqhCk= +github.com/cloudflare/redoctober v0.0.0-20201013214028-99c99a8e7544/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210322005330-6414d713912e h1:xjKi0OrdbKVCLWRoF2SGNnv9todhp+zQlvRHhsb14R4= +github.com/cncf/udpa/go v0.0.0-20210322005330-6414d713912e/go.mod 
h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= +github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2 
h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= +github.com/daaku/go.zipexe v1.0.1/go.mod h1:5xWogtqlYnfBXkSB1o9xysukNP9GTvaNkqzUZbt3Bw8= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3 h1:tkum0XDgfR0jcVVXuTsYv/erY2NnEDqwRojbxR1rBYA= +github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod 
h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q= +github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= +github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= +github.com/duo-labs/webauthn v0.0.0-20220330035159-03696f3d4499 h1:jaQHuGKk9NVcfu9VbA7ygslr/7utxdYs47i4osBhZP8= +github.com/duo-labs/webauthn v0.0.0-20220330035159-03696f3d4499/go.mod h1:UMk1JMDgQDcdI2vQz+WJOIUTSjIq07qSepAVgc93rUc= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.3.0-java/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.1 h1:4CF52PCseTFt4bE+Yk3dIpdVi7XWuPVMhPtm4FaIJPM= +github.com/envoyproxy/protoc-gen-validate v0.6.1/go.mod h1:txg5va2Qkip90uYoSKH+nkAAmXrb2j3iq4FLwdrCbXQ= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= +github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod 
h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fullstorydev/grpcurl v1.8.0/go.mod h1:Mn2jWbdMrQGJQ8UD62uNyMumT2acsZUCkZIqFxsQf1o= +github.com/fullstorydev/grpcurl v1.8.1 h1:Pp648wlTTg3OKySeqxM5pzh8XF6vLqrm8wRq66+5Xo0= +github.com/fullstorydev/grpcurl v1.8.1/go.mod h1:3BWhvHZwNO7iLXaQlojdg5NA6SxUDePli4ecpK1N7gw= +github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88= +github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/cors v1.3.0 h1:PolezCc89peu+NgkIWt9OB01Kbzt6IP0J/JvkG6xxlg= +github.com/gin-contrib/cors v1.3.0/go.mod h1:artPvLlhkF7oG06nK8v3U8TNz6IeX+w1uzCSEId5/Vc= +github.com/gin-contrib/gzip v0.0.2-0.20200226035851-25bef2ef21e8 h1:/DnKeA2+K83hkii3nqMJ5koknI+/qlojjxgcSyiAyJw= +github.com/gin-contrib/gzip v0.0.2-0.20200226035851-25bef2ef21e8/go.mod 
h1:M+xPw/lXk+uAU4iYVnwPZs0iIpR/KwSQSXcJabN+gPs= +github.com/gin-contrib/sessions v0.0.5 h1:CATtfHmLMQrMNpJRgzjWXD7worTh7g7ritsQfmF+0jE= +github.com/gin-contrib/sessions v0.0.5/go.mod h1:vYAuaUPqie3WUSsft6HUlCjlwwoJQs97miaG2+7neKY= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2 h1:xLG16iua01X7Gzms9045s2Y2niNpvSY/Zb1oBwgNYZY= +github.com/gin-contrib/static v0.0.0-20191128031702-f81c604d8ac2/go.mod h1:VhW/Ch/3FhimwZb8Oj+qJmdMmoB8r7lmJ5auRjm50oQ= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= +github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= +github.com/glebarez/go-sqlite v1.20.3 h1:89BkqGOXR9oRmG58ZrzgoY/Fhy5x0M+/WV48U5zVrZ4= +github.com/glebarez/go-sqlite v1.20.3/go.mod h1:u3N6D/wftiAzIOJtZl6BmedqxmmkDfH3q+ihjqxC9u0= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-ini/ini v1.50.0 h1:ogX6RS8VstVN8MJcwhEP78hHhWaI3klN02+97bByabY= +github.com/go-ini/ini v1.50.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= 
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-mail/mail v2.3.1+incompatible h1:UzNOn0k5lpfVtO31cK3hn6I4VEVGhe3lX8AJBAxXExM= +github.com/go-mail/mail v2.3.1+incompatible/go.mod h1:VPWjmmNyRsWXQZHVHT3g0YbIINUkSmuKOiLIDkWbL6M= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.8.0/go.mod 
h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk= +github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2BOGlCyvTqsp/xIw= +github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf 
v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0= +github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v0.0.0-20210429001901-424d2337a529/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws= +github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/certificate-transparency-go v1.1.2-0.20210422104406-9f33727a7a18/go.mod h1:6CKh9dscIRoqc2kC6YUFICHZMT9NrClyPrRVFrdw1QQ= +github.com/google/certificate-transparency-go v1.1.2-0.20210511102531-373a877eec92 h1:806qveZBQtRNHroYHyg6yrsjqBJh9kIB4nfmB8uJnak= +github.com/google/certificate-transparency-go v1.1.2-0.20210511102531-373a877eec92/go.mod h1:kXWPsHVPSKVuxPPG69BRtumCbAW537FydV/GH89oBhM= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp 
v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= +github.com/google/go-licenses v0.0.0-20210329231322-ce1d9163b77d/go.mod h1:+TYOmkVoJOpwnS0wfdsJCV9CoD5nJYsHoFk/0CrTK4M= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE= +github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/licenseclassifier v0.0.0-20210325184830-bb04aff29e72/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof 
v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg= +github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/trillian v1.3.14-0.20210409160123-c5ea3abd4a41/go.mod h1:1dPv0CUjNQVFEDuAUFhZql16pw/VlPgaX8qj+g5pVzQ= +github.com/google/trillian v1.3.14-0.20210428093031-b4ddea2e86b1/go.mod h1:FdIJX+NoDk/dIN2ZxTyz5nAJWgf+NSSSriPAMThChTY= +github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid 
v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s= +github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= +github.com/goreleaser/goreleaser v0.134.0/go.mod h1:ZT6Y2rSYa6NxQzIsdfWWNWAlYGXGbreo66NmE+3X3WQ= +github.com/goreleaser/nfpm v1.2.1/go.mod h1:TtWrABZozuLOttX2uDlYyECfQX7x5XYkVxhjYcR6G9w= +github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1 
h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod 
h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= +github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iGoogle-ink/gopay v1.5.36 h1:RctuoiEdTbiXOmzQ9i1388opwAOjheUDIFoHl1EeNr8= +github.com/iGoogle-ink/gopay v1.5.36/go.mod h1:JADVzrfz9kzGMCgV7OzJ954pqwMU7PotYMAjP84YKIE= +github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client 
v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= +github.com/jhump/protoreflect v1.8.2 h1:k2xE7wcUomeqwY0LDCYA16y4WWfyTcMx5mKhk0d4ua0= +github.com/jhump/protoreflect v1.8.2/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= +github.com/jinzhu/gorm v1.9.11 h1:gaHGvE+UnWGlbWG4Y3FUwY1EcZ5n6S9WtqBA/uySMLE= +github.com/jinzhu/gorm v1.9.11/go.mod h1:bu/pK8szGZ2puuErfU0RwyeNdsf3e6nCX/noXaVxkfw= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jinzhu/now v1.1.1 h1:g39TucaRWyV3dwDO++eEc6qf8TVIQ/Da48WmqjZ3i7E= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo= +github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= 
+github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY= +github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod 
h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= +github.com/kisom/goutils v1.4.3/go.mod h1:Lp5qrquG7yhYnWzZCI/68Pa/GpFynw//od6EkGnWpac= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= +github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= +github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/go-gypsy v1.0.0/go.mod h1:chkXM0zjdpXOiqkCW1XcCHDfjfk14PH2KKkQWxfJUcU= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.1/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= 
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7/go.mod 
h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= +github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/mholt/archiver/v4 v4.0.0-alpha.6 h1:3wvos9Kn1GpKNBz+MpozinGREPslLo1ds1W16vTkErQ= +github.com/mholt/archiver/v4 v4.0.0-alpha.6/go.mod h1:9PTygYq90FQBWPspdwAng6dNjYiBuTYKqmA6c15KuCo= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/mojocn/base64Captcha v0.0.0-20190801020520-752b1cd608b2 h1:daZqE/T/yEoKIQNd3rwNeLsiS0VpZFfJulR0t/rtgAE= +github.com/mojocn/base64Captcha v0.0.0-20190801020520-752b1cd608b2/go.mod h1:wAQCKEc5bDujxKRmbT6/vTnTt5CjStQ8bRfPWUuz/iY= +github.com/mozillazg/go-httpheader v0.2.1 
h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= +github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= +github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= +github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= +github.com/nkovacs/streamquote v1.0.0/go.mod h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOWpxbJYzzgUsc= +github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk= +github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1/go.mod 
h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/otiai10/copy v1.2.0/go.mod 
h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v4 v4.1.14 h1:+fL8AQEZtz/ijeNnpduH0bROTu0O3NZAlPjQxGn8LwE= +github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/otp v1.2.0 h1:/A3+Jn+cagqayeR3iHs/L62m5ue7710D35zl1zJ1kok= +github.com/pquerna/otp v1.2.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.10.0 h1:/o0BDeWzLWXNZ+4q5gXltUvaMpJqckTa+jTNoB+z4cg= +github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.24.0 h1:aIycr3wRFxPUq8XlLQlGQ9aNXV3dFi5y62pe/SB262k= +github.com/prometheus/common v0.24.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= 
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/pseudomuto/protoc-gen-doc v1.4.1/go.mod h1:exDTOVwqpp30eV/EDPFLZy3Pwr2sn6hBC1WIYH/UbIg= +github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= +github.com/qingwg/payjs v0.0.0-20190928033402-c53dbe16b371 h1:8VWtyY2IwjEQZSNT4Kyyct9zv9hoegD5GQhFr+TMdCI= +github.com/qingwg/payjs v0.0.0-20190928033402-c53dbe16b371/go.mod h1:9UFrQveqNm3ELF6HSvMtDR3KYpJ7Ib9s0WVmYhaUBlU= +github.com/qiniu/dyn v1.3.0/go.mod h1:E8oERcm8TtwJiZvkQPbcAh0RL8jO1G0VXJMW3FAWdkk= +github.com/qiniu/go-sdk/v7 v7.11.1 h1:/LZ9rvFS4p6SnszhGv11FNB1+n4OZvBCwFg7opH5Ovs= +github.com/qiniu/go-sdk/v7 v7.11.1/go.mod h1:btsaOc8CA3hdVloULfFdDgDc+g4f3TDZEFsDY0BLE+w= +github.com/qiniu/x v1.10.5/go.mod h1:03Ni9tj+N2h2aKnAz+6N0Xfl8FwMEDRC2PAlxekASDs= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230126093431-47fa9a501578 h1:VstopitMQi3hZP0fzvnsLmzXZdQGc4bEcgu24cp+d4M= +github.com/remyoudompheng/bigfft v0.0.0-20230126093431-47fa9a501578/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/robfig/cron/v3 v3.0.1 
h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samber/lo v1.38.1 h1:j2XEAqXKb09Am4ebOg31SpvzUTTs6EN3VfgeLUhPdXM= +github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= 
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartwalle/alipay/v3 v3.2.20 h1:IjpG3YYgUgzCfS0z/EHlUbbr0OlrmOBHUst/3FzToYE= +github.com/smartwalle/alipay/v3 v3.2.20/go.mod h1:KWg91KsY+eIOf26ZfZeH7bed1bWulGpGrL1ErHF3jWo= +github.com/smartwalle/ncrypto v1.0.4 h1:P2rqQxDepJwgeO5ShoC+wGcK2wNJDmcdBOWAksuIgx8= +github.com/smartwalle/ncrypto v1.0.4/go.mod h1:Dwlp6sfeNaPMnOxMNayMTacvC5JGEVln3CVdiVDgbBk= +github.com/smartwalle/ngx v1.0.9 h1:pUXDvWRZJIHVrCKA1uZ15YwNti+5P4GuJGbpJ4WvpMw= +github.com/smartwalle/ngx v1.0.9/go.mod h1:mx/nz2Pk5j+RBs7t6u6k22MPiBG/8CtOMpCnALIG8Y0= +github.com/smartwalle/nsign v1.0.9 h1:8poAgG7zBd8HkZy9RQDwasC6XZvJpDGQWSjzL2FZL6E= +github.com/smartwalle/nsign v1.0.9/go.mod h1:eY6I4CJlyNdVMP+t6z1H6Jpd4m5/V+8xi44ufSTxXgc= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0 
h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5-0.20210205191134-5ec6847320e5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/speps/go-hashids v2.0.0+incompatible h1:kSfxGfESueJKTx0mpER9Y/1XHl+FVQjtCqRyYcviFbw= +github.com/speps/go-hashids v2.0.0+incompatible/go.mod h1:P7hqPzMdnZOfyIk+xrlG1QaSMw+gCBdHKsBDnhpaZvc= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1/go.mod 
h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/captcha v1.0.393 h1:hfhmMk7j4uDMRkfrrIOneMVXPBEhy3HSYiWX0gWoyhc= +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/captcha v1.0.393/go.mod h1:482ndbWuXqgStZNCqE88UoZeDveIt0juS7MY71Vangg= +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.393 h1:4IehmEtin8mvOO9pDA3Uj1/X9cWndyDkSsJC0AcRXv4= +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.393/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/scf v1.0.393 h1:WAiJZ+YhH44DT95BlUKbcRAj1WtorJp7Lxe87v3x/F4= +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/scf v1.0.393/go.mod h1:actV4GtZOO0fUxWese/7SNZ7l+LlepXuQyAEbC/0UMs= +github.com/tencentyun/cos-go-sdk-v5 v0.0.0-20200120023323-87ff3bc489ac h1:PSBhZblOjdwH7SIVgcue+7OlnLHkM45KuScLZ+PiVbQ= +github.com/tencentyun/cos-go-sdk-v5 
v0.0.0-20200120023323-87ff3bc489ac/go.mod h1:wQBO5HdAkLjj2q6XQiIfDSP8DXDNrppDRw2Kp/1BODA= +github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw= +github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY= +github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= 
+github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8= +github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/upyun/go-sdk v2.1.0+incompatible h1:OdjXghQ/TVetWV16Pz3C1/SUpjhGBVPr+cLiqZLLyq0= +github.com/upyun/go-sdk v2.1.0+incompatible/go.mod h1:eu3F5Uz4b9ZE5bE5QsCL6mgSNWRwfj0zpJ9J626HEqs= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU= +github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/weppos/publicsuffix-go v0.13.1-0.20210123135404-5fd73613514e/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= +github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b/go.mod h1:HYux0V0Zi04bHNwOHy4cXJVz/TQjYonnF6aoYhj+3QE= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= 
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= +github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= +github.com/zmap/zcrypto v0.0.0-20210123152837-9cf5beac6d91/go.mod h1:R/deQh6+tSWlgI9tb4jNmXxn8nSCabl5ZQsBX9//I/E= +github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc/go.mod h1:FM4U1E3NzlNMRnSUTU3P1UdukWhYGifqEsjk9fn7BCk= +github.com/zmap/zlint/v3 v3.1.0/go.mod h1:L7t8s3sEKkb0A2BxGy1IWrxt1ZATa1R4QfJZaQOD3zU= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd/api/v3 v3.5.0-alpha.0 h1:+e5nrluATIy3GP53znpkHMFzPTHGYyzvJGFCbuI6ZLc= +go.etcd.io/etcd/api/v3 v3.5.0-alpha.0/go.mod h1:mPcW6aZJukV6Aa81LSKpBjQXTWlXB5r74ymPoSWa3Sw= +go.etcd.io/etcd/client/v2 v2.305.0-alpha.0 h1:jZepGpOeJATxsbMNBZczDS2jHdK/QVHM1iPe9jURJ8o= +go.etcd.io/etcd/client/v2 v2.305.0-alpha.0/go.mod 
h1:kdV+xzCJ3luEBSIeQyB/OEKkWKd8Zkux4sbDeANrosU= +go.etcd.io/etcd/client/v3 v3.5.0-alpha.0 h1:dr1EOILak2pu4Nf5XbRIOCNIBjcz6UmkQd7hHRXwxaM= +go.etcd.io/etcd/client/v3 v3.5.0-alpha.0/go.mod h1:wKt7jgDgf/OfKiYmCq5WFGxOFAkVMLxiiXgLDFhECr8= +go.etcd.io/etcd/etcdctl/v3 v3.5.0-alpha.0 h1:odMFuQQCg0UmPd7Cyw6TViRYv9ybGuXuki4CusDSzqA= +go.etcd.io/etcd/etcdctl/v3 v3.5.0-alpha.0/go.mod h1:YPwSaBciV5G6Gpt435AasAG3ROetZsKNUzibRa/++oo= +go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0 h1:3yLUEC0nFCxw/RArImOyRUI4OAFbg4PFpBbAhSNzKNY= +go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0/go.mod h1:tV31atvwzcybuqejDoY3oaNRTtlD2l/Ot78Pc9w7DMY= +go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0 h1:DvYJotxV9q1Lkn7pknzAbFO/CLtCVidCr2K9qRLJ8pA= +go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0/go.mod h1:FAwse6Zlm5v4tEWZaTjmNhe17Int4Oxbu7+2r0DiD3w= +go.etcd.io/etcd/server/v3 v3.5.0-alpha.0 h1:fYv7CmmdyuIu27UmKQjS9K/1GtcCa+XnPKqiKBbQkrk= +go.etcd.io/etcd/server/v3 v3.5.0-alpha.0/go.mod h1:tsKetYpt980ZTpzl/gb+UOJj9RkIyCb1u4wjzMg90BQ= +go.etcd.io/etcd/tests/v3 v3.5.0-alpha.0 h1:UcRoCA1FgXoc4CEM8J31fqEvI69uFIObY5ZDEFH7Znc= +go.etcd.io/etcd/tests/v3 v3.5.0-alpha.0/go.mod h1:HnrHxjyCuZ8YDt8PYVyQQ5d1ZQfzJVEtQWllr5Vp/30= +go.etcd.io/etcd/v3 v3.5.0-alpha.0 h1:ZuqKJkD2HrzFUj8IB+GLkTMKZ3+7mWx172vx6F1TukM= +go.etcd.io/etcd/v3 v3.5.0-alpha.0/go.mod h1:JZ79d3LV6NUfPjUxXrpiFAYcjhT+06qqw+i28snx8To= +go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io 
v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI= +golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto 
v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto 
v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 h1:3MTrJm4PyNL9NBqvYDSj3DHl46qQakyfqfWo4jgfaEM= +golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190501045829-6d32002ffd75/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image 
v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410 h1:hTftEOvwiOq2+O8k2D5/Q7COC7k5Qcrgc2TFURJYnvQ= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.15.0 h1:kOELfmgrmJlw4Cdb7g/QGuB3CvDrXbqEIww/pNtNBm8= +golang.org/x/image v0.15.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= 
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net 
v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c h1:SgVl/sCtkicsS7psKkje4H9YtjdEl3xsYh7N+5TDHqY= +golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync 
v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191119060738-e882bf8e40c2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= 
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191118222007-07fc4c7f2b98/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools 
v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201014170642-d1624618ad65/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api 
v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.45.0 h1:pqMffJFLBVUDIoYsHcqtxgQVTsmxMDpYLOc5MT4Jrww= +google.golang.org/api v0.45.0/go.mod h1:ISLIJCedJolbZvDfAk+Ctuq5hf+aJ33WgtUsfyFoLXA= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod 
h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210331142528-b7513248f0ba/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210510173355-fb37daa5cd7a h1:tzkHckzMzgPr8SC4taTC3AldLr4+oJivSoq1xf/nhsc= +google.golang.org/genproto v0.0.0-20210510173355-fb37daa5cd7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf 
v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod 
h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= +gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/mail.v2 v2.3.1 h1:WYFn/oANrAGP2C0dcV6/pbkPzv8yGzqTjPmTeO7qoXk= +gopkg.in/mail.v2 v2.3.1/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= +gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.4 h1:SadWOkti5uVN1FAMgxn165+Mw00fuQKyk4Gyn/inxNQ= +honnef.co/go/tools v0.1.4/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +modernc.org/libc v1.22.2 h1:4U7v51GyhlWqQmwCHj28Rdq2Yzwk55ovjFrdPjs8Hb0= +modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug= +modernc.org/mathutil v1.5.0 
h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= +modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/sqlite v1.20.3 h1:SqGJMMxjj1PHusLxdYxeQSodg7Jxn9WWkaAQjKrntZs= +modernc.org/sqlite v1.20.3/go.mod h1:zKcGyrICaxNTMEHSr1HQ2GUraP0j+845GYw37+EyT6A= +pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/main.go b/main.go new file mode 100644 index 0000000..6cc59ee --- /dev/null +++ b/main.go @@ -0,0 +1,162 @@ +package main + +import ( + "context" + _ "embed" + "flag" + "io/fs" + "net" + "net/http" + "os" + "os/signal" + "path/filepath" + "syscall" + "time" + + "github.com/cloudreve/Cloudreve/v3/bootstrap" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/cloudreve/Cloudreve/v3/routers" +) + +var ( + isEject bool + confPath string + scriptName string +) + +//go:embed assets.zip +var staticZip string + +var staticFS fs.FS + +func init() { + flag.StringVar(&confPath, "c", util.RelativePath("conf.ini"), "Path to the config file.") + flag.BoolVar(&isEject, "eject", false, "Eject all embedded static 
files.") + flag.StringVar(&scriptName, "database-script", "", "Name of database util script.") + flag.Parse() + + staticFS = bootstrap.NewFS(staticZip) + bootstrap.Init(confPath, staticFS) +} + +func main() { + // 关闭数据库连接 + defer func() { + if model.DB != nil { + model.DB.Close() + } + }() + + if isEject { + // 开始导出内置静态资源文件 + bootstrap.Eject(staticFS) + return + } + + if scriptName != "" { + // 开始运行助手数据库脚本 + bootstrap.RunScript(scriptName) + return + } + + api := routers.InitRouter() + api.TrustedPlatform = conf.SystemConfig.ProxyHeader + server := &http.Server{Handler: api} + + // 收到信号后关闭服务器 + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT) + wait := shutdown(sigChan, server) + + defer func() { + sigChan <- syscall.SIGTERM + <-wait + }() + + // 如果启用了SSL + if conf.SSLConfig.CertPath != "" { + util.Log().Info("Listening to %q", conf.SSLConfig.Listen) + server.Addr = conf.SSLConfig.Listen + if err := server.ListenAndServeTLS(conf.SSLConfig.CertPath, conf.SSLConfig.KeyPath); err != nil { + util.Log().Error("Failed to listen to %q: %s", conf.SSLConfig.Listen, err) + return + } + } + + // 如果启用了Unix + if conf.UnixConfig.Listen != "" { + // delete socket file before listening + if _, err := os.Stat(conf.UnixConfig.Listen); err == nil { + if err = os.Remove(conf.UnixConfig.Listen); err != nil { + util.Log().Error("Failed to delete socket file: %s", err) + return + } + } + + util.Log().Info("Listening to %q", conf.UnixConfig.Listen) + if err := RunUnix(server); err != nil { + util.Log().Error("Failed to listen to %q: %s", conf.UnixConfig.Listen, err) + } + return + } + + util.Log().Info("Listening to %q", conf.SystemConfig.Listen) + server.Addr = conf.SystemConfig.Listen + if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + util.Log().Error("Failed to listen to %q: %s", conf.SystemConfig.Listen, err) + } +} + +func RunUnix(server *http.Server) error { + listener, err := 
net.Listen("unix", conf.UnixConfig.Listen) + if err != nil { + return err + } + + defer listener.Close() + defer os.Remove(conf.UnixConfig.Listen) + + if conf.UnixConfig.Perm > 0 { + err = os.Chmod(conf.UnixConfig.Listen, os.FileMode(conf.UnixConfig.Perm)) + if err != nil { + util.Log().Warning( + "Failed to set permission to %q for socket file %q: %s", + conf.UnixConfig.Perm, + conf.UnixConfig.Listen, + err, + ) + } + } + + return server.Serve(listener) +} + +func shutdown(sigChan chan os.Signal, server *http.Server) chan struct{} { + wait := make(chan struct{}) + go func() { + sig := <-sigChan + util.Log().Info("Signal %s received, shutting down server...", sig) + if conf.SystemConfig.GracePeriod == 0 { + conf.SystemConfig.GracePeriod = 10 + } + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(conf.SystemConfig.GracePeriod)*time.Second) + defer cancel() + // Shutdown http server + err := server.Shutdown(ctx) + if err != nil { + util.Log().Error("Failed to shutdown server: %s", err) + } + + // Persist in-memory cache + if err := cache.Store.Persist(filepath.Join(model.GetSettingByName("temp_path"), cache.DefaultCacheFile)); err != nil { + util.Log().Warning("Failed to persist cache: %s", err) + } + + close(sigChan) + wait <- struct{}{} + }() + return wait +} diff --git a/middleware/auth.go b/middleware/auth.go new file mode 100644 index 0000000..6913273 --- /dev/null +++ b/middleware/auth.go @@ -0,0 +1,323 @@ +package middleware + +import ( + "bytes" + "context" + "crypto/md5" + "fmt" + "io/ioutil" + "net/http" + + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/oss" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/upyun" + "github.com/cloudreve/Cloudreve/v3/pkg/mq" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/qiniu/go-sdk/v7/auth/qbox" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/auth" + 
"github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/gin-contrib/sessions" + "github.com/gin-gonic/gin" +) + +const ( + CallbackFailedStatusCode = http.StatusUnauthorized +) + +// SignRequired 验证请求签名 +func SignRequired(authInstance auth.Auth) gin.HandlerFunc { + return func(c *gin.Context) { + var err error + switch c.Request.Method { + case "PUT", "POST", "PATCH": + err = auth.CheckRequest(authInstance, c.Request) + default: + err = auth.CheckURI(authInstance, c.Request.URL) + } + + if err != nil { + c.JSON(200, serializer.Err(serializer.CodeCredentialInvalid, err.Error(), err)) + c.Abort() + return + } + + c.Next() + } +} + +// CurrentUser 获取登录用户 +func CurrentUser() gin.HandlerFunc { + return func(c *gin.Context) { + session := sessions.Default(c) + uid := session.Get("user_id") + if uid != nil { + user, err := model.GetActiveUserByID(uid) + if err == nil { + c.Set("user", &user) + } + } + c.Next() + } +} + +// AuthRequired 需要登录 +func AuthRequired() gin.HandlerFunc { + return func(c *gin.Context) { + if user, _ := c.Get("user"); user != nil { + if _, ok := user.(*model.User); ok { + c.Next() + return + } + } + + c.JSON(200, serializer.CheckLogin()) + c.Abort() + } +} + +// PhoneRequired 需要绑定手机 +// TODO 有bug +func PhoneRequired() gin.HandlerFunc { + return func(c *gin.Context) { + if model.IsTrueVal(model.GetSettingByName("phone_required")) && + model.IsTrueVal(model.GetSettingByName("phone_enabled")) { + user, _ := c.Get("user") + if user.(*model.User).Phone != "" { + // TODO 忽略管理员 + c.Next() + return + } + } + + c.Next() + } +} + +// WebDAVAuth 验证WebDAV登录及权限 +func WebDAVAuth() gin.HandlerFunc { + return func(c *gin.Context) { + // OPTIONS 请求不需要鉴权,否则Windows10下无法保存文档 + if c.Request.Method == "OPTIONS" { + c.Next() + return + } + + username, password, ok := c.Request.BasicAuth() + if !ok { + c.Writer.Header()["WWW-Authenticate"] = []string{`Basic realm="cloudreve"`} + c.Status(http.StatusUnauthorized) + 
c.Abort() + return + } + + expectedUser, err := model.GetActiveUserByEmail(username) + if err != nil { + c.Status(http.StatusUnauthorized) + c.Abort() + return + } + + // 密码正确? + webdav, err := model.GetWebdavByPassword(password, expectedUser.ID) + if err != nil { + c.Status(http.StatusUnauthorized) + c.Abort() + return + } + + // 用户组已启用WebDAV? + if !expectedUser.Group.WebDAVEnabled { + c.Status(http.StatusForbidden) + c.Abort() + return + } + + // 用户组已启用WebDAV代理? + if !expectedUser.Group.OptionsSerialized.WebDAVProxy { + webdav.UseProxy = false + } + + c.Set("user", &expectedUser) + c.Set("webdav", webdav) + c.Next() + } +} + +// 对上传会话进行验证 +func UseUploadSession(policyType string) gin.HandlerFunc { + return func(c *gin.Context) { + // 验证key并查找用户 + resp := uploadCallbackCheck(c, policyType) + if resp.Code != 0 { + c.JSON(CallbackFailedStatusCode, resp) + c.Abort() + return + } + + c.Next() + } +} + +// uploadCallbackCheck 对上传回调请求的 callback key 进行验证,如果成功则返回上传用户 +func uploadCallbackCheck(c *gin.Context, policyType string) serializer.Response { + // 验证 Callback Key + sessionID := c.Param("sessionID") + if sessionID == "" { + return serializer.ParamErr("Session ID cannot be empty", nil) + } + + callbackSessionRaw, exist := cache.Get(filesystem.UploadSessionCachePrefix + sessionID) + if !exist { + return serializer.Err(serializer.CodeUploadSessionExpired, "上传会话不存在或已过期", nil) + } + + callbackSession := callbackSessionRaw.(serializer.UploadSession) + c.Set(filesystem.UploadSessionCtx, &callbackSession) + if callbackSession.Policy.Type != policyType { + return serializer.Err(serializer.CodePolicyNotAllowed, "", nil) + } + + // 清理回调会话 + _ = cache.Deletes([]string{sessionID}, filesystem.UploadSessionCachePrefix) + + // 查找用户 + user, err := model.GetActiveUserByID(callbackSession.UID) + if err != nil { + return serializer.Err(serializer.CodeUserNotFound, "", err) + } + c.Set(filesystem.UserCtx, &user) + return serializer.Response{} +} + +// RemoteCallbackAuth 远程回调签名验证 +func 
RemoteCallbackAuth() gin.HandlerFunc { + return func(c *gin.Context) { + // 验证签名 + session := c.MustGet(filesystem.UploadSessionCtx).(*serializer.UploadSession) + authInstance := auth.HMACAuth{SecretKey: []byte(session.Policy.SecretKey)} + if err := auth.CheckRequest(authInstance, c.Request); err != nil { + c.JSON(CallbackFailedStatusCode, serializer.Err(serializer.CodeCredentialInvalid, err.Error(), err)) + c.Abort() + return + } + + c.Next() + + } +} + +// QiniuCallbackAuth 七牛回调签名验证 +func QiniuCallbackAuth() gin.HandlerFunc { + return func(c *gin.Context) { + session := c.MustGet(filesystem.UploadSessionCtx).(*serializer.UploadSession) + + // 验证回调是否来自qiniu + mac := qbox.NewMac(session.Policy.AccessKey, session.Policy.SecretKey) + ok, err := mac.VerifyCallback(c.Request) + if err != nil { + util.Log().Debug("Failed to verify callback request: %s", err) + c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: "Failed to verify callback request."}) + c.Abort() + return + } + + if !ok { + c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: "Invalid signature."}) + c.Abort() + return + } + + c.Next() + } +} + +// OSSCallbackAuth 阿里云OSS回调签名验证 +func OSSCallbackAuth() gin.HandlerFunc { + return func(c *gin.Context) { + err := oss.VerifyCallbackSignature(c.Request) + if err != nil { + util.Log().Debug("Failed to verify callback request: %s", err) + c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: "Failed to verify callback request."}) + c.Abort() + return + } + + c.Next() + } +} + +// UpyunCallbackAuth 又拍云回调签名验证 +func UpyunCallbackAuth() gin.HandlerFunc { + return func(c *gin.Context) { + session := c.MustGet(filesystem.UploadSessionCtx).(*serializer.UploadSession) + + // 获取请求正文 + body, err := ioutil.ReadAll(c.Request.Body) + c.Request.Body.Close() + if err != nil { + c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: err.Error()}) + c.Abort() + return + } + + c.Request.Body = ioutil.NopCloser(bytes.NewReader(body)) + + // 准备验证Upyun回调签名 + handler 
:= upyun.Driver{Policy: &session.Policy} + contentMD5 := c.Request.Header.Get("Content-Md5") + date := c.Request.Header.Get("Date") + actualSignature := c.Request.Header.Get("Authorization") + + // 计算正文MD5 + actualContentMD5 := fmt.Sprintf("%x", md5.Sum(body)) + if actualContentMD5 != contentMD5 { + c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: "MD5 mismatch."}) + c.Abort() + return + } + + // 计算理论签名 + signature := handler.Sign(context.Background(), []string{ + "POST", + c.Request.URL.Path, + date, + contentMD5, + }) + + // 对比签名 + if signature != actualSignature { + c.JSON(401, serializer.GeneralUploadCallbackFailed{Error: "Signature not match"}) + c.Abort() + return + } + + c.Next() + } +} + +// OneDriveCallbackAuth OneDrive回调签名验证 +func OneDriveCallbackAuth() gin.HandlerFunc { + return func(c *gin.Context) { + // 发送回调结束信号 + mq.GlobalMQ.Publish(c.Param("sessionID"), mq.Message{}) + + c.Next() + } +} + +// IsAdmin 必须为管理员用户组 +func IsAdmin() gin.HandlerFunc { + return func(c *gin.Context) { + user, _ := c.Get("user") + if user.(*model.User).Group.ID != 1 && user.(*model.User).ID != 1 { + c.JSON(200, serializer.Err(serializer.CodeNoPermissionErr, "", nil)) + c.Abort() + return + } + + c.Next() + } +} diff --git a/middleware/captcha.go b/middleware/captcha.go new file mode 100644 index 0000000..baf24a5 --- /dev/null +++ b/middleware/captcha.go @@ -0,0 +1,127 @@ +package middleware + +import ( + "bytes" + "encoding/json" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/recaptcha" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gin-gonic/gin" + "github.com/mojocn/base64Captcha" + captcha "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/captcha/v20190722" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile" + "io" + "io/ioutil" + "strconv" + "time" +) + 
+type req struct { + CaptchaCode string `json:"captchaCode"` + Ticket string `json:"ticket"` + Randstr string `json:"randstr"` +} + +const ( + captchaNotMatch = "CAPTCHA not match." + captchaRefresh = "Verification failed, please refresh the page and retry." +) + +// CaptchaRequired 验证请求签名 +func CaptchaRequired(configName string) gin.HandlerFunc { + return func(c *gin.Context) { + // 相关设定 + options := model.GetSettingByNames(configName, + "captcha_type", + "captcha_ReCaptchaSecret", + "captcha_TCaptcha_SecretId", + "captcha_TCaptcha_SecretKey", + "captcha_TCaptcha_CaptchaAppId", + "captcha_TCaptcha_AppSecretKey") + // 检查验证码 + isCaptchaRequired := model.IsTrueVal(options[configName]) + + if isCaptchaRequired { + var service req + bodyCopy := new(bytes.Buffer) + _, err := io.Copy(bodyCopy, c.Request.Body) + if err != nil { + c.JSON(200, serializer.Err(serializer.CodeCaptchaError, captchaNotMatch, err)) + c.Abort() + return + } + + bodyData := bodyCopy.Bytes() + err = json.Unmarshal(bodyData, &service) + if err != nil { + c.JSON(200, serializer.Err(serializer.CodeCaptchaError, captchaNotMatch, err)) + c.Abort() + return + } + + c.Request.Body = ioutil.NopCloser(bytes.NewReader(bodyData)) + switch options["captcha_type"] { + case "normal": + captchaID := util.GetSession(c, "captchaID") + util.DeleteSession(c, "captchaID") + if captchaID == nil || !base64Captcha.VerifyCaptcha(captchaID.(string), service.CaptchaCode) { + c.JSON(200, serializer.Err(serializer.CodeCaptchaError, captchaNotMatch, err)) + c.Abort() + return + } + + break + case "recaptcha": + reCAPTCHA, err := recaptcha.NewReCAPTCHA(options["captcha_ReCaptchaSecret"], recaptcha.V2, 10*time.Second) + if err != nil { + util.Log().Warning("reCAPTCHA verification failed, %s", err) + c.Abort() + break + } + + err = reCAPTCHA.Verify(service.CaptchaCode) + if err != nil { + util.Log().Warning("reCAPTCHA verification failed, %s", err) + c.JSON(200, serializer.Err(serializer.CodeCaptchaRefreshNeeded, captchaRefresh, 
nil)) + c.Abort() + return + } + + break + case "tcaptcha": + credential := common.NewCredential( + options["captcha_TCaptcha_SecretId"], + options["captcha_TCaptcha_SecretKey"], + ) + cpf := profile.NewClientProfile() + cpf.HttpProfile.Endpoint = "captcha.tencentcloudapi.com" + client, _ := captcha.NewClient(credential, "", cpf) + request := captcha.NewDescribeCaptchaResultRequest() + request.CaptchaType = common.Uint64Ptr(9) + appid, _ := strconv.Atoi(options["captcha_TCaptcha_CaptchaAppId"]) + request.CaptchaAppId = common.Uint64Ptr(uint64(appid)) + request.AppSecretKey = common.StringPtr(options["captcha_TCaptcha_AppSecretKey"]) + request.Ticket = common.StringPtr(service.Ticket) + request.Randstr = common.StringPtr(service.Randstr) + request.UserIp = common.StringPtr(c.ClientIP()) + response, err := client.DescribeCaptchaResult(request) + if err != nil { + util.Log().Warning("TCaptcha verification failed, %s", err) + c.Abort() + break + } + + if *response.Response.CaptchaCode != int64(1) { + c.JSON(200, serializer.Err(serializer.CodeCaptchaRefreshNeeded, captchaRefresh, nil)) + c.Abort() + return + } + + break + } + } + c.Next() + } +} diff --git a/middleware/cluster.go b/middleware/cluster.go new file mode 100644 index 0000000..2e814be --- /dev/null +++ b/middleware/cluster.go @@ -0,0 +1,62 @@ +package middleware + +import ( + "github.com/cloudreve/Cloudreve/v3/pkg/auth" + "github.com/cloudreve/Cloudreve/v3/pkg/cluster" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/gin-gonic/gin" + "strconv" +) + +// MasterMetadata 解析主机节点发来请求的包含主机节点信息的元数据 +func MasterMetadata() gin.HandlerFunc { + return func(c *gin.Context) { + c.Set("MasterSiteID", c.GetHeader(auth.CrHeaderPrefix+"Site-Id")) + c.Set("MasterSiteURL", c.GetHeader(auth.CrHeaderPrefix+"Site-Url")) + c.Set("MasterVersion", c.GetHeader(auth.CrHeaderPrefix+"Cloudreve-Version")) + c.Next() + } +} + +// UseSlaveAria2Instance 从机用于获取对应主机节点的Aria2实例 +func UseSlaveAria2Instance(clusterController 
cluster.Controller) gin.HandlerFunc { + return func(c *gin.Context) { + if siteID, exist := c.Get("MasterSiteID"); exist { + // 获取对应主机节点的从机Aria2实例 + caller, err := clusterController.GetAria2Instance(siteID.(string)) + if err != nil { + c.JSON(200, serializer.Err(serializer.CodeNotSet, "Failed to get Aria2 instance", err)) + c.Abort() + return + } + + c.Set("MasterAria2Instance", caller) + c.Next() + return + } + + c.JSON(200, serializer.ParamErr("Unknown master node ID", nil)) + c.Abort() + } +} + +func SlaveRPCSignRequired(nodePool cluster.Pool) gin.HandlerFunc { + return func(c *gin.Context) { + nodeID, err := strconv.ParseUint(c.GetHeader(auth.CrHeaderPrefix+"Node-Id"), 10, 64) + if err != nil { + c.JSON(200, serializer.ParamErr("Unknown master node ID", err)) + c.Abort() + return + } + + slaveNode := nodePool.GetNodeByID(uint(nodeID)) + if slaveNode == nil { + c.JSON(200, serializer.ParamErr("Unknown master node ID", err)) + c.Abort() + return + } + + SignRequired(slaveNode.MasterAuthInstance())(c) + + } +} diff --git a/middleware/common.go b/middleware/common.go new file mode 100644 index 0000000..9b2cb08 --- /dev/null +++ b/middleware/common.go @@ -0,0 +1,77 @@ +package middleware + +import ( + "fmt" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/auth" + "github.com/cloudreve/Cloudreve/v3/pkg/hashid" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/gin-gonic/gin" + "net/http" +) + +// HashID 将给定对象的HashID转换为真实ID +func HashID(IDType int) gin.HandlerFunc { + return func(c *gin.Context) { + if c.Param("id") != "" { + id, err := hashid.DecodeHashID(c.Param("id"), IDType) + if err == nil { + c.Set("object_id", id) + c.Next() + return + } + c.JSON(200, serializer.ParamErr("Failed to parse object ID", nil)) + c.Abort() + return + + } + c.Next() + } +} + +// IsFunctionEnabled 当功能未开启时阻止访问 +func IsFunctionEnabled(key string) gin.HandlerFunc { + return func(c *gin.Context) { + if 
!model.IsTrueVal(model.GetSettingByName(key)) { + c.JSON(200, serializer.Err(serializer.CodeFeatureNotEnabled, "This feature is not enabled", nil)) + c.Abort() + return + } + + c.Next() + } +} + +// CacheControl 屏蔽客户端缓存 +func CacheControl() gin.HandlerFunc { + return func(c *gin.Context) { + c.Header("Cache-Control", "private, no-cache") + } +} + +func Sandbox() gin.HandlerFunc { + return func(c *gin.Context) { + c.Header("Content-Security-Policy", "sandbox") + } +} + +// StaticResourceCache 使用静态资源缓存策略 +func StaticResourceCache() gin.HandlerFunc { + return func(c *gin.Context) { + c.Header("Cache-Control", fmt.Sprintf("public, max-age=%d", model.GetIntSetting("public_resource_maxage", 86400))) + + } +} + +// MobileRequestOnly +func MobileRequestOnly() gin.HandlerFunc { + return func(c *gin.Context) { + if c.GetHeader(auth.CrHeaderPrefix+"ios") == "" { + c.Redirect(http.StatusMovedPermanently, model.GetSiteURL().String()) + c.Abort() + return + } + + c.Next() + } +} diff --git a/middleware/file.go b/middleware/file.go new file mode 100644 index 0000000..995637e --- /dev/null +++ b/middleware/file.go @@ -0,0 +1,30 @@ +package middleware + +import ( + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/gin-gonic/gin" +) + +// ValidateSourceLink validates if the perm source link is a valid redirect link +func ValidateSourceLink() gin.HandlerFunc { + return func(c *gin.Context) { + linkID, ok := c.Get("object_id") + if !ok { + c.JSON(200, serializer.Err(serializer.CodeFileNotFound, "", nil)) + c.Abort() + return + } + + sourceLink, err := model.GetSourceLinkByID(linkID) + if err != nil || sourceLink.File.ID == 0 || sourceLink.File.Name != c.Param("name") { + c.JSON(200, serializer.Err(serializer.CodeFileNotFound, "", nil)) + c.Abort() + return + } + + sourceLink.Downloaded() + c.Set("source_link", sourceLink) + c.Next() + } +} diff --git a/middleware/frontend.go b/middleware/frontend.go new file mode 100644 
index 0000000..eba1e84 --- /dev/null +++ b/middleware/frontend.go @@ -0,0 +1,84 @@ +package middleware + +import ( + "io/ioutil" + "net/http" + "strings" + + "github.com/cloudreve/Cloudreve/v3/bootstrap" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gin-gonic/gin" +) + +// FrontendFileHandler 前端静态文件处理 +func FrontendFileHandler() gin.HandlerFunc { + ignoreFunc := func(c *gin.Context) { + c.Next() + } + + if bootstrap.StaticFS == nil { + return ignoreFunc + } + + // 读取index.html + file, err := bootstrap.StaticFS.Open("/index.html") + if err != nil { + util.Log().Warning("Static file \"index.html\" does not exist, it might affect the display of the homepage.") + return ignoreFunc + } + + fileContentBytes, err := ioutil.ReadAll(file) + if err != nil { + util.Log().Warning("Cannot read static file \"index.html\", it might affect the display of the homepage.") + return ignoreFunc + } + fileContent := string(fileContentBytes) + + fileServer := http.FileServer(bootstrap.StaticFS) + return func(c *gin.Context) { + path := c.Request.URL.Path + + // API 跳过 + if strings.HasPrefix(path, "/api") || + strings.HasPrefix(path, "/custom") || + strings.HasPrefix(path, "/dav") || + strings.HasPrefix(path, "/f") || + path == "/manifest.json" { + c.Next() + return + } + + // 不存在的路径和index.html均返回index.html + if (path == "/index.html") || (path == "/") || !bootstrap.StaticFS.Exists("/", path) { + // 读取、替换站点设置 + options := model.GetSettingByNames( + "siteName", // 站点名称 + "siteKeywords", // 关键词 + "siteDes", // 描述 + "siteScript", // 自定义代码 + "pwa_small_icon", // 图标 + ) + finalHTML := util.Replace(map[string]string{ + "{siteName}": options["siteName"], + "{siteKeywords}": options["siteKeywords"], + "{siteDes}": options["siteDes"], + "{siteScript}": options["siteScript"], + "{pwa_small_icon}": options["pwa_small_icon"], + }, fileContent) + + c.Header("Content-Type", "text/html") + c.String(200, finalHTML) + c.Abort() + return + } 
+ + if path == "/service-worker.js" { + c.Header("Cache-Control", "public, no-cache") + } + + // 存在的静态文件 + fileServer.ServeHTTP(c.Writer, c.Request) + c.Abort() + } +} diff --git a/middleware/mock.go b/middleware/mock.go new file mode 100644 index 0000000..d026e77 --- /dev/null +++ b/middleware/mock.go @@ -0,0 +1,24 @@ +package middleware + +import ( + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gin-gonic/gin" +) + +// SessionMock 测试时模拟Session +var SessionMock = make(map[string]interface{}) + +// ContextMock 测试时模拟Context +var ContextMock = make(map[string]interface{}) + +// MockHelper 单元测试助手中间件 +func MockHelper() gin.HandlerFunc { + return func(c *gin.Context) { + // 将SessionMock写入会话 + util.SetSession(c, SessionMock) + for key, value := range ContextMock { + c.Set(key, value) + } + c.Next() + } +} diff --git a/middleware/session.go b/middleware/session.go new file mode 100644 index 0000000..b6d8023 --- /dev/null +++ b/middleware/session.go @@ -0,0 +1,68 @@ +package middleware + +import ( + "net/http" + "strings" + + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/sessionstore" + + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gin-contrib/sessions" + "github.com/gin-gonic/gin" +) + +// Store session存储 +var Store sessions.Store + +// Session 初始化session +func Session(secret string) gin.HandlerFunc { + // Redis设置不为空,且非测试模式时使用Redis + Store = sessionstore.NewStore(cache.Store, []byte(secret)) + + sameSiteMode := http.SameSiteDefaultMode + switch strings.ToLower(conf.CORSConfig.SameSite) { + case "default": + sameSiteMode = http.SameSiteDefaultMode + case "none": + sameSiteMode = http.SameSiteNoneMode + case "strict": + sameSiteMode = http.SameSiteStrictMode + case "lax": + sameSiteMode = http.SameSiteLaxMode + } + + // Also set Secure: true if using SSL, you should though + Store.Options(sessions.Options{ + 
HttpOnly: true, + MaxAge: 60 * 86400, + Path: "/", + SameSite: sameSiteMode, + Secure: conf.CORSConfig.Secure, + }) + + return sessions.Sessions("cloudreve-session", Store) +} + +// CSRFInit 初始化CSRF标记 +func CSRFInit() gin.HandlerFunc { + return func(c *gin.Context) { + util.SetSession(c, map[string]interface{}{"CSRF": true}) + c.Next() + } +} + +// CSRFCheck 检查CSRF标记 +func CSRFCheck() gin.HandlerFunc { + return func(c *gin.Context) { + if check, ok := util.GetSession(c, "CSRF").(bool); ok && check { + c.Next() + return + } + + c.JSON(200, serializer.Err(serializer.CodeNoPermissionErr, "Invalid origin", nil)) + c.Abort() + } +} diff --git a/middleware/share.go b/middleware/share.go new file mode 100644 index 0000000..cc4ef42 --- /dev/null +++ b/middleware/share.go @@ -0,0 +1,139 @@ +package middleware + +import ( + "fmt" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gin-gonic/gin" +) + +// ShareOwner 检查当前登录用户是否为分享所有者 +func ShareOwner() gin.HandlerFunc { + return func(c *gin.Context) { + var user *model.User + if userCtx, ok := c.Get("user"); ok { + user = userCtx.(*model.User) + } else { + c.JSON(200, serializer.Err(serializer.CodeCheckLogin, "", nil)) + c.Abort() + return + } + + if share, ok := c.Get("share"); ok { + if share.(*model.Share).Creator().ID != user.ID { + c.JSON(200, serializer.Err(serializer.CodeShareLinkNotFound, "", nil)) + c.Abort() + return + } + } + + c.Next() + } +} + +// ShareAvailable 检查分享是否可用 +func ShareAvailable() gin.HandlerFunc { + return func(c *gin.Context) { + var user *model.User + if userCtx, ok := c.Get("user"); ok { + user = userCtx.(*model.User) + } else { + user = model.NewAnonymousUser() + } + + share := model.GetShareByHashID(c.Param("id")) + + if share == nil || !share.IsAvailable() { + c.JSON(200, serializer.Err(serializer.CodeShareLinkNotFound, "", nil)) + c.Abort() + return + } + + c.Set("user", user) + 
c.Set("share", share) + c.Next() + } +} + +// ShareCanPreview 检查分享是否可被预览 +func ShareCanPreview() gin.HandlerFunc { + return func(c *gin.Context) { + if share, ok := c.Get("share"); ok { + if share.(*model.Share).PreviewEnabled { + c.Next() + return + } + c.JSON(200, serializer.Err(serializer.CodeDisabledSharePreview, "", + nil)) + c.Abort() + return + } + c.Abort() + } +} + +// CheckShareUnlocked 检查分享是否已解锁 +func CheckShareUnlocked() gin.HandlerFunc { + return func(c *gin.Context) { + if shareCtx, ok := c.Get("share"); ok { + share := shareCtx.(*model.Share) + // 分享是否已解锁 + if share.Password != "" { + sessionKey := fmt.Sprintf("share_unlock_%d", share.ID) + unlocked := util.GetSession(c, sessionKey) != nil + if !unlocked { + c.JSON(200, serializer.Err(serializer.CodeNoPermissionErr, + "", nil)) + c.Abort() + return + } + } + + c.Next() + return + } + c.Abort() + } +} + +// BeforeShareDownload 分享被下载前的检查 +func BeforeShareDownload() gin.HandlerFunc { + return func(c *gin.Context) { + if shareCtx, ok := c.Get("share"); ok { + if userCtx, ok := c.Get("user"); ok { + share := shareCtx.(*model.Share) + user := userCtx.(*model.User) + + // 检查用户是否可以下载此分享的文件 + err := share.CanBeDownloadBy(user) + if err != nil { + c.JSON(200, serializer.Err(serializer.CodeGroupNotAllowed, err.Error(), + nil)) + c.Abort() + return + } + + // 对积分、下载次数进行更新 + err = share.DownloadBy(user, c) + if err != nil { + if err == model.ErrInsufficientCredit { + c.JSON(200, serializer.Err(serializer.CodeInsufficientCredit, err.Error(), + nil)) + } else { + c.JSON(200, serializer.Err(serializer.CodeGroupNotAllowed, err.Error(), + nil)) + } + + c.Abort() + return + } + + c.Next() + return + } + } + c.Abort() + } +} diff --git a/middleware/wopi.go b/middleware/wopi.go new file mode 100644 index 0000000..41b8c01 --- /dev/null +++ b/middleware/wopi.go @@ -0,0 +1,70 @@ +package middleware + +import ( + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + 
"github.com/cloudreve/Cloudreve/v3/pkg/wopi" + "github.com/gin-gonic/gin" + "net/http" + "strings" +) + +const ( + WopiSessionCtx = "wopi_session" +) + +// WopiWriteAccess validates if write access is obtained. +func WopiWriteAccess() gin.HandlerFunc { + return func(c *gin.Context) { + session := c.MustGet(WopiSessionCtx).(*wopi.SessionCache) + if session.Action != wopi.ActionEdit { + c.Status(http.StatusNotFound) + c.Header(wopi.ServerErrorHeader, "read-only access") + c.Abort() + return + } + + c.Next() + } +} + +func WopiAccessValidation(w wopi.Client, store cache.Driver) gin.HandlerFunc { + return func(c *gin.Context) { + accessToken := strings.Split(c.Query(wopi.AccessTokenQuery), ".") + if len(accessToken) != 2 { + c.Status(http.StatusForbidden) + c.Header(wopi.ServerErrorHeader, "malformed access token") + c.Abort() + return + } + + sessionRaw, exist := store.Get(wopi.SessionCachePrefix + accessToken[0]) + if !exist { + c.Status(http.StatusForbidden) + c.Header(wopi.ServerErrorHeader, "invalid access token") + c.Abort() + return + } + + session := sessionRaw.(wopi.SessionCache) + user, err := model.GetActiveUserByID(session.UserID) + if err != nil { + c.Status(http.StatusInternalServerError) + c.Header(wopi.ServerErrorHeader, "user not found") + c.Abort() + return + } + + fileID := c.MustGet("object_id").(uint) + if fileID != session.FileID { + c.Status(http.StatusInternalServerError) + c.Header(wopi.ServerErrorHeader, "file not found") + c.Abort() + return + } + + c.Set("user", &user) + c.Set(WopiSessionCtx, &session) + c.Next() + } +} diff --git a/models/defaults.go b/models/defaults.go new file mode 100644 index 0000000..ad718a1 --- /dev/null +++ b/models/defaults.go @@ -0,0 +1,186 @@ +package model + +import ( + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gofrs/uuid" +) + +var defaultSettings = []Setting{ + {Name: "siteURL", Value: 
`http://localhost`, Type: "basic"}, + {Name: "siteName", Value: `CloudrevePlus`, Type: "basic"}, + {Name: "register_enabled", Value: `1`, Type: "register"}, + {Name: "default_group", Value: `2`, Type: "register"}, + {Name: "mail_domain_filter", Value: `0`, Type: "register"}, + {Name: "mail_domain_filter_list", Value: `126.com,163.com,gmail.com,outlook.com,qq.com,foxmail.com,yeah.net,sohu.com,sohu.cn,139.com,wo.cn,189.cn,hotmail.com,live.com,live.cn`, Type: "register"}, + {Name: "siteKeywords", Value: `CloudrevePlus, cloud storage`, Type: "basic"}, + {Name: "siteDes", Value: `部署公私兼备的网盘系统`, Type: "basic"}, + {Name: "siteTitle", Value: `Inclusive cloud storage for everyone`, Type: "basic"}, + {Name: "siteNotice", Value: ``, Type: "basic"}, + {Name: "siteScript", Value: ``, Type: "basic"}, + {Name: "siteID", Value: uuid.Must(uuid.NewV4()).String(), Type: "basic"}, + {Name: "fromName", Value: `Cloudreve`, Type: "mail"}, + {Name: "mail_keepalive", Value: `30`, Type: "mail"}, + {Name: "fromAdress", Value: `no-reply@acg.blue`, Type: "mail"}, + {Name: "smtpHost", Value: `smtp.mxhichina.com`, Type: "mail"}, + {Name: "smtpPort", Value: `25`, Type: "mail"}, + {Name: "replyTo", Value: `abslant@126.com`, Type: "mail"}, + {Name: "smtpUser", Value: `no-reply@acg.blue`, Type: "mail"}, + {Name: "smtpPass", Value: ``, Type: "mail"}, + {Name: "smtpEncryption", Value: `0`, Type: "mail"}, + {Name: "over_used_template", Value: `容量超额提醒
容量超额警告
亲爱的{userName}
由于{notifyReason},您在{siteTitle}的账户的容量使用超出配额,您将无法继续上传新文件,请尽快清理文件,否则我们将会禁用您的账户。
登录{siteTitle}
感谢您选择{siteTitle}。
`, Type: "mail_template"}, + {Name: "ban_time", Value: `604800`, Type: "storage_policy"}, + {Name: "maxEditSize", Value: `52428800`, Type: "file_edit"}, + {Name: "archive_timeout", Value: `600`, Type: "timeout"}, + {Name: "download_timeout", Value: `600`, Type: "timeout"}, + {Name: "preview_timeout", Value: `600`, Type: "timeout"}, + {Name: "doc_preview_timeout", Value: `600`, Type: "timeout"}, + {Name: "upload_session_timeout", Value: `86400`, Type: "timeout"}, + {Name: "slave_api_timeout", Value: `60`, Type: "timeout"}, + {Name: "slave_node_retry", Value: `3`, Type: "slave"}, + {Name: "slave_ping_interval", Value: `60`, Type: "slave"}, + {Name: "slave_recover_interval", Value: `120`, Type: "slave"}, + {Name: "slave_transfer_timeout", Value: `172800`, Type: "timeout"}, + {Name: "onedrive_monitor_timeout", Value: `600`, Type: "timeout"}, + {Name: "onedrive_source_timeout", Value: `1800`, Type: "timeout"}, + {Name: "share_download_session_timeout", Value: `2073600`, Type: "timeout"}, + {Name: "onedrive_callback_check", Value: `20`, Type: "timeout"}, + {Name: "folder_props_timeout", Value: `300`, Type: "timeout"}, + {Name: "chunk_retries", Value: `5`, Type: "retry"}, + {Name: "onedrive_source_timeout", Value: `1800`, Type: "timeout"}, + {Name: "reset_after_upload_failed", Value: `0`, Type: "upload"}, + {Name: "use_temp_chunk_buffer", Value: `1`, Type: "upload"}, + {Name: "login_captcha", Value: `0`, Type: "login"}, + {Name: "qq_login", Value: `0`, Type: "login"}, + {Name: "qq_direct_login", Value: `0`, Type: "login"}, + {Name: "qq_login_id", Value: ``, Type: "login"}, + {Name: "qq_login_key", Value: ``, Type: "login"}, + {Name: "reg_captcha", Value: `0`, Type: "login"}, + {Name: "email_active", Value: `0`, Type: "register"}, + {Name: "mail_activation_template", Value: `用户激活
激活{siteTitle}账户
亲爱的{userName}
感谢您注册{siteTitle},请点击下方按钮完成账户激活。
激活账户
感谢您选择{siteTitle}。
`, Type: "mail_template"}, + {Name: "forget_captcha", Value: `0`, Type: "login"}, + {Name: "mail_reset_pwd_template", Value: `重设密码
重设{siteTitle}密码
亲爱的{userName}
请点击下方按钮完成密码重设。如果非你本人操作,请忽略此邮件。
重设密码
感谢您选择{siteTitle}。
`, Type: "mail_template"}, + {Name: "pack_data", Value: `[]`, Type: "pack"}, + {Name: "db_version_" + conf.RequiredDBVersion, Value: `installed`, Type: "version"}, + {Name: "alipay_enabled", Value: `0`, Type: "payment"}, + {Name: "payjs_enabled", Value: `0`, Type: "payment"}, + {Name: "payjs_id", Value: ``, Type: "payment"}, + {Name: "payjs_secret", Value: ``, Type: "payment"}, + {Name: "appid", Value: ``, Type: "payment"}, + {Name: "appkey", Value: ``, Type: "payment"}, + {Name: "shopid", Value: ``, Type: "payment"}, + {Name: "wechat_enabled", Value: `0`, Type: "payment"}, + {Name: "wechat_appid", Value: ``, Type: "payment"}, + {Name: "wechat_mchid", Value: ``, Type: "payment"}, + {Name: "wechat_serial_no", Value: ``, Type: "payment"}, + {Name: "wechat_api_key", Value: ``, Type: "payment"}, + {Name: "wechat_pk_content", Value: ``, Type: "payment"}, + {Name: "hot_share_num", Value: `10`, Type: "share"}, + {Name: "group_sell_data", Value: `[]`, Type: "group_sell"}, + {Name: "gravatar_server", Value: `https://www.gravatar.com/`, Type: "avatar"}, + {Name: "defaultTheme", Value: `#3f51b5`, Type: "basic"}, + {Name: "themes", Value: 
`{"#3f51b5":{"palette":{"primary":{"main":"#3f51b5"},"secondary":{"main":"#f50057"}}},"#2196f3":{"palette":{"primary":{"main":"#2196f3"},"secondary":{"main":"#FFC107"}}},"#673AB7":{"palette":{"primary":{"main":"#673AB7"},"secondary":{"main":"#2196F3"}}},"#E91E63":{"palette":{"primary":{"main":"#E91E63"},"secondary":{"main":"#42A5F5","contrastText":"#fff"}}},"#FF5722":{"palette":{"primary":{"main":"#FF5722"},"secondary":{"main":"#3F51B5"}}},"#FFC107":{"palette":{"primary":{"main":"#FFC107"},"secondary":{"main":"#26C6DA"}}},"#8BC34A":{"palette":{"primary":{"main":"#8BC34A","contrastText":"#fff"},"secondary":{"main":"#FF8A65","contrastText":"#fff"}}},"#009688":{"palette":{"primary":{"main":"#009688"},"secondary":{"main":"#4DD0E1","contrastText":"#fff"}}},"#607D8B":{"palette":{"primary":{"main":"#607D8B"},"secondary":{"main":"#F06292"}}},"#795548":{"palette":{"primary":{"main":"#795548"},"secondary":{"main":"#4CAF50","contrastText":"#fff"}}}}`, Type: "basic"}, + {Name: "max_worker_num", Value: `10`, Type: "task"}, + {Name: "max_parallel_transfer", Value: `4`, Type: "task"}, + {Name: "secret_key", Value: util.RandStringRunes(256), Type: "auth"}, + {Name: "temp_path", Value: "temp", Type: "path"}, + {Name: "avatar_path", Value: "avatar", Type: "path"}, + {Name: "avatar_size", Value: "2097152", Type: "avatar"}, + {Name: "avatar_size_l", Value: "200", Type: "avatar"}, + {Name: "avatar_size_m", Value: "130", Type: "avatar"}, + {Name: "avatar_size_s", Value: "50", Type: "avatar"}, + {Name: "score_enabled", Value: "1", Type: "score"}, + {Name: "share_score_rate", Value: "80", Type: "score"}, + {Name: "score_price", Value: "1", Type: "score"}, + {Name: "report_enabled", Value: "0", Type: "report"}, + {Name: "home_view_method", Value: "list", Type: "view"}, + {Name: "share_view_method", Value: "list", Type: "view"}, + {Name: "cron_garbage_collect", Value: "@hourly", Type: "cron"}, + {Name: "cron_notify_user", Value: "@hourly", Type: "cron"}, + {Name: "cron_ban_user", Value: 
"@hourly", Type: "cron"}, + {Name: "cron_recycle_upload_session", Value: "@every 1h30m", Type: "cron"}, + {Name: "authn_enabled", Value: "0", Type: "authn"}, + {Name: "captcha_type", Value: "normal", Type: "captcha"}, + {Name: "captcha_height", Value: "60", Type: "captcha"}, + {Name: "captcha_width", Value: "240", Type: "captcha"}, + {Name: "captcha_mode", Value: "3", Type: "captcha"}, + {Name: "captcha_ComplexOfNoiseText", Value: "0", Type: "captcha"}, + {Name: "captcha_ComplexOfNoiseDot", Value: "0", Type: "captcha"}, + {Name: "captcha_IsShowHollowLine", Value: "0", Type: "captcha"}, + {Name: "captcha_IsShowNoiseDot", Value: "1", Type: "captcha"}, + {Name: "captcha_IsShowNoiseText", Value: "0", Type: "captcha"}, + {Name: "captcha_IsShowSlimeLine", Value: "1", Type: "captcha"}, + {Name: "captcha_IsShowSineLine", Value: "0", Type: "captcha"}, + {Name: "captcha_CaptchaLen", Value: "6", Type: "captcha"}, + {Name: "captcha_ReCaptchaKey", Value: "defaultKey", Type: "captcha"}, + {Name: "captcha_ReCaptchaSecret", Value: "defaultSecret", Type: "captcha"}, + {Name: "captcha_TCaptcha_CaptchaAppId", Value: "", Type: "captcha"}, + {Name: "captcha_TCaptcha_AppSecretKey", Value: "", Type: "captcha"}, + {Name: "captcha_TCaptcha_SecretId", Value: "", Type: "captcha"}, + {Name: "captcha_TCaptcha_SecretKey", Value: "", Type: "captcha"}, + {Name: "thumb_width", Value: "400", Type: "thumb"}, + {Name: "thumb_height", Value: "300", Type: "thumb"}, + {Name: "thumb_file_suffix", Value: "._thumb", Type: "thumb"}, + {Name: "thumb_max_task_count", Value: "-1", Type: "thumb"}, + {Name: "thumb_encode_method", Value: "jpg", Type: "thumb"}, + {Name: "thumb_gc_after_gen", Value: "0", Type: "thumb"}, + {Name: "thumb_encode_quality", Value: "85", Type: "thumb"}, + {Name: "thumb_builtin_enabled", Value: "1", Type: "thumb"}, + {Name: "thumb_vips_enabled", Value: "0", Type: "thumb"}, + {Name: "thumb_ffmpeg_enabled", Value: "0", Type: "thumb"}, + {Name: "thumb_vips_path", Value: "vips", Type: 
"thumb"}, + {Name: "thumb_vips_exts", Value: "csv,mat,img,hdr,pbm,pgm,ppm,pfm,pnm,svg,svgz,j2k,jp2,jpt,j2c,jpc,gif,png,jpg,jpeg,jpe,webp,tif,tiff,fits,fit,fts,exr,jxl,pdf,heic,heif,avif,svs,vms,vmu,ndpi,scn,mrxs,svslide,bif,raw", Type: "thumb"}, + {Name: "thumb_ffmpeg_seek", Value: "00:00:01.00", Type: "thumb"}, + {Name: "thumb_ffmpeg_path", Value: "ffmpeg", Type: "thumb"}, + {Name: "thumb_ffmpeg_exts", Value: "3g2,3gp,asf,asx,avi,divx,flv,m2ts,m2v,m4v,mkv,mov,mp4,mpeg,mpg,mts,mxf,ogv,rm,swf,webm,wmv", Type: "thumb"}, + {Name: "thumb_libreoffice_path", Value: "soffice", Type: "thumb"}, + {Name: "thumb_libreoffice_enabled", Value: "0", Type: "thumb"}, + {Name: "thumb_libreoffice_exts", Value: "md,ods,ots,fods,uos,xlsx,xml,xls,xlt,dif,dbf,html,slk,csv,xlsm,docx,dotx,doc,dot,rtf,xlsm,xlst,xls,xlw,xlc,xlt,pptx,ppsx,potx,pomx,ppt,pps,ppm,pot,pom", Type: "thumb"}, + {Name: "thumb_proxy_enabled", Value: "0", Type: "thumb"}, + {Name: "thumb_proxy_policy", Value: "[]", Type: "thumb"}, + {Name: "thumb_max_src_size", Value: "31457280", Type: "thumb"}, + {Name: "pwa_small_icon", Value: "/static/img/favicon.ico", Type: "pwa"}, + {Name: "pwa_medium_icon", Value: "/static/img/logo192.png", Type: "pwa"}, + {Name: "pwa_large_icon", Value: "/static/img/logo512.png", Type: "pwa"}, + {Name: "pwa_display", Value: "standalone", Type: "pwa"}, + {Name: "pwa_theme_color", Value: "#000000", Type: "pwa"}, + {Name: "pwa_background_color", Value: "#ffffff", Type: "pwa"}, + {Name: "initial_files", Value: "[]", Type: "register"}, + {Name: "office_preview_service", Value: "https://view.officeapps.live.com/op/view.aspx?src={$src}", Type: "preview"}, + {Name: "phone_required", Value: "false", Type: "phone"}, + {Name: "phone_enabled", Value: "false", Type: "phone"}, + {Name: "vol_content", Value: "eyJkb21haW4iOiJjbG91ZHJldmUub3JnIiwicHVyY2hhc2VfZGF0ZSI6MTY3MDMyOTI3OX0=", Type: "vol"}, + {Name: "vol_signature", Value: 
"UzVBwjfFNTU1bSQV8OTgbMvTdRO7FwNYyMdTu4/phmyUltc6MrluUItiK0v+Uq6yX05L4ZnhTlojVLgi3zXWNq0Tjo3zW3CffZVwj7FCrmG72PBuQp4hV3+b/eMpUbYcTTT9zEt2mneSpGJBOsxDgaf9isVzP+J+YwynPJy1UMa1ckYlc/rEExcxqZxH1tiSHfkyuelIENDiwiggOZl7J2opM5jbxH9oTiAhxl6MN1dbY6DH9bydTibcylSXoQASCse6P/i6JmEWPSRDY22Ofkw3cqTzQcxuMSJjYYVkdAHdeqoDYi4ywmAr1tAJnlDyNNU/KmLQzufgAWjdGKTPNA==", Type: "vol"}, + {Name: "show_app_promotion", Value: "1", Type: "mobile"}, + {Name: "public_resource_maxage", Value: "86400", Type: "timeout"}, + {Name: "wopi_enabled", Value: "0", Type: "wopi"}, + {Name: "wopi_endpoint", Value: "", Type: "wopi"}, + {Name: "wopi_max_size", Value: "52428800", Type: "wopi"}, + {Name: "wopi_session_timeout", Value: "36000", Type: "wopi"}, + {Name: "custom_payment_enabled", Value: "0", Type: "payment"}, + {Name: "custom_payment_endpoint", Value: "", Type: "payment"}, + {Name: "custom_payment_secret", Value: "", Type: "payment"}, + {Name: "custom_payment_name", Value: "", Type: "payment"}, + {Name: "app_feedback_link", Value: "", Type: "mobile"}, + {Name: "app_forum_link", Value: "", Type: "mobile"}, +} + +func InitSlaveDefaults() { + for _, setting := range defaultSettings { + cache.Set("setting_"+setting.Name, setting.Value, -1) + } +} diff --git a/models/dialects/dialect_sqlite.go b/models/dialects/dialect_sqlite.go new file mode 100644 index 0000000..83894d0 --- /dev/null +++ b/models/dialects/dialect_sqlite.go @@ -0,0 +1,288 @@ +package dialects + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/jinzhu/gorm" +) + +var keyNameRegex = regexp.MustCompile("[^a-zA-Z0-9]+") + +// DefaultForeignKeyNamer contains the default foreign key name generator method +type DefaultForeignKeyNamer struct { +} + +type commonDialect struct { + db gorm.SQLCommon + DefaultForeignKeyNamer +} + +func (commonDialect) GetName() string { + return "common" +} + +func (s *commonDialect) SetDB(db gorm.SQLCommon) { + s.db = db +} + +func (commonDialect) BindVar(i int) string { + 
return "$$$" // ? +} + +func (commonDialect) Quote(key string) string { + return fmt.Sprintf(`"%s"`, key) +} + +func (s *commonDialect) fieldCanAutoIncrement(field *gorm.StructField) bool { + if value, ok := field.TagSettingsGet("AUTO_INCREMENT"); ok { + return strings.ToLower(value) != "false" + } + return field.IsPrimaryKey +} + +func (s *commonDialect) DataTypeOf(field *gorm.StructField) string { + var dataValue, sqlType, size, additionalType = gorm.ParseFieldStructForDialect(field, s) + + if sqlType == "" { + switch dataValue.Kind() { + case reflect.Bool: + sqlType = "BOOLEAN" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if s.fieldCanAutoIncrement(field) { + sqlType = "INTEGER AUTO_INCREMENT" + } else { + sqlType = "INTEGER" + } + case reflect.Int64, reflect.Uint64: + if s.fieldCanAutoIncrement(field) { + sqlType = "BIGINT AUTO_INCREMENT" + } else { + sqlType = "BIGINT" + } + case reflect.Float32, reflect.Float64: + sqlType = "FLOAT" + case reflect.String: + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("VARCHAR(%d)", size) + } else { + sqlType = "VARCHAR(65532)" + } + case reflect.Struct: + if _, ok := dataValue.Interface().(time.Time); ok { + sqlType = "TIMESTAMP" + } + default: + if _, ok := dataValue.Interface().([]byte); ok { + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("BINARY(%d)", size) + } else { + sqlType = "BINARY(65532)" + } + } + } + } + + if sqlType == "" { + panic(fmt.Sprintf("invalid sql type %s (%s) for commonDialect", dataValue.Type().Name(), dataValue.Kind().String())) + } + + if strings.TrimSpace(additionalType) == "" { + return sqlType + } + return fmt.Sprintf("%v %v", sqlType, additionalType) +} + +func currentDatabaseAndTable(dialect gorm.Dialect, tableName string) (string, string) { + if strings.Contains(tableName, ".") { + splitStrings := strings.SplitN(tableName, ".", 2) + return splitStrings[0], splitStrings[1] + } + 
return dialect.CurrentDatabase(), tableName +} + +func (s commonDialect) HasIndex(tableName string, indexName string) bool { + var count int + currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE table_schema = ? AND table_name = ? AND index_name = ?", currentDatabase, tableName, indexName).Scan(&count) + return count > 0 +} + +func (s commonDialect) RemoveIndex(tableName string, indexName string) error { + _, err := s.db.Exec(fmt.Sprintf("DROP INDEX %v", indexName)) + return err +} + +func (s commonDialect) HasForeignKey(tableName string, foreignKeyName string) bool { + return false +} + +func (s commonDialect) HasTable(tableName string) bool { + var count int + currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = ? AND table_name = ?", currentDatabase, tableName).Scan(&count) + return count > 0 +} + +func (s commonDialect) HasColumn(tableName string, columnName string) bool { + var count int + currentDatabase, tableName := currentDatabaseAndTable(&s, tableName) + s.db.QueryRow("SELECT count(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = ? AND table_name = ? 
AND column_name = ?", currentDatabase, tableName, columnName).Scan(&count) + return count > 0 +} + +func (s commonDialect) ModifyColumn(tableName string, columnName string, typ string) error { + _, err := s.db.Exec(fmt.Sprintf("ALTER TABLE %v ALTER COLUMN %v TYPE %v", tableName, columnName, typ)) + return err +} + +func (s commonDialect) CurrentDatabase() (name string) { + s.db.QueryRow("SELECT DATABASE()").Scan(&name) + return +} + +func (commonDialect) LimitAndOffsetSQL(limit, offset interface{}) (sql string) { + if limit != nil { + if parsedLimit, err := strconv.ParseInt(fmt.Sprint(limit), 0, 0); err == nil && parsedLimit >= 0 { + sql += fmt.Sprintf(" LIMIT %d", parsedLimit) + } + } + if offset != nil { + if parsedOffset, err := strconv.ParseInt(fmt.Sprint(offset), 0, 0); err == nil && parsedOffset >= 0 { + sql += fmt.Sprintf(" OFFSET %d", parsedOffset) + } + } + return +} + +func (commonDialect) SelectFromDummyTable() string { + return "" +} + +func (commonDialect) LastInsertIDReturningSuffix(tableName, columnName string) string { + return "" +} + +func (commonDialect) DefaultValueStr() string { + return "DEFAULT VALUES" +} + +// BuildKeyName returns a valid key name (foreign key, index key) for the given table, field and reference +func (DefaultForeignKeyNamer) BuildKeyName(kind, tableName string, fields ...string) string { + keyName := fmt.Sprintf("%s_%s_%s", kind, tableName, strings.Join(fields, "_")) + keyName = keyNameRegex.ReplaceAllString(keyName, "_") + return keyName +} + +// NormalizeIndexAndColumn returns argument's index name and column name without doing anything +func (commonDialect) NormalizeIndexAndColumn(indexName, columnName string) (string, string) { + return indexName, columnName +} + +// IsByteArrayOrSlice returns true of the reflected value is an array or slice +func IsByteArrayOrSlice(value reflect.Value) bool { + return (value.Kind() == reflect.Array || value.Kind() == reflect.Slice) && value.Type().Elem() == reflect.TypeOf(uint8(0)) +} 
+ +type sqlite struct { + commonDialect +} + +func init() { + gorm.RegisterDialect("sqlite", &sqlite{}) +} + +func (sqlite) GetName() string { + return "sqlite" +} + +// Get Data Type for Sqlite Dialect +func (s *sqlite) DataTypeOf(field *gorm.StructField) string { + var dataValue, sqlType, size, additionalType = gorm.ParseFieldStructForDialect(field, s) + + if sqlType == "" { + switch dataValue.Kind() { + case reflect.Bool: + sqlType = "bool" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr: + if s.fieldCanAutoIncrement(field) { + field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT") + sqlType = "integer primary key autoincrement" + } else { + sqlType = "integer" + } + case reflect.Int64, reflect.Uint64: + if s.fieldCanAutoIncrement(field) { + field.TagSettingsSet("AUTO_INCREMENT", "AUTO_INCREMENT") + sqlType = "integer primary key autoincrement" + } else { + sqlType = "bigint" + } + case reflect.Float32, reflect.Float64: + sqlType = "real" + case reflect.String: + if size > 0 && size < 65532 { + sqlType = fmt.Sprintf("varchar(%d)", size) + } else { + sqlType = "text" + } + case reflect.Struct: + if _, ok := dataValue.Interface().(time.Time); ok { + sqlType = "datetime" + } + default: + if IsByteArrayOrSlice(dataValue) { + sqlType = "blob" + } + } + } + + if sqlType == "" { + panic(fmt.Sprintf("invalid sql type %s (%s) for sqlite", dataValue.Type().Name(), dataValue.Kind().String())) + } + + if strings.TrimSpace(additionalType) == "" { + return sqlType + } + return fmt.Sprintf("%v %v", sqlType, additionalType) +} + +func (s sqlite) HasIndex(tableName string, indexName string) bool { + var count int + s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? 
AND sql LIKE '%%INDEX %v ON%%'", indexName), tableName).Scan(&count) + return count > 0 +} + +func (s sqlite) HasTable(tableName string) bool { + var count int + s.db.QueryRow("SELECT count(*) FROM sqlite_master WHERE type='table' AND name=?", tableName).Scan(&count) + return count > 0 +} + +func (s sqlite) HasColumn(tableName string, columnName string) bool { + var count int + s.db.QueryRow(fmt.Sprintf("SELECT count(*) FROM sqlite_master WHERE tbl_name = ? AND (sql LIKE '%%\"%v\" %%' OR sql LIKE '%%%v %%');", columnName, columnName), tableName).Scan(&count) + return count > 0 +} + +func (s sqlite) CurrentDatabase() (name string) { + var ( + ifaces = make([]interface{}, 3) + pointers = make([]*string, 3) + i int + ) + for i = 0; i < 3; i++ { + ifaces[i] = &pointers[i] + } + if err := s.db.QueryRow("PRAGMA database_list").Scan(ifaces...); err != nil { + return + } + if pointers[1] != nil { + name = *pointers[1] + } + return +} diff --git a/models/download.go b/models/download.go new file mode 100644 index 0000000..dce50f3 --- /dev/null +++ b/models/download.go @@ -0,0 +1,128 @@ +package model + +import ( + "encoding/json" + + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/rpc" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/jinzhu/gorm" +) + +// Download 离线下载队列模型 +type Download struct { + gorm.Model + Status int // 任务状态 + Type int // 任务类型 + Source string `gorm:"type:text"` // 文件下载地址 + TotalSize uint64 // 文件大小 + DownloadedSize uint64 // 文件大小 + GID string `gorm:"size:32,index:gid"` // 任务ID + Speed int // 下载速度 + Parent string `gorm:"type:text"` // 存储目录 + Attrs string `gorm:"size:4294967295"` // 任务状态属性 + Error string `gorm:"type:text"` // 错误描述 + Dst string `gorm:"type:text"` // 用户文件系统存储父目录路径 + UserID uint // 发起者UID + TaskID uint // 对应的转存任务ID + NodeID uint // 处理任务的节点ID + + // 关联模型 + User *User `gorm:"PRELOAD:false,association_autoupdate:false"` + + // 数据库忽略字段 + StatusInfo rpc.StatusInfo `gorm:"-"` + Task *Task `gorm:"-"` + NodeName string `gorm:"-"` +} + +// 
AfterFind 找到下载任务后的钩子,处理Status结构 +func (task *Download) AfterFind() (err error) { + // 解析状态 + if task.Attrs != "" { + err = json.Unmarshal([]byte(task.Attrs), &task.StatusInfo) + } + + if task.TaskID != 0 { + task.Task, _ = GetTasksByID(task.TaskID) + } + + return err +} + +// BeforeSave Save下载任务前的钩子 +// Serializes StatusInfo into Attrs so the in-memory status survives the save. +func (task *Download) BeforeSave() (err error) { + // Marshal, not Unmarshal: Attrs is the persisted form of StatusInfo + // (AfterFind above performs the inverse direction). + data, err := json.Marshal(&task.StatusInfo) + task.Attrs = string(data) + return err +} + +// Create 创建离线下载记录 +func (task *Download) Create() (uint, error) { + if err := DB.Create(task).Error; err != nil { + util.Log().Warning("Failed to insert download record: %s", err) + return 0, err + } + return task.ID, nil +} + +// Save 更新 +func (task *Download) Save() error { + if err := DB.Save(task).Error; err != nil { + util.Log().Warning("Failed to update download record: %s", err) + return err + } + return nil +} + +// GetDownloadsByStatus 根据状态检索下载 +func GetDownloadsByStatus(status ...int) []Download { + var tasks []Download + DB.Where("status in (?)", status).Find(&tasks) + return tasks +} + +// GetDownloadsByStatusAndUser 根据状态检索和用户ID下载 +// page 为 0 表示列出所有,非零时分页 +func GetDownloadsByStatusAndUser(page, uid uint, status ...int) []Download { + var tasks []Download + dbChain := DB + if page > 0 { + dbChain = dbChain.Limit(10).Offset((page - 1) * 10).Order("updated_at DESC") + } + dbChain.Where("user_id = ? and status in (?)", uid, status).Find(&tasks) + return tasks +} + +// GetDownloadByGid 根据GID和用户ID查找下载 +func GetDownloadByGid(gid string, uid uint) (*Download, error) { + download := &Download{} + result := DB.Where("user_id = ? 
and g_id = ?", uid, gid).First(download) + return download, result.Error +} + +// GetOwner 获取下载任务所属用户 +func (task *Download) GetOwner() *User { + if task.User == nil { + if user, err := GetUserByID(task.UserID); err == nil { + return &user + } + } + return task.User +} + +// Delete 删除离线下载记录 +func (download *Download) Delete() error { + return DB.Model(download).Delete(download).Error +} + +// GetNodeID 返回任务所属节点ID +func (task *Download) GetNodeID() uint { + // 兼容3.4版本之前生成的下载记录 + if task.NodeID == 0 { + return 1 + } + + return task.NodeID +} diff --git a/models/file.go b/models/file.go new file mode 100644 index 0000000..56ee2ab --- /dev/null +++ b/models/file.go @@ -0,0 +1,525 @@ +package model + +import ( + "encoding/gob" + "encoding/json" + "errors" + "fmt" + "path" + "path/filepath" + "strings" + "time" + + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/jinzhu/gorm" +) + +// File 文件 +type File struct { + // 表字段 + gorm.Model + Name string `gorm:"unique_index:idx_only_one"` + SourceName string `gorm:"type:text"` + UserID uint `gorm:"index:user_id;unique_index:idx_only_one"` + Size uint64 + PicInfo string + FolderID uint `gorm:"index:folder_id;unique_index:idx_only_one"` + PolicyID uint + UploadSessionID *string `gorm:"index:session_id;unique_index:session_only_one"` + Metadata string `gorm:"type:text"` + + // 关联模型 + Policy Policy `gorm:"PRELOAD:false,association_autoupdate:false"` + + // 数据库忽略字段 + Position string `gorm:"-"` + MetadataSerialized map[string]string `gorm:"-"` +} + +// Thumb related metadata +const ( + ThumbStatusNotExist = "" + ThumbStatusExist = "exist" + ThumbStatusNotAvailable = "not_available" + + ThumbStatusMetadataKey = "thumb_status" + ThumbSidecarMetadataKey = "thumb_sidecar" + + ChecksumMetadataKey = "webdav_checksum" +) + +func init() { + // 注册缓存用到的复杂结构 + gob.Register(File{}) +} + +// Create 创建文件记录 +func (file *File) Create() error { + tx := DB.Begin() + + if err := 
tx.Create(file).Error; err != nil { + util.Log().Warning("Failed to insert file record: %s", err) + tx.Rollback() + return err + } + + user := &User{} + user.ID = file.UserID + if err := user.ChangeStorage(tx, "+", file.Size); err != nil { + tx.Rollback() + return err + } + + return tx.Commit().Error +} + +// AfterFind 找到文件后的钩子 +func (file *File) AfterFind() (err error) { + // 反序列化文件元数据 + if file.Metadata != "" { + err = json.Unmarshal([]byte(file.Metadata), &file.MetadataSerialized) + } else { + file.MetadataSerialized = make(map[string]string) + } + + return +} + +// BeforeSave Save策略前的钩子 +func (file *File) BeforeSave() (err error) { + if len(file.MetadataSerialized) > 0 { + metaValue, err := json.Marshal(&file.MetadataSerialized) + file.Metadata = string(metaValue) + return err + } + + return nil +} + +// GetChildFile 查找目录下名为name的子文件 +func (folder *Folder) GetChildFile(name string) (*File, error) { + var file File + result := DB.Where("folder_id = ? AND name = ?", folder.ID, name).Find(&file) + + if result.Error == nil { + file.Position = path.Join(folder.Position, folder.Name) + } + return &file, result.Error +} + +// GetChildFiles 查找目录下子文件 +func (folder *Folder) GetChildFiles() ([]File, error) { + var files []File + result := DB.Where("folder_id = ?", folder.ID).Find(&files) + + if result.Error == nil { + for i := 0; i < len(files); i++ { + files[i].Position = path.Join(folder.Position, folder.Name) + } + } + return files, result.Error +} + +// GetFilesByIDs 根据文件ID批量获取文件, +// UID为0表示忽略用户,只根据文件ID检索 +func GetFilesByIDs(ids []uint, uid uint) ([]File, error) { + return GetFilesByIDsFromTX(DB, ids, uid) +} + +func GetFilesByIDsFromTX(tx *gorm.DB, ids []uint, uid uint) ([]File, error) { + var files []File + var result *gorm.DB + if uid == 0 { + result = tx.Where("id in (?)", ids).Find(&files) + } else { + result = tx.Where("id in (?) 
AND user_id = ?", ids, uid).Find(&files) + } + return files, result.Error +} + +// GetFilesByKeywords 根据关键字搜索文件, +// UID为0表示忽略用户,只根据文件ID检索. 如果 parents 非空, 则只限制在 parent 包含的目录下搜索 +func GetFilesByKeywords(uid uint, parents []uint, keywords ...interface{}) ([]File, error) { + var ( + files []File + result = DB + conditions string + ) + + // 生成查询条件 + for i := 0; i < len(keywords); i++ { + conditions += "name like ?" + if i != len(keywords)-1 { + conditions += " or " + } + } + + if uid != 0 { + result = result.Where("user_id = ?", uid) + } + + if len(parents) > 0 { + result = result.Where("folder_id in (?)", parents) + } + + result = result.Where("("+conditions+")", keywords...).Find(&files) + + return files, result.Error +} + +// GetChildFilesOfFolders 批量检索目录子文件 +func GetChildFilesOfFolders(folders *[]Folder) ([]File, error) { + // 将所有待检索目录ID抽离,以便检索文件 + folderIDs := make([]uint, 0, len(*folders)) + for _, value := range *folders { + folderIDs = append(folderIDs, value.ID) + } + + // 检索文件 + var files []File + result := DB.Where("folder_id in (?)", folderIDs).Find(&files) + return files, result.Error +} + +// GetUploadPlaceholderFiles 获取所有上传占位文件 +// UID为0表示忽略用户 +func GetUploadPlaceholderFiles(uid uint) []*File { + query := DB + if uid != 0 { + query = query.Where("user_id = ?", uid) + } + + var files []*File + query.Where("upload_session_id is not NULL").Find(&files) + return files +} + +// GetPolicy 获取文件所属策略 +func (file *File) GetPolicy() *Policy { + if file.Policy.Model.ID == 0 { + file.Policy, _ = GetPolicyByID(file.PolicyID) + } + return &file.Policy +} + +// RemoveFilesWithSoftLinks 去除给定的文件列表中有软链接的文件 +func RemoveFilesWithSoftLinks(files []File) ([]File, error) { + // 结果值 + filteredFiles := make([]File, 0) + + if len(files) == 0 { + return filteredFiles, nil + } + + // 查询软链接的文件 + filesWithSoftLinks := make([]File, 0) + for _, file := range files { + var softLinkFile File + res := DB. + Where("source_name = ? and policy_id = ? 
and id != ?", file.SourceName, file.PolicyID, file.ID). + First(&softLinkFile) + if res.Error == nil { + filesWithSoftLinks = append(filesWithSoftLinks, softLinkFile) + } + } + + // 过滤具有软连接的文件 + // TODO: 优化复杂度 + if len(filesWithSoftLinks) == 0 { + filteredFiles = files + } else { + for i := 0; i < len(files); i++ { + finder := false + for _, value := range filesWithSoftLinks { + if value.PolicyID == files[i].PolicyID && value.SourceName == files[i].SourceName { + finder = true + break + } + } + if !finder { + filteredFiles = append(filteredFiles, files[i]) + } + + } + } + + return filteredFiles, nil + +} + +// DeleteFiles 批量删除文件记录并归还容量 +func DeleteFiles(files []*File, uid uint) error { + tx := DB.Begin() + user := &User{} + user.ID = uid + var size uint64 + for _, file := range files { + if uid > 0 && file.UserID != uid { + tx.Rollback() + return errors.New("user id not consistent") + } + + result := tx.Unscoped().Where("size = ?", file.Size).Delete(file) + if result.Error != nil { + tx.Rollback() + return result.Error + } + + if result.RowsAffected == 0 { + tx.Rollback() + return errors.New("file size is dirty") + } + + size += file.Size + } + + if uid > 0 { + if err := user.ChangeStorage(tx, "-", size); err != nil { + tx.Rollback() + return err + } + } + + return tx.Commit().Error +} + +// GetFilesByParentIDs 根据父目录ID查找文件 +func GetFilesByParentIDs(ids []uint, uid uint) ([]File, error) { + files := make([]File, 0, len(ids)) + result := DB.Where("user_id = ? and folder_id in (?)", uid, ids).Find(&files) + return files, result.Error +} + +// GetFilesByUploadSession 查找上传会话对应的文件 +func GetFilesByUploadSession(sessionID string, uid uint) (*File, error) { + file := File{} + result := DB.Where("user_id = ? 
and upload_session_id = ?", uid, sessionID).Find(&file) + return &file, result.Error +} + +// Rename 重命名文件 +func (file *File) Rename(new string) error { + if file.MetadataSerialized[ThumbStatusMetadataKey] == ThumbStatusNotAvailable { + if !strings.EqualFold(filepath.Ext(new), filepath.Ext(file.Name)) { + // Reset thumb status for new ext name. + if err := file.resetThumb(); err != nil { + return err + } + } + } + + return DB.Model(&file).Set("gorm:association_autoupdate", false).Updates(map[string]interface{}{ + "name": new, + "metadata": file.Metadata, + }).Error +} + +// UpdatePicInfo 更新文件的图像信息 +func (file *File) UpdatePicInfo(value string) error { + return DB.Model(&file).Set("gorm:association_autoupdate", false).UpdateColumns(File{PicInfo: value}).Error +} + +// UpdateMetadata 新增或修改文件的元信息 +func (file *File) UpdateMetadata(data map[string]string) error { + if file.MetadataSerialized == nil { + file.MetadataSerialized = make(map[string]string) + } + + for k, v := range data { + file.MetadataSerialized[k] = v + } + metaValue, err := json.Marshal(&file.MetadataSerialized) + if err != nil { + return err + } + + return DB.Model(&file).Set("gorm:association_autoupdate", false).UpdateColumns(File{Metadata: string(metaValue)}).Error +} + +// UpdateSize 更新文件的大小信息 +// TODO: 全局锁 +func (file *File) UpdateSize(value uint64) error { + tx := DB.Begin() + var sizeDelta uint64 + operator := "+" + user := User{} + user.ID = file.UserID + if value > file.Size { + sizeDelta = value - file.Size + } else { + operator = "-" + sizeDelta = file.Size - value + } + + if err := file.resetThumb(); err != nil { + tx.Rollback() + return err + } + + if res := tx.Model(&file). + Where("size = ?", file.Size). + Set("gorm:association_autoupdate", false). 
+ Updates(map[string]interface{}{ + "size": value, + "metadata": file.Metadata, + }); res.Error != nil { + tx.Rollback() + return res.Error + } + + if err := user.ChangeStorage(tx, operator, sizeDelta); err != nil { + tx.Rollback() + return err + } + + file.Size = value + return tx.Commit().Error +} + +// UpdateSourceName 更新文件的源文件名 +func (file *File) UpdateSourceName(value string) error { + if err := file.resetThumb(); err != nil { + return err + } + + return DB.Model(&file).Set("gorm:association_autoupdate", false).Updates(map[string]interface{}{ + "source_name": value, + "metadata": file.Metadata, + }).Error +} + +// Relocate 更新文件的物理指向 +func (file *File) Relocate(src string, policyID uint) error { + file.Policy = Policy{} + return DB.Model(&file).Set("gorm:association_autoupdate", false).Updates(map[string]interface{}{ + "source_name": src, + "policy_id": policyID, + }).Error +} + +func (file *File) PopChunkToFile(lastModified *time.Time, picInfo string) error { + file.UploadSessionID = nil + if lastModified != nil { + file.UpdatedAt = *lastModified + } + + return DB.Model(file).UpdateColumns(map[string]interface{}{ + "upload_session_id": file.UploadSessionID, + "updated_at": file.UpdatedAt, + "pic_info": picInfo, + }).Error +} + +// CanCopy 返回文件是否可被复制 +func (file *File) CanCopy() bool { + return file.UploadSessionID == nil +} + +// CreateOrGetSourceLink creates a SourceLink model. If the given model exists, the existing +// model will be returned. 
+func (file *File) CreateOrGetSourceLink() (*SourceLink, error) { + res := &SourceLink{} + err := DB.Set("gorm:auto_preload", true).Where("file_id = ?", file.ID).Find(&res).Error + if err == nil && res.ID > 0 { + return res, nil + } + + res.FileID = file.ID + res.Name = file.Name + if err := DB.Save(res).Error; err != nil { + return nil, fmt.Errorf("failed to insert SourceLink: %w", err) + } + + res.File = *file + return res, nil +} + +func (file *File) resetThumb() error { + if _, ok := file.MetadataSerialized[ThumbStatusMetadataKey]; !ok { + return nil + } + + delete(file.MetadataSerialized, ThumbStatusMetadataKey) + metaValue, err := json.Marshal(&file.MetadataSerialized) + file.Metadata = string(metaValue) + return err +} + +/* + 实现 webdav.FileInfo 接口 +*/ + +func (file *File) GetName() string { + return file.Name +} + +func (file *File) GetSize() uint64 { + return file.Size +} +func (file *File) ModTime() time.Time { + return file.UpdatedAt +} + +func (file *File) IsDir() bool { + return false +} + +func (file *File) GetPosition() string { + return file.Position +} + +// ShouldLoadThumb returns if file explorer should try to load thumbnail for this file. +// `True` does not guarantee the load request will success in next step, but the client +// should try to load and fallback to default placeholder in case error returned. 
+func (file *File) ShouldLoadThumb() bool { + return file.MetadataSerialized[ThumbStatusMetadataKey] != ThumbStatusNotAvailable +} + +// return sidecar thumb file name +func (file *File) ThumbFile() string { + return file.SourceName + GetSettingByNameWithDefault("thumb_file_suffix", "._thumb") +} + +/* + 实现 filesystem.FileHeader 接口 +*/ + +// Read 实现 io.Reader +func (file *File) Read(p []byte) (n int, err error) { + return 0, errors.New("not supported") +} + +// Close 实现io.Closer +func (file *File) Close() error { + return errors.New("not supported") +} + +// Seeker 实现io.Seeker +func (file *File) Seek(offset int64, whence int) (int64, error) { + return 0, errors.New("not supported") +} + +func (file *File) Info() *fsctx.UploadTaskInfo { + return &fsctx.UploadTaskInfo{ + Size: file.Size, + FileName: file.Name, + VirtualPath: file.Position, + Mode: 0, + Metadata: file.MetadataSerialized, + LastModified: &file.UpdatedAt, + SavePath: file.SourceName, + UploadSessionID: file.UploadSessionID, + } +} + +func (file *File) SetSize(size uint64) { + file.Size = size +} + +func (file *File) SetModel(newFile interface{}) { +} + +func (file *File) Seekable() bool { + return false +} diff --git a/models/folder.go b/models/folder.go new file mode 100644 index 0000000..1130d6b --- /dev/null +++ b/models/folder.go @@ -0,0 +1,365 @@ +package model + +import ( + "errors" + "path" + "time" + + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/jinzhu/gorm" +) + +// Folder 目录 +type Folder struct { + // 表字段 + gorm.Model + Name string `gorm:"unique_index:idx_only_one_name"` + ParentID *uint `gorm:"index:parent_id;unique_index:idx_only_one_name"` + OwnerID uint `gorm:"index:owner_id"` + PolicyID uint // Webdav下挂载的存储策略ID + + // 数据库忽略字段 + Position string `gorm:"-"` + WebdavDstName string `gorm:"-"` + InheritPolicyID uint `gorm:"-"` // 从父目录继承而来的policy id,默认值则使用自身的的PolicyID +} + +// Create 创建目录 +func (folder *Folder) Create() (uint, error) { + if err := DB.FirstOrCreate(folder, 
*folder).Error; err != nil { + folder.Model = gorm.Model{} + err2 := DB.First(folder, *folder).Error + return folder.ID, err2 + } + + return folder.ID, nil +} + +// GetMountedFolders 列出已挂载存储策略的目录 +func GetMountedFolders(uid uint) []Folder { + var folders []Folder + DB.Where("owner_id = ? and policy_id <> ?", uid, 0).Find(&folders) + return folders +} + +// GetChild 返回folder下名为name的子目录,不存在则返回错误 +func (folder *Folder) GetChild(name string) (*Folder, error) { + var resFolder Folder + err := DB. + Where("parent_id = ? AND owner_id = ? AND name = ?", folder.ID, folder.OwnerID, name). + First(&resFolder).Error + + // 将子目录的路径及存储策略传递下去 + if err == nil { + resFolder.Position = path.Join(folder.Position, folder.Name) + if folder.PolicyID > 0 { + resFolder.InheritPolicyID = folder.PolicyID + } else if folder.InheritPolicyID > 0 { + resFolder.InheritPolicyID = folder.InheritPolicyID + } + } + return &resFolder, err +} + +// TraceRoot 向上递归查找父目录 +func (folder *Folder) TraceRoot() error { + if folder.ParentID == nil { + return nil + } + + var parentFolder Folder + err := DB. + Where("id = ? AND owner_id = ?", folder.ParentID, folder.OwnerID). + First(&parentFolder).Error + + if err == nil { + err := parentFolder.TraceRoot() + folder.Position = path.Join(parentFolder.Position, parentFolder.Name) + return err + } + + return err +} + +// GetChildFolder 查找子目录 +func (folder *Folder) GetChildFolder() ([]Folder, error) { + var folders []Folder + result := DB.Where("parent_id = ?", folder.ID).Find(&folders) + + if result.Error == nil { + for i := 0; i < len(folders); i++ { + folders[i].Position = path.Join(folder.Position, folder.Name) + } + } + return folders, result.Error +} + +// GetRecursiveChildFolder 查找所有递归子目录,包括自身 +func GetRecursiveChildFolder(dirs []uint, uid uint, includeSelf bool) ([]Folder, error) { + folders := make([]Folder, 0, len(dirs)) + var err error + + var parFolders []Folder + result := DB.Where("owner_id = ? 
and id in (?)", uid, dirs).Find(&parFolders) + if result.Error != nil { + return folders, err + } + + // 整理父目录的ID + var parentIDs = make([]uint, 0, len(parFolders)) + for _, folder := range parFolders { + parentIDs = append(parentIDs, folder.ID) + } + + if includeSelf { + // 合并至最终结果 + folders = append(folders, parFolders...) + } + parFolders = []Folder{} + + // 递归查询子目录,最大递归65535次 + for i := 0; i < 65535; i++ { + + result = DB.Where("owner_id = ? and parent_id in (?)", uid, parentIDs).Find(&parFolders) + + // 查询结束条件 + if len(parFolders) == 0 { + break + } + + // 整理父目录的ID + parentIDs = make([]uint, 0, len(parFolders)) + for _, folder := range parFolders { + parentIDs = append(parentIDs, folder.ID) + } + + // 合并至最终结果 + folders = append(folders, parFolders...) + parFolders = []Folder{} + + } + + return folders, err +} + +// DeleteFolderByIDs 根据给定ID批量删除目录记录 +func DeleteFolderByIDs(ids []uint) error { + result := DB.Where("id in (?)", ids).Unscoped().Delete(&Folder{}) + return result.Error +} + +// GetFoldersByIDs 根据ID和用户查找所有目录 +func GetFoldersByIDs(ids []uint, uid uint) ([]Folder, error) { + var folders []Folder + result := DB.Where("id in (?) AND owner_id = ?", ids, uid).Find(&folders) + return folders, result.Error +} + +// MoveOrCopyFileTo 将此目录下的files移动或复制至dstFolder, +// 返回此操作新增的容量 +func (folder *Folder) MoveOrCopyFileTo(files []uint, dstFolder *Folder, isCopy bool) (uint64, error) { + // 已复制文件的总大小 + var copiedSize uint64 + + if isCopy { + // 检索出要复制的文件 + var originFiles = make([]File, 0, len(files)) + if err := DB.Where( + "id in (?) and user_id = ? 
and folder_id = ?", + files, + folder.OwnerID, + folder.ID, + ).Find(&originFiles).Error; err != nil { + return 0, err + } + + // 复制文件记录 + for _, oldFile := range originFiles { + if !oldFile.CanCopy() { + util.Log().Warning("Cannot copy file %q because it's being uploaded now, skipping...", oldFile.Name) + continue + } + + oldFile.Model = gorm.Model{} + oldFile.FolderID = dstFolder.ID + oldFile.UserID = dstFolder.OwnerID + + // webdav目标名重置 + if dstFolder.WebdavDstName != "" { + oldFile.Name = dstFolder.WebdavDstName + } + + if err := DB.Create(&oldFile).Error; err != nil { + return copiedSize, err + } + + copiedSize += oldFile.Size + } + + } else { + var updates = map[string]interface{}{ + "folder_id": dstFolder.ID, + } + // webdav目标名重置 + if dstFolder.WebdavDstName != "" { + updates["name"] = dstFolder.WebdavDstName + } + + // 更改顶级要移动文件的父目录指向 + err := DB.Model(File{}).Where( + "id in (?) and user_id = ? and folder_id = ?", + files, + folder.OwnerID, + folder.ID, + ). + Update(updates). + Error + if err != nil { + return 0, err + } + + } + + return copiedSize, nil + +} + +// CopyFolderTo 将此目录及其子目录及文件递归复制至dstFolder +// 返回此操作新增的容量 +func (folder *Folder) CopyFolderTo(folderID uint, dstFolder *Folder) (size uint64, err error) { + // 列出所有子目录 + subFolders, err := GetRecursiveChildFolder([]uint{folderID}, folder.OwnerID, true) + if err != nil { + return 0, err + } + + // 抽离所有子目录的ID + var subFolderIDs = make([]uint, len(subFolders)) + for key, value := range subFolders { + subFolderIDs[key] = value.ID + } + + // 复制子目录 + var newIDCache = make(map[uint]uint) + for _, folder := range subFolders { + // 新的父目录指向 + var newID uint + // 顶级目录直接指向新的目的目录 + if folder.ID == folderID { + newID = dstFolder.ID + // webdav目标名重置 + if dstFolder.WebdavDstName != "" { + folder.Name = dstFolder.WebdavDstName + } + } else if IDCache, ok := newIDCache[*folder.ParentID]; ok { + newID = IDCache + } else { + util.Log().Warning("Failed to get parent folder %q", *folder.ParentID) + return size, 
errors.New("Failed to get parent folder") + } + + // 插入新的目录记录 + oldID := folder.ID + folder.Model = gorm.Model{} + folder.ParentID = &newID + folder.OwnerID = dstFolder.OwnerID + if err = DB.Create(&folder).Error; err != nil { + return size, err + } + // 记录新的ID以便其子目录使用 + newIDCache[oldID] = folder.ID + + } + + // 复制文件 + var originFiles = make([]File, 0, len(subFolderIDs)) + if err := DB.Where( + "user_id = ? and folder_id in (?)", + folder.OwnerID, + subFolderIDs, + ).Find(&originFiles).Error; err != nil { + return 0, err + } + + // 复制文件记录 + for _, oldFile := range originFiles { + if !oldFile.CanCopy() { + util.Log().Warning("Cannot copy file %q because it's being uploaded now, skipping...", oldFile.Name) + continue + } + + oldFile.Model = gorm.Model{} + oldFile.FolderID = newIDCache[oldFile.FolderID] + oldFile.UserID = dstFolder.OwnerID + if err := DB.Create(&oldFile).Error; err != nil { + return size, err + } + + size += oldFile.Size + } + + return size, nil + +} + +// MoveFolderTo 将folder目录下的dirs子目录复制或移动到dstFolder, +// 返回此过程中增加的容量 +func (folder *Folder) MoveFolderTo(dirs []uint, dstFolder *Folder) error { + + // 如果目标位置为待移动的目录,会导致 parent 为自己 + // 造成死循环且无法被除搜索以外的组件展示 + if folder.OwnerID == dstFolder.OwnerID && util.ContainsUint(dirs, dstFolder.ID) { + return errors.New("cannot move a folder into itself") + } + + var updates = map[string]interface{}{ + "parent_id": dstFolder.ID, + } + // webdav目标名重置 + if dstFolder.WebdavDstName != "" { + updates["name"] = dstFolder.WebdavDstName + } + + // 更改顶级要移动目录的父目录指向 + err := DB.Model(Folder{}).Where( + "id in (?) and owner_id = ? 
and parent_id = ?", + dirs, + folder.OwnerID, + folder.ID, + ).Update(updates).Error + + return err + +} + +// Rename 重命名目录 +func (folder *Folder) Rename(new string) error { + return DB.Model(&folder).UpdateColumn("name", new).Error +} + +// Mount 目录挂载 +func (folder *Folder) Mount(new uint) error { + return DB.Model(&folder).Update("policy_id", new).Error +} + +/* + 实现 FileInfo.FileInfo 接口 + TODO 测试 +*/ + +func (folder *Folder) GetName() string { + return folder.Name +} + +func (folder *Folder) GetSize() uint64 { + return 0 +} +func (folder *Folder) ModTime() time.Time { + return folder.UpdatedAt +} +func (folder *Folder) IsDir() bool { + return true +} +func (folder *Folder) GetPosition() string { + return folder.Position +} diff --git a/models/group.go b/models/group.go new file mode 100644 index 0000000..8dc3057 --- /dev/null +++ b/models/group.go @@ -0,0 +1,89 @@ +package model + +import ( + "encoding/json" + + "github.com/jinzhu/gorm" +) + +// Group 用户组模型 +type Group struct { + gorm.Model + Name string + Policies string + MaxStorage uint64 + ShareEnabled bool + WebDAVEnabled bool + SpeedLimit int + Options string `json:"-" gorm:"size:4294967295"` + + // 数据库忽略字段 + PolicyList []uint `gorm:"-"` + OptionsSerialized GroupOption `gorm:"-"` +} + +// GroupOption 用户组其他配置 +type GroupOption struct { + ArchiveDownload bool `json:"archive_download,omitempty"` // 打包下载 + ArchiveTask bool `json:"archive_task,omitempty"` // 在线压缩 + CompressSize uint64 `json:"compress_size,omitempty"` // 可压缩大小 + DecompressSize uint64 `json:"decompress_size,omitempty"` + OneTimeDownload bool `json:"one_time_download,omitempty"` + ShareDownload bool `json:"share_download,omitempty"` + ShareFree bool `json:"share_free,omitempty"` + Aria2 bool `json:"aria2,omitempty"` // 离线下载 + Aria2Options map[string]interface{} `json:"aria2_options,omitempty"` // 离线下载用户组配置 + Relocate bool `json:"relocate,omitempty"` // 转移文件 + SourceBatchSize int `json:"source_batch,omitempty"` + RedirectedSource bool 
`json:"redirected_source,omitempty"` + Aria2BatchSize int `json:"aria2_batch,omitempty"` + AvailableNodes []uint `json:"available_nodes,omitempty"` + SelectNode bool `json:"select_node,omitempty"` + AdvanceDelete bool `json:"advance_delete,omitempty"` + WebDAVProxy bool `json:"webdav_proxy,omitempty"` +} + +// GetGroupByID 用ID获取用户组 +func GetGroupByID(ID interface{}) (Group, error) { + var group Group + result := DB.First(&group, ID) + return group, result.Error +} + +// AfterFind 找到用户组后的钩子,处理Policy列表 +func (group *Group) AfterFind() (err error) { + // 解析用户组策略列表 + if group.Policies != "" { + err = json.Unmarshal([]byte(group.Policies), &group.PolicyList) + } + if err != nil { + return err + } + + // 解析用户组设置 + if group.Options != "" { + err = json.Unmarshal([]byte(group.Options), &group.OptionsSerialized) + } + + return err +} + +// BeforeSave Save用户前的钩子 +func (group *Group) BeforeSave() (err error) { + err = group.SerializePolicyList() + return err +} + +// SerializePolicyList 将序列后的可选策略列表、配置写入数据库字段 +// TODO 完善测试 +func (group *Group) SerializePolicyList() (err error) { + policies, err := json.Marshal(&group.PolicyList) + group.Policies = string(policies) + if err != nil { + return err + } + + optionsValue, err := json.Marshal(&group.OptionsSerialized) + group.Options = string(optionsValue) + return err +} diff --git a/models/init.go b/models/init.go new file mode 100644 index 0000000..a0920a9 --- /dev/null +++ b/models/init.go @@ -0,0 +1,106 @@ +package model + +import ( + "fmt" + "time" + + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gin-gonic/gin" + "github.com/jinzhu/gorm" + + _ "github.com/cloudreve/Cloudreve/v3/models/dialects" + _ "github.com/glebarez/go-sqlite" + _ "github.com/jinzhu/gorm/dialects/mssql" + _ "github.com/jinzhu/gorm/dialects/mysql" + _ "github.com/jinzhu/gorm/dialects/postgres" +) + +// DB 数据库链接单例 +var DB *gorm.DB + +// Init 初始化 MySQL 链接 +func Init() { + util.Log().Info("Initializing 
database connection...") + + var ( + db *gorm.DB + err error + confDBType string = conf.DatabaseConfig.Type + ) + + // 兼容已有配置中的 "sqlite3" 配置项 + if confDBType == "sqlite3" { + confDBType = "sqlite" + } + + if gin.Mode() == gin.TestMode { + // 测试模式下,使用内存数据库 + db, err = gorm.Open("sqlite", ":memory:") + } else { + switch confDBType { + case "UNSET", "sqlite": + // 未指定数据库或者明确指定为 sqlite 时,使用 SQLite 数据库 + db, err = gorm.Open("sqlite", util.RelativePath(conf.DatabaseConfig.DBFile)) + case "postgres": + db, err = gorm.Open(confDBType, fmt.Sprintf("host=%s user=%s password=%s dbname=%s port=%d sslmode=disable", + conf.DatabaseConfig.Host, + conf.DatabaseConfig.User, + conf.DatabaseConfig.Password, + conf.DatabaseConfig.Name, + conf.DatabaseConfig.Port)) + case "mysql", "mssql": + var host string + if conf.DatabaseConfig.UnixSocket { + host = fmt.Sprintf("unix(%s)", + conf.DatabaseConfig.Host) + } else { + host = fmt.Sprintf("(%s:%d)", + conf.DatabaseConfig.Host, + conf.DatabaseConfig.Port) + } + + db, err = gorm.Open(confDBType, fmt.Sprintf("%s:%s@%s/%s?charset=%s&parseTime=True&loc=Local", + conf.DatabaseConfig.User, + conf.DatabaseConfig.Password, + host, + conf.DatabaseConfig.Name, + conf.DatabaseConfig.Charset)) + default: + util.Log().Panic("Unsupported database type %q.", confDBType) + } + } + + //db.SetLogger(util.Log()) + if err != nil { + util.Log().Panic("Failed to connect to database: %s", err) + } + + // 处理表前缀 + gorm.DefaultTableNameHandler = func(db *gorm.DB, defaultTableName string) string { + return conf.DatabaseConfig.TablePrefix + defaultTableName + } + + // Debug模式下,输出所有 SQL 日志 + if conf.SystemConfig.Debug { + db.LogMode(true) + } else { + db.LogMode(false) + } + + //设置连接池 + db.DB().SetMaxIdleConns(50) + if confDBType == "sqlite" || confDBType == "UNSET" { + db.DB().SetMaxOpenConns(1) + } else { + db.DB().SetMaxOpenConns(100) + } + + //超时 + db.DB().SetConnMaxLifetime(time.Second * 30) + + DB = db + + //执行迁移 + migration() +} diff --git a/models/migration.go 
b/models/migration.go new file mode 100644 index 0000000..f86e5e9 --- /dev/null +++ b/models/migration.go @@ -0,0 +1,221 @@ +package model + +import ( + "context" + "github.com/cloudreve/Cloudreve/v3/models/scripts/invoker" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/fatih/color" + "github.com/hashicorp/go-version" + "github.com/jinzhu/gorm" + "sort" + "strings" +) + +// 是否需要迁移 +func needMigration() bool { + var setting Setting + return DB.Where("name = ?", "db_version_"+conf.RequiredDBVersion).First(&setting).Error != nil +} + +// 执行数据迁移 +func migration() { + // 确认是否需要执行迁移 + if !needMigration() { + util.Log().Info("Database version fulfilled, skip schema migration.") + return + + } + + util.Log().Info("Start initializing database schema...") + + // 清除所有缓存 + if instance, ok := cache.Store.(*cache.RedisStore); ok { + instance.DeleteAll() + } + + // 自动迁移模式 + if conf.DatabaseConfig.Type == "mysql" { + DB = DB.Set("gorm:table_options", "ENGINE=InnoDB") + } + + DB.AutoMigrate(&User{}, &Setting{}, &Group{}, &Policy{}, &Folder{}, &File{}, &StoragePack{}, &Share{}, + &Task{}, &Download{}, &Tag{}, &Webdav{}, &Order{}, &Redeem{}, &Report{}, &Node{}, &SourceLink{}) + + // 创建初始存储策略 + addDefaultPolicy() + + // 创建初始用户组 + addDefaultGroups() + + // 创建初始管理员账户 + addDefaultUser() + + // 创建初始节点 + addDefaultNode() + + // 向设置数据表添加初始设置 + addDefaultSettings() + + // 执行数据库升级脚本 + execUpgradeScripts() + + util.Log().Info("Finish initializing database schema.") + +} + +func addDefaultPolicy() { + _, err := GetPolicyByID(uint(1)) + // 未找到初始存储策略时,则创建 + if gorm.IsRecordNotFoundError(err) { + defaultPolicy := Policy{ + Name: "Default storage policy", + Type: "local", + MaxSize: 0, + AutoRename: true, + DirNameRule: "uploads/{uid}/{path}", + FileNameRule: "{uid}_{randomkey8}_{originname}", + IsOriginLinkEnable: false, + OptionsSerialized: PolicyOption{ + ChunkSize: 25 << 20, // 25MB + }, 
+ } + if err := DB.Create(&defaultPolicy).Error; err != nil { + util.Log().Panic("Failed to create default storage policy: %s", err) + } + } +} + +func addDefaultSettings() { + for _, value := range defaultSettings { + DB.Where(Setting{Name: value.Name}).Create(&value) + } +} + +func addDefaultGroups() { + _, err := GetGroupByID(1) + // 未找到初始管理组时,则创建 + if gorm.IsRecordNotFoundError(err) { + defaultAdminGroup := Group{ + Name: "Admin", + PolicyList: []uint{1}, + MaxStorage: 1 * 1024 * 1024 * 1024, + ShareEnabled: true, + WebDAVEnabled: true, + OptionsSerialized: GroupOption{ + ArchiveDownload: true, + ArchiveTask: true, + ShareDownload: true, + ShareFree: true, + Aria2: true, + Relocate: true, + SourceBatchSize: 1000, + Aria2BatchSize: 50, + RedirectedSource: true, + SelectNode: true, + AdvanceDelete: true, + }, + } + if err := DB.Create(&defaultAdminGroup).Error; err != nil { + util.Log().Panic("Failed to create admin user group: %s", err) + } + } + + err = nil + _, err = GetGroupByID(2) + // 未找到初始注册会员时,则创建 + if gorm.IsRecordNotFoundError(err) { + defaultAdminGroup := Group{ + Name: "User", + PolicyList: []uint{1}, + MaxStorage: 1 * 1024 * 1024 * 1024, + ShareEnabled: true, + WebDAVEnabled: true, + OptionsSerialized: GroupOption{ + ShareDownload: true, + SourceBatchSize: 10, + Aria2BatchSize: 1, + RedirectedSource: true, + }, + } + if err := DB.Create(&defaultAdminGroup).Error; err != nil { + util.Log().Panic("Failed to create initial user group: %s", err) + } + } + + err = nil + _, err = GetGroupByID(3) + // 未找到初始游客用户组时,则创建 + if gorm.IsRecordNotFoundError(err) { + defaultAdminGroup := Group{ + Name: "Anonymous", + PolicyList: []uint{}, + Policies: "[]", + OptionsSerialized: GroupOption{ + ShareDownload: true, + }, + } + if err := DB.Create(&defaultAdminGroup).Error; err != nil { + util.Log().Panic("Failed to create anonymous user group: %s", err) + } + } +} + +func addDefaultUser() { + _, err := GetUserByID(1) + password := util.RandStringRunes(8) + + // 
未找到初始用户时,则创建 + if gorm.IsRecordNotFoundError(err) { + defaultUser := NewUser() + defaultUser.Email = "admin@cloudreve.org" + defaultUser.Nick = "admin" + defaultUser.Status = Active + defaultUser.GroupID = 1 + err := defaultUser.SetPassword(password) + if err != nil { + util.Log().Panic("Failed to create password: %s", err) + } + if err := DB.Create(&defaultUser).Error; err != nil { + util.Log().Panic("Failed to create initial root user: %s", err) + } + + c := color.New(color.FgWhite).Add(color.BgBlack).Add(color.Bold) + util.Log().Info("Admin user name: " + c.Sprint("admin@cloudreve.org")) + util.Log().Info("Admin password: " + c.Sprint(password)) + } +} + +func addDefaultNode() { + _, err := GetNodeByID(1) + + if gorm.IsRecordNotFoundError(err) { + defaultAdminGroup := Node{ + Name: "Master (Local machine)", + Status: NodeActive, + Type: MasterNodeType, + Aria2OptionsSerialized: Aria2Option{ + Interval: 10, + Timeout: 10, + }, + } + if err := DB.Create(&defaultAdminGroup).Error; err != nil { + util.Log().Panic("Failed to create initial node: %s", err) + } + } +} + +func execUpgradeScripts() { + s := invoker.ListPrefix("UpgradeTo") + versions := make([]*version.Version, len(s)) + for i, raw := range s { + v, _ := version.NewVersion(strings.TrimPrefix(raw, "UpgradeTo")) + versions[i] = v + } + sort.Sort(version.Collection(versions)) + + for i := 0; i < len(versions); i++ { + invoker.RunDBScript("UpgradeTo"+versions[i].String(), context.Background()) + } +} diff --git a/models/node.go b/models/node.go new file mode 100644 index 0000000..992a828 --- /dev/null +++ b/models/node.go @@ -0,0 +1,91 @@ +package model + +import ( + "encoding/json" + "github.com/jinzhu/gorm" +) + +// Node 从机节点信息模型 +type Node struct { + gorm.Model + Status NodeStatus // 节点状态 + Name string // 节点别名 + Type ModelType // 节点状态 + Server string // 服务器地址 + SlaveKey string `gorm:"type:text"` // 主->从 通信密钥 + MasterKey string `gorm:"type:text"` // 从->主 通信密钥 + Aria2Enabled bool // 是否支持用作离线下载节点 + 
Aria2Options string `gorm:"type:text"` // 离线下载配置 + Rank int // 负载均衡权重 + + // 数据库忽略字段 + Aria2OptionsSerialized Aria2Option `gorm:"-"` +} + +// Aria2Option 非公有的Aria2配置属性 +type Aria2Option struct { + // RPC 服务器地址 + Server string `json:"server,omitempty"` + // RPC 密钥 + Token string `json:"token,omitempty"` + // 临时下载目录 + TempPath string `json:"temp_path,omitempty"` + // 附加下载配置 + Options string `json:"options,omitempty"` + // 下载监控间隔 + Interval int `json:"interval,omitempty"` + // RPC API 请求超时 + Timeout int `json:"timeout,omitempty"` +} + +type NodeStatus int +type ModelType int + +const ( + NodeActive NodeStatus = iota + NodeSuspend +) + +const ( + SlaveNodeType ModelType = iota + MasterNodeType +) + +// GetNodeByID 用ID获取节点 +func GetNodeByID(ID interface{}) (Node, error) { + var node Node + result := DB.First(&node, ID) + return node, result.Error +} + +// GetNodesByStatus 根据给定状态获取节点 +func GetNodesByStatus(status ...NodeStatus) ([]Node, error) { + var nodes []Node + result := DB.Where("status in (?)", status).Find(&nodes) + return nodes, result.Error +} + +// AfterFind 找到节点后的钩子 +func (node *Node) AfterFind() (err error) { + // 解析离线下载设置到 Aria2OptionsSerialized + if node.Aria2Options != "" { + err = json.Unmarshal([]byte(node.Aria2Options), &node.Aria2OptionsSerialized) + } + + return err +} + +// BeforeSave Save策略前的钩子 +func (node *Node) BeforeSave() (err error) { + optionsValue, err := json.Marshal(&node.Aria2OptionsSerialized) + node.Aria2Options = string(optionsValue) + return err +} + +// SetStatus 设置节点启用状态 +func (node *Node) SetStatus(status NodeStatus) error { + node.Status = status + return DB.Model(node).Updates(map[string]interface{}{ + "status": status, + }).Error +} diff --git a/models/order.go b/models/order.go new file mode 100644 index 0000000..9a79c24 --- /dev/null +++ b/models/order.go @@ -0,0 +1,59 @@ +package model + +import ( + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/jinzhu/gorm" +) + +const ( + // PackOrderType 容量包订单 + PackOrderType = 
iota + // GroupOrderType 用户组订单 + GroupOrderType + // ScoreOrderType 积分充值订单 + ScoreOrderType +) + +const ( + // OrderUnpaid 未支付 + OrderUnpaid = iota + // OrderPaid 已支付 + OrderPaid + // OrderCanceled 已取消 + OrderCanceled +) + +// Order 交易订单 +type Order struct { + gorm.Model + UserID uint // 创建者ID + OrderNo string `gorm:"index:order_number"` // 商户自定义订单编号 + Type int // 订单类型 + Method string // 支付类型 + ProductID int64 // 商品ID + Num int // 商品数量 + Name string // 订单标题 + Price int // 商品单价 + Status int // 订单状态 +} + +// Create 创建订单记录 +func (order *Order) Create() (uint, error) { + if err := DB.Create(order).Error; err != nil { + util.Log().Warning("Failed to insert order record: %s", err) + return 0, err + } + return order.ID, nil +} + +// UpdateStatus 更新订单状态 +func (order *Order) UpdateStatus(status int) { + DB.Model(order).Update("status", status) +} + +// GetOrderByNo 根据商户订单号查询订单 +func GetOrderByNo(id string) (*Order, error) { + var order Order + err := DB.Where("order_no = ?", id).First(&order).Error + return &order, err +} diff --git a/models/policy.go b/models/policy.go new file mode 100644 index 0000000..f80b553 --- /dev/null +++ b/models/policy.go @@ -0,0 +1,267 @@ +package model + +import ( + "encoding/gob" + "encoding/json" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/gofrs/uuid" + "github.com/samber/lo" + + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/jinzhu/gorm" +) + +// Policy 存储策略 +type Policy struct { + // 表字段 + gorm.Model + Name string + Type string + Server string + BucketName string + IsPrivate bool + BaseURL string + AccessKey string `gorm:"type:text"` + SecretKey string `gorm:"type:text"` + MaxSize uint64 + AutoRename bool + DirNameRule string + FileNameRule string + IsOriginLinkEnable bool + Options string `gorm:"type:text"` + + // 数据库忽略字段 + OptionsSerialized PolicyOption `gorm:"-"` + MasterID string `gorm:"-"` +} + +// PolicyOption 非公有的存储策略属性 +type PolicyOption 
struct { + // Upyun访问Token + Token string `json:"token"` + // 允许的文件扩展名 + FileType []string `json:"file_type"` + // MimeType + MimeType string `json:"mimetype"` + // OauthRedirect Oauth 重定向地址 + OauthRedirect string `json:"od_redirect,omitempty"` + // OdProxy Onedrive 反代地址 + OdProxy string `json:"od_proxy,omitempty"` + // OdDriver OneDrive 驱动器定位符 + OdDriver string `json:"od_driver,omitempty"` + // Region 区域代码 + Region string `json:"region,omitempty"` + // ServerSideEndpoint 服务端请求使用的 Endpoint,为空时使用 Policy.Server 字段 + ServerSideEndpoint string `json:"server_side_endpoint,omitempty"` + // 分片上传的分片大小 + ChunkSize uint64 `json:"chunk_size,omitempty"` + // 分片上传时是否需要预留空间 + PlaceholderWithSize bool `json:"placeholder_with_size,omitempty"` + // 每秒对存储端的 API 请求上限 + TPSLimit float64 `json:"tps_limit,omitempty"` + // 每秒 API 请求爆发上限 + TPSLimitBurst int `json:"tps_limit_burst,omitempty"` + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY ` + S3ForcePathStyle bool `json:"s3_path_style"` + // File extensions that support thumbnail generation using native policy API. 
+ ThumbExts []string `json:"thumb_exts,omitempty"` +} + +// thumbSuffix 支持缩略图处理的文件扩展名 +var thumbSuffix = map[string][]string{ + "local": {}, + "qiniu": {".psd", ".jpg", ".jpeg", ".png", ".gif", ".webp", ".tiff", ".bmp"}, + "oss": {".jpg", ".jpeg", ".png", ".gif", ".webp", ".tiff", ".bmp"}, + "cos": {".jpg", ".jpeg", ".png", ".gif", ".webp", ".tiff", ".bmp"}, + "upyun": {".svg", ".jpg", ".jpeg", ".png", ".gif", ".webp", ".tiff", ".bmp"}, + "s3": {}, + "remote": {}, + "onedrive": {"*"}, +} + +func init() { + // 注册缓存用到的复杂结构 + gob.Register(Policy{}) +} + +// GetPolicyByID 用ID获取存储策略 +func GetPolicyByID(ID interface{}) (Policy, error) { + // 尝试读取缓存 + cacheKey := "policy_" + strconv.Itoa(int(ID.(uint))) + if policy, ok := cache.Get(cacheKey); ok { + return policy.(Policy), nil + } + + var policy Policy + result := DB.First(&policy, ID) + + // 写入缓存 + if result.Error == nil { + _ = cache.Set(cacheKey, policy, -1) + } + + return policy, result.Error +} + +// AfterFind 找到存储策略后的钩子 +func (policy *Policy) AfterFind() (err error) { + // 解析存储策略设置到OptionsSerialized + if policy.Options != "" { + err = json.Unmarshal([]byte(policy.Options), &policy.OptionsSerialized) + } + if policy.OptionsSerialized.FileType == nil { + policy.OptionsSerialized.FileType = []string{} + } + + return err +} + +// BeforeSave Save策略前的钩子 +func (policy *Policy) BeforeSave() (err error) { + err = policy.SerializeOptions() + return err +} + +// SerializeOptions 将序列后的Option写入到数据库字段 +func (policy *Policy) SerializeOptions() (err error) { + optionsValue, err := json.Marshal(&policy.OptionsSerialized) + policy.Options = string(optionsValue) + return err +} + +// GeneratePath 生成存储文件的路径 +func (policy *Policy) GeneratePath(uid uint, origin string) string { + dirRule := policy.DirNameRule + replaceTable := map[string]string{ + "{randomkey16}": util.RandStringRunes(16), + "{randomkey8}": util.RandStringRunes(8), + "{timestamp}": strconv.FormatInt(time.Now().Unix(), 10), + "{timestamp_nano}": 
strconv.FormatInt(time.Now().UnixNano(), 10), + "{uid}": strconv.Itoa(int(uid)), + "{datetime}": time.Now().Format("20060102150405"), + "{date}": time.Now().Format("20060102"), + "{year}": time.Now().Format("2006"), + "{month}": time.Now().Format("01"), + "{day}": time.Now().Format("02"), + "{hour}": time.Now().Format("15"), + "{minute}": time.Now().Format("04"), + "{second}": time.Now().Format("05"), + "{path}": origin + "/", + } + dirRule = util.Replace(replaceTable, dirRule) + return path.Clean(dirRule) +} + +// GenerateFileName 生成存储文件名 +func (policy *Policy) GenerateFileName(uid uint, origin string) string { + // 未开启自动重命名时,直接返回原始文件名 + if !policy.AutoRename { + return origin + } + + fileRule := policy.FileNameRule + + replaceTable := map[string]string{ + "{randomkey16}": util.RandStringRunes(16), + "{randomkey8}": util.RandStringRunes(8), + "{timestamp}": strconv.FormatInt(time.Now().Unix(), 10), + "{timestamp_nano}": strconv.FormatInt(time.Now().UnixNano(), 10), + "{uid}": strconv.Itoa(int(uid)), + "{datetime}": time.Now().Format("20060102150405"), + "{date}": time.Now().Format("20060102"), + "{year}": time.Now().Format("2006"), + "{month}": time.Now().Format("01"), + "{day}": time.Now().Format("02"), + "{hour}": time.Now().Format("15"), + "{minute}": time.Now().Format("04"), + "{second}": time.Now().Format("05"), + "{originname}": origin, + "{ext}": filepath.Ext(origin), + "{originname_without_ext}": strings.TrimSuffix(origin, filepath.Ext(origin)), + "{uuid}": uuid.Must(uuid.NewV4()).String(), + } + + fileRule = util.Replace(replaceTable, fileRule) + return fileRule +} + +// IsThumbExist 给定文件名,返回此存储策略下是否可能存在缩略图 +func (policy *Policy) IsThumbExist(name string) bool { + if list, ok := thumbSuffix[policy.Type]; ok { + if len(list) == 1 && list[0] == "*" { + return true + } + return util.ContainsString(list, strings.ToLower(filepath.Ext(name))) + } + return false +} + +// IsDirectlyPreview 返回此策略下文件是否可以直接预览(不需要重定向) +func (policy *Policy) IsDirectlyPreview() bool { 
// UpdateAccessKeyAndClearCache persists a new access key for this policy via
// UpdateColumn (bypassing gorm hooks) and evicts the cached copy.
func (policy *Policy) UpdateAccessKeyAndClearCache(s string) error {
	err := DB.Model(policy).UpdateColumn("access_key", s).Error
	policy.ClearCache()
	return err
}
+func (policy *Policy) CouldProxyThumb() bool { + if policy.Type == "local" || !IsTrueVal(GetSettingByName("thumb_proxy_enabled")) { + return false + } + + allowed := make([]uint, 0) + _ = json.Unmarshal([]byte(GetSettingByName("thumb_proxy_policy")), &allowed) + return lo.Contains[uint](allowed, policy.ID) +} diff --git a/models/redeem.go b/models/redeem.go new file mode 100644 index 0000000..4f8396d --- /dev/null +++ b/models/redeem.go @@ -0,0 +1,27 @@ +package model + +import "github.com/jinzhu/gorm" + +// Redeem 兑换码 +type Redeem struct { + gorm.Model + Type int // 订单类型 + ProductID int64 // 商品ID + Num int // 商品数量 + Code string `gorm:"size:64,index:redeem_code"` // 兑换码 + Used bool // 是否已被使用 +} + +// GetAvailableRedeem 根据code查找可用兑换码 +func GetAvailableRedeem(code string) (*Redeem, error) { + redeem := &Redeem{} + result := DB.Where("code = ? and used = ?", code, false).First(redeem) + return redeem, result.Error +} + +// Use 设定为已使用状态 +func (redeem *Redeem) Use() { + DB.Model(redeem).Updates(map[string]interface{}{ + "used": true, + }) +} diff --git a/models/report.go b/models/report.go new file mode 100644 index 0000000..face732 --- /dev/null +++ b/models/report.go @@ -0,0 +1,21 @@ +package model + +import ( + "github.com/jinzhu/gorm" +) + +// Report 举报模型 +type Report struct { + gorm.Model + ShareID uint `gorm:"index:share_id"` // 对应分享ID + Reason int // 举报原因 + Description string // 补充描述 + + // 关联模型 + Share Share `gorm:"save_associations:false:false"` +} + +// Create 创建举报 +func (report *Report) Create() error { + return DB.Create(report).Error +} diff --git a/models/scripts/init.go b/models/scripts/init.go new file mode 100644 index 0000000..b772fa9 --- /dev/null +++ b/models/scripts/init.go @@ -0,0 +1,10 @@ +package scripts + +import "github.com/cloudreve/Cloudreve/v3/models/scripts/invoker" + +func Init() { + invoker.Register("ResetAdminPassword", ResetAdminPassword(0)) + invoker.Register("CalibrateUserStorage", UserStorageCalibration(0)) + 
invoker.Register("OSSToPlus", UpgradeToPro(0)) + invoker.Register("UpgradeTo3.4.0", UpgradeTo340(0)) +} diff --git a/models/scripts/invoker/invoker.go b/models/scripts/invoker/invoker.go new file mode 100644 index 0000000..b55b1e9 --- /dev/null +++ b/models/scripts/invoker/invoker.go @@ -0,0 +1,38 @@ +package invoker + +import ( + "context" + "fmt" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "strings" +) + +type DBScript interface { + Run(ctx context.Context) +} + +var availableScripts = make(map[string]DBScript) + +func RunDBScript(name string, ctx context.Context) error { + if script, ok := availableScripts[name]; ok { + util.Log().Info("Start executing database script %q.", name) + script.Run(ctx) + return nil + } + + return fmt.Errorf("Database script %q not exist.", name) +} + +func Register(name string, script DBScript) { + availableScripts[name] = script +} + +func ListPrefix(prefix string) []string { + var scripts []string + for name := range availableScripts { + if strings.HasPrefix(name, prefix) { + scripts = append(scripts, name) + } + } + return scripts +} diff --git a/models/scripts/reset.go b/models/scripts/reset.go new file mode 100644 index 0000000..1f6bf08 --- /dev/null +++ b/models/scripts/reset.go @@ -0,0 +1,31 @@ +package scripts + +import ( + "context" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/fatih/color" +) + +type ResetAdminPassword int + +// Run 运行脚本从社区版升级至 Pro 版 +func (script ResetAdminPassword) Run(ctx context.Context) { + // 查找用户 + user, err := model.GetUserByID(1) + if err != nil { + util.Log().Panic("Initial admin user not exist: %s", err) + } + + // 生成密码 + password := util.RandStringRunes(8) + + // 更改为新密码 + user.SetPassword(password) + if err := user.Update(map[string]interface{}{"password": user.Password}); err != nil { + util.Log().Panic("Failed to update password: %s", err) + } + + c := color.New(color.FgWhite).Add(color.BgBlack).Add(color.Bold) + 
util.Log().Info("Initial admin user password changed to:" + c.Sprint(password)) +} diff --git a/models/scripts/storage.go b/models/scripts/storage.go new file mode 100644 index 0000000..0d436b9 --- /dev/null +++ b/models/scripts/storage.go @@ -0,0 +1,33 @@ +package scripts + +import ( + "context" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +type UserStorageCalibration int + +type storageResult struct { + Total uint64 +} + +// Run 运行脚本校准所有用户容量 +func (script UserStorageCalibration) Run(ctx context.Context) { + // 列出所有用户 + var res []model.User + model.DB.Model(&model.User{}).Find(&res) + + // 逐个检查容量 + for _, user := range res { + // 计算正确的容量 + var total storageResult + model.DB.Model(&model.File{}).Where("user_id = ?", user.ID).Select("sum(size) as total").Scan(&total) + // 更新用户的容量 + if user.Storage != total.Total { + util.Log().Info("Calibrate used storage for user %q, from %d to %d.", user.Email, + user.Storage, total.Total) + } + model.DB.Model(&user).Update("storage", total.Total) + } +} diff --git a/models/scripts/upgrade-pro.go b/models/scripts/upgrade-pro.go new file mode 100644 index 0000000..b25df4f --- /dev/null +++ b/models/scripts/upgrade-pro.go @@ -0,0 +1,22 @@ +package scripts + +import ( + "context" + model "github.com/cloudreve/Cloudreve/v3/models" +) + +type UpgradeToPro int + +// Run 运行脚本从社区版升级至 Pro 版 +func (script UpgradeToPro) Run(ctx context.Context) { + // folder.PolicyID 字段设为 0 + model.DB.Model(model.Folder{}).UpdateColumn("policy_id", 0) + // shares.Score 字段设为0 + model.DB.Model(model.Share{}).UpdateColumn("score", 0) + // user 表相关初始字段 + model.DB.Model(model.User{}).Updates(map[string]interface{}{ + "score": 0, + "previous_group_id": 0, + "open_id": "", + }) +} diff --git a/models/scripts/upgrade.go b/models/scripts/upgrade.go new file mode 100644 index 0000000..717a72e --- /dev/null +++ b/models/scripts/upgrade.go @@ -0,0 +1,43 @@ +package scripts + +import ( + "context" + model 
"github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "strconv" +) + +type UpgradeTo340 int + +// Run upgrade from older version to 3.4.0 +func (script UpgradeTo340) Run(ctx context.Context) { + // 取回老版本 aria2 设定 + old := model.GetSettingByType([]string{"aria2"}) + if len(old) == 0 { + return + } + + // 写入到新版本的节点设定 + n, err := model.GetNodeByID(1) + if err != nil { + util.Log().Error("找不到主机节点, %s", err) + } + + n.Aria2Enabled = old["aria2_rpcurl"] != "" + n.Aria2OptionsSerialized.Options = old["aria2_options"] + n.Aria2OptionsSerialized.Server = old["aria2_rpcurl"] + + interval, err := strconv.Atoi(old["aria2_interval"]) + if err != nil { + interval = 10 + } + n.Aria2OptionsSerialized.Interval = interval + n.Aria2OptionsSerialized.TempPath = old["aria2_temp_path"] + n.Aria2OptionsSerialized.Token = old["aria2_token"] + if err := model.DB.Save(&n).Error; err != nil { + util.Log().Error("无法保存主机节点 Aria2 配置信息, %s", err) + } else { + model.DB.Where("type = ?", "aria2").Delete(model.Setting{}) + util.Log().Info("Aria2 配置信息已成功迁移至 3.4.0+ 版本的模式") + } +} diff --git a/models/setting.go b/models/setting.go new file mode 100644 index 0000000..0bbcf68 --- /dev/null +++ b/models/setting.go @@ -0,0 +1,110 @@ +package model + +import ( + "net/url" + "strconv" + + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/jinzhu/gorm" +) + +// Setting 系统设置模型 +type Setting struct { + gorm.Model + Type string `gorm:"not null"` + Name string `gorm:"unique;not null;index:setting_key"` + Value string `gorm:"size:‎65535"` +} + +// IsTrueVal 返回设置的值是否为真 +func IsTrueVal(val string) bool { + return val == "1" || val == "true" +} + +// GetSettingByName 用 Name 获取设置值 +func GetSettingByName(name string) string { + return GetSettingByNameFromTx(DB, name) +} + +// GetSettingByNameFromTx 用 Name 获取设置值,使用事务 +func GetSettingByNameFromTx(tx *gorm.DB, name string) string { + var setting Setting + + // 优先从缓存中查找 + cacheKey := "setting_" + name + if optionValue, ok := 
cache.Get(cacheKey); ok { + return optionValue.(string) + } + + // 尝试数据库中查找 + if tx == nil { + tx = DB + if tx == nil { + return "" + } + } + + result := tx.Where("name = ?", name).First(&setting) + if result.Error == nil { + _ = cache.Set(cacheKey, setting.Value, -1) + return setting.Value + } + + return "" +} + +// GetSettingByNameWithDefault 用 Name 获取设置值, 取不到时使用缺省值 +func GetSettingByNameWithDefault(name, fallback string) string { + res := GetSettingByName(name) + if res == "" { + return fallback + } + return res +} + +// GetSettingByNames 用多个 Name 获取设置值 +func GetSettingByNames(names ...string) map[string]string { + var queryRes []Setting + res, miss := cache.GetSettings(names, "setting_") + + if len(miss) > 0 { + DB.Where("name IN (?)", miss).Find(&queryRes) + for _, setting := range queryRes { + res[setting.Name] = setting.Value + } + } + + _ = cache.SetSettings(res, "setting_") + return res +} + +// GetSettingByType 获取一个或多个分组的所有设置值 +func GetSettingByType(types []string) map[string]string { + var queryRes []Setting + res := make(map[string]string) + + DB.Where("type IN (?)", types).Find(&queryRes) + for _, setting := range queryRes { + res[setting.Name] = setting.Value + } + + return res +} + +// GetSiteURL 获取站点地址 +func GetSiteURL() *url.URL { + base, err := url.Parse(GetSettingByName("siteURL")) + if err != nil { + base, _ = url.Parse("https://cloudreve.org") + } + return base +} + +// GetIntSetting 获取整形设置值,如果转换失败则返回默认值defaultVal +func GetIntSetting(key string, defaultVal int) int { + res, err := strconv.Atoi(GetSettingByName(key)) + if err != nil { + return defaultVal + } + return res +} diff --git a/models/share.go b/models/share.go new file mode 100644 index 0000000..94655bb --- /dev/null +++ b/models/share.go @@ -0,0 +1,280 @@ +package model + +import ( + "errors" + "fmt" + "math" + "strings" + "time" + + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/hashid" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + 
"github.com/gin-gonic/gin" + "github.com/jinzhu/gorm" +) + +var ( + ErrInsufficientCredit = errors.New("积分不足") +) + +// Share 分享模型 +type Share struct { + gorm.Model + Password string // 分享密码,空值为非加密分享 + IsDir bool // 原始资源是否为目录 + UserID uint // 创建用户ID + SourceID uint // 原始资源ID + Views int // 浏览数 + Downloads int // 下载数 + RemainDownloads int // 剩余下载配额,负值标识无限制 + Expires *time.Time // 过期时间,空值表示无过期时间 + Score int // 每人次下载扣除积分 + PreviewEnabled bool // 是否允许直接预览 + SourceName string `gorm:"index:source"` // 用于搜索的字段 + + // 数据库忽略字段 + User User `gorm:"PRELOAD:false,association_autoupdate:false"` + File File `gorm:"PRELOAD:false,association_autoupdate:false"` + Folder Folder `gorm:"PRELOAD:false,association_autoupdate:false"` +} + +// Create 创建分享 +func (share *Share) Create() (uint, error) { + if err := DB.Create(share).Error; err != nil { + util.Log().Warning("Failed to insert share record: %s", err) + return 0, err + } + return share.ID, nil +} + +// GetShareByHashID 根据HashID查找分享 +func GetShareByHashID(hashID string) *Share { + id, err := hashid.DecodeHashID(hashID, hashid.ShareID) + if err != nil { + return nil + } + var share Share + result := DB.First(&share, id) + if result.Error != nil { + return nil + } + + return &share +} + +// IsAvailable 返回此分享是否可用(是否过期) +func (share *Share) IsAvailable() bool { + if share.RemainDownloads == 0 { + return false + } + if share.Expires != nil && time.Now().After(*share.Expires) { + return false + } + + // 检查创建者状态 + if share.Creator().Status != Active { + return false + } + + // 检查源对象是否存在 + var sourceID uint + if share.IsDir { + folder := share.SourceFolder() + sourceID = folder.ID + } else { + file := share.SourceFile() + sourceID = file.ID + } + if sourceID == 0 { + // TODO 是否要在这里删除这个无效分享? 
+ return false + } + + return true +} + +// Creator 获取分享的创建者 +func (share *Share) Creator() *User { + if share.User.ID == 0 { + share.User, _ = GetUserByID(share.UserID) + } + return &share.User +} + +// Source 返回源对象 +func (share *Share) Source() interface{} { + if share.IsDir { + return share.SourceFolder() + } + return share.SourceFile() +} + +// SourceFolder 获取源目录 +func (share *Share) SourceFolder() *Folder { + if share.Folder.ID == 0 { + folders, _ := GetFoldersByIDs([]uint{share.SourceID}, share.UserID) + if len(folders) > 0 { + share.Folder = folders[0] + } + } + return &share.Folder +} + +// SourceFile 获取源文件 +func (share *Share) SourceFile() *File { + if share.File.ID == 0 { + files, _ := GetFilesByIDs([]uint{share.SourceID}, share.UserID) + if len(files) > 0 { + share.File = files[0] + } + } + return &share.File +} + +// CanBeDownloadBy 返回此分享是否可以被给定用户下载 +func (share *Share) CanBeDownloadBy(user *User) error { + // 用户组权限 + if !user.Group.OptionsSerialized.ShareDownload { + if user.IsAnonymous() { + return errors.New("you must login to download") + } + return errors.New("your group has no permission to download") + } + + // 需要积分但未登录 + if share.Score > 0 && user.IsAnonymous() { + return errors.New("you must login to download") + } + + return nil +} + +// WasDownloadedBy 返回分享是否已被用户下载过 +func (share *Share) WasDownloadedBy(user *User, c *gin.Context) (exist bool) { + if user.IsAnonymous() { + exist = util.GetSession(c, fmt.Sprintf("share_%d_%d", share.ID, user.ID)) != nil + } else { + _, exist = cache.Get(fmt.Sprintf("share_%d_%d", share.ID, user.ID)) + } + + return exist +} + +// DownloadBy 增加下载次数、检查积分等,匿名用户不会缓存 +func (share *Share) DownloadBy(user *User, c *gin.Context) error { + if !share.WasDownloadedBy(user, c) { + if err := share.Purchase(user); err != nil { + return err + } + share.Downloaded() + if !user.IsAnonymous() { + cache.Set(fmt.Sprintf("share_%d_%d", share.ID, user.ID), true, + GetIntSetting("share_download_session_timeout", 2073600)) + } else { + 
util.SetSession(c, map[string]interface{}{fmt.Sprintf("share_%d_%d", share.ID, user.ID): true}) + } + } + return nil +} + +// Purchase 使用积分购买分享 +func (share *Share) Purchase(user *User) error { + // 不需要付积分 + if share.Score == 0 || user.Group.OptionsSerialized.ShareFree || user.ID == share.UserID { + return nil + } + + ok := user.PayScore(share.Score) + if !ok { + return ErrInsufficientCredit + } + + scoreRate := GetIntSetting("share_score_rate", 100) + gainedScore := int(math.Ceil(float64(share.Score*scoreRate) / 100)) + share.Creator().AddScore(gainedScore) + + return nil +} + +// Viewed 增加访问次数 +func (share *Share) Viewed() { + share.Views++ + DB.Model(share).UpdateColumn("views", gorm.Expr("views + ?", 1)) +} + +// Downloaded 增加下载次数 +func (share *Share) Downloaded() { + share.Downloads++ + if share.RemainDownloads > 0 { + share.RemainDownloads-- + } + DB.Model(share).Updates(map[string]interface{}{ + "downloads": share.Downloads, + "remain_downloads": share.RemainDownloads, + }) +} + +// Update 更新分享属性 +func (share *Share) Update(props map[string]interface{}) error { + return DB.Model(share).Updates(props).Error +} + +// Delete 删除分享 +func (share *Share) Delete() error { + return DB.Model(share).Delete(share).Error +} + +// DeleteShareBySourceIDs 根据原始资源类型和ID删除文件 +func DeleteShareBySourceIDs(sources []uint, isDir bool) error { + return DB.Where("source_id in (?) 
and is_dir = ?", sources, isDir).Delete(&Share{}).Error +} + +// ListShares 列出UID下的分享 +func ListShares(uid uint, page, pageSize int, order string, publicOnly bool) ([]Share, int) { + var ( + shares []Share + total int + ) + dbChain := DB + dbChain = dbChain.Where("user_id = ?", uid) + if publicOnly { + dbChain = dbChain.Where("password = ?", "") + } + + // 计算总数用于分页 + dbChain.Model(&Share{}).Count(&total) + + // 查询记录 + dbChain.Limit(pageSize).Offset((page - 1) * pageSize).Order(order).Find(&shares) + return shares, total +} + +// SearchShares 根据关键字搜索分享 +func SearchShares(page, pageSize int, order, keywords string) ([]Share, int) { + var ( + shares []Share + total int + ) + + keywordList := strings.Split(keywords, " ") + availableList := make([]string, 0, len(keywordList)) + for i := 0; i < len(keywordList); i++ { + if len(keywordList[i]) > 0 { + availableList = append(availableList, keywordList[i]) + } + } + if len(availableList) == 0 { + return shares, 0 + } + + dbChain := DB + dbChain = dbChain.Where("password = ? and remain_downloads <> 0 and (expires is NULL or expires > ?) 
and source_name like ?", "", time.Now(), "%"+strings.Join(availableList, "%")+"%") + + // 计算总数用于分页 + dbChain.Model(&Share{}).Count(&total) + + // 查询记录 + dbChain.Limit(pageSize).Offset((page - 1) * pageSize).Order(order).Find(&shares) + return shares, total +} diff --git a/models/source_link.go b/models/source_link.go new file mode 100644 index 0000000..49dfea2 --- /dev/null +++ b/models/source_link.go @@ -0,0 +1,47 @@ +package model + +import ( + "fmt" + "github.com/cloudreve/Cloudreve/v3/pkg/hashid" + "github.com/jinzhu/gorm" + "net/url" +) + +// SourceLink represent a shared file source link +type SourceLink struct { + gorm.Model + FileID uint // corresponding file ID + Name string // name of the file while creating the source link, for annotation + Downloads int // 下载数 + + // 关联模型 + File File `gorm:"save_associations:false:false"` +} + +// Link gets the URL of a SourceLink +func (s *SourceLink) Link() (string, error) { + baseURL := GetSiteURL() + linkPath, err := url.Parse(fmt.Sprintf("/f/%s/%s", hashid.HashID(s.ID, hashid.SourceLinkID), s.File.Name)) + if err != nil { + return "", err + } + return baseURL.ResolveReference(linkPath).String(), nil +} + +// GetTasksByID queries source link based on ID +func GetSourceLinkByID(id interface{}) (*SourceLink, error) { + link := &SourceLink{} + result := DB.Where("id = ?", id).First(link) + files, _ := GetFilesByIDs([]uint{link.FileID}, 0) + if len(files) > 0 { + link.File = files[0] + } + + return link, result.Error +} + +// Viewed 增加访问次数 +func (s *SourceLink) Downloaded() { + s.Downloads++ + DB.Model(s).UpdateColumn("downloads", gorm.Expr("downloads + ?", 1)) +} diff --git a/models/storage_pack.go b/models/storage_pack.go new file mode 100644 index 0000000..a18f9c1 --- /dev/null +++ b/models/storage_pack.go @@ -0,0 +1,91 @@ +package model + +import ( + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/jinzhu/gorm" + "strconv" + "time" +) + +// StoragePack 容量包模型 
+type StoragePack struct { + // 表字段 + gorm.Model + Name string + UserID uint + ActiveTime *time.Time + ExpiredTime *time.Time `gorm:"index:expired"` + Size uint64 +} + +// Create 创建容量包 +func (pack *StoragePack) Create() (uint, error) { + if err := DB.Create(pack).Error; err != nil { + util.Log().Warning("Failed to insert storage pack record: %s", err) + return 0, err + } + return pack.ID, nil +} + +// GetAvailablePackSize 返回给定用户当前可用的容量包总容量 +func (user *User) GetAvailablePackSize() uint64 { + var ( + total uint64 + firstExpire *time.Time + timeNow = time.Now() + ttl int64 + ) + + // 尝试从缓存中读取 + cacheKey := "pack_size_" + strconv.FormatUint(uint64(user.ID), 10) + if total, ok := cache.Get(cacheKey); ok { + return total.(uint64) + } + + // 查找所有有效容量包 + packs := user.GetAvailableStoragePacks() + + // 计算总容量, 并找到其中最早的过期时间 + for _, v := range packs { + total += v.Size + if firstExpire == nil { + firstExpire = v.ExpiredTime + continue + } + if v.ExpiredTime != nil && firstExpire.After(*v.ExpiredTime) { + firstExpire = v.ExpiredTime + } + } + + // 用最早的过期时间计算缓存TTL,并写入缓存 + if firstExpire != nil { + ttl = firstExpire.Unix() - timeNow.Unix() + if ttl > 0 { + _ = cache.Set(cacheKey, total, int(ttl)) + } + } + + return total +} + +// GetAvailableStoragePacks 返回用户可用的容量包 +func (user *User) GetAvailableStoragePacks() []StoragePack { + var packs []StoragePack + timeNow := time.Now() + // 查找所有有效容量包 + DB.Where("expired_time > ? 
AND user_id = ?", timeNow, user.ID).Find(&packs) + return packs +} + +// GetExpiredStoragePack 获取已过期的容量包 +func GetExpiredStoragePack() []StoragePack { + var packs []StoragePack + DB.Where("expired_time < ?", time.Now()).Find(&packs) + return packs +} + +// Delete 删除容量包 +func (pack *StoragePack) Delete() error { + return DB.Delete(&pack).Error +} diff --git a/models/tag.go b/models/tag.go new file mode 100644 index 0000000..5ce1a4d --- /dev/null +++ b/models/tag.go @@ -0,0 +1,53 @@ +package model + +import ( + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/jinzhu/gorm" +) + +// Tag 用户自定义标签 +type Tag struct { + gorm.Model + Name string // 标签名 + Icon string // 图标标识 + Color string // 图标颜色 + Type int // 标签类型(文件分类/目录直达) + Expression string `gorm:"type:text"` // 搜索表表达式/直达路径 + UserID uint // 创建者ID +} + +const ( + // FileTagType 文件分类标签 + FileTagType = iota + // DirectoryLinkType 目录快捷方式标签 + DirectoryLinkType +) + +// Create 创建标签记录 +func (tag *Tag) Create() (uint, error) { + if err := DB.Create(tag).Error; err != nil { + util.Log().Warning("Failed to insert tag record: %s", err) + return 0, err + } + return tag.ID, nil +} + +// DeleteTagByID 根据给定ID和用户ID删除标签 +func DeleteTagByID(id, uid uint) error { + result := DB.Where("id = ? and user_id = ?", id, uid).Delete(&Tag{}) + return result.Error +} + +// GetTagsByUID 根据用户ID查找标签 +func GetTagsByUID(uid uint) ([]Tag, error) { + var tag []Tag + result := DB.Where("user_id = ?", uid).Find(&tag) + return tag, result.Error +} + +// GetTagsByID 根据ID查找标签 +func GetTagsByID(id, uid uint) (*Tag, error) { + var tag Tag + result := DB.Where("user_id = ? 
and id = ?", uid, id).First(&tag) + return &tag, result.Error +} diff --git a/models/task.go b/models/task.go new file mode 100644 index 0000000..a6fde2e --- /dev/null +++ b/models/task.go @@ -0,0 +1,73 @@ +package model + +import ( + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/jinzhu/gorm" +) + +// Task 任务模型 +type Task struct { + gorm.Model + Status int // 任务状态 + Type int // 任务类型 + UserID uint // 发起者UID,0表示为系统发起 + Progress int // 进度 + Error string `gorm:"type:text"` // 错误信息 + Props string `gorm:"type:text"` // 任务属性 +} + +// Create 创建任务记录 +func (task *Task) Create() (uint, error) { + if err := DB.Create(task).Error; err != nil { + util.Log().Warning("Failed to insert task record: %s", err) + return 0, err + } + return task.ID, nil +} + +// SetStatus 设定任务状态 +func (task *Task) SetStatus(status int) error { + return DB.Model(task).Select("status").Updates(map[string]interface{}{"status": status}).Error +} + +// SetProgress 设定任务进度 +func (task *Task) SetProgress(progress int) error { + return DB.Model(task).Select("progress").Updates(map[string]interface{}{"progress": progress}).Error +} + +// SetError 设定错误信息 +func (task *Task) SetError(err string) error { + return DB.Model(task).Select("error").Updates(map[string]interface{}{"error": err}).Error +} + +// GetTasksByStatus 根据状态检索任务 +func GetTasksByStatus(status ...int) []Task { + var tasks []Task + DB.Where("status in (?)", status).Find(&tasks) + return tasks +} + +// GetTasksByID 根据ID检索任务 +func GetTasksByID(id interface{}) (*Task, error) { + task := &Task{} + result := DB.Where("id = ?", id).First(task) + return task, result.Error +} + +// ListTasks 列出用户所属的任务 +func ListTasks(uid uint, page, pageSize int, order string) ([]Task, int) { + var ( + tasks []Task + total int + ) + dbChain := DB + dbChain = dbChain.Where("user_id = ?", uid) + + // 计算总数用于分页 + dbChain.Model(&Task{}).Count(&total) + + // 查询记录 + dbChain.Limit(pageSize).Offset((page - 1) * pageSize).Order(order).Find(&tasks) + + return tasks, total +} 
diff --git a/models/user.go b/models/user.go new file mode 100644 index 0000000..4d1d1a3 --- /dev/null +++ b/models/user.go @@ -0,0 +1,429 @@ +package model + +import ( + "crypto/md5" + "crypto/sha1" + "encoding/gob" + "encoding/hex" + "encoding/json" + "strings" + "time" + + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/jinzhu/gorm" + "github.com/pkg/errors" +) + +const ( + // Active 账户正常状态 + Active = iota + // NotActivicated 未激活 + NotActivicated + // Baned 被封禁 + Baned + // OveruseBaned 超额使用被封禁 + OveruseBaned +) + +// User 用户模型 +type User struct { + // 表字段 + gorm.Model + Email string `gorm:"type:varchar(100);unique_index"` + Nick string `gorm:"size:50"` + Password string `json:"-"` + Status int + GroupID uint + Storage uint64 + OpenID string + TwoFactor string + Avatar string + Options string `json:"-" gorm:"size:4294967295"` + Authn string `gorm:"size:4294967295"` + Score int + PreviousGroupID uint // 初始用户组 + GroupExpires *time.Time // 用户组过期日期 + NotifyDate *time.Time // 通知超出配额时的日期 + Phone string + + // 关联模型 + Group Group `gorm:"save_associations:false:false"` + + // 数据库忽略字段 + OptionsSerialized UserOption `gorm:"-"` +} + +func init() { + gob.Register(User{}) +} + +// UserOption 用户个性化配置字段 +type UserOption struct { + ProfileOff bool `json:"profile_off,omitempty"` + PreferredPolicy uint `json:"preferred_policy,omitempty"` + PreferredTheme string `json:"preferred_theme,omitempty"` +} + +// Root 获取用户的根目录 +func (user *User) Root() (*Folder, error) { + var folder Folder + err := DB.Where("parent_id is NULL AND owner_id = ?", user.ID).First(&folder).Error + return &folder, err +} + +// DeductionStorage 减少用户已用容量 +func (user *User) DeductionStorage(size uint64) bool { + if size == 0 { + return true + } + if size <= user.Storage { + user.Storage -= size + DB.Model(user).Update("storage", gorm.Expr("storage - ?", size)) + return true + } + // 如果要减少的容量超出已用容量,则设为零 + user.Storage = 0 + DB.Model(user).Update("storage", 0) + + return false +} + +// IncreaseStorage 
检查并增加用户已用容量 +func (user *User) IncreaseStorage(size uint64) bool { + if size == 0 { + return true + } + if size <= user.GetRemainingCapacity() { + user.Storage += size + DB.Model(user).Update("storage", gorm.Expr("storage + ?", size)) + return true + } + return false +} + +// ChangeStorage 更新用户容量 +func (user *User) ChangeStorage(tx *gorm.DB, operator string, size uint64) error { + return tx.Model(user).Update("storage", gorm.Expr("storage "+operator+" ?", size)).Error +} + +// PayScore 扣除积分,返回是否成功 +func (user *User) PayScore(score int) bool { + if score == 0 { + return true + } + if score <= user.Score { + user.Score -= score + DB.Model(user).Update("score", gorm.Expr("score - ?", score)) + return true + } + return false +} + +// AddScore 增加积分 +func (user *User) AddScore(score int) { + user.Score += score + DB.Model(user).Update("score", gorm.Expr("score + ?", score)) +} + +// IncreaseStorageWithoutCheck 忽略可用容量,增加用户已用容量 +func (user *User) IncreaseStorageWithoutCheck(size uint64) { + if size == 0 { + return + } + user.Storage += size + DB.Model(user).Update("storage", gorm.Expr("storage + ?", size)) + +} + +// GetRemainingCapacity 获取剩余配额 +func (user *User) GetRemainingCapacity() uint64 { + total := user.Group.MaxStorage + user.GetAvailablePackSize() + if total <= user.Storage { + return 0 + } + return total - user.Storage +} + +// GetPolicyID 获取给定目录的存储策略, 如果为 nil 则使用默认 +func (user *User) GetPolicyID(folder *Folder) *Policy { + if user.IsAnonymous() { + return &Policy{Type: "anonymous"} + } + + defaultPolicy := uint(1) + if len(user.Group.PolicyList) > 0 { + defaultPolicy = user.Group.PolicyList[0] + } + + if folder != nil { + prefer := folder.PolicyID + if prefer == 0 && folder.InheritPolicyID > 0 { + prefer = folder.InheritPolicyID + } + + if prefer > 0 && util.ContainsUint(user.Group.PolicyList, prefer) { + defaultPolicy = prefer + } + } + + p, _ := GetPolicyByID(defaultPolicy) + return &p +} + +// GetPolicyByPreference 在可用存储策略中优先获取 preference +func (user *User) 
GetPolicyByPreference(preference uint) *Policy { + if user.IsAnonymous() { + return &Policy{Type: "anonymous"} + } + + defaultPolicy := uint(1) + if len(user.Group.PolicyList) > 0 { + defaultPolicy = user.Group.PolicyList[0] + } + + if preference != 0 { + if util.ContainsUint(user.Group.PolicyList, preference) { + defaultPolicy = preference + } + } + + p, _ := GetPolicyByID(defaultPolicy) + return &p +} + +// GetUserByID 用ID获取用户 +func GetUserByID(ID interface{}) (User, error) { + var user User + result := DB.Set("gorm:auto_preload", true).First(&user, ID) + return user, result.Error +} + +// GetActiveUserByID 用ID获取可登录用户 +func GetActiveUserByID(ID interface{}) (User, error) { + var user User + result := DB.Set("gorm:auto_preload", true).Where("status = ?", Active).First(&user, ID) + return user, result.Error +} + +// GetActiveUserByOpenID 用OpenID获取可登录用户 +func GetActiveUserByOpenID(openid string) (User, error) { + var user User + result := DB.Set("gorm:auto_preload", true).Where("status = ? and open_id = ?", Active, openid).Find(&user) + return user, result.Error +} + +// GetUserByEmail 用Email获取用户 +func GetUserByEmail(email string) (User, error) { + var user User + result := DB.Set("gorm:auto_preload", true).Where("email = ?", email).First(&user) + return user, result.Error +} + +// GetActiveUserByEmail 用Email获取可登录用户 +func GetActiveUserByEmail(email string) (User, error) { + var user User + result := DB.Set("gorm:auto_preload", true).Where("status = ? 
and email = ?", Active, email).First(&user) + return user, result.Error +} + +// NewUser 返回一个新的空 User +func NewUser() User { + options := UserOption{} + return User{ + OptionsSerialized: options, + } +} + +// BeforeSave Save用户前的钩子 +func (user *User) BeforeSave() (err error) { + err = user.SerializeOptions() + return err +} + +// AfterCreate 创建用户后的钩子 +func (user *User) AfterCreate(tx *gorm.DB) (err error) { + // 创建用户的默认根目录 + defaultFolder := &Folder{ + Name: "/", + OwnerID: user.ID, + } + tx.Create(defaultFolder) + + // 创建用户初始文件记录 + initialFiles := GetSettingByNameFromTx(tx, "initial_files") + if initialFiles != "" { + initialFileIDs := make([]uint, 0) + if err := json.Unmarshal([]byte(initialFiles), &initialFileIDs); err != nil { + return err + } + + if files, err := GetFilesByIDsFromTX(tx, initialFileIDs, 0); err == nil { + for _, file := range files { + file.ID = 0 + file.UserID = user.ID + file.FolderID = defaultFolder.ID + user.Storage += file.Size + tx.Create(&file) + } + tx.Save(user) + } + } + + return err +} + +// AfterFind 找到用户后的钩子 +func (user *User) AfterFind() (err error) { + // 解析用户设置到OptionsSerialized + if user.Options != "" { + err = json.Unmarshal([]byte(user.Options), &user.OptionsSerialized) + } + + return err +} + +// SerializeOptions 将序列后的Option写入到数据库字段 +func (user *User) SerializeOptions() (err error) { + optionsValue, err := json.Marshal(&user.OptionsSerialized) + user.Options = string(optionsValue) + return err +} + +// CheckPassword 根据明文校验密码 +func (user *User) CheckPassword(password string) (bool, error) { + + // 根据存储密码拆分为 Salt 和 Digest + passwordStore := strings.Split(user.Password, ":") + if len(passwordStore) != 2 && len(passwordStore) != 3 { + return false, errors.New("Unknown password type") + } + + // 兼容V2密码,升级后存储格式为: md5:$HASH:$SALT + if len(passwordStore) == 3 { + if passwordStore[0] != "md5" { + return false, errors.New("Unknown password type") + } + hash := md5.New() + _, err := hash.Write([]byte(passwordStore[2] + password)) + bs 
:= hex.EncodeToString(hash.Sum(nil)) + if err != nil { + return false, err + } + return bs == passwordStore[1], nil + } + + //计算 Salt 和密码组合的SHA1摘要 + hash := sha1.New() + _, err := hash.Write([]byte(password + passwordStore[0])) + bs := hex.EncodeToString(hash.Sum(nil)) + if err != nil { + return false, err + } + + return bs == passwordStore[1], nil +} + +// SetPassword 根据给定明文设定 User 的 Password 字段 +func (user *User) SetPassword(password string) error { + //生成16位 Salt + salt := util.RandStringRunes(16) + + //计算 Salt 和密码组合的SHA1摘要 + hash := sha1.New() + _, err := hash.Write([]byte(password + salt)) + bs := hex.EncodeToString(hash.Sum(nil)) + + if err != nil { + return err + } + + //存储 Salt 值和摘要, ":"分割 + user.Password = salt + ":" + string(bs) + return nil +} + +// NewAnonymousUser 返回一个匿名用户 +func NewAnonymousUser() *User { + user := User{} + user.Group, _ = GetGroupByID(3) + return &user +} + +// IsAnonymous 返回是否为未登录用户 +func (user *User) IsAnonymous() bool { + return user.ID == 0 +} + +// Notified 更新用户容量超额通知日期 +func (user *User) Notified() { + if user.NotifyDate == nil { + timeNow := time.Now() + user.NotifyDate = &timeNow + DB.Model(&user).Update("notify_date", user.NotifyDate) + } +} + +// ClearNotified 清除用户通知标记 +func (user *User) ClearNotified() { + DB.Model(&user).Update("notify_date", nil) +} + +// SetStatus 设定用户状态 +func (user *User) SetStatus(status int) { + DB.Model(&user).Update("status", status) +} + +// Update 更新用户 +func (user *User) Update(val map[string]interface{}) error { + return DB.Model(user).Updates(val).Error +} + +// UpdateOptions 更新用户偏好设定 +func (user *User) UpdateOptions() error { + if err := user.SerializeOptions(); err != nil { + return err + } + return user.Update(map[string]interface{}{"options": user.Options}) +} + +// GetGroupExpiredUsers 获取用户组过期的用户 +func GetGroupExpiredUsers() []User { + var users []User + DB.Where("group_expires < ? 
and previous_group_id <> 0", time.Now()).Find(&users) + return users +} + +// GetTolerantExpiredUser 获取超过宽容期的用户 +func GetTolerantExpiredUser() []User { + var users []User + DB.Set("gorm:auto_preload", true).Where("notify_date < ?", time.Now().Add( + time.Duration(-GetIntSetting("ban_time", 10))*time.Second), + ).Find(&users) + return users +} + +// GroupFallback 回退到初始用户组 +func (user *User) GroupFallback() { + if user.GroupExpires != nil && user.PreviousGroupID != 0 { + user.Group.ID = user.PreviousGroupID + DB.Model(&user).Updates(map[string]interface{}{ + "group_expires": nil, + "previous_group_id": 0, + "group_id": user.PreviousGroupID, + }) + } +} + +// UpgradeGroup 升级用户组 +func (user *User) UpgradeGroup(id uint, expires *time.Time) error { + user.Group.ID = id + previousGroupID := user.GroupID + if user.PreviousGroupID != 0 && user.GroupID == id { + previousGroupID = user.PreviousGroupID + } + return DB.Model(&user).Updates(map[string]interface{}{ + "group_expires": expires, + "previous_group_id": previousGroupID, + "group_id": id, + }).Error +} diff --git a/models/user_authn.go b/models/user_authn.go new file mode 100644 index 0000000..ba329bf --- /dev/null +++ b/models/user_authn.go @@ -0,0 +1,79 @@ +package model + +import ( + "encoding/base64" + "encoding/binary" + "encoding/json" + "fmt" + "net/url" + + "github.com/cloudreve/Cloudreve/v3/pkg/hashid" + "github.com/duo-labs/webauthn/webauthn" +) + +/* + `webauthn.User` 接口的实现 +*/ + +// WebAuthnID 返回用户ID +func (user User) WebAuthnID() []byte { + bs := make([]byte, 8) + binary.LittleEndian.PutUint64(bs, uint64(user.ID)) + return bs +} + +// WebAuthnName 返回用户名 +func (user User) WebAuthnName() string { + return user.Email +} + +// WebAuthnDisplayName 获得用于展示的用户名 +func (user User) WebAuthnDisplayName() string { + return user.Nick +} + +// WebAuthnIcon 获得用户头像 +func (user User) WebAuthnIcon() string { + avatar, _ := url.Parse("/api/v3/user/avatar/" + hashid.HashID(user.ID, hashid.UserID) + "/l") + base := GetSiteURL() 
+ base.Scheme = "https" + return base.ResolveReference(avatar).String() +} + +// WebAuthnCredentials 获得已注册的验证器凭证 +func (user User) WebAuthnCredentials() []webauthn.Credential { + var res []webauthn.Credential + err := json.Unmarshal([]byte(user.Authn), &res) + if err != nil { + fmt.Println(err) + } + return res +} + +// RegisterAuthn 添加新的验证器 +func (user *User) RegisterAuthn(credential *webauthn.Credential) error { + exists := user.WebAuthnCredentials() + exists = append(exists, *credential) + res, err := json.Marshal(exists) + if err != nil { + return err + } + + return DB.Model(user).Update("authn", string(res)).Error +} + +// RemoveAuthn 删除验证器 +func (user *User) RemoveAuthn(id string) { + exists := user.WebAuthnCredentials() + for i := 0; i < len(exists); i++ { + idEncoded := base64.StdEncoding.EncodeToString(exists[i].ID) + if idEncoded == id { + exists[len(exists)-1], exists[i] = exists[i], exists[len(exists)-1] + exists = exists[:len(exists)-1] + break + } + } + + res, _ := json.Marshal(exists) + DB.Model(user).Update("authn", string(res)) +} diff --git a/models/webdav.go b/models/webdav.go new file mode 100644 index 0000000..ee424aa --- /dev/null +++ b/models/webdav.go @@ -0,0 +1,53 @@ +package model + +import ( + "github.com/jinzhu/gorm" +) + +// Webdav 应用账户 +type Webdav struct { + gorm.Model + Name string // 应用名称 + Password string `gorm:"unique_index:password_only_on"` // 应用密码 + UserID uint `gorm:"unique_index:password_only_on"` // 用户ID + Root string `gorm:"type:text"` // 根目录 + Readonly bool `gorm:"type:bool"` // 是否只读 + UseProxy bool `gorm:"type:bool"` // 是否进行反代 +} + +// Create 创建账户 +func (webdav *Webdav) Create() (uint, error) { + if err := DB.Create(webdav).Error; err != nil { + return 0, err + } + return webdav.ID, nil +} + +// GetWebdavByPassword 根据密码和用户查找Webdav应用 +func GetWebdavByPassword(password string, uid uint) (*Webdav, error) { + webdav := &Webdav{} + res := DB.Where("user_id = ? 
and password = ?", uid, password).First(webdav) + return webdav, res.Error +} + +// ListWebDAVAccounts 列出用户的所有账号 +func ListWebDAVAccounts(uid uint) []Webdav { + var accounts []Webdav + DB.Where("user_id = ?", uid).Order("created_at desc").Find(&accounts) + return accounts +} + +// DeleteWebDAVAccountByID 根据账户ID和UID删除账户 +func DeleteWebDAVAccountByID(id, uid uint) { + DB.Where("user_id = ? and id = ?", uid, id).Delete(&Webdav{}) +} + +// UpdateWebDAVAccountByID 根据账户ID和UID更新账户 +func UpdateWebDAVAccountByID(id, uid uint, updates map[string]interface{}) { + DB.Model(&Webdav{Model: gorm.Model{ID: id}, UserID: uid}).Updates(updates) +} + +// UpdateWebDAVAccountReadonlyByID 根据账户ID和UID更新账户的只读性 +func UpdateWebDAVAccountReadonlyByID(id, uid uint, readonly bool) { + DB.Model(&Webdav{Model: gorm.Model{ID: id}, UserID: uid}).UpdateColumn("readonly", readonly) +} diff --git a/paksource.sh b/paksource.sh new file mode 100644 index 0000000..d626c7a --- /dev/null +++ b/paksource.sh @@ -0,0 +1,4 @@ +#!/usr/bin/bash + +cd ../ +zip -r cloudreveplus-source.zip ./PlusBackend/ -x './PlusBackend/assets/node_modules/*' --exclude '**/.git/**' diff --git a/pkg/aria2/aria2.go b/pkg/aria2/aria2.go new file mode 100644 index 0000000..f91766f --- /dev/null +++ b/pkg/aria2/aria2.go @@ -0,0 +1,67 @@ +package aria2 + +import ( + "context" + "fmt" + "net/url" + "sync" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/common" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/monitor" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/rpc" + "github.com/cloudreve/Cloudreve/v3/pkg/balancer" + "github.com/cloudreve/Cloudreve/v3/pkg/cluster" + "github.com/cloudreve/Cloudreve/v3/pkg/mq" +) + +// Instance 默认使用的Aria2处理实例 +var Instance common.Aria2 = &common.DummyAria2{} + +// LB 获取 Aria2 节点的负载均衡器 +var LB balancer.Balancer + +// Lock Instance的读写锁 +var Lock sync.RWMutex + +// GetLoadBalancer 返回供Aria2使用的负载均衡器 +func GetLoadBalancer() balancer.Balancer { + 
Lock.RLock() + defer Lock.RUnlock() + return LB +} + +// Init 初始化 +func Init(isReload bool, pool cluster.Pool, mqClient mq.MQ) { + Lock.Lock() + LB = balancer.NewBalancer("RoundRobin") + Lock.Unlock() + + if !isReload { + // 从数据库中读取未完成任务,创建监控 + unfinished := model.GetDownloadsByStatus(common.Ready, common.Paused, common.Downloading, common.Seeding) + + for i := 0; i < len(unfinished); i++ { + // 创建任务监控 + monitor.NewMonitor(&unfinished[i], pool, mqClient) + } + } +} + +// TestRPCConnection 发送测试用的 RPC 请求,测试服务连通性 +func TestRPCConnection(server, secret string, timeout int) (rpc.VersionInfo, error) { + // 解析RPC服务地址 + rpcServer, err := url.Parse(server) + if err != nil { + return rpc.VersionInfo{}, fmt.Errorf("cannot parse RPC server: %w", err) + } + + rpcServer.Path = "/jsonrpc" + caller, err := rpc.New(context.Background(), rpcServer.String(), secret, time.Duration(timeout)*time.Second, nil) + if err != nil { + return rpc.VersionInfo{}, fmt.Errorf("cannot initialize rpc connection: %w", err) + } + + return caller.GetVersion() +} diff --git a/pkg/aria2/common/common.go b/pkg/aria2/common/common.go new file mode 100644 index 0000000..ae5e6b0 --- /dev/null +++ b/pkg/aria2/common/common.go @@ -0,0 +1,119 @@ +package common + +import ( + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/rpc" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" +) + +// Aria2 离线下载处理接口 +type Aria2 interface { + // Init 初始化客户端连接 + Init() error + // CreateTask 创建新的任务 + CreateTask(task *model.Download, options map[string]interface{}) (string, error) + // 返回状态信息 + Status(task *model.Download) (rpc.StatusInfo, error) + // 取消任务 + Cancel(task *model.Download) error + // 选择要下载的文件 + Select(task *model.Download, files []int) error + // 获取离线下载配置 + GetConfig() model.Aria2Option + // 删除临时下载文件 + DeleteTempFile(*model.Download) error +} + +const ( + // URLTask 从URL添加的任务 + URLTask = iota + // TorrentTask 种子任务 + TorrentTask +) + +const ( + // Ready 准备就绪 + Ready = 
iota + // Downloading 下载中 + Downloading + // Paused 暂停中 + Paused + // Error 出错 + Error + // Complete 完成 + Complete + // Canceled 取消/停止 + Canceled + // Unknown 未知状态 + Unknown + // Seeding 做种中 + Seeding +) + +var ( + // ErrNotEnabled 功能未开启错误 + ErrNotEnabled = serializer.NewError(serializer.CodeFeatureNotEnabled, "not enabled", nil) + // ErrUserNotFound 未找到下载任务创建者 + ErrUserNotFound = serializer.NewError(serializer.CodeUserNotFound, "", nil) +) + +// DummyAria2 未开启Aria2功能时使用的默认处理器 +type DummyAria2 struct { +} + +func (instance *DummyAria2) Init() error { + return nil +} + +// CreateTask 创建新任务,此处直接返回未开启错误 +func (instance *DummyAria2) CreateTask(model *model.Download, options map[string]interface{}) (string, error) { + return "", ErrNotEnabled +} + +// Status 返回未开启错误 +func (instance *DummyAria2) Status(task *model.Download) (rpc.StatusInfo, error) { + return rpc.StatusInfo{}, ErrNotEnabled +} + +// Cancel 返回未开启错误 +func (instance *DummyAria2) Cancel(task *model.Download) error { + return ErrNotEnabled +} + +// Select 返回未开启错误 +func (instance *DummyAria2) Select(task *model.Download, files []int) error { + return ErrNotEnabled +} + +// GetConfig 返回空的 +func (instance *DummyAria2) GetConfig() model.Aria2Option { + return model.Aria2Option{} +} + +// GetConfig 返回空的 +func (instance *DummyAria2) DeleteTempFile(src *model.Download) error { + return ErrNotEnabled +} + +// GetStatus 将给定的状态字符串转换为状态标识数字 +func GetStatus(status rpc.StatusInfo) int { + switch status.Status { + case "complete": + return Complete + case "active": + if status.BitTorrent.Mode != "" && status.CompletedLength == status.TotalLength { + return Seeding + } + return Downloading + case "waiting": + return Ready + case "paused": + return Paused + case "error": + return Error + case "removed": + return Canceled + default: + return Unknown + } +} diff --git a/pkg/aria2/monitor/monitor.go b/pkg/aria2/monitor/monitor.go new file mode 100644 index 0000000..ff3f380 --- /dev/null +++ b/pkg/aria2/monitor/monitor.go @@ -0,0 
+1,320 @@ +package monitor + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "path/filepath" + "strconv" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/common" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/rpc" + "github.com/cloudreve/Cloudreve/v3/pkg/cluster" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/mq" + "github.com/cloudreve/Cloudreve/v3/pkg/task" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// Monitor 离线下载状态监控 +type Monitor struct { + Task *model.Download + Interval time.Duration + + notifier <-chan mq.Message + node cluster.Node + retried int +} + +var MAX_RETRY = 10 + +// NewMonitor 新建离线下载状态监控 +func NewMonitor(task *model.Download, pool cluster.Pool, mqClient mq.MQ) { + monitor := &Monitor{ + Task: task, + notifier: make(chan mq.Message), + node: pool.GetNodeByID(task.GetNodeID()), + } + + if monitor.node != nil { + monitor.Interval = time.Duration(monitor.node.GetAria2Instance().GetConfig().Interval) * time.Second + go monitor.Loop(mqClient) + + monitor.notifier = mqClient.Subscribe(monitor.Task.GID, 0) + } else { + monitor.setErrorStatus(errors.New("node not avaliable")) + } +} + +// Loop 开启监控循环 +func (monitor *Monitor) Loop(mqClient mq.MQ) { + defer mqClient.Unsubscribe(monitor.Task.GID, monitor.notifier) + fmt.Println(cluster.Default) + + // 首次循环立即更新 + interval := 50 * time.Millisecond + + for { + select { + case <-monitor.notifier: + if monitor.Update() { + return + } + case <-time.After(interval): + interval = monitor.Interval + if monitor.Update() { + return + } + } + } +} + +// Update 更新状态,返回值表示是否退出监控 +func (monitor *Monitor) Update() bool { + status, err := monitor.node.GetAria2Instance().Status(monitor.Task) + + if err != nil { + monitor.retried++ + util.Log().Warning("Cannot get status of download task %q: %s", monitor.Task.GID, err) + + // 十次重试后认定为任务失败 + 
if monitor.retried > MAX_RETRY { + util.Log().Warning("Cannot get status of download task %q,exceed maximum retry threshold: %s", + monitor.Task.GID, err) + monitor.setErrorStatus(err) + monitor.RemoveTempFolder() + return true + } + + return false + } + monitor.retried = 0 + + // 磁力链下载需要跟随 + if len(status.FollowedBy) > 0 { + util.Log().Debug("Redirected download task from %q to %q.", monitor.Task.GID, status.FollowedBy[0]) + monitor.Task.GID = status.FollowedBy[0] + monitor.Task.Save() + return false + } + + // 更新任务信息 + if err := monitor.UpdateTaskInfo(status); err != nil { + util.Log().Warning("Failed to update status of download task %q: %s", monitor.Task.GID, err) + monitor.setErrorStatus(err) + monitor.RemoveTempFolder() + return true + } + + util.Log().Debug("Remote download %q status updated to %q.", status.Gid, status.Status) + + switch common.GetStatus(status) { + case common.Complete, common.Seeding: + return monitor.Complete(task.TaskPoll) + case common.Error: + return monitor.Error(status) + case common.Downloading, common.Ready, common.Paused: + return false + case common.Canceled: + monitor.Task.Status = common.Canceled + monitor.Task.Save() + monitor.RemoveTempFolder() + return true + default: + util.Log().Warning("Download task %q returns unknown status %q.", monitor.Task.GID, status.Status) + return true + } +} + +// UpdateTaskInfo 更新数据库中的任务信息 +func (monitor *Monitor) UpdateTaskInfo(status rpc.StatusInfo) error { + originSize := monitor.Task.TotalSize + + monitor.Task.GID = status.Gid + monitor.Task.Status = common.GetStatus(status) + + // 文件大小、已下载大小 + total, err := strconv.ParseUint(status.TotalLength, 10, 64) + if err != nil { + total = 0 + } + downloaded, err := strconv.ParseUint(status.CompletedLength, 10, 64) + if err != nil { + downloaded = 0 + } + monitor.Task.TotalSize = total + monitor.Task.DownloadedSize = downloaded + monitor.Task.GID = status.Gid + monitor.Task.Parent = status.Dir + + // 下载速度 + speed, err := 
strconv.Atoi(status.DownloadSpeed) + if err != nil { + speed = 0 + } + + monitor.Task.Speed = speed + attrs, _ := json.Marshal(status) + monitor.Task.Attrs = string(attrs) + + if err := monitor.Task.Save(); err != nil { + return err + } + + if originSize != monitor.Task.TotalSize { + // 文件大小更新后,对文件限制等进行校验 + if err := monitor.ValidateFile(); err != nil { + // 验证失败时取消任务 + monitor.node.GetAria2Instance().Cancel(monitor.Task) + return err + } + } + + return nil +} + +// ValidateFile 上传过程中校验文件大小、文件名 +func (monitor *Monitor) ValidateFile() error { + // 找到任务创建者 + user := monitor.Task.GetOwner() + if user == nil { + return common.ErrUserNotFound + } + + // 创建文件系统 + fs, err := filesystem.NewFileSystem(user) + if err != nil { + return err + } + defer fs.Recycle() + + if err := fs.SetPolicyFromPath(monitor.Task.Dst); err != nil { + return fmt.Errorf("failed to switch policy to target dir: %w", err) + } + + // 创建上下文环境 + file := &fsctx.FileStream{ + Size: monitor.Task.TotalSize, + } + + // 验证用户容量 + if err := filesystem.HookValidateCapacity(context.Background(), fs, file); err != nil { + return err + } + + // 验证每个文件 + for _, fileInfo := range monitor.Task.StatusInfo.Files { + if fileInfo.Selected == "true" { + // 创建上下文环境 + fileSize, _ := strconv.ParseUint(fileInfo.Length, 10, 64) + file := &fsctx.FileStream{ + Size: fileSize, + Name: filepath.Base(fileInfo.Path), + } + if err := filesystem.HookValidateFile(context.Background(), fs, file); err != nil { + return err + } + } + + } + + return nil +} + +// Error 任务下载出错处理,返回是否中断监控 +func (monitor *Monitor) Error(status rpc.StatusInfo) bool { + monitor.setErrorStatus(errors.New(status.ErrorMessage)) + + // 清理临时文件 + monitor.RemoveTempFolder() + + return true +} + +// RemoveTempFolder 清理下载临时目录 +func (monitor *Monitor) RemoveTempFolder() { + monitor.node.GetAria2Instance().DeleteTempFile(monitor.Task) +} + +// Complete 完成下载,返回是否中断监控 +func (monitor *Monitor) Complete(pool task.Pool) bool { + // 未开始转存,提交转存任务 + if monitor.Task.TaskID == 0 { + 
return monitor.transfer(pool) + } + + // 做种完成 + if common.GetStatus(monitor.Task.StatusInfo) == common.Complete { + transferTask, err := model.GetTasksByID(monitor.Task.TaskID) + if err != nil { + monitor.setErrorStatus(err) + monitor.RemoveTempFolder() + return true + } + + // 转存完成,回收下载目录 + if transferTask.Type == task.TransferTaskType && transferTask.Status >= task.Error { + job, err := task.NewRecycleTask(monitor.Task) + if err != nil { + monitor.setErrorStatus(err) + monitor.RemoveTempFolder() + return true + } + + // 提交回收任务 + pool.Submit(job) + + return true + } + } + + return false +} + +func (monitor *Monitor) transfer(pool task.Pool) bool { + // 创建中转任务 + file := make([]string, 0, len(monitor.Task.StatusInfo.Files)) + sizes := make(map[string]uint64, len(monitor.Task.StatusInfo.Files)) + for i := 0; i < len(monitor.Task.StatusInfo.Files); i++ { + fileInfo := monitor.Task.StatusInfo.Files[i] + if fileInfo.Selected == "true" { + file = append(file, fileInfo.Path) + size, _ := strconv.ParseUint(fileInfo.Length, 10, 64) + sizes[fileInfo.Path] = size + } + } + + job, err := task.NewTransferTask( + monitor.Task.UserID, + file, + monitor.Task.Dst, + monitor.Task.Parent, + true, + monitor.node.ID(), + sizes, + ) + if err != nil { + monitor.setErrorStatus(err) + monitor.RemoveTempFolder() + return true + } + + // 提交中转任务 + pool.Submit(job) + + // 更新任务ID + monitor.Task.TaskID = job.Model().ID + monitor.Task.Save() + + return false +} + +func (monitor *Monitor) setErrorStatus(err error) { + monitor.Task.Status = common.Error + monitor.Task.Error = err.Error() + monitor.Task.Save() +} diff --git a/pkg/aria2/rpc/README.md b/pkg/aria2/rpc/README.md new file mode 100644 index 0000000..ba5d2fd --- /dev/null +++ b/pkg/aria2/rpc/README.md @@ -0,0 +1,257 @@ +# PACKAGE DOCUMENTATION + +**package rpc** + + import "github.com/matzoe/argo/rpc" + + + +## FUNCTIONS + +``` +func Call(address, method string, params, reply interface{}) error +``` + +## TYPES + +``` +type Client struct { 
+ // contains filtered or unexported fields +} +``` + +``` +func New(uri string) *Client +``` + +``` +func (id *Client) AddMetalink(uri string, options ...interface{}) (gid string, err error) +``` +`aria2.addMetalink(metalink[, options[, position]])` This method adds Metalink download by uploading ".metalink" file. `metalink` is of type base64 which contains Base64-encoded ".metalink" file. `options` is of type struct and its members are a pair of option name and value. See Options below for more details. If `position` is given as an integer starting from 0, the new download is inserted at `position` in the +waiting queue. If `position` is not given or `position` is larger than the size of the queue, it is appended at the end of the queue. This method returns array of GID of registered download. If `--rpc-save-upload-metadata` is true, the uploaded data is saved as a file named hex string of SHA-1 hash of data plus ".metalink" in the directory specified by `--dir` option. The example of filename is 0a3893293e27ac0490424c06de4d09242215f0a6.metalink. If same file already exists, it is overwritten. If the file cannot be saved successfully or `--rpc-save-upload-metadata` is false, the downloads added by this method are not saved by `--save-session`. + +``` +func (id *Client) AddTorrent(filename string, options ...interface{}) (gid string, err error) +``` +`aria2.addTorrent(torrent[, uris[, options[, position]]])` This method adds BitTorrent download by uploading ".torrent" file. If you want to add BitTorrent Magnet URI, use `aria2.addUri()` method instead. torrent is of type base64 which contains Base64-encoded ".torrent" file. `uris` is of type array and its element is URI which is of type string. `uris` is used for Web-seeding. For single file torrents, URI can be a complete URI pointing to the resource or if URI ends with /, name in torrent file is added. For multi-file torrents, name and path in torrent are added to form a URI for each file. 
options is of type struct and its members are +a pair of option name and value. See Options below for more details. If `position` is given as an integer starting from 0, the new download is inserted at `position` in the waiting queue. If `position` is not given or `position` is larger than the size of the queue, it is appended at the end of the queue. This method returns GID of registered download. If `--rpc-save-upload-metadata` is true, the uploaded data is saved as a file named hex string of SHA-1 hash of data plus ".torrent" in the +directory specified by `--dir` option. The example of filename is 0a3893293e27ac0490424c06de4d09242215f0a6.torrent. If same file already exists, it is overwritten. If the file cannot be saved successfully or `--rpc-save-upload-metadata` is false, the downloads added by this method are not saved by -`-save-session`. + +``` +func (id *Client) AddUri(uri string, options ...interface{}) (gid string, err error) +``` + +`aria2.addUri(uris[, options[, position]])` This method adds new HTTP(S)/FTP/BitTorrent Magnet URI. `uris` is of type array and its element is URI which is of type string. For BitTorrent Magnet URI, `uris` must have only one element and it should be BitTorrent Magnet URI. URIs in uris must point to the same file. If you mix other URIs which point to another file, aria2 does not complain but download may +fail. `options` is of type struct and its members are a pair of option name and value. See Options below for more details. If `position` is given as an integer starting from 0, the new download is inserted at position in the waiting queue. If `position` is not given or `position` is larger than the size of the queue, it is appended at the end of the queue. This method returns GID of registered download. + +``` +func (id *Client) ChangeGlobalOption(options map[string]interface{}) (g string, err error) +``` + +`aria2.changeGlobalOption(options)` This method changes global options dynamically. `options` is of type struct. 
The following `options` are available: + + download-result + log + log-level + max-concurrent-downloads + max-download-result + max-overall-download-limit + max-overall-upload-limit + save-cookies + save-session + server-stat-of + +In addition to them, options listed in Input File subsection are available, except for following options: `checksum`, `index-out`, `out`, `pause` and `select-file`. Using `log` option, you can dynamically start logging or change log file. To stop logging, give empty string("") as a parameter value. Note that log file is always opened in append mode. This method returns OK for success. + +``` +func (id *Client) ChangeOption(gid string, options map[string]interface{}) (g string, err error) +``` + +`aria2.changeOption(gid, options)` This method changes options of the download denoted by `gid` dynamically. `gid` is of type string. `options` is of type struct. The following `options` are available for active downloads: + + bt-max-peers + bt-request-peer-speed-limit + bt-remove-unselected-file + force-save + max-download-limit + max-upload-limit + +For waiting or paused downloads, in addition to the above options, options listed in Input File subsection are available, except for following options: dry-run, metalink-base-uri, parameterized-uri, pause, piece-length and rpc-save-upload-metadata option. This method returns OK for success. + +``` +func (id *Client) ChangePosition(gid string, pos int, how string) (p int, err error) +``` + +`aria2.changePosition(gid, pos, how)` This method changes the position of the download denoted by `gid`. `pos` is of type integer. `how` is of type string. If `how` is `POS_SET`, it moves the download to a position relative to the beginning of the queue. If `how` is `POS_CUR`, it moves the download to a position relative to the current position. If `how` is `POS_END`, it moves the download to a position relative to the end of the queue. 
If the destination position is less than 0 or beyond the end +of the queue, it moves the download to the beginning or the end of the queue respectively. The response is of type integer and it is the destination position. + +``` +func (id *Client) ChangeUri(gid string, fileindex int, delUris []string, addUris []string, position ...int) (p []int, err error) +``` + +`aria2.changeUri(gid, fileIndex, delUris, addUris[, position])` This method removes URIs in `delUris` from and appends URIs in `addUris` to download denoted by gid. `delUris` and `addUris` are list of string. A download can contain multiple files and URIs are attached to each file. `fileIndex` is used to select which file to remove/attach given URIs. `fileIndex` is 1-based. `position` is used to specify where URIs are inserted in the existing waiting URI list. `position` is 0-based. When +`position` is omitted, URIs are appended to the back of the list. This method first execute removal and then addition. `position` is the `position` after URIs are removed, not the `position` when this method is called. When removing URI, if same URIs exist in download, only one of them is removed for each URI in delUris. In other words, there are three URIs http://example.org/aria2 and you want remove them all, you +have to specify (at least) 3 http://example.org/aria2 in delUris. This method returns a list which contains 2 integers. The first integer is the number of URIs deleted. The second integer is the number of URIs added. + +``` +func (id *Client) ForcePause(gid string) (g string, err error) +``` + +`aria2.forcePause(pid)` This method pauses the download denoted by `gid`. This method behaves just like aria2.pause() except that this method pauses download without any action which takes time such as contacting BitTorrent tracker. + +``` +func (id *Client) ForcePauseAll() (g string, err error) +``` + +`aria2.forcePauseAll()` This method is equal to calling `aria2.forcePause()` for every active/waiting download. 
This methods returns OK for success. + +``` +func (id *Client) ForceRemove(gid string) (g string, err error) +``` + +`aria2.forceRemove(gid)` This method removes the download denoted by `gid`. This method behaves just like aria2.remove() except that this method removes download without any action which takes time such as contacting BitTorrent tracker. + +``` +func (id *Client) ForceShutdown() (g string, err error) +``` + +`aria2.forceShutdown()` This method shutdowns aria2. This method behaves like `aria2.shutdown()` except that any actions which takes time such as contacting BitTorrent tracker are skipped. This method returns OK. + +``` +func (id *Client) GetFiles(gid string) (m map[string]interface{}, err error) +``` + +`aria2.getFiles(gid)` This method returns file list of the download denoted by `gid`. `gid` is of type string. + +``` +func (id *Client) GetGlobalOption() (m map[string]interface{}, err error) +``` + +`aria2.getGlobalOption()` This method returns global options. The response is of type struct. Its key is the name of option. The value type is string. Note that this method does not return options which have no default value and have not been set by the command-line options, configuration files or RPC methods. Because global options are used as a template for the options of newly added download, the response contains +keys returned by `aria2.getOption()` method. + +``` +func (id *Client) GetGlobalStat() (m map[string]interface{}, err error) +``` + +`aria2.getGlobalStat()` This method returns global statistics such as overall download and upload speed. + +``` +func (id *Client) GetOption(gid string) (m map[string]interface{}, err error) +``` + +`aria2.getOption(gid)` This method returns options of the download denoted by `gid`. The response is of type struct. Its key is the name of option. The value type is string. 
Note that this method does not return options which have no default value and have not been set by the command-line options, configuration files or RPC methods. + +``` +func (id *Client) GetPeers(gid string) (m []map[string]interface{}, err error) +``` + +`aria2.getPeers(gid)` This method returns peer list of the download denoted by `gid`. `gid` is of type string. This method is for BitTorrent only. + +``` +func (id *Client) GetServers(gid string) (m []map[string]interface{}, err error) +``` + +`aria2.getServers(gid)` This method returns currently connected HTTP(S)/FTP servers of the download denoted by `gid`. `gid` is of type string. + +``` +func (id *Client) GetSessionInfo() (m map[string]interface{}, err error) +``` + +`aria2.getSessionInfo()` This method returns session information. + +``` +func (id *Client) GetUris(gid string) (m map[string]interface{}, err error) +``` + +`aria2.getUris(gid)` This method returns URIs used in the download denoted by `gid`. `gid` is of type string. + +``` +func (id *Client) GetVersion() (m map[string]interface{}, err error) +``` + +`aria2.getVersion()` This method returns version of the program and the list of enabled features. + +``` +func (id *Client) Multicall(methods []map[string]interface{}) (r []interface{}, err error) +``` + +`system.multicall(methods)` This method encapsulates multiple method calls in a single request. `methods` is of type array and its element is struct. The struct contains two keys: `methodName` and `params`. `methodName` is the method name to call and `params` is array containing parameters to the method. This method returns array of responses. The element of array will either be a one-item array containing the return value of each method call or struct of fault element if an encapsulated method call fails. + +``` +func (id *Client) Pause(gid string) (g string, err error) +``` + +`aria2.pause(gid)` This method pauses the download denoted by `gid`. `gid` is of type string. 
The status of paused download becomes paused. If the download is active, the download is placed on the first position of waiting queue. As long as the status is paused, the download is not started. To change status to waiting, use `aria2.unpause()` method. This method returns GID of paused download. + +``` +func (id *Client) PauseAll() (g string, err error) +``` + +`aria2.pauseAll()` This method is equal to calling `aria2.pause()` for every active/waiting download. This methods returns OK for success. + +``` +func (id *Client) PurgeDowloadResult() (g string, err error) +``` + +`aria2.purgeDownloadResult()` This method purges completed/error/removed downloads to free memory. This method returns OK. + +``` +func (id *Client) Remove(gid string) (g string, err error) +``` + +`aria2.remove(gid)` This method removes the download denoted by gid. `gid` is of type string. If specified download is in progress, it is stopped at first. The status of removed download becomes removed. This method returns GID of removed download. + +``` +func (id *Client) RemoveDownloadResult(gid string) (g string, err error) +``` + +`aria2.removeDownloadResult(gid)` This method removes completed/error/removed download denoted by `gid` from memory. This method returns OK for success. + +``` +func (id *Client) Shutdown() (g string, err error) +``` + +`aria2.shutdown()` This method shutdowns aria2. This method returns OK. + +``` +func (id *Client) TellActive(keys ...string) (m []map[string]interface{}, err error) +``` + +`aria2.tellActive([keys])` This method returns the list of active downloads. The response is of type array and its element is the same struct returned by `aria2.tellStatus()` method. For `keys` parameter, please refer to `aria2.tellStatus()` method. + +``` +func (id *Client) TellStatus(gid string, keys ...string) (m map[string]interface{}, err error) +``` + +`aria2.tellStatus(gid[, keys])` This method returns download progress of the download denoted by `gid`. 
`gid` is of type string. `keys` is array of string. If it is specified, the response contains only keys in `keys` array. If `keys` is empty or not specified, the response contains all keys. This is useful when you just want specific keys and avoid unnecessary transfers. For example, `aria2.tellStatus("2089b05ecca3d829", ["gid", "status"])` returns `gid` and `status` key. + +``` +func (id *Client) TellStopped(offset, num int, keys ...string) (m []map[string]interface{}, err error) +``` + +`aria2.tellStopped(offset, num[, keys])` This method returns the list of stopped download. `offset` is of type integer and specifies the `offset` from the oldest download. `num` is of type integer and specifies the number of downloads to be returned. For keys parameter, please refer to `aria2.tellStatus()` method. `offset` and `num` have the same semantics as `aria2.tellWaiting()` method. The response is of type array and its element is the same struct returned by `aria2.tellStatus()` method. + +``` +func (id *Client) TellWaiting(offset, num int, keys ...string) (m []map[string]interface{}, err error) +``` +`aria2.tellWaiting(offset, num[, keys])` This method returns the list of waiting download, including paused downloads. `offset` is of type integer and specifies the `offset` from the download waiting at the front. num is of type integer and specifies the number of downloads to be returned. For keys parameter, please refer to aria2.tellStatus() method. If `offset` is a positive integer, this method returns downloads +in the range of `[offset, offset + num)`. `offset` can be a negative integer. `offset == -1` points last download in the waiting queue and `offset == -2` points the download before the last download, and so on. The downloads in the response are in reversed order. For example, imagine that three downloads "A","B" and "C" are waiting in this order. + + aria2.tellWaiting(0, 1) returns ["A"]. + aria2.tellWaiting(1, 2) returns ["B", "C"]. 
+ aria2.tellWaiting(-1, 2) returns ["C", "B"]. + +The response is of type array and its element is the same struct returned by `aria2.tellStatus()` method. + +``` +func (id *Client) Unpause(gid string) (g string, err error) +``` + +`aria2.unpause(gid)` This method changes the status of the download denoted by `gid` from paused to waiting. This makes the download eligible to restart. `gid` is of type string. This method returns GID of unpaused download. + +``` +func (id *Client) UnpauseAll() (g string, err error) +``` + +`aria2.unpauseAll()` This method is equal to calling `aria2.unpause()` for every active/waiting download. This methods returns OK for success. diff --git a/pkg/aria2/rpc/call.go b/pkg/aria2/rpc/call.go new file mode 100644 index 0000000..11cb137 --- /dev/null +++ b/pkg/aria2/rpc/call.go @@ -0,0 +1,274 @@ +package rpc + +import ( + "context" + "errors" + "log" + "net" + "net/http" + "net/url" + "sync" + "sync/atomic" + "time" + + "github.com/gorilla/websocket" +) + +type caller interface { + // Call sends a request of rpc to aria2 daemon + Call(method string, params, reply interface{}) (err error) + Close() error +} + +type httpCaller struct { + uri string + c *http.Client + cancel context.CancelFunc + wg *sync.WaitGroup + once sync.Once +} + +func newHTTPCaller(ctx context.Context, u *url.URL, timeout time.Duration, notifer Notifier) *httpCaller { + c := &http.Client{ + Transport: &http.Transport{ + MaxIdleConnsPerHost: 1, + MaxConnsPerHost: 1, + // TLSClientConfig: tlsConfig, + Dial: (&net.Dialer{ + Timeout: timeout, + KeepAlive: 60 * time.Second, + }).Dial, + TLSHandshakeTimeout: 3 * time.Second, + ResponseHeaderTimeout: timeout, + }, + } + var wg sync.WaitGroup + ctx, cancel := context.WithCancel(ctx) + h := &httpCaller{uri: u.String(), c: c, cancel: cancel, wg: &wg} + if notifer != nil { + h.setNotifier(ctx, *u, notifer) + } + return h +} + +func (h *httpCaller) Close() (err error) { + h.once.Do(func() { + h.cancel() + h.wg.Wait() + }) + return 
+} + +func (h *httpCaller) setNotifier(ctx context.Context, u url.URL, notifer Notifier) (err error) { + u.Scheme = "ws" + conn, _, err := websocket.DefaultDialer.Dial(u.String(), nil) + if err != nil { + return + } + h.wg.Add(1) + go func() { + defer h.wg.Done() + defer conn.Close() + select { + case <-ctx.Done(): + conn.SetWriteDeadline(time.Now().Add(time.Second)) + if err := conn.WriteMessage(websocket.CloseMessage, + websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")); err != nil { + log.Printf("sending websocket close message: %v", err) + } + return + } + }() + h.wg.Add(1) + go func() { + defer h.wg.Done() + var request websocketResponse + var err error + for { + select { + case <-ctx.Done(): + return + default: + } + if err = conn.ReadJSON(&request); err != nil { + select { + case <-ctx.Done(): + return + default: + } + log.Printf("conn.ReadJSON|err:%v", err.Error()) + return + } + switch request.Method { + case "aria2.onDownloadStart": + notifer.OnDownloadStart(request.Params) + case "aria2.onDownloadPause": + notifer.OnDownloadPause(request.Params) + case "aria2.onDownloadStop": + notifer.OnDownloadStop(request.Params) + case "aria2.onDownloadComplete": + notifer.OnDownloadComplete(request.Params) + case "aria2.onDownloadError": + notifer.OnDownloadError(request.Params) + case "aria2.onBtDownloadComplete": + notifer.OnBtDownloadComplete(request.Params) + default: + log.Printf("unexpected notification: %s", request.Method) + } + } + }() + return +} + +func (h httpCaller) Call(method string, params, reply interface{}) (err error) { + payload, err := EncodeClientRequest(method, params) + if err != nil { + return + } + r, err := h.c.Post(h.uri, "application/json", payload) + if err != nil { + return + } + err = DecodeClientResponse(r.Body, &reply) + r.Body.Close() + return +} + +type websocketCaller struct { + conn *websocket.Conn + sendChan chan *sendRequest + cancel context.CancelFunc + wg *sync.WaitGroup + once sync.Once + timeout time.Duration 
+} + +func newWebsocketCaller(ctx context.Context, uri string, timeout time.Duration, notifier Notifier) (*websocketCaller, error) { + var header = http.Header{} + conn, _, err := websocket.DefaultDialer.Dial(uri, header) + if err != nil { + return nil, err + } + + sendChan := make(chan *sendRequest, 16) + var wg sync.WaitGroup + ctx, cancel := context.WithCancel(ctx) + w := &websocketCaller{conn: conn, wg: &wg, cancel: cancel, sendChan: sendChan, timeout: timeout} + processor := NewResponseProcessor() + wg.Add(1) + go func() { // routine:recv + defer wg.Done() + defer cancel() + for { + select { + case <-ctx.Done(): + return + default: + } + var resp websocketResponse + if err := conn.ReadJSON(&resp); err != nil { + select { + case <-ctx.Done(): + return + default: + } + log.Printf("conn.ReadJSON|err:%v", err.Error()) + return + } + if resp.Id == nil { // RPC notifications + if notifier != nil { + switch resp.Method { + case "aria2.onDownloadStart": + notifier.OnDownloadStart(resp.Params) + case "aria2.onDownloadPause": + notifier.OnDownloadPause(resp.Params) + case "aria2.onDownloadStop": + notifier.OnDownloadStop(resp.Params) + case "aria2.onDownloadComplete": + notifier.OnDownloadComplete(resp.Params) + case "aria2.onDownloadError": + notifier.OnDownloadError(resp.Params) + case "aria2.onBtDownloadComplete": + notifier.OnBtDownloadComplete(resp.Params) + default: + log.Printf("unexpected notification: %s", resp.Method) + } + } + continue + } + processor.Process(resp.clientResponse) + } + }() + wg.Add(1) + go func() { // routine:send + defer wg.Done() + defer cancel() + defer w.conn.Close() + + for { + select { + case <-ctx.Done(): + if err := w.conn.WriteMessage(websocket.CloseMessage, + websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")); err != nil { + log.Printf("sending websocket close message: %v", err) + } + return + case req := <-sendChan: + processor.Add(req.request.Id, func(resp clientResponse) error { + err := resp.decode(req.reply) + 
req.cancel() + return err + }) + w.conn.SetWriteDeadline(time.Now().Add(timeout)) + w.conn.WriteJSON(req.request) + } + } + }() + + return w, nil +} + +func (w *websocketCaller) Close() (err error) { + w.once.Do(func() { + w.cancel() + w.wg.Wait() + }) + return +} + +func (w websocketCaller) Call(method string, params, reply interface{}) (err error) { + ctx, cancel := context.WithTimeout(context.Background(), w.timeout) + defer cancel() + select { + case w.sendChan <- &sendRequest{cancel: cancel, request: &clientRequest{ + Version: "2.0", + Method: method, + Params: params, + Id: reqid(), + }, reply: reply}: + + default: + return errors.New("sending channel blocking") + } + + select { + case <-ctx.Done(): + if err := ctx.Err(); err == context.DeadlineExceeded { + return err + } + } + return +} + +type sendRequest struct { + cancel context.CancelFunc + request *clientRequest + reply interface{} +} + +var reqid = func() func() uint64 { + var id = uint64(time.Now().UnixNano()) + return func() uint64 { + return atomic.AddUint64(&id, 1) + } +}() diff --git a/pkg/aria2/rpc/client.go b/pkg/aria2/rpc/client.go new file mode 100644 index 0000000..adb9e39 --- /dev/null +++ b/pkg/aria2/rpc/client.go @@ -0,0 +1,656 @@ +package rpc + +import ( + "context" + "encoding/base64" + "errors" + "io/ioutil" + "net/url" + "time" +) + +// Option is a container for specifying Call parameters and returning results +type Option map[string]interface{} + +type Client interface { + Protocol + Close() error +} + +type client struct { + caller + url *url.URL + token string +} + +var ( + errInvalidParameter = errors.New("invalid parameter") + errNotImplemented = errors.New("not implemented") + errConnTimeout = errors.New("connect to aria2 daemon timeout") +) + +// New returns an instance of Client +func New(ctx context.Context, uri string, token string, timeout time.Duration, notifier Notifier) (Client, error) { + u, err := url.Parse(uri) + if err != nil { + return nil, err + } + var caller 
caller + switch u.Scheme { + case "http", "https": + caller = newHTTPCaller(ctx, u, timeout, notifier) + case "ws", "wss": + caller, err = newWebsocketCaller(ctx, u.String(), timeout, notifier) + if err != nil { + return nil, err + } + default: + return nil, errInvalidParameter + } + c := &client{caller: caller, url: u, token: token} + return c, nil +} + +// `aria2.addUri([secret, ]uris[, options[, position]])` +// This method adds a new download. uris is an array of HTTP/FTP/SFTP/BitTorrent URIs (strings) pointing to the same resource. +// If you mix URIs pointing to different resources, then the download may fail or be corrupted without aria2 complaining. +// When adding BitTorrent Magnet URIs, uris must have only one element and it should be BitTorrent Magnet URI. +// options is a struct and its members are pairs of option name and value. +// If position is given, it must be an integer starting from 0. +// The new download will be inserted at position in the waiting queue. +// If position is omitted or position is larger than the current size of the queue, the new download is appended to the end of the queue. +// This method returns the GID of the newly registered download. +func (c *client) AddURI(uri string, options ...interface{}) (gid string, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, []string{uri}) + if options != nil { + params = append(params, options...) + } + err = c.Call(aria2AddURI, params, &gid) + return +} + +// `aria2.addTorrent([secret, ]torrent[, uris[, options[, position]]])` +// This method adds a BitTorrent download by uploading a ".torrent" file. +// If you want to add a BitTorrent Magnet URI, use the aria2.addUri() method instead. +// torrent must be a base64-encoded string containing the contents of the ".torrent" file. +// uris is an array of URIs (string). uris is used for Web-seeding. 
+// For single file torrents, the URI can be a complete URI pointing to the resource; if URI ends with /, name in torrent file is added. +// For multi-file torrents, name and path in torrent are added to form a URI for each file. options is a struct and its members are pairs of option name and value. +// If position is given, it must be an integer starting from 0. +// The new download will be inserted at position in the waiting queue. +// If position is omitted or position is larger than the current size of the queue, the new download is appended to the end of the queue. +// This method returns the GID of the newly registered download. +// If --rpc-save-upload-metadata is true, the uploaded data is saved as a file named as the hex string of SHA-1 hash of data plus ".torrent" in the directory specified by --dir option. +// E.g. a file name might be 0a3893293e27ac0490424c06de4d09242215f0a6.torrent. +// If a file with the same name already exists, it is overwritten! +// If the file cannot be saved successfully or --rpc-save-upload-metadata is false, the downloads added by this method are not saved by --save-session. +func (c *client) AddTorrent(filename string, options ...interface{}) (gid string, err error) { + co, err := ioutil.ReadFile(filename) + if err != nil { + return + } + file := base64.StdEncoding.EncodeToString(co) + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, file) + if options != nil { + params = append(params, options...) + } + err = c.Call(aria2AddTorrent, params, &gid) + return +} + +// `aria2.addMetalink([secret, ]metalink[, options[, position]])` +// This method adds a Metalink download by uploading a ".metalink" file. +// metalink is a base64-encoded string which contains the contents of the ".metalink" file. +// options is a struct and its members are pairs of option name and value. +// If position is given, it must be an integer starting from 0. 
+// The new download will be inserted at position in the waiting queue. +// If position is omitted or position is larger than the current size of the queue, the new download is appended to the end of the queue. +// This method returns an array of GIDs of newly registered downloads. +// If --rpc-save-upload-metadata is true, the uploaded data is saved as a file named hex string of SHA-1 hash of data plus ".metalink" in the directory specified by --dir option. +// E.g. a file name might be 0a3893293e27ac0490424c06de4d09242215f0a6.metalink. +// If a file with the same name already exists, it is overwritten! +// If the file cannot be saved successfully or --rpc-save-upload-metadata is false, the downloads added by this method are not saved by --save-session. +func (c *client) AddMetalink(filename string, options ...interface{}) (gid []string, err error) { + co, err := ioutil.ReadFile(filename) + if err != nil { + return + } + file := base64.StdEncoding.EncodeToString(co) + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, file) + if options != nil { + params = append(params, options...) + } + err = c.Call(aria2AddMetalink, params, &gid) + return +} + +// `aria2.remove([secret, ]gid)` +// This method removes the download denoted by gid (string). +// If the specified download is in progress, it is first stopped. +// The status of the removed download becomes removed. +// This method returns GID of removed download. +func (c *client) Remove(gid string) (g string, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + err = c.Call(aria2Remove, params, &g) + return +} + +// `aria2.forceRemove([secret, ]gid)` +// This method removes the download denoted by gid. 
+// This method behaves just like aria2.remove() except that this method removes the download without performing any actions which take time, such as contacting BitTorrent trackers to unregister the download first. +func (c *client) ForceRemove(gid string) (g string, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + err = c.Call(aria2ForceRemove, params, &g) + return +} + +// `aria2.pause([secret, ]gid)` +// This method pauses the download denoted by gid (string). +// The status of paused download becomes paused. +// If the download was active, the download is placed in the front of waiting queue. +// While the status is paused, the download is not started. +// To change status to waiting, use the aria2.unpause() method. +// This method returns GID of paused download. +func (c *client) Pause(gid string) (g string, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + err = c.Call(aria2Pause, params, &g) + return +} + +// `aria2.pauseAll([secret])` +// This method is equal to calling aria2.pause() for every active/waiting download. +// This methods returns OK. +func (c *client) PauseAll() (ok string, err error) { + params := []string{} + if c.token != "" { + params = append(params, "token:"+c.token) + } + err = c.Call(aria2PauseAll, params, &ok) + return +} + +// `aria2.forcePause([secret, ]gid)` +// This method pauses the download denoted by gid. +// This method behaves just like aria2.pause() except that this method pauses downloads without performing any actions which take time, such as contacting BitTorrent trackers to unregister the download first. 
+func (c *client) ForcePause(gid string) (g string, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + err = c.Call(aria2ForcePause, params, &g) + return +} + +// `aria2.forcePauseAll([secret])` +// This method is equal to calling aria2.forcePause() for every active/waiting download. +// This methods returns OK. +func (c *client) ForcePauseAll() (ok string, err error) { + params := []string{} + if c.token != "" { + params = append(params, "token:"+c.token) + } + err = c.Call(aria2ForcePauseAll, params, &ok) + return +} + +// `aria2.unpause([secret, ]gid)` +// This method changes the status of the download denoted by gid (string) from paused to waiting, making the download eligible to be restarted. +// This method returns the GID of the unpaused download. +func (c *client) Unpause(gid string) (g string, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + err = c.Call(aria2Unpause, params, &g) + return +} + +// `aria2.unpauseAll([secret])` +// This method is equal to calling aria2.unpause() for every active/waiting download. +// This methods returns OK. +func (c *client) UnpauseAll() (ok string, err error) { + params := []string{} + if c.token != "" { + params = append(params, "token:"+c.token) + } + err = c.Call(aria2UnpauseAll, params, &ok) + return +} + +// `aria2.tellStatus([secret, ]gid[, keys])` +// This method returns the progress of the download denoted by gid (string). +// keys is an array of strings. +// If specified, the response contains only keys in the keys array. +// If keys is empty or omitted, the response contains all keys. +// This is useful when you just want specific keys and avoid unnecessary transfers. +// For example, aria2.tellStatus("2089b05ecca3d829", ["gid", "status"]) returns the gid and status keys only. 
+// The response is a struct and contains following keys. Values are strings. +// https://aria2.github.io/manual/en/html/aria2c.html#aria2.tellStatus +func (c *client) TellStatus(gid string, keys ...string) (info StatusInfo, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + if keys != nil { + params = append(params, keys) + } + err = c.Call(aria2TellStatus, params, &info) + return +} + +// `aria2.getUris([secret, ]gid)` +// This method returns the URIs used in the download denoted by gid (string). +// The response is an array of structs and it contains following keys. Values are string. +// uri URI +// status 'used' if the URI is in use. 'waiting' if the URI is still waiting in the queue. +func (c *client) GetURIs(gid string) (infos []URIInfo, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + err = c.Call(aria2GetURIs, params, &infos) + return +} + +// `aria2.getFiles([secret, ]gid)` +// This method returns the file list of the download denoted by gid (string). +// The response is an array of structs which contain following keys. Values are strings. +// https://aria2.github.io/manual/en/html/aria2c.html#aria2.getFiles +func (c *client) GetFiles(gid string) (infos []FileInfo, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + err = c.Call(aria2GetFiles, params, &infos) + return +} + +// `aria2.getPeers([secret, ]gid)` +// This method returns a list peers of the download denoted by gid (string). +// This method is for BitTorrent only. +// The response is an array of structs and contains the following keys. Values are strings. 
+// https://aria2.github.io/manual/en/html/aria2c.html#aria2.getPeers +func (c *client) GetPeers(gid string) (infos []PeerInfo, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + err = c.Call(aria2GetPeers, params, &infos) + return +} + +// `aria2.getServers([secret, ]gid)` +// This method returns currently connected HTTP(S)/FTP/SFTP servers of the download denoted by gid (string). +// The response is an array of structs and contains the following keys. Values are strings. +// https://aria2.github.io/manual/en/html/aria2c.html#aria2.getServers +func (c *client) GetServers(gid string) (infos []ServerInfo, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + err = c.Call(aria2GetServers, params, &infos) + return +} + +// `aria2.tellActive([secret][, keys])` +// This method returns a list of active downloads. +// The response is an array of the same structs as returned by the aria2.tellStatus() method. +// For the keys parameter, please refer to the aria2.tellStatus() method. +func (c *client) TellActive(keys ...string) (infos []StatusInfo, err error) { + params := make([]interface{}, 0, 1) + if c.token != "" { + params = append(params, "token:"+c.token) + } + if keys != nil { + params = append(params, keys) + } + err = c.Call(aria2TellActive, params, &infos) + return +} + +// `aria2.tellWaiting([secret, ]offset, num[, keys])` +// This method returns a list of waiting downloads, including paused ones. +// offset is an integer and specifies the offset from the download waiting at the front. +// num is an integer and specifies the max. number of downloads to be returned. +// For the keys parameter, please refer to the aria2.tellStatus() method. +// If offset is a positive integer, this method returns downloads in the range of [offset, offset + num). 
+// offset can be a negative integer. offset == -1 points last download in the waiting queue and offset == -2 points the download before the last download, and so on. +// Downloads in the response are in reversed order then. +// For example, imagine three downloads "A","B" and "C" are waiting in this order. +// aria2.tellWaiting(0, 1) returns ["A"]. +// aria2.tellWaiting(1, 2) returns ["B", "C"]. +// aria2.tellWaiting(-1, 2) returns ["C", "B"]. +// The response is an array of the same structs as returned by aria2.tellStatus() method. +func (c *client) TellWaiting(offset, num int, keys ...string) (infos []StatusInfo, err error) { + params := make([]interface{}, 0, 3) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, offset) + params = append(params, num) + if keys != nil { + params = append(params, keys) + } + err = c.Call(aria2TellWaiting, params, &infos) + return +} + +// `aria2.tellStopped([secret, ]offset, num[, keys])` +// This method returns a list of stopped downloads. +// offset is an integer and specifies the offset from the least recently stopped download. +// num is an integer and specifies the max. number of downloads to be returned. +// For the keys parameter, please refer to the aria2.tellStatus() method. +// offset and num have the same semantics as described in the aria2.tellWaiting() method. +// The response is an array of the same structs as returned by the aria2.tellStatus() method. +func (c *client) TellStopped(offset, num int, keys ...string) (infos []StatusInfo, err error) { + params := make([]interface{}, 0, 3) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, offset) + params = append(params, num) + if keys != nil { + params = append(params, keys) + } + err = c.Call(aria2TellStopped, params, &infos) + return +} + +// `aria2.changePosition([secret, ]gid, pos, how)` +// This method changes the position of the download denoted by gid in the queue. 
+// pos is an integer. how is a string. +// If how is POS_SET, it moves the download to a position relative to the beginning of the queue. +// If how is POS_CUR, it moves the download to a position relative to the current position. +// If how is POS_END, it moves the download to a position relative to the end of the queue. +// If the destination position is less than 0 or beyond the end of the queue, it moves the download to the beginning or the end of the queue respectively. +// The response is an integer denoting the resulting position. +// For example, if GID#2089b05ecca3d829 is currently in position 3, aria2.changePosition('2089b05ecca3d829', -1, 'POS_CUR') will change its position to 2. Additionally aria2.changePosition('2089b05ecca3d829', 0, 'POS_SET') will change its position to 0 (the beginning of the queue). +func (c *client) ChangePosition(gid string, pos int, how string) (p int, err error) { + params := make([]interface{}, 0, 3) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + params = append(params, pos) + params = append(params, how) + err = c.Call(aria2ChangePosition, params, &p) + return +} + +// `aria2.changeUri([secret, ]gid, fileIndex, delUris, addUris[, position])` +// This method removes the URIs in delUris from and appends the URIs in addUris to download denoted by gid. +// delUris and addUris are lists of strings. +// A download can contain multiple files and URIs are attached to each file. +// fileIndex is used to select which file to remove/attach given URIs. fileIndex is 1-based. +// position is used to specify where URIs are inserted in the existing waiting URI list. position is 0-based. +// When position is omitted, URIs are appended to the back of the list. +// This method first executes the removal and then the addition. +// position is the position after URIs are removed, not the position when this method is called. 
+// When removing an URI, if the same URIs exist in download, only one of them is removed for each URI in delUris. +// In other words, if there are three URIs http://example.org/aria2 and you want remove them all, you have to specify (at least) 3 http://example.org/aria2 in delUris. +// This method returns a list which contains two integers. +// The first integer is the number of URIs deleted. +// The second integer is the number of URIs added. +func (c *client) ChangeURI(gid string, fileindex int, delUris []string, addUris []string, position ...int) (p []int, err error) { + params := make([]interface{}, 0, 5) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + params = append(params, fileindex) + params = append(params, delUris) + params = append(params, addUris) + if position != nil { + params = append(params, position[0]) + } + err = c.Call(aria2ChangeURI, params, &p) + return +} + +// `aria2.getOption([secret, ]gid)` +// This method returns options of the download denoted by gid. +// The response is a struct where keys are the names of options. +// The values are strings. +// Note that this method does not return options which have no default value and have not been set on the command-line, in configuration files or RPC methods. +func (c *client) GetOption(gid string) (m Option, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + err = c.Call(aria2GetOption, params, &m) + return +} + +// `aria2.changeOption([secret, ]gid, options)` +// This method changes options of the download denoted by gid (string) dynamically. options is a struct. 
+// The following options are available for active downloads: +// bt-max-peers +// bt-request-peer-speed-limit +// bt-remove-unselected-file +// force-save +// max-download-limit +// max-upload-limit +// For waiting or paused downloads, in addition to the above options, options listed in Input File subsection are available, except for following options: dry-run, metalink-base-uri, parameterized-uri, pause, piece-length and rpc-save-upload-metadata option. +// This method returns OK for success. +func (c *client) ChangeOption(gid string, option Option) (ok string, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + if option != nil { + params = append(params, option) + } + err = c.Call(aria2ChangeOption, params, &ok) + return +} + +// `aria2.getGlobalOption([secret])` +// This method returns the global options. +// The response is a struct. +// Its keys are the names of options. +// Values are strings. +// Note that this method does not return options which have no default value and have not been set on the command-line, in configuration files or RPC methods. Because global options are used as a template for the options of newly added downloads, the response contains keys returned by the aria2.getOption() method. +func (c *client) GetGlobalOption() (m Option, err error) { + params := []string{} + if c.token != "" { + params = append(params, "token:"+c.token) + } + err = c.Call(aria2GetGlobalOption, params, &m) + return +} + +// `aria2.changeGlobalOption([secret, ]options)` +// This method changes global options dynamically. +// options is a struct. 
+// The following options are available: +// bt-max-open-files +// download-result +// log +// log-level +// max-concurrent-downloads +// max-download-result +// max-overall-download-limit +// max-overall-upload-limit +// save-cookies +// save-session +// server-stat-of +// In addition, options listed in the Input File subsection are available, except for following options: checksum, index-out, out, pause and select-file. +// With the log option, you can dynamically start logging or change log file. +// To stop logging, specify an empty string("") as the parameter value. +// Note that log file is always opened in append mode. +// This method returns OK for success. +func (c *client) ChangeGlobalOption(options Option) (ok string, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, options) + err = c.Call(aria2ChangeGlobalOption, params, &ok) + return +} + +// `aria2.getGlobalStat([secret])` +// This method returns global statistics such as the overall download and upload speeds. +// The response is a struct and contains the following keys. Values are strings. +// downloadSpeed Overall download speed (byte/sec). +// uploadSpeed Overall upload speed(byte/sec). +// numActive The number of active downloads. +// numWaiting The number of waiting downloads. +// numStopped The number of stopped downloads in the current session. +// This value is capped by the --max-download-result option. +// numStoppedTotal The number of stopped downloads in the current session and not capped by the --max-download-result option. +func (c *client) GetGlobalStat() (info GlobalStatInfo, err error) { + params := []string{} + if c.token != "" { + params = append(params, "token:"+c.token) + } + err = c.Call(aria2GetGlobalStat, params, &info) + return +} + +// `aria2.purgeDownloadResult([secret])` +// This method purges completed/error/removed downloads to free memory. +// This method returns OK. 
+func (c *client) PurgeDownloadResult() (ok string, err error) { + params := []string{} + if c.token != "" { + params = append(params, "token:"+c.token) + } + err = c.Call(aria2PurgeDownloadResult, params, &ok) + return +} + +// `aria2.removeDownloadResult([secret, ]gid)` +// This method removes a completed/error/removed download denoted by gid from memory. +// This method returns OK for success. +func (c *client) RemoveDownloadResult(gid string) (ok string, err error) { + params := make([]interface{}, 0, 2) + if c.token != "" { + params = append(params, "token:"+c.token) + } + params = append(params, gid) + err = c.Call(aria2RemoveDownloadResult, params, &ok) + return +} + +// `aria2.getVersion([secret])` +// This method returns the version of aria2 and the list of enabled features. +// The response is a struct and contains following keys. +// version Version number of aria2 as a string. +// enabledFeatures List of enabled features. Each feature is given as a string. +func (c *client) GetVersion() (info VersionInfo, err error) { + params := []string{} + if c.token != "" { + params = append(params, "token:"+c.token) + } + err = c.Call(aria2GetVersion, params, &info) + return +} + +// `aria2.getSessionInfo([secret])` +// This method returns session information. +// The response is a struct and contains following key. +// sessionId Session ID, which is generated each time when aria2 is invoked. +func (c *client) GetSessionInfo() (info SessionInfo, err error) { + params := []string{} + if c.token != "" { + params = append(params, "token:"+c.token) + } + err = c.Call(aria2GetSessionInfo, params, &info) + return +} + +// `aria2.shutdown([secret])` +// This method shutdowns aria2. +// This method returns OK. 
+func (c *client) Shutdown() (ok string, err error) {
+	params := []string{}
+	if c.token != "" {
+		params = append(params, "token:"+c.token)
+	}
+	err = c.Call(aria2Shutdown, params, &ok)
+	return
+}
+
+// `aria2.forceShutdown([secret])`
+// This method shuts down aria2.
+// This method behaves like `aria2.shutdown()` without performing any actions which take time, such as contacting BitTorrent trackers to unregister downloads first.
+// This method returns OK.
+func (c *client) ForceShutdown() (ok string, err error) {
+	params := []string{}
+	if c.token != "" {
+		params = append(params, "token:"+c.token)
+	}
+	err = c.Call(aria2ForceShutdown, params, &ok)
+	return
+}
+
+// `aria2.saveSession([secret])`
+// This method saves the current session to a file specified by the --save-session option.
+// This method returns OK if it succeeds.
+func (c *client) SaveSession() (ok string, err error) {
+	params := []string{}
+	if c.token != "" {
+		params = append(params, "token:"+c.token)
+	}
+	err = c.Call(aria2SaveSession, params, &ok)
+	return
+}
+
+// `system.multicall(methods)`
+// This method encapsulates multiple method calls in a single request.
+// methods is an array of structs.
+// The structs contain two keys: methodName and params.
+// methodName is the method name to call and params is an array containing parameters to the method call.
+// This method returns an array of responses.
+// The elements will be either a one-item array containing the return value of the method call or a fault-element struct if an encapsulated method call fails.
+func (c *client) Multicall(methods []Method) (r []interface{}, err error) {
+	if len(methods) == 0 {
+		err = errInvalidParameter
+		return
+	}
+	err = c.Call(aria2Multicall, methods, &r)
+	return
+}
+
+// `system.listMethods()`
+// This method returns all of the available RPC methods in an array of strings.
+// Unlike other methods, this method does not require the secret token.
+// This is safe because this method just returns the available method names.
+func (c *client) ListMethods() (methods []string, err error) {
+	err = c.Call(aria2ListMethods, []string{}, &methods)
+	return
+}
diff --git a/pkg/aria2/rpc/const.go b/pkg/aria2/rpc/const.go
new file mode 100644
index 0000000..b5d83dd
--- /dev/null
+++ b/pkg/aria2/rpc/const.go
@@ -0,0 +1,39 @@
+package rpc
+
+const (
+	aria2AddURI               = "aria2.addUri"
+	aria2AddTorrent           = "aria2.addTorrent"
+	aria2AddMetalink          = "aria2.addMetalink"
+	aria2Remove               = "aria2.remove"
+	aria2ForceRemove          = "aria2.forceRemove"
+	aria2Pause                = "aria2.pause"
+	aria2PauseAll             = "aria2.pauseAll"
+	aria2ForcePause           = "aria2.forcePause"
+	aria2ForcePauseAll        = "aria2.forcePauseAll"
+	aria2Unpause              = "aria2.unpause"
+	aria2UnpauseAll           = "aria2.unpauseAll"
+	aria2TellStatus           = "aria2.tellStatus"
+	aria2GetURIs              = "aria2.getUris"
+	aria2GetFiles             = "aria2.getFiles"
+	aria2GetPeers             = "aria2.getPeers"
+	aria2GetServers           = "aria2.getServers"
+	aria2TellActive           = "aria2.tellActive"
+	aria2TellWaiting          = "aria2.tellWaiting"
+	aria2TellStopped          = "aria2.tellStopped"
+	aria2ChangePosition       = "aria2.changePosition"
+	aria2ChangeURI            = "aria2.changeUri"
+	aria2GetOption            = "aria2.getOption"
+	aria2ChangeOption         = "aria2.changeOption"
+	aria2GetGlobalOption      = "aria2.getGlobalOption"
+	aria2ChangeGlobalOption   = "aria2.changeGlobalOption"
+	aria2GetGlobalStat        = "aria2.getGlobalStat"
+	aria2PurgeDownloadResult  = "aria2.purgeDownloadResult"
+	aria2RemoveDownloadResult = "aria2.removeDownloadResult"
+	aria2GetVersion           = "aria2.getVersion"
+	aria2GetSessionInfo       = "aria2.getSessionInfo"
+	aria2Shutdown             = "aria2.shutdown"
+	aria2ForceShutdown        = "aria2.forceShutdown"
+	aria2SaveSession          = "aria2.saveSession"
+	aria2Multicall            = "system.multicall"
+	aria2ListMethods          = "system.listMethods"
+)
diff --git a/pkg/aria2/rpc/json2.go b/pkg/aria2/rpc/json2.go
new file mode 100644
index 0000000..3febf7e
--- /dev/null
+++ b/pkg/aria2/rpc/json2.go
@@ -0,0 +1,116 @@
+package 
rpc + +// based on "github.com/gorilla/rpc/v2/json2" + +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +import ( + "bytes" + "encoding/json" + "errors" + "io" +) + +// ---------------------------------------------------------------------------- +// Request and Response +// ---------------------------------------------------------------------------- + +// clientRequest represents a JSON-RPC request sent by a client. +type clientRequest struct { + // JSON-RPC protocol. + Version string `json:"jsonrpc"` + + // A String containing the name of the method to be invoked. + Method string `json:"method"` + + // Object to pass as request parameter to the method. + Params interface{} `json:"params"` + + // The request id. This can be of any type. It is used to match the + // response with the request that it is replying to. + Id uint64 `json:"id"` +} + +// clientResponse represents a JSON-RPC response returned to a client. +type clientResponse struct { + Version string `json:"jsonrpc"` + Result *json.RawMessage `json:"result"` + Error *json.RawMessage `json:"error"` + Id *uint64 `json:"id"` +} + +// EncodeClientRequest encodes parameters for a JSON-RPC client request. 
+func EncodeClientRequest(method string, args interface{}) (*bytes.Buffer, error) { + var buf bytes.Buffer + c := &clientRequest{ + Version: "2.0", + Method: method, + Params: args, + Id: reqid(), + } + if err := json.NewEncoder(&buf).Encode(c); err != nil { + return nil, err + } + return &buf, nil +} + +func (c clientResponse) decode(reply interface{}) error { + if c.Error != nil { + jsonErr := &Error{} + if err := json.Unmarshal(*c.Error, jsonErr); err != nil { + return &Error{ + Code: E_SERVER, + Message: string(*c.Error), + } + } + return jsonErr + } + + if c.Result == nil { + return ErrNullResult + } + + return json.Unmarshal(*c.Result, reply) +} + +// DecodeClientResponse decodes the response body of a client request into +// the interface reply. +func DecodeClientResponse(r io.Reader, reply interface{}) error { + var c clientResponse + if err := json.NewDecoder(r).Decode(&c); err != nil { + return err + } + return c.decode(reply) +} + +type ErrorCode int + +const ( + E_PARSE ErrorCode = -32700 + E_INVALID_REQ ErrorCode = -32600 + E_NO_METHOD ErrorCode = -32601 + E_BAD_PARAMS ErrorCode = -32602 + E_INTERNAL ErrorCode = -32603 + E_SERVER ErrorCode = -32000 +) + +var ErrNullResult = errors.New("result is null") + +type Error struct { + // A Number that indicates the error type that occurred. + Code ErrorCode `json:"code"` /* required */ + + // A String providing a short description of the error. + // The message SHOULD be limited to a concise single sentence. + Message string `json:"message"` /* required */ + + // A Primitive or Structured value that contains additional information about the error. 
+ Data interface{} `json:"data"` /* optional */ +} + +func (e *Error) Error() string { + return e.Message +} diff --git a/pkg/aria2/rpc/notification.go b/pkg/aria2/rpc/notification.go new file mode 100644 index 0000000..ebca91e --- /dev/null +++ b/pkg/aria2/rpc/notification.go @@ -0,0 +1,44 @@ +package rpc + +import ( + "log" +) + +type Event struct { + Gid string `json:"gid"` // GID of the download +} + +// The RPC server might send notifications to the client. +// Notifications is unidirectional, therefore the client which receives the notification must not respond to it. +// The method signature of a notification is much like a normal method request but lacks the id key + +type websocketResponse struct { + clientResponse + Method string `json:"method"` + Params []Event `json:"params"` +} + +// Notifier handles rpc notification from aria2 server +type Notifier interface { + // OnDownloadStart will be sent when a download is started. + OnDownloadStart([]Event) + // OnDownloadPause will be sent when a download is paused. + OnDownloadPause([]Event) + // OnDownloadStop will be sent when a download is stopped by the user. + OnDownloadStop([]Event) + // OnDownloadComplete will be sent when a download is complete. For BitTorrent downloads, this notification is sent when the download is complete and seeding is over. + OnDownloadComplete([]Event) + // OnDownloadError will be sent when a download is stopped due to an error. + OnDownloadError([]Event) + // OnBtDownloadComplete will be sent when a torrent download is complete but seeding is still going on. 
+ OnBtDownloadComplete([]Event) +} + +type DummyNotifier struct{} + +func (DummyNotifier) OnDownloadStart(events []Event) { log.Printf("%s started.", events) } +func (DummyNotifier) OnDownloadPause(events []Event) { log.Printf("%s paused.", events) } +func (DummyNotifier) OnDownloadStop(events []Event) { log.Printf("%s stopped.", events) } +func (DummyNotifier) OnDownloadComplete(events []Event) { log.Printf("%s completed.", events) } +func (DummyNotifier) OnDownloadError(events []Event) { log.Printf("%s error.", events) } +func (DummyNotifier) OnBtDownloadComplete(events []Event) { log.Printf("bt %s completed.", events) } diff --git a/pkg/aria2/rpc/proc.go b/pkg/aria2/rpc/proc.go new file mode 100644 index 0000000..0184e6d --- /dev/null +++ b/pkg/aria2/rpc/proc.go @@ -0,0 +1,42 @@ +package rpc + +import "sync" + +type ResponseProcFn func(resp clientResponse) error + +type ResponseProcessor struct { + cbs map[uint64]ResponseProcFn + mu *sync.RWMutex +} + +func NewResponseProcessor() *ResponseProcessor { + return &ResponseProcessor{ + make(map[uint64]ResponseProcFn), + &sync.RWMutex{}, + } +} + +func (r *ResponseProcessor) Add(id uint64, fn ResponseProcFn) { + r.mu.Lock() + r.cbs[id] = fn + r.mu.Unlock() +} + +func (r *ResponseProcessor) remove(id uint64) { + r.mu.Lock() + delete(r.cbs, id) + r.mu.Unlock() +} + +// Process called by recv routine +func (r *ResponseProcessor) Process(resp clientResponse) error { + id := *resp.Id + r.mu.RLock() + fn, ok := r.cbs[id] + r.mu.RUnlock() + if ok && fn != nil { + defer r.remove(id) + return fn(resp) + } + return nil +} diff --git a/pkg/aria2/rpc/proto.go b/pkg/aria2/rpc/proto.go new file mode 100644 index 0000000..178fa6b --- /dev/null +++ b/pkg/aria2/rpc/proto.go @@ -0,0 +1,40 @@ +package rpc + +// Protocol is a set of rpc methods that aria2 daemon supports +type Protocol interface { + AddURI(uri string, options ...interface{}) (gid string, err error) + AddTorrent(filename string, options ...interface{}) (gid string, err 
error) + AddMetalink(filename string, options ...interface{}) (gid []string, err error) + Remove(gid string) (g string, err error) + ForceRemove(gid string) (g string, err error) + Pause(gid string) (g string, err error) + PauseAll() (ok string, err error) + ForcePause(gid string) (g string, err error) + ForcePauseAll() (ok string, err error) + Unpause(gid string) (g string, err error) + UnpauseAll() (ok string, err error) + TellStatus(gid string, keys ...string) (info StatusInfo, err error) + GetURIs(gid string) (infos []URIInfo, err error) + GetFiles(gid string) (infos []FileInfo, err error) + GetPeers(gid string) (infos []PeerInfo, err error) + GetServers(gid string) (infos []ServerInfo, err error) + TellActive(keys ...string) (infos []StatusInfo, err error) + TellWaiting(offset, num int, keys ...string) (infos []StatusInfo, err error) + TellStopped(offset, num int, keys ...string) (infos []StatusInfo, err error) + ChangePosition(gid string, pos int, how string) (p int, err error) + ChangeURI(gid string, fileindex int, delUris []string, addUris []string, position ...int) (p []int, err error) + GetOption(gid string) (m Option, err error) + ChangeOption(gid string, option Option) (ok string, err error) + GetGlobalOption() (m Option, err error) + ChangeGlobalOption(options Option) (ok string, err error) + GetGlobalStat() (info GlobalStatInfo, err error) + PurgeDownloadResult() (ok string, err error) + RemoveDownloadResult(gid string) (ok string, err error) + GetVersion() (info VersionInfo, err error) + GetSessionInfo() (info SessionInfo, err error) + Shutdown() (ok string, err error) + ForceShutdown() (ok string, err error) + SaveSession() (ok string, err error) + Multicall(methods []Method) (r []interface{}, err error) + ListMethods() (methods []string, err error) +} diff --git a/pkg/aria2/rpc/resp.go b/pkg/aria2/rpc/resp.go new file mode 100644 index 0000000..3614228 --- /dev/null +++ b/pkg/aria2/rpc/resp.go @@ -0,0 +1,104 @@ +//go:generate easyjson -all + 
+package rpc + +// StatusInfo represents response of aria2.tellStatus +type StatusInfo struct { + Gid string `json:"gid"` // GID of the download. + Status string `json:"status"` // active for currently downloading/seeding downloads. waiting for downloads in the queue; download is not started. paused for paused downloads. error for downloads that were stopped because of error. complete for stopped and completed downloads. removed for the downloads removed by user. + TotalLength string `json:"totalLength"` // Total length of the download in bytes. + CompletedLength string `json:"completedLength"` // Completed length of the download in bytes. + UploadLength string `json:"uploadLength"` // Uploaded length of the download in bytes. + BitField string `json:"bitfield"` // Hexadecimal representation of the download progress. The highest bit corresponds to the piece at index 0. Any set bits indicate loaded pieces, while unset bits indicate not yet loaded and/or missing pieces. Any overflow bits at the end are set to zero. When the download was not started yet, this key will not be included in the response. + DownloadSpeed string `json:"downloadSpeed"` // Download speed of this download measured in bytes/sec. + UploadSpeed string `json:"uploadSpeed"` // LocalUpload speed of this download measured in bytes/sec. + InfoHash string `json:"infoHash"` // InfoHash. BitTorrent only. + NumSeeders string `json:"numSeeders"` // The number of seeders aria2 has connected to. BitTorrent only. + Seeder string `json:"seeder"` // true if the local endpoint is a seeder. Otherwise false. BitTorrent only. + PieceLength string `json:"pieceLength"` // Piece length in bytes. + NumPieces string `json:"numPieces"` // The number of pieces. + Connections string `json:"connections"` // The number of peers/servers aria2 has connected to. + ErrorCode string `json:"errorCode"` // The code of the last error for this item, if any. The value is a string. 
The error codes are defined in the EXIT STATUS section. This value is only available for stopped/completed downloads. + ErrorMessage string `json:"errorMessage"` // The (hopefully) human readable error message associated to errorCode. + FollowedBy []string `json:"followedBy"` // List of GIDs which are generated as the result of this download. For example, when aria2 downloads a Metalink file, it generates downloads described in the Metalink (see the --follow-metalink option). This value is useful to track auto-generated downloads. If there are no such downloads, this key will not be included in the response. + BelongsTo string `json:"belongsTo"` // GID of a parent download. Some downloads are a part of another download. For example, if a file in a Metalink has BitTorrent resources, the downloads of ".torrent" files are parts of that parent. If this download has no parent, this key will not be included in the response. + Dir string `json:"dir"` // Directory to save files. + Files []FileInfo `json:"files"` // Returns the list of files. The elements of this list are the same structs used in aria2.getFiles() method. + BitTorrent BitTorrentInfo `json:"bittorrent"` // Struct which contains information retrieved from the .torrent (file). BitTorrent only. It contains following keys. +} + +// URIInfo represents an element of response of aria2.getUris +type URIInfo struct { + URI string `json:"uri"` // URI + Status string `json:"status"` // 'used' if the URI is in use. 'waiting' if the URI is still waiting in the queue. +} + +// FileInfo represents an element of response of aria2.getFiles +type FileInfo struct { + Index string `json:"index"` // Index of the file, starting at 1, in the same order as files appear in the multi-file torrent. + Path string `json:"path"` // File path. + Length string `json:"length"` // File size in bytes. + CompletedLength string `json:"completedLength"` // Completed length of this file in bytes. 
Please note that it is possible that sum of completedLength is less than the completedLength returned by the aria2.tellStatus() method. This is because completedLength in aria2.getFiles() only includes completed pieces. On the other hand, completedLength in aria2.tellStatus() also includes partially completed pieces. + Selected string `json:"selected"` // true if this file is selected by --select-file option. If --select-file is not specified or this is single-file torrent or not a torrent download at all, this value is always true. Otherwise false. + URIs []URIInfo `json:"uris"` // Returns a list of URIs for this file. The element type is the same struct used in the aria2.getUris() method. +} + +// PeerInfo represents an element of response of aria2.getPeers +type PeerInfo struct { + PeerId string `json:"peerId"` // Percent-encoded peer ID. + IP string `json:"ip"` // IP address of the peer. + Port string `json:"port"` // Port number of the peer. + BitField string `json:"bitfield"` // Hexadecimal representation of the download progress of the peer. The highest bit corresponds to the piece at index 0. Set bits indicate the piece is available and unset bits indicate the piece is missing. Any spare bits at the end are set to zero. + AmChoking string `json:"amChoking"` // true if aria2 is choking the peer. Otherwise false. + PeerChoking string `json:"peerChoking"` // true if the peer is choking aria2. Otherwise false. + DownloadSpeed string `json:"downloadSpeed"` // Download speed (byte/sec) that this client obtains from the peer. + UploadSpeed string `json:"uploadSpeed"` // LocalUpload speed(byte/sec) that this client uploads to the peer. + Seeder string `json:"seeder"` // true if this peer is a seeder. Otherwise false. +} + +// ServerInfo represents an element of response of aria2.getServers +type ServerInfo struct { + Index string `json:"index"` // Index of the file, starting at 1, in the same order as files appear in the multi-file metalink. 
+ Servers []struct { + URI string `json:"uri"` // Original URI. + CurrentURI string `json:"currentUri"` // This is the URI currently used for downloading. If redirection is involved, currentUri and uri may differ. + DownloadSpeed string `json:"downloadSpeed"` // Download speed (byte/sec) + } `json:"servers"` // A list of structs which contain the following keys. +} + +// GlobalStatInfo represents response of aria2.getGlobalStat +type GlobalStatInfo struct { + DownloadSpeed string `json:"downloadSpeed"` // Overall download speed (byte/sec). + UploadSpeed string `json:"uploadSpeed"` // Overall upload speed(byte/sec). + NumActive string `json:"numActive"` // The number of active downloads. + NumWaiting string `json:"numWaiting"` // The number of waiting downloads. + NumStopped string `json:"numStopped"` // The number of stopped downloads in the current session. This value is capped by the --max-download-result option. + NumStoppedTotal string `json:"numStoppedTotal"` // The number of stopped downloads in the current session and not capped by the --max-download-result option. +} + +// VersionInfo represents response of aria2.getVersion +type VersionInfo struct { + Version string `json:"version"` // Version number of aria2 as a string. + Features []string `json:"enabledFeatures"` // List of enabled features. Each feature is given as a string. +} + +// SessionInfo represents response of aria2.getSessionInfo +type SessionInfo struct { + Id string `json:"sessionId"` // Session ID, which is generated each time when aria2 is invoked. +} + +// Method is an element of parameters used in system.multicall +type Method struct { + Name string `json:"methodName"` // Method name to call + Params []interface{} `json:"params"` // Array containing parameters to the method call +} + +type BitTorrentInfo struct { + AnnounceList [][]string `json:"announceList"` // List of lists of announce URIs. 
If the torrent contains announce and no announce-list, announce is converted to the announce-list format. + Comment string `json:"comment"` // The comment of the torrent. comment.utf-8 is used if available. + CreationDate int64 `json:"creationDate"` // The creation time of the torrent. The value is an integer since the epoch, measured in seconds. + Mode string `json:"mode"` // File mode of the torrent. The value is either single or multi. + Info struct { + Name string `json:"name"` // name in info dictionary. name.utf-8 is used if available. + } `json:"info"` // Struct which contains data from Info dictionary. It contains following keys. +} diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go new file mode 100644 index 0000000..32a7e91 --- /dev/null +++ b/pkg/auth/auth.go @@ -0,0 +1,145 @@ +package auth + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strings" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +var ( + ErrAuthFailed = serializer.NewError(serializer.CodeInvalidSign, "invalid sign", nil) + ErrAuthHeaderMissing = serializer.NewError(serializer.CodeNoPermissionErr, "authorization header is missing", nil) + ErrExpiresMissing = serializer.NewError(serializer.CodeNoPermissionErr, "expire timestamp is missing", nil) + ErrExpired = serializer.NewError(serializer.CodeSignExpired, "signature expired", nil) +) + +const CrHeaderPrefix = "X-Cr-" + +// General 通用的认证接口 +var General Auth + +// Auth 鉴权认证 +type Auth interface { + // 对给定Body进行签名,expires为0表示永不过期 + Sign(body string, expires int64) string + // 对给定Body和Sign进行检查 + Check(body string, sign string) error +} + +// SignRequest 对PUT\POST等复杂HTTP请求签名,只会对URI部分、 +// 请求正文、`X-Cr-`开头的header进行签名 +func SignRequest(instance Auth, r *http.Request, expires int64) *http.Request { + // 处理有效期 + if expires > 0 { + expires += 
time.Now().Unix() + } + + // 生成签名 + sign := instance.Sign(getSignContent(r), expires) + + // 将签名加到请求Header中 + r.Header["Authorization"] = []string{"Bearer " + sign} + return r +} + +// CheckRequest 对复杂请求进行签名验证 +func CheckRequest(instance Auth, r *http.Request) error { + var ( + sign []string + ok bool + ) + if sign, ok = r.Header["Authorization"]; !ok || len(sign) == 0 { + return ErrAuthHeaderMissing + } + sign[0] = strings.TrimPrefix(sign[0], "Bearer ") + + return instance.Check(getSignContent(r), sign[0]) +} + +// getSignContent 签名请求 path、正文、以`X-`开头的 Header. 如果请求 path 为从机上传 API, +// 则不对正文签名。返回待签名/验证的字符串 +func getSignContent(r *http.Request) (rawSignString string) { + // 读取所有body正文 + var body = []byte{} + if !strings.Contains(r.URL.Path, "/api/v3/slave/upload/") { + if r.Body != nil { + body, _ = ioutil.ReadAll(r.Body) + _ = r.Body.Close() + r.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } + + // 决定要签名的header + var signedHeader []string + for k, _ := range r.Header { + if strings.HasPrefix(k, CrHeaderPrefix) && k != CrHeaderPrefix+"Filename" { + signedHeader = append(signedHeader, fmt.Sprintf("%s=%s", k, r.Header.Get(k))) + } + } + sort.Strings(signedHeader) + + // 读取所有待签名Header + rawSignString = serializer.NewRequestSignString(r.URL.Path, strings.Join(signedHeader, "&"), string(body)) + + return rawSignString +} + +// SignURI 对URI进行签名,签名只针对Path部分,query部分不做验证 +func SignURI(instance Auth, uri string, expires int64) (*url.URL, error) { + // 处理有效期 + if expires != 0 { + expires += time.Now().Unix() + } + + base, err := url.Parse(uri) + if err != nil { + return nil, err + } + + // 生成签名 + sign := instance.Sign(base.Path, expires) + + // 将签名加到URI中 + queries := base.Query() + queries.Set("sign", sign) + base.RawQuery = queries.Encode() + + return base, nil +} + +// CheckURI 对URI进行鉴权 +func CheckURI(instance Auth, url *url.URL) error { + //获取待验证的签名正文 + queries := url.Query() + sign := queries.Get("sign") + queries.Del("sign") + url.RawQuery = queries.Encode() + + 
return instance.Check(url.Path, sign) +} + +// Init 初始化通用鉴权器 +func Init() { + var secretKey string + if conf.SystemConfig.Mode == "master" { + secretKey = model.GetSettingByName("secret_key") + } else { + secretKey = conf.SlaveConfig.Secret + if secretKey == "" { + util.Log().Panic("SlaveSecret is not set, please specify it in config file.") + } + } + General = HMACAuth{ + SecretKey: []byte(secretKey), + } +} diff --git a/pkg/auth/hmac.go b/pkg/auth/hmac.go new file mode 100644 index 0000000..50849cc --- /dev/null +++ b/pkg/auth/hmac.go @@ -0,0 +1,54 @@ +package auth + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "io" + "strconv" + "strings" + "time" +) + +// HMACAuth HMAC算法鉴权 +type HMACAuth struct { + SecretKey []byte +} + +// Sign 对给定Body生成expires后失效的签名,expires为过期时间戳, +// 填写为0表示不限制有效期 +func (auth HMACAuth) Sign(body string, expires int64) string { + h := hmac.New(sha256.New, auth.SecretKey) + expireTimeStamp := strconv.FormatInt(expires, 10) + _, err := io.WriteString(h, body+":"+expireTimeStamp) + if err != nil { + return "" + } + + return base64.URLEncoding.EncodeToString(h.Sum(nil)) + ":" + expireTimeStamp +} + +// Check 对给定Body和Sign进行鉴权,包括对expires的检查 +func (auth HMACAuth) Check(body string, sign string) error { + signSlice := strings.Split(sign, ":") + // 如果未携带expires字段 + if signSlice[len(signSlice)-1] == "" { + return ErrExpiresMissing + } + + // 验证是否过期 + expires, err := strconv.ParseInt(signSlice[len(signSlice)-1], 10, 64) + if err != nil { + return ErrAuthFailed.WithError(err) + } + // 如果签名过期 + if expires < time.Now().Unix() && expires != 0 { + return ErrExpired + } + + // 验证签名 + if auth.Sign(body, expires) != sign { + return ErrAuthFailed + } + return nil +} diff --git a/pkg/authn/auth.go b/pkg/authn/auth.go new file mode 100644 index 0000000..5c5b4b7 --- /dev/null +++ b/pkg/authn/auth.go @@ -0,0 +1,16 @@ +package authn + +import ( + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/duo-labs/webauthn/webauthn" +) + +// 
NewAuthnInstance 新建Authn实例 +func NewAuthnInstance() (*webauthn.WebAuthn, error) { + base := model.GetSiteURL() + return webauthn.New(&webauthn.Config{ + RPDisplayName: model.GetSettingByName("siteName"), // Display Name for your site + RPID: base.Hostname(), // Generally the FQDN for your site + RPOrigin: base.String(), // The origin URL for WebAuthn requests + }) +} diff --git a/pkg/balancer/balancer.go b/pkg/balancer/balancer.go new file mode 100644 index 0000000..5d5c028 --- /dev/null +++ b/pkg/balancer/balancer.go @@ -0,0 +1,15 @@ +package balancer + +type Balancer interface { + NextPeer(nodes interface{}) (error, interface{}) +} + +// NewBalancer 根据策略标识返回新的负载均衡器 +func NewBalancer(strategy string) Balancer { + switch strategy { + case "RoundRobin": + return &RoundRobin{} + default: + return &RoundRobin{} + } +} diff --git a/pkg/balancer/errors.go b/pkg/balancer/errors.go new file mode 100644 index 0000000..aef7b1f --- /dev/null +++ b/pkg/balancer/errors.go @@ -0,0 +1,8 @@ +package balancer + +import "errors" + +var ( + ErrInputNotSlice = errors.New("Input value is not silice") + ErrNoAvaliableNode = errors.New("No nodes avaliable") +) diff --git a/pkg/balancer/roundrobin.go b/pkg/balancer/roundrobin.go new file mode 100644 index 0000000..cf300f5 --- /dev/null +++ b/pkg/balancer/roundrobin.go @@ -0,0 +1,30 @@ +package balancer + +import ( + "reflect" + "sync/atomic" +) + +type RoundRobin struct { + current uint64 +} + +// NextPeer 返回轮盘的下一节点 +func (r *RoundRobin) NextPeer(nodes interface{}) (error, interface{}) { + v := reflect.ValueOf(nodes) + if v.Kind() != reflect.Slice { + return ErrInputNotSlice, nil + } + + if v.Len() == 0 { + return ErrNoAvaliableNode, nil + } + + next := r.NextIndex(v.Len()) + return nil, v.Index(next).Interface() +} + +// NextIndex 返回下一个节点下标 +func (r *RoundRobin) NextIndex(total int) int { + return int(atomic.AddUint64(&r.current, uint64(1)) % uint64(total)) +} diff --git a/pkg/cache/driver.go b/pkg/cache/driver.go new file mode 100644 
index 0000000..4c86b47 --- /dev/null +++ b/pkg/cache/driver.go @@ -0,0 +1,104 @@ +package cache + +import ( + "encoding/gob" + + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gin-gonic/gin" +) + +func init() { + gob.Register(map[string]itemWithTTL{}) +} + +// Store 缓存存储器 +var Store Driver = NewMemoStore() + +// Init 初始化缓存 +func Init() { + if conf.RedisConfig.Server != "" && gin.Mode() != gin.TestMode { + Store = NewRedisStore( + 10, + conf.RedisConfig.Network, + conf.RedisConfig.Server, + conf.RedisConfig.User, + conf.RedisConfig.Password, + conf.RedisConfig.DB, + ) + } +} + +// Restore restores cache from given disk file +func Restore(persistFile string) { + if err := Store.Restore(persistFile); err != nil { + util.Log().Warning("Failed to restore cache from disk: %s", err) + } +} + +func InitSlaveOverwrites() { + err := Store.Sets(conf.OptionOverwrite, "setting_") + if err != nil { + util.Log().Warning("Failed to overwrite database setting: %s", err) + } +} + +// Driver 键值缓存存储容器 +type Driver interface { + // 设置值,ttl为过期时间,单位为秒 + Set(key string, value interface{}, ttl int) error + + // 取值,并返回是否成功 + Get(key string) (interface{}, bool) + + // 批量取值,返回成功取值的map即不存在的值 + Gets(keys []string, prefix string) (map[string]interface{}, []string) + + // 批量设置值,所有的key都会加上prefix前缀 + Sets(values map[string]interface{}, prefix string) error + + // 删除值 + Delete(keys []string, prefix string) error + + // Save in-memory cache to disk + Persist(path string) error + + // Restore cache from disk + Restore(path string) error +} + +// Set 设置缓存值 +func Set(key string, value interface{}, ttl int) error { + return Store.Set(key, value, ttl) +} + +// Get 获取缓存值 +func Get(key string) (interface{}, bool) { + return Store.Get(key) +} + +// Deletes 删除值 +func Deletes(keys []string, prefix string) error { + return Store.Delete(keys, prefix) +} + +// GetSettings 根据名称批量获取设置项缓存 +func GetSettings(keys []string, prefix string) (map[string]string, 
[]string) { + raw, miss := Store.Gets(keys, prefix) + + res := make(map[string]string, len(raw)) + for k, v := range raw { + res[k] = v.(string) + } + + return res, miss +} + +// SetSettings 批量设置站点设置缓存 +func SetSettings(values map[string]string, prefix string) error { + var toBeSet = make(map[string]interface{}, len(values)) + for key, value := range values { + toBeSet[key] = interface{}(value) + } + return Store.Sets(toBeSet, prefix) +} diff --git a/pkg/cache/memo.go b/pkg/cache/memo.go new file mode 100644 index 0000000..f9dcf97 --- /dev/null +++ b/pkg/cache/memo.go @@ -0,0 +1,181 @@ +package cache + +import ( + "encoding/gob" + "fmt" + "os" + "sync" + "time" + + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// MemoStore 内存存储驱动 +type MemoStore struct { + Store *sync.Map +} + +// item 存储的对象 +type itemWithTTL struct { + Expires int64 + Value interface{} +} + +const DefaultCacheFile = "cache_persist.bin" + +func newItem(value interface{}, expires int) itemWithTTL { + expires64 := int64(expires) + if expires > 0 { + expires64 = time.Now().Unix() + expires64 + } + return itemWithTTL{ + Value: value, + Expires: expires64, + } +} + +// getValue 从itemWithTTL中取值 +func getValue(item interface{}, ok bool) (interface{}, bool) { + if !ok { + return nil, ok + } + + var itemObj itemWithTTL + if itemObj, ok = item.(itemWithTTL); !ok { + return item, true + } + + if itemObj.Expires > 0 && itemObj.Expires < time.Now().Unix() { + return nil, false + } + + return itemObj.Value, ok + +} + +// GarbageCollect 回收已过期的缓存 +func (store *MemoStore) GarbageCollect() { + store.Store.Range(func(key, value interface{}) bool { + if item, ok := value.(itemWithTTL); ok { + if item.Expires > 0 && item.Expires < time.Now().Unix() { + util.Log().Debug("Cache %q is garbage collected.", key.(string)) + store.Store.Delete(key) + } + } + return true + }) +} + +// NewMemoStore 新建内存存储 +func NewMemoStore() *MemoStore { + return &MemoStore{ + Store: &sync.Map{}, + } +} + +// Set 存储值 +func (store 
*MemoStore) Set(key string, value interface{}, ttl int) error { + store.Store.Store(key, newItem(value, ttl)) + return nil +} + +// Get 取值 +func (store *MemoStore) Get(key string) (interface{}, bool) { + return getValue(store.Store.Load(key)) +} + +// Gets 批量取值 +func (store *MemoStore) Gets(keys []string, prefix string) (map[string]interface{}, []string) { + var res = make(map[string]interface{}) + var notFound = make([]string, 0, len(keys)) + + for _, key := range keys { + if value, ok := getValue(store.Store.Load(prefix + key)); ok { + res[key] = value + } else { + notFound = append(notFound, key) + } + } + + return res, notFound +} + +// Sets 批量设置值 +func (store *MemoStore) Sets(values map[string]interface{}, prefix string) error { + for key, value := range values { + store.Store.Store(prefix+key, newItem(value, 0)) + } + return nil +} + +// Delete 批量删除值 +func (store *MemoStore) Delete(keys []string, prefix string) error { + for _, key := range keys { + store.Store.Delete(prefix + key) + } + return nil +} + +// Persist write memory store into cache +func (store *MemoStore) Persist(path string) error { + persisted := make(map[string]itemWithTTL) + store.Store.Range(func(key, value interface{}) bool { + v, ok := store.Store.Load(key) + if _, ok := getValue(v, ok); ok { + persisted[key.(string)] = v.(itemWithTTL) + } + + return true + }) + + res, err := serializer(persisted) + if err != nil { + return fmt.Errorf("failed to serialize cache: %s", err) + } + + // err = os.WriteFile(path, res, 0644) + file, err := util.CreatNestedFile(path) + if err == nil { + _, err = file.Write(res) + file.Chmod(0644) + file.Close() + } + return err +} + +// Restore memory cache from disk file +func (store *MemoStore) Restore(path string) error { + if !util.Exists(path) { + return nil + } + + f, err := os.Open(path) + if err != nil { + return fmt.Errorf("failed to read cache file: %s", err) + } + + defer func() { + f.Close() + os.Remove(path) + }() + + persisted := &item{} + dec := 
gob.NewDecoder(f) + if err := dec.Decode(&persisted); err != nil { + return fmt.Errorf("unknown cache file format: %s", err) + } + + items := persisted.Value.(map[string]itemWithTTL) + loaded := 0 + for k, v := range items { + if _, ok := getValue(v, true); ok { + loaded++ + store.Store.Store(k, v) + } else { + util.Log().Debug("Persisted cache %q is expired.", k) + } + } + + util.Log().Info("Restored %d items from %q into memory cache.", loaded, path) + return nil +} diff --git a/pkg/cache/redis.go b/pkg/cache/redis.go new file mode 100644 index 0000000..5c776a0 --- /dev/null +++ b/pkg/cache/redis.go @@ -0,0 +1,227 @@ +package cache + +import ( + "bytes" + "encoding/gob" + "strconv" + "time" + + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gomodule/redigo/redis" +) + +// RedisStore redis存储驱动 +type RedisStore struct { + pool *redis.Pool +} + +type item struct { + Value interface{} +} + +func serializer(value interface{}) ([]byte, error) { + var buffer bytes.Buffer + enc := gob.NewEncoder(&buffer) + storeValue := item{ + Value: value, + } + err := enc.Encode(storeValue) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +func deserializer(value []byte) (interface{}, error) { + var res item + buffer := bytes.NewReader(value) + dec := gob.NewDecoder(buffer) + err := dec.Decode(&res) + if err != nil { + return nil, err + } + return res.Value, nil +} + +// NewRedisStore 创建新的redis存储 +func NewRedisStore(size int, network, address, user, password, database string) *RedisStore { + return &RedisStore{ + pool: &redis.Pool{ + MaxIdle: size, + IdleTimeout: 240 * time.Second, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + _, err := c.Do("PING") + return err + }, + Dial: func() (redis.Conn, error) { + db, err := strconv.Atoi(database) + if err != nil { + return nil, err + } + + c, err := redis.Dial( + network, + address, + redis.DialDatabase(db), + redis.DialUsername(user), + redis.DialPassword(password), + ) + if err != nil { + 
util.Log().Panic("Failed to create Redis connection: %s", err) + } + return c, nil + }, + }, + } +} + +// Set 存储值 +func (store *RedisStore) Set(key string, value interface{}, ttl int) error { + rc := store.pool.Get() + defer rc.Close() + + serialized, err := serializer(value) + if err != nil { + return err + } + + if rc.Err() != nil { + return rc.Err() + } + + if ttl > 0 { + _, err = rc.Do("SETEX", key, ttl, serialized) + } else { + _, err = rc.Do("SET", key, serialized) + } + + if err != nil { + return err + } + return nil + +} + +// Get 取值 +func (store *RedisStore) Get(key string) (interface{}, bool) { + rc := store.pool.Get() + defer rc.Close() + if rc.Err() != nil { + return nil, false + } + + v, err := redis.Bytes(rc.Do("GET", key)) + if err != nil || v == nil { + return nil, false + } + + finalValue, err := deserializer(v) + if err != nil { + return nil, false + } + + return finalValue, true + +} + +// Gets 批量取值 +func (store *RedisStore) Gets(keys []string, prefix string) (map[string]interface{}, []string) { + rc := store.pool.Get() + defer rc.Close() + if rc.Err() != nil { + return nil, keys + } + + var queryKeys = make([]string, len(keys)) + for key, value := range keys { + queryKeys[key] = prefix + value + } + + v, err := redis.ByteSlices(rc.Do("MGET", redis.Args{}.AddFlat(queryKeys)...)) + if err != nil { + return nil, keys + } + + var res = make(map[string]interface{}) + var missed = make([]string, 0, len(keys)) + + for key, value := range v { + decoded, err := deserializer(value) + if err != nil || decoded == nil { + missed = append(missed, keys[key]) + } else { + res[keys[key]] = decoded + } + } + // 解码所得值 + return res, missed +} + +// Sets 批量设置值 +func (store *RedisStore) Sets(values map[string]interface{}, prefix string) error { + rc := store.pool.Get() + defer rc.Close() + if rc.Err() != nil { + return rc.Err() + } + var setValues = make(map[string]interface{}) + + // 编码待设置值 + for key, value := range values { + serialized, err := serializer(value) + 
if err != nil { + return err + } + setValues[prefix+key] = serialized + } + + _, err := rc.Do("MSET", redis.Args{}.AddFlat(setValues)...) + if err != nil { + return err + } + return nil + +} + +// Delete 批量删除给定的键 +func (store *RedisStore) Delete(keys []string, prefix string) error { + rc := store.pool.Get() + defer rc.Close() + if rc.Err() != nil { + return rc.Err() + } + + // 处理前缀 + for i := 0; i < len(keys); i++ { + keys[i] = prefix + keys[i] + } + + _, err := rc.Do("DEL", redis.Args{}.AddFlat(keys)...) + if err != nil { + return err + } + return nil +} + +// DeleteAll 批量所有键 +func (store *RedisStore) DeleteAll() error { + rc := store.pool.Get() + defer rc.Close() + if rc.Err() != nil { + return rc.Err() + } + + _, err := rc.Do("FLUSHDB") + + return err +} + +// Persist Dummy implementation +func (store *RedisStore) Persist(path string) error { + return nil +} + +// Restore dummy implementation +func (store *RedisStore) Restore(path string) error { + return nil +} diff --git a/pkg/cluster/controller.go b/pkg/cluster/controller.go new file mode 100644 index 0000000..1e8417c --- /dev/null +++ b/pkg/cluster/controller.go @@ -0,0 +1,210 @@ +package cluster + +import ( + "bytes" + "encoding/gob" + "fmt" + "net/url" + "sync" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/common" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/rpc" + "github.com/cloudreve/Cloudreve/v3/pkg/auth" + "github.com/cloudreve/Cloudreve/v3/pkg/mq" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/jinzhu/gorm" +) + +var DefaultController Controller + +// Controller controls communications between master and slave +type Controller interface { + // Handle heartbeat sent from master + HandleHeartBeat(*serializer.NodePingReq) (serializer.NodePingResp, error) + + // Get Aria2 Instance by master node ID + GetAria2Instance(string) (common.Aria2, error) + + // Send event change message to 
master node + SendNotification(string, string, mq.Message) error + + // Submit async task into task pool + SubmitTask(string, interface{}, string, func(interface{})) error + + // Get master node info + GetMasterInfo(string) (*MasterInfo, error) + + // Get master Oauth based policy credential + GetPolicyOauthToken(string, uint) (string, error) +} + +type slaveController struct { + masters map[string]MasterInfo + lock sync.RWMutex +} + +// info of master node +type MasterInfo struct { + ID string + TTL int + URL *url.URL + // used to invoke aria2 rpc calls + Instance Node + Client request.Client + + jobTracker map[string]bool +} + +func InitController() { + DefaultController = &slaveController{ + masters: make(map[string]MasterInfo), + } + gob.Register(rpc.StatusInfo{}) +} + +func (c *slaveController) HandleHeartBeat(req *serializer.NodePingReq) (serializer.NodePingResp, error) { + c.lock.Lock() + defer c.lock.Unlock() + + req.Node.AfterFind() + + // close old node if exist + origin, ok := c.masters[req.SiteID] + + if (ok && req.IsUpdate) || !ok { + if ok { + origin.Instance.Kill() + } + + masterUrl, err := url.Parse(req.SiteURL) + if err != nil { + return serializer.NodePingResp{}, err + } + + c.masters[req.SiteID] = MasterInfo{ + ID: req.SiteID, + URL: masterUrl, + TTL: req.CredentialTTL, + Client: request.NewClient( + request.WithEndpoint(masterUrl.String()), + request.WithSlaveMeta(fmt.Sprintf("%d", req.Node.ID)), + request.WithCredential(auth.HMACAuth{ + SecretKey: []byte(req.Node.MasterKey), + }, int64(req.CredentialTTL)), + ), + jobTracker: make(map[string]bool), + Instance: NewNodeFromDBModel(&model.Node{ + Model: gorm.Model{ID: req.Node.ID}, + MasterKey: req.Node.MasterKey, + Type: model.MasterNodeType, + Aria2Enabled: req.Node.Aria2Enabled, + Aria2OptionsSerialized: req.Node.Aria2OptionsSerialized, + }), + } + } + + return serializer.NodePingResp{}, nil +} + +func (c *slaveController) GetAria2Instance(id string) (common.Aria2, error) { + c.lock.RLock() + 
defer c.lock.RUnlock() + + if node, ok := c.masters[id]; ok { + return node.Instance.GetAria2Instance(), nil + } + + return nil, ErrMasterNotFound +} + +func (c *slaveController) SendNotification(id, subject string, msg mq.Message) error { + c.lock.RLock() + + if node, ok := c.masters[id]; ok { + c.lock.RUnlock() + + body := bytes.Buffer{} + enc := gob.NewEncoder(&body) + if err := enc.Encode(&msg); err != nil { + return err + } + + res, err := node.Client.Request( + "PUT", + fmt.Sprintf("/api/v3/slave/notification/%s", subject), + &body, + ).CheckHTTPResponse(200).DecodeResponse() + if err != nil { + return err + } + + if res.Code != 0 { + return serializer.NewErrorFromResponse(res) + } + + return nil + } + + c.lock.RUnlock() + return ErrMasterNotFound +} + +// SubmitTask 提交异步任务 +func (c *slaveController) SubmitTask(id string, job interface{}, hash string, submitter func(interface{})) error { + c.lock.Lock() + defer c.lock.Unlock() + + if node, ok := c.masters[id]; ok { + if _, ok := node.jobTracker[hash]; ok { + // 任务已存在,直接返回 + return nil + } + + node.jobTracker[hash] = true + submitter(job) + return nil + } + + return ErrMasterNotFound +} + +// GetMasterInfo 获取主机节点信息 +func (c *slaveController) GetMasterInfo(id string) (*MasterInfo, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + if node, ok := c.masters[id]; ok { + return &node, nil + } + + return nil, ErrMasterNotFound +} + +// GetPolicyOauthToken 获取主机存储策略 Oauth 凭证 +func (c *slaveController) GetPolicyOauthToken(id string, policyID uint) (string, error) { + c.lock.RLock() + + if node, ok := c.masters[id]; ok { + c.lock.RUnlock() + + res, err := node.Client.Request( + "GET", + fmt.Sprintf("/api/v3/slave/credential/%d", policyID), + nil, + ).CheckHTTPResponse(200).DecodeResponse() + if err != nil { + return "", err + } + + if res.Code != 0 { + return "", serializer.NewErrorFromResponse(res) + } + + return res.Data.(string), nil + } + + c.lock.RUnlock() + return "", ErrMasterNotFound +} diff --git 
a/pkg/cluster/errors.go b/pkg/cluster/errors.go new file mode 100644 index 0000000..acd21d3 --- /dev/null +++ b/pkg/cluster/errors.go @@ -0,0 +1,12 @@ +package cluster + +import ( + "errors" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" +) + +var ( + ErrFeatureNotExist = errors.New("No nodes in nodepool match the feature specified") + ErrIlegalPath = errors.New("path out of boundary of setting temp folder") + ErrMasterNotFound = serializer.NewError(serializer.CodeMasterNotFound, "Unknown master node id", nil) +) diff --git a/pkg/cluster/master.go b/pkg/cluster/master.go new file mode 100644 index 0000000..9c3dc61 --- /dev/null +++ b/pkg/cluster/master.go @@ -0,0 +1,272 @@ +package cluster + +import ( + "context" + "encoding/json" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/common" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/rpc" + "github.com/cloudreve/Cloudreve/v3/pkg/auth" + "github.com/cloudreve/Cloudreve/v3/pkg/mq" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gofrs/uuid" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" +) + +const ( + deleteTempFileDuration = 60 * time.Second + statusRetryDuration = 10 * time.Second +) + +type MasterNode struct { + Model *model.Node + aria2RPC rpcService + lock sync.RWMutex +} + +// RPCService 通过RPC服务的Aria2任务管理器 +type rpcService struct { + Caller rpc.Client + Initialized bool + + retryDuration time.Duration + deletePaddingDuration time.Duration + parent *MasterNode + options *clientOptions +} + +type clientOptions struct { + Options map[string]interface{} // 创建下载时额外添加的设置 +} + +// Init 初始化节点 +func (node *MasterNode) Init(nodeModel *model.Node) { + node.lock.Lock() + node.Model = nodeModel + node.aria2RPC.parent = node + node.aria2RPC.retryDuration = statusRetryDuration + node.aria2RPC.deletePaddingDuration = deleteTempFileDuration + node.lock.Unlock() + + 
node.lock.RLock() + if node.Model.Aria2Enabled { + node.lock.RUnlock() + node.aria2RPC.Init() + return + } + node.lock.RUnlock() +} + +func (node *MasterNode) ID() uint { + node.lock.RLock() + defer node.lock.RUnlock() + + return node.Model.ID +} + +func (node *MasterNode) Ping(req *serializer.NodePingReq) (*serializer.NodePingResp, error) { + return &serializer.NodePingResp{}, nil +} + +// IsFeatureEnabled 查询节点的某项功能是否启用 +func (node *MasterNode) IsFeatureEnabled(feature string) bool { + node.lock.RLock() + defer node.lock.RUnlock() + + switch feature { + case "aria2": + return node.Model.Aria2Enabled + default: + return false + } +} + +func (node *MasterNode) MasterAuthInstance() auth.Auth { + node.lock.RLock() + defer node.lock.RUnlock() + + return auth.HMACAuth{SecretKey: []byte(node.Model.MasterKey)} +} + +func (node *MasterNode) SlaveAuthInstance() auth.Auth { + node.lock.RLock() + defer node.lock.RUnlock() + + return auth.HMACAuth{SecretKey: []byte(node.Model.SlaveKey)} +} + +// SubscribeStatusChange 订阅节点状态更改 +func (node *MasterNode) SubscribeStatusChange(callback func(isActive bool, id uint)) { +} + +// IsActive 返回节点是否在线 +func (node *MasterNode) IsActive() bool { + return true +} + +// Kill 结束aria2请求 +func (node *MasterNode) Kill() { + if node.aria2RPC.Caller != nil { + node.aria2RPC.Caller.Close() + } +} + +// GetAria2Instance 获取主机Aria2实例 +func (node *MasterNode) GetAria2Instance() common.Aria2 { + node.lock.RLock() + + if !node.Model.Aria2Enabled { + node.lock.RUnlock() + return &common.DummyAria2{} + } + + if !node.aria2RPC.Initialized { + node.lock.RUnlock() + node.aria2RPC.Init() + return &common.DummyAria2{} + } + + defer node.lock.RUnlock() + return &node.aria2RPC +} + +func (node *MasterNode) IsMater() bool { + return true +} + +func (node *MasterNode) DBModel() *model.Node { + node.lock.RLock() + defer node.lock.RUnlock() + + return node.Model +} + +func (r *rpcService) Init() error { + r.parent.lock.Lock() + defer r.parent.lock.Unlock() + 
r.Initialized = false + + // 客户端已存在,则关闭先前连接 + if r.Caller != nil { + r.Caller.Close() + } + + // 解析RPC服务地址 + server, err := url.Parse(r.parent.Model.Aria2OptionsSerialized.Server) + if err != nil { + util.Log().Warning("Failed to parse Aria2 RPC server URL: %s", err) + return err + } + server.Path = "/jsonrpc" + + // 加载自定义下载配置 + var globalOptions map[string]interface{} + if r.parent.Model.Aria2OptionsSerialized.Options != "" { + err = json.Unmarshal([]byte(r.parent.Model.Aria2OptionsSerialized.Options), &globalOptions) + if err != nil { + util.Log().Warning("Failed to parse aria2 options: %s", err) + return err + } + } + + r.options = &clientOptions{ + Options: globalOptions, + } + timeout := r.parent.Model.Aria2OptionsSerialized.Timeout + caller, err := rpc.New(context.Background(), server.String(), r.parent.Model.Aria2OptionsSerialized.Token, time.Duration(timeout)*time.Second, mq.GlobalMQ) + + r.Caller = caller + r.Initialized = err == nil + return err +} + +func (r *rpcService) CreateTask(task *model.Download, groupOptions map[string]interface{}) (string, error) { + r.parent.lock.RLock() + // 生成存储路径 + guid, _ := uuid.NewV4() + path := filepath.Join( + r.parent.Model.Aria2OptionsSerialized.TempPath, + "aria2", + guid.String(), + ) + r.parent.lock.RUnlock() + + // 创建下载任务 + options := map[string]interface{}{ + "dir": path, + } + for k, v := range r.options.Options { + options[k] = v + } + for k, v := range groupOptions { + options[k] = v + } + + gid, err := r.Caller.AddURI(task.Source, options) + if err != nil || gid == "" { + return "", err + } + + return gid, nil +} + +func (r *rpcService) Status(task *model.Download) (rpc.StatusInfo, error) { + res, err := r.Caller.TellStatus(task.GID) + if err != nil { + // 失败后重试 + util.Log().Debug("Failed to get download task status, please retry later: %s", err) + time.Sleep(r.retryDuration) + res, err = r.Caller.TellStatus(task.GID) + } + + return res, err +} + +func (r *rpcService) Cancel(task *model.Download) error { + // 
取消下载任务 + _, err := r.Caller.Remove(task.GID) + if err != nil { + util.Log().Warning("Failed to cancel task %q: %s", task.GID, err) + } + + return err +} + +func (r *rpcService) Select(task *model.Download, files []int) error { + var selected = make([]string, len(files)) + for i := 0; i < len(files); i++ { + selected[i] = strconv.Itoa(files[i]) + } + _, err := r.Caller.ChangeOption(task.GID, map[string]interface{}{"select-file": strings.Join(selected, ",")}) + return err +} + +func (r *rpcService) GetConfig() model.Aria2Option { + r.parent.lock.RLock() + defer r.parent.lock.RUnlock() + + return r.parent.Model.Aria2OptionsSerialized +} + +func (s *rpcService) DeleteTempFile(task *model.Download) error { + s.parent.lock.RLock() + defer s.parent.lock.RUnlock() + + // 避免被aria2占用,异步执行删除 + go func(d time.Duration, src string) { + time.Sleep(d) + err := os.RemoveAll(src) + if err != nil { + util.Log().Warning("Failed to delete temp download folder: %q: %s", src, err) + } + }(s.deletePaddingDuration, task.Parent) + + return nil +} diff --git a/pkg/cluster/node.go b/pkg/cluster/node.go new file mode 100644 index 0000000..745dd25 --- /dev/null +++ b/pkg/cluster/node.go @@ -0,0 +1,60 @@ +package cluster + +import ( + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/common" + "github.com/cloudreve/Cloudreve/v3/pkg/auth" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" +) + +type Node interface { + // Init a node from database model + Init(node *model.Node) + + // Check if given feature is enabled + IsFeatureEnabled(feature string) bool + + // Subscribe node status change to a callback function + SubscribeStatusChange(callback func(isActive bool, id uint)) + + // Ping the node + Ping(req *serializer.NodePingReq) (*serializer.NodePingResp, error) + + // Returns if the node is active + IsActive() bool + + // Get instances for aria2 calls + GetAria2Instance() common.Aria2 + + // Returns unique id of this node + ID() uint + + // 
Kill node and recycle resources + Kill() + + // Returns if current node is master node + IsMater() bool + + // Get auth instance used to check RPC call from slave to master + MasterAuthInstance() auth.Auth + + // Get auth instance used to check RPC call from master to slave + SlaveAuthInstance() auth.Auth + + // Get node DB model + DBModel() *model.Node +} + +// Create new node from DB model +func NewNodeFromDBModel(node *model.Node) Node { + switch node.Type { + case model.SlaveNodeType: + slave := &SlaveNode{} + slave.Init(node) + return slave + default: + master := &MasterNode{} + master.Init(node) + return master + } +} diff --git a/pkg/cluster/pool.go b/pkg/cluster/pool.go new file mode 100644 index 0000000..a70186f --- /dev/null +++ b/pkg/cluster/pool.go @@ -0,0 +1,213 @@ +package cluster + +import ( + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/balancer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/samber/lo" + "sync" +) + +var Default *NodePool + +// 需要分类的节点组 +var featureGroup = []string{"aria2"} + +// Pool 节点池 +type Pool interface { + // Returns active node selected by given feature and load balancer + BalanceNodeByFeature(feature string, lb balancer.Balancer, available []uint, prefer uint) (error, Node) + + // Returns node by ID + GetNodeByID(id uint) Node + + // Add given node into pool. If node existed, refresh node. 
+ Add(node *model.Node) + + // Delete and kill node from pool by given node id + Delete(id uint) +} + +// NodePool 通用节点池 +type NodePool struct { + active map[uint]Node + inactive map[uint]Node + + featureMap map[string][]Node + + lock sync.RWMutex +} + +// Init 初始化从机节点池 +func Init() { + Default = &NodePool{} + Default.Init() + if err := Default.initFromDB(); err != nil { + util.Log().Warning("Failed to initialize node pool: %s", err) + } +} + +func (pool *NodePool) Init() { + pool.lock.Lock() + defer pool.lock.Unlock() + + pool.featureMap = make(map[string][]Node) + pool.active = make(map[uint]Node) + pool.inactive = make(map[uint]Node) +} + +func (pool *NodePool) buildIndexMap() { + pool.lock.Lock() + for _, feature := range featureGroup { + pool.featureMap[feature] = make([]Node, 0) + } + + for _, v := range pool.active { + for _, feature := range featureGroup { + if v.IsFeatureEnabled(feature) { + pool.featureMap[feature] = append(pool.featureMap[feature], v) + } + } + } + pool.lock.Unlock() +} + +func (pool *NodePool) GetNodeByID(id uint) Node { + pool.lock.RLock() + defer pool.lock.RUnlock() + + if node, ok := pool.active[id]; ok { + return node + } + + return pool.inactive[id] +} + +func (pool *NodePool) nodeStatusChange(isActive bool, id uint) { + util.Log().Debug("Slave node [ID=%d] status changed to [Active=%t].", id, isActive) + var node Node + pool.lock.Lock() + if n, ok := pool.inactive[id]; ok { + node = n + delete(pool.inactive, id) + } else { + node = pool.active[id] + delete(pool.active, id) + } + + if isActive { + pool.active[id] = node + } else { + pool.inactive[id] = node + } + pool.lock.Unlock() + + pool.buildIndexMap() +} + +func (pool *NodePool) initFromDB() error { + nodes, err := model.GetNodesByStatus(model.NodeActive) + if err != nil { + return err + } + + pool.lock.Lock() + for i := 0; i < len(nodes); i++ { + pool.add(&nodes[i]) + } + pool.lock.Unlock() + + pool.buildIndexMap() + return nil +} + +func (pool *NodePool) add(node 
*model.Node) { + newNode := NewNodeFromDBModel(node) + if newNode.IsActive() { + pool.active[node.ID] = newNode + } else { + pool.inactive[node.ID] = newNode + } + + // 订阅节点状态变更 + newNode.SubscribeStatusChange(func(isActive bool, id uint) { + pool.nodeStatusChange(isActive, id) + }) +} + +func (pool *NodePool) Add(node *model.Node) { + pool.lock.Lock() + defer pool.buildIndexMap() + defer pool.lock.Unlock() + + var ( + old Node + ok bool + ) + if old, ok = pool.active[node.ID]; !ok { + old, ok = pool.inactive[node.ID] + } + if old != nil { + go old.Init(node) + return + } + + pool.add(node) +} + +func (pool *NodePool) Delete(id uint) { + pool.lock.Lock() + defer pool.buildIndexMap() + defer pool.lock.Unlock() + + if node, ok := pool.active[id]; ok { + node.Kill() + delete(pool.active, id) + return + } + + if node, ok := pool.inactive[id]; ok { + node.Kill() + delete(pool.inactive, id) + return + } + +} + +// BalanceNodeByFeature 根据 feature 和 LoadBalancer 取出节点 +func (pool *NodePool) BalanceNodeByFeature(feature string, lb balancer.Balancer, + available []uint, prefer uint) (error, Node) { + pool.lock.RLock() + defer pool.lock.RUnlock() + if nodes, ok := pool.featureMap[feature]; ok { + // Find nodes that are allowed to be used in user group + availableNodes := nodes + if len(available) > 0 { + idHash := make(map[uint]struct{}, len(available)) + for _, id := range available { + idHash[id] = struct{}{} + } + + availableNodes = lo.Filter[Node](nodes, func(node Node, index int) bool { + _, exist := idHash[node.ID()] + return exist + }) + } + + // Return preferred node if exists + if preferredNode, found := lo.Find[Node](availableNodes, func(node Node) bool { + return node.ID() == prefer + }); found { + return nil, preferredNode + } + + err, res := lb.NextPeer(availableNodes) + if err == nil { + return nil, res.(Node) + } + + return err, nil + } + + return ErrFeatureNotExist, nil +} diff --git a/pkg/cluster/slave.go b/pkg/cluster/slave.go new file mode 100644 index 
0000000..94d286b --- /dev/null +++ b/pkg/cluster/slave.go @@ -0,0 +1,451 @@ +package cluster + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/common" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/rpc" + "github.com/cloudreve/Cloudreve/v3/pkg/auth" + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "io" + "net/url" + "strings" + "sync" + "time" +) + +type SlaveNode struct { + Model *model.Node + Active bool + + caller slaveCaller + callback func(bool, uint) + close chan bool + lock sync.RWMutex +} + +type slaveCaller struct { + parent *SlaveNode + Client request.Client +} + +// Init 初始化节点 +func (node *SlaveNode) Init(nodeModel *model.Node) { + node.lock.Lock() + node.Model = nodeModel + + // Init http request client + var endpoint *url.URL + if serverURL, err := url.Parse(node.Model.Server); err == nil { + var controller *url.URL + controller, _ = url.Parse("/api/v3/slave/") + endpoint = serverURL.ResolveReference(controller) + } + + signTTL := model.GetIntSetting("slave_api_timeout", 60) + node.caller.Client = request.NewClient( + request.WithMasterMeta(), + request.WithTimeout(time.Duration(signTTL)*time.Second), + request.WithCredential(auth.HMACAuth{SecretKey: []byte(nodeModel.SlaveKey)}, int64(signTTL)), + request.WithEndpoint(endpoint.String()), + ) + + node.caller.parent = node + if node.close != nil { + node.lock.Unlock() + node.close <- true + go node.StartPingLoop() + } else { + node.Active = true + node.lock.Unlock() + go node.StartPingLoop() + } +} + +// IsFeatureEnabled 查询节点的某项功能是否启用 +func (node *SlaveNode) IsFeatureEnabled(feature string) bool { + node.lock.RLock() + defer node.lock.RUnlock() + + switch feature { + case "aria2": + return node.Model.Aria2Enabled + default: + return false + } +} + +// 
SubscribeStatusChange 订阅节点状态更改 +func (node *SlaveNode) SubscribeStatusChange(callback func(bool, uint)) { + node.lock.Lock() + node.callback = callback + node.lock.Unlock() +} + +// Ping 从机节点,返回从机负载 +func (node *SlaveNode) Ping(req *serializer.NodePingReq) (*serializer.NodePingResp, error) { + node.lock.RLock() + defer node.lock.RUnlock() + + reqBodyEncoded, err := json.Marshal(req) + if err != nil { + return nil, err + } + + bodyReader := strings.NewReader(string(reqBodyEncoded)) + + resp, err := node.caller.Client.Request( + "POST", + "heartbeat", + bodyReader, + ).CheckHTTPResponse(200).DecodeResponse() + if err != nil { + return nil, err + } + + // 处理列取结果 + if resp.Code != 0 { + return nil, serializer.NewErrorFromResponse(resp) + } + + var res serializer.NodePingResp + + if resStr, ok := resp.Data.(string); ok { + err = json.Unmarshal([]byte(resStr), &res) + if err != nil { + return nil, err + } + } + + return &res, nil +} + +// IsActive 返回节点是否在线 +func (node *SlaveNode) IsActive() bool { + node.lock.RLock() + defer node.lock.RUnlock() + + return node.Active +} + +// Kill 结束节点内相关循环 +func (node *SlaveNode) Kill() { + node.lock.RLock() + defer node.lock.RUnlock() + + if node.close != nil { + close(node.close) + } +} + +// GetAria2Instance 获取从机Aria2实例 +func (node *SlaveNode) GetAria2Instance() common.Aria2 { + node.lock.RLock() + defer node.lock.RUnlock() + + if !node.Model.Aria2Enabled { + return &common.DummyAria2{} + } + + return &node.caller +} + +func (node *SlaveNode) ID() uint { + node.lock.RLock() + defer node.lock.RUnlock() + + return node.Model.ID +} + +func (node *SlaveNode) StartPingLoop() { + node.lock.Lock() + node.close = make(chan bool) + node.lock.Unlock() + + tickDuration := time.Duration(model.GetIntSetting("slave_ping_interval", 300)) * time.Second + recoverDuration := time.Duration(model.GetIntSetting("slave_recover_interval", 600)) * time.Second + pingTicker := time.Duration(0) + + util.Log().Debug("Slave node %q heartbeat loop started.", 
node.Model.Name) + retry := 0 + recoverMode := false + isFirstLoop := true + +loop: + for { + select { + case <-time.After(pingTicker): + if pingTicker == 0 { + pingTicker = tickDuration + } + + util.Log().Debug("Slave node %q send ping.", node.Model.Name) + res, err := node.Ping(node.getHeartbeatContent(isFirstLoop)) + isFirstLoop = false + + if err != nil { + util.Log().Debug("Error while ping slave node %q: %s", node.Model.Name, err) + retry++ + if retry >= model.GetIntSetting("slave_node_retry", 3) { + util.Log().Debug("Retry threshold for pinging slave node %q exceeded, mark it as offline.", node.Model.Name) + node.changeStatus(false) + + if !recoverMode { + // 启动恢复监控循环 + util.Log().Debug("Slave node %q entered recovery mode.", node.Model.Name) + pingTicker = recoverDuration + recoverMode = true + } + } + } else { + if recoverMode { + util.Log().Debug("Slave node %q recovered.", node.Model.Name) + pingTicker = tickDuration + recoverMode = false + isFirstLoop = true + } + + util.Log().Debug("Status of slave node %q: %s", node.Model.Name, res) + node.changeStatus(true) + retry = 0 + } + + case <-node.close: + util.Log().Debug("Slave node %q received shutdown signal.", node.Model.Name) + break loop + } + } +} + +func (node *SlaveNode) IsMater() bool { + return false +} + +func (node *SlaveNode) MasterAuthInstance() auth.Auth { + node.lock.RLock() + defer node.lock.RUnlock() + + return auth.HMACAuth{SecretKey: []byte(node.Model.MasterKey)} +} + +func (node *SlaveNode) SlaveAuthInstance() auth.Auth { + node.lock.RLock() + defer node.lock.RUnlock() + + return auth.HMACAuth{SecretKey: []byte(node.Model.SlaveKey)} +} + +func (node *SlaveNode) DBModel() *model.Node { + node.lock.RLock() + defer node.lock.RUnlock() + + return node.Model +} + +// getHeartbeatContent gets serializer.NodePingReq used to send heartbeat to slave +func (node *SlaveNode) getHeartbeatContent(isUpdate bool) *serializer.NodePingReq { + return &serializer.NodePingReq{ + SiteURL: 
model.GetSiteURL().String(), + IsUpdate: isUpdate, + SiteID: model.GetSettingByName("siteID"), + Node: node.Model, + CredentialTTL: model.GetIntSetting("slave_api_timeout", 60), + } +} + +func (node *SlaveNode) changeStatus(isActive bool) { + node.lock.RLock() + id := node.Model.ID + if isActive != node.Active { + node.lock.RUnlock() + node.lock.Lock() + node.Active = isActive + node.lock.Unlock() + node.callback(isActive, id) + } else { + node.lock.RUnlock() + } + +} + +func (s *slaveCaller) Init() error { + return nil +} + +// SendAria2Call send remote aria2 call to slave node +func (s *slaveCaller) SendAria2Call(body *serializer.SlaveAria2Call, scope string) (*serializer.Response, error) { + reqReader, err := getAria2RequestBody(body) + if err != nil { + return nil, err + } + + return s.Client.Request( + "POST", + "aria2/"+scope, + reqReader, + ).CheckHTTPResponse(200).DecodeResponse() +} + +func (s *slaveCaller) CreateTask(task *model.Download, options map[string]interface{}) (string, error) { + s.parent.lock.RLock() + defer s.parent.lock.RUnlock() + + req := &serializer.SlaveAria2Call{ + Task: task, + GroupOptions: options, + } + + res, err := s.SendAria2Call(req, "task") + if err != nil { + return "", err + } + + if res.Code != 0 { + return "", serializer.NewErrorFromResponse(res) + } + + return res.Data.(string), err +} + +func (s *slaveCaller) Status(task *model.Download) (rpc.StatusInfo, error) { + s.parent.lock.RLock() + defer s.parent.lock.RUnlock() + + req := &serializer.SlaveAria2Call{ + Task: task, + } + + res, err := s.SendAria2Call(req, "status") + if err != nil { + return rpc.StatusInfo{}, err + } + + if res.Code != 0 { + return rpc.StatusInfo{}, serializer.NewErrorFromResponse(res) + } + + var status rpc.StatusInfo + res.GobDecode(&status) + + return status, err +} + +func (s *slaveCaller) Cancel(task *model.Download) error { + s.parent.lock.RLock() + defer s.parent.lock.RUnlock() + + req := &serializer.SlaveAria2Call{ + Task: task, + } + + res, 
err := s.SendAria2Call(req, "cancel") + if err != nil { + return err + } + + if res.Code != 0 { + return serializer.NewErrorFromResponse(res) + } + + return nil +} + +func (s *slaveCaller) Select(task *model.Download, files []int) error { + s.parent.lock.RLock() + defer s.parent.lock.RUnlock() + + req := &serializer.SlaveAria2Call{ + Task: task, + Files: files, + } + + res, err := s.SendAria2Call(req, "select") + if err != nil { + return err + } + + if res.Code != 0 { + return serializer.NewErrorFromResponse(res) + } + + return nil +} + +func (s *slaveCaller) GetConfig() model.Aria2Option { + s.parent.lock.RLock() + defer s.parent.lock.RUnlock() + + return s.parent.Model.Aria2OptionsSerialized +} + +func (s *slaveCaller) DeleteTempFile(task *model.Download) error { + s.parent.lock.RLock() + defer s.parent.lock.RUnlock() + + req := &serializer.SlaveAria2Call{ + Task: task, + } + + res, err := s.SendAria2Call(req, "delete") + if err != nil { + return err + } + + if res.Code != 0 { + return serializer.NewErrorFromResponse(res) + } + + return nil +} + +func getAria2RequestBody(body *serializer.SlaveAria2Call) (io.Reader, error) { + reqBodyEncoded, err := json.Marshal(body) + if err != nil { + return nil, err + } + + return strings.NewReader(string(reqBodyEncoded)), nil +} + +// RemoteCallback 发送远程存储策略上传回调请求 +func RemoteCallback(url string, body serializer.UploadCallback) error { + callbackBody, err := json.Marshal(struct { + Data serializer.UploadCallback `json:"data"` + }{ + Data: body, + }) + if err != nil { + return serializer.NewError(serializer.CodeCallbackError, "Failed to encode callback content", err) + } + + resp := request.GeneralClient.Request( + "POST", + url, + bytes.NewReader(callbackBody), + request.WithTimeout(time.Duration(conf.SlaveConfig.CallbackTimeout)*time.Second), + request.WithCredential(auth.General, int64(conf.SlaveConfig.SignatureTTL)), + ) + + if resp.Err != nil { + return serializer.NewError(serializer.CodeCallbackError, "Slave cannot send 
callback request", resp.Err) + } + + // 解析回调服务端响应 + response, err := resp.DecodeResponse() + if err != nil { + msg := fmt.Sprintf("Slave cannot parse callback response from master (StatusCode=%d).", resp.Response.StatusCode) + return serializer.NewError(serializer.CodeCallbackError, msg, err) + } + + if response.Code != 0 { + return serializer.NewError(response.Code, response.Msg, errors.New(response.Error)) + } + + return nil +} diff --git a/pkg/conf/conf.go b/pkg/conf/conf.go new file mode 100644 index 0000000..942294b --- /dev/null +++ b/pkg/conf/conf.go @@ -0,0 +1,156 @@ +package conf + +import ( + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/go-ini/ini" + "github.com/go-playground/validator/v10" +) + +// database 数据库 +type database struct { + Type string + User string + Password string + Host string + Name string + TablePrefix string + DBFile string + Port int + Charset string + UnixSocket bool +} + +// system 系统通用配置 +type system struct { + Mode string `validate:"eq=master|eq=slave"` + Listen string `validate:"required"` + Debug bool + SessionSecret string + HashIDSalt string + GracePeriod int `validate:"gte=0"` + ProxyHeader string `validate:"required_with=Listen"` +} + +type ssl struct { + CertPath string `validate:"omitempty,required"` + KeyPath string `validate:"omitempty,required"` + Listen string `validate:"required"` +} + +type unix struct { + Listen string + Perm uint32 +} + +// slave 作为slave存储端配置 +type slave struct { + Secret string `validate:"omitempty,gte=64"` + CallbackTimeout int `validate:"omitempty,gte=1"` + SignatureTTL int `validate:"omitempty,gte=1"` +} + +// redis 配置 +type redis struct { + Network string + Server string + User string + Password string + DB string +} + +// 跨域配置 +type cors struct { + AllowOrigins []string + AllowMethods []string + AllowHeaders []string + AllowCredentials bool + ExposeHeaders []string + SameSite string + Secure bool +} + +var cfg *ini.File + +const defaultConf = `[System] +Debug = false +Mode = 
master +Listen = :5212 +SessionSecret = {SessionSecret} +HashIDSalt = {HashIDSalt} +` + +// Init 初始化配置文件 +func Init(path string) { + var err error + + if path == "" || !util.Exists(path) { + // 创建初始配置文件 + confContent := util.Replace(map[string]string{ + "{SessionSecret}": util.RandStringRunes(64), + "{HashIDSalt}": util.RandStringRunes(64), + }, defaultConf) + f, err := util.CreatNestedFile(path) + if err != nil { + util.Log().Panic("Failed to create config file: %s", err) + } + + // 写入配置文件 + _, err = f.WriteString(confContent) + if err != nil { + util.Log().Panic("Failed to write config file: %s", err) + } + + f.Close() + } + + cfg, err = ini.Load(path) + if err != nil { + util.Log().Panic("Failed to parse config file %q: %s", path, err) + } + + sections := map[string]interface{}{ + "Database": DatabaseConfig, + "System": SystemConfig, + "SSL": SSLConfig, + "UnixSocket": UnixConfig, + "Redis": RedisConfig, + "CORS": CORSConfig, + "Slave": SlaveConfig, + } + for sectionName, sectionStruct := range sections { + err = mapSection(sectionName, sectionStruct) + if err != nil { + util.Log().Panic("Failed to parse config section %q: %s", sectionName, err) + } + } + + // 映射数据库配置覆盖 + for _, key := range cfg.Section("OptionOverwrite").Keys() { + OptionOverwrite[key.Name()] = key.Value() + } + + // 重设log等级 + if !SystemConfig.Debug { + util.Level = util.LevelInformational + util.GloablLogger = nil + util.Log() + } + +} + +// mapSection 将配置文件的 Section 映射到结构体上 +func mapSection(section string, confStruct interface{}) error { + err := cfg.Section(section).MapTo(confStruct) + if err != nil { + return err + } + + // 验证合法性 + validate := validator.New() + err = validate.Struct(confStruct) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/conf/defaults.go b/pkg/conf/defaults.go new file mode 100644 index 0000000..4ecfd2b --- /dev/null +++ b/pkg/conf/defaults.go @@ -0,0 +1,55 @@ +package conf + +// RedisConfig Redis服务器配置 +var RedisConfig = &redis{ + Network: "tcp", + 
Server: "", + Password: "", + DB: "0", +} + +// DatabaseConfig 数据库配置 +var DatabaseConfig = &database{ + Type: "UNSET", + Charset: "utf8", + DBFile: "cloudreve.db", + Port: 3306, + UnixSocket: false, +} + +// SystemConfig 系统公用配置 +var SystemConfig = &system{ + Debug: false, + Mode: "master", + Listen: ":5212", + ProxyHeader: "X-Forwarded-For", +} + +// CORSConfig 跨域配置 +var CORSConfig = &cors{ + AllowOrigins: []string{"UNSET"}, + AllowMethods: []string{"PUT", "POST", "GET", "OPTIONS"}, + AllowHeaders: []string{"Cookie", "X-Cr-Policy", "Authorization", "Content-Length", "Content-Type", "X-Cr-Path", "X-Cr-FileName"}, + AllowCredentials: false, + ExposeHeaders: nil, + SameSite: "Default", + Secure: false, +} + +// SlaveConfig 从机配置 +var SlaveConfig = &slave{ + CallbackTimeout: 20, + SignatureTTL: 60, +} + +var SSLConfig = &ssl{ + Listen: ":443", + CertPath: "", + KeyPath: "", +} + +var UnixConfig = &unix{ + Listen: "", +} + +var OptionOverwrite = map[string]interface{}{} diff --git a/pkg/conf/version.go b/pkg/conf/version.go new file mode 100644 index 0000000..fa4bbf3 --- /dev/null +++ b/pkg/conf/version.go @@ -0,0 +1,22 @@ +package conf + +// plusVersion 增强版版本号 +const plusVersion = "+1.1" + +// BackendVersion 当前后端版本号 +const BackendVersion = "3.8.3" + plusVersion + +// KeyVersion 授权版本号 +const KeyVersion = "3.3.1" + +// RequiredDBVersion 与当前版本匹配的数据库版本 +const RequiredDBVersion = "3.8.1+1.0-plus" + +// RequiredStaticVersion 与当前版本匹配的静态资源版本 +const RequiredStaticVersion = "3.8.3" + plusVersion + +// IsPlus 是否为Plus版本 +const IsPlus = "true" + +// LastCommit 最后commit id +const LastCommit = "88409cc" diff --git a/pkg/crontab/collect.go b/pkg/crontab/collect.go new file mode 100644 index 0000000..a5678f6 --- /dev/null +++ b/pkg/crontab/collect.go @@ -0,0 +1,99 @@ +package crontab + +import ( + "context" + "os" + "path/filepath" + "strings" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + 
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +func garbageCollect() { + // 清理打包下载产生的临时文件 + collectArchiveFile() + + // 清理过期的内置内存缓存 + if store, ok := cache.Store.(*cache.MemoStore); ok { + collectCache(store) + } + + util.Log().Info("Crontab job \"cron_garbage_collect\" complete.") +} + +func collectArchiveFile() { + // 读取有效期、目录设置 + tempPath := util.RelativePath(model.GetSettingByName("temp_path")) + expires := model.GetIntSetting("download_timeout", 30) + + // 列出文件 + root := filepath.Join(tempPath, "archive") + err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err == nil && !info.IsDir() && + strings.HasPrefix(filepath.Base(path), "archive_") && + time.Now().Sub(info.ModTime()).Seconds() > float64(expires) { + util.Log().Debug("Delete expired batch download temp file %q.", path) + // 删除符合条件的文件 + if err := os.Remove(path); err != nil { + util.Log().Debug("Failed to delete temp file %q: %s", path, err) + } + } + return nil + }) + + if err != nil { + util.Log().Debug("Crontab job cannot list temp batch download folder: %s", err) + } + +} + +func collectCache(store *cache.MemoStore) { + util.Log().Debug("Cleanup memory cache.") + store.GarbageCollect() +} + +func uploadSessionCollect() { + placeholders := model.GetUploadPlaceholderFiles(0) + + // 将过期的上传会话按照用户分组 + userToFiles := make(map[uint][]uint) + for _, file := range placeholders { + _, sessionExist := cache.Get(filesystem.UploadSessionCachePrefix + *file.UploadSessionID) + if sessionExist { + continue + } + + if _, ok := userToFiles[file.UserID]; !ok { + userToFiles[file.UserID] = make([]uint, 0) + } + + userToFiles[file.UserID] = append(userToFiles[file.UserID], file.ID) + } + + // 删除过期的会话 + for uid, filesIDs := range userToFiles { + user, err := model.GetUserByID(uid) + if err != nil { + util.Log().Warning("Owner of the upload session cannot be found: %s", err) + continue + } + + fs, err := 
filesystem.NewFileSystem(&user)
+		if err != nil {
+			util.Log().Warning("Failed to initialize filesystem: %s", err)
+			continue
+		}
+
+		if err = fs.Delete(context.Background(), []uint{}, filesIDs, false, false); err != nil {
+			util.Log().Warning("Failed to delete upload session: %s", err)
+		}
+
+		fs.Recycle()
+	}
+
+	util.Log().Info("Crontab job \"cron_recycle_upload_session\" complete.")
+}
diff --git a/pkg/crontab/init.go b/pkg/crontab/init.go
new file mode 100644
index 0000000..3583d31
--- /dev/null
+++ b/pkg/crontab/init.go
@@ -0,0 +1,57 @@
+package crontab
+
+import (
+	model "github.com/cloudreve/Cloudreve/v3/models"
+	"github.com/cloudreve/Cloudreve/v3/pkg/util"
+	"github.com/robfig/cron/v3"
+)
+
+// Cron 定时任务
+var Cron *cron.Cron
+
+// Reload 重新启动定时任务
+func Reload() {
+	if Cron != nil {
+		Cron.Stop()
+	}
+	Init()
+}
+
+// Init 初始化定时任务
+func Init() {
+	util.Log().Info("Initialize crontab jobs...")
+	// 读取cron日程设置
+	options := model.GetSettingByNames(
+		"cron_garbage_collect",
+		"cron_notify_user",
+		"cron_ban_user",
+		"cron_recycle_upload_session",
+	)
+	// BUG FIX: was `Cron := cron.New()`, which declared a new local variable
+	// shadowing the package-level Cron. The global stayed nil, so Reload()
+	// could never Stop() the previously started scheduler, leaking duplicate
+	// jobs on every reload. Assign to the package-level variable instead.
+	Cron = cron.New()
+	for k, v := range options {
+		var handler func()
+		switch k {
+		case "cron_garbage_collect":
+			handler = garbageCollect
+		case "cron_notify_user":
+			handler = notifyExpiredVAS
+		case "cron_ban_user":
+			handler = banOverusedUser
+		case "cron_recycle_upload_session":
+			handler = uploadSessionCollect
+		default:
+			util.Log().Warning("Unknown crontab job type %q, skipping...", k)
+			continue
+		}
+
+		if _, err := Cron.AddFunc(v, handler); err != nil {
+			util.Log().Warning("Failed to start crontab job %q: %s", k, err)
+		}
+
+	}
+	Cron.Start()
+}
diff --git a/pkg/crontab/vas.go b/pkg/crontab/vas.go
new file mode 100644
index 0000000..7ce6ae9
--- /dev/null
+++ b/pkg/crontab/vas.go
@@ -0,0 +1,83 @@
+package crontab
+
+import (
+	model "github.com/cloudreve/Cloudreve/v3/models"
+	"github.com/cloudreve/Cloudreve/v3/pkg/email"
+	"github.com/cloudreve/Cloudreve/v3/pkg/util"
+)
+
+func notifyExpiredVAS() 
{ + checkStoragePack() + checkUserGroup() + util.Log().Info("Crontab job \"cron_notify_user\" complete.") +} + +// banOverusedUser 封禁超出宽容期的用户 +func banOverusedUser() { + users := model.GetTolerantExpiredUser() + for _, user := range users { + + // 清除最后通知日期标记 + user.ClearNotified() + + // 检查容量是否超额 + if user.Storage > user.Group.MaxStorage+user.GetAvailablePackSize() { + // 封禁用户 + user.SetStatus(model.OveruseBaned) + } + } +} + +// checkUserGroup 检查已过期用户组 +func checkUserGroup() { + users := model.GetGroupExpiredUsers() + for _, user := range users { + + // 将用户回退到初始用户组 + user.GroupFallback() + + // 重新加载用户 + user, _ = model.GetUserByID(user.ID) + + // 检查容量是否超额 + if user.Storage > user.Group.MaxStorage+user.GetAvailablePackSize() { + // 如果超额,则通知用户 + sendNotification(&user, "用户组过期") + // 更新最后通知日期 + user.Notified() + } + } +} + +// checkStoragePack 检查已过期的容量包 +func checkStoragePack() { + packs := model.GetExpiredStoragePack() + for _, pack := range packs { + // 删除过期的容量包 + pack.Delete() + + //找到所属用户 + user, err := model.GetUserByID(pack.UserID) + if err != nil { + util.Log().Warning("Crontab job failed to get user info of [UID=%d]: %s", pack.UserID, err) + continue + } + + // 检查容量是否超额 + if user.Storage > user.Group.MaxStorage+user.GetAvailablePackSize() { + // 如果超额,则通知用户 + sendNotification(&user, "容量包过期") + + // 更新最后通知日期 + user.Notified() + + } + } +} + +func sendNotification(user *model.User, reason string) { + title, body := email.NewOveruseNotification(user.Nick, reason) + if err := email.Send(user.Email, title, body); err != nil { + util.Log().Warning("Failed to send notification email: %s", err) + } +} diff --git a/pkg/email/init.go b/pkg/email/init.go new file mode 100644 index 0000000..fe83fe3 --- /dev/null +++ b/pkg/email/init.go @@ -0,0 +1,52 @@ +package email + +import ( + "sync" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// Client 默认的邮件发送客户端 +var Client Driver + +// Lock 读写锁 +var Lock sync.RWMutex + 
+// Init 初始化 +func Init() { + util.Log().Debug("Initializing email sending queue...") + Lock.Lock() + defer Lock.Unlock() + + if Client != nil { + Client.Close() + } + + // 读取SMTP设置 + options := model.GetSettingByNames( + "fromName", + "fromAdress", + "smtpHost", + "replyTo", + "smtpUser", + "smtpPass", + "smtpEncryption", + ) + port := model.GetIntSetting("smtpPort", 25) + keepAlive := model.GetIntSetting("mail_keepalive", 30) + + client := NewSMTPClient(SMTPConfig{ + Name: options["fromName"], + Address: options["fromAdress"], + ReplyTo: options["replyTo"], + Host: options["smtpHost"], + Port: port, + User: options["smtpUser"], + Password: options["smtpPass"], + Keepalive: keepAlive, + Encryption: model.IsTrueVal(options["smtpEncryption"]), + }) + + Client = client +} diff --git a/pkg/email/mail.go b/pkg/email/mail.go new file mode 100644 index 0000000..fbcbd68 --- /dev/null +++ b/pkg/email/mail.go @@ -0,0 +1,38 @@ +package email + +import ( + "errors" + "strings" +) + +// Driver 邮件发送驱动 +type Driver interface { + // Close 关闭驱动 + Close() + // Send 发送邮件 + Send(to, title, body string) error +} + +var ( + // ErrChanNotOpen 邮件队列未开启 + ErrChanNotOpen = errors.New("email queue is not started") + // ErrNoActiveDriver 无可用邮件发送服务 + ErrNoActiveDriver = errors.New("no avaliable email provider") +) + +// Send 发送邮件 +func Send(to, title, body string) error { + // 忽略通过QQ登录的邮箱 + if strings.HasSuffix(to, "@login.qq.com") { + return nil + } + + Lock.RLock() + defer Lock.RUnlock() + + if Client == nil { + return ErrNoActiveDriver + } + + return Client.Send(to, title, body) +} diff --git a/pkg/email/smtp.go b/pkg/email/smtp.go new file mode 100644 index 0000000..3845f44 --- /dev/null +++ b/pkg/email/smtp.go @@ -0,0 +1,122 @@ +package email + +import ( + "time" + + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/go-mail/mail" + "github.com/google/uuid" +) + +// SMTP SMTP协议发送邮件 +type SMTP struct { + Config SMTPConfig + ch chan *mail.Message + chOpen bool +} + +// SMTPConfig 
SMTP发送配置 +type SMTPConfig struct { + Name string // 发送者名 + Address string // 发送者地址 + ReplyTo string // 回复地址 + Host string // 服务器主机名 + Port int // 服务器端口 + User string // 用户名 + Password string // 密码 + Encryption bool // 是否启用加密 + Keepalive int // SMTP 连接保留时长 +} + +// NewSMTPClient 新建SMTP发送队列 +func NewSMTPClient(config SMTPConfig) *SMTP { + client := &SMTP{ + Config: config, + ch: make(chan *mail.Message, 30), + chOpen: false, + } + + client.Init() + + return client +} + +// Send 发送邮件 +func (client *SMTP) Send(to, title, body string) error { + if !client.chOpen { + return ErrChanNotOpen + } + m := mail.NewMessage() + m.SetAddressHeader("From", client.Config.Address, client.Config.Name) + m.SetAddressHeader("Reply-To", client.Config.ReplyTo, client.Config.Name) + m.SetHeader("To", to) + m.SetHeader("Subject", title) + m.SetHeader("Message-ID", util.StrConcat(`"<`, uuid.NewString(), `@`, `cloudreveplus`, `>"`)) + m.SetBody("text/html", body) + client.ch <- m + return nil +} + +// Close 关闭发送队列 +func (client *SMTP) Close() { + if client.ch != nil { + close(client.ch) + } +} + +// Init 初始化发送队列 +func (client *SMTP) Init() { + go func() { + defer func() { + if err := recover(); err != nil { + client.chOpen = false + util.Log().Error("Exception while sending email: %s, queue will be reset in 10 seconds.", err) + time.Sleep(time.Duration(10) * time.Second) + client.Init() + } + }() + + d := mail.NewDialer(client.Config.Host, client.Config.Port, client.Config.User, client.Config.Password) + d.Timeout = time.Duration(client.Config.Keepalive+5) * time.Second + client.chOpen = true + // 是否启用 SSL + d.SSL = false + if client.Config.Encryption { + d.SSL = true + } + d.StartTLSPolicy = mail.OpportunisticStartTLS + + var s mail.SendCloser + var err error + open := false + for { + select { + case m, ok := <-client.ch: + if !ok { + util.Log().Debug("Email queue closing...") + client.chOpen = false + return + } + if !open { + if s, err = d.Dial(); err != nil { + panic(err) + } + open = 
true + } + if err := mail.Send(s, m); err != nil { + util.Log().Warning("Failed to send email: %s", err) + } else { + util.Log().Debug("Email sent.") + } + // 长时间没有新邮件,则关闭SMTP连接 + case <-time.After(time.Duration(client.Config.Keepalive) * time.Second): + if open { + if err := s.Close(); err != nil { + util.Log().Warning("Failed to close SMTP connection: %s", err) + } + open = false + } + } + } + }() +} diff --git a/pkg/email/template.go b/pkg/email/template.go new file mode 100644 index 0000000..213e5e3 --- /dev/null +++ b/pkg/email/template.go @@ -0,0 +1,50 @@ +package email + +import ( + "fmt" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// NewOveruseNotification 新建超额提醒邮件 +func NewOveruseNotification(userName, reason string) (string, string) { + options := model.GetSettingByNames("siteName", "siteURL", "siteTitle", "over_used_template") + replace := map[string]string{ + "{siteTitle}": options["siteName"], + "{userName}": userName, + "{notifyReason}": reason, + "{siteUrl}": options["siteURL"], + "{siteSecTitle}": options["siteTitle"], + } + return fmt.Sprintf("【%s】空间容量超额提醒", options["siteName"]), + util.Replace(replace, options["over_used_template"]) +} + +// NewActivationEmail 新建激活邮件 +func NewActivationEmail(userName, activateURL string) (string, string) { + options := model.GetSettingByNames("siteName", "siteURL", "siteTitle", "mail_activation_template") + replace := map[string]string{ + "{siteTitle}": options["siteName"], + "{userName}": userName, + "{activationUrl}": activateURL, + "{siteUrl}": options["siteURL"], + "{siteSecTitle}": options["siteTitle"], + } + return fmt.Sprintf("【%s】注册激活", options["siteName"]), + util.Replace(replace, options["mail_activation_template"]) +} + +// NewResetEmail 新建重设密码邮件 +func NewResetEmail(userName, resetURL string) (string, string) { + options := model.GetSettingByNames("siteName", "siteURL", "siteTitle", "mail_reset_pwd_template") + replace := map[string]string{ + 
"{siteTitle}": options["siteName"], + "{userName}": userName, + "{resetUrl}": resetURL, + "{siteUrl}": options["siteURL"], + "{siteSecTitle}": options["siteTitle"], + } + return fmt.Sprintf("【%s】密码重置", options["siteName"]), + util.Replace(replace, options["mail_reset_pwd_template"]) +} diff --git a/pkg/filesystem/archive.go b/pkg/filesystem/archive.go new file mode 100644 index 0000000..cd3aa83 --- /dev/null +++ b/pkg/filesystem/archive.go @@ -0,0 +1,309 @@ +package filesystem + +import ( + "archive/zip" + "context" + "fmt" + "io" + "os" + "path" + "path/filepath" + "strings" + "sync" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gin-gonic/gin" + "github.com/mholt/archiver/v4" +) + +/* =============== + 压缩/解压缩 + =============== +*/ + +// Compress 创建给定目录和文件的压缩文件 +func (fs *FileSystem) Compress(ctx context.Context, writer io.Writer, folderIDs, fileIDs []uint, isArchive bool) error { + // 查找待压缩目录 + folders, err := model.GetFoldersByIDs(folderIDs, fs.User.ID) + if err != nil && len(folderIDs) != 0 { + return ErrDBListObjects + } + + // 查找待压缩文件 + files, err := model.GetFilesByIDs(fileIDs, fs.User.ID) + if err != nil && len(fileIDs) != 0 { + return ErrDBListObjects + } + + // 如果上下文限制了父目录,则进行检查 + if parent, ok := ctx.Value(fsctx.LimitParentCtx).(*model.Folder); ok { + // 检查目录 + for _, folder := range folders { + if *folder.ParentID != parent.ID { + return ErrObjectNotExist + } + } + + // 检查文件 + for _, file := range files { + if file.FolderID != parent.ID { + return ErrObjectNotExist + } + } + } + + // 尝试获取请求上下文,以便于后续检查用户取消任务 + reqContext := ctx + ginCtx, ok := ctx.Value(fsctx.GinCtx).(*gin.Context) + if ok { + reqContext = ginCtx.Request.Context() + } + + // 将顶级待处理对象的路径设为根路径 + for i := 0; i < len(folders); i++ { + folders[i].Position = "" + } + for i := 0; i < len(files); i++ { + files[i].Position = "" + } + + // 创建压缩文件Writer + zipWriter 
:= zip.NewWriter(writer) + defer zipWriter.Close() + + ctx = reqContext + + // 压缩各个目录及文件 + for i := 0; i < len(folders); i++ { + select { + case <-reqContext.Done(): + // 取消压缩请求 + return ErrClientCanceled + default: + fs.doCompress(reqContext, nil, &folders[i], zipWriter, isArchive) + } + + } + for i := 0; i < len(files); i++ { + select { + case <-reqContext.Done(): + // 取消压缩请求 + return ErrClientCanceled + default: + fs.doCompress(reqContext, &files[i], nil, zipWriter, isArchive) + } + } + + return nil +} + +func (fs *FileSystem) doCompress(ctx context.Context, file *model.File, folder *model.Folder, zipWriter *zip.Writer, isArchive bool) { + // 如果对象是文件 + if file != nil { + // 切换上传策略 + fs.Policy = file.GetPolicy() + err := fs.DispatchHandler() + if err != nil { + util.Log().Warning("Failed to compress file %q: %s", file.Name, err) + return + } + + // 获取文件内容 + fileToZip, err := fs.Handler.Get( + context.WithValue(ctx, fsctx.FileModelCtx, *file), + file.SourceName, + ) + if err != nil { + util.Log().Debug("Failed to open %q: %s", file.Name, err) + return + } + if closer, ok := fileToZip.(io.Closer); ok { + defer closer.Close() + } + + // 创建压缩文件头 + header := &zip.FileHeader{ + Name: filepath.FromSlash(path.Join(file.Position, file.Name)), + Modified: file.UpdatedAt, + UncompressedSize64: file.Size, + } + + // 指定是压缩还是归档 + if isArchive { + header.Method = zip.Store + } else { + header.Method = zip.Deflate + } + + writer, err := zipWriter.CreateHeader(header) + if err != nil { + return + } + + _, err = io.Copy(writer, fileToZip) + } else if folder != nil { + // 对象是目录 + // 获取子文件 + subFiles, err := folder.GetChildFiles() + if err == nil && len(subFiles) > 0 { + for i := 0; i < len(subFiles); i++ { + fs.doCompress(ctx, &subFiles[i], nil, zipWriter, isArchive) + } + + } + // 获取子目录,继续递归遍历 + subFolders, err := folder.GetChildFolder() + if err == nil && len(subFolders) > 0 { + for i := 0; i < len(subFolders); i++ { + fs.doCompress(ctx, nil, &subFolders[i], zipWriter, isArchive) 
+ } + } + } +} + +// Decompress 解压缩给定压缩文件到dst目录 +func (fs *FileSystem) Decompress(ctx context.Context, src, dst, encoding string) error { + err := fs.ResetFileIfNotExist(ctx, src) + if err != nil { + return err + } + + tempZipFilePath := "" + defer func() { + // 结束时删除临时压缩文件 + if tempZipFilePath != "" { + if err := os.Remove(tempZipFilePath); err != nil { + util.Log().Warning("Failed to delete temp archive file %q: %s", tempZipFilePath, err) + } + } + }() + + // 下载压缩文件到临时目录 + fileStream, err := fs.Handler.Get(ctx, fs.FileTarget[0].SourceName) + if err != nil { + return err + } + + defer fileStream.Close() + + tempZipFilePath = filepath.Join( + util.RelativePath(model.GetSettingByName("temp_path")), + "decompress", + fmt.Sprintf("archive_%d.zip", time.Now().UnixNano()), + ) + + zipFile, err := util.CreatNestedFile(tempZipFilePath) + if err != nil { + util.Log().Warning("Failed to create temp archive file %q: %s", tempZipFilePath, err) + tempZipFilePath = "" + return err + } + defer zipFile.Close() + + // 下载前先判断是否是可解压的格式 + format, readStream, err := archiver.Identify(fs.FileTarget[0].SourceName, fileStream) + if err != nil { + util.Log().Warning("Failed to detect compressed format of file %q: %s", fs.FileTarget[0].SourceName, err) + return err + } + + extractor, ok := format.(archiver.Extractor) + if !ok { + return fmt.Errorf("file not an extractor %s", fs.FileTarget[0].SourceName) + } + + // 只有zip格式可以多个文件同时上传 + var isZip bool + switch extractor.(type) { + case archiver.Zip: + extractor = archiver.Zip{TextEncoding: encoding} + isZip = true + } + + // 除了zip必须下载到本地,其余的可以边下载边解压 + reader := readStream + if isZip { + _, err = io.Copy(zipFile, readStream) + if err != nil { + util.Log().Warning("Failed to write temp archive file %q: %s", tempZipFilePath, err) + return err + } + + fileStream.Close() + + // 设置文件偏移量 + zipFile.Seek(0, io.SeekStart) + reader = zipFile + } + + var wg sync.WaitGroup + parallel := model.GetIntSetting("max_parallel_transfer", 4) + worker := make(chan 
int, parallel) + for i := 0; i < parallel; i++ { + worker <- i + } + + // 上传文件函数 + uploadFunc := func(fileStream io.ReadCloser, size int64, savePath, rawPath string) { + defer func() { + if isZip { + worker <- 1 + wg.Done() + } + if err := recover(); err != nil { + util.Log().Warning("Error while uploading files inside of archive file.") + fmt.Println(err) + } + }() + + err := fs.UploadFromStream(ctx, &fsctx.FileStream{ + File: fileStream, + Size: uint64(size), + Name: path.Base(savePath), + VirtualPath: path.Dir(savePath), + }, true) + fileStream.Close() + if err != nil { + util.Log().Debug("Failed to upload file %q in archive file: %s, skipping...", rawPath, err) + } + } + + // 解压缩文件,回调函数如果出错会停止解压的下一步进行,全部return nil + err = extractor.Extract(ctx, reader, nil, func(ctx context.Context, f archiver.File) error { + rawPath := util.FormSlash(f.NameInArchive) + savePath := path.Join(dst, rawPath) + // 路径是否合法 + if !strings.HasPrefix(savePath, util.FillSlash(path.Clean(dst))) { + util.Log().Warning("%s: illegal file path", f.NameInArchive) + return nil + } + + // 如果是目录 + if f.FileInfo.IsDir() { + fs.CreateDirectory(ctx, savePath) + return nil + } + + // 上传文件 + fileStream, err := f.Open() + if err != nil { + util.Log().Warning("Failed to open file %q in archive file: %s, skipping...", rawPath, err) + return nil + } + + if !isZip { + uploadFunc(fileStream, f.FileInfo.Size(), savePath, rawPath) + } else { + <-worker + wg.Add(1) + go uploadFunc(fileStream, f.FileInfo.Size(), savePath, rawPath) + } + return nil + }) + wg.Wait() + return err + +} diff --git a/pkg/filesystem/chunk/backoff/backoff.go b/pkg/filesystem/chunk/backoff/backoff.go new file mode 100644 index 0000000..95cb1b5 --- /dev/null +++ b/pkg/filesystem/chunk/backoff/backoff.go @@ -0,0 +1,74 @@ +package backoff + +import ( + "errors" + "fmt" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "net/http" + "strconv" + "time" +) + +// Backoff used for retry sleep backoff +type Backoff interface { + Next(err error) 
bool + Reset() +} + +// ConstantBackoff implements Backoff interface with constant sleep time. If the error +// is retryable and with `RetryAfter` defined, the `RetryAfter` will be used as sleep duration. +type ConstantBackoff struct { + Sleep time.Duration + Max int + + tried int +} + +func (c *ConstantBackoff) Next(err error) bool { + c.tried++ + if c.tried > c.Max { + return false + } + + var e *RetryableError + if errors.As(err, &e) && e.RetryAfter > 0 { + util.Log().Warning("Retryable error %q occurs in backoff, will sleep after %s.", e, e.RetryAfter) + time.Sleep(e.RetryAfter) + } else { + time.Sleep(c.Sleep) + } + + return true +} + +func (c *ConstantBackoff) Reset() { + c.tried = 0 +} + +type RetryableError struct { + Err error + RetryAfter time.Duration +} + +// NewRetryableErrorFromHeader constructs a new RetryableError from http response header +// and existing error. +func NewRetryableErrorFromHeader(err error, header http.Header) *RetryableError { + retryAfter := header.Get("retry-after") + if retryAfter == "" { + retryAfter = "0" + } + + res := &RetryableError{ + Err: err, + } + + if retryAfterSecond, err := strconv.ParseInt(retryAfter, 10, 64); err == nil { + res.RetryAfter = time.Duration(retryAfterSecond) * time.Second + } + + return res +} + +func (e *RetryableError) Error() string { + return fmt.Sprintf("retryable error with retry-after=%s: %s", e.RetryAfter, e.Err) +} diff --git a/pkg/filesystem/chunk/chunk.go b/pkg/filesystem/chunk/chunk.go new file mode 100644 index 0000000..cf790f6 --- /dev/null +++ b/pkg/filesystem/chunk/chunk.go @@ -0,0 +1,167 @@ +package chunk + +import ( + "context" + "fmt" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/chunk/backoff" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "io" + "os" +) + +const bufferTempPattern = "cdChunk.*.tmp" + +// ChunkProcessFunc callback function for processing a chunk +type 
ChunkProcessFunc func(c *ChunkGroup, chunk io.Reader) error + +// ChunkGroup manage groups of chunks +type ChunkGroup struct { + file fsctx.FileHeader + chunkSize uint64 + backoff backoff.Backoff + enableRetryBuffer bool + + fileInfo *fsctx.UploadTaskInfo + currentIndex int + chunkNum uint64 + bufferTemp *os.File +} + +func NewChunkGroup(file fsctx.FileHeader, chunkSize uint64, backoff backoff.Backoff, useBuffer bool) *ChunkGroup { + c := &ChunkGroup{ + file: file, + chunkSize: chunkSize, + backoff: backoff, + fileInfo: file.Info(), + currentIndex: -1, + enableRetryBuffer: useBuffer, + } + + if c.chunkSize == 0 { + c.chunkSize = c.fileInfo.Size + } + + if c.fileInfo.Size == 0 { + c.chunkNum = 1 + } else { + c.chunkNum = c.fileInfo.Size / c.chunkSize + if c.fileInfo.Size%c.chunkSize != 0 { + c.chunkNum++ + } + } + + return c +} + +// TempAvailable returns if current chunk temp file is available to be read +func (c *ChunkGroup) TempAvailable() bool { + if c.bufferTemp != nil { + state, _ := c.bufferTemp.Stat() + return state != nil && state.Size() == c.Length() + } + + return false +} + +// Process a chunk with retry logic +func (c *ChunkGroup) Process(processor ChunkProcessFunc) error { + reader := io.LimitReader(c.file, c.Length()) + + // If useBuffer is enabled, tee the reader to a temp file + if c.enableRetryBuffer && c.bufferTemp == nil && !c.file.Seekable() { + c.bufferTemp, _ = os.CreateTemp("", bufferTempPattern) + reader = io.TeeReader(reader, c.bufferTemp) + } + + if c.bufferTemp != nil { + defer func() { + if c.bufferTemp != nil { + c.bufferTemp.Close() + os.Remove(c.bufferTemp.Name()) + c.bufferTemp = nil + } + }() + + // if temp buffer file is available, use it + if c.TempAvailable() { + if _, err := c.bufferTemp.Seek(0, io.SeekStart); err != nil { + return fmt.Errorf("failed to seek temp file back to chunk start: %w", err) + } + + util.Log().Debug("Chunk %d will be read from temp file %q.", c.Index(), c.bufferTemp.Name()) + reader = 
io.NopCloser(c.bufferTemp) + } + } + + err := processor(c, reader) + if err != nil { + if c.enableRetryBuffer { + request.BlackHole(reader) + } + + if err != context.Canceled && (c.file.Seekable() || c.TempAvailable()) && c.backoff.Next(err) { + if c.file.Seekable() { + if _, seekErr := c.file.Seek(c.Start(), io.SeekStart); seekErr != nil { + return fmt.Errorf("failed to seek back to chunk start: %w, last error: %s", seekErr, err) + } + } + + util.Log().Debug("Retrying chunk %d, last error: %s", c.currentIndex, err) + return c.Process(processor) + } + + return err + } + + util.Log().Debug("Chunk %d processed", c.currentIndex) + return nil +} + +// Start returns the byte index of current chunk +func (c *ChunkGroup) Start() int64 { + return int64(uint64(c.Index()) * c.chunkSize) +} + +// Total returns the total length +func (c *ChunkGroup) Total() int64 { + return int64(c.fileInfo.Size) +} + +// Num returns the total chunk number +func (c *ChunkGroup) Num() int { + return int(c.chunkNum) +} + +// RangeHeader returns header value of Content-Range +func (c *ChunkGroup) RangeHeader() string { + return fmt.Sprintf("bytes %d-%d/%d", c.Start(), c.Start()+c.Length()-1, c.Total()) +} + +// Index returns current chunk index, starts from 0 +func (c *ChunkGroup) Index() int { + return c.currentIndex +} + +// Next switch to next chunk, returns whether all chunks are processed +func (c *ChunkGroup) Next() bool { + c.currentIndex++ + c.backoff.Reset() + return c.currentIndex < int(c.chunkNum) +} + +// Length returns the length of current chunk +func (c *ChunkGroup) Length() int64 { + contentLength := c.chunkSize + if c.Index() == int(c.chunkNum-1) { + contentLength = c.fileInfo.Size - c.chunkSize*(c.chunkNum-1) + } + + return int64(contentLength) +} + +// IsLast returns if current chunk is the last one +func (c *ChunkGroup) IsLast() bool { + return c.Index() == int(c.chunkNum-1) +} diff --git a/pkg/filesystem/driver/cos/handler.go b/pkg/filesystem/driver/cos/handler.go new file 
mode 100644
index 0000000..50b500c
--- /dev/null
+++ b/pkg/filesystem/driver/cos/handler.go
@@ -0,0 +1,430 @@
+package cos
+
+import (
+	"context"
+	"crypto/hmac"
+	"crypto/sha1"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"path/filepath"
+	"strings"
+	"time"
+
+	model "github.com/cloudreve/Cloudreve/v3/models"
+	"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver"
+	"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx"
+	"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/response"
+	"github.com/cloudreve/Cloudreve/v3/pkg/request"
+	"github.com/cloudreve/Cloudreve/v3/pkg/serializer"
+	"github.com/cloudreve/Cloudreve/v3/pkg/util"
+	"github.com/google/go-querystring/query"
+	cossdk "github.com/tencentyun/cos-go-sdk-v5"
+)
+
+// UploadPolicy 腾讯云COS上传策略
+type UploadPolicy struct {
+	Expiration string        `json:"expiration"`
+	Conditions []interface{} `json:"conditions"`
+}
+
+// MetaData 文件元信息
+type MetaData struct {
+	Size        uint64
+	CallbackKey string
+	CallbackURL string
+}
+
+type urlOption struct {
+	Speed              int    `url:"x-cos-traffic-limit,omitempty"`
+	ContentDescription string `url:"response-content-disposition,omitempty"`
+}
+
+// Driver 腾讯云COS适配器模板
+type Driver struct {
+	Policy     *model.Policy
+	Client     *cossdk.Client
+	HTTPClient request.Client
+}
+
+// List 列出COS文件
+func (handler Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) {
+	// 初始化列目录参数
+	opt := &cossdk.BucketGetOptions{
+		Prefix:       strings.TrimPrefix(base, "/"),
+		EncodingType: "",
+		MaxKeys:      1000,
+	}
+	// 是否为递归列出
+	if !recursive {
+		opt.Delimiter = "/"
+	}
+	// 手动补齐结尾的slash
+	if opt.Prefix != "" {
+		opt.Prefix += "/"
+	}
+
+	var (
+		marker  string
+		objects []cossdk.Object
+		commons []string
+	)
+
+	for {
+		res, _, err := handler.Client.Bucket.Get(ctx, opt)
+		if err != nil {
+			return nil, err
+		}
+		objects = append(objects, res.Contents...)
+		commons = append(commons, res.CommonPrefixes...)
+		// 如果本次未列取完,则继续使用marker获取结果
+		marker = res.NextMarker
+		// BUG FIX: advance the pagination cursor in the request options; the
+		// marker was read but never passed back, so a truncated listing
+		// (>1000 keys) re-fetched the same first page in an infinite loop.
+		opt.Marker = marker
+		// marker 为空时结果列取完毕,跳出
+		if marker == "" {
+			break
+		}
+	}
+
+	// 处理列取结果
+	res := make([]response.Object, 0, len(objects)+len(commons))
+	// 处理目录
+	for _, object := range commons {
+		rel, err := filepath.Rel(opt.Prefix, object)
+		if err != nil {
+			continue
+		}
+		res = append(res, response.Object{
+			Name:         path.Base(object),
+			RelativePath: filepath.ToSlash(rel),
+			Size:         0,
+			IsDir:        true,
+			LastModify:   time.Now(),
+		})
+	}
+	// 处理文件
+	for _, object := range objects {
+		rel, err := filepath.Rel(opt.Prefix, object.Key)
+		if err != nil {
+			continue
+		}
+		res = append(res, response.Object{
+			Name:         path.Base(object.Key),
+			Source:       object.Key,
+			RelativePath: filepath.ToSlash(rel),
+			Size:         uint64(object.Size),
+			IsDir:        false,
+			LastModify:   time.Now(),
+		})
+	}
+
+	return res, nil
+
+}
+
+// CORS 创建跨域策略
+func (handler Driver) CORS() error {
+	_, err := handler.Client.Bucket.PutCORS(context.Background(), &cossdk.BucketPutCORSOptions{
+		Rules: []cossdk.BucketCORSRule{{
+			AllowedMethods: []string{
+				"GET",
+				"POST",
+				"PUT",
+				"DELETE",
+				"HEAD",
+			},
+			AllowedOrigins: []string{"*"},
+			AllowedHeaders: []string{"*"},
+			MaxAgeSeconds:  3600,
+			ExposeHeaders:  []string{},
+		}},
+	})
+
+	return err
+}
+
+// Get 获取文件
+func (handler Driver) Get(ctx context.Context, path string) (response.RSCloser, error) {
+	// 获取文件源地址
+	downloadURL, err := handler.Source(ctx, path, int64(model.GetIntSetting("preview_timeout", 60)), false, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	// 获取文件数据流
+	resp, err := handler.HTTPClient.Request(
+		"GET",
+		downloadURL,
+		nil,
+		request.WithContext(ctx),
+		request.WithTimeout(time.Duration(0)),
+	).CheckHTTPResponse(200).GetRSCloser()
+	if err != nil {
+		return nil, err
+	}
+
+	resp.SetFirstFakeChunk()
+
+	// 尝试自主获取文件大小
+	if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok {
+		resp.SetContentLength(int64(file.Size))
+	}
+
+	return resp, nil
+}
+
+// Put 将文件流保存到指定目录
+func (handler Driver) 
Put(ctx context.Context, file fsctx.FileHeader) error { + defer file.Close() + + opt := &cossdk.ObjectPutOptions{} + _, err := handler.Client.Object.Put(ctx, file.Info().SavePath, file, opt) + return err +} + +// Delete 删除一个或多个文件, +// 返回未删除的文件,及遇到的最后一个错误 +func (handler Driver) Delete(ctx context.Context, files []string) ([]string, error) { + obs := []cossdk.Object{} + for _, v := range files { + obs = append(obs, cossdk.Object{Key: v}) + } + opt := &cossdk.ObjectDeleteMultiOptions{ + Objects: obs, + Quiet: true, + } + + res, _, err := handler.Client.Object.DeleteMulti(context.Background(), opt) + if err != nil { + return files, err + } + + // 整理删除结果 + failed := make([]string, 0, len(files)) + for _, v := range res.Errors { + failed = append(failed, v.Key) + } + + if len(failed) == 0 { + return failed, nil + } + + return failed, errors.New("delete failed") +} + +// Thumb 获取文件缩略图 +func (handler Driver) Thumb(ctx context.Context, file *model.File) (*response.ContentResponse, error) { + // quick check by extension name + // https://cloud.tencent.com/document/product/436/44893 + supported := []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "heif", "heic"} + if len(handler.Policy.OptionsSerialized.ThumbExts) > 0 { + supported = handler.Policy.OptionsSerialized.ThumbExts + } + + if !util.IsInExtensionList(supported, file.Name) || file.Size > (32<<(10*2)) { + return nil, driver.ErrorThumbNotSupported + } + + var ( + thumbSize = [2]uint{400, 300} + ok = false + ) + if thumbSize, ok = ctx.Value(fsctx.ThumbSizeCtx).([2]uint); !ok { + return nil, errors.New("failed to get thumbnail size") + } + + thumbEncodeQuality := model.GetIntSetting("thumb_encode_quality", 85) + + thumbParam := fmt.Sprintf("imageMogr2/thumbnail/%dx%d/quality/%d", thumbSize[0], thumbSize[1], thumbEncodeQuality) + + source, err := handler.signSourceURL( + ctx, + file.SourceName, + int64(model.GetIntSetting("preview_timeout", 60)), + &urlOption{}, + ) + if err != nil { + return nil, err + } + + thumbURL, 
_ := url.Parse(source) + thumbQuery := thumbURL.Query() + thumbQuery.Add(thumbParam, "") + thumbURL.RawQuery = thumbQuery.Encode() + + return &response.ContentResponse{ + Redirect: true, + URL: thumbURL.String(), + }, nil +} + +// Source 获取外链URL +func (handler Driver) Source(ctx context.Context, path string, ttl int64, isDownload bool, speed int) (string, error) { + // 尝试从上下文获取文件名 + fileName := "" + if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok { + fileName = file.Name + } + + // 添加各项设置 + options := urlOption{} + if speed > 0 { + if speed < 819200 { + speed = 819200 + } + if speed > 838860800 { + speed = 838860800 + } + options.Speed = speed + } + if isDownload { + options.ContentDescription = "attachment; filename=\"" + url.PathEscape(fileName) + "\"" + } + + return handler.signSourceURL(ctx, path, ttl, &options) +} + +func (handler Driver) signSourceURL(ctx context.Context, path string, ttl int64, options *urlOption) (string, error) { + cdnURL, err := url.Parse(handler.Policy.BaseURL) + if err != nil { + return "", err + } + + // 公有空间不需要签名 + if !handler.Policy.IsPrivate { + file, err := url.Parse(path) + if err != nil { + return "", err + } + + // 非签名URL不支持设置响应header + options.ContentDescription = "" + + optionQuery, err := query.Values(*options) + if err != nil { + return "", err + } + file.RawQuery = optionQuery.Encode() + sourceURL := cdnURL.ResolveReference(file) + + return sourceURL.String(), nil + } + + presignedURL, err := handler.Client.Object.GetPresignedURL(ctx, http.MethodGet, path, + handler.Policy.AccessKey, handler.Policy.SecretKey, time.Duration(ttl)*time.Second, options) + if err != nil { + return "", err + } + + // 将最终生成的签名URL域名换成用户自定义的加速域名(如果有) + presignedURL.Host = cdnURL.Host + presignedURL.Scheme = cdnURL.Scheme + + return presignedURL.String(), nil +} + +// Token 获取上传策略和认证Token +func (handler Driver) Token(ctx context.Context, ttl int64, uploadSession *serializer.UploadSession, file fsctx.FileHeader) 
(*serializer.UploadCredential, error) { + // 生成回调地址 + siteURL := model.GetSiteURL() + apiBaseURI, _ := url.Parse("/api/v3/callback/cos/" + uploadSession.Key) + apiURL := siteURL.ResolveReference(apiBaseURI).String() + + // 上传策略 + savePath := file.Info().SavePath + startTime := time.Now() + endTime := startTime.Add(time.Duration(ttl) * time.Second) + keyTime := fmt.Sprintf("%d;%d", startTime.Unix(), endTime.Unix()) + postPolicy := UploadPolicy{ + Expiration: endTime.UTC().Format(time.RFC3339), + Conditions: []interface{}{ + map[string]string{"bucket": handler.Policy.BucketName}, + map[string]string{"$key": savePath}, + map[string]string{"x-cos-meta-callback": apiURL}, + map[string]string{"x-cos-meta-key": uploadSession.Key}, + map[string]string{"q-sign-algorithm": "sha1"}, + map[string]string{"q-ak": handler.Policy.AccessKey}, + map[string]string{"q-sign-time": keyTime}, + }, + } + + if handler.Policy.MaxSize > 0 { + postPolicy.Conditions = append(postPolicy.Conditions, + []interface{}{"content-length-range", 0, handler.Policy.MaxSize}) + } + + res, err := handler.getUploadCredential(ctx, postPolicy, keyTime, savePath) + if err == nil { + res.SessionID = uploadSession.Key + res.Callback = apiURL + res.UploadURLs = []string{handler.Policy.Server} + } + + return res, err + +} + +// 取消上传凭证 +func (handler Driver) CancelToken(ctx context.Context, uploadSession *serializer.UploadSession) error { + return nil +} + +// Meta 获取文件信息 +func (handler Driver) Meta(ctx context.Context, path string) (*MetaData, error) { + res, err := handler.Client.Object.Head(ctx, path, &cossdk.ObjectHeadOptions{}) + if err != nil { + return nil, err + } + return &MetaData{ + Size: uint64(res.ContentLength), + CallbackKey: res.Header.Get("x-cos-meta-key"), + CallbackURL: res.Header.Get("x-cos-meta-callback"), + }, nil +} + +func (handler Driver) getUploadCredential(ctx context.Context, policy UploadPolicy, keyTime string, savePath string) (*serializer.UploadCredential, error) { + // 编码上传策略 + 
policyJSON, err := json.Marshal(policy) + if err != nil { + return nil, err + } + policyEncoded := base64.StdEncoding.EncodeToString(policyJSON) + + // 签名上传策略 + hmacSign := hmac.New(sha1.New, []byte(handler.Policy.SecretKey)) + _, err = io.WriteString(hmacSign, keyTime) + if err != nil { + return nil, err + } + signKey := fmt.Sprintf("%x", hmacSign.Sum(nil)) + + sha1Sign := sha1.New() + _, err = sha1Sign.Write(policyJSON) + if err != nil { + return nil, err + } + stringToSign := fmt.Sprintf("%x", sha1Sign.Sum(nil)) + + // 最终签名 + hmacFinalSign := hmac.New(sha1.New, []byte(signKey)) + _, err = hmacFinalSign.Write([]byte(stringToSign)) + if err != nil { + return nil, err + } + signature := hmacFinalSign.Sum(nil) + + return &serializer.UploadCredential{ + Policy: policyEncoded, + Path: savePath, + AccessKey: handler.Policy.AccessKey, + Credential: fmt.Sprintf("%x", signature), + KeyTime: keyTime, + }, nil +} diff --git a/pkg/filesystem/driver/cos/scf.go b/pkg/filesystem/driver/cos/scf.go new file mode 100644 index 0000000..9ddb29c --- /dev/null +++ b/pkg/filesystem/driver/cos/scf.go @@ -0,0 +1,134 @@ +package cos + +import ( + "archive/zip" + "bytes" + "encoding/base64" + "io" + "io/ioutil" + "net/url" + "strconv" + "strings" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/hashid" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common" + "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile" + scf "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/scf/v20180416" +) + +const scfFunc = `# -*- coding: utf8 -*- +# SCF配置COS触发,向 Cloudreve 发送回调 +from qcloud_cos_v5 import CosConfig +from qcloud_cos_v5 import CosS3Client +from qcloud_cos_v5 import CosServiceError +from qcloud_cos_v5 import CosClientError +import sys +import logging +import requests + +logging.basicConfig(level=logging.INFO, stream=sys.stdout) +logger = logging.getLogger() + + +def main_handler(event, context): + 
logger.info("start main handler") + for record in event['Records']: + try: + if "x-cos-meta-callback" not in record['cos']['cosObject']['meta']: + logger.info("Cannot find callback URL, skiped.") + return 'Success' + callback = record['cos']['cosObject']['meta']['x-cos-meta-callback'] + key = record['cos']['cosObject']['key'] + logger.info("Callback URL is " + callback) + + r = requests.get(callback) + print(r.text) + + + + except Exception as e: + print(e) + print('Error getting object {} callback url. '.format(key)) + raise e + return "Fail" + + return "Success" +` + +// CreateSCF 创建回调云函数 +func CreateSCF(policy *model.Policy, region string) error { + // 初始化客户端 + credential := common.NewCredential( + policy.AccessKey, + policy.SecretKey, + ) + cpf := profile.NewClientProfile() + client, err := scf.NewClient(credential, region, cpf) + if err != nil { + return err + } + + // 创建回调代码数据 + buff := &bytes.Buffer{} + bs64 := base64.NewEncoder(base64.StdEncoding, buff) + zipWriter := zip.NewWriter(bs64) + header := zip.FileHeader{ + Name: "callback.py", + Method: zip.Deflate, + } + writer, err := zipWriter.CreateHeader(&header) + if err != nil { + return err + } + _, err = io.Copy(writer, strings.NewReader(scfFunc)) + zipWriter.Close() + + // 创建云函数 + req := scf.NewCreateFunctionRequest() + funcName := "cloudreve_" + hashid.HashID(policy.ID, hashid.PolicyID) + strconv.FormatInt(time.Now().Unix(), 10) + zipFileBytes, _ := ioutil.ReadAll(buff) + zipFileStr := string(zipFileBytes) + codeSource := "ZipFile" + handler := "callback.main_handler" + desc := "Cloudreve 用回调函数" + timeout := int64(60) + runtime := "Python3.6" + req.FunctionName = &funcName + req.Code = &scf.Code{ + ZipFile: &zipFileStr, + } + req.Handler = &handler + req.Description = &desc + req.Timeout = &timeout + req.Runtime = &runtime + req.CodeSource = &codeSource + + _, err = client.CreateFunction(req) + if err != nil { + return err + } + + time.Sleep(time.Duration(5) * time.Second) + + // 创建触发器 + server, _ := 
url.Parse(policy.Server) + triggerType := "cos" + triggerDesc := `{"event":"cos:ObjectCreated:Post","filter":{"Prefix":"","Suffix":""}}` + enable := "OPEN" + + trigger := scf.NewCreateTriggerRequest() + trigger.FunctionName = &funcName + trigger.TriggerName = &server.Host + trigger.Type = &triggerType + trigger.TriggerDesc = &triggerDesc + trigger.Enable = &enable + + _, err = client.CreateTrigger(trigger) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/filesystem/driver/googledrive/client.go b/pkg/filesystem/driver/googledrive/client.go new file mode 100644 index 0000000..de37257 --- /dev/null +++ b/pkg/filesystem/driver/googledrive/client.go @@ -0,0 +1,73 @@ +package googledrive + +import ( + "errors" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cluster" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "google.golang.org/api/drive/v3" +) + +// Client Google Drive client +type Client struct { + Endpoints *Endpoints + Policy *model.Policy + Credential *Credential + + ClientID string + ClientSecret string + Redirect string + + Request request.Client + ClusterController cluster.Controller +} + +// Endpoints OneDrive客户端相关设置 +type Endpoints struct { + UserConsentEndpoint string // OAuth认证的基URL + TokenEndpoint string // OAuth token 基URL + EndpointURL string // 接口请求的基URL +} + +const ( + TokenCachePrefix = "googledrive_" + + oauthEndpoint = "https://oauth2.googleapis.com/token" + userConsentBase = "https://accounts.google.com/o/oauth2/auth" + v3DriveEndpoint = "https://www.googleapis.com/drive/v3" +) + +var ( + // Defualt required scopes + RequiredScope = []string{ + drive.DriveScope, + "openid", + "profile", + "https://www.googleapis.com/auth/userinfo.profile", + } + + // ErrInvalidRefreshToken 上传策略无有效的RefreshToken + ErrInvalidRefreshToken = errors.New("no valid refresh token in this policy") +) + +// NewClient 根据存储策略获取新的client +func NewClient(policy *model.Policy) (*Client, error) { + client := 
&Client{ + Endpoints: &Endpoints{ + TokenEndpoint: oauthEndpoint, + UserConsentEndpoint: userConsentBase, + EndpointURL: v3DriveEndpoint, + }, + Credential: &Credential{ + RefreshToken: policy.AccessKey, + }, + Policy: policy, + ClientID: policy.BucketName, + ClientSecret: policy.SecretKey, + Redirect: policy.OptionsSerialized.OauthRedirect, + Request: request.NewClient(), + ClusterController: cluster.DefaultController, + } + + return client, nil +} diff --git a/pkg/filesystem/driver/googledrive/handler.go b/pkg/filesystem/driver/googledrive/handler.go new file mode 100644 index 0000000..917ae87 --- /dev/null +++ b/pkg/filesystem/driver/googledrive/handler.go @@ -0,0 +1,65 @@ +package googledrive + +import ( + "context" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/response" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" +) + +// Driver Google Drive 适配器 +type Driver struct { + Policy *model.Policy + HTTPClient request.Client +} + +// NewDriver 从存储策略初始化新的Driver实例 +func NewDriver(policy *model.Policy) (driver.Handler, error) { + return &Driver{ + Policy: policy, + HTTPClient: request.NewClient(), + }, nil +} + +func (d *Driver) Put(ctx context.Context, file fsctx.FileHeader) error { + //TODO implement me + panic("implement me") +} + +func (d *Driver) Delete(ctx context.Context, files []string) ([]string, error) { + //TODO implement me + panic("implement me") +} + +func (d *Driver) Get(ctx context.Context, path string) (response.RSCloser, error) { + //TODO implement me + panic("implement me") +} + +func (d *Driver) Thumb(ctx context.Context, file *model.File) (*response.ContentResponse, error) { + //TODO implement me + panic("implement me") +} + +func (d *Driver) Source(ctx context.Context, path string, ttl int64, isDownload bool, speed 
int) (string, error) { + //TODO implement me + panic("implement me") +} + +func (d *Driver) Token(ctx context.Context, ttl int64, uploadSession *serializer.UploadSession, file fsctx.FileHeader) (*serializer.UploadCredential, error) { + //TODO implement me + panic("implement me") +} + +func (d *Driver) CancelToken(ctx context.Context, uploadSession *serializer.UploadSession) error { + //TODO implement me + panic("implement me") +} + +func (d *Driver) List(ctx context.Context, path string, recursive bool) ([]response.Object, error) { + //TODO implement me + panic("implement me") +} diff --git a/pkg/filesystem/driver/googledrive/oauth.go b/pkg/filesystem/driver/googledrive/oauth.go new file mode 100644 index 0000000..da8a80a --- /dev/null +++ b/pkg/filesystem/driver/googledrive/oauth.go @@ -0,0 +1,154 @@ +package googledrive + +import ( + "context" + "encoding/json" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/oauth" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +// OAuthURL 获取OAuth认证页面URL +func (client *Client) OAuthURL(ctx context.Context, scope []string) string { + query := url.Values{ + "client_id": {client.ClientID}, + "scope": {strings.Join(scope, " ")}, + "response_type": {"code"}, + "redirect_uri": {client.Redirect}, + "access_type": {"offline"}, + "prompt": {"consent"}, + } + + u, _ := url.Parse(client.Endpoints.UserConsentEndpoint) + u.RawQuery = query.Encode() + return u.String() +} + +// ObtainToken 通过code或refresh_token兑换token +func (client *Client) ObtainToken(ctx context.Context, code, refreshToken string) (*Credential, error) { + body := url.Values{ + "client_id": {client.ClientID}, + "redirect_uri": {client.Redirect}, + "client_secret": {client.ClientSecret}, + } + if code != "" { + body.Add("grant_type", "authorization_code") + body.Add("code", code) + } else { + body.Add("grant_type", 
"refresh_token") + body.Add("refresh_token", refreshToken) + } + strBody := body.Encode() + + res := client.Request.Request( + "POST", + client.Endpoints.TokenEndpoint, + io.NopCloser(strings.NewReader(strBody)), + request.WithHeader(http.Header{ + "Content-Type": {"application/x-www-form-urlencoded"}}, + ), + request.WithContentLength(int64(len(strBody))), + ) + if res.Err != nil { + return nil, res.Err + } + + respBody, err := res.GetResponse() + if err != nil { + return nil, err + } + + var ( + errResp OAuthError + credential Credential + decodeErr error + ) + + if res.Response.StatusCode != 200 { + decodeErr = json.Unmarshal([]byte(respBody), &errResp) + } else { + decodeErr = json.Unmarshal([]byte(respBody), &credential) + } + if decodeErr != nil { + return nil, decodeErr + } + + if errResp.ErrorType != "" { + return nil, errResp + } + + return &credential, nil +} + +// UpdateCredential 更新凭证,并检查有效期 +func (client *Client) UpdateCredential(ctx context.Context, isSlave bool) error { + if isSlave { + return client.fetchCredentialFromMaster(ctx) + } + + oauth.GlobalMutex.Lock(client.Policy.ID) + defer oauth.GlobalMutex.Unlock(client.Policy.ID) + + // 如果已存在凭证 + if client.Credential != nil && client.Credential.AccessToken != "" { + // 检查已有凭证是否过期 + if client.Credential.ExpiresIn > time.Now().Unix() { + // 未过期,不要更新 + return nil + } + } + + // 尝试从缓存中获取凭证 + if cacheCredential, ok := cache.Get(TokenCachePrefix + client.ClientID); ok { + credential := cacheCredential.(Credential) + if credential.ExpiresIn > time.Now().Unix() { + client.Credential = &credential + return nil + } + } + + // 获取新的凭证 + if client.Credential == nil || client.Credential.RefreshToken == "" { + // 无有效的RefreshToken + util.Log().Error("Failed to refresh credential for policy %q, please login your Google account again.", client.Policy.Name) + return ErrInvalidRefreshToken + } + + credential, err := client.ObtainToken(ctx, "", client.Credential.RefreshToken) + if err != nil { + return err + } + + // 
更新有效期为绝对时间戳 + expires := credential.ExpiresIn - 60 + credential.ExpiresIn = time.Now().Add(time.Duration(expires) * time.Second).Unix() + // refresh token for Google Drive does not expire in production + credential.RefreshToken = client.Credential.RefreshToken + client.Credential = credential + + // 更新缓存 + cache.Set(TokenCachePrefix+client.ClientID, *credential, int(expires)) + + return nil +} + +func (client *Client) AccessToken() string { + return client.Credential.AccessToken +} + +// UpdateCredential 更新凭证,并检查有效期 +func (client *Client) fetchCredentialFromMaster(ctx context.Context) error { + res, err := client.ClusterController.GetPolicyOauthToken(client.Policy.MasterID, client.Policy.ID) + if err != nil { + return err + } + + client.Credential = &Credential{AccessToken: res} + return nil +} diff --git a/pkg/filesystem/driver/googledrive/types.go b/pkg/filesystem/driver/googledrive/types.go new file mode 100644 index 0000000..a459c15 --- /dev/null +++ b/pkg/filesystem/driver/googledrive/types.go @@ -0,0 +1,43 @@ +package googledrive + +import "encoding/gob" + +// RespError 接口返回错误 +type RespError struct { + APIError APIError `json:"error"` +} + +// APIError 接口返回的错误内容 +type APIError struct { + Code string `json:"code"` + Message string `json:"message"` +} + +// Error 实现error接口 +func (err RespError) Error() string { + return err.APIError.Message +} + +// Credential 获取token时返回的凭证 +type Credential struct { + ExpiresIn int64 `json:"expires_in"` + Scope string `json:"scope"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + UserID string `json:"user_id"` +} + +// OAuthError OAuth相关接口的错误响应 +type OAuthError struct { + ErrorType string `json:"error"` + ErrorDescription string `json:"error_description"` +} + +// Error 实现error接口 +func (err OAuthError) Error() string { + return err.ErrorDescription +} + +func init() { + gob.Register(Credential{}) +} diff --git a/pkg/filesystem/driver/handler.go b/pkg/filesystem/driver/handler.go new 
file mode 100644 index 0000000..f145281 --- /dev/null +++ b/pkg/filesystem/driver/handler.go @@ -0,0 +1,52 @@ +package driver + +import ( + "context" + "fmt" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/response" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" +) + +var ( + ErrorThumbNotExist = fmt.Errorf("thumb not exist") + ErrorThumbNotSupported = fmt.Errorf("thumb not supported") +) + +// Handler 存储策略适配器 +type Handler interface { + // 上传文件, dst为文件存储路径,size 为文件大小。上下文关闭 + // 时,应取消上传并清理临时文件 + Put(ctx context.Context, file fsctx.FileHeader) error + + // 删除一个或多个给定路径的文件,返回删除失败的文件路径列表及错误 + Delete(ctx context.Context, files []string) ([]string, error) + + // 获取文件内容 + Get(ctx context.Context, path string) (response.RSCloser, error) + + // 获取缩略图,可直接在ContentResponse中返回文件数据流,也可指 + // 定为重定向 + // 如果缩略图不存在, 且需要 Cloudreve 代理生成并上传,应返回 ErrorThumbNotExist,生 + // 成的缩略图文件存储规则与本机策略一致。 + // 如果不支持此文件的缩略图,并且不希望后续继续请求此缩略图,应返回 ErrorThumbNotSupported + Thumb(ctx context.Context, file *model.File) (*response.ContentResponse, error) + + // 获取外链/下载地址, + // url - 站点本身地址, + // isDownload - 是否直接下载 + Source(ctx context.Context, path string, ttl int64, isDownload bool, speed int) (string, error) + + // Token 获取有效期为ttl的上传凭证和签名 + Token(ctx context.Context, ttl int64, uploadSession *serializer.UploadSession, file fsctx.FileHeader) (*serializer.UploadCredential, error) + + // CancelToken 取消已经创建的有状态上传凭证 + CancelToken(ctx context.Context, uploadSession *serializer.UploadSession) error + + // List 递归列取远程端path路径下文件、目录,不包含path本身, + // 返回的对象路径以path作为起始根目录. 
+ // recursive - 是否递归列出 + List(ctx context.Context, path string, recursive bool) ([]response.Object, error) +} diff --git a/pkg/filesystem/driver/local/handler.go b/pkg/filesystem/driver/local/handler.go new file mode 100644 index 0000000..85ba1af --- /dev/null +++ b/pkg/filesystem/driver/local/handler.go @@ -0,0 +1,292 @@ +package local + +import ( + "context" + "errors" + "fmt" + "io" + "net/url" + "os" + "path/filepath" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/auth" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/response" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +const ( + Perm = 0744 +) + +// Driver 本地策略适配器 +type Driver struct { + Policy *model.Policy +} + +// List 递归列取给定物理路径下所有文件 +func (handler Driver) List(ctx context.Context, path string, recursive bool) ([]response.Object, error) { + var res []response.Object + + // 取得起始路径 + root := util.RelativePath(filepath.FromSlash(path)) + + // 开始遍历路径下的文件、目录 + err := filepath.Walk(root, + func(path string, info os.FileInfo, err error) error { + // 跳过根目录 + if path == root { + return nil + } + + if err != nil { + util.Log().Warning("Failed to walk folder %q: %s", path, err) + return filepath.SkipDir + } + + // 将遍历对象的绝对路径转换为相对路径 + rel, err := filepath.Rel(root, path) + if err != nil { + return err + } + + res = append(res, response.Object{ + Name: info.Name(), + RelativePath: filepath.ToSlash(rel), + Source: path, + Size: uint64(info.Size()), + IsDir: info.IsDir(), + LastModify: info.ModTime(), + }) + + // 如果非递归,则不步入目录 + if !recursive && info.IsDir() { + return filepath.SkipDir + } + + return nil + }) + + return res, err +} + +// Get 获取文件内容 +func (handler Driver) Get(ctx context.Context, path 
string) (response.RSCloser, error) { + // 打开文件 + file, err := os.Open(util.RelativePath(path)) + if err != nil { + util.Log().Debug("Failed to open file: %s", err) + return nil, err + } + + return file, nil +} + +// Put 将文件流保存到指定目录 +func (handler Driver) Put(ctx context.Context, file fsctx.FileHeader) error { + defer file.Close() + fileInfo := file.Info() + dst := util.RelativePath(filepath.FromSlash(fileInfo.SavePath)) + + // 如果非 Overwrite,则检查是否有重名冲突 + if fileInfo.Mode&fsctx.Overwrite != fsctx.Overwrite { + if util.Exists(dst) { + util.Log().Warning("File with the same name existed or unavailable: %s", dst) + return errors.New("file with the same name existed or unavailable") + } + } + + // 如果目标目录不存在,创建 + basePath := filepath.Dir(dst) + if !util.Exists(basePath) { + err := os.MkdirAll(basePath, Perm) + if err != nil { + util.Log().Warning("Failed to create directory: %s", err) + return err + } + } + + var ( + out *os.File + err error + ) + + openMode := os.O_CREATE | os.O_RDWR + if fileInfo.Mode&fsctx.Append == fsctx.Append { + openMode |= os.O_APPEND + } else { + openMode |= os.O_TRUNC + } + + out, err = os.OpenFile(dst, openMode, Perm) + if err != nil { + util.Log().Warning("Failed to open or create file: %s", err) + return err + } + defer out.Close() + + if fileInfo.Mode&fsctx.Append == fsctx.Append { + stat, err := out.Stat() + if err != nil { + util.Log().Warning("Failed to read file info: %s", err) + return err + } + + if uint64(stat.Size()) < fileInfo.AppendStart { + return errors.New("size of unfinished uploaded chunks is not as expected") + } else if uint64(stat.Size()) > fileInfo.AppendStart { + out.Close() + if err := handler.Truncate(ctx, dst, fileInfo.AppendStart); err != nil { + return fmt.Errorf("failed to overwrite chunk: %w", err) + } + + out, err = os.OpenFile(dst, openMode, Perm) + defer out.Close() + if err != nil { + util.Log().Warning("Failed to create or open file: %s", err) + return err + } + } + } + + // 写入文件内容 + _, err = io.Copy(out, 
file) + return err +} + +func (handler Driver) Truncate(ctx context.Context, src string, size uint64) error { + util.Log().Warning("Truncate file %q to [%d].", src, size) + out, err := os.OpenFile(src, os.O_WRONLY, Perm) + if err != nil { + util.Log().Warning("Failed to open file: %s", err) + return err + } + + defer out.Close() + return out.Truncate(int64(size)) +} + +// Delete 删除一个或多个文件, +// 返回未删除的文件,及遇到的最后一个错误 +func (handler Driver) Delete(ctx context.Context, files []string) ([]string, error) { + deleteFailed := make([]string, 0, len(files)) + var retErr error + + for _, value := range files { + filePath := util.RelativePath(filepath.FromSlash(value)) + if util.Exists(filePath) { + err := os.Remove(filePath) + if err != nil { + util.Log().Warning("Failed to delete file: %s", err) + retErr = err + deleteFailed = append(deleteFailed, value) + } + } + + // 尝试删除文件的缩略图(如果有) + _ = os.Remove(util.RelativePath(value + model.GetSettingByNameWithDefault("thumb_file_suffix", "._thumb"))) + } + + return deleteFailed, retErr +} + +// Thumb 获取文件缩略图 +func (handler Driver) Thumb(ctx context.Context, file *model.File) (*response.ContentResponse, error) { + // Quick check thumb existence on master. 
+ if conf.SystemConfig.Mode == "master" && file.MetadataSerialized[model.ThumbStatusMetadataKey] == model.ThumbStatusNotExist { + // Tell invoker to generate a thumb + return nil, driver.ErrorThumbNotExist + } + + thumbFile, err := handler.Get(ctx, file.ThumbFile()) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + err = fmt.Errorf("thumb not exist: %w (%w)", err, driver.ErrorThumbNotExist) + } + + return nil, err + } + + return &response.ContentResponse{ + Redirect: false, + Content: thumbFile, + }, nil +} + +// Source 获取外链URL +func (handler Driver) Source(ctx context.Context, path string, ttl int64, isDownload bool, speed int) (string, error) { + file, ok := ctx.Value(fsctx.FileModelCtx).(model.File) + if !ok { + return "", errors.New("failed to read file model context") + } + + var baseURL *url.URL + // 是否启用了CDN + if handler.Policy.BaseURL != "" { + cdnURL, err := url.Parse(handler.Policy.BaseURL) + if err != nil { + return "", err + } + baseURL = cdnURL + } + + var ( + signedURI *url.URL + err error + ) + if isDownload { + // 创建下载会话,将文件信息写入缓存 + downloadSessionID := util.RandStringRunes(16) + err = cache.Set("download_"+downloadSessionID, file, int(ttl)) + if err != nil { + return "", serializer.NewError(serializer.CodeCacheOperation, "Failed to create download session", err) + } + + // 签名生成文件记录 + signedURI, err = auth.SignURI( + auth.General, + fmt.Sprintf("/api/v3/file/download/%s", downloadSessionID), + ttl, + ) + } else { + // 签名生成文件记录 + signedURI, err = auth.SignURI( + auth.General, + fmt.Sprintf("/api/v3/file/get/%d/%s", file.ID, file.Name), + ttl, + ) + } + + if err != nil { + return "", serializer.NewError(serializer.CodeEncryptError, "Failed to sign url", err) + } + + finalURL := signedURI.String() + if baseURL != nil { + finalURL = baseURL.ResolveReference(signedURI).String() + } + + return finalURL, nil +} + +// Token 获取上传策略和认证Token,本地策略直接返回空值 +func (handler Driver) Token(ctx context.Context, ttl int64, uploadSession *serializer.UploadSession, 
file fsctx.FileHeader) (*serializer.UploadCredential, error) { + if util.Exists(uploadSession.SavePath) { + return nil, errors.New("placeholder file already exist") + } + + return &serializer.UploadCredential{ + SessionID: uploadSession.Key, + ChunkSize: handler.Policy.OptionsSerialized.ChunkSize, + }, nil +} + +// 取消上传凭证 +func (handler Driver) CancelToken(ctx context.Context, uploadSession *serializer.UploadSession) error { + return nil +} diff --git a/pkg/filesystem/driver/onedrive/api.go b/pkg/filesystem/driver/onedrive/api.go new file mode 100644 index 0000000..74649ea --- /dev/null +++ b/pkg/filesystem/driver/onedrive/api.go @@ -0,0 +1,595 @@ +package onedrive + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/chunk" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/chunk/backoff" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/mq" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +const ( + // SmallFileSize 单文件上传接口最大尺寸 + SmallFileSize uint64 = 4 * 1024 * 1024 + // ChunkSize 服务端中转分片上传分片大小 + ChunkSize uint64 = 10 * 1024 * 1024 + // ListRetry 列取请求重试次数 + ListRetry = 1 + chunkRetrySleep = time.Second * 5 + + notFoundError = "itemNotFound" +) + +// GetSourcePath 获取文件的绝对路径 +func (info *FileInfo) GetSourcePath() string { + res, err := url.PathUnescape(info.ParentReference.Path) + if err != nil { + return "" + } + + return strings.TrimPrefix( + path.Join( + strings.TrimPrefix(res, "/drive/root:"), + info.Name, + ), + "/", + ) +} + +func (client *Client) getRequestURL(api string, opts ...Option) string { + options := newDefaultOption() + for _, o := range opts { + o.apply(options) + } + + base, _ 
:= url.Parse(client.Endpoints.EndpointURL) + if base == nil { + return "" + } + + if options.useDriverResource { + base.Path = path.Join(base.Path, client.Endpoints.DriverResource, api) + } else { + base.Path = path.Join(base.Path, api) + } + + return base.String() +} + +// ListChildren 根据路径列取子对象 +func (client *Client) ListChildren(ctx context.Context, path string) ([]FileInfo, error) { + var requestURL string + dst := strings.TrimPrefix(path, "/") + if dst == "" { + requestURL = client.getRequestURL("root/children") + } else { + requestURL = client.getRequestURL("root:/" + dst + ":/children") + } + + res, err := client.requestWithStr(ctx, "GET", requestURL+"?$top=999999999", "", 200) + if err != nil { + retried := 0 + if v, ok := ctx.Value(fsctx.RetryCtx).(int); ok { + retried = v + } + if retried < ListRetry { + retried++ + util.Log().Debug("Failed to list path %q: %s, will retry in 5 seconds.", path, err) + time.Sleep(time.Duration(5) * time.Second) + return client.ListChildren(context.WithValue(ctx, fsctx.RetryCtx, retried), path) + } + return nil, err + } + + var ( + decodeErr error + fileInfo ListResponse + ) + decodeErr = json.Unmarshal([]byte(res), &fileInfo) + if decodeErr != nil { + return nil, decodeErr + } + + return fileInfo.Value, nil +} + +// Meta 根据资源ID或文件路径获取文件元信息 +func (client *Client) Meta(ctx context.Context, id string, path string) (*FileInfo, error) { + var requestURL string + if id != "" { + requestURL = client.getRequestURL("items/" + id) + } else { + dst := strings.TrimPrefix(path, "/") + requestURL = client.getRequestURL("root:/" + dst) + } + + res, err := client.requestWithStr(ctx, "GET", requestURL+"?expand=thumbnails", "", 200) + if err != nil { + return nil, err + } + + var ( + decodeErr error + fileInfo FileInfo + ) + decodeErr = json.Unmarshal([]byte(res), &fileInfo) + if decodeErr != nil { + return nil, decodeErr + } + + return &fileInfo, nil + +} + +// CreateUploadSession 创建分片上传会话 +func (client *Client) CreateUploadSession(ctx 
context.Context, dst string, opts ...Option) (string, error) { + options := newDefaultOption() + for _, o := range opts { + o.apply(options) + } + + dst = strings.TrimPrefix(dst, "/") + requestURL := client.getRequestURL("root:/" + dst + ":/createUploadSession") + body := map[string]map[string]interface{}{ + "item": { + "@microsoft.graph.conflictBehavior": options.conflictBehavior, + }, + } + bodyBytes, _ := json.Marshal(body) + + res, err := client.requestWithStr(ctx, "POST", requestURL, string(bodyBytes), 200) + if err != nil { + return "", err + } + + var ( + decodeErr error + uploadSession UploadSessionResponse + ) + decodeErr = json.Unmarshal([]byte(res), &uploadSession) + if decodeErr != nil { + return "", decodeErr + } + + return uploadSession.UploadURL, nil +} + +// GetSiteIDByURL 通过 SharePoint 站点 URL 获取站点ID +func (client *Client) GetSiteIDByURL(ctx context.Context, siteUrl string) (string, error) { + siteUrlParsed, err := url.Parse(siteUrl) + if err != nil { + return "", err + } + + hostName := siteUrlParsed.Hostname() + relativePath := strings.Trim(siteUrlParsed.Path, "/") + requestURL := client.getRequestURL(fmt.Sprintf("sites/%s:/%s", hostName, relativePath), WithDriverResource(false)) + res, reqErr := client.requestWithStr(ctx, "GET", requestURL, "", 200) + if reqErr != nil { + return "", reqErr + } + + var ( + decodeErr error + siteInfo Site + ) + decodeErr = json.Unmarshal([]byte(res), &siteInfo) + if decodeErr != nil { + return "", decodeErr + } + + return siteInfo.ID, nil +} + +// GetUploadSessionStatus 查询上传会话状态 +func (client *Client) GetUploadSessionStatus(ctx context.Context, uploadURL string) (*UploadSessionResponse, error) { + res, err := client.requestWithStr(ctx, "GET", uploadURL, "", 200) + if err != nil { + return nil, err + } + + var ( + decodeErr error + uploadSession UploadSessionResponse + ) + decodeErr = json.Unmarshal([]byte(res), &uploadSession) + if decodeErr != nil { + return nil, decodeErr + } + + return &uploadSession, nil +} + 
+// UploadChunk 上传分片 +func (client *Client) UploadChunk(ctx context.Context, uploadURL string, content io.Reader, current *chunk.ChunkGroup) (*UploadSessionResponse, error) { + res, err := client.request( + ctx, "PUT", uploadURL, content, + request.WithContentLength(current.Length()), + request.WithHeader(http.Header{ + "Content-Range": {current.RangeHeader()}, + }), + request.WithoutHeader([]string{"Authorization", "Content-Type"}), + request.WithTimeout(0), + ) + if err != nil { + return nil, fmt.Errorf("failed to upload OneDrive chunk #%d: %w", current.Index(), err) + } + + if current.IsLast() { + return nil, nil + } + + var ( + decodeErr error + uploadRes UploadSessionResponse + ) + decodeErr = json.Unmarshal([]byte(res), &uploadRes) + if decodeErr != nil { + return nil, decodeErr + } + + return &uploadRes, nil +} + +// Upload 上传文件 +func (client *Client) Upload(ctx context.Context, file fsctx.FileHeader) error { + fileInfo := file.Info() + // 决定是否覆盖文件 + overwrite := "fail" + if fileInfo.Mode&fsctx.Overwrite == fsctx.Overwrite { + overwrite = "replace" + } + + size := int(fileInfo.Size) + dst := fileInfo.SavePath + + // 小文件,使用简单上传接口上传 + if size <= int(SmallFileSize) { + _, err := client.SimpleUpload(ctx, dst, file, int64(size), WithConflictBehavior(overwrite)) + return err + } + + // 大文件,进行分片 + // 创建上传会话 + uploadURL, err := client.CreateUploadSession(ctx, dst, WithConflictBehavior(overwrite)) + if err != nil { + return err + } + + // Initial chunk groups + chunks := chunk.NewChunkGroup(file, client.Policy.OptionsSerialized.ChunkSize, &backoff.ConstantBackoff{ + Max: model.GetIntSetting("chunk_retries", 5), + Sleep: chunkRetrySleep, + }, model.IsTrueVal(model.GetSettingByName("use_temp_chunk_buffer"))) + + uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error { + _, err := client.UploadChunk(ctx, uploadURL, content, current) + return err + } + + // upload chunks + for chunks.Next() { + if err := chunks.Process(uploadFunc); err != nil { + return 
fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err) + } + } + + return nil +} + +// DeleteUploadSession 删除上传会话 +func (client *Client) DeleteUploadSession(ctx context.Context, uploadURL string) error { + _, err := client.requestWithStr(ctx, "DELETE", uploadURL, "", 204) + if err != nil { + return err + } + + return nil +} + +// SimpleUpload 上传小文件到dst +func (client *Client) SimpleUpload(ctx context.Context, dst string, body io.Reader, size int64, opts ...Option) (*UploadResult, error) { + options := newDefaultOption() + for _, o := range opts { + o.apply(options) + } + + dst = strings.TrimPrefix(dst, "/") + requestURL := client.getRequestURL("root:/" + dst + ":/content") + requestURL += ("?@microsoft.graph.conflictBehavior=" + options.conflictBehavior) + + res, err := client.request(ctx, "PUT", requestURL, body, request.WithContentLength(int64(size)), + request.WithTimeout(0), + ) + if err != nil { + return nil, err + } + + var ( + decodeErr error + uploadRes UploadResult + ) + decodeErr = json.Unmarshal([]byte(res), &uploadRes) + if decodeErr != nil { + return nil, decodeErr + } + + return &uploadRes, nil +} + +// BatchDelete 并行删除给出的文件,返回删除失败的文件,及第一个遇到的错误。此方法将文件分为 +// 20个一组,调用Delete并行删除 +// TODO 测试 +func (client *Client) BatchDelete(ctx context.Context, dst []string) ([]string, error) { + groupNum := len(dst)/20 + 1 + finalRes := make([]string, 0, len(dst)) + res := make([]string, 0, 20) + var err error + + for i := 0; i < groupNum; i++ { + end := 20*i + 20 + if i == groupNum-1 { + end = len(dst) + } + res, err = client.Delete(ctx, dst[20*i:end]) + finalRes = append(finalRes, res...) 
+ } + + return finalRes, err +} + +// Delete 并行删除文件,返回删除失败的文件,及第一个遇到的错误, +// 由于API限制,最多删除20个 +func (client *Client) Delete(ctx context.Context, dst []string) ([]string, error) { + body := client.makeBatchDeleteRequestsBody(dst) + res, err := client.requestWithStr(ctx, "POST", client.getRequestURL("$batch", + WithDriverResource(false)), body, 200) + if err != nil { + return dst, err + } + + var ( + decodeErr error + deleteRes BatchResponses + ) + decodeErr = json.Unmarshal([]byte(res), &deleteRes) + if decodeErr != nil { + return dst, decodeErr + } + + // 取得删除失败的文件 + failed := getDeleteFailed(&deleteRes) + if len(failed) != 0 { + return failed, ErrDeleteFile + } + return failed, nil +} + +func getDeleteFailed(res *BatchResponses) []string { + var failed = make([]string, 0, len(res.Responses)) + for _, v := range res.Responses { + if v.Status != 204 && v.Status != 404 { + failed = append(failed, v.ID) + } + } + return failed +} + +// makeBatchDeleteRequestsBody 生成批量删除请求正文 +func (client *Client) makeBatchDeleteRequestsBody(files []string) string { + req := BatchRequests{ + Requests: make([]BatchRequest, len(files)), + } + for i, v := range files { + v = strings.TrimPrefix(v, "/") + filePath, _ := url.Parse("/" + client.Endpoints.DriverResource + "/root:/") + filePath.Path = path.Join(filePath.Path, v) + req.Requests[i] = BatchRequest{ + ID: v, + Method: "DELETE", + URL: filePath.EscapedPath(), + } + } + + res, _ := json.Marshal(req) + return string(res) +} + +// GetThumbURL 获取给定尺寸的缩略图URL +func (client *Client) GetThumbURL(ctx context.Context, dst string, w, h uint) (string, error) { + dst = strings.TrimPrefix(dst, "/") + requestURL := client.getRequestURL("root:/"+dst+":/thumbnails/0") + "/large" + + res, err := client.requestWithStr(ctx, "GET", requestURL, "", 200) + if err != nil { + return "", err + } + + var ( + decodeErr error + thumbRes ThumbResponse + ) + decodeErr = json.Unmarshal([]byte(res), &thumbRes) + if decodeErr != nil { + return "", decodeErr + } + + 
if thumbRes.URL != "" { + return thumbRes.URL, nil + } + + if len(thumbRes.Value) == 1 { + if res, ok := thumbRes.Value[0]["large"]; ok { + return res.(map[string]interface{})["url"].(string), nil + } + } + + return "", ErrThumbSizeNotFound +} + +// MonitorUpload 监控客户端分片上传进度 +func (client *Client) MonitorUpload(uploadURL, callbackKey, path string, size uint64, ttl int64) { + // 回调完成通知chan + callbackChan := mq.GlobalMQ.Subscribe(callbackKey, 1) + defer mq.GlobalMQ.Unsubscribe(callbackKey, callbackChan) + + timeout := model.GetIntSetting("onedrive_monitor_timeout", 600) + interval := model.GetIntSetting("onedrive_callback_check", 20) + + for { + select { + case <-callbackChan: + util.Log().Debug("Client finished OneDrive callback.") + return + case <-time.After(time.Duration(ttl) * time.Second): + // 上传会话到期,仍未完成上传,创建占位符 + client.DeleteUploadSession(context.Background(), uploadURL) + _, err := client.SimpleUpload(context.Background(), path, strings.NewReader(""), 0, WithConflictBehavior("replace")) + if err != nil { + util.Log().Debug("Failed to create placeholder file: %s", err) + } + return + case <-time.After(time.Duration(timeout) * time.Second): + util.Log().Debug("Checking OneDrive upload status.") + status, err := client.GetUploadSessionStatus(context.Background(), uploadURL) + + if err != nil { + if resErr, ok := err.(*RespError); ok { + if resErr.APIError.Code == notFoundError { + util.Log().Debug("Upload completed, will check upload callback later.") + select { + case <-time.After(time.Duration(interval) * time.Second): + util.Log().Warning("No callback is made, file will be deleted.") + cache.Deletes([]string{callbackKey}, "callback_") + _, err = client.Delete(context.Background(), []string{path}) + if err != nil { + util.Log().Warning("Failed to delete file without callback: %s", err) + } + case <-callbackChan: + util.Log().Debug("Client finished callback.") + } + return + } + } + util.Log().Debug("Failed to get upload session status: %s, continue next 
iteration.", err.Error()) + continue + } + + // 成功获取分片上传状态,检查文件大小 + if len(status.NextExpectedRanges) == 0 { + continue + } + sizeRange := strings.Split( + status.NextExpectedRanges[len(status.NextExpectedRanges)-1], + "-", + ) + if len(sizeRange) != 2 { + continue + } + uploadFullSize, _ := strconv.ParseUint(sizeRange[1], 10, 64) + if (sizeRange[0] == "0" && sizeRange[1] == "") || uploadFullSize+1 != size { + util.Log().Debug("Upload has not started, or uploaded file size not match, canceling upload session...") + // 取消上传会话,实测OneDrive取消上传会话后,客户端还是可以上传, + // 所以上传一个空文件占位,阻止客户端上传 + client.DeleteUploadSession(context.Background(), uploadURL) + _, err := client.SimpleUpload(context.Background(), path, strings.NewReader(""), 0, WithConflictBehavior("replace")) + if err != nil { + util.Log().Debug("无法创建占位文件,%s", err) + } + return + } + + } + } +} + +func sysError(err error) *RespError { + return &RespError{APIError: APIError{ + Code: "system", + Message: err.Error(), + }} +} + +func (client *Client) request(ctx context.Context, method string, url string, body io.Reader, option ...request.Option) (string, error) { + // 获取凭证 + err := client.UpdateCredential(ctx, conf.SystemConfig.Mode == "slave") + if err != nil { + return "", sysError(err) + } + + option = append(option, + request.WithHeader(http.Header{ + "Authorization": {"Bearer " + client.Credential.AccessToken}, + "Content-Type": {"application/json"}, + }), + request.WithContext(ctx), + request.WithTPSLimit( + fmt.Sprintf("policy_%d", client.Policy.ID), + client.Policy.OptionsSerialized.TPSLimit, + client.Policy.OptionsSerialized.TPSLimitBurst, + ), + ) + + // 发送请求 + res := client.Request.Request( + method, + url, + body, + option..., + ) + + if res.Err != nil { + return "", sysError(res.Err) + } + + respBody, err := res.GetResponse() + if err != nil { + return "", sysError(err) + } + + // 解析请求响应 + var ( + errResp RespError + decodeErr error + ) + // 如果有错误 + if res.Response.StatusCode < 200 || res.Response.StatusCode 
>= 300 { + decodeErr = json.Unmarshal([]byte(respBody), &errResp) + if decodeErr != nil { + util.Log().Debug("Onedrive returns unknown response: %s", respBody) + return "", sysError(decodeErr) + } + + if res.Response.StatusCode == 429 { + util.Log().Warning("OneDrive request is throttled.") + return "", backoff.NewRetryableErrorFromHeader(&errResp, res.Response.Header) + } + + return "", &errResp + } + + return respBody, nil +} + +func (client *Client) requestWithStr(ctx context.Context, method string, url string, body string, expectedCode int) (string, error) { + // 发送请求 + bodyReader := io.NopCloser(strings.NewReader(body)) + return client.request(ctx, method, url, bodyReader, + request.WithContentLength(int64(len(body))), + ) +} diff --git a/pkg/filesystem/driver/onedrive/client.go b/pkg/filesystem/driver/onedrive/client.go new file mode 100644 index 0000000..89e696b --- /dev/null +++ b/pkg/filesystem/driver/onedrive/client.go @@ -0,0 +1,78 @@ +package onedrive + +import ( + "errors" + + "github.com/cloudreve/Cloudreve/v3/pkg/cluster" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/request" +) + +var ( + // ErrAuthEndpoint 无法解析授权端点地址 + ErrAuthEndpoint = errors.New("failed to parse endpoint url") + // ErrInvalidRefreshToken 上传策略无有效的RefreshToken + ErrInvalidRefreshToken = errors.New("no valid refresh token in this policy") + // ErrDeleteFile 无法删除文件 + ErrDeleteFile = errors.New("cannot delete file") + // ErrClientCanceled 客户端取消操作 + ErrClientCanceled = errors.New("client canceled") + // Desired thumb size not available + ErrThumbSizeNotFound = errors.New("thumb size not found") +) + +// Client OneDrive客户端 +type Client struct { + Endpoints *Endpoints + Policy *model.Policy + Credential *Credential + + ClientID string + ClientSecret string + Redirect string + + Request request.Client + ClusterController cluster.Controller +} + +// Endpoints OneDrive客户端相关设置 +type Endpoints struct { + OAuthURL string // OAuth认证的基URL + 
OAuthEndpoints *oauthEndpoint + EndpointURL string // 接口请求的基URL + isInChina bool // 是否为世纪互联 + DriverResource string // 要使用的驱动器 +} + +// NewClient 根据存储策略获取新的client +func NewClient(policy *model.Policy) (*Client, error) { + client := &Client{ + Endpoints: &Endpoints{ + OAuthURL: policy.BaseURL, + EndpointURL: policy.Server, + DriverResource: policy.OptionsSerialized.OdDriver, + }, + Credential: &Credential{ + RefreshToken: policy.AccessKey, + }, + Policy: policy, + ClientID: policy.BucketName, + ClientSecret: policy.SecretKey, + Redirect: policy.OptionsSerialized.OauthRedirect, + Request: request.NewClient(), + ClusterController: cluster.DefaultController, + } + + if client.Endpoints.DriverResource == "" { + client.Endpoints.DriverResource = "me/drive" + } + + oauthBase := client.getOAuthEndpoint() + if oauthBase == nil { + return nil, ErrAuthEndpoint + } + client.Endpoints.OAuthEndpoints = oauthBase + + return client, nil +} diff --git a/pkg/filesystem/driver/onedrive/handler.go b/pkg/filesystem/driver/onedrive/handler.go new file mode 100644 index 0000000..149fdba --- /dev/null +++ b/pkg/filesystem/driver/onedrive/handler.go @@ -0,0 +1,238 @@ +package onedrive + +import ( + "context" + "errors" + "fmt" + "net/url" + "path" + "path/filepath" + "strings" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/response" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" +) + +// Driver OneDrive 适配器 +type Driver struct { + Policy *model.Policy + Client *Client + HTTPClient request.Client +} + +// NewDriver 从存储策略初始化新的Driver实例 +func NewDriver(policy *model.Policy) (driver.Handler, error) { + client, err := NewClient(policy) + if policy.OptionsSerialized.ChunkSize == 0 { + 
policy.OptionsSerialized.ChunkSize = 50 << 20 // 50MB + } + + return Driver{ + Policy: policy, + Client: client, + HTTPClient: request.NewClient(), + }, err +} + +// List 列取项目 +func (handler Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) { + base = strings.TrimPrefix(base, "/") + // 列取子项目 + objects, _ := handler.Client.ListChildren(ctx, base) + + // 获取真实的列取起始根目录 + rootPath := base + if realBase, ok := ctx.Value(fsctx.PathCtx).(string); ok { + rootPath = realBase + } else { + ctx = context.WithValue(ctx, fsctx.PathCtx, base) + } + + // 整理结果 + res := make([]response.Object, 0, len(objects)) + for _, object := range objects { + source := path.Join(base, object.Name) + rel, err := filepath.Rel(rootPath, source) + if err != nil { + continue + } + res = append(res, response.Object{ + Name: object.Name, + RelativePath: filepath.ToSlash(rel), + Source: source, + Size: object.Size, + IsDir: object.Folder != nil, + LastModify: time.Now(), + }) + } + + // 递归列取子目录 + if recursive { + for _, object := range objects { + if object.Folder != nil { + sub, _ := handler.List(ctx, path.Join(base, object.Name), recursive) + res = append(res, sub...) 
+ } + } + } + + return res, nil +} + +// Get 获取文件 +func (handler Driver) Get(ctx context.Context, path string) (response.RSCloser, error) { + // 获取文件源地址 + downloadURL, err := handler.Source( + ctx, + path, + 60, + false, + 0, + ) + if err != nil { + return nil, err + } + + // 获取文件数据流 + resp, err := handler.HTTPClient.Request( + "GET", + downloadURL, + nil, + request.WithContext(ctx), + request.WithTimeout(time.Duration(0)), + ).CheckHTTPResponse(200).GetRSCloser() + if err != nil { + return nil, err + } + + resp.SetFirstFakeChunk() + + // 尝试自主获取文件大小 + if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok { + resp.SetContentLength(int64(file.Size)) + } + + return resp, nil +} + +// Put 将文件流保存到指定目录 +func (handler Driver) Put(ctx context.Context, file fsctx.FileHeader) error { + defer file.Close() + + return handler.Client.Upload(ctx, file) +} + +// Delete 删除一个或多个文件, +// 返回未删除的文件,及遇到的最后一个错误 +func (handler Driver) Delete(ctx context.Context, files []string) ([]string, error) { + return handler.Client.BatchDelete(ctx, files) +} + +// Thumb 获取文件缩略图 +func (handler Driver) Thumb(ctx context.Context, file *model.File) (*response.ContentResponse, error) { + var ( + thumbSize = [2]uint{400, 300} + ok = false + ) + if thumbSize, ok = ctx.Value(fsctx.ThumbSizeCtx).([2]uint); !ok { + return nil, errors.New("failed to get thumbnail size") + } + + res, err := handler.Client.GetThumbURL(ctx, file.SourceName, thumbSize[0], thumbSize[1]) + if err != nil { + var apiErr *RespError + if errors.As(err, &apiErr); err == ErrThumbSizeNotFound || (apiErr != nil && apiErr.APIError.Code == notFoundError) { + // OneDrive cannot generate thumbnail for this file + return nil, driver.ErrorThumbNotSupported + } + } + + return &response.ContentResponse{ + Redirect: true, + URL: res, + }, err +} + +// Source 获取外链URL +func (handler Driver) Source( + ctx context.Context, + path string, + ttl int64, + isDownload bool, + speed int, +) (string, error) { + cacheKey := 
fmt.Sprintf("onedrive_source_%d_%s", handler.Policy.ID, path) + if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok { + cacheKey = fmt.Sprintf("onedrive_source_file_%d_%d", file.UpdatedAt.Unix(), file.ID) + } + + // 尝试从缓存中查找 + if cachedURL, ok := cache.Get(cacheKey); ok { + return handler.replaceSourceHost(cachedURL.(string)) + } + + // 缓存不存在,重新获取 + res, err := handler.Client.Meta(ctx, "", path) + if err == nil { + // 写入新的缓存 + cache.Set( + cacheKey, + res.DownloadURL, + model.GetIntSetting("onedrive_source_timeout", 1800), + ) + return handler.replaceSourceHost(res.DownloadURL) + } + return "", err +} + +func (handler Driver) replaceSourceHost(origin string) (string, error) { + if handler.Policy.OptionsSerialized.OdProxy != "" { + source, err := url.Parse(origin) + if err != nil { + return "", err + } + + cdn, err := url.Parse(handler.Policy.OptionsSerialized.OdProxy) + if err != nil { + return "", err + } + + // 替换反代地址 + source.Scheme = cdn.Scheme + source.Host = cdn.Host + return source.String(), nil + } + + return origin, nil +} + +// Token 获取上传会话URL +func (handler Driver) Token(ctx context.Context, ttl int64, uploadSession *serializer.UploadSession, file fsctx.FileHeader) (*serializer.UploadCredential, error) { + fileInfo := file.Info() + + uploadURL, err := handler.Client.CreateUploadSession(ctx, fileInfo.SavePath, WithConflictBehavior("fail")) + if err != nil { + return nil, err + } + + // 监控回调及上传 + go handler.Client.MonitorUpload(uploadURL, uploadSession.Key, fileInfo.SavePath, fileInfo.Size, ttl) + + uploadSession.UploadURL = uploadURL + return &serializer.UploadCredential{ + SessionID: uploadSession.Key, + ChunkSize: handler.Policy.OptionsSerialized.ChunkSize, + UploadURLs: []string{uploadURL}, + }, nil +} + +// 取消上传凭证 +func (handler Driver) CancelToken(ctx context.Context, uploadSession *serializer.UploadSession) error { + return handler.Client.DeleteUploadSession(ctx, uploadSession.UploadURL) +} diff --git 
a/pkg/filesystem/driver/onedrive/lock.go b/pkg/filesystem/driver/onedrive/lock.go new file mode 100644 index 0000000..655936b --- /dev/null +++ b/pkg/filesystem/driver/onedrive/lock.go @@ -0,0 +1,25 @@ +package onedrive + +import "sync" + +// CredentialLock 针对存储策略凭证的锁 +type CredentialLock interface { + Lock(uint) + Unlock(uint) +} + +var GlobalMutex = mutexMap{} + +type mutexMap struct { + locks sync.Map +} + +func (m *mutexMap) Lock(id uint) { + lock, _ := m.locks.LoadOrStore(id, &sync.Mutex{}) + lock.(*sync.Mutex).Lock() +} + +func (m *mutexMap) Unlock(id uint) { + lock, _ := m.locks.LoadOrStore(id, &sync.Mutex{}) + lock.(*sync.Mutex).Unlock() +} diff --git a/pkg/filesystem/driver/onedrive/oauth.go b/pkg/filesystem/driver/onedrive/oauth.go new file mode 100644 index 0000000..bb00005 --- /dev/null +++ b/pkg/filesystem/driver/onedrive/oauth.go @@ -0,0 +1,192 @@ +package onedrive + +import ( + "context" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/oauth" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// Error 实现error接口 +func (err OAuthError) Error() string { + return err.ErrorDescription +} + +// OAuthURL 获取OAuth认证页面URL +func (client *Client) OAuthURL(ctx context.Context, scope []string) string { + query := url.Values{ + "client_id": {client.ClientID}, + "scope": {strings.Join(scope, " ")}, + "response_type": {"code"}, + "redirect_uri": {client.Redirect}, + } + client.Endpoints.OAuthEndpoints.authorize.RawQuery = query.Encode() + return client.Endpoints.OAuthEndpoints.authorize.String() +} + +// getOAuthEndpoint 根据指定的AuthURL获取详细的认证接口地址 +func (client *Client) getOAuthEndpoint() *oauthEndpoint { + base, err := url.Parse(client.Endpoints.OAuthURL) + if err != nil { + return nil + } + var ( + token *url.URL + authorize *url.URL + ) + switch base.Host { + case "login.live.com": + 
token, _ = url.Parse("https://login.live.com/oauth20_token.srf") + authorize, _ = url.Parse("https://login.live.com/oauth20_authorize.srf") + case "login.chinacloudapi.cn": + client.Endpoints.isInChina = true + token, _ = url.Parse("https://login.chinacloudapi.cn/common/oauth2/v2.0/token") + authorize, _ = url.Parse("https://login.chinacloudapi.cn/common/oauth2/v2.0/authorize") + default: + token, _ = url.Parse("https://login.microsoftonline.com/common/oauth2/v2.0/token") + authorize, _ = url.Parse("https://login.microsoftonline.com/common/oauth2/v2.0/authorize") + } + + return &oauthEndpoint{ + token: *token, + authorize: *authorize, + } +} + +// ObtainToken 通过code或refresh_token兑换token +func (client *Client) ObtainToken(ctx context.Context, opts ...Option) (*Credential, error) { + options := newDefaultOption() + for _, o := range opts { + o.apply(options) + } + + body := url.Values{ + "client_id": {client.ClientID}, + "redirect_uri": {client.Redirect}, + "client_secret": {client.ClientSecret}, + } + if options.code != "" { + body.Add("grant_type", "authorization_code") + body.Add("code", options.code) + } else { + body.Add("grant_type", "refresh_token") + body.Add("refresh_token", options.refreshToken) + } + strBody := body.Encode() + + res := client.Request.Request( + "POST", + client.Endpoints.OAuthEndpoints.token.String(), + ioutil.NopCloser(strings.NewReader(strBody)), + request.WithHeader(http.Header{ + "Content-Type": {"application/x-www-form-urlencoded"}}, + ), + request.WithContentLength(int64(len(strBody))), + ) + if res.Err != nil { + return nil, res.Err + } + + respBody, err := res.GetResponse() + if err != nil { + return nil, err + } + + var ( + errResp OAuthError + credential Credential + decodeErr error + ) + + if res.Response.StatusCode != 200 { + decodeErr = json.Unmarshal([]byte(respBody), &errResp) + } else { + decodeErr = json.Unmarshal([]byte(respBody), &credential) + } + if decodeErr != nil { + return nil, decodeErr + } + + if 
errResp.ErrorType != "" { + return nil, errResp + } + + return &credential, nil + +} + +// UpdateCredential 更新凭证,并检查有效期 +func (client *Client) UpdateCredential(ctx context.Context, isSlave bool) error { + if isSlave { + return client.fetchCredentialFromMaster(ctx) + } + + oauth.GlobalMutex.Lock(client.Policy.ID) + defer oauth.GlobalMutex.Unlock(client.Policy.ID) + + // 如果已存在凭证 + if client.Credential != nil && client.Credential.AccessToken != "" { + // 检查已有凭证是否过期 + if client.Credential.ExpiresIn > time.Now().Unix() { + // 未过期,不要更新 + return nil + } + } + + // 尝试从缓存中获取凭证 + if cacheCredential, ok := cache.Get("onedrive_" + client.ClientID); ok { + credential := cacheCredential.(Credential) + if credential.ExpiresIn > time.Now().Unix() { + client.Credential = &credential + return nil + } + } + + // 获取新的凭证 + if client.Credential == nil || client.Credential.RefreshToken == "" { + // 无有效的RefreshToken + util.Log().Error("Failed to refresh credential for policy %q, please login your Microsoft account again.", client.Policy.Name) + return ErrInvalidRefreshToken + } + + credential, err := client.ObtainToken(ctx, WithRefreshToken(client.Credential.RefreshToken)) + if err != nil { + return err + } + + // 更新有效期为绝对时间戳 + expires := credential.ExpiresIn - 60 + credential.ExpiresIn = time.Now().Add(time.Duration(expires) * time.Second).Unix() + client.Credential = credential + + // 更新存储策略的 RefreshToken + client.Policy.UpdateAccessKeyAndClearCache(credential.RefreshToken) + + // 更新缓存 + cache.Set("onedrive_"+client.ClientID, *credential, int(expires)) + + return nil +} + +func (client *Client) AccessToken() string { + return client.Credential.AccessToken +} + +// UpdateCredential 更新凭证,并检查有效期 +func (client *Client) fetchCredentialFromMaster(ctx context.Context) error { + res, err := client.ClusterController.GetPolicyOauthToken(client.Policy.MasterID, client.Policy.ID) + if err != nil { + return err + } + + client.Credential = &Credential{AccessToken: res} + return nil +} diff --git 
a/pkg/filesystem/driver/onedrive/options.go b/pkg/filesystem/driver/onedrive/options.go new file mode 100644 index 0000000..0c8c107 --- /dev/null +++ b/pkg/filesystem/driver/onedrive/options.go @@ -0,0 +1,59 @@ +package onedrive + +import "time" + +// Option 发送请求的额外设置 +type Option interface { + apply(*options) +} + +type options struct { + redirect string + code string + refreshToken string + conflictBehavior string + expires time.Time + useDriverResource bool +} + +type optionFunc func(*options) + +// WithCode 设置接口Code +func WithCode(t string) Option { + return optionFunc(func(o *options) { + o.code = t + }) +} + +// WithRefreshToken 设置接口RefreshToken +func WithRefreshToken(t string) Option { + return optionFunc(func(o *options) { + o.refreshToken = t + }) +} + +// WithConflictBehavior 设置文件重名后的处理方式 +func WithConflictBehavior(t string) Option { + return optionFunc(func(o *options) { + o.conflictBehavior = t + }) +} + +// WithConflictBehavior 设置文件重名后的处理方式 +func WithDriverResource(t bool) Option { + return optionFunc(func(o *options) { + o.useDriverResource = t + }) +} + +func (f optionFunc) apply(o *options) { + f(o) +} + +func newDefaultOption() *options { + return &options{ + conflictBehavior: "fail", + useDriverResource: true, + expires: time.Now().UTC().Add(time.Duration(1) * time.Hour), + } +} diff --git a/pkg/filesystem/driver/onedrive/types.go b/pkg/filesystem/driver/onedrive/types.go new file mode 100644 index 0000000..2a2ea4c --- /dev/null +++ b/pkg/filesystem/driver/onedrive/types.go @@ -0,0 +1,140 @@ +package onedrive + +import ( + "encoding/gob" + "net/url" +) + +// RespError 接口返回错误 +type RespError struct { + APIError APIError `json:"error"` +} + +// APIError 接口返回的错误内容 +type APIError struct { + Code string `json:"code"` + Message string `json:"message"` +} + +// UploadSessionResponse 分片上传会话 +type UploadSessionResponse struct { + DataContext string `json:"@odata.context"` + ExpirationDateTime string `json:"expirationDateTime"` + NextExpectedRanges []string 
`json:"nextExpectedRanges"` + UploadURL string `json:"uploadUrl"` +} + +// FileInfo 文件元信息 +type FileInfo struct { + Name string `json:"name"` + Size uint64 `json:"size"` + Image imageInfo `json:"image"` + ParentReference parentReference `json:"parentReference"` + DownloadURL string `json:"@microsoft.graph.downloadUrl"` + File *file `json:"file"` + Folder *folder `json:"folder"` +} + +type file struct { + MimeType string `json:"mimeType"` +} + +type folder struct { + ChildCount int `json:"childCount"` +} + +type imageInfo struct { + Height int `json:"height"` + Width int `json:"width"` +} + +type parentReference struct { + Path string `json:"path"` + Name string `json:"name"` + ID string `json:"id"` +} + +// UploadResult 上传结果 +type UploadResult struct { + ID string `json:"id"` + Name string `json:"name"` + Size uint64 `json:"size"` +} + +// BatchRequests 批量操作请求 +type BatchRequests struct { + Requests []BatchRequest `json:"requests"` +} + +// BatchRequest 批量操作单个请求 +type BatchRequest struct { + ID string `json:"id"` + Method string `json:"method"` + URL string `json:"url"` + Body interface{} `json:"body,omitempty"` + Headers map[string]string `json:"headers,omitempty"` +} + +// BatchResponses 批量操作响应 +type BatchResponses struct { + Responses []BatchResponse `json:"responses"` +} + +// BatchResponse 批量操作单个响应 +type BatchResponse struct { + ID string `json:"id"` + Status int `json:"status"` +} + +// ThumbResponse 获取缩略图的响应 +type ThumbResponse struct { + Value []map[string]interface{} `json:"value"` + URL string `json:"url"` +} + +// ListResponse 列取子项目响应 +type ListResponse struct { + Value []FileInfo `json:"value"` + Context string `json:"@odata.context"` +} + +// oauthEndpoint OAuth接口地址 +type oauthEndpoint struct { + token url.URL + authorize url.URL +} + +// Credential 获取token时返回的凭证 +type Credential struct { + TokenType string `json:"token_type"` + ExpiresIn int64 `json:"expires_in"` + Scope string `json:"scope"` + AccessToken string `json:"access_token"` + RefreshToken 
string `json:"refresh_token"` + UserID string `json:"user_id"` +} + +// OAuthError OAuth相关接口的错误响应 +type OAuthError struct { + ErrorType string `json:"error"` + ErrorDescription string `json:"error_description"` + CorrelationID string `json:"correlation_id"` +} + +// Site SharePoint 站点信息 +type Site struct { + Description string `json:"description"` + ID string `json:"id"` + Name string `json:"name"` + DisplayName string `json:"displayName"` + WebUrl string `json:"webUrl"` +} + +func init() { + gob.Register(Credential{}) +} + +// Error 实现error接口 +func (err RespError) Error() string { + return err.APIError.Message +} diff --git a/pkg/filesystem/driver/oss/callback.go b/pkg/filesystem/driver/oss/callback.go new file mode 100644 index 0000000..b2a8803 --- /dev/null +++ b/pkg/filesystem/driver/oss/callback.go @@ -0,0 +1,117 @@ +package oss + +import ( + "bytes" + "crypto" + "crypto/md5" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/request" +) + +// GetPublicKey 从回调请求或缓存中获取OSS的回调签名公钥 +func GetPublicKey(r *http.Request) ([]byte, error) { + var pubKey []byte + + // 尝试从缓存中获取 + pub, exist := cache.Get("oss_public_key") + if exist { + return pub.([]byte), nil + } + + // 从请求中获取 + pubURL, err := base64.StdEncoding.DecodeString(r.Header.Get("x-oss-pub-key-url")) + if err != nil { + return pubKey, err + } + + // 确保这个 public key 是由 OSS 颁发的 + if !strings.HasPrefix(string(pubURL), "http://gosspublic.alicdn.com/") && + !strings.HasPrefix(string(pubURL), "https://gosspublic.alicdn.com/") { + return pubKey, errors.New("public key url invalid") + } + + // 获取公钥 + client := request.NewClient() + body, err := client.Request("GET", string(pubURL), nil). + CheckHTTPResponse(200). 
+ GetResponse() + if err != nil { + return pubKey, err + } + + // 写入缓存 + _ = cache.Set("oss_public_key", []byte(body), 86400*7) + + return []byte(body), nil +} + +func getRequestMD5(r *http.Request) ([]byte, error) { + var byteMD5 []byte + + // 获取请求正文 + body, err := ioutil.ReadAll(r.Body) + r.Body.Close() + if err != nil { + return byteMD5, err + } + r.Body = ioutil.NopCloser(bytes.NewReader(body)) + + strURLPathDecode, err := url.PathUnescape(r.URL.Path) + if err != nil { + return byteMD5, err + } + + strAuth := fmt.Sprintf("%s\n%s", strURLPathDecode, string(body)) + md5Ctx := md5.New() + md5Ctx.Write([]byte(strAuth)) + byteMD5 = md5Ctx.Sum(nil) + + return byteMD5, nil +} + +// VerifyCallbackSignature 验证OSS回调请求 +func VerifyCallbackSignature(r *http.Request) error { + bytePublicKey, err := GetPublicKey(r) + if err != nil { + return err + } + + byteMD5, err := getRequestMD5(r) + if err != nil { + return err + } + + strAuthorizationBase64 := r.Header.Get("authorization") + if strAuthorizationBase64 == "" { + return errors.New("no authorization field in Request header") + } + authorization, _ := base64.StdEncoding.DecodeString(strAuthorizationBase64) + + pubBlock, _ := pem.Decode(bytePublicKey) + if pubBlock == nil { + return errors.New("pubBlock not exist") + } + pubInterface, err := x509.ParsePKIXPublicKey(pubBlock.Bytes) + if (pubInterface == nil) || (err != nil) { + return err + } + pub := pubInterface.(*rsa.PublicKey) + + errorVerifyPKCS1v15 := rsa.VerifyPKCS1v15(pub, crypto.MD5, byteMD5, authorization) + if errorVerifyPKCS1v15 != nil { + return errorVerifyPKCS1v15 + } + + return nil +} diff --git a/pkg/filesystem/driver/oss/handler.go b/pkg/filesystem/driver/oss/handler.go new file mode 100644 index 0000000..ccccbd2 --- /dev/null +++ b/pkg/filesystem/driver/oss/handler.go @@ -0,0 +1,501 @@ +package oss + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/url" + "path" + "path/filepath" + "strings" + "time" + + 
"github.com/HFO4/aliyun-oss-go-sdk/oss" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/chunk" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/chunk/backoff" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/response" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// UploadPolicy 阿里云OSS上传策略 +type UploadPolicy struct { + Expiration string `json:"expiration"` + Conditions []interface{} `json:"conditions"` +} + +// CallbackPolicy 回调策略 +type CallbackPolicy struct { + CallbackURL string `json:"callbackUrl"` + CallbackBody string `json:"callbackBody"` + CallbackBodyType string `json:"callbackBodyType"` +} + +// Driver 阿里云OSS策略适配器 +type Driver struct { + Policy *model.Policy + client *oss.Client + bucket *oss.Bucket + HTTPClient request.Client +} + +type key int + +const ( + chunkRetrySleep = time.Duration(5) * time.Second + + // MultiPartUploadThreshold 服务端使用分片上传的阈值 + MultiPartUploadThreshold uint64 = 5 * (1 << 30) // 5GB + // VersionID 文件版本标识 + VersionID key = iota +) + +func NewDriver(policy *model.Policy) (*Driver, error) { + if policy.OptionsSerialized.ChunkSize == 0 { + policy.OptionsSerialized.ChunkSize = 25 << 20 // 25 MB + } + + driver := &Driver{ + Policy: policy, + HTTPClient: request.NewClient(), + } + + return driver, driver.InitOSSClient(false) +} + +// CORS 创建跨域策略 +func (handler *Driver) CORS() error { + return handler.client.SetBucketCORS(handler.Policy.BucketName, []oss.CORSRule{ + { + AllowedOrigin: []string{"*"}, + AllowedMethod: []string{ + "GET", + "POST", + "PUT", + "DELETE", + "HEAD", + }, + ExposeHeader: []string{}, + AllowedHeader: []string{"*"}, + MaxAgeSeconds: 3600, + }, + }) +} + +// InitOSSClient 初始化OSS鉴权客户端 +func (handler *Driver) 
InitOSSClient(forceUsePublicEndpoint bool) error { + if handler.Policy == nil { + return errors.New("empty policy") + } + + // 决定是否使用内网 Endpoint + endpoint := handler.Policy.Server + if handler.Policy.OptionsSerialized.ServerSideEndpoint != "" && !forceUsePublicEndpoint { + endpoint = handler.Policy.OptionsSerialized.ServerSideEndpoint + } + + // 初始化客户端 + client, err := oss.New(endpoint, handler.Policy.AccessKey, handler.Policy.SecretKey) + if err != nil { + return err + } + handler.client = client + + // 初始化存储桶 + bucket, err := client.Bucket(handler.Policy.BucketName) + if err != nil { + return err + } + handler.bucket = bucket + + return nil +} + +// List 列出OSS上的文件 +func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) { + // 列取文件 + base = strings.TrimPrefix(base, "/") + if base != "" { + base += "/" + } + + var ( + delimiter string + marker string + objects []oss.ObjectProperties + commons []string + ) + if !recursive { + delimiter = "/" + } + + for { + subRes, err := handler.bucket.ListObjects(oss.Marker(marker), oss.Prefix(base), + oss.MaxKeys(1000), oss.Delimiter(delimiter)) + if err != nil { + return nil, err + } + objects = append(objects, subRes.Objects...) + commons = append(commons, subRes.CommonPrefixes...) 
+ marker = subRes.NextMarker + if marker == "" { + break + } + } + + // 处理列取结果 + res := make([]response.Object, 0, len(objects)+len(commons)) + // 处理目录 + for _, object := range commons { + rel, err := filepath.Rel(base, object) + if err != nil { + continue + } + res = append(res, response.Object{ + Name: path.Base(object), + RelativePath: filepath.ToSlash(rel), + Size: 0, + IsDir: true, + LastModify: time.Now(), + }) + } + // 处理文件 + for _, object := range objects { + rel, err := filepath.Rel(base, object.Key) + if err != nil { + continue + } + if strings.HasSuffix(object.Key, "/") { + res = append(res, response.Object{ + Name: path.Base(object.Key), + RelativePath: filepath.ToSlash(rel), + Size: 0, + IsDir: true, + LastModify: time.Now(), + }) + } else { + res = append(res, response.Object{ + Name: path.Base(object.Key), + Source: object.Key, + RelativePath: filepath.ToSlash(rel), + Size: uint64(object.Size), + IsDir: false, + LastModify: object.LastModified, + }) + } + } + + return res, nil +} + +// Get 获取文件 +func (handler *Driver) Get(ctx context.Context, path string) (response.RSCloser, error) { + // 通过VersionID禁止缓存 + ctx = context.WithValue(ctx, VersionID, time.Now().UnixNano()) + + // 尽可能使用私有 Endpoint + ctx = context.WithValue(ctx, fsctx.ForceUsePublicEndpointCtx, false) + + // 获取文件源地址 + downloadURL, err := handler.Source(ctx, path, int64(model.GetIntSetting("preview_timeout", 60)), false, 0) + if err != nil { + return nil, err + } + + // 获取文件数据流 + resp, err := handler.HTTPClient.Request( + "GET", + downloadURL, + nil, + request.WithContext(ctx), + request.WithTimeout(time.Duration(0)), + ).CheckHTTPResponse(200).GetRSCloser() + if err != nil { + return nil, err + } + + resp.SetFirstFakeChunk() + + // 尝试自主获取文件大小 + if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok { + resp.SetContentLength(int64(file.Size)) + } + + return resp, nil +} + +// Put 将文件流保存到指定目录 +func (handler *Driver) Put(ctx context.Context, file fsctx.FileHeader) error { + defer 
file.Close() + fileInfo := file.Info() + + // 凭证有效期 + credentialTTL := model.GetIntSetting("upload_session_timeout", 3600) + + // 是否允许覆盖 + overwrite := fileInfo.Mode&fsctx.Overwrite == fsctx.Overwrite + options := []oss.Option{ + oss.Expires(time.Now().Add(time.Duration(credentialTTL) * time.Second)), + oss.ForbidOverWrite(!overwrite), + } + + // 小文件直接上传 + if fileInfo.Size < MultiPartUploadThreshold { + return handler.bucket.PutObject(fileInfo.SavePath, file, options...) + } + + // 超过阈值时使用分片上传 + imur, err := handler.bucket.InitiateMultipartUpload(fileInfo.SavePath, options...) + if err != nil { + return fmt.Errorf("failed to initiate multipart upload: %w", err) + } + + chunks := chunk.NewChunkGroup(file, handler.Policy.OptionsSerialized.ChunkSize, &backoff.ConstantBackoff{ + Max: model.GetIntSetting("chunk_retries", 5), + Sleep: chunkRetrySleep, + }, model.IsTrueVal(model.GetSettingByName("use_temp_chunk_buffer"))) + + uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error { + _, err := handler.bucket.UploadPart(imur, content, current.Length(), current.Index()+1) + return err + } + + for chunks.Next() { + if err := chunks.Process(uploadFunc); err != nil { + return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err) + } + } + + _, err = handler.bucket.CompleteMultipartUpload(imur, oss.CompleteAll("yes"), oss.ForbidOverWrite(!overwrite)) + return err +} + +// Delete 删除一个或多个文件, +// 返回未删除的文件 +func (handler *Driver) Delete(ctx context.Context, files []string) ([]string, error) { + // 删除文件 + delRes, err := handler.bucket.DeleteObjects(files) + + if err != nil { + return files, err + } + + // 统计未删除的文件 + failed := util.SliceDifference(files, delRes.DeletedObjects) + if len(failed) > 0 { + return failed, errors.New("failed to delete") + } + + return []string{}, nil +} + +// Thumb 获取文件缩略图 +func (handler *Driver) Thumb(ctx context.Context, file *model.File) (*response.ContentResponse, error) { + // quick check by extension name + // 
https://help.aliyun.com/document_detail/183902.html + supported := []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "heic", "tiff", "avif"} + if len(handler.Policy.OptionsSerialized.ThumbExts) > 0 { + supported = handler.Policy.OptionsSerialized.ThumbExts + } + + if !util.IsInExtensionList(supported, file.Name) || file.Size > (20<<(10*2)) { + return nil, driver.ErrorThumbNotSupported + } + + // 初始化客户端 + if err := handler.InitOSSClient(true); err != nil { + return nil, err + } + + var ( + thumbSize = [2]uint{400, 300} + ok = false + ) + if thumbSize, ok = ctx.Value(fsctx.ThumbSizeCtx).([2]uint); !ok { + return nil, errors.New("failed to get thumbnail size") + } + + thumbEncodeQuality := model.GetIntSetting("thumb_encode_quality", 85) + + thumbParam := fmt.Sprintf("image/resize,m_lfit,h_%d,w_%d/quality,q_%d", thumbSize[1], thumbSize[0], thumbEncodeQuality) + ctx = context.WithValue(ctx, fsctx.ThumbSizeCtx, thumbParam) + thumbOption := []oss.Option{oss.Process(thumbParam)} + thumbURL, err := handler.signSourceURL( + ctx, + file.SourceName, + int64(model.GetIntSetting("preview_timeout", 60)), + thumbOption, + ) + if err != nil { + return nil, err + } + + return &response.ContentResponse{ + Redirect: true, + URL: thumbURL, + }, nil +} + +// Source 获取外链URL +func (handler *Driver) Source(ctx context.Context, path string, ttl int64, isDownload bool, speed int) (string, error) { + // 初始化客户端 + usePublicEndpoint := true + if forceUsePublicEndpoint, ok := ctx.Value(fsctx.ForceUsePublicEndpointCtx).(bool); ok { + usePublicEndpoint = forceUsePublicEndpoint + } + if err := handler.InitOSSClient(usePublicEndpoint); err != nil { + return "", err + } + + // 尝试从上下文获取文件名 + fileName := "" + if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok { + fileName = file.Name + } + + // 添加各项设置 + var signOptions = make([]oss.Option, 0, 2) + if isDownload { + signOptions = append(signOptions, oss.ResponseContentDisposition("attachment; filename=\""+url.PathEscape(fileName)+"\"")) + } 
+ if speed > 0 { + // Byte 转换为 bit + speed *= 8 + + // OSS对速度值有范围限制 + if speed < 819200 { + speed = 819200 + } + if speed > 838860800 { + speed = 838860800 + } + signOptions = append(signOptions, oss.TrafficLimitParam(int64(speed))) + } + + return handler.signSourceURL(ctx, path, ttl, signOptions) +} + +func (handler *Driver) signSourceURL(ctx context.Context, path string, ttl int64, options []oss.Option) (string, error) { + signedURL, err := handler.bucket.SignURL(path, oss.HTTPGet, ttl, options...) + if err != nil { + return "", err + } + + // 将最终生成的签名URL域名换成用户自定义的加速域名(如果有) + finalURL, err := url.Parse(signedURL) + if err != nil { + return "", err + } + + // 公有空间替换掉Key及不支持的头 + if !handler.Policy.IsPrivate { + query := finalURL.Query() + query.Del("OSSAccessKeyId") + query.Del("Signature") + query.Del("response-content-disposition") + query.Del("x-oss-traffic-limit") + finalURL.RawQuery = query.Encode() + } + + if handler.Policy.BaseURL != "" { + cdnURL, err := url.Parse(handler.Policy.BaseURL) + if err != nil { + return "", err + } + finalURL.Host = cdnURL.Host + finalURL.Scheme = cdnURL.Scheme + } + + return finalURL.String(), nil +} + +// Token 获取上传策略和认证Token +func (handler *Driver) Token(ctx context.Context, ttl int64, uploadSession *serializer.UploadSession, file fsctx.FileHeader) (*serializer.UploadCredential, error) { + // 初始化客户端 + if err := handler.InitOSSClient(true); err != nil { + return nil, err + } + + // 生成回调地址 + siteURL := model.GetSiteURL() + apiBaseURI, _ := url.Parse("/api/v3/callback/oss/" + uploadSession.Key) + apiURL := siteURL.ResolveReference(apiBaseURI) + + // 回调策略 + callbackPolicy := CallbackPolicy{ + CallbackURL: apiURL.String(), + CallbackBody: `{"name":${x:fname},"source_name":${object},"size":${size},"pic_info":"${imageInfo.width},${imageInfo.height}"}`, + CallbackBodyType: "application/json", + } + callbackPolicyJSON, err := json.Marshal(callbackPolicy) + if err != nil { + return nil, fmt.Errorf("failed to encode callback policy: %w", 
err) + } + callbackPolicyEncoded := base64.StdEncoding.EncodeToString(callbackPolicyJSON) + + // 初始化分片上传 + fileInfo := file.Info() + options := []oss.Option{ + oss.Expires(time.Now().Add(time.Duration(ttl) * time.Second)), + oss.ForbidOverWrite(true), + oss.ContentType(fileInfo.DetectMimeType()), + } + imur, err := handler.bucket.InitiateMultipartUpload(fileInfo.SavePath, options...) + if err != nil { + return nil, fmt.Errorf("failed to initialize multipart upload: %w", err) + } + uploadSession.UploadID = imur.UploadID + + // 为每个分片签名上传 URL + chunks := chunk.NewChunkGroup(file, handler.Policy.OptionsSerialized.ChunkSize, &backoff.ConstantBackoff{}, false) + urls := make([]string, chunks.Num()) + for chunks.Next() { + err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error { + signedURL, err := handler.bucket.SignURL(fileInfo.SavePath, oss.HTTPPut, ttl, + oss.PartNumber(c.Index()+1), + oss.UploadID(imur.UploadID), + oss.ContentType("application/octet-stream")) + if err != nil { + return err + } + + urls[c.Index()] = signedURL + return nil + }) + if err != nil { + return nil, err + } + } + + // 签名完成分片上传的URL + completeURL, err := handler.bucket.SignURL(fileInfo.SavePath, oss.HTTPPost, ttl, + oss.ContentType("application/octet-stream"), + oss.UploadID(imur.UploadID), + oss.Expires(time.Now().Add(time.Duration(ttl)*time.Second)), + oss.CompleteAll("yes"), + oss.ForbidOverWrite(true), + oss.CallbackParam(callbackPolicyEncoded)) + if err != nil { + return nil, err + } + + return &serializer.UploadCredential{ + SessionID: uploadSession.Key, + ChunkSize: handler.Policy.OptionsSerialized.ChunkSize, + UploadID: imur.UploadID, + UploadURLs: urls, + CompleteURL: completeURL, + }, nil +} + +// 取消上传凭证 +func (handler *Driver) CancelToken(ctx context.Context, uploadSession *serializer.UploadSession) error { + return handler.bucket.AbortMultipartUpload(oss.InitiateMultipartUploadResult{UploadID: uploadSession.UploadID, Key: uploadSession.SavePath}, nil) +} diff --git 
package qiniu

import (
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"net/http"
	"net/url"
	"path"
	"path/filepath"
	"strings"
	"time"

	model "github.com/cloudreve/Cloudreve/v3/models"
	"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver"
	"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx"
	"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/response"
	"github.com/cloudreve/Cloudreve/v3/pkg/request"
	"github.com/cloudreve/Cloudreve/v3/pkg/serializer"
	"github.com/cloudreve/Cloudreve/v3/pkg/util"
	"github.com/qiniu/go-sdk/v7/auth/qbox"
	"github.com/qiniu/go-sdk/v7/storage"
)

// Driver is the storage-policy adapter for Qiniu object storage.
type Driver struct {
	Policy *model.Policy
	mac    *qbox.Mac
	cfg    *storage.Config
	bucket *storage.BucketManager
}

// NewDriver builds a Qiniu driver from the given policy, defaulting the
// chunk size to 25 MB when unset.
func NewDriver(policy *model.Policy) *Driver {
	if policy.OptionsSerialized.ChunkSize == 0 {
		policy.OptionsSerialized.ChunkSize = 25 << 20 // 25 MB
	}

	mac := qbox.NewMac(policy.AccessKey, policy.SecretKey)
	cfg := &storage.Config{UseHTTPS: true}
	return &Driver{
		Policy: policy,
		mac:    mac,
		cfg:    cfg,
		bucket: storage.NewBucketManager(mac, cfg),
	}
}

// List enumerates objects under base. When recursive is false a "/"
// delimiter is used so only immediate children are returned.
func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) {
	base = strings.TrimPrefix(base, "/")
	if base != "" {
		base += "/"
	}

	var (
		delimiter string
		marker    string
		objects   []storage.ListItem
		commons   []string
	)
	if !recursive {
		delimiter = "/"
	}

	// Page through the listing, 1000 entries at a time.
	for {
		entries, folders, nextMarker, hashNext, err := handler.bucket.ListFiles(
			handler.Policy.BucketName,
			base, delimiter, marker, 1000)
		if err != nil {
			return nil, err
		}
		objects = append(objects, entries...)
		commons = append(commons, folders...)
		if !hashNext {
			break
		}
		marker = nextMarker
	}

	// Convert the listing into response objects.
	res := make([]response.Object, 0, len(objects)+len(commons))
	// Directories (common prefixes).
	for _, object := range commons {
		rel, err := filepath.Rel(base, object)
		if err != nil {
			continue
		}
		res = append(res, response.Object{
			Name:         path.Base(object),
			RelativePath: filepath.ToSlash(rel),
			Size:         0,
			IsDir:        true,
			LastModify:   time.Now(),
		})
	}
	// Files. Qiniu PutTime is in 100ns units, hence the /10000000 to seconds.
	for _, object := range objects {
		rel, err := filepath.Rel(base, object.Key)
		if err != nil {
			continue
		}
		res = append(res, response.Object{
			Name:         path.Base(object.Key),
			Source:       object.Key,
			RelativePath: filepath.ToSlash(rel),
			Size:         uint64(object.Fsize),
			IsDir:        false,
			LastModify:   time.Unix(object.PutTime/10000000, 0),
		})
	}

	return res, nil
}

// Get opens the file at path as a stream via its signed source URL,
// appending a nanosecond query parameter to defeat caches.
func (handler *Driver) Get(ctx context.Context, path string) (response.RSCloser, error) {
	// Cache-bust with a per-call query parameter.
	path = fmt.Sprintf("%s?v=%d", path, time.Now().UnixNano())

	// Sign a short-lived source URL.
	downloadURL, err := handler.Source(ctx, path, int64(model.GetIntSetting("preview_timeout", 60)), false, 0)
	if err != nil {
		return nil, err
	}

	// Fetch the object body as a stream, explicitly disabling caching.
	client := request.NewClient()
	resp, err := client.Request(
		"GET",
		downloadURL,
		nil,
		request.WithContext(ctx),
		request.WithHeader(
			http.Header{"Cache-Control": {"no-cache", "no-store", "must-revalidate"}},
		),
		request.WithTimeout(time.Duration(0)),
	).CheckHTTPResponse(200).GetRSCloser()
	if err != nil {
		return nil, err
	}

	resp.SetFirstFakeChunk()

	// If the caller attached the file model, use its recorded size.
	if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok {
		resp.SetContentLength(int64(file.Size))
	}

	return resp, nil
}

// Put stores the file stream on Qiniu via the form uploader, using a policy
// scoped to allow overwrite only when the Overwrite mode flag is set.
func (handler *Driver) Put(ctx context.Context, file fsctx.FileHeader) error {
	defer file.Close()

	// Credential validity window.
	credentialTTL := model.GetIntSetting("upload_session_timeout", 3600)

	// Build the upload policy. Scoping to "bucket:key" makes the token an
	// overwrite token for that exact key.
	fileInfo := file.Info()
	scope := handler.Policy.BucketName
	if fileInfo.Mode&fsctx.Overwrite == fsctx.Overwrite {
		scope = fmt.Sprintf("%s:%s", handler.Policy.BucketName, fileInfo.SavePath)
	}

	putPolicy := storage.PutPolicy{
		// Overwrite-capable scope when requested.
		Scope:        scope,
		SaveKey:      fileInfo.SavePath,
		ForceSaveKey: true,
		FsizeLimit:   int64(fileInfo.Size),
	}
	// Apply a MIME-type restriction when configured on the policy.
	if handler.Policy.OptionsSerialized.MimeType != "" {
		putPolicy.MimeLimit = handler.Policy.OptionsSerialized.MimeType
	}

	// Sign the upload token (no resumable session needed here).
	token, err := handler.getUploadCredential(ctx, putPolicy, fileInfo, int64(credentialTTL), false)
	if err != nil {
		return err
	}

	// Form-upload the file in one request.
	cfg := storage.Config{}
	formUploader := storage.NewFormUploader(&cfg)
	ret := storage.PutRet{}
	putExtra := storage.PutExtra{
		Params: map[string]string{},
	}

	err = formUploader.Put(ctx, &ret, token.Credential, fileInfo.SavePath, file, int64(fileInfo.Size), &putExtra)
	if err != nil {
		return err
	}

	return nil
}

// Delete removes one or more objects in a single batch request and returns
// the keys that were not deleted. Qiniu status 612 (key not found) is
// treated as success.
func (handler *Driver) Delete(ctx context.Context, files []string) ([]string, error) {
	// TODO batches of more than 1000 keys must be split into multiple requests
	deleteOps := make([]string, 0, len(files))
	for _, key := range files {
		deleteOps = append(deleteOps, storage.URIDelete(handler.Policy.BucketName, key))
	}

	rets, err := handler.bucket.Batch(deleteOps)

	// Collect per-key failures from the batch result.
	if err != nil {
		failed := make([]string, 0, len(rets))
		for k, ret := range rets {
			if ret.Code != 200 && ret.Code != 612 {
				failed = append(failed, files[k])
			}
		}
		return failed, errors.New("删除失败")
	}

	return []string{}, nil
}

// Thumb returns a redirect to a Qiniu imageView2 URL rendering a thumbnail.
// Unsupported extensions or files over 20 MB are rejected with
// driver.ErrorThumbNotSupported.
func (handler *Driver) Thumb(ctx context.Context, file *model.File) (*response.ContentResponse, error) {
	// quick check by extension name
	// https://developer.qiniu.com/dora/api/basic-processing-images-imageview2
	supported := []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "tiff", "avif", "psd"}
	if len(handler.Policy.OptionsSerialized.ThumbExts) > 0 {
		supported = handler.Policy.OptionsSerialized.ThumbExts
	}

	if !util.IsInExtensionList(supported, file.Name) || file.Size > (20<<(10*2)) {
		return nil, driver.ErrorThumbNotSupported
	}

	// NOTE(review): the default {400, 300} is dead — the ctx value either
	// overwrites it or the function errors out.
	var (
		thumbSize = [2]uint{400, 300}
		ok        = false
	)
	if thumbSize, ok = ctx.Value(fsctx.ThumbSizeCtx).([2]uint); !ok {
		return nil, errors.New("failed to get thumbnail size")
	}

	thumbEncodeQuality := model.GetIntSetting("thumb_encode_quality", 85)

	thumb := fmt.Sprintf("%s?imageView2/1/w/%d/h/%d/q/%d", file.SourceName, thumbSize[0], thumbSize[1], thumbEncodeQuality)
	return &response.ContentResponse{
		Redirect: true,
		URL: handler.signSourceURL(
			ctx,
			thumb,
			int64(model.GetIntSetting("preview_timeout", 60)),
		),
	}, nil
}

// Source returns an external URL for the object; isDownload appends Qiniu's
// attname parameter so the browser saves it under the original filename.
func (handler *Driver) Source(ctx context.Context, path string, ttl int64, isDownload bool, speed int) (string, error) {
	// Use the file model's name for the download filename when available.
	fileName := ""
	if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok {
		fileName = file.Name
	}

	// Force attachment download with the original name.
	if isDownload {
		path = path + "?attname=" + url.PathEscape(fileName)
	}

	// Sign (private bucket) or compose (public bucket) the final URL.
	return handler.signSourceURL(ctx, path, ttl), nil
}

// signSourceURL builds the object URL, signing it with a deadline for
// private buckets and returning a plain public URL otherwise.
func (handler *Driver) signSourceURL(ctx context.Context, path string, ttl int64) string {
	var sourceURL string
	if handler.Policy.IsPrivate {
		deadline := time.Now().Add(time.Second * time.Duration(ttl)).Unix()
		sourceURL = storage.MakePrivateURL(handler.mac, handler.Policy.BaseURL, path, deadline)
	} else {
		sourceURL = storage.MakePublicURL(handler.Policy.BaseURL, path)
	}
	return sourceURL
}

// Token prepares a client-side resumable upload: it signs a put policy with
// a completion callback and initializes a v2 resumable-upload session.
func (handler *Driver) Token(ctx context.Context, ttl int64, uploadSession *serializer.UploadSession, file fsctx.FileHeader) (*serializer.UploadCredential, error) {
	// Build the callback URL Qiniu will hit when the upload completes.
	siteURL := model.GetSiteURL()
	apiBaseURI, _ := url.Parse("/api/v3/callback/qiniu/" + uploadSession.Key)
	apiURL := siteURL.ResolveReference(apiBaseURI)

	// Upload policy with completion callback.
	fileInfo := file.Info()
	putPolicy := storage.PutPolicy{
		Scope:            handler.Policy.BucketName,
		CallbackURL:      apiURL.String(),
		CallbackBody:     `{"size":$(fsize),"pic_info":"$(imageInfo.width),$(imageInfo.height)"}`,
		CallbackBodyType: "application/json",
		SaveKey:          fileInfo.SavePath,
		ForceSaveKey:     true,
		FsizeLimit:       int64(handler.Policy.MaxSize),
	}
	// Apply a MIME-type restriction when configured on the policy.
	if handler.Policy.OptionsSerialized.MimeType != "" {
		putPolicy.MimeLimit = handler.Policy.OptionsSerialized.MimeType
	}

	credential, err := handler.getUploadCredential(ctx, putPolicy, fileInfo, ttl, true)
	if err != nil {
		return nil, fmt.Errorf("failed to init parts: %w", err)
	}

	credential.SessionID = uploadSession.Key
	credential.ChunkSize = handler.Policy.OptionsSerialized.ChunkSize

	uploadSession.UploadURL = credential.UploadURLs[0]
	uploadSession.Credential = credential.Credential

	return credential, nil
}

// getUploadCredential signs the put policy into an upload token and, when
// resume is true, initializes a v2 resumable-upload session, returning the
// per-session upload URL and credential.
func (handler *Driver) getUploadCredential(ctx context.Context, policy storage.PutPolicy, file *fsctx.UploadTaskInfo, TTL int64, resume bool) (*serializer.UploadCredential, error) {
	// Sign the upload token.
	policy.Expires = uint64(TTL)
	upToken := policy.UploadToken(handler.mac)

	// Resolve the upload host and (optionally) open a resumable session.
	resumeUploader := storage.NewResumeUploaderV2(handler.cfg)
	upHost, err := resumeUploader.UpHost(handler.Policy.AccessKey, handler.Policy.BucketName)
	if err != nil {
		return nil, err
	}

	ret := &storage.InitPartsRet{}
	if resume {
		err = resumeUploader.InitParts(ctx, upToken, upHost, handler.Policy.BucketName, file.SavePath, true, ret)
	}

	return &serializer.UploadCredential{
		UploadURLs: []string{upHost + "/buckets/" + handler.Policy.BucketName + "/objects/" + base64.URLEncoding.EncodeToString([]byte(file.SavePath)) + "/uploads/" + ret.UploadID},
		Credential: upToken,
	}, err
}

// CancelToken aborts the resumable-upload session on Qiniu.
// NOTE(review): this method uses a value receiver while every other method
// uses *Driver — harmless here (no mutation), but worth unifying upstream.
func (handler Driver) CancelToken(ctx context.Context, uploadSession *serializer.UploadSession) error {
	resumeUploader := storage.NewResumeUploaderV2(handler.cfg)
	return resumeUploader.Client.CallWith(ctx, nil, "DELETE", uploadSession.UploadURL, http.Header{"Authorization": {"UpToken " + uploadSession.Credential}}, nil, 0)
}

package remote

import (
	"context"
	"encoding/json"
	"fmt"
	model "github.com/cloudreve/Cloudreve/v3/models"
	"github.com/cloudreve/Cloudreve/v3/pkg/auth"
	"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/chunk"
	"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/chunk/backoff"
	"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx"
	"github.com/cloudreve/Cloudreve/v3/pkg/request"
	"github.com/cloudreve/Cloudreve/v3/pkg/serializer"
	"github.com/cloudreve/Cloudreve/v3/pkg/util"
	"github.com/gofrs/uuid"
	"io"
	"net/http"
	"net/url"
	"path"
	"strings"
	"time"
)

const (
	basePath        = "/api/v3/slave/"
	OverwriteHeader = auth.CrHeaderPrefix + "Overwrite"
	chunkRetrySleep = time.Duration(5) * time.Second
)

// Client to operate uploading to remote slave server
type Client interface {
	// CreateUploadSession creates remote upload session
	CreateUploadSession(ctx context.Context, session *serializer.UploadSession, ttl int64, overwrite bool) error
	// GetUploadURL signs an url for uploading file
	GetUploadURL(ttl int64, sessionID string) (string, string, error)
	// Upload uploads file to remote server
	Upload(ctx context.Context, file fsctx.FileHeader) error
	// DeleteUploadSession deletes remote upload session
	DeleteUploadSession(ctx context.Context, sessionID string) error
}
// NewClient creates a Client for the slave described by policy. The HTTP
// client is pre-configured with the slave API base path, HMAC credentials
// (signed with the policy secret, TTL from "slave_api_timeout"), and
// master/slave identification metadata.
func NewClient(policy *model.Policy) (Client, error) {
	authInstance := auth.HMACAuth{[]byte(policy.SecretKey)}
	serverURL, err := url.Parse(policy.Server)
	if err != nil {
		return nil, err
	}

	base, _ := url.Parse(basePath)
	signTTL := model.GetIntSetting("slave_api_timeout", 60)

	return &remoteClient{
		policy:       policy,
		authInstance: authInstance,
		httpClient: request.NewClient(
			request.WithEndpoint(serverURL.ResolveReference(base).String()),
			request.WithCredential(authInstance, int64(signTTL)),
			request.WithMasterMeta(),
			request.WithSlaveMeta(policy.AccessKey),
		),
	}, nil
}

// remoteClient is the concrete Client implementation talking to one slave.
type remoteClient struct {
	policy       *model.Policy
	authInstance auth.Auth
	httpClient   request.Client
}

// Upload streams the file to the slave: it creates an upload session, then
// sends the file chunk by chunk with retries; on a chunk failure the session
// is deleted (best effort) before returning the error.
func (c *remoteClient) Upload(ctx context.Context, file fsctx.FileHeader) error {
	ttl := model.GetIntSetting("upload_session_timeout", 86400)
	fileInfo := file.Info()
	session := &serializer.UploadSession{
		Key:          uuid.Must(uuid.NewV4()).String(),
		VirtualPath:  fileInfo.VirtualPath,
		Name:         fileInfo.FileName,
		Size:         fileInfo.Size,
		SavePath:     fileInfo.SavePath,
		LastModified: fileInfo.LastModified,
		Policy:       *c.policy,
	}

	// Create upload session
	overwrite := fileInfo.Mode&fsctx.Overwrite == fsctx.Overwrite
	if err := c.CreateUploadSession(ctx, session, int64(ttl), overwrite); err != nil {
		return fmt.Errorf("failed to create upload session: %w", err)
	}

	// Initial chunk groups
	chunks := chunk.NewChunkGroup(file, c.policy.OptionsSerialized.ChunkSize, &backoff.ConstantBackoff{
		Max:   model.GetIntSetting("chunk_retries", 5),
		Sleep: chunkRetrySleep,
	}, model.IsTrueVal(model.GetSettingByName("use_temp_chunk_buffer")))

	uploadFunc := func(current *chunk.ChunkGroup, content io.Reader) error {
		return c.uploadChunk(ctx, session.Key, current.Index(), content, overwrite, current.Length())
	}

	// upload chunks
	for chunks.Next() {
		if err := chunks.Process(uploadFunc); err != nil {
			// Clean up the half-finished session; log but do not mask the
			// original chunk error.
			if err := c.DeleteUploadSession(ctx, session.Key); err != nil {
				util.Log().Warning("failed to delete upload session: %s", err)
			}

			return fmt.Errorf("failed to upload chunk #%d: %w", chunks.Index(), err)
		}
	}

	return nil
}

// DeleteUploadSession removes the upload session on the slave.
func (c *remoteClient) DeleteUploadSession(ctx context.Context, sessionID string) error {
	resp, err := c.httpClient.Request(
		"DELETE",
		"upload/"+sessionID,
		nil,
		request.WithContext(ctx),
	).CheckHTTPResponse(200).DecodeResponse()
	if err != nil {
		return err
	}

	if resp.Code != 0 {
		return serializer.NewErrorFromResponse(resp)
	}

	return nil
}

// CreateUploadSession registers an upload session on the slave with the
// given TTL and overwrite flag.
func (c *remoteClient) CreateUploadSession(ctx context.Context, session *serializer.UploadSession, ttl int64, overwrite bool) error {
	reqBodyEncoded, err := json.Marshal(map[string]interface{}{
		"session":   session,
		"ttl":       ttl,
		"overwrite": overwrite,
	})
	if err != nil {
		return err
	}

	bodyReader := strings.NewReader(string(reqBodyEncoded))
	resp, err := c.httpClient.Request(
		"PUT",
		"upload",
		bodyReader,
		request.WithContext(ctx),
	).CheckHTTPResponse(200).DecodeResponse()
	if err != nil {
		return err
	}

	if resp.Code != 0 {
		return serializer.NewErrorFromResponse(resp)
	}

	return nil
}

// GetUploadURL signs a direct-upload URL for the session and returns the URL
// together with its Authorization header value for the client to replay.
func (c *remoteClient) GetUploadURL(ttl int64, sessionID string) (string, string, error) {
	base, err := url.Parse(c.policy.Server)
	if err != nil {
		return "", "", err
	}

	base.Path = path.Join(base.Path, basePath, "upload", sessionID)
	req, err := http.NewRequest("POST", base.String(), nil)
	if err != nil {
		return "", "", err
	}

	req = auth.SignRequest(c.authInstance, req, ttl)
	return req.URL.String(), req.Header["Authorization"][0], nil
}

// uploadChunk POSTs one chunk of the session to the slave, carrying the
// overwrite flag in a custom header and the exact chunk length.
func (c *remoteClient) uploadChunk(ctx context.Context, sessionID string, index int, chunk io.Reader, overwrite bool, size int64) error {
	resp, err := c.httpClient.Request(
		"POST",
		fmt.Sprintf("upload/%s?chunk=%d", sessionID, index),
		chunk,
		request.WithContext(ctx),
		request.WithTimeout(time.Duration(0)),
		request.WithContentLength(size),
		request.WithHeader(map[string][]string{OverwriteHeader: {fmt.Sprintf("%t", overwrite)}}),
	).CheckHTTPResponse(200).DecodeResponse()
	if err != nil {
		return err
	}

	if resp.Code != 0 {
		return serializer.NewErrorFromResponse(resp)
	}

	return nil
}
package remote

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"net/url"
	"path"
	"path/filepath"
	"strings"
	"time"

	model "github.com/cloudreve/Cloudreve/v3/models"
	"github.com/cloudreve/Cloudreve/v3/pkg/auth"
	"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver"
	"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx"
	"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/response"
	"github.com/cloudreve/Cloudreve/v3/pkg/request"
	"github.com/cloudreve/Cloudreve/v3/pkg/serializer"
	"github.com/cloudreve/Cloudreve/v3/pkg/util"
)

// Driver is the storage-policy adapter for a remote (slave) storage node.
type Driver struct {
	Client       request.Client
	Policy       *model.Policy
	AuthInstance auth.Auth

	uploadClient Client
}

// NewDriver initializes a new Driver from policy
// TODO: refactor all method into upload client
func NewDriver(policy *model.Policy) (*Driver, error) {
	client, err := NewClient(policy)
	if err != nil {
		return nil, err
	}

	return &Driver{
		Policy:       policy,
		Client:       request.NewClient(),
		AuthInstance: auth.HMACAuth{[]byte(policy.SecretKey)},
		uploadClient: client,
	}, nil
}

// List asks the slave to enumerate files under path; the slave responds
// with a JSON-encoded []response.Object.
func (handler *Driver) List(ctx context.Context, path string, recursive bool) ([]response.Object, error) {
	var res []response.Object

	reqBody := serializer.ListRequest{
		Path:      path,
		Recursive: recursive,
	}
	reqBodyEncoded, err := json.Marshal(reqBody)
	if err != nil {
		return res, err
	}

	// Send the signed list request to the slave.
	bodyReader := strings.NewReader(string(reqBodyEncoded))
	signTTL := model.GetIntSetting("slave_api_timeout", 60)
	resp, err := handler.Client.Request(
		"POST",
		handler.getAPIUrl("list"),
		bodyReader,
		request.WithCredential(handler.AuthInstance, int64(signTTL)),
		request.WithMasterMeta(),
	).CheckHTTPResponse(200).DecodeResponse()
	if err != nil {
		return res, err
	}

	// A non-zero application code means the slave reported an error.
	if resp.Code != 0 {
		return res, errors.New(resp.Error)
	}

	// The payload arrives as a JSON string that itself decodes to the list.
	if resStr, ok := resp.Data.(string); ok {
		err = json.Unmarshal([]byte(resStr), &res)
		if err != nil {
			return res, err
		}
	}

	return res, nil
}

// getAPIUrl resolves the slave API endpoint for the given scope, appending
// any extra route segments. An unknown scope resolves to the server root.
func (handler *Driver) getAPIUrl(scope string, routes ...string) string {
	serverURL, err := url.Parse(handler.Policy.Server)
	if err != nil {
		return ""
	}
	var controller *url.URL

	switch scope {
	case "delete":
		controller, _ = url.Parse("/api/v3/slave/delete")
	case "thumb":
		controller, _ = url.Parse("/api/v3/slave/thumb")
	case "list":
		controller, _ = url.Parse("/api/v3/slave/list")
	default:
		controller = serverURL
	}

	for _, r := range routes {
		controller.Path = path.Join(controller.Path, r)
	}

	return serverURL.ResolveReference(controller).String()
}

// Get opens the file at path as a stream by signing a download URL on the
// slave (honoring the requesting user's group speed limit).
func (handler *Driver) Get(ctx context.Context, path string) (response.RSCloser, error) {
	// Pick up the user's speed limit when a user is attached to the context.
	speedLimit := 0
	if user, ok := ctx.Value(fsctx.UserCtx).(model.User); ok {
		speedLimit = user.Group.SpeedLimit
	}

	// Sign the download URL on the slave.
	downloadURL, err := handler.Source(ctx, path, 0, true, speedLimit)
	if err != nil {
		return nil, err
	}

	// Fetch the file body as a stream.
	resp, err := handler.Client.Request(
		"GET",
		downloadURL,
		nil,
		request.WithContext(ctx),
		request.WithTimeout(time.Duration(0)),
		request.WithMasterMeta(),
	).CheckHTTPResponse(200).GetRSCloser()
	if err != nil {
		return nil, err
	}

	resp.SetFirstFakeChunk()

	// If the caller attached the file model, use its recorded size.
	if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok {
		resp.SetContentLength(int64(file.Size))
	}

	return resp, nil
}

// Put delegates the upload to the slave upload client.
func (handler *Driver) Put(ctx context.Context, file fsctx.FileHeader) error {
	defer file.Close()

	return handler.uploadClient.Upload(ctx, file)
}

// Delete asks the slave to remove one or more files and returns the files
// that were not deleted together with the last error encountered.
func (handler *Driver) Delete(ctx context.Context, files []string) ([]string, error) {
	// Encode the delete request body.
	reqBody := serializer.RemoteDeleteRequest{
		Files: files,
	}
	reqBodyEncoded, err := json.Marshal(reqBody)
	if err != nil {
		return files, err
	}

	// Send the signed delete request to the slave.
	bodyReader := strings.NewReader(string(reqBodyEncoded))
	signTTL := model.GetIntSetting("slave_api_timeout", 60)
	resp, err := handler.Client.Request(
		"POST",
		handler.getAPIUrl("delete"),
		bodyReader,
		request.WithCredential(handler.AuthInstance, int64(signTTL)),
		request.WithMasterMeta(),
		request.WithSlaveMeta(handler.Policy.AccessKey),
	).CheckHTTPResponse(200).GetResponse()
	if err != nil {
		return files, err
	}

	// Decode the slave's response; a non-zero code may carry the subset of
	// files that failed, itself JSON-encoded inside Data.
	var reqResp serializer.Response
	err = json.Unmarshal([]byte(resp), &reqResp)
	if err != nil {
		return files, err
	}
	if reqResp.Code != 0 {
		var failedResp serializer.RemoteDeleteRequest
		if failed, ok := reqResp.Data.(string); ok {
			err = json.Unmarshal([]byte(failed), &failedResp)
			if err == nil {
				return failedResp.Files, errors.New(reqResp.Error)
			}
		}
		return files, errors.New("unknown format of returned response")
	}

	return []string{}, nil
}

// Thumb returns a redirect to a signed thumbnail URL on the slave.
func (handler *Driver) Thumb(ctx context.Context, file *model.File) (*response.ContentResponse, error) {
	// quick check by extension name
	supported := []string{"png", "jpg", "jpeg", "gif"}
	if len(handler.Policy.OptionsSerialized.ThumbExts) > 0 {
		supported = handler.Policy.OptionsSerialized.ThumbExts
	}

	if !util.IsInExtensionList(supported, file.Name) {
		return nil, driver.ErrorThumbNotSupported
	}

	// Source path travels base64url-encoded inside the URL path.
	sourcePath := base64.RawURLEncoding.EncodeToString([]byte(file.SourceName))
	thumbURL := fmt.Sprintf("%s/%s/%s", handler.getAPIUrl("thumb"), sourcePath, filepath.Ext(file.Name))
	ttl := model.GetIntSetting("preview_timeout", 60)
	signedThumbURL, err := auth.SignURI(handler.AuthInstance, thumbURL, int64(ttl))
	if err != nil {
		return nil, err
	}

	return &response.ContentResponse{
		Redirect: true,
		URL:      signedThumbURL.String(),
	}, nil
}

// Source signs an external URL on the slave (or the configured CDN) for the
// object; isDownload selects the download controller, speed is passed to the
// slave as a rate limit.
func (handler *Driver) Source(ctx context.Context, path string, ttl int64, isDownload bool, speed int) (string, error) {
	// Use the file model's name for the visible filename when available.
	fileName := "file"
	if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok {
		fileName = file.Name
	}

	serverURL, err := url.Parse(handler.Policy.Server)
	if err != nil {
		return "", errors.New("无法解析远程服务端地址")
	}

	// Substitute the CDN base URL when one is configured.
	if handler.Policy.BaseURL != "" {
		cdnURL, err := url.Parse(handler.Policy.BaseURL)
		if err != nil {
			return "", err
		}
		serverURL = cdnURL
	}

	var (
		signedURI  *url.URL
		controller = "/api/v3/slave/download"
	)
	if !isDownload {
		controller = "/api/v3/slave/source"
	}

	// Sign the download/source URI (path is base64url-encoded).
	sourcePath := base64.RawURLEncoding.EncodeToString([]byte(path))
	signedURI, err = auth.SignURI(
		handler.AuthInstance,
		fmt.Sprintf("%s/%d/%s/%s", controller, speed, sourcePath, url.PathEscape(fileName)),
		ttl,
	)

	if err != nil {
		return "", serializer.NewError(serializer.CodeEncryptError, "Failed to sign URL", err)
	}

	finalURL := serverURL.ResolveReference(signedURI).String()
	return finalURL, nil

}
return &serializer.UploadCredential{ + SessionID: uploadSession.Key, + ChunkSize: handler.Policy.OptionsSerialized.ChunkSize, + UploadURLs: []string{uploadURL}, + Credential: sign, + }, nil +} + +// 取消上传凭证 +func (handler *Driver) CancelToken(ctx context.Context, uploadSession *serializer.UploadSession) error { + return handler.uploadClient.DeleteUploadSession(ctx, uploadSession.Key) +} diff --git a/pkg/filesystem/driver/s3/handler.go b/pkg/filesystem/driver/s3/handler.go new file mode 100644 index 0000000..56a7aaa --- /dev/null +++ b/pkg/filesystem/driver/s3/handler.go @@ -0,0 +1,440 @@ +package s3 + +import ( + "context" + "errors" + "fmt" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver" + "io" + "net/http" + "net/url" + "path" + "path/filepath" + "strings" + "time" + + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/chunk" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/chunk/backoff" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/response" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" +) + +// Driver 适配器模板 +type Driver struct { + Policy *model.Policy + sess *session.Session + svc *s3.S3 +} + +// UploadPolicy S3上传策略 +type UploadPolicy struct { + Expiration string `json:"expiration"` + Conditions []interface{} `json:"conditions"` +} + +// MetaData 文件信息 +type MetaData struct { + Size uint64 + Etag string +} + +func NewDriver(policy *model.Policy) (*Driver, error) { + if policy.OptionsSerialized.ChunkSize == 0 { + policy.OptionsSerialized.ChunkSize = 25 << 20 // 25 MB + } + + driver := &Driver{ + Policy: policy, + } + + 
return driver, driver.InitS3Client() +} + +// InitS3Client 初始化S3会话 +func (handler *Driver) InitS3Client() error { + if handler.Policy == nil { + return errors.New("empty policy") + } + + if handler.svc == nil { + // 初始化会话 + sess, err := session.NewSession(&aws.Config{ + Credentials: credentials.NewStaticCredentials(handler.Policy.AccessKey, handler.Policy.SecretKey, ""), + Endpoint: &handler.Policy.Server, + Region: &handler.Policy.OptionsSerialized.Region, + S3ForcePathStyle: &handler.Policy.OptionsSerialized.S3ForcePathStyle, + }) + + if err != nil { + return err + } + handler.sess = sess + handler.svc = s3.New(sess) + } + return nil +} + +// List 列出给定路径下的文件 +func (handler *Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) { + // 初始化列目录参数 + base = strings.TrimPrefix(base, "/") + if base != "" { + base += "/" + } + + opt := &s3.ListObjectsInput{ + Bucket: &handler.Policy.BucketName, + Prefix: &base, + MaxKeys: aws.Int64(1000), + } + + // 是否为递归列出 + if !recursive { + opt.Delimiter = aws.String("/") + } + + var ( + objects []*s3.Object + commons []*s3.CommonPrefix + ) + + for { + res, err := handler.svc.ListObjectsWithContext(ctx, opt) + if err != nil { + return nil, err + } + objects = append(objects, res.Contents...) + commons = append(commons, res.CommonPrefixes...) 
+ + // 如果本次未列取完,则继续使用marker获取结果 + if *res.IsTruncated { + opt.Marker = res.NextMarker + } else { + break + } + } + + // 处理列取结果 + res := make([]response.Object, 0, len(objects)+len(commons)) + + // 处理目录 + for _, object := range commons { + rel, err := filepath.Rel(*opt.Prefix, *object.Prefix) + if err != nil { + continue + } + res = append(res, response.Object{ + Name: path.Base(*object.Prefix), + RelativePath: filepath.ToSlash(rel), + Size: 0, + IsDir: true, + LastModify: time.Now(), + }) + } + // 处理文件 + for _, object := range objects { + rel, err := filepath.Rel(*opt.Prefix, *object.Key) + if err != nil { + continue + } + res = append(res, response.Object{ + Name: path.Base(*object.Key), + Source: *object.Key, + RelativePath: filepath.ToSlash(rel), + Size: uint64(*object.Size), + IsDir: false, + LastModify: time.Now(), + }) + } + + return res, nil + +} + +// Get 获取文件 +func (handler *Driver) Get(ctx context.Context, path string) (response.RSCloser, error) { + // 获取文件源地址 + downloadURL, err := handler.Source(ctx, path, int64(model.GetIntSetting("preview_timeout", 60)), false, 0) + if err != nil { + return nil, err + } + + // 获取文件数据流 + client := request.NewClient() + resp, err := client.Request( + "GET", + downloadURL, + nil, + request.WithContext(ctx), + request.WithHeader( + http.Header{"Cache-Control": {"no-cache", "no-store", "must-revalidate"}}, + ), + request.WithTimeout(time.Duration(0)), + ).CheckHTTPResponse(200).GetRSCloser() + if err != nil { + return nil, err + } + + resp.SetFirstFakeChunk() + + // 尝试自主获取文件大小 + if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok { + resp.SetContentLength(int64(file.Size)) + } + + return resp, nil +} + +// Put 将文件流保存到指定目录 +func (handler *Driver) Put(ctx context.Context, file fsctx.FileHeader) error { + defer file.Close() + + // 初始化客户端 + if err := handler.InitS3Client(); err != nil { + return err + } + + uploader := s3manager.NewUploader(handler.sess, func(u *s3manager.Uploader) { + u.PartSize = 
int64(handler.Policy.OptionsSerialized.ChunkSize) + }) + + dst := file.Info().SavePath + _, err := uploader.Upload(&s3manager.UploadInput{ + Bucket: &handler.Policy.BucketName, + Key: &dst, + Body: io.LimitReader(file, int64(file.Info().Size)), + }) + + if err != nil { + return err + } + + return nil +} + +// Delete 删除一个或多个文件, +// 返回未删除的文件,及遇到的最后一个错误 +func (handler *Driver) Delete(ctx context.Context, files []string) ([]string, error) { + failed := make([]string, 0, len(files)) + deleted := make([]string, 0, len(files)) + + keys := make([]*s3.ObjectIdentifier, 0, len(files)) + for _, file := range files { + filePath := file + keys = append(keys, &s3.ObjectIdentifier{Key: &filePath}) + } + + // 发送异步删除请求 + res, err := handler.svc.DeleteObjects( + &s3.DeleteObjectsInput{ + Bucket: &handler.Policy.BucketName, + Delete: &s3.Delete{ + Objects: keys, + }, + }) + + if err != nil { + return files, err + } + + // 统计未删除的文件 + for _, deleteRes := range res.Deleted { + deleted = append(deleted, *deleteRes.Key) + } + failed = util.SliceDifference(files, deleted) + + return failed, nil + +} + +// Thumb 获取文件缩略图 +func (handler *Driver) Thumb(ctx context.Context, file *model.File) (*response.ContentResponse, error) { + return nil, driver.ErrorThumbNotSupported +} + +// Source 获取外链URL +func (handler *Driver) Source(ctx context.Context, path string, ttl int64, isDownload bool, speed int) (string, error) { + + // 尝试从上下文获取文件名 + fileName := "" + if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok { + fileName = file.Name + } + + // 初始化客户端 + if err := handler.InitS3Client(); err != nil { + return "", err + } + + contentDescription := aws.String("attachment; filename=\"" + url.PathEscape(fileName) + "\"") + if !isDownload { + contentDescription = nil + } + req, _ := handler.svc.GetObjectRequest( + &s3.GetObjectInput{ + Bucket: &handler.Policy.BucketName, + Key: &path, + ResponseContentDisposition: contentDescription, + }) + + signedURL, err := req.Presign(time.Duration(ttl) * 
time.Second) + if err != nil { + return "", err + } + + // 将最终生成的签名URL域名换成用户自定义的加速域名(如果有) + finalURL, err := url.Parse(signedURL) + if err != nil { + return "", err + } + + // 公有空间替换掉Key及不支持的头 + if !handler.Policy.IsPrivate { + finalURL.RawQuery = "" + } + + if handler.Policy.BaseURL != "" { + cdnURL, err := url.Parse(handler.Policy.BaseURL) + if err != nil { + return "", err + } + finalURL.Host = cdnURL.Host + finalURL.Scheme = cdnURL.Scheme + } + + return finalURL.String(), nil +} + +// Token 获取上传策略和认证Token +func (handler *Driver) Token(ctx context.Context, ttl int64, uploadSession *serializer.UploadSession, file fsctx.FileHeader) (*serializer.UploadCredential, error) { + // 检查文件是否存在 + fileInfo := file.Info() + if _, err := handler.Meta(ctx, fileInfo.SavePath); err == nil { + return nil, fmt.Errorf("file already exist") + } + + // 创建分片上传 + expires := time.Now().Add(time.Duration(ttl) * time.Second) + res, err := handler.svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{ + Bucket: &handler.Policy.BucketName, + Key: &fileInfo.SavePath, + Expires: &expires, + ContentType: aws.String(fileInfo.DetectMimeType()), + }) + if err != nil { + return nil, fmt.Errorf("failed to create multipart upload: %w", err) + } + + uploadSession.UploadID = *res.UploadId + + // 为每个分片签名上传 URL + chunks := chunk.NewChunkGroup(file, handler.Policy.OptionsSerialized.ChunkSize, &backoff.ConstantBackoff{}, false) + urls := make([]string, chunks.Num()) + for chunks.Next() { + err := chunks.Process(func(c *chunk.ChunkGroup, chunk io.Reader) error { + signedReq, _ := handler.svc.UploadPartRequest(&s3.UploadPartInput{ + Bucket: &handler.Policy.BucketName, + Key: &fileInfo.SavePath, + PartNumber: aws.Int64(int64(c.Index() + 1)), + UploadId: res.UploadId, + }) + + signedURL, err := signedReq.Presign(time.Duration(ttl) * time.Second) + if err != nil { + return err + } + + urls[c.Index()] = signedURL + return nil + }) + if err != nil { + return nil, err + } + } + + // 签名完成分片上传的请求URL + signedReq, 
_ := handler.svc.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{ + Bucket: &handler.Policy.BucketName, + Key: &fileInfo.SavePath, + UploadId: res.UploadId, + }) + + signedURL, err := signedReq.Presign(time.Duration(ttl) * time.Second) + if err != nil { + return nil, err + } + + // 生成上传凭证 + return &serializer.UploadCredential{ + SessionID: uploadSession.Key, + ChunkSize: handler.Policy.OptionsSerialized.ChunkSize, + UploadID: *res.UploadId, + UploadURLs: urls, + CompleteURL: signedURL, + }, nil +} + +// Meta 获取文件信息 +func (handler *Driver) Meta(ctx context.Context, path string) (*MetaData, error) { + res, err := handler.svc.HeadObject( + &s3.HeadObjectInput{ + Bucket: &handler.Policy.BucketName, + Key: &path, + }) + + if err != nil { + return nil, err + } + + return &MetaData{ + Size: uint64(*res.ContentLength), + Etag: *res.ETag, + }, nil + +} + +// CORS 创建跨域策略 +func (handler *Driver) CORS() error { + rule := s3.CORSRule{ + AllowedMethods: aws.StringSlice([]string{ + "GET", + "POST", + "PUT", + "DELETE", + "HEAD", + }), + AllowedOrigins: aws.StringSlice([]string{"*"}), + AllowedHeaders: aws.StringSlice([]string{"*"}), + ExposeHeaders: aws.StringSlice([]string{"ETag"}), + MaxAgeSeconds: aws.Int64(3600), + } + + _, err := handler.svc.PutBucketCors(&s3.PutBucketCorsInput{ + Bucket: &handler.Policy.BucketName, + CORSConfiguration: &s3.CORSConfiguration{ + CORSRules: []*s3.CORSRule{&rule}, + }, + }) + + return err +} + +// 取消上传凭证 +func (handler *Driver) CancelToken(ctx context.Context, uploadSession *serializer.UploadSession) error { + _, err := handler.svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ + UploadId: &uploadSession.UploadID, + Bucket: &handler.Policy.BucketName, + Key: &uploadSession.SavePath, + }) + return err +} diff --git a/pkg/filesystem/driver/shadow/masterinslave/errors.go b/pkg/filesystem/driver/shadow/masterinslave/errors.go new file mode 100644 index 0000000..27d0428 --- /dev/null +++ 
b/pkg/filesystem/driver/shadow/masterinslave/errors.go @@ -0,0 +1,7 @@ +package masterinslave + +import "errors" + +var ( + ErrNotImplemented = errors.New("this method of shadowed policy is not implemented") +) diff --git a/pkg/filesystem/driver/shadow/masterinslave/handler.go b/pkg/filesystem/driver/shadow/masterinslave/handler.go new file mode 100644 index 0000000..d3f376a --- /dev/null +++ b/pkg/filesystem/driver/shadow/masterinslave/handler.go @@ -0,0 +1,60 @@ +package masterinslave + +import ( + "context" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cluster" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/response" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" +) + +// Driver 影子存储策略,用于在从机端上传文件 +type Driver struct { + master cluster.Node + handler driver.Handler + policy *model.Policy +} + +// NewDriver 返回新的处理器 +func NewDriver(master cluster.Node, handler driver.Handler, policy *model.Policy) driver.Handler { + return &Driver{ + master: master, + handler: handler, + policy: policy, + } +} + +func (d *Driver) Put(ctx context.Context, file fsctx.FileHeader) error { + return d.handler.Put(ctx, file) +} + +func (d *Driver) Delete(ctx context.Context, files []string) ([]string, error) { + return d.handler.Delete(ctx, files) +} + +func (d *Driver) Get(ctx context.Context, path string) (response.RSCloser, error) { + return nil, ErrNotImplemented +} + +func (d *Driver) Thumb(ctx context.Context, file *model.File) (*response.ContentResponse, error) { + return nil, ErrNotImplemented +} + +func (d *Driver) Source(ctx context.Context, path string, ttl int64, isDownload bool, speed int) (string, error) { + return "", ErrNotImplemented +} + +func (d *Driver) Token(ctx context.Context, ttl int64, uploadSession *serializer.UploadSession, file fsctx.FileHeader) (*serializer.UploadCredential, error) { + 
return nil, ErrNotImplemented +} + +func (d *Driver) List(ctx context.Context, path string, recursive bool) ([]response.Object, error) { + return nil, ErrNotImplemented +} + +// 取消上传凭证 +func (handler Driver) CancelToken(ctx context.Context, uploadSession *serializer.UploadSession) error { + return nil +} diff --git a/pkg/filesystem/driver/shadow/slaveinmaster/errors.go b/pkg/filesystem/driver/shadow/slaveinmaster/errors.go new file mode 100644 index 0000000..6acadc8 --- /dev/null +++ b/pkg/filesystem/driver/shadow/slaveinmaster/errors.go @@ -0,0 +1,9 @@ +package slaveinmaster + +import "errors" + +var ( + ErrNotImplemented = errors.New("this method of shadowed policy is not implemented") + ErrSlaveSrcPathNotExist = errors.New("cannot determine source file path in slave node") + ErrWaitResultTimeout = errors.New("timeout waiting for slave transfer result") +) diff --git a/pkg/filesystem/driver/shadow/slaveinmaster/handler.go b/pkg/filesystem/driver/shadow/slaveinmaster/handler.go new file mode 100644 index 0000000..bfcac26 --- /dev/null +++ b/pkg/filesystem/driver/shadow/slaveinmaster/handler.go @@ -0,0 +1,124 @@ +package slaveinmaster + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "net/url" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cluster" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/response" + "github.com/cloudreve/Cloudreve/v3/pkg/mq" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" +) + +// Driver 影子存储策略,将上传任务指派给从机节点处理,并等待从机通知上传结果 +type Driver struct { + node cluster.Node + handler driver.Handler + policy *model.Policy + client request.Client +} + +// NewDriver 返回新的从机指派处理器 +func NewDriver(node cluster.Node, handler driver.Handler, policy *model.Policy) driver.Handler { + var endpoint *url.URL + if 
serverURL, err := url.Parse(node.DBModel().Server); err == nil { + var controller *url.URL + controller, _ = url.Parse("/api/v3/slave/") + endpoint = serverURL.ResolveReference(controller) + } + + signTTL := model.GetIntSetting("slave_api_timeout", 60) + return &Driver{ + node: node, + handler: handler, + policy: policy, + client: request.NewClient( + request.WithMasterMeta(), + request.WithTimeout(time.Duration(signTTL)*time.Second), + request.WithCredential(node.SlaveAuthInstance(), int64(signTTL)), + request.WithEndpoint(endpoint.String()), + ), + } +} + +// Put 将ctx中指定的从机物理文件由从机上传到目标存储策略 +func (d *Driver) Put(ctx context.Context, file fsctx.FileHeader) error { + defer file.Close() + + fileInfo := file.Info() + req := serializer.SlaveTransferReq{ + Src: fileInfo.Src, + Dst: fileInfo.SavePath, + Policy: d.policy, + } + + body, err := json.Marshal(req) + if err != nil { + return err + } + + // 订阅转存结果 + resChan := mq.GlobalMQ.Subscribe(req.Hash(model.GetSettingByName("siteID")), 0) + defer mq.GlobalMQ.Unsubscribe(req.Hash(model.GetSettingByName("siteID")), resChan) + + res, err := d.client.Request("PUT", "task/transfer", bytes.NewReader(body)). + CheckHTTPResponse(200). 
+ DecodeResponse() + if err != nil { + return err + } + + if res.Code != 0 { + return serializer.NewErrorFromResponse(res) + } + + // 等待转存结果或者超时 + waitTimeout := model.GetIntSetting("slave_transfer_timeout", 172800) + select { + case <-time.After(time.Duration(waitTimeout) * time.Second): + return ErrWaitResultTimeout + case msg := <-resChan: + if msg.Event != serializer.SlaveTransferSuccess { + return errors.New(msg.Content.(serializer.SlaveTransferResult).Error) + } + } + + return nil +} + +func (d *Driver) Delete(ctx context.Context, files []string) ([]string, error) { + return d.handler.Delete(ctx, files) +} + +func (d *Driver) Get(ctx context.Context, path string) (response.RSCloser, error) { + return nil, ErrNotImplemented +} + +func (d *Driver) Thumb(ctx context.Context, file *model.File) (*response.ContentResponse, error) { + return nil, ErrNotImplemented +} + +func (d *Driver) Source(ctx context.Context, path string, ttl int64, isDownload bool, speed int) (string, error) { + return "", ErrNotImplemented +} + +func (d *Driver) Token(ctx context.Context, ttl int64, uploadSession *serializer.UploadSession, file fsctx.FileHeader) (*serializer.UploadCredential, error) { + return nil, ErrNotImplemented +} + +func (d *Driver) List(ctx context.Context, path string, recursive bool) ([]response.Object, error) { + return nil, ErrNotImplemented +} + +// 取消上传凭证 +func (d *Driver) CancelToken(ctx context.Context, uploadSession *serializer.UploadSession) error { + return nil +} diff --git a/pkg/filesystem/driver/upyun/handler.go b/pkg/filesystem/driver/upyun/handler.go new file mode 100644 index 0000000..a9d18d6 --- /dev/null +++ b/pkg/filesystem/driver/upyun/handler.go @@ -0,0 +1,358 @@ +package upyun + +import ( + "context" + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "sync" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + 
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/response" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/upyun/go-sdk/upyun" +) + +// UploadPolicy 又拍云上传策略 +type UploadPolicy struct { + Bucket string `json:"bucket"` + SaveKey string `json:"save-key"` + Expiration int64 `json:"expiration"` + CallbackURL string `json:"notify-url"` + ContentLength uint64 `json:"content-length"` + ContentLengthRange string `json:"content-length-range,omitempty"` + AllowFileType string `json:"allow-file-type,omitempty"` +} + +// Driver 又拍云策略适配器 +type Driver struct { + Policy *model.Policy +} + +func (handler Driver) List(ctx context.Context, base string, recursive bool) ([]response.Object, error) { + base = strings.TrimPrefix(base, "/") + + // 用于接受SDK返回对象的chan + objChan := make(chan *upyun.FileInfo) + objects := []*upyun.FileInfo{} + + // 列取配置 + listConf := &upyun.GetObjectsConfig{ + Path: "/" + base, + ObjectsChan: objChan, + MaxListTries: 1, + } + // 递归列取时不限制递归次数 + if recursive { + listConf.MaxListLevel = -1 + } + + // 启动一个goroutine收集列取的对象信 + wg := &sync.WaitGroup{} + wg.Add(1) + go func(input chan *upyun.FileInfo, output *[]*upyun.FileInfo, wg *sync.WaitGroup) { + defer wg.Done() + for { + file, ok := <-input + if !ok { + return + } + *output = append(*output, file) + } + }(objChan, &objects, wg) + + up := upyun.NewUpYun(&upyun.UpYunConfig{ + Bucket: handler.Policy.BucketName, + Operator: handler.Policy.AccessKey, + Password: handler.Policy.SecretKey, + }) + + err := up.List(listConf) + if err != nil { + return nil, err + } + + wg.Wait() + + // 汇总处理列取结果 + res := make([]response.Object, 0, len(objects)) + for _, object := range objects { + res = append(res, response.Object{ + Name: path.Base(object.Name), + RelativePath: object.Name, + Source: 
path.Join(base, object.Name), + Size: uint64(object.Size), + IsDir: object.IsDir, + LastModify: object.Time, + }) + } + + return res, nil +} + +// Get 获取文件 +func (handler Driver) Get(ctx context.Context, path string) (response.RSCloser, error) { + // 获取文件源地址 + downloadURL, err := handler.Source(ctx, path, int64(model.GetIntSetting("preview_timeout", 60)), false, 0) + if err != nil { + return nil, err + } + + // 获取文件数据流 + client := request.NewClient() + resp, err := client.Request( + "GET", + downloadURL, + nil, + request.WithContext(ctx), + request.WithHeader( + http.Header{"Cache-Control": {"no-cache", "no-store", "must-revalidate"}}, + ), + request.WithTimeout(time.Duration(0)), + ).CheckHTTPResponse(200).GetRSCloser() + if err != nil { + return nil, err + } + + resp.SetFirstFakeChunk() + + // 尝试自主获取文件大小 + if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok { + resp.SetContentLength(int64(file.Size)) + } + + return resp, nil + +} + +// Put 将文件流保存到指定目录 +func (handler Driver) Put(ctx context.Context, file fsctx.FileHeader) error { + defer file.Close() + + up := upyun.NewUpYun(&upyun.UpYunConfig{ + Bucket: handler.Policy.BucketName, + Operator: handler.Policy.AccessKey, + Password: handler.Policy.SecretKey, + }) + err := up.Put(&upyun.PutObjectConfig{ + Path: file.Info().SavePath, + Reader: file, + }) + + return err +} + +// Delete 删除一个或多个文件, +// 返回未删除的文件,及遇到的最后一个错误 +func (handler Driver) Delete(ctx context.Context, files []string) ([]string, error) { + up := upyun.NewUpYun(&upyun.UpYunConfig{ + Bucket: handler.Policy.BucketName, + Operator: handler.Policy.AccessKey, + Password: handler.Policy.SecretKey, + }) + + var ( + failed = make([]string, 0, len(files)) + lastErr error + currentIndex = 0 + indexLock sync.Mutex + failedLock sync.Mutex + wg sync.WaitGroup + routineNum = 4 + ) + wg.Add(routineNum) + + // upyun不支持批量操作,这里开四个协程并行操作 + for i := 0; i < routineNum; i++ { + go func() { + for { + // 取得待删除文件 + indexLock.Lock() + if currentIndex >= len(files) { + 
// 所有文件处理完成 + wg.Done() + indexLock.Unlock() + return + } + path := files[currentIndex] + currentIndex++ + indexLock.Unlock() + + // 发送异步删除请求 + err := up.Delete(&upyun.DeleteObjectConfig{ + Path: path, + Async: true, + }) + + // 处理错误 + if err != nil { + failedLock.Lock() + lastErr = err + failed = append(failed, path) + failedLock.Unlock() + } + } + }() + } + + wg.Wait() + + return failed, lastErr +} + +// Thumb 获取文件缩略图 +func (handler Driver) Thumb(ctx context.Context, file *model.File) (*response.ContentResponse, error) { + // quick check by extension name + // https://help.upyun.com/knowledge-base/image/ + supported := []string{"png", "jpg", "jpeg", "gif", "bmp", "webp", "svg"} + if len(handler.Policy.OptionsSerialized.ThumbExts) > 0 { + supported = handler.Policy.OptionsSerialized.ThumbExts + } + + if !util.IsInExtensionList(supported, file.Name) { + return nil, driver.ErrorThumbNotSupported + } + + var ( + thumbSize = [2]uint{400, 300} + ok = false + ) + if thumbSize, ok = ctx.Value(fsctx.ThumbSizeCtx).([2]uint); !ok { + return nil, errors.New("failed to get thumbnail size") + } + + thumbEncodeQuality := model.GetIntSetting("thumb_encode_quality", 85) + + thumbParam := fmt.Sprintf("!/fwfh/%dx%d/quality/%d", thumbSize[0], thumbSize[1], thumbEncodeQuality) + thumbURL, err := handler.Source(ctx, file.SourceName+thumbParam, int64(model.GetIntSetting("preview_timeout", 60)), false, 0) + if err != nil { + return nil, err + } + + return &response.ContentResponse{ + Redirect: true, + URL: thumbURL, + }, nil +} + +// Source 获取外链URL +func (handler Driver) Source(ctx context.Context, path string, ttl int64, isDownload bool, speed int) (string, error) { + // 尝试从上下文获取文件名 + fileName := "" + if file, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok { + fileName = file.Name + } + + sourceURL, err := url.Parse(handler.Policy.BaseURL) + if err != nil { + return "", err + } + + fileKey, err := url.Parse(url.PathEscape(path)) + if err != nil { + return "", err + } + + 
sourceURL = sourceURL.ResolveReference(fileKey) + + // 如果是下载文件URL + if isDownload { + query := sourceURL.Query() + query.Add("_upd", fileName) + sourceURL.RawQuery = query.Encode() + } + + return handler.signURL(ctx, sourceURL, ttl) +} + +func (handler Driver) signURL(ctx context.Context, path *url.URL, TTL int64) (string, error) { + if !handler.Policy.IsPrivate { + // 未开启Token防盗链时,直接返回 + return path.String(), nil + } + + etime := time.Now().Add(time.Duration(TTL) * time.Second).Unix() + signStr := fmt.Sprintf( + "%s&%d&%s", + handler.Policy.OptionsSerialized.Token, + etime, + path.Path, + ) + signMd5 := fmt.Sprintf("%x", md5.Sum([]byte(signStr))) + finalSign := signMd5[12:20] + strconv.FormatInt(etime, 10) + + // 将签名添加到URL中 + query := path.Query() + query.Add("_upt", finalSign) + path.RawQuery = query.Encode() + + return path.String(), nil +} + +// Token 获取上传策略和认证Token +func (handler Driver) Token(ctx context.Context, ttl int64, uploadSession *serializer.UploadSession, file fsctx.FileHeader) (*serializer.UploadCredential, error) { + // 生成回调地址 + siteURL := model.GetSiteURL() + apiBaseURI, _ := url.Parse("/api/v3/callback/upyun/" + uploadSession.Key) + apiURL := siteURL.ResolveReference(apiBaseURI) + + // 上传策略 + fileInfo := file.Info() + putPolicy := UploadPolicy{ + Bucket: handler.Policy.BucketName, + // TODO escape + SaveKey: fileInfo.SavePath, + Expiration: time.Now().Add(time.Duration(ttl) * time.Second).Unix(), + CallbackURL: apiURL.String(), + ContentLength: fileInfo.Size, + ContentLengthRange: fmt.Sprintf("0,%d", fileInfo.Size), + AllowFileType: strings.Join(handler.Policy.OptionsSerialized.FileType, ","), + } + + // 生成上传凭证 + policyJSON, err := json.Marshal(putPolicy) + if err != nil { + return nil, err + } + policyEncoded := base64.StdEncoding.EncodeToString(policyJSON) + + // 生成签名 + elements := []string{"POST", "/" + handler.Policy.BucketName, policyEncoded} + signStr := handler.Sign(ctx, elements) + + return &serializer.UploadCredential{ + SessionID: 
uploadSession.Key, + Policy: policyEncoded, + Credential: signStr, + UploadURLs: []string{"https://v0.api.upyun.com/" + handler.Policy.BucketName}, + }, nil +} + +// 取消上传凭证 +func (handler Driver) CancelToken(ctx context.Context, uploadSession *serializer.UploadSession) error { + return nil +} + +// Sign 计算又拍云的签名头 +func (handler Driver) Sign(ctx context.Context, elements []string) string { + password := fmt.Sprintf("%x", md5.Sum([]byte(handler.Policy.SecretKey))) + mac := hmac.New(sha1.New, []byte(password)) + value := strings.Join(elements, "&") + mac.Write([]byte(value)) + signStr := base64.StdEncoding.EncodeToString((mac.Sum(nil))) + return fmt.Sprintf("UPYUN %s:%s", handler.Policy.AccessKey, signStr) +} diff --git a/pkg/filesystem/errors.go b/pkg/filesystem/errors.go new file mode 100644 index 0000000..5c3f231 --- /dev/null +++ b/pkg/filesystem/errors.go @@ -0,0 +1,25 @@ +package filesystem + +import ( + "errors" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" +) + +var ( + ErrUnknownPolicyType = serializer.NewError(serializer.CodeInternalSetting, "Unknown policy type", nil) + ErrFileSizeTooBig = serializer.NewError(serializer.CodeFileTooLarge, "File is too large", nil) + ErrFileExtensionNotAllowed = serializer.NewError(serializer.CodeFileTypeNotAllowed, "File type not allowed", nil) + ErrInsufficientCapacity = serializer.NewError(serializer.CodeInsufficientCapacity, "Insufficient capacity", nil) + ErrIllegalObjectName = serializer.NewError(serializer.CodeIllegalObjectName, "Invalid object name", nil) + ErrClientCanceled = errors.New("Client canceled operation") + ErrRootProtected = serializer.NewError(serializer.CodeRootProtected, "Root protected", nil) + ErrInsertFileRecord = serializer.NewError(serializer.CodeDBError, "Failed to create file record", nil) + ErrFileExisted = serializer.NewError(serializer.CodeObjectExist, "Object existed", nil) + ErrFileUploadSessionExisted = serializer.NewError(serializer.CodeConflictUploadOngoing, "Upload session 
existed", nil) + ErrPathNotExist = serializer.NewError(serializer.CodeParentNotExist, "Path not exist", nil) + ErrObjectNotExist = serializer.NewError(serializer.CodeParentNotExist, "Object not exist", nil) + ErrIO = serializer.NewError(serializer.CodeIOFailed, "Failed to read file data", nil) + ErrDBListObjects = serializer.NewError(serializer.CodeDBError, "Failed to list object records", nil) + ErrDBDeleteObjects = serializer.NewError(serializer.CodeDBError, "Failed to delete object records", nil) + ErrOneObjectOnly = serializer.ParamErr("You can only copy one object at the same time", nil) +) diff --git a/pkg/filesystem/file.go b/pkg/filesystem/file.go new file mode 100644 index 0000000..a2ddbb1 --- /dev/null +++ b/pkg/filesystem/file.go @@ -0,0 +1,387 @@ +package filesystem + +import ( + "context" + "fmt" + "io" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/response" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/juju/ratelimit" +) + +/* ============ + 文件相关 + ============ +*/ + +// 限速后的ReaderSeeker +type lrs struct { + response.RSCloser + r io.Reader +} + +func (r lrs) Read(p []byte) (int, error) { + return r.r.Read(p) +} + +// withSpeedLimit 给原有的ReadSeeker加上限速 +func (fs *FileSystem) withSpeedLimit(rs response.RSCloser) response.RSCloser { + // 如果用户组有速度限制,就返回限制流速的ReaderSeeker + if fs.User.Group.SpeedLimit != 0 { + speed := fs.User.Group.SpeedLimit + bucket := ratelimit.NewBucketWithRate(float64(speed), int64(speed)) + lrs := lrs{rs, ratelimit.Reader(rs, bucket)} + return lrs + } + // 否则返回原始流 + return rs + +} + +// AddFile 新增文件记录 +func (fs *FileSystem) AddFile(ctx context.Context, parent *model.Folder, file fsctx.FileHeader) (*model.File, error) { + // 添加文件记录前的钩子 + err := fs.Trigger(ctx, 
"BeforeAddFile", file) + if err != nil { + return nil, err + } + + uploadInfo := file.Info() + newFile := model.File{ + Name: uploadInfo.FileName, + SourceName: uploadInfo.SavePath, + UserID: fs.User.ID, + Size: uploadInfo.Size, + FolderID: parent.ID, + PolicyID: fs.Policy.ID, + MetadataSerialized: uploadInfo.Metadata, + UploadSessionID: uploadInfo.UploadSessionID, + } + + err = newFile.Create() + + if err != nil { + if err := fs.Trigger(ctx, "AfterValidateFailed", file); err != nil { + util.Log().Debug("AfterValidateFailed hook execution failed: %s", err) + } + return nil, ErrFileExisted.WithError(err) + } + + fs.User.Storage += newFile.Size + return &newFile, nil +} + +// GetPhysicalFileContent 根据文件物理路径获取文件流 +func (fs *FileSystem) GetPhysicalFileContent(ctx context.Context, path string) (response.RSCloser, error) { + // 重设上传策略 + fs.Policy = &model.Policy{Type: "local"} + _ = fs.DispatchHandler() + + // 获取文件流 + rs, err := fs.Handler.Get(ctx, path) + if err != nil { + return nil, err + } + + return fs.withSpeedLimit(rs), nil +} + +// Preview 预览文件 +// +// path - 文件虚拟路径 +// isText - 是否为文本文件,文本文件会忽略重定向,直接由 +// 服务端拉取中转给用户,故会对文件大小进行限制 +func (fs *FileSystem) Preview(ctx context.Context, id uint, isText bool) (*response.ContentResponse, error) { + err := fs.resetFileIDIfNotExist(ctx, id) + if err != nil { + return nil, err + } + + // 如果是文本文件预览,需要检查大小限制 + sizeLimit := model.GetIntSetting("maxEditSize", 2<<20) + if isText && fs.FileTarget[0].Size > uint64(sizeLimit) { + return nil, ErrFileSizeTooBig + } + + // 是否直接返回文件内容 + if isText || fs.Policy.IsDirectlyPreview() { + resp, err := fs.GetDownloadContent(ctx, id) + if err != nil { + return nil, err + } + return &response.ContentResponse{ + Redirect: false, + Content: resp, + }, nil + } + + // 否则重定向到签名的预览URL + ttl := model.GetIntSetting("preview_timeout", 60) + previewURL, err := fs.SignURL(ctx, &fs.FileTarget[0], int64(ttl), false) + if err != nil { + return nil, err + } + return &response.ContentResponse{ + Redirect: true, 
+ URL: previewURL, + MaxAge: ttl, + }, nil + +} + +// GetDownloadContent 获取用于下载的文件流 +func (fs *FileSystem) GetDownloadContent(ctx context.Context, id uint) (response.RSCloser, error) { + // 获取原始文件流 + rs, err := fs.GetContent(ctx, id) + if err != nil { + return nil, err + } + + // 返回限速处理后的文件流 + return fs.withSpeedLimit(rs), nil + +} + +// GetContent 获取文件内容,path为虚拟路径 +func (fs *FileSystem) GetContent(ctx context.Context, id uint) (response.RSCloser, error) { + err := fs.resetFileIDIfNotExist(ctx, id) + if err != nil { + return nil, err + } + ctx = context.WithValue(ctx, fsctx.FileModelCtx, fs.FileTarget[0]) + + // 获取文件流 + rs, err := fs.Handler.Get(ctx, fs.FileTarget[0].SourceName) + if err != nil { + return nil, ErrIO.WithError(err) + } + + return rs, nil +} + +// deleteGroupedFile 对分组好的文件执行删除操作, +// 返回每个分组失败的文件列表 +func (fs *FileSystem) deleteGroupedFile(ctx context.Context, files map[uint][]*model.File) map[uint][]string { + // 失败的文件列表 + // TODO 并行删除 + failed := make(map[uint][]string, len(files)) + thumbs := make([]string, 0) + + for policyID, toBeDeletedFiles := range files { + // 列举出需要物理删除的文件的物理路径 + sourceNamesAll := make([]string, 0, len(toBeDeletedFiles)) + uploadSessions := make([]*serializer.UploadSession, 0, len(toBeDeletedFiles)) + + for i := 0; i < len(toBeDeletedFiles); i++ { + sourceNamesAll = append(sourceNamesAll, toBeDeletedFiles[i].SourceName) + + if toBeDeletedFiles[i].UploadSessionID != nil { + if session, ok := cache.Get(UploadSessionCachePrefix + *toBeDeletedFiles[i].UploadSessionID); ok { + uploadSession := session.(serializer.UploadSession) + uploadSessions = append(uploadSessions, &uploadSession) + } + } + + // Check if sidecar thumb file exist + if model.IsTrueVal(toBeDeletedFiles[i].MetadataSerialized[model.ThumbSidecarMetadataKey]) { + thumbs = append(thumbs, toBeDeletedFiles[i].ThumbFile()) + } + } + + // 切换上传策略 + fs.Policy = toBeDeletedFiles[0].GetPolicy() + err := fs.DispatchHandler() + if err != nil { + failed[policyID] = sourceNamesAll 
+ continue + } + + // 取消上传会话 + for _, upSession := range uploadSessions { + if err := fs.Handler.CancelToken(ctx, upSession); err != nil { + util.Log().Warning("Failed to cancel upload session for %q: %s", upSession.Name, err) + } + + cache.Deletes([]string{upSession.Key}, UploadSessionCachePrefix) + } + + // 执行删除 + toBeDeletedSrcs := append(sourceNamesAll, thumbs...) + failedFile, _ := fs.Handler.Delete(ctx, toBeDeletedSrcs) + + // Exclude failed results related to thumb file + failed[policyID] = util.SliceDifference(failedFile, thumbs) + } + + return failed +} + +// GroupFileByPolicy 将目标文件按照存储策略分组 +func (fs *FileSystem) GroupFileByPolicy(ctx context.Context, files []model.File) map[uint][]*model.File { + var policyGroup = make(map[uint][]*model.File) + + for key := range files { + if file, ok := policyGroup[files[key].PolicyID]; ok { + // 如果已存在分组,直接追加 + policyGroup[files[key].PolicyID] = append(file, &files[key]) + } else { + // 分组不存在,创建 + policyGroup[files[key].PolicyID] = make([]*model.File, 0) + policyGroup[files[key].PolicyID] = append(policyGroup[files[key].PolicyID], &files[key]) + } + } + + return policyGroup +} + +// GetDownloadURL 创建文件下载链接, timeout 为数据库中存储过期时间的字段 +func (fs *FileSystem) GetDownloadURL(ctx context.Context, id uint, timeout string) (string, error) { + err := fs.resetFileIDIfNotExist(ctx, id) + if err != nil { + return "", err + } + fileTarget := &fs.FileTarget[0] + + // 生成下載地址 + ttl := model.GetIntSetting(timeout, 60) + source, err := fs.SignURL( + ctx, + fileTarget, + int64(ttl), + true, + ) + if err != nil { + return "", err + } + + return source, nil +} + +// GetSource 获取可直接访问文件的外链地址 +func (fs *FileSystem) GetSource(ctx context.Context, fileID uint) (string, error) { + // 查找文件记录 + err := fs.resetFileIDIfNotExist(ctx, fileID) + if err != nil { + return "", ErrObjectNotExist.WithError(err) + } + + // 检查存储策略是否可以获得外链 + if !fs.Policy.IsOriginLinkEnable { + return "", serializer.NewError( + serializer.CodePolicyNotAllowed, + "This policy is 
not enabled for getting source link", + nil, + ) + } + + source, err := fs.SignURL(ctx, &fs.FileTarget[0], 0, false) + if err != nil { + return "", serializer.NewError(serializer.CodeNotSet, "Failed to get source link", err) + } + + return source, nil +} + +// SignURL 签名文件原始 URL +func (fs *FileSystem) SignURL(ctx context.Context, file *model.File, ttl int64, isDownload bool) (string, error) { + fs.FileTarget = []model.File{*file} + ctx = context.WithValue(ctx, fsctx.FileModelCtx, *file) + + err := fs.resetPolicyToFirstFile(ctx) + if err != nil { + return "", err + } + + // 签名最终URL + // 生成外链地址 + source, err := fs.Handler.Source(ctx, fs.FileTarget[0].SourceName, ttl, isDownload, fs.User.Group.SpeedLimit) + if err != nil { + return "", serializer.NewError(serializer.CodeNotSet, "Failed to get source link", err) + } + + return source, nil +} + +// ResetFileIfNotExist 重设当前目标文件为 path,如果当前目标为空 +func (fs *FileSystem) ResetFileIfNotExist(ctx context.Context, path string) error { + // 找到文件 + if len(fs.FileTarget) == 0 { + exist, file := fs.IsFileExist(path) + if !exist { + return ErrObjectNotExist + } + fs.FileTarget = []model.File{*file} + } + + // 将当前存储策略重设为文件使用的 + return fs.resetPolicyToFirstFile(ctx) +} + +// ResetFileIfNotExist 重设当前目标文件为 id,如果当前目标为空 +func (fs *FileSystem) resetFileIDIfNotExist(ctx context.Context, id uint) error { + // 找到文件 + if len(fs.FileTarget) == 0 { + file, err := model.GetFilesByIDs([]uint{id}, fs.User.ID) + if err != nil || len(file) == 0 { + return ErrObjectNotExist + } + fs.FileTarget = []model.File{file[0]} + } + + // 如果上下文限制了父目录,则进行检查 + if parent, ok := ctx.Value(fsctx.LimitParentCtx).(*model.Folder); ok { + if parent.ID != fs.FileTarget[0].FolderID { + return ErrObjectNotExist + } + } + + // 将当前存储策略重设为文件使用的 + return fs.resetPolicyToFirstFile(ctx) +} + +// resetPolicyToFirstFile 将当前存储策略重设为第一个目标文件文件使用的 +func (fs *FileSystem) resetPolicyToFirstFile(ctx context.Context) error { + if len(fs.FileTarget) == 0 { + return ErrObjectNotExist + } + + // 
从机模式不进行操作 + if conf.SystemConfig.Mode == "slave" { + return nil + } + + fs.Policy = fs.FileTarget[0].GetPolicy() + err := fs.DispatchHandler() + if err != nil { + return err + } + return nil +} + +// Search 搜索文件 +func (fs *FileSystem) Search(ctx context.Context, keywords ...interface{}) ([]serializer.Object, error) { + parents := make([]uint, 0) + + // 如果限定了根目录,则只在这个根目录下搜索。 + if fs.Root != nil { + allFolders, err := model.GetRecursiveChildFolder([]uint{fs.Root.ID}, fs.User.ID, true) + if err != nil { + return nil, fmt.Errorf("failed to list all folders: %w", err) + } + + for _, folder := range allFolders { + parents = append(parents, folder.ID) + } + } + + files, _ := model.GetFilesByKeywords(fs.User.ID, parents, keywords...) + fs.SetTargetFile(&files) + + return fs.listObjects(ctx, "/", files, nil, nil), nil +} diff --git a/pkg/filesystem/filesystem.go b/pkg/filesystem/filesystem.go new file mode 100644 index 0000000..d892ed0 --- /dev/null +++ b/pkg/filesystem/filesystem.go @@ -0,0 +1,295 @@ +package filesystem + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "sync" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cluster" + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/cos" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/googledrive" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/local" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/onedrive" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/oss" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/qiniu" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/remote" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/s3" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/shadow/masterinslave" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/shadow/slaveinmaster" + 
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/upyun" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/gin-gonic/gin" + cossdk "github.com/tencentyun/cos-go-sdk-v5" +) + +// FSPool 文件系统资源池 +var FSPool = sync.Pool{ + New: func() interface{} { + return &FileSystem{} + }, +} + +// FileSystem 管理文件的文件系统 +type FileSystem struct { + // 文件系统所有者 + User *model.User + // 操作文件使用的存储策略 + Policy *model.Policy + // 当前正在处理的文件对象 + FileTarget []model.File + // 当前正在处理的目录对象 + DirTarget []model.Folder + // 相对根目录 + Root *model.Folder + // 互斥锁 + Lock sync.Mutex + + /* + 钩子函数 + */ + Hooks map[string][]Hook + + /* + 文件系统处理适配器 + */ + Handler driver.Handler + + // 回收锁 + recycleLock sync.Mutex +} + +// getEmptyFS 从pool中获取新的FileSystem +func getEmptyFS() *FileSystem { + fs := FSPool.Get().(*FileSystem) + return fs +} + +// Recycle 回收FileSystem资源 +func (fs *FileSystem) Recycle() { + fs.recycleLock.Lock() + fs.reset() + FSPool.Put(fs) +} + +// reset 重设文件系统,以便回收使用 +func (fs *FileSystem) reset() { + fs.User = nil + fs.CleanTargets() + fs.Policy = nil + fs.Hooks = nil + fs.Handler = nil + fs.Root = nil + fs.Lock = sync.Mutex{} + fs.recycleLock = sync.Mutex{} +} + +// NewFileSystem 初始化一个文件系统 +func NewFileSystem(user *model.User) (*FileSystem, error) { + fs := getEmptyFS() + fs.User = user + fs.Policy = user.GetPolicyID(nil) + + // 分配存储策略适配器 + err := fs.DispatchHandler() + + return fs, err +} + +// NewAnonymousFileSystem 初始化匿名文件系统 +func NewAnonymousFileSystem() (*FileSystem, error) { + fs := getEmptyFS() + fs.User = &model.User{} + + // 如果是主机模式下,则为匿名文件系统分配游客用户组 + if conf.SystemConfig.Mode == "master" { + anonymousGroup, err := model.GetGroupByID(3) + if err != nil { + return nil, err + } + fs.User.Group = anonymousGroup + } else { + // 从机模式下,分配本地策略处理器 + fs.Handler = local.Driver{} + } + + return fs, nil +} + +// DispatchHandler 根据存储策略分配文件适配器 +func (fs *FileSystem) DispatchHandler() error { + handler, err := 
getNewPolicyHandler(fs.Policy) + fs.Handler = handler + + return err +} + +// getNewPolicyHandler 根据存储策略类型字段获取处理器 +func getNewPolicyHandler(policy *model.Policy) (driver.Handler, error) { + if policy == nil { + return nil, ErrUnknownPolicyType + } + + switch policy.Type { + case "mock", "anonymous": + return nil, nil + case "local": + return local.Driver{ + Policy: policy, + }, nil + case "remote": + return remote.NewDriver(policy) + case "qiniu": + return qiniu.NewDriver(policy), nil + case "oss": + return oss.NewDriver(policy) + case "upyun": + return upyun.Driver{ + Policy: policy, + }, nil + case "onedrive": + return onedrive.NewDriver(policy) + case "cos": + u, _ := url.Parse(policy.Server) + b := &cossdk.BaseURL{BucketURL: u} + return cos.Driver{ + Policy: policy, + Client: cossdk.NewClient(b, &http.Client{ + Transport: &cossdk.AuthorizationTransport{ + SecretID: policy.AccessKey, + SecretKey: policy.SecretKey, + }, + }), + HTTPClient: request.NewClient(), + }, nil + case "s3": + return s3.NewDriver(policy) + case "googledrive": + return googledrive.NewDriver(policy) + default: + return nil, ErrUnknownPolicyType + } +} + +// NewFileSystemFromContext 从gin.Context创建文件系统 +func NewFileSystemFromContext(c *gin.Context) (*FileSystem, error) { + user, exist := c.Get("user") + if !exist { + return NewAnonymousFileSystem() + } + fs, err := NewFileSystem(user.(*model.User)) + return fs, err +} + +// NewFileSystemFromCallback 从gin.Context创建回调用文件系统 +func NewFileSystemFromCallback(c *gin.Context) (*FileSystem, error) { + fs, err := NewFileSystemFromContext(c) + if err != nil { + return nil, err + } + + // 获取回调会话 + callbackSessionRaw, ok := c.Get(UploadSessionCtx) + if !ok { + return nil, errors.New("upload session not exist") + } + callbackSession := callbackSessionRaw.(*serializer.UploadSession) + + // 重新指向上传策略 + fs.Policy = &callbackSession.Policy + err = fs.DispatchHandler() + + return fs, err +} + +// SwitchToSlaveHandler 将负责上传的 Handler 切换为从机节点 +func (fs *FileSystem) 
SwitchToSlaveHandler(node cluster.Node) { + fs.Handler = slaveinmaster.NewDriver(node, fs.Handler, fs.Policy) +} + +// SwitchToShadowHandler 将负责上传的 Handler 切换为从机节点转存使用的影子处理器 +func (fs *FileSystem) SwitchToShadowHandler(master cluster.Node, masterURL, masterID string) { + switch fs.Policy.Type { + case "local": + fs.Policy.Type = "remote" + fs.Policy.Server = masterURL + fs.Policy.AccessKey = fmt.Sprintf("%d", master.ID()) + fs.Policy.SecretKey = master.DBModel().MasterKey + fs.DispatchHandler() + case "onedrive": + fs.Policy.MasterID = masterID + } + + fs.Handler = masterinslave.NewDriver(master, fs.Handler, fs.Policy) +} + +// SetTargetFile 设置当前处理的目标文件 +func (fs *FileSystem) SetTargetFile(files *[]model.File) { + if len(fs.FileTarget) == 0 { + fs.FileTarget = *files + } else { + fs.FileTarget = append(fs.FileTarget, *files...) + } + +} + +// SetTargetDir 设置当前处理的目标目录 +func (fs *FileSystem) SetTargetDir(dirs *[]model.Folder) { + if len(fs.DirTarget) == 0 { + fs.DirTarget = *dirs + } else { + fs.DirTarget = append(fs.DirTarget, *dirs...) 
+ } + +} + +// SetTargetFileByIDs 根据文件ID设置目标文件,忽略用户ID +func (fs *FileSystem) SetTargetFileByIDs(ids []uint) error { + files, err := model.GetFilesByIDs(ids, 0) + if err != nil || len(files) == 0 { + return ErrFileExisted.WithError(err) + } + fs.SetTargetFile(&files) + return nil +} + +// SetTargetByInterface 根据 model.File 或者 model.Folder 设置目标对象 +// TODO 测试 +func (fs *FileSystem) SetTargetByInterface(target interface{}) error { + if file, ok := target.(*model.File); ok { + fs.SetTargetFile(&[]model.File{*file}) + return nil + } + if folder, ok := target.(*model.Folder); ok { + fs.SetTargetDir(&[]model.Folder{*folder}) + return nil + } + + return ErrObjectNotExist +} + +// CleanTargets 清空目标 +func (fs *FileSystem) CleanTargets() { + fs.FileTarget = fs.FileTarget[:0] + fs.DirTarget = fs.DirTarget[:0] +} + +// SetPolicyFromPath 根据给定路径尝试设定偏好存储策略 +func (fs *FileSystem) SetPolicyFromPath(filePath string) error { + _, parent := fs.getClosedParent(filePath) + // 尝试获取并重设存储策略 + fs.Policy = fs.User.GetPolicyID(parent) + return fs.DispatchHandler() +} + +// SetPolicyFromPreference 尝试设定偏好存储策略 +func (fs *FileSystem) SetPolicyFromPreference(preference uint) error { + // 尝试获取并重设存储策略 + fs.Policy = fs.User.GetPolicyByPreference(preference) + return fs.DispatchHandler() +} diff --git a/pkg/filesystem/fsctx/context.go b/pkg/filesystem/fsctx/context.go new file mode 100644 index 0000000..1b7b3be --- /dev/null +++ b/pkg/filesystem/fsctx/context.go @@ -0,0 +1,44 @@ +package fsctx + +type key int + +const ( + // GinCtx Gin的上下文 + GinCtx key = iota + // PathCtx 文件或目录的虚拟路径 + PathCtx + // FileModelCtx 文件数据库模型 + FileModelCtx + // FolderModelCtx 目录数据库模型 + FolderModelCtx + // HTTPCtx HTTP请求的上下文 + HTTPCtx + // UploadPolicyCtx 上传策略,一般为slave模式下使用 + UploadPolicyCtx + // UserCtx 用户 + UserCtx + // ThumbSizeCtx 缩略图尺寸 + ThumbSizeCtx + // FileSizeCtx 文件大小 + FileSizeCtx + // ShareKeyCtx 分享文件的 HashID + ShareKeyCtx + // LimitParentCtx 限制父目录 + LimitParentCtx + // IgnoreDirectoryConflictCtx 忽略目录重名冲突 + 
IgnoreDirectoryConflictCtx + // RetryCtx 失败重试次数 + RetryCtx + // ForceUsePublicEndpointCtx 强制使用公网 Endpoint + ForceUsePublicEndpointCtx + // CancelFuncCtx Context 取消函數 + CancelFuncCtx + // 文件在从机节点中的路径 + SlaveSrcPath + // Webdav目标名称 + WebdavDstName + // WebDAVCtx WebDAV + WebDAVCtx + // WebDAV反代Url + WebDAVProxyUrlCtx +) diff --git a/pkg/filesystem/fsctx/stream.go b/pkg/filesystem/fsctx/stream.go new file mode 100644 index 0000000..512270b --- /dev/null +++ b/pkg/filesystem/fsctx/stream.go @@ -0,0 +1,123 @@ +package fsctx + +import ( + "errors" + "github.com/HFO4/aliyun-oss-go-sdk/oss" + "io" + "time" +) + +type WriteMode int + +const ( + Overwrite WriteMode = 0x00001 + // Append 只适用于本地策略 + Append WriteMode = 0x00002 + Nop WriteMode = 0x00004 +) + +type UploadTaskInfo struct { + Size uint64 + MimeType string + FileName string + VirtualPath string + Mode WriteMode + Metadata map[string]string + LastModified *time.Time + SavePath string + UploadSessionID *string + AppendStart uint64 + Model interface{} + Src string +} + +// Get mimetype of uploaded file, if it's not defined, detect it from file name +func (u *UploadTaskInfo) DetectMimeType() string { + if u.MimeType != "" { + return u.MimeType + } + + return oss.TypeByExtension(u.FileName) +} + +// FileHeader 上传来的文件数据处理器 +type FileHeader interface { + io.Reader + io.Closer + io.Seeker + Info() *UploadTaskInfo + SetSize(uint64) + SetModel(fileModel interface{}) + Seekable() bool +} + +// FileStream 用户传来的文件 +type FileStream struct { + Mode WriteMode + LastModified *time.Time + Metadata map[string]string + File io.ReadCloser + Seeker io.Seeker + Size uint64 + VirtualPath string + Name string + MimeType string + SavePath string + UploadSessionID *string + AppendStart uint64 + Model interface{} + Src string +} + +func (file *FileStream) Read(p []byte) (n int, err error) { + if file.File != nil { + return file.File.Read(p) + } + + return 0, io.EOF +} + +func (file *FileStream) Close() error { + if file.File != nil { + return 
file.File.Close() + } + + return nil +} + +func (file *FileStream) Seek(offset int64, whence int) (int64, error) { + if file.Seekable() { + return file.Seeker.Seek(offset, whence) + } + + return 0, errors.New("no seeker") +} + +func (file *FileStream) Seekable() bool { + return file.Seeker != nil +} + +func (file *FileStream) Info() *UploadTaskInfo { + return &UploadTaskInfo{ + Size: file.Size, + MimeType: file.MimeType, + FileName: file.Name, + VirtualPath: file.VirtualPath, + Mode: file.Mode, + Metadata: file.Metadata, + LastModified: file.LastModified, + SavePath: file.SavePath, + UploadSessionID: file.UploadSessionID, + AppendStart: file.AppendStart, + Model: file.Model, + Src: file.Src, + } +} + +func (file *FileStream) SetSize(size uint64) { + file.Size = size +} + +func (file *FileStream) SetModel(fileModel interface{}) { + file.Model = fileModel +} diff --git a/pkg/filesystem/hooks.go b/pkg/filesystem/hooks.go new file mode 100644 index 0000000..2a4c5d5 --- /dev/null +++ b/pkg/filesystem/hooks.go @@ -0,0 +1,320 @@ +package filesystem + +import ( + "context" + "io/ioutil" + "net/http" + "strconv" + "strings" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/cluster" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver/local" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// Hook 钩子函数 +type Hook func(ctx context.Context, fs *FileSystem, file fsctx.FileHeader) error + +// Use 注入钩子 +func (fs *FileSystem) Use(name string, hook Hook) { + if fs.Hooks == nil { + fs.Hooks = make(map[string][]Hook) + } + if _, ok := fs.Hooks[name]; ok { + fs.Hooks[name] = append(fs.Hooks[name], hook) + return + } + fs.Hooks[name] = []Hook{hook} +} + +// CleanHooks 清空钩子,name为空表示全部清空 +func (fs *FileSystem) CleanHooks(name string) { + if name == "" { + fs.Hooks = 
nil + } else { + delete(fs.Hooks, name) + } +} + +// Trigger 触发钩子,遇到第一个错误时 +// 返回错误,后续钩子不会继续执行 +func (fs *FileSystem) Trigger(ctx context.Context, name string, file fsctx.FileHeader) error { + if hooks, ok := fs.Hooks[name]; ok { + for _, hook := range hooks { + err := hook(ctx, fs, file) + if err != nil { + util.Log().Warning("Failed to execute hook:%s", err) + return err + } + } + } + return nil +} + +// HookValidateFile 一系列对文件检验的集合 +func HookValidateFile(ctx context.Context, fs *FileSystem, file fsctx.FileHeader) error { + fileInfo := file.Info() + + // 验证单文件尺寸 + if !fs.ValidateFileSize(ctx, fileInfo.Size) { + return ErrFileSizeTooBig + } + + // 验证文件名 + if !fs.ValidateLegalName(ctx, fileInfo.FileName) { + return ErrIllegalObjectName + } + + // 验证扩展名 + if !fs.ValidateExtension(ctx, fileInfo.FileName) { + return ErrFileExtensionNotAllowed + } + + return nil + +} + +// HookResetPolicy 重设存储策略为上下文已有文件 +func HookResetPolicy(ctx context.Context, fs *FileSystem, file fsctx.FileHeader) error { + originFile, ok := ctx.Value(fsctx.FileModelCtx).(model.File) + if !ok { + return ErrObjectNotExist + } + + fs.Policy = originFile.GetPolicy() + return fs.DispatchHandler() +} + +// HookValidateCapacity 验证用户容量 +func HookValidateCapacity(ctx context.Context, fs *FileSystem, file fsctx.FileHeader) error { + // 验证并扣除容量 + if fs.User.GetRemainingCapacity() < file.Info().Size { + return ErrInsufficientCapacity + } + return nil +} + +// HookValidateCapacityDiff 根据原有文件和新文件的大小验证用户容量 +func HookValidateCapacityDiff(ctx context.Context, fs *FileSystem, newFile fsctx.FileHeader) error { + originFile := ctx.Value(fsctx.FileModelCtx).(model.File) + newFileSize := newFile.Info().Size + + if newFileSize > originFile.Size { + return HookValidateCapacity(ctx, fs, newFile) + } + + return nil +} + +// HookDeleteTempFile 删除已保存的临时文件 +func HookDeleteTempFile(ctx context.Context, fs *FileSystem, file fsctx.FileHeader) error { + // 删除临时文件 + _, err := fs.Handler.Delete(ctx, []string{file.Info().SavePath}) + 
if err != nil { + util.Log().Warning("Failed to clean-up temp files: %s", err) + } + + return nil +} + +// HookCleanFileContent 清空文件内容 +func HookCleanFileContent(ctx context.Context, fs *FileSystem, file fsctx.FileHeader) error { + // 清空内容 + return fs.Handler.Put(ctx, &fsctx.FileStream{ + File: ioutil.NopCloser(strings.NewReader("")), + SavePath: file.Info().SavePath, + Size: 0, + Mode: fsctx.Overwrite, + }) +} + +// HookClearFileSize 将原始文件的尺寸设为0 +func HookClearFileSize(ctx context.Context, fs *FileSystem, file fsctx.FileHeader) error { + originFile, ok := ctx.Value(fsctx.FileModelCtx).(model.File) + if !ok { + return ErrObjectNotExist + } + return originFile.UpdateSize(0) +} + +// HookCancelContext 取消上下文 +func HookCancelContext(ctx context.Context, fs *FileSystem, file fsctx.FileHeader) error { + cancelFunc, ok := ctx.Value(fsctx.CancelFuncCtx).(context.CancelFunc) + if ok { + cancelFunc() + } + return nil +} + +// HookUpdateSourceName 更新文件SourceName +func HookUpdateSourceName(ctx context.Context, fs *FileSystem, file fsctx.FileHeader) error { + originFile, ok := ctx.Value(fsctx.FileModelCtx).(model.File) + if !ok { + return ErrObjectNotExist + } + return originFile.UpdateSourceName(originFile.SourceName) +} + +// GenericAfterUpdate 文件内容更新后 +func GenericAfterUpdate(ctx context.Context, fs *FileSystem, newFile fsctx.FileHeader) error { + // 更新文件尺寸 + originFile, ok := ctx.Value(fsctx.FileModelCtx).(model.File) + if !ok { + return ErrObjectNotExist + } + + newFile.SetModel(&originFile) + + err := originFile.UpdateSize(newFile.Info().Size) + if err != nil { + return err + } + + return nil +} + +// SlaveAfterUpload Slave模式下上传完成钩子 +func SlaveAfterUpload(session *serializer.UploadSession) Hook { + return func(ctx context.Context, fs *FileSystem, fileHeader fsctx.FileHeader) error { + if session.Callback == "" { + return nil + } + + // 发送回调请求 + callbackBody := serializer.UploadCallback{} + return cluster.RemoteCallback(session.Callback, callbackBody) + } +} + +// 
GenericAfterUpload 文件上传完成后,包含数据库操作 +func GenericAfterUpload(ctx context.Context, fs *FileSystem, fileHeader fsctx.FileHeader) error { + fileInfo := fileHeader.Info() + + // 创建或查找根目录 + folder, err := fs.CreateDirectory(ctx, fileInfo.VirtualPath) + if err != nil { + return err + } + + // 检查文件是否存在 + if ok, file := fs.IsChildFileExist( + folder, + fileInfo.FileName, + ); ok { + if file.UploadSessionID != nil { + return ErrFileUploadSessionExisted + } + + return ErrFileExisted + } + + // 向数据库中插入记录 + file, err := fs.AddFile(ctx, folder, fileHeader) + if err != nil { + return ErrInsertFileRecord + } + fileHeader.SetModel(file) + + return nil +} + +// HookGenerateThumb 生成缩略图 +// func HookGenerateThumb(ctx context.Context, fs *FileSystem, fileHeader fsctx.FileHeader) error { +// // 异步尝试生成缩略图 +// fileMode := fileHeader.Info().Model.(*model.File) +// if fs.Policy.IsThumbGenerateNeeded() { +// fs.recycleLock.Lock() +// go func() { +// defer fs.recycleLock.Unlock() +// _, _ = fs.Handler.Delete(ctx, []string{fileMode.SourceName + model.GetSettingByNameWithDefault("thumb_file_suffix", "._thumb")}) +// fs.GenerateThumbnail(ctx, fileMode) +// }() +// } +// return nil +// } + +// HookClearFileHeaderSize 将FileHeader大小设定为0 +func HookClearFileHeaderSize(ctx context.Context, fs *FileSystem, fileHeader fsctx.FileHeader) error { + fileHeader.SetSize(0) + return nil +} + +// HookTruncateFileTo 将物理文件截断至 size +func HookTruncateFileTo(size uint64) Hook { + return func(ctx context.Context, fs *FileSystem, fileHeader fsctx.FileHeader) error { + if handler, ok := fs.Handler.(local.Driver); ok { + return handler.Truncate(ctx, fileHeader.Info().SavePath, size) + } + + return nil + } +} + +// HookChunkUploadFinished 单个分片上传结束后 +func HookChunkUploaded(ctx context.Context, fs *FileSystem, fileHeader fsctx.FileHeader) error { + fileInfo := fileHeader.Info() + + // 更新文件大小 + return fileInfo.Model.(*model.File).UpdateSize(fileInfo.AppendStart + fileInfo.Size) +} + +// HookChunkUploadFailed 单个分片上传失败后 +func 
HookChunkUploadFailed(ctx context.Context, fs *FileSystem, fileHeader fsctx.FileHeader) error { + fileInfo := fileHeader.Info() + + // 更新文件大小 + return fileInfo.Model.(*model.File).UpdateSize(fileInfo.AppendStart) +} + +// HookPopPlaceholderToFile 将占位文件提升为正式文件 +func HookPopPlaceholderToFile(picInfo string) Hook { + return func(ctx context.Context, fs *FileSystem, fileHeader fsctx.FileHeader) error { + fileInfo := fileHeader.Info() + fileModel := fileInfo.Model.(*model.File) + return fileModel.PopChunkToFile(fileInfo.LastModified, picInfo) + } +} + +// HookChunkUploadFinished 分片上传结束后处理文件 +func HookDeleteUploadSession(id string) Hook { + return func(ctx context.Context, fs *FileSystem, fileHeader fsctx.FileHeader) error { + cache.Deletes([]string{id}, UploadSessionCachePrefix) + return nil + } +} + +// NewWebdavAfterUploadHook 每次创建一个新的钩子函数 rclone 在 PUT 请求里有 OC-Checksum 字符串 +// 和 X-OC-Mtime +func NewWebdavAfterUploadHook(request *http.Request) func(ctx context.Context, fs *FileSystem, newFile fsctx.FileHeader) error { + var modtime time.Time + if timeVal := request.Header.Get("X-OC-Mtime"); timeVal != "" { + timeUnix, err := strconv.ParseInt(timeVal, 10, 64) + if err == nil { + modtime = time.Unix(timeUnix, 0) + } + } + checksum := request.Header.Get("OC-Checksum") + + return func(ctx context.Context, fs *FileSystem, newFile fsctx.FileHeader) error { + file := newFile.Info().Model.(*model.File) + if !modtime.IsZero() { + err := model.DB.Model(file).UpdateColumn("updated_at", modtime).Error + if err != nil { + return err + } + } + + if checksum != "" { + return file.UpdateMetadata(map[string]string{ + model.ChecksumMetadataKey: checksum, + }) + } + + return nil + } +} diff --git a/pkg/filesystem/image.go b/pkg/filesystem/image.go new file mode 100644 index 0000000..dc573dd --- /dev/null +++ b/pkg/filesystem/image.go @@ -0,0 +1,218 @@ +package filesystem + +import ( + "context" + "errors" + "fmt" + "os" + "sync" + + "runtime" + + model 
"github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/driver" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/response" + "github.com/cloudreve/Cloudreve/v3/pkg/thumb" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +/* ================ + 图像处理相关 + ================ +*/ + +// GetThumb 获取文件的缩略图 +func (fs *FileSystem) GetThumb(ctx context.Context, id uint) (*response.ContentResponse, error) { + // 根据 ID 查找文件 + err := fs.resetFileIDIfNotExist(ctx, id) + if err != nil { + return nil, ErrObjectNotExist + } + + file := fs.FileTarget[0] + if !file.ShouldLoadThumb() { + return nil, ErrObjectNotExist + } + + w, h := fs.GenerateThumbnailSize(0, 0) + ctx = context.WithValue(ctx, fsctx.ThumbSizeCtx, [2]uint{w, h}) + ctx = context.WithValue(ctx, fsctx.FileModelCtx, file) + res, err := fs.Handler.Thumb(ctx, &file) + if errors.Is(err, driver.ErrorThumbNotExist) { + // Regenerate thumb if the thumb is not initialized yet + if generateErr := fs.generateThumbnail(ctx, &file); generateErr == nil { + res, err = fs.Handler.Thumb(ctx, &file) + } else { + err = generateErr + } + } else if errors.Is(err, driver.ErrorThumbNotSupported) { + // Policy handler explicitly indicates thumb not available, check if proxy is enabled + if fs.Policy.CouldProxyThumb() { + // if thumb id marked as existed, redirect to "sidecar" thumb file. + if file.MetadataSerialized != nil && + file.MetadataSerialized[model.ThumbStatusMetadataKey] == model.ThumbStatusExist { + // redirect to sidecar file + res = &response.ContentResponse{ + Redirect: true, + } + res.URL, err = fs.Handler.Source(ctx, file.ThumbFile(), int64(model.GetIntSetting("preview_timeout", 60)), false, 0) + } else { + // if not exist, generate and upload the sidecar thumb. 
+ if err = fs.generateThumbnail(ctx, &file); err == nil { + return fs.GetThumb(ctx, id) + } + } + } else { + // thumb not supported and proxy is disabled, mark as not available + _ = updateThumbStatus(&file, model.ThumbStatusNotAvailable) + } + } + + if err == nil && conf.SystemConfig.Mode == "master" { + res.MaxAge = model.GetIntSetting("preview_timeout", 60) + } + + return res, err +} + +// thumbPool 要使用的任务池 +var thumbPool *Pool +var once sync.Once + +// Pool 带有最大配额的任务池 +type Pool struct { + // 容量 + worker chan int +} + +// Init 初始化任务池 +func getThumbWorker() *Pool { + once.Do(func() { + maxWorker := model.GetIntSetting("thumb_max_task_count", -1) + if maxWorker <= 0 { + maxWorker = runtime.GOMAXPROCS(0) + } + thumbPool = &Pool{ + worker: make(chan int, maxWorker), + } + util.Log().Debug("Initialize thumbnails task queue with: WorkerNum = %d", maxWorker) + }) + return thumbPool +} +func (pool *Pool) addWorker() { + pool.worker <- 1 + util.Log().Debug("Worker added to thumbnails task queue.") +} +func (pool *Pool) releaseWorker() { + util.Log().Debug("Worker released from thumbnails task queue.") + <-pool.worker +} + +// generateThumbnail generates thumb for given file, upload the thumb file back with given suffix +func (fs *FileSystem) generateThumbnail(ctx context.Context, file *model.File) error { + // 新建上下文 + newCtx, cancel := context.WithCancel(context.Background()) + defer cancel() + // TODO: check file size + + if file.Size > uint64(model.GetIntSetting("thumb_max_src_size", 31457280)) { + _ = updateThumbStatus(file, model.ThumbStatusNotAvailable) + return errors.New("file too large") + } + + getThumbWorker().addWorker() + defer getThumbWorker().releaseWorker() + + // 获取文件数据 + source, err := fs.Handler.Get(newCtx, file.SourceName) + if err != nil { + return fmt.Errorf("faield to fetch original file %q: %w", file.SourceName, err) + } + defer source.Close() + + // Provide file source path for local policy files + src := "" + if conf.SystemConfig.Mode == "slave" 
|| file.GetPolicy().Type == "local" { + src = file.SourceName + } + + thumbRes, err := thumb.Generators.Generate(ctx, source, src, file.Name, model.GetSettingByNames( + "thumb_width", + "thumb_height", + "thumb_builtin_enabled", + "thumb_vips_enabled", + "thumb_ffmpeg_enabled", + "thumb_libreoffice_enabled", + )) + if err != nil { + _ = updateThumbStatus(file, model.ThumbStatusNotAvailable) + return fmt.Errorf("failed to generate thumb for %q: %w", file.Name, err) + } + + defer os.Remove(thumbRes.Path) + + thumbFile, err := os.Open(thumbRes.Path) + if err != nil { + return fmt.Errorf("failed to open temp thumb %q: %w", thumbRes.Path, err) + } + + defer thumbFile.Close() + fileInfo, err := thumbFile.Stat() + if err != nil { + return fmt.Errorf("failed to stat temp thumb %q: %w", thumbRes.Path, err) + } + + if err = fs.Handler.Put(newCtx, &fsctx.FileStream{ + Mode: fsctx.Overwrite, + File: thumbFile, + Seeker: thumbFile, + Size: uint64(fileInfo.Size()), + SavePath: file.SourceName + model.GetSettingByNameWithDefault("thumb_file_suffix", "._thumb"), + }); err != nil { + return fmt.Errorf("failed to save thumb for %q: %w", file.Name, err) + } + + if model.IsTrueVal(model.GetSettingByName("thumb_gc_after_gen")) { + util.Log().Debug("generateThumbnail runtime.GC") + runtime.GC() + } + + // Mark this file as thumb available + err = updateThumbStatus(file, model.ThumbStatusExist) + + // 失败时删除缩略图文件 + if err != nil { + _, _ = fs.Handler.Delete(newCtx, []string{file.SourceName + model.GetSettingByNameWithDefault("thumb_file_suffix", "._thumb")}) + } + + return nil +} + +// GenerateThumbnailSize 获取要生成的缩略图的尺寸 +func (fs *FileSystem) GenerateThumbnailSize(w, h int) (uint, uint) { + return uint(model.GetIntSetting("thumb_width", 400)), uint(model.GetIntSetting("thumb_height", 300)) +} + +func updateThumbStatus(file *model.File, status string) error { + if file.Model.ID > 0 { + meta := map[string]string{ + model.ThumbStatusMetadataKey: status, + } + + if status == 
model.ThumbStatusExist { + meta[model.ThumbSidecarMetadataKey] = "true" + } + + return file.UpdateMetadata(meta) + } else { + if file.MetadataSerialized == nil { + file.MetadataSerialized = map[string]string{} + } + + file.MetadataSerialized[model.ThumbStatusMetadataKey] = status + } + + return nil +} diff --git a/pkg/filesystem/manage.go b/pkg/filesystem/manage.go new file mode 100644 index 0000000..c77c9d9 --- /dev/null +++ b/pkg/filesystem/manage.go @@ -0,0 +1,479 @@ +package filesystem + +import ( + "context" + "fmt" + "path" + "strings" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/hashid" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +/* ================= + 文件/目录管理 + ================= +*/ + +// Rename 重命名对象 +func (fs *FileSystem) Rename(ctx context.Context, dir, file []uint, new string) (err error) { + // 验证新名字 + if !fs.ValidateLegalName(ctx, new) || (len(file) > 0 && !fs.ValidateExtension(ctx, new)) { + return ErrIllegalObjectName + } + + // 如果源对象是文件 + if len(file) > 0 { + fileObject, err := model.GetFilesByIDs([]uint{file[0]}, fs.User.ID) + if err != nil || len(fileObject) == 0 { + return ErrPathNotExist + } + + err = fileObject[0].Rename(new) + if err != nil { + return ErrFileExisted + } + return nil + } + + if len(dir) > 0 { + folderObject, err := model.GetFoldersByIDs([]uint{dir[0]}, fs.User.ID) + if err != nil || len(folderObject) == 0 { + return ErrPathNotExist + } + + err = folderObject[0].Rename(new) + if err != nil { + return ErrFileExisted + } + return nil + } + + return ErrPathNotExist +} + +// Copy 复制src目录下的文件或目录到dst, +// 暂时只支持单文件 +func (fs *FileSystem) Copy(ctx context.Context, dirs, files []uint, src, dst string) error { + // 获取目的目录 + isDstExist, dstFolder := fs.IsPathExist(dst) + isSrcExist, srcFolder := fs.IsPathExist(src) + // 不存在时返回空的结果 + if !isDstExist || !isSrcExist { + 
// Move moves the folders listed in dirs and the files listed in files from
// the src directory to the dst directory. Both paths must exist.
func (fs *FileSystem) Move(ctx context.Context, dirs, files []uint, src, dst string) error {
	// Resolve the destination and source directories.
	isDstExist, dstFolder := fs.IsPathExist(dst)
	isSrcExist, srcFolder := fs.IsPathExist(src)
	// Bail out if either end of the move is missing.
	if !isDstExist || !isSrcExist {
		return ErrPathNotExist
	}

	// Honor an override target name supplied by the WebDAV layer, if any.
	if dstName, ok := ctx.Value(fsctx.WebdavDstName).(string); ok {
		dstFolder.WebdavDstName = dstName
	}

	// Move the folders (and, transitively, their children).
	err := srcFolder.MoveFolderTo(dirs, dstFolder)
	if err != nil {
		return ErrFileExisted.WithError(err)
	}

	// Move the files.
	_, err = srcFolder.MoveOrCopyFileTo(files, dstFolder, false)
	if err != nil {
		return ErrFileExisted.WithError(err)
	}

	// err is nil at this point; returned for symmetry with the branches above.
	return err
}
+ if err != nil { + return err + } + } + + // 去除待删除文件中包含软连接的部分 + filesToBeDelete, err := model.RemoveFilesWithSoftLinks(fs.FileTarget) + if err != nil { + return ErrDBListObjects.WithError(err) + } + + // 根据存储策略将文件分组 + policyGroup := fs.GroupFileByPolicy(ctx, filesToBeDelete) + + // 按照存储策略分组删除对象 + failed := make(map[uint][]string) + if !unlink { + failed = fs.deleteGroupedFile(ctx, policyGroup) + } + + // 整理删除结果 + for i := 0; i < len(fs.FileTarget); i++ { + if !util.ContainsString(failed[fs.FileTarget[i].PolicyID], fs.FileTarget[i].SourceName) { + // 已成功删除的文件 + deletedFiles = append(deletedFiles, &fs.FileTarget[i]) + } + + // 全部文件 + allFiles = append(allFiles, &fs.FileTarget[i]) + } + + // 如果强制删除,则将全部文件视为删除成功 + if force { + deletedFiles = allFiles + } + + // 删除文件记录 + err = model.DeleteFiles(deletedFiles, fs.User.ID) + if err != nil { + return ErrDBDeleteObjects.WithError(err) + } + + // 删除文件记录对应的分享记录 + // TODO 先取消分享再删除文件 + deletedFileIDs := make([]uint, len(deletedFiles)) + for k, file := range deletedFiles { + deletedFileIDs[k] = file.ID + } + + model.DeleteShareBySourceIDs(deletedFileIDs, false) + + // 如果文件全部删除成功,继续删除目录 + if len(deletedFiles) == len(allFiles) { + var allFolderIDs = make([]uint, 0, len(fs.DirTarget)) + for _, value := range fs.DirTarget { + allFolderIDs = append(allFolderIDs, value.ID) + } + err = model.DeleteFolderByIDs(allFolderIDs) + if err != nil { + return ErrDBDeleteObjects.WithError(err) + } + + // 删除目录记录对应的分享记录 + model.DeleteShareBySourceIDs(allFolderIDs, true) + } + + if notDeleted := len(fs.FileTarget) - len(deletedFiles); notDeleted > 0 { + return serializer.NewError( + serializer.CodeNotFullySuccess, + fmt.Sprintf("Failed to delete %d file(s).", notDeleted), + nil, + ) + } + + return nil +} + +// ListDeleteDirs 递归列出要删除目录,及目录下所有文件 +func (fs *FileSystem) ListDeleteDirs(ctx context.Context, ids []uint) error { + // 列出所有递归子目录 + folders, err := model.GetRecursiveChildFolder(ids, fs.User.ID, true) + if err != nil { + return 
// List returns the contents of dirPath. pathProcessor, when non-nil, is a
// hook applied to each object's final parent path; e.g. share pages use it
// to strip the path prefix above the shared directory.
func (fs *FileSystem) List(ctx context.Context, dirPath string, pathProcessor func(string) string) ([]serializer.Object, error) {
	// Resolve the parent directory.
	isExist, folder := fs.IsPathExist(dirPath)
	if !isExist {
		return nil, ErrPathNotExist
	}
	fs.SetTargetDir(&[]model.Folder{*folder})

	var parentPath = path.Join(folder.Position, folder.Name)
	var childFolders []model.Folder
	var childFiles []model.File

	// Fetch sub-folders; lookup errors are ignored, yielding an empty listing.
	childFolders, _ = folder.GetChildFolder()

	// Fetch files directly under this directory; errors likewise ignored.
	childFiles, _ = folder.GetChildFiles()

	return fs.listObjects(ctx, parentPath, childFiles, childFolders, pathProcessor), nil
}
} + + return fs.listObjects(ctx, dirPath, nil, folders, nil), nil +} + +func (fs *FileSystem) listObjects(ctx context.Context, parent string, files []model.File, folders []model.Folder, pathProcessor func(string) string) []serializer.Object { + // 分享文件的ID + shareKey := "" + if key, ok := ctx.Value(fsctx.ShareKeyCtx).(string); ok { + shareKey = key + } + + // 汇总处理结果 + objects := make([]serializer.Object, 0, len(files)+len(folders)) + + // 所有对象的父目录 + var processedPath string + + for _, subFolder := range folders { + // 路径处理钩子, + // 所有对象父目录都是一样的,所以只处理一次 + if processedPath == "" { + if pathProcessor != nil { + processedPath = pathProcessor(parent) + } else { + processedPath = parent + } + } + + objects = append(objects, serializer.Object{ + ID: hashid.HashID(subFolder.ID, hashid.FolderID), + Name: subFolder.Name, + Path: processedPath, + Size: 0, + Type: "dir", + Date: subFolder.UpdatedAt, + CreateDate: subFolder.CreatedAt, + }) + } + + for _, file := range files { + if processedPath == "" { + if pathProcessor != nil { + processedPath = pathProcessor(parent) + } else { + processedPath = parent + } + } + + if file.UploadSessionID == nil { + newFile := serializer.Object{ + ID: hashid.HashID(file.ID, hashid.FileID), + Name: file.Name, + Path: processedPath, + Thumb: file.ShouldLoadThumb(), + Size: file.Size, + Type: "file", + Date: file.UpdatedAt, + SourceEnabled: file.GetPolicy().IsOriginLinkEnable, + CreateDate: file.CreatedAt, + } + if shareKey != "" { + newFile.Key = shareKey + } + objects = append(objects, newFile) + } + } + + return objects +} + +// CreateDirectory 根据给定的完整创建目录,支持递归创建。如果目录已存在,则直接 +// 返回已存在的目录。 +func (fs *FileSystem) CreateDirectory(ctx context.Context, fullPath string) (*model.Folder, error) { + if fullPath == "." 
|| fullPath == "" { + return nil, ErrRootProtected + } + + if fullPath == "/" { + if fs.Root != nil { + return fs.Root, nil + } + return fs.User.Root() + } + + // 获取要创建目录的父路径和目录名 + fullPath = path.Clean(fullPath) + base := path.Dir(fullPath) + dir := path.Base(fullPath) + + // 去掉结尾空格 + dir = strings.TrimRight(dir, " ") + + // 检查目录名是否合法 + if !fs.ValidateLegalName(ctx, dir) { + return nil, ErrIllegalObjectName + } + + // 父目录是否存在 + isExist, parent := fs.IsPathExist(base) + if !isExist { + newParent, err := fs.CreateDirectory(ctx, base) + if err != nil { + return nil, err + } + parent = newParent + } + + // 是否有同名文件 + if ok, _ := fs.IsChildFileExist(parent, dir); ok { + return nil, ErrFileExisted + } + + // 创建目录 + newFolder := model.Folder{ + Name: dir, + ParentID: &parent.ID, + OwnerID: fs.User.ID, + } + _, err := newFolder.Create() + + if err != nil { + return nil, fmt.Errorf("failed to create folder: %w", err) + } + + return &newFolder, nil +} + +// SaveTo 将别人分享的文件转存到目标路径下 +func (fs *FileSystem) SaveTo(ctx context.Context, path string) error { + // 获取父目录 + isExist, folder := fs.IsPathExist(path) + if !isExist { + return ErrPathNotExist + } + + var ( + totalSize uint64 + err error + ) + + if len(fs.DirTarget) > 0 { + totalSize, err = fs.DirTarget[0].CopyFolderTo(fs.DirTarget[0].ID, folder) + } else { + parent := model.Folder{ + OwnerID: fs.FileTarget[0].UserID, + } + parent.ID = fs.FileTarget[0].FolderID + totalSize, err = parent.MoveOrCopyFileTo([]uint{fs.FileTarget[0].ID}, folder, true) + } + + // 扣除用户容量 + fs.User.IncreaseStorageWithoutCheck(totalSize) + if err != nil { + return ErrFileExisted.WithError(err) + } + + return nil +} diff --git a/pkg/filesystem/oauth/mutex.go b/pkg/filesystem/oauth/mutex.go new file mode 100644 index 0000000..41f588d --- /dev/null +++ b/pkg/filesystem/oauth/mutex.go @@ -0,0 +1,25 @@ +package oauth + +import "sync" + +// CredentialLock 针对存储策略凭证的锁 +type CredentialLock interface { + Lock(uint) + Unlock(uint) +} + +var GlobalMutex = 
// IsPathExist reports whether the directory at the given path exists,
// returning the folder model when it does.
func (fs *FileSystem) IsPathExist(path string) (bool, *model.Folder) {
	tracedEnd, currentFolder := fs.getClosedParent(path)
	if tracedEnd {
		return true, currentFolder
	}
	return false, nil
}

// getClosedParent walks path segment by segment and returns the deepest
// existing folder. The boolean is true when the full path was traced (the
// path exists); when false, the returned folder is the closest existing
// ancestor (or nil when even the root could not be resolved).
func (fs *FileSystem) getClosedParent(path string) (bool, *model.Folder) {
	pathList := util.SplitPath(path)
	if len(pathList) == 0 {
		return false, nil
	}

	// Folder cursor for the walk.
	var currentFolder *model.Folder

	// When an explicit root is set (e.g. listing inside a share), start the
	// walk there instead of the user's real root.
	if fs.Root != nil {
		currentFolder = fs.Root
	}

	for _, folderName := range pathList {
		var err error

		// "/" marks the root segment; resolve the user's root unless a
		// custom root is already in place.
		if folderName == "/" {
			if currentFolder != nil {
				continue
			}
			currentFolder, err = fs.User.Root()
			if err != nil {
				return false, nil
			}
		} else {
			// NOTE(review): assumes SplitPath always yields "/" as the first
			// segment, so currentFolder is non-nil here — confirm for
			// relative inputs.
			nextFolder, err := currentFolder.GetChild(folderName)
			if err != nil {
				return false, currentFolder
			}

			currentFolder = nextFolder
		}
	}

	return true, currentFolder
}
*FileSystem) IsFileExist(fullPath string) (bool, *model.File) { + basePath := path.Dir(fullPath) + fileName := path.Base(fullPath) + + // 获得父目录 + exist, parent := fs.IsPathExist(basePath) + if !exist { + return false, nil + } + + file, err := parent.GetChildFile(fileName) + + return err == nil, file +} + +// IsChildFileExist 确定folder目录下是否有名为name的文件 +func (fs *FileSystem) IsChildFileExist(folder *model.Folder, name string) (bool, *model.File) { + file, err := folder.GetChildFile(name) + return err == nil, file +} diff --git a/pkg/filesystem/relocate.go b/pkg/filesystem/relocate.go new file mode 100644 index 0000000..673c61b --- /dev/null +++ b/pkg/filesystem/relocate.go @@ -0,0 +1,102 @@ +package filesystem + +import ( + "context" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +/* ================ + 存储策略迁移 + ================ +*/ + +// Relocate 将目标文件转移到当前存储策略下 +func (fs *FileSystem) Relocate(ctx context.Context, files []model.File, policy *model.Policy) error { + // 重设存储策略为要转移的目的策略 + fs.Policy = policy + if err := fs.DispatchHandler(); err != nil { + return err + } + + // 将目前文件根据存储策略分组 + fileGroup := fs.GroupFileByPolicy(ctx, files) + + // 按照存储策略分组处理每个文件 + for _, fileList := range fileGroup { + // 如果存储策略一样,则跳过 + if fileList[0].GetPolicy().ID == fs.Policy.ID { + util.Log().Debug("Skip relocating %d file(s), since they are already in desired policy.", + len(fileList)) + continue + } + + // 获取当前存储策略的处理器 + currentPolicy, _ := model.GetPolicyByID(fileList[0].PolicyID) + currentHandler, err := getNewPolicyHandler(¤tPolicy) + if err != nil { + return err + } + + // 记录转移完毕需要删除的文件 + toBeDeleted := make([]model.File, 0, len(fileList)) + + // 循环处理每一个文件 + // for id, r := 0, len(fileList); id < r; id++ { + for id, _ := range fileList { + // 验证文件是否符合新存储策略的规定 + if err := HookValidateFile(ctx, fs, fileList[id]); err != nil { + util.Log().Debug("File %q failed to pass 
validators in new policy %q, skipping.", + fileList[id].Name, err) + continue + } + + // 为文件生成新存储策略下的物理路径 + savePath := fs.GenerateSavePath(ctx, fileList[id]) + + // 获取原始文件 + src, err := currentHandler.Get(ctx, fileList[id].SourceName) + if err != nil { + util.Log().Debug("Failed to get file %q: %s, skipping.", + fileList[id].Name, err) + continue + } + + // 转存到新的存储策略 + if err := fs.Handler.Put(ctx, &fsctx.FileStream{ + File: src, + SavePath: savePath, + Size: fileList[id].Size, + }); err != nil { + util.Log().Debug("Failed to migrate file %q: %s, skipping.", + fileList[id].Name, err) + continue + } + + toBeDeleted = append(toBeDeleted, *fileList[id]) + + // 更新文件信息 + fileList[id].Relocate(savePath, fs.Policy.ID) + } + + // 排除带有软链接的文件 + toBeDeletedClean, err := model.RemoveFilesWithSoftLinks(toBeDeleted) + if err != nil { + util.Log().Warning("Failed to check soft links: %s", err) + } + + deleteSourceNames := make([]string, 0, len(toBeDeleted)) + for i := 0; i < len(toBeDeletedClean); i++ { + deleteSourceNames = append(deleteSourceNames, toBeDeletedClean[i].SourceName) + } + + // 删除原始策略中的文件 + if _, err := currentHandler.Delete(ctx, deleteSourceNames); err != nil { + util.Log().Warning("Cannot delete files in origin policy after relocating: %s", err) + } + } + + return nil +} diff --git a/pkg/filesystem/response/common.go b/pkg/filesystem/response/common.go new file mode 100644 index 0000000..a6c9a1d --- /dev/null +++ b/pkg/filesystem/response/common.go @@ -0,0 +1,32 @@ +package response + +import ( + "io" + "time" +) + +// ContentResponse 获取文件内容类方法的通用返回值。 +// 有些上传策略需要重定向, +// 有些直接写文件数据到浏览器 +type ContentResponse struct { + Redirect bool + Content RSCloser + URL string + MaxAge int +} + +// RSCloser 存储策略适配器返回的文件流,有些策略需要带有Closer +type RSCloser interface { + io.ReadSeeker + io.Closer +} + +// Object 列出文件、目录时返回的对象 +type Object struct { + Name string `json:"name"` + RelativePath string `json:"relative_path"` + Source string `json:"source"` + Size uint64 `json:"size"` + 
IsDir bool `json:"is_dir"` + LastModify time.Time `json:"last_modify"` +} diff --git a/pkg/filesystem/tests/file1.txt b/pkg/filesystem/tests/file1.txt new file mode 100644 index 0000000..e69de29 diff --git a/pkg/filesystem/tests/file2.txt b/pkg/filesystem/tests/file2.txt new file mode 100644 index 0000000..e69de29 diff --git a/pkg/filesystem/tests/test.zip b/pkg/filesystem/tests/test.zip new file mode 100644 index 0000000000000000000000000000000000000000..316212ee2456cffe75105f5919eefe2b021f70a0 GIT binary patch literal 154 zcmWIWW@Zs#0D;I*_W+){`Wq}jHVAV7aY<@%iC#%X35X8xW@Hj!z@=6Ns2T(mz$8K^ s7gU6iL4u**`mjgB-K&f+`t)D9U3~kmG6s0FvVpWQ0$~)8)&iRW0I_)<3jhEB literal 0 HcmV?d00001 diff --git a/pkg/filesystem/upload.go b/pkg/filesystem/upload.go new file mode 100644 index 0000000..bb8844d --- /dev/null +++ b/pkg/filesystem/upload.go @@ -0,0 +1,243 @@ +package filesystem + +import ( + "context" + "os" + "path" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gin-gonic/gin" + "github.com/gofrs/uuid" +) + +/* ================ + 上传处理相关 + ================ +*/ + +const ( + UploadSessionMetaKey = "upload_session" + UploadSessionCtx = "uploadSession" + UserCtx = "user" + UploadSessionCachePrefix = "callback_" +) + +// Upload 上传文件 +func (fs *FileSystem) Upload(ctx context.Context, file *fsctx.FileStream) (err error) { + // 上传前的钩子 + err = fs.Trigger(ctx, "BeforeUpload", file) + if err != nil { + request.BlackHole(file) + return err + } + + // 生成文件名和路径, + var savePath string + if file.SavePath == "" { + // 如果是更新操作就从上下文中获取 + if originFile, ok := ctx.Value(fsctx.FileModelCtx).(model.File); ok { + savePath = originFile.SourceName + } else { + savePath = fs.GenerateSavePath(ctx, file) + } + file.SavePath = 
// CancelUpload watches the incoming request's context and, when the client
// disconnects before the upload context itself is done, fires the
// "AfterUploadCanceled" hooks (typically deleting the partially uploaded
// temp file). Intended to run in its own goroutine.
//
// path is currently unused; the hooks read everything they need from file.
func (fs *FileSystem) CancelUpload(ctx context.Context, path string, file fsctx.FileHeader) {
	var reqContext context.Context
	if ginCtx, ok := ctx.Value(fsctx.GinCtx).(*gin.Context); ok {
		reqContext = ginCtx.Request.Context()
	} else if reqCtx, ok := ctx.Value(fsctx.HTTPCtx).(context.Context); ok {
		reqContext = reqCtx
	} else {
		// No request context available; nothing to watch.
		return
	}

	select {
	case <-reqContext.Done():
		select {
		case <-ctx.Done():
			// The surrounding upload context closed normally — nothing to
			// clean up.
		default:
			// Client went away mid-upload: run the cancellation hooks.
			util.Log().Debug("Client canceled upload.")
			if fs.Hooks["AfterUploadCanceled"] == nil {
				return
			}
			err := fs.Trigger(ctx, "AfterUploadCanceled", file)
			if err != nil {
				util.Log().Debug("AfterUploadCanceled hook execution failed: %s", err)
			}
		}

	}
}
fileSize := file.Size + + // 创建占位的文件,同时校验文件信息 + file.Mode = fsctx.Nop + if callbackKey != "" { + file.UploadSessionID = &callbackKey + } + + fs.Use("BeforeUpload", HookValidateFile) + fs.Use("BeforeUpload", HookValidateCapacity) + + // 验证文件规格 + if err := fs.Upload(ctx, file); err != nil { + return nil, err + } + + uploadSession := &serializer.UploadSession{ + Key: callbackKey, + UID: fs.User.ID, + Policy: *fs.Policy, + VirtualPath: file.VirtualPath, + Name: file.Name, + Size: fileSize, + SavePath: file.SavePath, + LastModified: file.LastModified, + CallbackSecret: util.RandStringRunes(32), + } + + // 获取上传凭证 + credential, err := fs.Handler.Token(ctx, int64(callBackSessionTTL), uploadSession, file) + if err != nil { + return nil, err + } + + // 创建占位符 + if !fs.Policy.IsUploadPlaceholderWithSize() { + fs.Use("AfterUpload", HookClearFileHeaderSize) + } + fs.Use("AfterUpload", GenericAfterUpload) + ctx = context.WithValue(ctx, fsctx.IgnoreDirectoryConflictCtx, true) + if err := fs.Upload(ctx, file); err != nil { + return nil, err + } + + // 创建回调会话 + err = cache.Set( + UploadSessionCachePrefix+callbackKey, + *uploadSession, + callBackSessionTTL, + ) + if err != nil { + return nil, err + } + + // 补全上传凭证其他信息 + credential.Expires = time.Now().Add(time.Duration(callBackSessionTTL) * time.Second).Unix() + + return credential, nil +} + +// UploadFromStream 从文件流上传文件 +func (fs *FileSystem) UploadFromStream(ctx context.Context, file *fsctx.FileStream, resetPolicy bool) error { + // 给文件系统分配钩子 + fs.Lock.Lock() + if resetPolicy { + err := fs.SetPolicyFromPath(file.VirtualPath) + if err != nil { + return err + } + } + + if fs.Hooks == nil { + fs.Use("BeforeUpload", HookValidateFile) + fs.Use("BeforeUpload", HookValidateCapacity) + fs.Use("AfterUploadCanceled", HookDeleteTempFile) + fs.Use("AfterUpload", GenericAfterUpload) + fs.Use("AfterValidateFailed", HookDeleteTempFile) + } + fs.Lock.Unlock() + + // 开始上传 + return fs.Upload(ctx, file) +} + +// UploadFromPath 将本机已有文件上传到用户的文件系统 +func 
(fs *FileSystem) UploadFromPath(ctx context.Context, src, dst string, mode fsctx.WriteMode) error { + file, err := os.Open(util.RelativePath(src)) + if err != nil { + return err + } + defer file.Close() + + // 获取源文件大小 + fi, err := file.Stat() + if err != nil { + return err + } + size := fi.Size() + + // 开始上传 + return fs.UploadFromStream(ctx, &fsctx.FileStream{ + File: file, + Seeker: file, + Size: uint64(size), + Name: path.Base(dst), + VirtualPath: path.Dir(dst), + Mode: mode, + }, true) +} diff --git a/pkg/filesystem/validator.go b/pkg/filesystem/validator.go new file mode 100644 index 0000000..1992547 --- /dev/null +++ b/pkg/filesystem/validator.go @@ -0,0 +1,66 @@ +package filesystem + +import ( + "context" + "strings" + + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +/* ========== + 验证器 + ========== +*/ + +// 文件/路径名保留字符 +var reservedCharacter = []string{"\\", "?", "*", "<", "\"", ":", ">", "/", "|"} + +// ValidateLegalName 验证文件名/文件夹名是否合法 +func (fs *FileSystem) ValidateLegalName(ctx context.Context, name string) bool { + // 是否包含保留字符 + for _, value := range reservedCharacter { + if strings.Contains(name, value) { + return false + } + } + + // 是否超出长度限制 + if len(name) >= 256 { + return false + } + + // 是否为空限制 + if len(name) == 0 { + return false + } + + // 结尾不能是空格 + if strings.HasSuffix(name, " ") { + return false + } + + return true +} + +// ValidateFileSize 验证上传的文件大小是否超出限制 +func (fs *FileSystem) ValidateFileSize(ctx context.Context, size uint64) bool { + if fs.Policy.MaxSize == 0 { + return true + } + return size <= fs.Policy.MaxSize +} + +// ValidateCapacity 验证并扣除用户容量 +func (fs *FileSystem) ValidateCapacity(ctx context.Context, size uint64) bool { + return fs.User.IncreaseStorage(size) +} + +// ValidateExtension 验证文件扩展名 +func (fs *FileSystem) ValidateExtension(ctx context.Context, fileName string) bool { + // 不需要验证 + if len(fs.Policy.OptionsSerialized.FileType) == 0 { + return true + } + + return util.IsInExtensionList(fs.Policy.OptionsSerialized.FileType, 
// HashEncode encodes the given int slice into an opaque hash ID string,
// salted with the instance-wide HashIDSalt.
func HashEncode(v []int) (string, error) {
	hd := hashids.NewData()
	hd.Salt = conf.SystemConfig.HashIDSalt

	h, err := hashids.NewWithData(hd)
	if err != nil {
		return "", err
	}

	id, err := h.Encode(v)
	if err != nil {
		return "", err
	}
	return id, nil
}

// HashDecode decodes a hash ID string back into the original int slice.
func HashDecode(raw string) ([]int, error) {
	hd := hashids.NewData()
	hd.Salt = conf.SystemConfig.HashIDSalt

	h, err := hashids.NewWithData(hd)
	if err != nil {
		return []int{}, err
	}

	return h.DecodeWithError(raw)

}

// HashID encodes a database primary key together with its object type t
// (ShareID, UserID, ...) into a public hash ID. Encoding errors are
// discarded and yield an empty string.
func HashID(id uint, t int) string {
	v, _ := HashEncode([]int{int(id), t})
	return v
}

// DecodeHashID reverses HashID: it returns the database ID embedded in id,
// or ErrTypeNotMatch when the payload is malformed or its type tag differs
// from t. Decode failures also surface as ErrTypeNotMatch, since the error
// from HashDecode is discarded here.
func DecodeHashID(id string, t int) (uint, error) {
	v, _ := HashDecode(id)
	if len(v) != 2 || v[1] != t {
		return 0, ErrTypeNotMatch
	}
	return uint(v[0]), nil
}
// Sets records the call and returns the scripted error.
// NOTE(review): prefix is not forwarded to Called — expectations on Sets can
// only match on values, unlike Delete below which forwards both arguments.
// Confirm this asymmetry is intended before relying on it in tests.
func (c CacheClientMock) Sets(values map[string]interface{}, prefix string) error {
	return c.Called(values).Error(0)
}

// Delete records the call with both keys and prefix and returns the
// scripted error.
func (c CacheClientMock) Delete(keys []string, prefix string) error {
	return c.Called(keys, prefix).Error(0)
}

// Persist records the call and returns the scripted error.
func (c CacheClientMock) Persist(path string) error {
	return c.Called(path).Error(0)
}

// Restore records the call and returns the scripted error.
func (c CacheClientMock) Restore(path string) error {
	return c.Called(path).Error(0)
}
args.Error(1) +} + +func (s SlaveControllerMock) GetPolicyOauthToken(s2 string, u uint) (string, error) { + args := s.Called(s2, u) + return args.String(0), args.Error(1) +} diff --git a/pkg/mocks/mocks.go b/pkg/mocks/mocks.go new file mode 100644 index 0000000..01c450b --- /dev/null +++ b/pkg/mocks/mocks.go @@ -0,0 +1,151 @@ +package mocks + +import ( + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/common" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/rpc" + "github.com/cloudreve/Cloudreve/v3/pkg/auth" + "github.com/cloudreve/Cloudreve/v3/pkg/balancer" + "github.com/cloudreve/Cloudreve/v3/pkg/cluster" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/task" + testMock "github.com/stretchr/testify/mock" +) + +type NodePoolMock struct { + testMock.Mock +} + +func (n NodePoolMock) BalanceNodeByFeature(feature string, lb balancer.Balancer) (error, cluster.Node) { + args := n.Called(feature, lb) + return args.Error(0), args.Get(1).(cluster.Node) +} + +func (n NodePoolMock) GetNodeByID(id uint) cluster.Node { + args := n.Called(id) + if res, ok := args.Get(0).(cluster.Node); ok { + return res + } + + return nil +} + +func (n NodePoolMock) Add(node *model.Node) { + n.Called(node) +} + +func (n NodePoolMock) Delete(id uint) { + n.Called(id) +} + +type NodeMock struct { + testMock.Mock +} + +func (n NodeMock) Init(node *model.Node) { + n.Called(node) +} + +func (n NodeMock) IsFeatureEnabled(feature string) bool { + args := n.Called(feature) + return args.Bool(0) +} + +func (n NodeMock) SubscribeStatusChange(callback func(isActive bool, id uint)) { + n.Called(callback) +} + +func (n NodeMock) Ping(req *serializer.NodePingReq) (*serializer.NodePingResp, error) { + args := n.Called(req) + return args.Get(0).(*serializer.NodePingResp), args.Error(1) +} + +func (n NodeMock) IsActive() bool { + args := n.Called() + return args.Bool(0) +} + +func (n NodeMock) GetAria2Instance() 
common.Aria2 { + args := n.Called() + return args.Get(0).(common.Aria2) +} + +func (n NodeMock) ID() uint { + args := n.Called() + return args.Get(0).(uint) +} + +func (n NodeMock) Kill() { + n.Called() +} + +func (n NodeMock) IsMater() bool { + args := n.Called() + return args.Bool(0) +} + +func (n NodeMock) MasterAuthInstance() auth.Auth { + args := n.Called() + return args.Get(0).(auth.Auth) +} + +func (n NodeMock) SlaveAuthInstance() auth.Auth { + args := n.Called() + return args.Get(0).(auth.Auth) +} + +func (n NodeMock) DBModel() *model.Node { + args := n.Called() + return args.Get(0).(*model.Node) +} + +type Aria2Mock struct { + testMock.Mock +} + +func (a Aria2Mock) Init() error { + args := a.Called() + return args.Error(0) +} + +func (a Aria2Mock) CreateTask(task *model.Download, options map[string]interface{}) (string, error) { + args := a.Called(task, options) + return args.String(0), args.Error(1) +} + +func (a Aria2Mock) Status(task *model.Download) (rpc.StatusInfo, error) { + args := a.Called(task) + return args.Get(0).(rpc.StatusInfo), args.Error(1) +} + +func (a Aria2Mock) Cancel(task *model.Download) error { + args := a.Called(task) + return args.Error(0) +} + +func (a Aria2Mock) Select(task *model.Download, files []int) error { + args := a.Called(task, files) + return args.Error(0) +} + +func (a Aria2Mock) GetConfig() model.Aria2Option { + args := a.Called() + return args.Get(0).(model.Aria2Option) +} + +func (a Aria2Mock) DeleteTempFile(download *model.Download) error { + args := a.Called(download) + return args.Error(0) +} + +type TaskPoolMock struct { + testMock.Mock +} + +func (t TaskPoolMock) Add(num int) { + t.Called(num) +} + +func (t TaskPoolMock) Submit(job task.Job) { + t.Called(job) +} diff --git a/pkg/mocks/remoteclientmock/mock.go b/pkg/mocks/remoteclientmock/mock.go new file mode 100644 index 0000000..f036541 --- /dev/null +++ b/pkg/mocks/remoteclientmock/mock.go @@ -0,0 +1,33 @@ +package remoteclientmock + +import ( + "context" + + 
"github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/stretchr/testify/mock" +) + +type RemoteClientMock struct { + mock.Mock +} + +func (r *RemoteClientMock) CreateUploadSession(ctx context.Context, session *serializer.UploadSession, ttl int64, overwrite bool) error { + return r.Called(ctx, session, ttl, overwrite).Error(0) +} + +func (r *RemoteClientMock) GetUploadURL(ttl int64, sessionID string) (string, string, error) { + args := r.Called(ttl, sessionID) + + return args.String(0), args.String(1), args.Error(2) +} + +func (r *RemoteClientMock) Upload(ctx context.Context, file fsctx.FileHeader) error { + args := r.Called(ctx, file) + return args.Error(0) +} + +func (r *RemoteClientMock) DeleteUploadSession(ctx context.Context, sessionID string) error { + args := r.Called(ctx, sessionID) + return args.Error(0) +} diff --git a/pkg/mocks/requestmock/request.go b/pkg/mocks/requestmock/request.go new file mode 100644 index 0000000..7e6ca1b --- /dev/null +++ b/pkg/mocks/requestmock/request.go @@ -0,0 +1,15 @@ +package requestmock + +import ( + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/stretchr/testify/mock" + "io" +) + +type RequestMock struct { + mock.Mock +} + +func (r RequestMock) Request(method, target string, body io.Reader, opts ...request.Option) *request.Response { + return r.Called(method, target, body, opts).Get(0).(*request.Response) +} diff --git a/pkg/mocks/thumbmock/thumb.go b/pkg/mocks/thumbmock/thumb.go new file mode 100644 index 0000000..553ba50 --- /dev/null +++ b/pkg/mocks/thumbmock/thumb.go @@ -0,0 +1,25 @@ +package thumbmock + +import ( + "context" + "github.com/cloudreve/Cloudreve/v3/pkg/thumb" + "github.com/stretchr/testify/mock" + "io" +) + +type GeneratorMock struct { + mock.Mock +} + +func (g GeneratorMock) Generate(ctx context.Context, file io.Reader, src string, name string, options map[string]string) (*thumb.Result, error) { + res := g.Called(ctx, 
file, src, name, options) + return res.Get(0).(*thumb.Result), res.Error(1) +} + +func (g GeneratorMock) Priority() int { + return 0 +} + +func (g GeneratorMock) EnableFlag() string { + return "thumb_vips_enabled" +} diff --git a/pkg/mocks/wopimock/mock.go b/pkg/mocks/wopimock/mock.go new file mode 100644 index 0000000..0573c04 --- /dev/null +++ b/pkg/mocks/wopimock/mock.go @@ -0,0 +1,21 @@ +package wopimock + +import ( + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/wopi" + "github.com/stretchr/testify/mock" +) + +type WopiClientMock struct { + mock.Mock +} + +func (w *WopiClientMock) NewSession(user uint, file *model.File, action wopi.ActonType) (*wopi.Session, error) { + args := w.Called(user, file, action) + return args.Get(0).(*wopi.Session), args.Error(1) +} + +func (w *WopiClientMock) AvailableExts() []string { + args := w.Called() + return args.Get(0).([]string) +} diff --git a/pkg/mq/mq.go b/pkg/mq/mq.go new file mode 100644 index 0000000..e7a8a34 --- /dev/null +++ b/pkg/mq/mq.go @@ -0,0 +1,160 @@ +package mq + +import ( + "encoding/gob" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/common" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/rpc" + "strconv" + "sync" + "time" +) + +// Message 消息事件正文 +type Message struct { + // 消息触发者 + TriggeredBy string + + // 事件标识 + Event string + + // 消息正文 + Content interface{} +} + +type CallbackFunc func(Message) + +// MQ 消息队列 +type MQ interface { + rpc.Notifier + + // 发布一个消息 + Publish(string, Message) + + // 订阅一个消息主题 + Subscribe(string, int) <-chan Message + + // 订阅一个消息主题,注册触发回调函数 + SubscribeCallback(string, CallbackFunc) + + // 取消订阅一个消息主题 + Unsubscribe(string, <-chan Message) +} + +var GlobalMQ = NewMQ() + +func NewMQ() MQ { + return &inMemoryMQ{ + topics: make(map[string][]chan Message), + callbacks: make(map[string][]CallbackFunc), + } +} + +func init() { + gob.Register(Message{}) + gob.Register([]rpc.Event{}) +} + +type inMemoryMQ struct { + topics map[string][]chan Message 
+ callbacks map[string][]CallbackFunc + sync.RWMutex +} + +func (i *inMemoryMQ) Publish(topic string, message Message) { + i.RLock() + subscribersChan, okChan := i.topics[topic] + subscribersCallback, okCallback := i.callbacks[topic] + i.RUnlock() + + if okChan { + go func(subscribersChan []chan Message) { + for i := 0; i < len(subscribersChan); i++ { + select { + case subscribersChan[i] <- message: + case <-time.After(time.Millisecond * 500): + } + } + }(subscribersChan) + + } + + if okCallback { + for i := 0; i < len(subscribersCallback); i++ { + go subscribersCallback[i](message) + } + } +} + +func (i *inMemoryMQ) Subscribe(topic string, buffer int) <-chan Message { + ch := make(chan Message, buffer) + i.Lock() + i.topics[topic] = append(i.topics[topic], ch) + i.Unlock() + return ch +} + +func (i *inMemoryMQ) SubscribeCallback(topic string, callbackFunc CallbackFunc) { + i.Lock() + i.callbacks[topic] = append(i.callbacks[topic], callbackFunc) + i.Unlock() +} + +func (i *inMemoryMQ) Unsubscribe(topic string, sub <-chan Message) { + i.Lock() + defer i.Unlock() + + subscribers, ok := i.topics[topic] + if !ok { + return + } + + var newSubs []chan Message + for _, subscriber := range subscribers { + if subscriber == sub { + continue + } + newSubs = append(newSubs, subscriber) + } + + i.topics[topic] = newSubs +} + +func (i *inMemoryMQ) Aria2Notify(events []rpc.Event, status int) { + for _, event := range events { + i.Publish(event.Gid, Message{ + TriggeredBy: event.Gid, + Event: strconv.FormatInt(int64(status), 10), + Content: events, + }) + } +} + +// OnDownloadStart 下载开始 +func (i *inMemoryMQ) OnDownloadStart(events []rpc.Event) { + i.Aria2Notify(events, common.Downloading) +} + +// OnDownloadPause 下载暂停 +func (i *inMemoryMQ) OnDownloadPause(events []rpc.Event) { + i.Aria2Notify(events, common.Paused) +} + +// OnDownloadStop 下载停止 +func (i *inMemoryMQ) OnDownloadStop(events []rpc.Event) { + i.Aria2Notify(events, common.Canceled) +} + +// OnDownloadComplete 下载完成 +func 
(i *inMemoryMQ) OnDownloadComplete(events []rpc.Event) { + i.Aria2Notify(events, common.Complete) +} + +// OnDownloadError 下载出错 +func (i *inMemoryMQ) OnDownloadError(events []rpc.Event) { + i.Aria2Notify(events, common.Error) +} + +// OnBtDownloadComplete BT下载完成 +func (i *inMemoryMQ) OnBtDownloadComplete(events []rpc.Event) { + i.Aria2Notify(events, common.Complete) +} diff --git a/pkg/payment/alipay.go b/pkg/payment/alipay.go new file mode 100644 index 0000000..a08f45e --- /dev/null +++ b/pkg/payment/alipay.go @@ -0,0 +1,43 @@ +package payment + +import ( + "fmt" + "net/url" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + alipay "github.com/smartwalle/alipay/v3" +) + +// Alipay 支付宝当面付支付处理 +type Alipay struct { + Client *alipay.Client +} + +// Create 创建订单 +func (pay *Alipay) Create(order *model.Order, pack *serializer.PackProduct, group *serializer.GroupProducts, user *model.User) (*OrderCreateRes, error) { + gateway, _ := url.Parse("/api/v3/callback/alipay") + var p = alipay.TradePreCreate{ + Trade: alipay.Trade{ + NotifyURL: model.GetSiteURL().ResolveReference(gateway).String(), + Subject: order.Name, + OutTradeNo: order.OrderNo, + TotalAmount: fmt.Sprintf("%.2f", float64(order.Price*order.Num)/100), + }, + } + + if _, err := order.Create(); err != nil { + return nil, ErrInsertOrder.WithError(err) + } + + res, err := pay.Client.TradePreCreate(p) + if err != nil { + return nil, ErrIssueOrder.WithError(err) + } + + return &OrderCreateRes{ + Payment: true, + QRCode: res.QRCode, + ID: order.OrderNo, + }, nil +} diff --git a/pkg/payment/custom.go b/pkg/payment/custom.go new file mode 100644 index 0000000..23c3748 --- /dev/null +++ b/pkg/payment/custom.go @@ -0,0 +1,93 @@ +package payment + +import ( + "encoding/json" + "errors" + "fmt" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/auth" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + 
"github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/gofrs/uuid" + "github.com/qiniu/go-sdk/v7/sms/bytes" + "net/http" + "net/url" +) + +// Custom payment client +type Custom struct { + client request.Client + endpoint string + authClient auth.Auth +} + +const ( + paymentTTL = 3600 * 24 // 24h + CallbackSessionPrefix = "custom_payment_callback_" +) + +func newCustomClient(endpoint, secret string) *Custom { + authClient := auth.HMACAuth{ + SecretKey: []byte(secret), + } + return &Custom{ + endpoint: endpoint, + authClient: auth.General, + client: request.NewClient( + request.WithCredential(authClient, paymentTTL), + request.WithMasterMeta(), + ), + } +} + +// Request body from Cloudreve to create a new payment +type NewCustomOrderRequest struct { + Name string `json:"name"` // Order name + OrderNo string `json:"order_no"` // Order number + NotifyURL string `json:"notify_url"` // Payment callback url + Amount int64 `json:"amount"` // Order total amount +} + +// Create a new payment +func (pay *Custom) Create(order *model.Order, pack *serializer.PackProduct, group *serializer.GroupProducts, user *model.User) (*OrderCreateRes, error) { + callbackID := uuid.Must(uuid.NewV4()) + gateway, _ := url.Parse(fmt.Sprintf("/api/v3/callback/custom/%s/%s", order.OrderNo, callbackID)) + callback, err := auth.SignURI(pay.authClient, model.GetSiteURL().ResolveReference(gateway).String(), paymentTTL) + if err != nil { + return nil, fmt.Errorf("failed to sign callback url: %w", err) + } + + cache.Set(CallbackSessionPrefix+callbackID.String(), order.OrderNo, paymentTTL) + + body := &NewCustomOrderRequest{ + Name: order.Name, + OrderNo: order.OrderNo, + NotifyURL: callback.String(), + Amount: int64(order.Price * order.Num), + } + bodyJson, err := json.Marshal(body) + if err != nil { + return nil, fmt.Errorf("failed to encode body: %w", err) + } + + res, err := pay.client.Request("POST", pay.endpoint, 
bytes.NewReader(bodyJson)). + CheckHTTPResponse(http.StatusOK).DecodeResponse() + if err != nil { + return nil, fmt.Errorf("failed to request payment gateway: %w", err) + } + + if res.Code != 0 { + return nil, errors.New(res.Error) + } + + if _, err := order.Create(); err != nil { + return nil, ErrInsertOrder.WithError(err) + } + + return &OrderCreateRes{ + Payment: true, + QRCode: res.Data.(string), + ID: order.OrderNo, + }, nil +} diff --git a/pkg/payment/order.go b/pkg/payment/order.go new file mode 100644 index 0000000..c161411 --- /dev/null +++ b/pkg/payment/order.go @@ -0,0 +1,171 @@ +package payment + +import ( + "fmt" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/iGoogle-ink/gopay/wechat/v3" + "github.com/qingwg/payjs" + "github.com/smartwalle/alipay/v3" + "math/rand" + "net/url" + "time" +) + +var ( + // ErrUnknownPaymentMethod 未知支付方式 + ErrUnknownPaymentMethod = serializer.NewError(serializer.CodeInternalSetting, "Unknown payment method", nil) + // ErrUnsupportedPaymentMethod 未知支付方式 + ErrUnsupportedPaymentMethod = serializer.NewError(serializer.CodeInternalSetting, "This order cannot be paid with this method", nil) + // ErrInsertOrder 无法插入订单记录 + ErrInsertOrder = serializer.NewError(serializer.CodeDBError, "Failed to insert order record", nil) + // ErrScoreNotEnough 积分不足 + ErrScoreNotEnough = serializer.NewError(serializer.CodeInsufficientCredit, "", nil) + // ErrCreateStoragePack 无法创建容量包 + ErrCreateStoragePack = serializer.NewError(serializer.CodeDBError, "Failed to create storage pack record", nil) + // ErrGroupConflict 用户组冲突 + ErrGroupConflict = serializer.NewError(serializer.CodeGroupConflict, "", nil) + // ErrGroupInvalid 用户组冲突 + ErrGroupInvalid = serializer.NewError(serializer.CodeGroupInvalid, "", nil) + // ErrAdminFulfillGroup 管理员无法购买用户组 + ErrAdminFulfillGroup = serializer.NewError(serializer.CodeFulfillAdminGroup, "", nil) + // ErrUpgradeGroup 用户组冲突 + ErrUpgradeGroup = 
serializer.NewError(serializer.CodeDBError, "Failed to update user's group", nil) + // ErrUInitPayment 无法初始化支付实例 + ErrUInitPayment = serializer.NewError(serializer.CodeInternalSetting, "Failed to initialize payment client", nil) + // ErrIssueOrder 订单接口请求失败 + ErrIssueOrder = serializer.NewError(serializer.CodeInternalSetting, "Failed to create order", nil) + // ErrOrderNotFound 订单不存在 + ErrOrderNotFound = serializer.NewError(serializer.CodeNotFound, "", nil) +) + +// Pay 支付处理接口 +type Pay interface { + Create(order *model.Order, pack *serializer.PackProduct, group *serializer.GroupProducts, user *model.User) (*OrderCreateRes, error) +} + +// OrderCreateRes 订单创建结果 +type OrderCreateRes struct { + Payment bool `json:"payment"` // 是否需要支付 + ID string `json:"id,omitempty"` // 订单号 + QRCode string `json:"qr_code,omitempty"` // 支付二维码指向的地址 +} + +// NewPaymentInstance 获取新的支付实例 +func NewPaymentInstance(method string) (Pay, error) { + switch method { + case "score": + return &ScorePayment{}, nil + case "alipay": + options := model.GetSettingByNames("alipay_enabled", "appid", "appkey", "shopid") + if options["alipay_enabled"] != "1" { + return nil, ErrUnknownPaymentMethod + } + + // 初始化支付宝客户端 + var client, err = alipay.New(options["appid"], options["appkey"], true) + if err != nil { + return nil, ErrUInitPayment.WithError(err) + } + + // 加载支付宝公钥 + err = client.LoadAliPayPublicKey(options["shopid"]) + if err != nil { + return nil, ErrUInitPayment.WithError(err) + } + + return &Alipay{Client: client}, nil + case "payjs": + options := model.GetSettingByNames("payjs_enabled", "payjs_secret", "payjs_id") + if options["payjs_enabled"] != "1" { + return nil, ErrUnknownPaymentMethod + } + + callback, _ := url.Parse("/api/v3/callback/payjs") + payjsConfig := &payjs.Config{ + Key: options["payjs_secret"], + MchID: options["payjs_id"], + NotifyUrl: model.GetSiteURL().ResolveReference(callback).String(), + } + + return &PayJSClient{Client: payjs.New(payjsConfig)}, nil + case "wechat": + 
options := model.GetSettingByNames("wechat_enabled", "wechat_appid", "wechat_mchid", "wechat_serial_no", "wechat_api_key", "wechat_pk_content") + if options["wechat_enabled"] != "1" { + return nil, ErrUnknownPaymentMethod + } + client, err := wechat.NewClientV3(options["wechat_appid"], options["wechat_mchid"], options["wechat_serial_no"], options["wechat_api_key"], options["wechat_pk_content"]) + if err != nil { + return nil, ErrUInitPayment.WithError(err) + } + + return &Wechat{Client: client, ApiV3Key: options["wechat_api_key"]}, nil + case "custom": + options := model.GetSettingByNames("custom_payment_enabled", "custom_payment_endpoint", "custom_payment_secret") + if !model.IsTrueVal(options["custom_payment_enabled"]) { + return nil, ErrUnknownPaymentMethod + } + + return newCustomClient(options["custom_payment_endpoint"], options["custom_payment_secret"]), nil + default: + return nil, ErrUnknownPaymentMethod + } +} + +// NewOrder 创建新订单 +func NewOrder(pack *serializer.PackProduct, group *serializer.GroupProducts, num int, method string, user *model.User) (*OrderCreateRes, error) { + // 获取支付实例 + pay, err := NewPaymentInstance(method) + if err != nil { + return nil, err + } + + var ( + orderType int + productID int64 + title string + price int + ) + if pack != nil { + orderType = model.PackOrderType + productID = pack.ID + title = pack.Name + price = pack.Price + } else if group != nil { + if err := checkGroupUpgrade(user, group); err != nil { + return nil, err + } + + orderType = model.GroupOrderType + productID = group.ID + title = group.Name + price = group.Price + } else { + orderType = model.ScoreOrderType + productID = 0 + title = fmt.Sprintf("%d 积分", num) + price = model.GetIntSetting("score_price", 1) + } + + // 创建订单记录 + order := &model.Order{ + UserID: user.ID, + OrderNo: orderID(), + Type: orderType, + Method: method, + ProductID: productID, + Num: num, + Name: fmt.Sprintf("%s - %s", model.GetSettingByName("siteName"), title), + Price: price, + Status: 
model.OrderUnpaid, + } + + return pay.Create(order, pack, group, user) +} + +func orderID() string { + return fmt.Sprintf("%s%d", + time.Now().Format("20060102150405"), + 100000+rand.Intn(900000), + ) +} diff --git a/pkg/payment/payjs.go b/pkg/payment/payjs.go new file mode 100644 index 0000000..a82ce7b --- /dev/null +++ b/pkg/payment/payjs.go @@ -0,0 +1,31 @@ +package payment + +import ( + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/qingwg/payjs" +) + +// PayJSClient PayJS支付处理 +type PayJSClient struct { + Client *payjs.PayJS +} + +// Create 创建订单 +func (pay *PayJSClient) Create(order *model.Order, pack *serializer.PackProduct, group *serializer.GroupProducts, user *model.User) (*OrderCreateRes, error) { + if _, err := order.Create(); err != nil { + return nil, ErrInsertOrder.WithError(err) + } + + PayNative := pay.Client.GetNative() + res, err := PayNative.Create(int64(order.Price*order.Num), order.Name, order.OrderNo, "", "") + if err != nil { + return nil, ErrIssueOrder.WithError(err) + } + + return &OrderCreateRes{ + Payment: true, + QRCode: res.CodeUrl, + ID: order.OrderNo, + }, nil +} diff --git a/pkg/payment/purchase.go b/pkg/payment/purchase.go new file mode 100644 index 0000000..1c558a6 --- /dev/null +++ b/pkg/payment/purchase.go @@ -0,0 +1,137 @@ +package payment + +import ( + "encoding/json" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "strconv" + "time" +) + +// GivePack 创建容量包 +func GivePack(user *model.User, packInfo *serializer.PackProduct, num int) error { + timeNow := time.Now() + expires := timeNow.Add(time.Duration(packInfo.Time*int64(num)) * time.Second) + pack := model.StoragePack{ + Name: packInfo.Name, + UserID: user.ID, + ActiveTime: &timeNow, + ExpiredTime: &expires, + Size: packInfo.Size, + } + if _, err := pack.Create(); err != nil { + return 
ErrCreateStoragePack.WithError(err) + } + cache.Deletes([]string{strconv.FormatUint(uint64(user.ID), 10)}, "pack_size_") + return nil +} + +func checkGroupUpgrade(user *model.User, groupInfo *serializer.GroupProducts) error { + if user.Group.ID == 1 { + return ErrAdminFulfillGroup + } + + // 检查用户是否已有未过期用户 + if user.PreviousGroupID != 0 && user.GroupID != groupInfo.GroupID { + return ErrGroupConflict + } + + // 用户组不能相同 + if user.GroupID == groupInfo.GroupID && user.PreviousGroupID == 0 { + return ErrGroupInvalid + } + + return nil +} + +// GiveGroup 升级用户组 +func GiveGroup(user *model.User, groupInfo *serializer.GroupProducts, num int) error { + if err := checkGroupUpgrade(user, groupInfo); err != nil { + return err + } + + timeNow := time.Now() + expires := timeNow.Add(time.Duration(groupInfo.Time*int64(num)) * time.Second) + if user.PreviousGroupID != 0 { + expires = user.GroupExpires.Add(time.Duration(groupInfo.Time*int64(num)) * time.Second) + } + + if err := user.UpgradeGroup(groupInfo.GroupID, &expires); err != nil { + return ErrUpgradeGroup.WithError(err) + } + + return nil +} + +// GiveScore 积分充值 +func GiveScore(user *model.User, num int) error { + user.AddScore(num) + return nil +} + +// GiveProduct “发货” +func GiveProduct(user *model.User, pack *serializer.PackProduct, group *serializer.GroupProducts, num int) error { + if pack != nil { + return GivePack(user, pack, num) + } else if group != nil { + return GiveGroup(user, group, num) + } else { + return GiveScore(user, num) + } +} + +// OrderPaid 订单已支付处理 +func OrderPaid(orderNo string) error { + order, err := model.GetOrderByNo(orderNo) + if err != nil || order.Status == model.OrderPaid { + return ErrOrderNotFound.WithError(err) + } + + // 更新订单状态为 已支付 + order.UpdateStatus(model.OrderPaid) + + user, err := model.GetActiveUserByID(order.UserID) + if err != nil { + return serializer.NewError(serializer.CodeUserNotFound, "", err) + } + + // 查询商品 + options := model.GetSettingByNames("pack_data", "group_sell_data") 
+ + var ( + packs []serializer.PackProduct + groups []serializer.GroupProducts + ) + if err := json.Unmarshal([]byte(options["pack_data"]), &packs); err != nil { + return err + } + if err := json.Unmarshal([]byte(options["group_sell_data"]), &groups); err != nil { + return err + } + + // 查找要购买的商品 + var ( + pack *serializer.PackProduct + group *serializer.GroupProducts + ) + if order.Type == model.GroupOrderType { + for _, v := range groups { + if v.ID == order.ProductID { + group = &v + break + } + } + } else if order.Type == model.PackOrderType { + for _, v := range packs { + if v.ID == order.ProductID { + pack = &v + break + } + } + } + + // "发货" + return GiveProduct(&user, pack, group, order.Num) + +} diff --git a/pkg/payment/score.go b/pkg/payment/score.go new file mode 100644 index 0000000..351c848 --- /dev/null +++ b/pkg/payment/score.go @@ -0,0 +1,45 @@ +package payment + +import ( + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" +) + +// ScorePayment 积分支付处理 +type ScorePayment struct { +} + +// Create 创建新订单 +func (pay *ScorePayment) Create(order *model.Order, pack *serializer.PackProduct, group *serializer.GroupProducts, user *model.User) (*OrderCreateRes, error) { + if pack != nil { + order.Price = pack.Score + } else { + order.Price = group.Score + } + + // 检查此订单是否可用积分支付 + if order.Price == 0 { + return nil, ErrUnsupportedPaymentMethod + } + + // 扣除用户积分 + if !user.PayScore(order.Price * order.Num) { + return nil, ErrScoreNotEnough + } + + // 商品“发货” + if err := GiveProduct(user, pack, group, order.Num); err != nil { + user.AddScore(order.Price * order.Num) + return nil, err + } + + // 创建订单记录 + order.Status = model.OrderPaid + if _, err := order.Create(); err != nil { + return nil, ErrInsertOrder.WithError(err) + } + + return &OrderCreateRes{ + Payment: false, + }, nil +} diff --git a/pkg/payment/wechat.go b/pkg/payment/wechat.go new file mode 100644 index 0000000..c9025c5 --- /dev/null +++ 
b/pkg/payment/wechat.go @@ -0,0 +1,88 @@ +package payment + +import ( + "errors" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/iGoogle-ink/gopay" + "github.com/iGoogle-ink/gopay/wechat/v3" + "net/url" + "time" +) + +// Wechat 微信扫码支付接口 +type Wechat struct { + Client *wechat.ClientV3 + ApiV3Key string +} + +// Create 创建订单 +func (pay *Wechat) Create(order *model.Order, pack *serializer.PackProduct, group *serializer.GroupProducts, user *model.User) (*OrderCreateRes, error) { + gateway, _ := url.Parse("/api/v3/callback/wechat") + bm := make(gopay.BodyMap) + bm. + Set("description", order.Name). + Set("out_trade_no", order.OrderNo). + Set("notify_url", model.GetSiteURL().ResolveReference(gateway).String()). + SetBodyMap("amount", func(bm gopay.BodyMap) { + bm.Set("total", int64(order.Price*order.Num)). + Set("currency", "CNY") + }) + + wxRsp, err := pay.Client.V3TransactionNative(bm) + if err != nil { + return nil, ErrIssueOrder.WithError(err) + } + + if wxRsp.Code == wechat.Success { + if _, err := order.Create(); err != nil { + return nil, ErrInsertOrder.WithError(err) + } + + return &OrderCreateRes{ + Payment: true, + QRCode: wxRsp.Response.CodeUrl, + ID: order.OrderNo, + }, nil + } + + return nil, ErrIssueOrder.WithError(errors.New(wxRsp.Error)) +} + +// GetPlatformCert 获取微信平台证书 +func (pay *Wechat) GetPlatformCert() string { + if cert, ok := cache.Get("wechat_platform_cert"); ok { + return cert.(string) + } + + res, err := pay.Client.GetPlatformCerts() + if err == nil { + // 使用反馈证书中启用时间较晚的 + var ( + currentLatest *time.Time + currentCert string + ) + for _, cert := range res.Certs { + effectiveTime, err := time.Parse("2006-01-02T15:04:05-0700", cert.EffectiveTime) + if err != nil { + if currentLatest == nil { + currentLatest = &effectiveTime + currentCert = cert.PublicKey + continue + } + if 
currentLatest.Before(effectiveTime) { + currentLatest = &effectiveTime + currentCert = cert.PublicKey + } + } + } + + cache.Set("wechat_platform_cert", currentCert, 3600*10) + return currentCert + } + + util.Log().Debug("Failed to get Wechat Pay platform certificate: %s", err) + return "" +} diff --git a/pkg/qq/connect.go b/pkg/qq/connect.go new file mode 100644 index 0000000..b8cb72f --- /dev/null +++ b/pkg/qq/connect.go @@ -0,0 +1,211 @@ +package qq + +import ( + "crypto/md5" + "encoding/json" + "errors" + "fmt" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/gofrs/uuid" + "net/url" + "strings" +) + +// LoginPage 登陆页面描述 +type LoginPage struct { + URL string + SecretKey string +} + +// UserCredentials 登陆成功后的凭证 +type UserCredentials struct { + OpenID string + AccessToken string +} + +// UserInfo 用户信息 +type UserInfo struct { + Nick string + Avatar string +} + +var ( + // ErrNotEnabled 未开启登录功能 + ErrNotEnabled = serializer.NewError(serializer.CodeFeatureNotEnabled, "QQ Login is not enabled", nil) + // ErrObtainAccessToken 无法获取AccessToken + ErrObtainAccessToken = serializer.NewError(serializer.CodeNotSet, "Cannot obtain AccessToken", nil) + // ErrObtainOpenID 无法获取OpenID + ErrObtainOpenID = serializer.NewError(serializer.CodeNotSet, "Cannot obtain OpenID", nil) + //ErrDecodeResponse 无法解析服务端响应 + ErrDecodeResponse = serializer.NewError(serializer.CodeNotSet, "Cannot parse serverside response", nil) +) + +// NewLoginRequest 新建登录会话 +func NewLoginRequest() (*LoginPage, error) { + // 获取相关设定 + options := model.GetSettingByNames("qq_login", "qq_login_id") + if options["qq_login"] == "0" { + return nil, ErrNotEnabled + } + + // 生成唯一ID + u2, err := uuid.NewV4() + if err != nil { + return nil, err + } + secret := fmt.Sprintf("%x", md5.Sum(u2.Bytes())) + + // 生成登录地址 + loginURL, _ := url.Parse("https://graph.qq.com/oauth2.0/authorize?response_type=code") + queries 
:= loginURL.Query() + queries.Add("client_id", options["qq_login_id"]) + queries.Add("redirect_uri", getCallbackURL()) + queries.Add("state", secret) + loginURL.RawQuery = queries.Encode() + + return &LoginPage{ + URL: loginURL.String(), + SecretKey: secret, + }, nil +} + +func getCallbackURL() string { + //return "https://drive.aoaoao.me/Callback/QQ" + // 生成回调地址 + gateway, _ := url.Parse("/login/qq") + callback := model.GetSiteURL().ResolveReference(gateway).String() + + return callback +} + +func getAccessTokenURL(code string) string { + // 获取相关设定 + options := model.GetSettingByNames("qq_login_id", "qq_login_key") + + api, _ := url.Parse("https://graph.qq.com/oauth2.0/token?grant_type=authorization_code") + queries := api.Query() + queries.Add("client_id", options["qq_login_id"]) + queries.Add("redirect_uri", getCallbackURL()) + queries.Add("client_secret", options["qq_login_key"]) + queries.Add("code", code) + api.RawQuery = queries.Encode() + + return api.String() +} + +func getUserInfoURL(openid, ak string) string { + // 获取相关设定 + options := model.GetSettingByNames("qq_login_id", "qq_login_key") + + api, _ := url.Parse("https://graph.qq.com/user/get_user_info") + queries := api.Query() + queries.Add("oauth_consumer_key", options["qq_login_id"]) + queries.Add("openid", openid) + queries.Add("access_token", ak) + api.RawQuery = queries.Encode() + + return api.String() +} + +func getResponse(body string) (map[string]interface{}, error) { + var res map[string]interface{} + + if !strings.Contains(body, "callback") { + return res, nil + } + + body = strings.TrimPrefix(body, "callback(") + body = strings.TrimSuffix(body, ");\n") + + err := json.Unmarshal([]byte(body), &res) + + return res, err +} + +// Callback 处理回调,返回openid和access key +func Callback(code string) (*UserCredentials, error) { + // 获取相关设定 + options := model.GetSettingByNames("qq_login") + if options["qq_login"] == "0" { + return nil, ErrNotEnabled + } + + api := getAccessTokenURL(code) + + // 
获取AccessToken + client := request.NewClient() + res := client.Request("GET", api, nil) + resp, err := res.GetResponse() + if err != nil { + return nil, ErrObtainAccessToken.WithError(err) + } + + // 如果服务端返回错误 + errResp, err := getResponse(resp) + if msg, ok := errResp["error_description"]; err == nil && ok { + return nil, ErrObtainAccessToken.WithError(errors.New(msg.(string))) + } + + // 获取AccessToken + vals, err := url.ParseQuery(resp) + if err != nil { + return nil, ErrDecodeResponse.WithError(err) + } + accessToken := vals.Get("access_token") + + // 用 AccessToken 换取OpenID + res = client.Request("GET", "https://graph.qq.com/oauth2.0/me?access_token="+accessToken, nil) + resp, err = res.GetResponse() + if err != nil { + return nil, ErrObtainOpenID.WithError(err) + } + + // 解析服务端响应 + errResp, err = getResponse(resp) + if msg, ok := errResp["error_description"]; err == nil && ok { + return nil, ErrObtainOpenID.WithError(errors.New(msg.(string))) + } + + if openid, ok := errResp["openid"]; ok { + return &UserCredentials{ + OpenID: openid.(string), + AccessToken: accessToken, + }, nil + } + + return nil, ErrDecodeResponse +} + +// GetUserInfo 使用凭证获取用户信息 +func GetUserInfo(credential *UserCredentials) (*UserInfo, error) { + api := getUserInfoURL(credential.OpenID, credential.AccessToken) + + // 获取用户信息 + client := request.NewClient() + res := client.Request("GET", api, nil) + resp, err := res.GetResponse() + if err != nil { + return nil, ErrObtainAccessToken.WithError(err) + } + + var resSerialized map[string]interface{} + if err := json.Unmarshal([]byte(resp), &resSerialized); err != nil { + return nil, ErrDecodeResponse.WithError(err) + } + + // 如果服务端返回错误 + if msg, ok := resSerialized["msg"]; ok && msg.(string) != "" { + return nil, ErrObtainAccessToken.WithError(errors.New(msg.(string))) + } + + if avatar, ok := resSerialized["figureurl_qq_2"]; ok { + return &UserInfo{ + Nick: resSerialized["nickname"].(string), + Avatar: avatar.(string), + }, nil + } + + return nil, 
// reCAPTCHALink is the siteverify endpoint (the recaptcha.net mirror of the
// official API, reachable from regions where google.com is blocked).
const reCAPTCHALink = "https://www.recaptcha.net/recaptcha/api/siteverify"

// VERSION identifies which generation of the reCAPTCHA API is in use.
type VERSION int8

const (
	// V2 reCAPTCHA API v2
	V2 VERSION = iota
	// V3 reCAPTCHA API v3; see https://developers.google.com/recaptcha/docs/v3
	V3
	// DefaultTreshold is the minimum score accepted under the V3 API when no
	// explicit threshold is configured.
	DefaultTreshold float32 = 0.5
)

// reCHAPTCHARequest is the form payload posted to the siteverify endpoint.
type reCHAPTCHARequest struct {
	Secret   string `json:"secret"`
	Response string `json:"response"`
	RemoteIP string `json:"remoteip,omitempty"`
}

// reCHAPTCHAResponse mirrors the JSON document returned by siteverify.
type reCHAPTCHAResponse struct {
	Success        bool      `json:"success"`
	ChallengeTS    time.Time `json:"challenge_ts"`
	Hostname       string    `json:"hostname,omitempty"`
	ApkPackageName string    `json:"apk_package_name,omitempty"`
	Action         string    `json:"action,omitempty"`
	Score          float32   `json:"score,omitempty"`
	ErrorCodes     []string  `json:"error-codes,omitempty"`
}

// netClient abstracts the HTTP client so tests can substitute a mock.
type netClient interface {
	PostForm(url string, formValues url.Values) (resp *http.Response, err error)
}

// clock abstracts elapsed-time measurement so tests can substitute a mock.
type clock interface {
	Since(t time.Time) time.Duration
}

// realClock delegates to the standard library.
type realClock struct {
}

func (realClock) Since(t time.Time) time.Duration {
	return time.Since(t)
}

// ReCAPTCHA holds the verification configuration; the small interfaces keep
// the type mockable in tests.
type ReCAPTCHA struct {
	client        netClient
	Secret        string
	ReCAPTCHALink string
	Version       VERSION
	Timeout       time.Duration
	horloge       clock
}

// NewReCAPTCHA builds a ReCAPTCHA verifier. Use V2 for the classic checkbox
// API or V3 for the score-based API; obtain the secret from
// https://www.google.com/recaptcha/admin.
func NewReCAPTCHA(ReCAPTCHASecret string, version VERSION, timeout time.Duration) (ReCAPTCHA, error) {
	if ReCAPTCHASecret == "" {
		return ReCAPTCHA{}, fmt.Errorf("recaptcha secret cannot be blank")
	}
	return ReCAPTCHA{
		client: &http.Client{
			Timeout: timeout,
		},
		horloge:       &realClock{},
		Secret:        ReCAPTCHASecret,
		ReCAPTCHALink: reCAPTCHALink,
		Timeout:       timeout,
		Version:       version,
	}, nil
}

// Verify returns nil when the client solved the challenge correctly.
func (r *ReCAPTCHA) Verify(challengeResponse string) error {
	return r.confirm(reCHAPTCHARequest{Secret: r.Secret, Response: challengeResponse}, VerifyOption{})
}

// VerifyOption carries the verification constraints expected for a challenge.
type VerifyOption struct {
	Threshold      float32 // ignored in v2 recaptcha
	Action         string  // ignored in v2 recaptcha
	Hostname       string
	ApkPackageName string
	ResponseTime   time.Duration
	RemoteIP       string
}

// VerifyWithOptions returns nil when the challenge was solved correctly and
// every supplied option matches; Threshold and Action are ignored under V2.
func (r *ReCAPTCHA) VerifyWithOptions(challengeResponse string, options VerifyOption) error {
	body := reCHAPTCHARequest{Secret: r.Secret, Response: challengeResponse}
	if options.RemoteIP != "" {
		body.RemoteIP = options.RemoteIP
	}
	return r.confirm(body, options)
}

// confirm posts the challenge to the verification endpoint and validates the
// server's answer against the supplied options.
func (r *ReCAPTCHA) confirm(recaptcha reCHAPTCHARequest, options VerifyOption) error {
	form := url.Values{"secret": {recaptcha.Secret}, "response": {recaptcha.Response}}
	if recaptcha.RemoteIP != "" {
		form.Set("remoteip", recaptcha.RemoteIP)
	}

	response, err := r.client.PostForm(r.ReCAPTCHALink, form)
	if err != nil {
		return fmt.Errorf("error posting to recaptcha endpoint: '%s'", err)
	}
	defer response.Body.Close()

	resultBody, err := ioutil.ReadAll(response.Body)
	if err != nil {
		return fmt.Errorf("couldn't read response body: '%s'", err)
	}

	var result reCHAPTCHAResponse
	if err := json.Unmarshal(resultBody, &result); err != nil {
		return fmt.Errorf("invalid response body json: '%s'", err)
	}

	if options.Hostname != "" && options.Hostname != result.Hostname {
		return fmt.Errorf("invalid response hostname '%s', while expecting '%s'", result.Hostname, options.Hostname)
	}

	if options.ApkPackageName != "" && options.ApkPackageName != result.ApkPackageName {
		return fmt.Errorf("invalid response ApkPackageName '%s', while expecting '%s'", result.ApkPackageName, options.ApkPackageName)
	}

	if options.ResponseTime != 0 {
		elapsed := r.horloge.Since(result.ChallengeTS)
		if options.ResponseTime < elapsed {
			return fmt.Errorf("time spent in resolving challenge '%fs', while expecting maximum '%fs'", elapsed.Seconds(), options.ResponseTime.Seconds())
		}
	}

	// Score and action checks only apply to the V3 API.
	if r.Version == V3 {
		if options.Action != "" && options.Action != result.Action {
			return fmt.Errorf("invalid response action '%s', while expecting '%s'", result.Action, options.Action)
		}
		if options.Threshold != 0 && options.Threshold >= result.Score {
			return fmt.Errorf("received score '%f', while expecting minimum '%f'", result.Score, options.Threshold)
		}
		if options.Threshold == 0 && DefaultTreshold >= result.Score {
			return fmt.Errorf("received score '%f', while expecting minimum '%f'", result.Score, DefaultTreshold)
		}
	}

	if result.ErrorCodes != nil {
		return fmt.Errorf("remote error codes: %v", result.ErrorCodes)
	}

	if !result.Success && recaptcha.RemoteIP != "" {
		return fmt.Errorf("invalid challenge solution or remote IP")
	} else if !result.Success {
		return fmt.Errorf("invalid challenge solution")
	}
	return nil
}
optionFunc(func(o *options) { + o.contentLength = s + }) +} + +// WithMasterMeta 请求时携带主机信息 +func WithMasterMeta() Option { + return optionFunc(func(o *options) { + o.masterMeta = true + }) +} + +// WithSlaveMeta 请求时携带从机信息 +func WithSlaveMeta(s string) Option { + return optionFunc(func(o *options) { + o.slaveNodeID = s + }) +} + +// Endpoint 使用同一的请求Endpoint +func WithEndpoint(endpoint string) Option { + if !strings.HasSuffix(endpoint, "/") { + endpoint += "/" + } + + endpointURL, _ := url.Parse(endpoint) + return optionFunc(func(o *options) { + o.endpoint = endpointURL + }) +} + +// WithTPSLimit 请求时使用全局流量限制 +func WithTPSLimit(token string, tps float64, burst int) Option { + return optionFunc(func(o *options) { + o.tpsLimiterToken = token + o.tps = tps + if burst < 1 { + burst = 1 + } + o.tpsBurst = burst + }) +} diff --git a/pkg/request/request.go b/pkg/request/request.go new file mode 100644 index 0000000..2947085 --- /dev/null +++ b/pkg/request/request.go @@ -0,0 +1,263 @@ +package request + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "sync" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/auth" + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// GeneralClient 通用 HTTP Client +var GeneralClient Client = NewClient() + +// Response 请求的响应或错误信息 +type Response struct { + Err error + Response *http.Response +} + +// Client 请求客户端 +type Client interface { + Request(method, target string, body io.Reader, opts ...Option) *Response +} + +// HTTPClient 实现 Client 接口 +type HTTPClient struct { + mu sync.Mutex + options *options + tpsLimiter TPSLimiter +} + +func NewClient(opts ...Option) Client { + client := &HTTPClient{ + options: newDefaultOption(), + tpsLimiter: globalTPSLimiter, + } + + for _, o := range opts { + o.apply(client.options) + } + + return client +} + +// 
// Request sends an HTTP request with the given method, target URL and body,
// applying the client's default options merged with any per-call options.
// Errors are carried inside the returned *Response rather than returned
// directly, so calls can be chained.
func (c *HTTPClient) Request(method, target string, body io.Reader, opts ...Option) *Response {
	// Clone the client-level options under the lock, then apply the per-call
	// options to the copy so concurrent requests never share mutable state.
	c.mu.Lock()
	options := c.options.clone()
	c.mu.Unlock()
	for _, o := range opts {
		o.apply(&options)
	}

	// Transport client honoring the configured timeout.
	client := &http.Client{Timeout: options.timeout}

	// A declared content length of zero means "no body".
	if options.contentLength == 0 {
		body = nil
	}

	// Resolve the target against the configured base endpoint, if any.
	if options.endpoint != nil {
		targetPath, err := url.Parse(target)
		if err != nil {
			return &Response{Err: err}
		}

		targetURL := *options.endpoint
		target = targetURL.ResolveReference(targetPath).String()
	}

	// Build the request, bound to the context when one was provided.
	var (
		req *http.Request
		err error
	)
	if options.ctx != nil {
		req, err = http.NewRequestWithContext(options.ctx, method, target, body)
	} else {
		req, err = http.NewRequest(method, target, body)
	}
	if err != nil {
		return &Response{Err: err}
	}

	// Copy configured headers onto the request.
	// NOTE(review): multiple values for one key are joined with a space into a
	// single header value instead of being added separately — confirm this is
	// intended before changing it; request signing depends on the header bytes.
	if options.header != nil {
		for k, v := range options.header {
			req.Header.Add(k, strings.Join(v, " "))
		}
	}

	// When running as master, attach site metadata so the peer can identify us.
	if options.masterMeta && conf.SystemConfig.Mode == "master" {
		req.Header.Add(auth.CrHeaderPrefix+"Site-Url", model.GetSiteURL().String())
		req.Header.Add(auth.CrHeaderPrefix+"Site-Id", model.GetSettingByName("siteID"))
		req.Header.Add(auth.CrHeaderPrefix+"Cloudreve-Version", conf.BackendVersion)
	}

	// When running as slave, attach this node's ID.
	if options.slaveNodeID != "" && conf.SystemConfig.Mode == "slave" {
		req.Header.Add(auth.CrHeaderPrefix+"Node-Id", options.slaveNodeID)
	}

	// -1 is the sentinel for "unspecified"; anything else is set explicitly.
	if options.contentLength != -1 {
		req.ContentLength = options.contentLength
	}

	// Sign the request: body-carrying methods sign the whole request, all
	// other methods sign the URI only.
	if options.sign != nil {
		switch method {
		case "PUT", "POST", "PATCH":
			auth.SignRequest(options.sign, req, options.signTTL)
		default:
			if resURL, err := auth.SignURI(options.sign, req.URL.String(), options.signTTL); err == nil {
				req.URL = resURL
			}
		}
	}

	// Honor the global TPS limit before dispatching.
	if options.tps > 0 {
		c.tpsLimiter.Limit(options.ctx, options.tpsLimiterToken, options.tps, options.tpsBurst)
	}

	// Dispatch the request.
	resp, err := client.Do(req)
	if err != nil {
		return &Response{Err: err}
	}

	return &Response{Err: nil, Response: resp}
}

// GetResponse checks the wrapped error and returns the response body as a
// string. The body is always read fully and closed.
func (resp *Response) GetResponse() (string, error) {
	if resp.Err != nil {
		return "", resp.Err
	}
	respBody, err := ioutil.ReadAll(resp.Response.Body)
	_ = resp.Response.Body.Close()

	return string(respBody), err
}

// CheckHTTPResponse records an error on the Response when the HTTP status
// code differs from the expected one; it returns the receiver for chaining.
func (resp *Response) CheckHTTPResponse(status int) *Response {
	if resp.Err != nil {
		return resp
	}

	// Compare the actual status code with the expected one.
	if resp.Response.StatusCode != status {
		resp.Err = fmt.Errorf("服务器返回非正常HTTP状态%d", resp.Response.StatusCode)
	}
	return resp
}

// DecodeResponse parses the body as a serializer.Response and returns it,
// logging the raw payload at debug level when parsing fails.
func (resp *Response) DecodeResponse() (*serializer.Response, error) {
	if resp.Err != nil {
		return nil, resp.Err
	}

	respString, err := resp.GetResponse()
	if err != nil {
		return nil, err
	}

	var res serializer.Response
	err = json.Unmarshal([]byte(respString), &res)
	if err != nil {
		util.Log().Debug("Failed to parse response: %s", string(respString))
		return nil, err
	}
	return &res, nil
}

// NopRSCloser wraps a response body with a stub seeker implementation so it
// can be handed to http.ServeContent.
type NopRSCloser struct {
	body   io.ReadCloser
	status *rscStatus
}

type rscStatus struct {
	// http.ServeContent reads a small chunk to sniff the content type, but the
	// response body cannot seek; when true, the first read returns fake data.
	IgnoreFirst bool

	Size int64
}

// GetRSCloser returns an RSCloser with a no-op seeker, suitable for
// http.ServeContent.
func (resp *Response) GetRSCloser() (*NopRSCloser, error) {
	if resp.Err != nil {
		return nil, resp.Err
	}

	return &NopRSCloser{
		body: resp.Response.Body,
		status: &rscStatus{
			Size: resp.Response.ContentLength,
		},
	}, resp.Err
}

// SetFirstFakeChunk makes the first read return empty data.
// TODO test
func (instance NopRSCloser) SetFirstFakeChunk() {
	instance.status.IgnoreFirst = true
}

// SetContentLength sets the advertised stream size.
func (instance NopRSCloser) SetContentLength(size int64) {
	instance.status.Size = size
}
(instance NopRSCloser) Read(p []byte) (n int, err error) { + if instance.status.IgnoreFirst && len(p) == 512 { + return 0, io.EOF + } + return instance.body.Read(p) +} + +// Close 实现 NopRSCloser closer +func (instance NopRSCloser) Close() error { + return instance.body.Close() +} + +// Seek 实现 NopRSCloser seeker, 只实现seek开头/结尾以便http.ServeContent用于确定正文大小 +func (instance NopRSCloser) Seek(offset int64, whence int) (int64, error) { + // 进行第一次Seek操作后,取消忽略选项 + if instance.status.IgnoreFirst { + instance.status.IgnoreFirst = false + } + if offset == 0 { + switch whence { + case io.SeekStart: + return 0, nil + case io.SeekEnd: + return instance.status.Size, nil + } + } + return 0, errors.New("not implemented") + +} + +// BlackHole 将客户端发来的数据放入黑洞 +func BlackHole(r io.Reader) { + if !model.IsTrueVal(model.GetSettingByName("reset_after_upload_failed")) { + io.Copy(ioutil.Discard, r) + } +} diff --git a/pkg/request/tpslimiter.go b/pkg/request/tpslimiter.go new file mode 100644 index 0000000..edea0fa --- /dev/null +++ b/pkg/request/tpslimiter.go @@ -0,0 +1,39 @@ +package request + +import ( + "context" + "golang.org/x/time/rate" + "sync" +) + +var globalTPSLimiter = NewTPSLimiter() + +type TPSLimiter interface { + Limit(ctx context.Context, token string, tps float64, burst int) +} + +func NewTPSLimiter() TPSLimiter { + return &multipleBucketLimiter{ + buckets: make(map[string]*rate.Limiter), + } +} + +// multipleBucketLimiter implements TPSLimiter with multiple bucket support. +type multipleBucketLimiter struct { + mu sync.Mutex + buckets map[string]*rate.Limiter +} + +// Limit finds the given bucket, if bucket not exist or limit is changed, +// a new bucket will be generated. 
+func (m *multipleBucketLimiter) Limit(ctx context.Context, token string, tps float64, burst int) { + m.mu.Lock() + bucket, ok := m.buckets[token] + if !ok || float64(bucket.Limit()) != tps || bucket.Burst() != burst { + bucket = rate.NewLimiter(rate.Limit(tps), burst) + m.buckets[token] = bucket + } + m.mu.Unlock() + + bucket.Wait(ctx) +} diff --git a/pkg/serializer/aria2.go b/pkg/serializer/aria2.go new file mode 100644 index 0000000..890b2b9 --- /dev/null +++ b/pkg/serializer/aria2.go @@ -0,0 +1,117 @@ +package serializer + +import ( + "path" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/aria2/rpc" +) + +// DownloadListResponse 下载列表响应条目 +type DownloadListResponse struct { + UpdateTime time.Time `json:"update"` + UpdateInterval int `json:"interval"` + Name string `json:"name"` + Status int `json:"status"` + Dst string `json:"dst"` + Total uint64 `json:"total"` + Downloaded uint64 `json:"downloaded"` + Speed int `json:"speed"` + Info rpc.StatusInfo `json:"info"` + NodeName string `json:"node"` +} + +// FinishedListResponse 已完成任务条目 +type FinishedListResponse struct { + Name string `json:"name"` + GID string `json:"gid"` + Status int `json:"status"` + Dst string `json:"dst"` + Error string `json:"error"` + Total uint64 `json:"total"` + Files []rpc.FileInfo `json:"files"` + TaskStatus int `json:"task_status"` + TaskError string `json:"task_error"` + CreateTime time.Time `json:"create"` + UpdateTime time.Time `json:"update"` + NodeName string `json:"node"` +} + +// BuildFinishedListResponse 构建已完成任务条目 +func BuildFinishedListResponse(tasks []model.Download) Response { + resp := make([]FinishedListResponse, 0, len(tasks)) + + for i := 0; i < len(tasks); i++ { + fileName := tasks[i].StatusInfo.BitTorrent.Info.Name + if len(tasks[i].StatusInfo.Files) == 1 { + fileName = path.Base(tasks[i].StatusInfo.Files[0].Path) + } + + // 过滤敏感信息 + for i2 := 0; i2 < len(tasks[i].StatusInfo.Files); i2++ { + 
tasks[i].StatusInfo.Files[i2].Path = path.Base(tasks[i].StatusInfo.Files[i2].Path) + } + + download := FinishedListResponse{ + Name: fileName, + GID: tasks[i].GID, + Status: tasks[i].Status, + Error: tasks[i].Error, + Dst: tasks[i].Dst, + Total: tasks[i].TotalSize, + Files: tasks[i].StatusInfo.Files, + TaskStatus: -1, + UpdateTime: tasks[i].UpdatedAt, + CreateTime: tasks[i].CreatedAt, + NodeName: tasks[i].NodeName, + } + + if tasks[i].Task != nil { + download.TaskError = tasks[i].Task.Error + download.TaskStatus = tasks[i].Task.Status + } + + resp = append(resp, download) + } + + return Response{Data: resp} +} + +// BuildDownloadingResponse 构建正在下载的列表响应 +func BuildDownloadingResponse(tasks []model.Download, intervals map[uint]int) Response { + resp := make([]DownloadListResponse, 0, len(tasks)) + + for i := 0; i < len(tasks); i++ { + fileName := "" + if len(tasks[i].StatusInfo.Files) > 0 { + fileName = path.Base(tasks[i].StatusInfo.Files[0].Path) + } + + // 过滤敏感信息 + tasks[i].StatusInfo.Dir = "" + for i2 := 0; i2 < len(tasks[i].StatusInfo.Files); i2++ { + tasks[i].StatusInfo.Files[i2].Path = path.Base(tasks[i].StatusInfo.Files[i2].Path) + } + + interval := 10 + if actualInterval, ok := intervals[tasks[i].ID]; ok { + interval = actualInterval + } + + resp = append(resp, DownloadListResponse{ + UpdateTime: tasks[i].UpdatedAt, + UpdateInterval: interval, + Name: fileName, + Status: tasks[i].Status, + Dst: tasks[i].Dst, + Total: tasks[i].TotalSize, + Downloaded: tasks[i].DownloadedSize, + Speed: tasks[i].Speed, + Info: tasks[i].StatusInfo, + NodeName: tasks[i].NodeName, + }) + } + + return Response{Data: resp} +} diff --git a/pkg/serializer/auth.go b/pkg/serializer/auth.go new file mode 100644 index 0000000..c8b348e --- /dev/null +++ b/pkg/serializer/auth.go @@ -0,0 +1,21 @@ +package serializer + +import "encoding/json" + +// RequestRawSign 待签名的HTTP请求 +type RequestRawSign struct { + Path string + Header string + Body string +} + +// NewRequestSignString 返回JSON格式的待签名字符串 
// NewRequestSignString serializes the request parts into the JSON string
// that gets signed. Marshaling a struct of plain strings cannot fail, so the
// error is deliberately discarded.
func NewRequestSignString(path, header, body string) string {
	req := RequestRawSign{
		Path:   path,
		Header: header,
		Body:   body,
	}
	res, _ := json.Marshal(req)
	return string(res)
}

// AppError is the application-level error; it implements the error interface.
type AppError struct {
	Code     int
	Msg      string
	RawError error
}

// NewError builds an AppError from a business code, message and wrapped error.
func NewError(code int, msg string, err error) AppError {
	return AppError{
		Code:     code,
		Msg:      msg,
		RawError: err,
	}
}

// NewErrorFromResponse rebuilds an AppError from a serializer.Response
// received from a peer node.
func NewErrorFromResponse(resp *Response) AppError {
	return AppError{
		Code:     resp.Code,
		Msg:      resp.Msg,
		RawError: errors.New(resp.Error),
	}
}

// WithError attaches a standard-library error and returns a copy of the
// AppError carrying it.
func (err *AppError) WithError(raw error) AppError {
	err.RawError = raw
	return *err
}

// Error returns the human-readable message determined by the business code.
func (err AppError) Error() string {
	return err.Msg
}

// Three-digit codes reuse their plain HTTP meaning.
// Five-digit codes are application-defined:
//   5xxxx — server-side errors, e.g. a failed database operation;
//   4xxxx — client-side errors, sometimes a client bug, sometimes user error.
const (
	// CodeNotFullySuccess not fully successful
	CodeNotFullySuccess = 203
	// CodeCheckLogin not logged in
	CodeCheckLogin = 401
	// CodeNoPermissionErr unauthorized access
	CodeNoPermissionErr = 403
	// CodeNotFound resource not found
	CodeNotFound = 404
	// CodeConflict resource conflict
	CodeConflict = 409
	// CodeUploadFailed upload failed
	CodeUploadFailed = 40002
	// CodeCreateFolderFailed failed to create folder
	CodeCreateFolderFailed = 40003
	// CodeObjectExist object already exists
	CodeObjectExist = 40004
	// CodeSignExpired signature expired
	CodeSignExpired = 40005
	// CodePolicyNotAllowed not allowed by the current storage policy
	CodePolicyNotAllowed = 40006
	// CodeGroupNotAllowed the user group cannot perform this operation
	CodeGroupNotAllowed = 40007
	// CodeAdminRequired not an administrator group
	CodeAdminRequired = 40008
	// CodeMasterNotFound master node not registered
	CodeMasterNotFound = 40009
	// CodeUploadSessionExpired the upload session has expired
	CodeUploadSessionExpired = 40011
	// CodeInvalidChunkIndex invalid chunk index
	CodeInvalidChunkIndex = 40012
	// CodeInvalidContentLength invalid content length
	CodeInvalidContentLength = 40013
	// CodePhoneRequired no phone number bound
	CodePhoneRequired = 40010
	// CodeBatchSourceSize batch source-link limit exceeded
	CodeBatchSourceSize = 40014
	// CodeBatchAria2Size maximum number of Aria2 tasks exceeded
	CodeBatchAria2Size = 40015
	// CodeParentNotExist parent folder does not exist
	CodeParentNotExist = 40016
	// CodeUserBaned user is banned
	CodeUserBaned = 40017
	// CodeUserNotActivated user is not activated
	CodeUserNotActivated = 40018
	// CodeFeatureNotEnabled this feature is not enabled
	CodeFeatureNotEnabled = 40019
	// CodeCredentialInvalid invalid credentials
	CodeCredentialInvalid = 40020
	// CodeUserNotFound user not found
	CodeUserNotFound = 40021
	// Code2FACodeErr wrong two-factor authentication code
	Code2FACodeErr = 40022
	// CodeLoginSessionNotExist login session does not exist
	CodeLoginSessionNotExist = 40023
	// CodeInitializeAuthn cannot initialize WebAuthn
	CodeInitializeAuthn = 40024
	// CodeWebAuthnCredentialError invalid WebAuthn credential
	CodeWebAuthnCredentialError = 40025
	// CodeCaptchaError wrong captcha
	CodeCaptchaError = 40026
	// CodeCaptchaRefreshNeeded captcha needs to be refreshed
	CodeCaptchaRefreshNeeded = 40027
	// CodeFailedSendEmail failed to send email
	CodeFailedSendEmail = 40028
	// CodeInvalidTempLink invalid temporary link
	CodeInvalidTempLink = 40029
	// CodeTempLinkExpired temporary link has expired
	CodeTempLinkExpired = 40030
	// CodeEmailProviderBaned email domain suffix is banned
	CodeEmailProviderBaned = 40031
	// CodeEmailExisted email address already in use
	CodeEmailExisted = 40032
	// CodeEmailSent activation email has been re-sent
	CodeEmailSent = 40033
	// CodeUserCannotActivate the user cannot be activated
	CodeUserCannotActivate = 40034
	// CodePolicyNotExist storage policy does not exist
	CodePolicyNotExist = 40035
	// CodeDeleteDefaultPolicy the default storage policy cannot be deleted
	CodeDeleteDefaultPolicy = 40036
	// CodePolicyUsedByFiles the storage policy still contains files
	CodePolicyUsedByFiles = 40037
	// CodePolicyUsedByGroups the storage policy is bound to user groups
	CodePolicyUsedByGroups = 40038
	// CodeGroupNotFound user group not found
	CodeGroupNotFound = 40039
	// CodeInvalidActionOnSystemGroup illegal operation on a system user group
	CodeInvalidActionOnSystemGroup = 40040
	// CodeGroupUsedByUser the user group is still in use
	CodeGroupUsedByUser = 40041
	// CodeChangeGroupForDefaultUser changing the group of the initial user
	CodeChangeGroupForDefaultUser = 40042
	// CodeInvalidActionOnDefaultUser illegal operation on the default user
	CodeInvalidActionOnDefaultUser = 40043
	// CodeFileNotFound file not found
	CodeFileNotFound = 40044
	// CodeListFilesError failed to list files
	CodeListFilesError = 40045
	// CodeInvalidActionOnSystemNode illegal operation on a system node
	CodeInvalidActionOnSystemNode = 40046
	// CodeCreateFSError failed to create the file system
	CodeCreateFSError = 40047
	// CodeCreateTaskError failed to create the task
	CodeCreateTaskError = 40048
	// CodeFileTooLarge file is too large
	CodeFileTooLarge = 40049
	// CodeFileTypeNotAllowed file type not allowed
	CodeFileTypeNotAllowed = 40050
	// CodeInsufficientCapacity insufficient user storage capacity
	CodeInsufficientCapacity = 40051
	// CodeIllegalObjectName illegal object name
	CodeIllegalObjectName = 40052
	// CodeRootProtected this operation is not supported on the root folder
	CodeRootProtected = 40053
	// CodeConflictUploadOngoing a file with the same name is already being uploaded in this folder
	CodeConflictUploadOngoing = 40054
	// CodeMetaMismatch file metadata mismatch
	CodeMetaMismatch = 40055
	// CodeUnsupportedArchiveType unsupported archive format
	CodeUnsupportedArchiveType = 40056
	// CodePolicyChanged the available storage policies have changed
	CodePolicyChanged = 40057
	// CodeShareLinkNotFound invalid share link
	CodeShareLinkNotFound = 40058
	// CodeSaveOwnShare cannot save one's own share
	CodeSaveOwnShare = 40059
	// CodeSlavePingMaster the slave cannot send callback requests to the master
	CodeSlavePingMaster = 40060
	// CodeVersionMismatch Cloudreve versions do not match
	CodeVersionMismatch = 40061
	// CodeInsufficientCredit insufficient credit
	CodeInsufficientCredit = 40062
	// CodeGroupConflict user group conflict
	CodeGroupConflict = 40063
	// CodeGroupInvalid already a member of this user group
	CodeGroupInvalid = 40064
	// CodeInvalidGiftCode invalid gift code
	CodeInvalidGiftCode = 40065
	// CodeQQBindConflict a QQ account is already linked
	CodeQQBindConflict = 40066
	// CodeQQBindOtherAccount the QQ account is linked to another user
	CodeQQBindOtherAccount = 40067
	// CodeQQNotLinked no account linked to this QQ account
	CodeQQNotLinked = 40068
	// CodeIncorrectPassword incorrect password
	CodeIncorrectPassword = 40069
	// CodeDisabledSharePreview this share cannot be previewed
	CodeDisabledSharePreview = 40070
	// CodeInvalidSign invalid signature
	CodeInvalidSign = 40071
	// CodeFulfillAdminGroup administrators cannot purchase user groups
	CodeFulfillAdminGroup = 40072
	// CodeDBError database operation failed
	CodeDBError = 50001
	// CodeEncryptError encryption failed
	CodeEncryptError = 50002
	// CodeIOFailed IO operation failed
	CodeIOFailed = 50004
	// CodeInternalSetting invalid internal setting parameter
	CodeInternalSetting = 50005
	// CodeCacheOperation cache operation failed
	CodeCacheOperation = 50006
	// CodeCallbackError callback failed
	CodeCallbackError = 50007
	// CodeUpdateSetting failed to update admin settings
	CodeUpdateSetting = 50008
	// CodeAddCORS failed to add the CORS policy
	CodeAddCORS = 50009
	// CodeNodeOffline node is offline
	CodeNodeOffline = 50010
	// CodeQueryMetaFailed failed to query file metadata
	CodeQueryMetaFailed = 50011
	//CodeParamErr all kinds of miscellaneous parameter errors
	CodeParamErr = 40001
	// CodeNotSet undetermined error; try to derive the code from the error value later
	CodeNotSet = -1
)

// DBErr wraps a database failure into a Response, supplying a default message
// when none is given.
func DBErr(msg string, err error) Response {
	if msg == "" {
		msg = "Database operation failed."
	}
	return Err(CodeDBError, msg, err)
}

// ParamErr wraps a parameter error into a Response, supplying a default
// message when none is given.
func ParamErr(msg string, err error) Response {
	if msg == "" {
		msg = "Invalid parameters."
	}
	return Err(CodeParamErr, msg, err)
}

// Err builds the generic error Response. When err is (or wraps) an AppError,
// its code, message and raw error take precedence over the arguments.
func Err(errCode int, msg string, err error) Response {
	// Prefer the details carried by an underlying AppError.
	var appError AppError
	if errors.As(err, &appError) {
		errCode = appError.Code
		err = appError.RawError
		msg = appError.Msg
	}

	res := Response{
		Code: errCode,
		Msg:  msg,
	}
	// Hide the raw error from clients in production (release) mode.
	if err != nil && gin.Mode() != gin.ReleaseMode {
		res.Error = err.Error()
	}
	return res
}

// Register ObjectProps so it can round-trip through gob-encoded caches.
func init() {
	gob.Register(ObjectProps{})
}

// ObjectProps carries the detailed attributes of a file or folder object.
type ObjectProps struct {
	CreatedAt      time.Time `json:"created_at"`
	UpdatedAt      time.Time `json:"updated_at"`
	Policy         string    `json:"policy"`
	Size           uint64    `json:"size"`
	ChildFolderNum int       `json:"child_folder_num"`
	ChildFileNum   int       `json:"child_file_num"`
	Path           string    `json:"path"`

	QueryDate time.Time `json:"query_date"`
}

// ObjectList is a listing of files and folders.
type ObjectList struct {
	Parent  string         `json:"parent,omitempty"`
	Objects []Object       `json:"objects"`
	Policy  *PolicySummary `json:"policy,omitempty"`
}

// Object is a single file or folder entry.
type Object struct {
	ID            string    `json:"id"`
	Name          string    `json:"name"`
	Path          string    `json:"path"`
	Thumb         bool      `json:"thumb"`
	Size          uint64    `json:"size"`
	Type          string    `json:"type"`
	Date          time.Time `json:"date"`
	CreateDate    time.Time `json:"create_date"`
	Key           string    `json:"key,omitempty"`
	SourceEnabled bool      `json:"source_enabled"`
}
+// PolicySummary 用于前端组件使用的存储策略概况 +type PolicySummary struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + MaxSize uint64 `json:"max_size"` + FileType []string `json:"file_type"` +} + +// BuildObjectList 构建列目录响应 +func BuildObjectList(parent uint, objects []Object, policy *model.Policy) ObjectList { + res := ObjectList{ + Objects: objects, + } + + if parent > 0 { + res.Parent = hashid.HashID(parent, hashid.FolderID) + } + + if policy != nil { + res.Policy = &PolicySummary{ + ID: hashid.HashID(policy.ID, hashid.PolicyID), + Name: policy.Name, + Type: policy.Type, + MaxSize: policy.MaxSize, + FileType: policy.OptionsSerialized.FileType, + } + } + + return res +} + +// Sources 获取外链的结果响应 +type Sources struct { + URL string `json:"url"` + Name string `json:"name"` + Parent uint `json:"parent"` + Error string `json:"error,omitempty"` +} + +// DocPreviewSession 文档预览会话响应 +type DocPreviewSession struct { + URL string `json:"url"` + AccessToken string `json:"access_token,omitempty"` + AccessTokenTTL int64 `json:"access_token_ttl,omitempty"` +} + +// WopiFileInfo Response for `CheckFileInfo` +type WopiFileInfo struct { + // Required + BaseFileName string + Version string + Size int64 + + // Breadcrumb + BreadcrumbBrandName string + BreadcrumbBrandUrl string + BreadcrumbFolderName string + BreadcrumbFolderUrl string + + // Post Message + FileSharingPostMessage bool + ClosePostMessage bool + PostMessageOrigin string + + // Other miscellaneous properties + FileNameMaxLength int + LastModifiedTime string + + // User metadata + IsAnonymousUser bool + UserFriendlyName string + UserId string + OwnerId string + + // Permission + ReadOnly bool + UserCanRename bool + UserCanReview bool + UserCanWrite bool + + SupportsRename bool + SupportsReviewing bool + SupportsUpdate bool +} diff --git a/pkg/serializer/response.go b/pkg/serializer/response.go new file mode 100644 index 0000000..ecfaec2 --- /dev/null +++ b/pkg/serializer/response.go @@ -0,0 +1,35 
@@ +package serializer + +import ( + "bytes" + "encoding/base64" + "encoding/gob" +) + +// Response 基础序列化器 +type Response struct { + Code int `json:"code"` + Data interface{} `json:"data,omitempty"` + Msg string `json:"msg"` + Error string `json:"error,omitempty"` +} + +// NewResponseWithGobData 返回Data字段使用gob编码的Response +func NewResponseWithGobData(data interface{}) Response { + var w bytes.Buffer + encoder := gob.NewEncoder(&w) + if err := encoder.Encode(data); err != nil { + return Err(CodeInternalSetting, "Failed to encode response content", err) + } + + return Response{Data: w.Bytes()} +} + +// GobDecode 将 Response 正文解码至目标指针 +func (r *Response) GobDecode(target interface{}) { + src := r.Data.(string) + raw := make([]byte, len(src)*len(src)/base64.StdEncoding.DecodedLen(len(src))) + base64.StdEncoding.Decode(raw, []byte(src)) + decoder := gob.NewDecoder(bytes.NewBuffer(raw)) + decoder.Decode(target) +} diff --git a/pkg/serializer/setting.go b/pkg/serializer/setting.go new file mode 100644 index 0000000..df6fc2c --- /dev/null +++ b/pkg/serializer/setting.go @@ -0,0 +1,113 @@ +package serializer + +import ( + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" +) + +// SiteConfig 站点全局设置序列 +type SiteConfig struct { + SiteName string `json:"title"` + LoginCaptcha bool `json:"loginCaptcha"` + RegCaptcha bool `json:"regCaptcha"` + ForgetCaptcha bool `json:"forgetCaptcha"` + EmailActive bool `json:"emailActive"` + QQLogin bool `json:"QQLogin"` + Themes string `json:"themes"` + DefaultTheme string `json:"defaultTheme"` + ScoreEnabled bool `json:"score_enabled"` + ShareScoreRate string `json:"share_score_rate"` + HomepageViewMethod string `json:"home_view_method"` + ShareViewMethod string `json:"share_view_method"` + Authn bool `json:"authn"` + User User `json:"user"` + ReCaptchaKey string `json:"captcha_ReCaptchaKey"` + SiteNotice string `json:"site_notice"` + CaptchaType string `json:"captcha_type"` + TCaptchaCaptchaAppId string `json:"tcaptcha_captcha_app_id"` 
// checkSettingValue looks up key in the settings map, falling back to the
// empty string when the key is absent.
func checkSettingValue(setting map[string]string, key string) string {
	// Indexing a map (even a nil one) yields the zero value "" for a
	// missing key, which is exactly the desired fallback.
	return setting[key]
}
checkSettingValue(settings, "home_view_method"), + ShareViewMethod: checkSettingValue(settings, "share_view_method"), + Authn: model.IsTrueVal(checkSettingValue(settings, "authn_enabled")), + User: userRes, + SiteNotice: checkSettingValue(settings, "siteNotice"), + ReCaptchaKey: checkSettingValue(settings, "captcha_ReCaptchaKey"), + CaptchaType: checkSettingValue(settings, "captcha_type"), + TCaptchaCaptchaAppId: checkSettingValue(settings, "captcha_TCaptcha_CaptchaAppId"), + RegisterEnabled: model.IsTrueVal(checkSettingValue(settings, "register_enabled")), + ReportEnabled: model.IsTrueVal(checkSettingValue(settings, "report_enabled")), + AppPromotion: model.IsTrueVal(checkSettingValue(settings, "show_app_promotion")), + AppFeedbackLink: checkSettingValue(settings, "app_feedback_link"), + AppForumLink: checkSettingValue(settings, "app_forum_link"), + WopiExts: wopiExts, + }} + return res +} + +// VolResponse VOL query response +type VolResponse struct { + Signature string `json:"signature"` + Content string `json:"content"` +} diff --git a/pkg/serializer/share.go b/pkg/serializer/share.go new file mode 100644 index 0000000..ade604d --- /dev/null +++ b/pkg/serializer/share.go @@ -0,0 +1,139 @@ +package serializer + +import ( + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/hashid" +) + +// Share 分享信息序列化 +type Share struct { + Key string `json:"key"` + Locked bool `json:"locked"` + IsDir bool `json:"is_dir"` + Score int `json:"score"` + CreateDate time.Time `json:"create_date,omitempty"` + Downloads int `json:"downloads"` + Views int `json:"views"` + Expire int64 `json:"expire"` + Preview bool `json:"preview"` + Creator *shareCreator `json:"creator,omitempty"` + Source *shareSource `json:"source,omitempty"` +} + +type shareCreator struct { + Key string `json:"key"` + Nick string `json:"nick"` + GroupName string `json:"group_name"` +} + +type shareSource struct { + Name string `json:"name"` + Size uint64 
`json:"size"` +} + +// myShareItem 我的分享列表条目 +type myShareItem struct { + Key string `json:"key"` + IsDir bool `json:"is_dir"` + Score int `json:"score"` + Password string `json:"password"` + CreateDate time.Time `json:"create_date,omitempty"` + Downloads int `json:"downloads"` + RemainDownloads int `json:"remain_downloads"` + Views int `json:"views"` + Expire int64 `json:"expire"` + Preview bool `json:"preview"` + Source *shareSource `json:"source,omitempty"` +} + +// BuildShareList 构建我的分享列表响应 +func BuildShareList(shares []model.Share, total int) Response { + res := make([]myShareItem, 0, total) + now := time.Now().Unix() + for i := 0; i < len(shares); i++ { + item := myShareItem{ + Key: hashid.HashID(shares[i].ID, hashid.ShareID), + IsDir: shares[i].IsDir, + Score: shares[i].Score, + Password: shares[i].Password, + CreateDate: shares[i].CreatedAt, + Downloads: shares[i].Downloads, + Views: shares[i].Views, + Preview: shares[i].PreviewEnabled, + Expire: -1, + RemainDownloads: shares[i].RemainDownloads, + } + if shares[i].Expires != nil { + item.Expire = shares[i].Expires.Unix() - now + if item.Expire == 0 { + item.Expire = 0 + } + } + if shares[i].File.ID != 0 { + item.Source = &shareSource{ + Name: shares[i].File.Name, + Size: shares[i].File.Size, + } + } else if shares[i].Folder.ID != 0 { + item.Source = &shareSource{ + Name: shares[i].Folder.Name, + } + } + + res = append(res, item) + } + + return Response{Data: map[string]interface{}{ + "total": total, + "items": res, + }} +} + +// BuildShareResponse 构建获取分享信息响应 +func BuildShareResponse(share *model.Share, unlocked bool) Share { + creator := share.Creator() + resp := Share{ + Key: hashid.HashID(share.ID, hashid.ShareID), + Locked: !unlocked, + Creator: &shareCreator{ + Key: hashid.HashID(creator.ID, hashid.UserID), + Nick: creator.Nick, + GroupName: creator.Group.Name, + }, + Score: share.Score, + CreateDate: share.CreatedAt, + } + + // 未解锁时只返回基本信息 + if !unlocked { + return resp + } + + resp.IsDir = share.IsDir 
+ resp.Downloads = share.Downloads + resp.Views = share.Views + resp.Preview = share.PreviewEnabled + + if share.Expires != nil { + resp.Expire = share.Expires.Unix() - time.Now().Unix() + } + + if share.IsDir { + source := share.SourceFolder() + resp.Source = &shareSource{ + Name: source.Name, + Size: 0, + } + } else { + source := share.SourceFile() + resp.Source = &shareSource{ + Name: source.Name, + Size: source.Size, + } + } + + return resp + +} diff --git a/pkg/serializer/slave.go b/pkg/serializer/slave.go new file mode 100644 index 0000000..04d56d3 --- /dev/null +++ b/pkg/serializer/slave.go @@ -0,0 +1,68 @@ +package serializer + +import ( + "crypto/sha1" + "encoding/gob" + "fmt" + + model "github.com/cloudreve/Cloudreve/v3/models" +) + +// RemoteDeleteRequest 远程策略删除接口请求正文 +type RemoteDeleteRequest struct { + Files []string `json:"files"` +} + +// ListRequest 远程策略列文件请求正文 +type ListRequest struct { + Path string `json:"path"` + Recursive bool `json:"recursive"` +} + +// NodePingReq 从机节点Ping请求 +type NodePingReq struct { + SiteURL string `json:"site_url"` + SiteID string `json:"site_id"` + IsUpdate bool `json:"is_update"` + CredentialTTL int `json:"credential_ttl"` + Node *model.Node `json:"node"` +} + +// NodePingResp 从机节点Ping响应 +type NodePingResp struct { +} + +// SlaveAria2Call 从机有关Aria2的请求正文 +type SlaveAria2Call struct { + Task *model.Download `json:"task"` + GroupOptions map[string]interface{} `json:"group_options"` + Files []int `json:"files"` +} + +// SlaveTransferReq 从机中转任务创建请求 +type SlaveTransferReq struct { + Src string `json:"src"` + Dst string `json:"dst"` + Policy *model.Policy `json:"policy"` +} + +// Hash 返回创建请求的唯一标识,保持创建请求幂等 +func (s *SlaveTransferReq) Hash(id string) string { + h := sha1.New() + h.Write([]byte(fmt.Sprintf("transfer-%s-%s-%s-%d", id, s.Src, s.Dst, s.Policy.ID))) + bs := h.Sum(nil) + return fmt.Sprintf("%x", bs) +} + +const ( + SlaveTransferSuccess = "success" + SlaveTransferFailed = "failed" +) + +type SlaveTransferResult struct 
{ + Error string +} + +func init() { + gob.Register(SlaveTransferResult{}) +} diff --git a/pkg/serializer/upload.go b/pkg/serializer/upload.go new file mode 100644 index 0000000..4e7150b --- /dev/null +++ b/pkg/serializer/upload.go @@ -0,0 +1,64 @@ +package serializer + +import ( + "encoding/gob" + model "github.com/cloudreve/Cloudreve/v3/models" + "time" +) + +// UploadPolicy slave模式下传递的上传策略 +type UploadPolicy struct { + SavePath string `json:"save_path"` + FileName string `json:"file_name"` + AutoRename bool `json:"auto_rename"` + MaxSize uint64 `json:"max_size"` + AllowedExtension []string `json:"allowed_extension"` + CallbackURL string `json:"callback_url"` +} + +// UploadCredential 返回给客户端的上传凭证 +type UploadCredential struct { + SessionID string `json:"sessionID"` + ChunkSize uint64 `json:"chunkSize"` // 分块大小,0 为部分快 + Expires int64 `json:"expires"` // 上传凭证过期时间, Unix 时间戳 + UploadURLs []string `json:"uploadURLs,omitempty"` + Credential string `json:"credential,omitempty"` + UploadID string `json:"uploadID,omitempty"` + Callback string `json:"callback,omitempty"` // 回调地址 + Path string `json:"path,omitempty"` // 存储路径 + AccessKey string `json:"ak,omitempty"` + KeyTime string `json:"keyTime,omitempty"` // COS用有效期 + Policy string `json:"policy,omitempty"` + CompleteURL string `json:"completeURL,omitempty"` +} + +// UploadSession 上传会话 +type UploadSession struct { + Key string // 上传会话 GUID + UID uint // 发起者 + VirtualPath string // 用户文件路径,不含文件名 + Name string // 文件名 + Size uint64 // 文件大小 + SavePath string // 物理存储路径,包含物理文件名 + LastModified *time.Time // 可选的文件最后修改日期 + Policy model.Policy + Callback string // 回调 URL 地址 + CallbackSecret string // 回调 URL + UploadURL string + UploadID string + Credential string +} + +// UploadCallback 上传回调正文 +type UploadCallback struct { + PicInfo string `json:"pic_info"` +} + +// GeneralUploadCallbackFailed 存储策略上传回调失败响应 +type GeneralUploadCallbackFailed struct { + Error string `json:"error"` +} + +func init() { + gob.Register(UploadSession{}) +} 
diff --git a/pkg/serializer/user.go b/pkg/serializer/user.go new file mode 100644 index 0000000..e5f67d8 --- /dev/null +++ b/pkg/serializer/user.go @@ -0,0 +1,172 @@ +package serializer + +import ( + "fmt" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/hashid" + "github.com/duo-labs/webauthn/webauthn" +) + +// CheckLogin 检查登录 +func CheckLogin() Response { + return Response{ + Code: CodeCheckLogin, + Msg: "Login required", + } +} + +// PhoneRequired 需要绑定手机 +func PhoneRequired() Response { + return Response{ + Code: CodePhoneRequired, + Msg: "此功能需要绑定手机后使用", + } +} + +// User 用户序列化器 +type User struct { + ID string `json:"id"` + Email string `json:"user_name"` + Nickname string `json:"nickname"` + Status int `json:"status"` + Avatar string `json:"avatar"` + CreatedAt time.Time `json:"created_at"` + PreferredTheme string `json:"preferred_theme"` + Score int `json:"score"` + Anonymous bool `json:"anonymous"` + Group group `json:"group"` + Tags []tag `json:"tags"` +} + +type group struct { + ID uint `json:"id"` + Name string `json:"name"` + AllowShare bool `json:"allowShare"` + AllowRemoteDownload bool `json:"allowRemoteDownload"` + AllowArchiveDownload bool `json:"allowArchiveDownload"` + ShareFreeEnabled bool `json:"shareFree"` + ShareDownload bool `json:"shareDownload"` + CompressEnabled bool `json:"compress"` + WebDAVEnabled bool `json:"webdav"` + RelocateEnabled bool `json:"relocate"` + SourceBatchSize int `json:"sourceBatch"` + SelectNode bool `json:"selectNode"` + AdvanceDelete bool `json:"advanceDelete"` + AllowWebDAVProxy bool `json:"allowWebDAVProxy"` +} + +type tag struct { + ID string `json:"id"` + Name string `json:"name"` + Icon string `json:"icon"` + Color string `json:"color"` + Type int `json:"type"` + Expression string `json:"expression"` +} + +type storage struct { + Used uint64 `json:"used"` + Free uint64 `json:"free"` + Total uint64 `json:"total"` +} + +// WebAuthnCredentials 外部验证器凭证 +type 
WebAuthnCredentials struct { + ID []byte `json:"id"` + FingerPrint string `json:"fingerprint"` +} + +// BuildWebAuthnList 构建设置页面凭证列表 +func BuildWebAuthnList(credentials []webauthn.Credential) []WebAuthnCredentials { + res := make([]WebAuthnCredentials, 0, len(credentials)) + for _, v := range credentials { + credential := WebAuthnCredentials{ + ID: v.ID, + FingerPrint: fmt.Sprintf("% X", v.Authenticator.AAGUID), + } + res = append(res, credential) + } + + return res +} + +// BuildUser 序列化用户 +func BuildUser(user model.User) User { + tags, _ := model.GetTagsByUID(user.ID) + return User{ + ID: hashid.HashID(user.ID, hashid.UserID), + Email: user.Email, + Nickname: user.Nick, + Status: user.Status, + Avatar: user.Avatar, + CreatedAt: user.CreatedAt, + PreferredTheme: user.OptionsSerialized.PreferredTheme, + Score: user.Score, + Anonymous: user.IsAnonymous(), + Group: group{ + ID: user.GroupID, + Name: user.Group.Name, + AllowShare: user.Group.ShareEnabled, + AllowRemoteDownload: user.Group.OptionsSerialized.Aria2, + AllowArchiveDownload: user.Group.OptionsSerialized.ArchiveDownload, + ShareFreeEnabled: user.Group.OptionsSerialized.ShareFree, + ShareDownload: user.Group.OptionsSerialized.ShareDownload, + CompressEnabled: user.Group.OptionsSerialized.ArchiveTask, + WebDAVEnabled: user.Group.WebDAVEnabled, + AllowWebDAVProxy: user.Group.OptionsSerialized.WebDAVProxy, + RelocateEnabled: user.Group.OptionsSerialized.Relocate, + SourceBatchSize: user.Group.OptionsSerialized.SourceBatchSize, + SelectNode: user.Group.OptionsSerialized.SelectNode, + AdvanceDelete: user.Group.OptionsSerialized.AdvanceDelete, + }, + Tags: buildTagRes(tags), + } +} + +// BuildUserResponse 序列化用户响应 +func BuildUserResponse(user model.User) Response { + return Response{ + Data: BuildUser(user), + } +} + +// BuildUserStorageResponse 序列化用户存储概况响应 +func BuildUserStorageResponse(user model.User) Response { + total := user.Group.MaxStorage + user.GetAvailablePackSize() + storageResp := storage{ + Used: 
user.Storage, + Free: total - user.Storage, + Total: total, + } + + if total < user.Storage { + storageResp.Free = 0 + } + + return Response{ + Data: storageResp, + } +} + +// buildTagRes 构建标签列表 +func buildTagRes(tags []model.Tag) []tag { + res := make([]tag, 0, len(tags)) + for i := 0; i < len(tags); i++ { + newTag := tag{ + ID: hashid.HashID(tags[i].ID, hashid.TagID), + Name: tags[i].Name, + Icon: tags[i].Icon, + Color: tags[i].Color, + Type: tags[i].Type, + } + if newTag.Type != 0 { + newTag.Expression = tags[i].Expression + + } + res = append(res, newTag) + } + + return res +} diff --git a/pkg/serializer/vas.go b/pkg/serializer/vas.go new file mode 100644 index 0000000..2da604a --- /dev/null +++ b/pkg/serializer/vas.go @@ -0,0 +1,158 @@ +package serializer + +import ( + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/hashid" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "time" +) + +type quota struct { + Base uint64 `json:"base"` + Pack uint64 `json:"pack"` + Used uint64 `json:"used"` + Total uint64 `json:"total"` + Packs []storagePacks `json:"packs"` +} + +type storagePacks struct { + Name string `json:"name"` + Size uint64 `json:"size"` + ActivateDate time.Time `json:"activate_date"` + Expiration int `json:"expiration"` + ExpirationDate time.Time `json:"expiration_date"` +} + +// MountedFolders 已挂载的目录 +type MountedFolders struct { + ID string `json:"id"` + Name string `json:"name"` + PolicyName string `json:"policy_name"` +} + +type policyOptions struct { + Name string `json:"name"` + ID string `json:"id"` +} + +type nodeOptions struct { + Name string `json:"name"` + ID uint `json:"id"` +} + +// BuildPolicySettingRes 构建存储策略选项选择 +func BuildPolicySettingRes(policies []model.Policy) Response { + options := make([]policyOptions, 0, len(policies)) + for _, policy := range policies { + options = append(options, policyOptions{ + Name: policy.Name, + ID: hashid.HashID(policy.ID, hashid.PolicyID), + }) + } + + return 
Response{ + Data: options, + } +} + +// BuildMountedFolderRes 构建已挂载目录响应,list为当前用户可用存储策略ID +func BuildMountedFolderRes(folders []model.Folder, list []uint) []MountedFolders { + res := make([]MountedFolders, 0, len(folders)) + for _, folder := range folders { + single := MountedFolders{ + ID: hashid.HashID(folder.ID, hashid.FolderID), + Name: folder.Name, + PolicyName: "[Invalid Policy]", + } + if policy, err := model.GetPolicyByID(folder.PolicyID); err == nil && util.ContainsUint(list, policy.ID) { + single.PolicyName = policy.Name + } + + res = append(res, single) + } + + return res +} + +// BuildUserQuotaResponse 序列化用户存储配额概况响应 +func BuildUserQuotaResponse(user *model.User, packs []model.StoragePack) Response { + packSize := user.GetAvailablePackSize() + res := quota{ + Base: user.Group.MaxStorage, + Pack: packSize, + Used: user.Storage, + Total: packSize + user.Group.MaxStorage, + Packs: make([]storagePacks, 0, len(packs)), + } + for _, pack := range packs { + res.Packs = append(res.Packs, storagePacks{ + Name: pack.Name, + Size: pack.Size, + ActivateDate: *pack.ActiveTime, + Expiration: int(pack.ExpiredTime.Sub(*pack.ActiveTime).Seconds()), + ExpirationDate: *pack.ExpiredTime, + }) + } + + return Response{ + Data: res, + } +} + +// PackProduct 容量包商品 +type PackProduct struct { + ID int64 `json:"id"` + Name string `json:"name"` + Size uint64 `json:"size"` + Time int64 `json:"time"` + Price int `json:"price"` + Score int `json:"score"` +} + +// GroupProducts 用户组商品 +type GroupProducts struct { + ID int64 `json:"id"` + Name string `json:"name"` + GroupID uint `json:"group_id"` + Time int64 `json:"time"` + Price int `json:"price"` + Score int `json:"score"` + Des []string `json:"des"` + Highlight bool `json:"highlight"` +} + +// BuildProductResponse 构建增值服务商品响应 +func BuildProductResponse(groups []GroupProducts, packs []PackProduct, + wechat, alipay, payjs, custom bool, customName string, scorePrice int) Response { + // 隐藏响应中的用户组ID + for i := 0; i < len(groups); i++ { + 
groups[i].GroupID = 0 + } + return Response{ + Data: map[string]interface{}{ + "packs": packs, + "groups": groups, + "alipay": alipay, + "wechat": wechat, + "payjs": payjs, + "custom": custom, + "custom_name": customName, + "score_price": scorePrice, + }, + } +} + +// BuildNodeOptionRes 构建可用节点列表响应 +func BuildNodeOptionRes(nodes []*model.Node) Response { + options := make([]nodeOptions, 0, len(nodes)) + for _, node := range nodes { + options = append(options, nodeOptions{ + Name: node.Name, + ID: node.ID, + }) + } + + return Response{ + Data: options, + } +} diff --git a/pkg/sessionstore/kv.go b/pkg/sessionstore/kv.go new file mode 100644 index 0000000..193d5c6 --- /dev/null +++ b/pkg/sessionstore/kv.go @@ -0,0 +1,136 @@ +package sessionstore + +import ( + "bytes" + "encoding/base32" + "encoding/gob" + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/gorilla/securecookie" + "github.com/gorilla/sessions" + "net/http" + "strings" +) + +type kvStore struct { + Codecs []securecookie.Codec + Options *sessions.Options + DefaultMaxAge int + + prefix string + serializer SessionSerializer + store cache.Driver +} + +func newKvStore(prefix string, store cache.Driver, keyPairs ...[]byte) *kvStore { + return &kvStore{ + prefix: prefix, + store: store, + DefaultMaxAge: 60 * 20, + serializer: GobSerializer{}, + Codecs: securecookie.CodecsFromPairs(keyPairs...), + Options: &sessions.Options{ + Path: "/", + MaxAge: 86400 * 30, + }, + } +} + +// Get returns a session for the given name after adding it to the registry. +// +// It returns a new session if the sessions doesn't exist. Access IsNew on +// the session to check if it is an existing session or a new one. +// +// It returns a new session and an error if the session exists but could +// not be decoded. +func (s *kvStore) Get(r *http.Request, name string) (*sessions.Session, error) { + return sessions.GetRegistry(r).Get(s, name) +} + +// New returns a session for the given name without adding it to the registry. 
+// +// The difference between New() and Get() is that calling New() twice will +// decode the session data twice, while Get() registers and reuses the same +// decoded session after the first call. +func (s *kvStore) New(r *http.Request, name string) (*sessions.Session, error) { + var ( + err error + ) + session := sessions.NewSession(s, name) + // make a copy + options := *s.Options + session.Options = &options + session.IsNew = true + if c, errCookie := r.Cookie(name); errCookie == nil { + err = securecookie.DecodeMulti(name, c.Value, &session.ID, s.Codecs...) + if err == nil { + res, ok := s.store.Get(s.prefix + session.ID) + if ok { + err = s.serializer.Deserialize(res.([]byte), session) + } + + session.IsNew = !(err == nil && ok) // not new if no error and data available + } + } + return session, err +} +func (s *kvStore) Save(r *http.Request, w http.ResponseWriter, session *sessions.Session) error { + // Marked for deletion. + if session.Options.MaxAge <= 0 { + if err := s.store.Delete([]string{session.ID}, s.prefix); err != nil { + return err + } + http.SetCookie(w, sessions.NewCookie(session.Name(), "", session.Options)) + } else { + // Build an alphanumeric key for the redis store. + if session.ID == "" { + session.ID = strings.TrimRight(base32.StdEncoding.EncodeToString(securecookie.GenerateRandomKey(32)), "=") + } + + b, err := s.serializer.Serialize(session) + if err != nil { + return err + } + + age := session.Options.MaxAge + if age == 0 { + age = s.DefaultMaxAge + } + + if err := s.store.Set(s.prefix+session.ID, b, age); err != nil { + return err + } + + encoded, err := securecookie.EncodeMulti(session.Name(), session.ID, s.Codecs...) 
+ if err != nil { + return err + } + http.SetCookie(w, sessions.NewCookie(session.Name(), encoded, session.Options)) + } + return nil +} + +// SessionSerializer provides an interface hook for alternative serializers +type SessionSerializer interface { + Deserialize(d []byte, ss *sessions.Session) error + Serialize(ss *sessions.Session) ([]byte, error) +} + +// GobSerializer uses gob package to encode the session map +type GobSerializer struct{} + +// Serialize using gob +func (s GobSerializer) Serialize(ss *sessions.Session) ([]byte, error) { + buf := new(bytes.Buffer) + enc := gob.NewEncoder(buf) + err := enc.Encode(ss.Values) + if err == nil { + return buf.Bytes(), nil + } + return nil, err +} + +// Deserialize back to map[interface{}]interface{} +func (s GobSerializer) Deserialize(d []byte, ss *sessions.Session) error { + dec := gob.NewDecoder(bytes.NewBuffer(d)) + return dec.Decode(&ss.Values) +} diff --git a/pkg/sessionstore/sessionstore.go b/pkg/sessionstore/sessionstore.go new file mode 100644 index 0000000..3b1c302 --- /dev/null +++ b/pkg/sessionstore/sessionstore.go @@ -0,0 +1,22 @@ +package sessionstore + +import ( + "github.com/cloudreve/Cloudreve/v3/pkg/cache" + "github.com/gin-contrib/sessions" +) + +type Store interface { + sessions.Store +} + +func NewStore(driver cache.Driver, keyPairs ...[]byte) Store { + return &store{newKvStore("cd_session_", driver, keyPairs...)} +} + +type store struct { + *kvStore +} + +func (c *store) Options(options sessions.Options) { + c.kvStore.Options = options.ToGorillaOptions() +} diff --git a/pkg/task/compress.go b/pkg/task/compress.go new file mode 100644 index 0000000..7f5025f --- /dev/null +++ b/pkg/task/compress.go @@ -0,0 +1,175 @@ +package task + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// CompressTask 文件压缩任务 +type 
CompressTask struct { + User *model.User + TaskModel *model.Task + TaskProps CompressProps + Err *JobError + + zipPath string +} + +// CompressProps 压缩任务属性 +type CompressProps struct { + Dirs []uint `json:"dirs"` + Files []uint `json:"files"` + Dst string `json:"dst"` +} + +// Props 获取任务属性 +func (job *CompressTask) Props() string { + res, _ := json.Marshal(job.TaskProps) + return string(res) +} + +// Type 获取任务状态 +func (job *CompressTask) Type() int { + return CompressTaskType +} + +// Creator 获取创建者ID +func (job *CompressTask) Creator() uint { + return job.User.ID +} + +// Model 获取任务的数据库模型 +func (job *CompressTask) Model() *model.Task { + return job.TaskModel +} + +// SetStatus 设定状态 +func (job *CompressTask) SetStatus(status int) { + job.TaskModel.SetStatus(status) +} + +// SetError 设定任务失败信息 +func (job *CompressTask) SetError(err *JobError) { + job.Err = err + res, _ := json.Marshal(job.Err) + job.TaskModel.SetError(string(res)) + + // 删除压缩文件 + job.removeZipFile() +} + +func (job *CompressTask) removeZipFile() { + if job.zipPath != "" { + if err := os.Remove(job.zipPath); err != nil { + util.Log().Warning("Failed to delete temp zip file %q: %s", job.zipPath, err) + } + } +} + +// SetErrorMsg 设定任务失败信息 +func (job *CompressTask) SetErrorMsg(msg string) { + job.SetError(&JobError{Msg: msg}) +} + +// GetError 返回任务失败信息 +func (job *CompressTask) GetError() *JobError { + return job.Err +} + +// Do 开始执行任务 +func (job *CompressTask) Do() { + // 创建文件系统 + fs, err := filesystem.NewFileSystem(job.User) + if err != nil { + job.SetErrorMsg(err.Error()) + return + } + + util.Log().Debug("Starting compress file...") + job.TaskModel.SetProgress(CompressingProgress) + + // 创建临时压缩文件 + saveFolder := "compress" + zipFilePath := filepath.Join( + util.RelativePath(model.GetSettingByName("temp_path")), + saveFolder, + fmt.Sprintf("archive_%d.zip", time.Now().UnixNano()), + ) + zipFile, err := util.CreatNestedFile(zipFilePath) + if err != nil { + util.Log().Warning("%s", err) + 
job.SetErrorMsg(err.Error()) + return + } + + defer zipFile.Close() + + // 开始压缩 + ctx := context.Background() + err = fs.Compress(ctx, zipFile, job.TaskProps.Dirs, job.TaskProps.Files, false) + if err != nil { + job.SetErrorMsg(err.Error()) + return + } + + job.zipPath = zipFilePath + zipFile.Close() + util.Log().Debug("Compressed file saved to %q, start uploading it...", zipFile) + job.TaskModel.SetProgress(TransferringProgress) + + // 上传文件 + err = fs.UploadFromPath(ctx, zipFilePath, job.TaskProps.Dst, 0) + if err != nil { + job.SetErrorMsg(err.Error()) + return + } + + job.removeZipFile() +} + +// NewCompressTask 新建压缩任务 +func NewCompressTask(user *model.User, dst string, dirs, files []uint) (Job, error) { + newTask := &CompressTask{ + User: user, + TaskProps: CompressProps{ + Dirs: dirs, + Files: files, + Dst: dst, + }, + } + + record, err := Record(newTask) + if err != nil { + return nil, err + } + newTask.TaskModel = record + + return newTask, nil +} + +// NewRelocateTaskFromModel 从数据库记录中恢复迁移任务 +func NewCompressTaskFromModel(task *model.Task) (Job, error) { + user, err := model.GetActiveUserByID(task.UserID) + if err != nil { + return nil, err + } + newTask := &CompressTask{ + User: &user, + TaskModel: task, + } + + err = json.Unmarshal([]byte(task.Props), &newTask.TaskProps) + if err != nil { + return nil, err + } + + return newTask, nil +} diff --git a/pkg/task/decompress.go b/pkg/task/decompress.go new file mode 100644 index 0000000..9c6d88e --- /dev/null +++ b/pkg/task/decompress.go @@ -0,0 +1,131 @@ +package task + +import ( + "context" + "encoding/json" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem" +) + +// DecompressTask 文件压缩任务 +type DecompressTask struct { + User *model.User + TaskModel *model.Task + TaskProps DecompressProps + Err *JobError + + zipPath string +} + +// DecompressProps 压缩任务属性 +type DecompressProps struct { + Src string `json:"src"` + Dst string `json:"dst"` + Encoding string 
`json:"encoding"` +} + +// Props 获取任务属性 +func (job *DecompressTask) Props() string { + res, _ := json.Marshal(job.TaskProps) + return string(res) +} + +// Type 获取任务状态 +func (job *DecompressTask) Type() int { + return DecompressTaskType +} + +// Creator 获取创建者ID +func (job *DecompressTask) Creator() uint { + return job.User.ID +} + +// Model 获取任务的数据库模型 +func (job *DecompressTask) Model() *model.Task { + return job.TaskModel +} + +// SetStatus 设定状态 +func (job *DecompressTask) SetStatus(status int) { + job.TaskModel.SetStatus(status) +} + +// SetError 设定任务失败信息 +func (job *DecompressTask) SetError(err *JobError) { + job.Err = err + res, _ := json.Marshal(job.Err) + job.TaskModel.SetError(string(res)) +} + +// SetErrorMsg 设定任务失败信息 +func (job *DecompressTask) SetErrorMsg(msg string, err error) { + jobErr := &JobError{Msg: msg} + if err != nil { + jobErr.Error = err.Error() + } + job.SetError(jobErr) +} + +// GetError 返回任务失败信息 +func (job *DecompressTask) GetError() *JobError { + return job.Err +} + +// Do 开始执行任务 +func (job *DecompressTask) Do() { + // 创建文件系统 + fs, err := filesystem.NewFileSystem(job.User) + if err != nil { + job.SetErrorMsg("Failed to create filesystem.", err) + return + } + + job.TaskModel.SetProgress(DecompressingProgress) + + err = fs.Decompress(context.Background(), job.TaskProps.Src, job.TaskProps.Dst, job.TaskProps.Encoding) + if err != nil { + job.SetErrorMsg("Failed to decompress file.", err) + return + } + +} + +// NewDecompressTask 新建压缩任务 +func NewDecompressTask(user *model.User, src, dst, encoding string) (Job, error) { + newTask := &DecompressTask{ + User: user, + TaskProps: DecompressProps{ + Src: src, + Dst: dst, + Encoding: encoding, + }, + } + + record, err := Record(newTask) + if err != nil { + return nil, err + } + newTask.TaskModel = record + + return newTask, nil +} + +// NewDecompressTaskFromModel 从数据库记录中恢复压缩任务 +func NewDecompressTaskFromModel(task *model.Task) (Job, error) { + user, err := model.GetActiveUserByID(task.UserID) + if err 
!= nil { + return nil, err + } + newTask := &DecompressTask{ + User: &user, + TaskModel: task, + } + + err = json.Unmarshal([]byte(task.Props), &newTask.TaskProps) + if err != nil { + return nil, err + } + + return newTask, nil +} diff --git a/pkg/task/errors.go b/pkg/task/errors.go new file mode 100644 index 0000000..f1fca16 --- /dev/null +++ b/pkg/task/errors.go @@ -0,0 +1,8 @@ +package task + +import "errors" + +var ( + // ErrUnknownTaskType 未知任务类型 + ErrUnknownTaskType = errors.New("unknown task type") +) diff --git a/pkg/task/import.go b/pkg/task/import.go new file mode 100644 index 0000000..2b5134b --- /dev/null +++ b/pkg/task/import.go @@ -0,0 +1,220 @@ +package task + +import ( + "context" + "encoding/json" + "path" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// ImportTask 导入务 +type ImportTask struct { + User *model.User + TaskModel *model.Task + TaskProps ImportProps + Err *JobError +} + +// ImportProps 导入任务属性 +type ImportProps struct { + PolicyID uint `json:"policy_id"` // 存储策略ID + Src string `json:"src"` // 原始路径 + Recursive bool `json:"is_recursive"` // 是否递归导入 + Dst string `json:"dst"` // 目的目录 +} + +// Props 获取任务属性 +func (job *ImportTask) Props() string { + res, _ := json.Marshal(job.TaskProps) + return string(res) +} + +// Type 获取任务状态 +func (job *ImportTask) Type() int { + return ImportTaskType +} + +// Creator 获取创建者ID +func (job *ImportTask) Creator() uint { + return job.User.ID +} + +// Model 获取任务的数据库模型 +func (job *ImportTask) Model() *model.Task { + return job.TaskModel +} + +// SetStatus 设定状态 +func (job *ImportTask) SetStatus(status int) { + job.TaskModel.SetStatus(status) +} + +// SetError 设定任务失败信息 +func (job *ImportTask) SetError(err *JobError) { + job.Err = err + res, _ := json.Marshal(job.Err) + job.TaskModel.SetError(string(res)) +} + +// SetErrorMsg 设定任务失败信息 +func (job 
*ImportTask) SetErrorMsg(msg string, err error) { + jobErr := &JobError{Msg: msg} + if err != nil { + jobErr.Error = err.Error() + } + job.SetError(jobErr) +} + +// GetError 返回任务失败信息 +func (job *ImportTask) GetError() *JobError { + return job.Err +} + +// Do 开始执行任务 +func (job *ImportTask) Do() { + ctx := context.Background() + + // 查找存储策略 + policy, err := model.GetPolicyByID(job.TaskProps.PolicyID) + if err != nil { + job.SetErrorMsg("Policy not exist.", err) + return + } + + // 创建文件系统 + fs, err := filesystem.NewFileSystem(job.User) + if err != nil { + job.SetErrorMsg(err.Error(), nil) + return + } + defer fs.Recycle() + + fs.Policy = &policy + if err := fs.DispatchHandler(); err != nil { + job.SetErrorMsg("Failed to dispatch policy.", err) + return + } + + // 注册钩子 + fs.Use("BeforeAddFile", filesystem.HookValidateFile) + fs.Use("BeforeAddFile", filesystem.HookValidateCapacity) + + // 列取目录、对象 + job.TaskModel.SetProgress(ListingProgress) + coxIgnoreConflict := context.WithValue(context.Background(), fsctx.IgnoreDirectoryConflictCtx, + true) + objects, err := fs.Handler.List(ctx, job.TaskProps.Src, job.TaskProps.Recursive) + if err != nil { + job.SetErrorMsg("Failed to list files.", err) + return + } + + job.TaskModel.SetProgress(InsertingProgress) + + // 虚拟目录路径与folder对象ID的对应 + pathCache := make(map[string]*model.Folder, len(objects)) + + // 插入目录记录到用户文件系统 + for _, object := range objects { + if object.IsDir { + // 创建目录 + virtualPath := path.Join(job.TaskProps.Dst, object.RelativePath) + folder, err := fs.CreateDirectory(coxIgnoreConflict, virtualPath) + if err != nil { + util.Log().Warning("Importing task cannot create user directory %q: %s", virtualPath, err) + } else if folder.ID > 0 { + pathCache[virtualPath] = folder + } + } + } + + // 插入文件记录到用户文件系统 + for _, object := range objects { + if !object.IsDir { + // 创建文件信息 + virtualPath := path.Dir(path.Join(job.TaskProps.Dst, object.RelativePath)) + fileHeader := fsctx.FileStream{ + Size: object.Size, + VirtualPath: 
virtualPath, + Name: object.Name, + SavePath: object.Source, + } + + // 查找父目录 + parentFolder := &model.Folder{} + if parent, ok := pathCache[virtualPath]; ok { + parentFolder = parent + } else { + folder, err := fs.CreateDirectory(context.Background(), virtualPath) + if err != nil { + util.Log().Warning("Importing task cannot create user directory %q: %s", + virtualPath, err) + continue + } + parentFolder = folder + + } + + // 插入文件记录 + _, err := fs.AddFile(context.Background(), parentFolder, &fileHeader) + if err != nil { + util.Log().Warning("Importing task cannot insert user file %q: %s", + object.RelativePath, err) + if err == filesystem.ErrInsufficientCapacity { + job.SetErrorMsg("Insufficient storage capacity.", err) + return + } + } + + } + } +} + +// NewImportTask 新建导入任务 +func NewImportTask(user, policy uint, src, dst string, recursive bool) (Job, error) { + creator, err := model.GetActiveUserByID(user) + if err != nil { + return nil, err + } + + newTask := &ImportTask{ + User: &creator, + TaskProps: ImportProps{ + PolicyID: policy, + Recursive: recursive, + Src: src, + Dst: dst, + }, + } + + record, err := Record(newTask) + if err != nil { + return nil, err + } + newTask.TaskModel = record + + return newTask, nil +} + +// NewImportTaskFromModel 从数据库记录中恢复导入任务 +func NewImportTaskFromModel(task *model.Task) (Job, error) { + user, err := model.GetActiveUserByID(task.UserID) + if err != nil { + return nil, err + } + newTask := &ImportTask{ + User: &user, + TaskModel: task, + } + + err = json.Unmarshal([]byte(task.Props), &newTask.TaskProps) + if err != nil { + return nil, err + } + + return newTask, nil +} diff --git a/pkg/task/job.go b/pkg/task/job.go new file mode 100644 index 0000000..ad77c6b --- /dev/null +++ b/pkg/task/job.go @@ -0,0 +1,127 @@ +package task + +import ( + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// 任务类型 +const ( + // CompressTaskType 压缩任务 + CompressTaskType = iota + // 
DecompressTaskType 解压缩任务 + DecompressTaskType + // TransferTaskType 中转任务 + TransferTaskType + // ImportTaskType 导入任务 + ImportTaskType + // RelocateTaskType 存储策略迁移任务 + RelocateTaskType + // RecycleTaskType 回收任务 + RecycleTaskType +) + +// 任务状态 +const ( + // Queued 排队中 + Queued = iota + // Processing 处理中 + Processing + // Error 失败 + Error + // Canceled 取消 + Canceled + // Complete 完成 + Complete +) + +// 任务进度 +const ( + // PendingProgress 等待中 + PendingProgress = iota + // Compressing 压缩中 + CompressingProgress + // Decompressing 解压缩中 + DecompressingProgress + // Downloading 下载中 + DownloadingProgress + // Transferring 转存中 + TransferringProgress + // ListingProgress 索引中 + ListingProgress + // InsertingProgress 插入中 + InsertingProgress +) + +// Job 任务接口 +type Job interface { + Type() int // 返回任务类型 + Creator() uint // 返回创建者ID + Props() string // 返回序列化后的任务属性 + Model() *model.Task // 返回对应的数据库模型 + SetStatus(int) // 设定任务状态 + Do() // 开始执行任务 + SetError(*JobError) // 设定任务失败信息 + GetError() *JobError // 获取任务执行结果,返回nil表示成功完成执行 +} + +// JobError 任务失败信息 +type JobError struct { + Msg string `json:"msg,omitempty"` + Error string `json:"error,omitempty"` +} + +// Record 将任务记录到数据库中 +func Record(job Job) (*model.Task, error) { + record := model.Task{ + Status: Queued, + Type: job.Type(), + UserID: job.Creator(), + Progress: 0, + Error: "", + Props: job.Props(), + } + _, err := record.Create() + return &record, err +} + +// Resume 从数据库中恢复未完成任务 +func Resume(p Pool) { + tasks := model.GetTasksByStatus(Queued, Processing) + if len(tasks) == 0 { + return + } + util.Log().Info("Resume %d unfinished task(s) from database.", len(tasks)) + + for i := 0; i < len(tasks); i++ { + job, err := GetJobFromModel(&tasks[i]) + if err != nil { + util.Log().Warning("Failed to resume task: %s", err) + continue + } + + if job != nil { + p.Submit(job) + } + } +} + +// GetJobFromModel 从数据库给定模型获取任务 +func GetJobFromModel(task *model.Task) (Job, error) { + switch task.Type { + case CompressTaskType: + return 
NewCompressTaskFromModel(task) + case DecompressTaskType: + return NewDecompressTaskFromModel(task) + case TransferTaskType: + return NewTransferTaskFromModel(task) + case ImportTaskType: + return NewImportTaskFromModel(task) + case RelocateTaskType: + return NewRelocateTaskFromModel(task) + case RecycleTaskType: + return NewRecycleTaskFromModel(task) + default: + return nil, ErrUnknownTaskType + } +} diff --git a/pkg/task/pool.go b/pkg/task/pool.go new file mode 100644 index 0000000..e37f179 --- /dev/null +++ b/pkg/task/pool.go @@ -0,0 +1,68 @@ +package task + +import ( + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/conf" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// TaskPoll 要使用的任务池 +var TaskPoll Pool + +type Pool interface { + Add(num int) + Submit(job Job) +} + +// AsyncPool 带有最大配额的任务池 +type AsyncPool struct { + // 容量 + idleWorker chan int +} + +// Add 增加可用Worker数量 +func (pool *AsyncPool) Add(num int) { + for i := 0; i < num; i++ { + pool.idleWorker <- 1 + } +} + +// ObtainWorker 阻塞直到获取新的Worker +func (pool *AsyncPool) obtainWorker() Worker { + select { + case <-pool.idleWorker: + // 有空闲Worker名额时,返回新Worker + return &GeneralWorker{} + } +} + +// FreeWorker 添加空闲Worker +func (pool *AsyncPool) freeWorker() { + pool.Add(1) +} + +// Submit 开始提交任务 +func (pool *AsyncPool) Submit(job Job) { + go func() { + util.Log().Debug("Waiting for Worker.") + worker := pool.obtainWorker() + util.Log().Debug("Worker obtained.") + worker.Do(job) + util.Log().Debug("Worker released.") + pool.freeWorker() + }() +} + +// Init 初始化任务池 +func Init() { + maxWorker := model.GetIntSetting("max_worker_num", 10) + TaskPoll = &AsyncPool{ + idleWorker: make(chan int, maxWorker), + } + TaskPoll.Add(maxWorker) + util.Log().Info("Initialize task queue with WorkerNum = %d", maxWorker) + + if conf.SystemConfig.Mode == "master" { + Resume(TaskPoll) + } +} diff --git a/pkg/task/recycle.go b/pkg/task/recycle.go new file mode 100644 index 
0000000..60cc97f --- /dev/null +++ b/pkg/task/recycle.go @@ -0,0 +1,130 @@ +package task + +import ( + "encoding/json" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cluster" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// RecycleTask 文件回收任务 +type RecycleTask struct { + User *model.User + TaskModel *model.Task + TaskProps RecycleProps + Err *JobError +} + +// RecycleProps 回收任务属性 +type RecycleProps struct { + // 下载任务 GID + DownloadGID string `json:"download_gid"` +} + +// Props 获取任务属性 +func (job *RecycleTask) Props() string { + res, _ := json.Marshal(job.TaskProps) + return string(res) +} + +// Type 获取任务状态 +func (job *RecycleTask) Type() int { + return RecycleTaskType +} + +// Creator 获取创建者ID +func (job *RecycleTask) Creator() uint { + return job.User.ID +} + +// Model 获取任务的数据库模型 +func (job *RecycleTask) Model() *model.Task { + return job.TaskModel +} + +// SetStatus 设定状态 +func (job *RecycleTask) SetStatus(status int) { + job.TaskModel.SetStatus(status) +} + +// SetError 设定任务失败信息 +func (job *RecycleTask) SetError(err *JobError) { + job.Err = err + res, _ := json.Marshal(job.Err) + job.TaskModel.SetError(string(res)) +} + +// SetErrorMsg 设定任务失败信息 +func (job *RecycleTask) SetErrorMsg(msg string, err error) { + jobErr := &JobError{Msg: msg} + if err != nil { + jobErr.Error = err.Error() + } + job.SetError(jobErr) +} + +// GetError 返回任务失败信息 +func (job *RecycleTask) GetError() *JobError { + return job.Err +} + +// Do 开始执行任务 +func (job *RecycleTask) Do() { + download, err := model.GetDownloadByGid(job.TaskProps.DownloadGID, job.User.ID) + if err != nil { + util.Log().Warning("Recycle task %d cannot found download record.", job.TaskModel.ID) + job.SetErrorMsg("Cannot found download task.", err) + return + } + nodeID := download.GetNodeID() + node := cluster.Default.GetNodeByID(nodeID) + if node == nil { + util.Log().Warning("Recycle task %d cannot found node.", job.TaskModel.ID) + job.SetErrorMsg("Invalid slave node.", 
nil) + return + } + err = node.GetAria2Instance().DeleteTempFile(download) + if err != nil { + util.Log().Warning("Failed to delete transfer temp folder %q: %s", download.Parent, err) + job.SetErrorMsg("Failed to recycle files.", err) + return + } +} + +// NewRecycleTask 新建回收任务 +func NewRecycleTask(download *model.Download) (Job, error) { + newTask := &RecycleTask{ + User: download.GetOwner(), + TaskProps: RecycleProps{ + DownloadGID: download.GID, + }, + } + + record, err := Record(newTask) + if err != nil { + return nil, err + } + newTask.TaskModel = record + + return newTask, nil +} + +// NewRecycleTaskFromModel 从数据库记录中恢复回收任务 +func NewRecycleTaskFromModel(task *model.Task) (Job, error) { + user, err := model.GetActiveUserByID(task.UserID) + if err != nil { + return nil, err + } + newTask := &RecycleTask{ + User: &user, + TaskModel: task, + } + + err = json.Unmarshal([]byte(task.Props), &newTask.TaskProps) + if err != nil { + return nil, err + } + + return newTask, nil +} diff --git a/pkg/task/relocate.go b/pkg/task/relocate.go new file mode 100644 index 0000000..65666ed --- /dev/null +++ b/pkg/task/relocate.go @@ -0,0 +1,176 @@ +package task + +import ( + "context" + "encoding/json" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// RelocateTask 存储策略迁移任务 +type RelocateTask struct { + User *model.User + TaskModel *model.Task + TaskProps RelocateProps + Err *JobError +} + +// RelocateProps 存储策略迁移任务属性 +type RelocateProps struct { + Dirs []uint `json:"dirs"` + Files []uint `json:"files"` + DstPolicyID uint `json:"dst_policy_id"` +} + +// Props 获取任务属性 +func (job *RelocateTask) Props() string { + res, _ := json.Marshal(job.TaskProps) + return string(res) +} + +// Type 获取任务状态 +func (job *RelocateTask) Type() int { + return RelocateTaskType +} + +// Creator 获取创建者ID +func (job *RelocateTask) Creator() uint { + return job.User.ID +} + +// Model 获取任务的数据库模型 +func 
(job *RelocateTask) Model() *model.Task { + return job.TaskModel +} + +// SetStatus 设定状态 +func (job *RelocateTask) SetStatus(status int) { + job.TaskModel.SetStatus(status) +} + +// SetError 设定任务失败信息 +func (job *RelocateTask) SetError(err *JobError) { + job.Err = err + res, _ := json.Marshal(job.Err) + job.TaskModel.SetError(string(res)) +} + +// SetErrorMsg 设定任务失败信息 +func (job *RelocateTask) SetErrorMsg(msg string) { + job.SetError(&JobError{Msg: msg}) +} + +// GetError 返回任务失败信息 +func (job *RelocateTask) GetError() *JobError { + return job.Err +} + +// Do 开始执行任务 +func (job *RelocateTask) Do() { + // 创建文件系统 + fs, err := filesystem.NewFileSystem(job.User) + if err != nil { + job.SetErrorMsg(err.Error()) + return + } + + job.TaskModel.SetProgress(ListingProgress) + util.Log().Debug("Start migration task.") + + // ---------------------------- + // 索引出所有待迁移的文件 + // ---------------------------- + targetFiles := make([]model.File, 0, len(job.TaskProps.Files)) + + // 索引用户选择的单独的文件 + outerFiles, err := model.GetFilesByIDs(job.TaskProps.Files, job.User.ID) + if err != nil { + job.SetError(&JobError{ + Msg: "Failed to index files.", + Error: err.Error(), + }) + return + } + targetFiles = append(targetFiles, outerFiles...) + + // 索引用户选择目录下的所有递归子文件 + subFolders, err := model.GetRecursiveChildFolder(job.TaskProps.Dirs, job.User.ID, true) + if err != nil { + job.SetError(&JobError{ + Msg: "Failed to index child folders.", + Error: err.Error(), + }) + return + } + + subFiles, err := model.GetChildFilesOfFolders(&subFolders) + if err != nil { + job.SetError(&JobError{ + Msg: "Failed to index child files.", + Error: err.Error(), + }) + return + } + targetFiles = append(targetFiles, subFiles...) 
+ + // 查找目标存储策略 + policy, err := model.GetPolicyByID(job.TaskProps.DstPolicyID) + if err != nil { + job.SetError(&JobError{ + Msg: "Invalid policy.", + Error: err.Error(), + }) + return + } + + // 开始转移文件 + job.TaskModel.SetProgress(TransferringProgress) + ctx := context.Background() + err = fs.Relocate(ctx, targetFiles, &policy) + if err != nil { + job.SetErrorMsg(err.Error()) + return + } + + return +} + +// NewRelocateTask 新建转移任务 +func NewRelocateTask(user *model.User, dstPolicyID uint, dirs, files []uint) (Job, error) { + newTask := &RelocateTask{ + User: user, + TaskProps: RelocateProps{ + Dirs: dirs, + Files: files, + DstPolicyID: dstPolicyID, + }, + } + + record, err := Record(newTask) + if err != nil { + return nil, err + } + newTask.TaskModel = record + + return newTask, nil +} + +// NewCompressTaskFromModel 从数据库记录中恢复压缩任务 +func NewRelocateTaskFromModel(task *model.Task) (Job, error) { + user, err := model.GetActiveUserByID(task.UserID) + if err != nil { + return nil, err + } + newTask := &RelocateTask{ + User: &user, + TaskModel: task, + } + + err = json.Unmarshal([]byte(task.Props), &newTask.TaskProps) + if err != nil { + return nil, err + } + + return newTask, nil +} diff --git a/pkg/task/slavetask/transfer.go b/pkg/task/slavetask/transfer.go new file mode 100644 index 0000000..bdc5926 --- /dev/null +++ b/pkg/task/slavetask/transfer.go @@ -0,0 +1,138 @@ +package slavetask + +import ( + "context" + "os" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cluster" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/mq" + "github.com/cloudreve/Cloudreve/v3/pkg/serializer" + "github.com/cloudreve/Cloudreve/v3/pkg/task" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// TransferTask 文件中转任务 +type TransferTask struct { + Err *task.JobError + Req *serializer.SlaveTransferReq + MasterID string +} + +// Props 获取任务属性 
+func (job *TransferTask) Props() string { + return "" +} + +// Type 获取任务类型 +func (job *TransferTask) Type() int { + return 0 +} + +// Creator 获取创建者ID +func (job *TransferTask) Creator() uint { + return 0 +} + +// Model 获取任务的数据库模型 +func (job *TransferTask) Model() *model.Task { + return nil +} + +// SetStatus 设定状态 +func (job *TransferTask) SetStatus(status int) { +} + +// SetError 设定任务失败信息 +func (job *TransferTask) SetError(err *task.JobError) { + job.Err = err + +} + +// SetErrorMsg 设定任务失败信息 +func (job *TransferTask) SetErrorMsg(msg string, err error) { + jobErr := &task.JobError{Msg: msg} + if err != nil { + jobErr.Error = err.Error() + } + + job.SetError(jobErr) + + notifyMsg := mq.Message{ + TriggeredBy: job.MasterID, + Event: serializer.SlaveTransferFailed, + Content: serializer.SlaveTransferResult{ + Error: err.Error(), + }, + } + + if err := cluster.DefaultController.SendNotification(job.MasterID, job.Req.Hash(job.MasterID), notifyMsg); err != nil { + util.Log().Warning("Failed to send transfer failure notification to master node: %s", err) + } +} + +// GetError 返回任务失败信息 +func (job *TransferTask) GetError() *task.JobError { + return job.Err +} + +// Do 开始执行任务 +func (job *TransferTask) Do() { + fs, err := filesystem.NewAnonymousFileSystem() + if err != nil { + job.SetErrorMsg("Failed to initialize anonymous filesystem.", err) + return + } + + fs.Policy = job.Req.Policy + if err := fs.DispatchHandler(); err != nil { + job.SetErrorMsg("Failed to dispatch policy.", err) + return + } + + master, err := cluster.DefaultController.GetMasterInfo(job.MasterID) + if err != nil { + job.SetErrorMsg("Cannot found master node ID.", err) + return + } + + fs.SwitchToShadowHandler(master.Instance, master.URL.String(), master.ID) + file, err := os.Open(util.RelativePath(job.Req.Src)) + if err != nil { + job.SetErrorMsg("Failed to read source file.", err) + return + } + + defer file.Close() + + // 获取源文件大小 + fi, err := file.Stat() + if err != nil { + job.SetErrorMsg("Failed to 
get source file size.", err) + return + } + + size := fi.Size() + + err = fs.Handler.Put(context.Background(), &fsctx.FileStream{ + File: file, + SavePath: job.Req.Dst, + Size: uint64(size), + }) + if err != nil { + job.SetErrorMsg("Upload failed.", err) + return + } + + msg := mq.Message{ + TriggeredBy: job.MasterID, + Event: serializer.SlaveTransferSuccess, + Content: serializer.SlaveTransferResult{}, + } + + if err := cluster.DefaultController.SendNotification(job.MasterID, job.Req.Hash(job.MasterID), msg); err != nil { + util.Log().Warning("Failed to send transfer success notification to master node: %s", err) + } +} diff --git a/pkg/task/tranfer.go b/pkg/task/tranfer.go new file mode 100644 index 0000000..135c809 --- /dev/null +++ b/pkg/task/tranfer.go @@ -0,0 +1,192 @@ +package task + +import ( + "context" + "encoding/json" + "fmt" + "path" + "path/filepath" + "strings" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/cluster" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// TransferTask 文件中转任务 +type TransferTask struct { + User *model.User + TaskModel *model.Task + TaskProps TransferProps + Err *JobError + + zipPath string +} + +// TransferProps 中转任务属性 +type TransferProps struct { + Src []string `json:"src"` // 原始文件 + SrcSizes map[string]uint64 `json:"src_size"` // 原始文件的大小信息,从机转存时使用 + Parent string `json:"parent"` // 父目录 + Dst string `json:"dst"` // 目的目录ID + // 将会保留原始文件的目录结构,Src 除去 Parent 开头作为最终路径 + TrimPath bool `json:"trim_path"` + // 负责处理中专任务的节点ID + NodeID uint `json:"node_id"` +} + +// Props 获取任务属性 +func (job *TransferTask) Props() string { + res, _ := json.Marshal(job.TaskProps) + return string(res) +} + +// Type 获取任务状态 +func (job *TransferTask) Type() int { + return TransferTaskType +} + +// Creator 获取创建者ID +func (job *TransferTask) Creator() uint { + return job.User.ID +} + +// Model 
获取任务的数据库模型 +func (job *TransferTask) Model() *model.Task { + return job.TaskModel +} + +// SetStatus 设定状态 +func (job *TransferTask) SetStatus(status int) { + job.TaskModel.SetStatus(status) +} + +// SetError 设定任务失败信息 +func (job *TransferTask) SetError(err *JobError) { + job.Err = err + res, _ := json.Marshal(job.Err) + job.TaskModel.SetError(string(res)) + +} + +// SetErrorMsg 设定任务失败信息 +func (job *TransferTask) SetErrorMsg(msg string, err error) { + jobErr := &JobError{Msg: msg} + if err != nil { + jobErr.Error = err.Error() + } + job.SetError(jobErr) +} + +// GetError 返回任务失败信息 +func (job *TransferTask) GetError() *JobError { + return job.Err +} + +// Do 开始执行任务 +func (job *TransferTask) Do() { + // 创建文件系统 + fs, err := filesystem.NewFileSystem(job.User) + if err != nil { + job.SetErrorMsg(err.Error(), nil) + return + } + defer fs.Recycle() + + successCount := 0 + errorList := make([]string, 0, len(job.TaskProps.Src)) + for _, file := range job.TaskProps.Src { + dst := path.Join(job.TaskProps.Dst, filepath.Base(file)) + if job.TaskProps.TrimPath { + // 保留原始目录 + trim := util.FormSlash(job.TaskProps.Parent) + src := util.FormSlash(file) + dst = path.Join(job.TaskProps.Dst, strings.TrimPrefix(src, trim)) + } + + if job.TaskProps.NodeID > 1 { + // 指定为从机中转 + + // 获取从机节点 + node := cluster.Default.GetNodeByID(job.TaskProps.NodeID) + if node == nil { + job.SetErrorMsg("Invalid slave node.", nil) + } + + // 切换为从机节点处理上传 + fs.SetPolicyFromPath(path.Dir(dst)) + fs.SwitchToSlaveHandler(node) + err = fs.UploadFromStream(context.Background(), &fsctx.FileStream{ + File: nil, + Size: job.TaskProps.SrcSizes[file], + Name: path.Base(dst), + VirtualPath: path.Dir(dst), + Src: file, + }, false) + } else { + // 主机节点中转 + err = fs.UploadFromPath(context.Background(), file, dst, 0) + } + + if err != nil { + errorList = append(errorList, err.Error()) + } else { + successCount++ + job.TaskModel.SetProgress(successCount) + } + } + + if len(errorList) > 0 { + job.SetErrorMsg("Failed to transfer 
one or more file(s).", fmt.Errorf(strings.Join(errorList, "\n"))) + } + +} + +// NewTransferTask 新建中转任务 +func NewTransferTask(user uint, src []string, dst, parent string, trim bool, node uint, sizes map[string]uint64) (Job, error) { + creator, err := model.GetActiveUserByID(user) + if err != nil { + return nil, err + } + + newTask := &TransferTask{ + User: &creator, + TaskProps: TransferProps{ + Src: src, + Parent: parent, + Dst: dst, + TrimPath: trim, + NodeID: node, + SrcSizes: sizes, + }, + } + + record, err := Record(newTask) + if err != nil { + return nil, err + } + newTask.TaskModel = record + + return newTask, nil +} + +// NewTransferTaskFromModel 从数据库记录中恢复中转任务 +func NewTransferTaskFromModel(task *model.Task) (Job, error) { + user, err := model.GetActiveUserByID(task.UserID) + if err != nil { + return nil, err + } + newTask := &TransferTask{ + User: &user, + TaskModel: task, + } + + err = json.Unmarshal([]byte(task.Props), &newTask.TaskProps) + if err != nil { + return nil, err + } + + return newTask, nil +} diff --git a/pkg/task/worker.go b/pkg/task/worker.go new file mode 100644 index 0000000..e40a3b5 --- /dev/null +++ b/pkg/task/worker.go @@ -0,0 +1,44 @@ +package task + +import ( + "fmt" + "github.com/cloudreve/Cloudreve/v3/pkg/util" +) + +// Worker 处理任务的对象 +type Worker interface { + Do(Job) // 执行任务 +} + +// GeneralWorker 通用Worker +type GeneralWorker struct { +} + +// Do 执行任务 +func (worker *GeneralWorker) Do(job Job) { + util.Log().Debug("Start executing task.") + job.SetStatus(Processing) + + defer func() { + // 致命错误捕获 + if err := recover(); err != nil { + util.Log().Debug("Failed to execute task: %s", err) + job.SetError(&JobError{Msg: "Fatal error.", Error: fmt.Sprintf("%s", err)}) + job.SetStatus(Error) + } + }() + + // 开始执行任务 + job.Do() + + // 任务执行失败 + if err := job.GetError(); err != nil { + util.Log().Debug("Failed to execute task.") + job.SetStatus(Error) + return + } + + util.Log().Debug("Task finished.") + // 执行完成 + job.SetStatus(Complete) +} 
diff --git a/pkg/thumb/builtin.go b/pkg/thumb/builtin.go new file mode 100644 index 0000000..fadac6e --- /dev/null +++ b/pkg/thumb/builtin.go @@ -0,0 +1,193 @@ +package thumb + +import ( + "context" + "fmt" + "image" + "image/gif" + "image/jpeg" + "image/png" + "io" + "path/filepath" + "strings" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gofrs/uuid" + + //"github.com/nfnt/resize" + "golang.org/x/image/draw" +) + +func init() { + RegisterGenerator(&Builtin{}) +} + +// Thumb 缩略图 +type Thumb struct { + src image.Image + ext string +} + +// NewThumbFromFile 从文件数据获取新的Thumb对象, +// 尝试通过文件名name解码图像 +func NewThumbFromFile(file io.Reader, name string) (*Thumb, error) { + ext := strings.ToLower(filepath.Ext(name)) + // 无扩展名时 + if len(ext) == 0 { + return nil, fmt.Errorf("unknown image format: %w", ErrPassThrough) + } + + var err error + var img image.Image + switch ext[1:] { + case "jpg", "jpeg": + img, err = jpeg.Decode(file) + case "gif": + img, err = gif.Decode(file) + case "png": + img, err = png.Decode(file) + default: + return nil, fmt.Errorf("unknown image format: %w", ErrPassThrough) + } + if err != nil { + return nil, fmt.Errorf("failed to parse image: %w (%w)", err, ErrPassThrough) + } + + return &Thumb{ + src: img, + ext: ext[1:], + }, nil +} + +// GetThumb 生成给定最大尺寸的缩略图 +func (image *Thumb) GetThumb(width, height uint) { + //image.src = resize.Thumbnail(width, height, image.src, resize.Lanczos3) + image.src = Thumbnail(width, height, image.src) +} + +// GetSize 获取图像尺寸 +func (image *Thumb) GetSize() (int, int) { + b := image.src.Bounds() + return b.Max.X, b.Max.Y +} + +// Save 保存图像到给定路径 +func (image *Thumb) Save(w io.Writer) (err error) { + switch model.GetSettingByNameWithDefault("thumb_encode_method", "jpg") { + case "png": + err = png.Encode(w, image.src) + default: + err = jpeg.Encode(w, image.src, &jpeg.Options{Quality: model.GetIntSetting("thumb_encode_quality", 85)}) + } + + return err + 
+} + +// Thumbnail will downscale provided image to max width and height preserving +// original aspect ratio and using the interpolation function interp. +// It will return original image, without processing it, if original sizes +// are already smaller than provided constraints. +func Thumbnail(maxWidth, maxHeight uint, img image.Image) image.Image { + origBounds := img.Bounds() + origWidth := uint(origBounds.Dx()) + origHeight := uint(origBounds.Dy()) + newWidth, newHeight := origWidth, origHeight + + // Return original image if it have same or smaller size as constraints + if maxWidth >= origWidth && maxHeight >= origHeight { + return img + } + + // Preserve aspect ratio + if origWidth > maxWidth { + newHeight = uint(origHeight * maxWidth / origWidth) + if newHeight < 1 { + newHeight = 1 + } + newWidth = maxWidth + } + + if newHeight > maxHeight { + newWidth = uint(newWidth * maxHeight / newHeight) + if newWidth < 1 { + newWidth = 1 + } + newHeight = maxHeight + } + return Resize(newWidth, newHeight, img) +} + +func Resize(newWidth, newHeight uint, img image.Image) image.Image { + // Set the expected size that you want: + dst := image.NewRGBA(image.Rect(0, 0, int(newWidth), int(newHeight))) + // Resize: + draw.BiLinear.Scale(dst, dst.Rect, img, img.Bounds(), draw.Src, nil) + return dst +} + +// CreateAvatar 创建头像 +func (image *Thumb) CreateAvatar(uid uint) error { + // 读取头像相关设定 + savePath := util.RelativePath(model.GetSettingByName("avatar_path")) + s := model.GetIntSetting("avatar_size_s", 50) + m := model.GetIntSetting("avatar_size_m", 130) + l := model.GetIntSetting("avatar_size_l", 200) + + // 生成头像缩略图 + src := image.src + for k, size := range []int{s, m, l} { + out, err := util.CreatNestedFile(filepath.Join(savePath, fmt.Sprintf("avatar_%d_%d.png", uid, k))) + + if err != nil { + return err + } + defer out.Close() + + image.src = Resize(uint(size), uint(size), src) + err = image.Save(out) + if err != nil { + return err + } + } + + return nil + +} + +type 
Builtin struct{} + +func (b Builtin) Generate(ctx context.Context, file io.Reader, src, name string, options map[string]string) (*Result, error) { + img, err := NewThumbFromFile(file, name) + if err != nil { + return nil, err + } + + img.GetThumb(thumbSize(options)) + tempPath := filepath.Join( + util.RelativePath(model.GetSettingByName("temp_path")), + "thumb", + fmt.Sprintf("thumb_%s", uuid.Must(uuid.NewV4()).String()), + ) + + thumbFile, err := util.CreatNestedFile(tempPath) + if err != nil { + return nil, fmt.Errorf("failed to create temp file: %w", err) + } + + defer thumbFile.Close() + if err := img.Save(thumbFile); err != nil { + return nil, err + } + + return &Result{Path: tempPath}, nil +} + +func (b Builtin) Priority() int { + return 300 +} + +func (b Builtin) EnableFlag() string { + return "thumb_builtin_enabled" +} diff --git a/pkg/thumb/ffmpeg.go b/pkg/thumb/ffmpeg.go new file mode 100644 index 0000000..d98f107 --- /dev/null +++ b/pkg/thumb/ffmpeg.go @@ -0,0 +1,93 @@ +package thumb + +import ( + "bytes" + "context" + "fmt" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gofrs/uuid" + "io" + "os" + "os/exec" + "path/filepath" + "strings" +) + +func init() { + RegisterGenerator(&FfmpegGenerator{}) +} + +type FfmpegGenerator struct { + exts []string + lastRawExts string +} + +func (f *FfmpegGenerator) Generate(ctx context.Context, file io.Reader, src, name string, options map[string]string) (*Result, error) { + ffmpegOpts := model.GetSettingByNames("thumb_ffmpeg_path", "thumb_ffmpeg_exts", "thumb_ffmpeg_seek", "thumb_encode_method", "temp_path") + + if f.lastRawExts != ffmpegOpts["thumb_ffmpeg_exts"] { + f.exts = strings.Split(ffmpegOpts["thumb_ffmpeg_exts"], ",") + } + + if !util.IsInExtensionList(f.exts, name) { + return nil, fmt.Errorf("unsupported video format: %w", ErrPassThrough) + } + + tempOutputPath := filepath.Join( + util.RelativePath(ffmpegOpts["temp_path"]), + "thumb", + 
fmt.Sprintf("thumb_%s.%s", uuid.Must(uuid.NewV4()).String(), ffmpegOpts["thumb_encode_method"]), + ) + + tempInputPath := src + if tempInputPath == "" { + // If not local policy files, download to temp folder + tempInputPath = filepath.Join( + util.RelativePath(ffmpegOpts["temp_path"]), + "thumb", + fmt.Sprintf("ffmpeg_%s%s", uuid.Must(uuid.NewV4()).String(), filepath.Ext(name)), + ) + + // Due to limitations of ffmpeg, we need to write the input file to disk first + tempInputFile, err := util.CreatNestedFile(tempInputPath) + if err != nil { + return nil, fmt.Errorf("failed to create temp file: %w", err) + } + + defer os.Remove(tempInputPath) + defer tempInputFile.Close() + + if _, err = io.Copy(tempInputFile, file); err != nil { + return nil, fmt.Errorf("failed to write input file: %w", err) + } + + tempInputFile.Close() + } + + // Invoke ffmpeg + scaleOpt := fmt.Sprintf("scale=%s:%s:force_original_aspect_ratio=decrease", options["thumb_width"], options["thumb_height"]) + cmd := exec.CommandContext(ctx, + ffmpegOpts["thumb_ffmpeg_path"], "-ss", ffmpegOpts["thumb_ffmpeg_seek"], "-i", tempInputPath, + "-vf", scaleOpt, "-vframes", "1", tempOutputPath) + + // Redirect IO + var stdErr bytes.Buffer + cmd.Stdin = file + cmd.Stderr = &stdErr + + if err := cmd.Run(); err != nil { + util.Log().Warning("Failed to invoke ffmpeg: %s", stdErr.String()) + return nil, fmt.Errorf("failed to invoke ffmpeg: %w", err) + } + + return &Result{Path: tempOutputPath}, nil +} + +func (f *FfmpegGenerator) Priority() int { + return 200 +} + +func (f *FfmpegGenerator) EnableFlag() string { + return "thumb_ffmpeg_enabled" +} diff --git a/pkg/thumb/libreoffice.go b/pkg/thumb/libreoffice.go new file mode 100644 index 0000000..75e871b --- /dev/null +++ b/pkg/thumb/libreoffice.go @@ -0,0 +1,99 @@ +package thumb + +import ( + "bytes" + "context" + "fmt" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gofrs/uuid" + "io" + "os" + 
"os/exec" + "path/filepath" + "strings" +) + +func init() { + RegisterGenerator(&LibreOfficeGenerator{}) +} + +type LibreOfficeGenerator struct { + exts []string + lastRawExts string +} + +func (l *LibreOfficeGenerator) Generate(ctx context.Context, file io.Reader, src string, name string, options map[string]string) (*Result, error) { + sofficeOpts := model.GetSettingByNames("thumb_libreoffice_path", "thumb_libreoffice_exts", "thumb_encode_method", "temp_path") + + if l.lastRawExts != sofficeOpts["thumb_libreoffice_exts"] { + l.exts = strings.Split(sofficeOpts["thumb_libreoffice_exts"], ",") + } + + if !util.IsInExtensionList(l.exts, name) { + return nil, fmt.Errorf("unsupported document format: %w", ErrPassThrough) + } + + tempOutputPath := filepath.Join( + util.RelativePath(sofficeOpts["temp_path"]), + "thumb", + fmt.Sprintf("soffice_%s", uuid.Must(uuid.NewV4()).String()), + ) + + tempInputPath := src + if tempInputPath == "" { + // If not local policy files, download to temp folder + tempInputPath = filepath.Join( + util.RelativePath(sofficeOpts["temp_path"]), + "thumb", + fmt.Sprintf("soffice_%s%s", uuid.Must(uuid.NewV4()).String(), filepath.Ext(name)), + ) + + // Due to limitations of ffmpeg, we need to write the input file to disk first + tempInputFile, err := util.CreatNestedFile(tempInputPath) + if err != nil { + return nil, fmt.Errorf("failed to create temp file: %w", err) + } + + defer os.Remove(tempInputPath) + defer tempInputFile.Close() + + if _, err = io.Copy(tempInputFile, file); err != nil { + return nil, fmt.Errorf("failed to write input file: %w", err) + } + + tempInputFile.Close() + } + + // Convert the document to an image + cmd := exec.CommandContext(ctx, sofficeOpts["thumb_libreoffice_path"], "--headless", + "-nologo", "--nofirststartwizard", "--invisible", "--norestore", "--convert-to", + sofficeOpts["thumb_encode_method"], "--outdir", tempOutputPath, tempInputPath) + + // Redirect IO + var stdErr bytes.Buffer + cmd.Stdin = file + cmd.Stderr 
= &stdErr + + if err := cmd.Run(); err != nil { + util.Log().Warning("Failed to invoke LibreOffice: %s", stdErr.String()) + return nil, fmt.Errorf("failed to invoke LibreOffice: %w", err) + } + + return &Result{ + Path: filepath.Join( + tempOutputPath, + strings.TrimSuffix(filepath.Base(tempInputPath), filepath.Ext(tempInputPath))+"."+sofficeOpts["thumb_encode_method"], + ), + Continue: true, + Cleanup: []func(){func() { _ = os.RemoveAll(tempOutputPath) }}, + }, nil +} + +func (l *LibreOfficeGenerator) Priority() int { + return 50 +} + +func (l *LibreOfficeGenerator) EnableFlag() string { + return "thumb_libreoffice_enabled" +} diff --git a/pkg/thumb/pipeline.go b/pkg/thumb/pipeline.go new file mode 100644 index 0000000..8ea1fd5 --- /dev/null +++ b/pkg/thumb/pipeline.go @@ -0,0 +1,122 @@ +package thumb + +import ( + "context" + "errors" + "fmt" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "io" + "os" + "path/filepath" + "reflect" + "sort" + "strconv" +) + +// Generator generates a thumbnail for a given reader. +type Generator interface { + // Generate generates a thumbnail for a given reader. Src is the original file path, only provided + // for local policy files. + Generate(ctx context.Context, file io.Reader, src string, name string, options map[string]string) (*Result, error) + + // Priority of execution order, smaller value means higher priority. + Priority() int + + // EnableFlag returns the setting name to enable this generator. 
+ EnableFlag() string +} + +type Result struct { + Path string + Continue bool + Cleanup []func() +} + +type ( + GeneratorType string + GeneratorList []Generator +) + +var ( + Generators = GeneratorList{} + + ErrPassThrough = errors.New("pass through") + ErrNotAvailable = fmt.Errorf("thumbnail not available: %w", ErrPassThrough) +) + +func (g GeneratorList) Len() int { + return len(g) +} + +func (g GeneratorList) Less(i, j int) bool { + return g[i].Priority() < g[j].Priority() +} + +func (g GeneratorList) Swap(i, j int) { + g[i], g[j] = g[j], g[i] +} + +// RegisterGenerator registers a thumbnail generator. +func RegisterGenerator(generator Generator) { + Generators = append(Generators, generator) + sort.Sort(Generators) +} + +func (p GeneratorList) Generate(ctx context.Context, file io.Reader, src, name string, options map[string]string) (*Result, error) { + inputFile, inputSrc, inputName := file, src, name + for _, generator := range p { + if model.IsTrueVal(options[generator.EnableFlag()]) { + res, err := generator.Generate(ctx, inputFile, inputSrc, inputName, options) + if errors.Is(err, ErrPassThrough) { + util.Log().Debug("Failed to generate thumbnail using %s for %s: %s, passing through to next generator.", reflect.TypeOf(generator).String(), name, err) + continue + } + + if res != nil && res.Continue { + util.Log().Debug("Generator %s for %s returned continue, passing through to next generator.", reflect.TypeOf(generator).String(), name) + + // defer cleanup funcs + for _, cleanup := range res.Cleanup { + defer cleanup() + } + + // prepare file reader for next generator + intermediate, err := os.Open(res.Path) + if err != nil { + return nil, fmt.Errorf("failed to open intermediate thumb file: %w", err) + } + + defer intermediate.Close() + inputFile = intermediate + inputSrc = res.Path + inputName = filepath.Base(res.Path) + continue + } + + return res, err + } + } + return nil, ErrNotAvailable +} + +func (p GeneratorList) Priority() int { + return 0 +} + 
+func (p GeneratorList) EnableFlag() string { + return "" +} + +func thumbSize(options map[string]string) (uint, uint) { + w, h := uint(400), uint(300) + if wParsed, err := strconv.Atoi(options["thumb_width"]); err == nil { + w = uint(wParsed) + } + + if hParsed, err := strconv.Atoi(options["thumb_height"]); err == nil { + h = uint(hParsed) + } + + return w, h +} diff --git a/pkg/thumb/tester.go b/pkg/thumb/tester.go new file mode 100644 index 0000000..2dcc4be --- /dev/null +++ b/pkg/thumb/tester.go @@ -0,0 +1,74 @@ +package thumb + +import ( + "bytes" + "context" + "errors" + "fmt" + "os/exec" + "strings" +) + +var ( + ErrUnknownGenerator = errors.New("unknown generator type") + ErrUnknownOutput = errors.New("unknown output from generator") +) + +// TestGenerator tests thumb generator by getting lib version +func TestGenerator(ctx context.Context, name, executable string) (string, error) { + switch name { + case "vips": + return testVipsGenerator(ctx, executable) + case "ffmpeg": + return testFfmpegGenerator(ctx, executable) + case "libreOffice": + return testLibreOfficeGenerator(ctx, executable) + default: + return "", ErrUnknownGenerator + } +} + +func testVipsGenerator(ctx context.Context, executable string) (string, error) { + cmd := exec.CommandContext(ctx, executable, "--version") + var output bytes.Buffer + cmd.Stdout = &output + if err := cmd.Run(); err != nil { + return "", fmt.Errorf("failed to invoke vips executable: %w", err) + } + + if !strings.Contains(output.String(), "vips") { + return "", ErrUnknownOutput + } + + return output.String(), nil +} + +func testFfmpegGenerator(ctx context.Context, executable string) (string, error) { + cmd := exec.CommandContext(ctx, executable, "-version") + var output bytes.Buffer + cmd.Stdout = &output + if err := cmd.Run(); err != nil { + return "", fmt.Errorf("failed to invoke ffmpeg executable: %w", err) + } + + if !strings.Contains(output.String(), "ffmpeg") { + return "", ErrUnknownOutput + } + + return 
output.String(), nil +} + +func testLibreOfficeGenerator(ctx context.Context, executable string) (string, error) { + cmd := exec.CommandContext(ctx, executable, "--version") + var output bytes.Buffer + cmd.Stdout = &output + if err := cmd.Run(); err != nil { + return "", fmt.Errorf("failed to invoke libreoffice executable: %w", err) + } + + if !strings.Contains(output.String(), "LibreOffice") { + return "", ErrUnknownOutput + } + + return output.String(), nil +} diff --git a/pkg/thumb/vips.go b/pkg/thumb/vips.go new file mode 100644 index 0000000..ac43535 --- /dev/null +++ b/pkg/thumb/vips.go @@ -0,0 +1,78 @@ +package thumb + +import ( + "bytes" + "context" + "fmt" + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/util" + "github.com/gofrs/uuid" + "io" + "os/exec" + "path/filepath" + "strings" +) + +func init() { + RegisterGenerator(&VipsGenerator{}) +} + +type VipsGenerator struct { + exts []string + lastRawExts string +} + +func (v *VipsGenerator) Generate(ctx context.Context, file io.Reader, src, name string, options map[string]string) (*Result, error) { + vipsOpts := model.GetSettingByNames("thumb_vips_path", "thumb_vips_exts", "thumb_encode_quality", "thumb_encode_method", "temp_path") + + if v.lastRawExts != vipsOpts["thumb_vips_exts"] { + v.exts = strings.Split(vipsOpts["thumb_vips_exts"], ",") + } + + if !util.IsInExtensionList(v.exts, name) { + return nil, fmt.Errorf("unsupported image format: %w", ErrPassThrough) + } + + outputOpt := ".png" + if vipsOpts["thumb_encode_method"] == "jpg" { + outputOpt = fmt.Sprintf(".jpg[Q=%s]", vipsOpts["thumb_encode_quality"]) + } + + cmd := exec.CommandContext(ctx, + vipsOpts["thumb_vips_path"], "thumbnail_source", "[descriptor=0]", outputOpt, options["thumb_width"], + "--height", options["thumb_height"]) + + tempPath := filepath.Join( + util.RelativePath(vipsOpts["temp_path"]), + "thumb", + fmt.Sprintf("thumb_%s", uuid.Must(uuid.NewV4()).String()), + ) + + thumbFile, err := 
util.CreatNestedFile(tempPath) + if err != nil { + return nil, fmt.Errorf("failed to create temp file: %w", err) + } + + defer thumbFile.Close() + + // Redirect IO + var vipsErr bytes.Buffer + cmd.Stdin = file + cmd.Stdout = thumbFile + cmd.Stderr = &vipsErr + + if err := cmd.Run(); err != nil { + util.Log().Warning("Failed to invoke vips: %s", vipsErr.String()) + return nil, fmt.Errorf("failed to invoke vips: %w", err) + } + + return &Result{Path: tempPath}, nil +} + +func (v *VipsGenerator) Priority() int { + return 100 +} + +func (v *VipsGenerator) EnableFlag() string { + return "thumb_vips_enabled" +} diff --git a/pkg/util/common.go b/pkg/util/common.go new file mode 100644 index 0000000..fe1fa91 --- /dev/null +++ b/pkg/util/common.go @@ -0,0 +1,124 @@ +package util + +import ( + "math/rand" + "path/filepath" + "regexp" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// RandStringRunes 返回随机字符串 +func RandStringRunes(n int) string { + var letterRunes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} + +// ContainsUint 返回list中是否包含 +func ContainsUint(s []uint, e uint) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} + +// IsInExtensionList 返回文件的扩展名是否在给定的列表范围内 +func IsInExtensionList(extList []string, fileName string) bool { + ext := strings.ToLower(filepath.Ext(fileName)) + // 无扩展名时 + if len(ext) == 0 { + return false + } + + if ContainsString(extList, ext[1:]) { + return true + } + + return false +} + +// ContainsString 返回list中是否包含 +func ContainsString(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} + +// Replace 根据替换表执行批量替换 +func Replace(table map[string]string, s string) string { + for key, value := range table { + s = strings.Replace(s, key, value, -1) + } + return s +} + +// BuildRegexp 
构建用于SQL查询用的多条件正则 +func BuildRegexp(search []string, prefix, suffix, condition string) string { + var res string + for key, value := range search { + res += prefix + regexp.QuoteMeta(value) + suffix + if key < len(search)-1 { + res += condition + } + } + return res +} + +// BuildConcat 根据数据库类型构建字符串连接表达式 +func BuildConcat(str1, str2 string, DBType string) string { + switch DBType { + case "mysql": + return "CONCAT(" + str1 + "," + str2 + ")" + default: + return str1 + "||" + str2 + } +} + +// SliceIntersect 求两个切片交集 +func SliceIntersect(slice1, slice2 []string) []string { + m := make(map[string]int) + nn := make([]string, 0) + for _, v := range slice1 { + m[v]++ + } + + for _, v := range slice2 { + times, _ := m[v] + if times == 1 { + nn = append(nn, v) + } + } + return nn +} + +// SliceDifference 求两个切片差集 +func SliceDifference(slice1, slice2 []string) []string { + m := make(map[string]int) + nn := make([]string, 0) + inter := SliceIntersect(slice1, slice2) + for _, v := range inter { + m[v]++ + } + + for _, value := range slice1 { + times, _ := m[value] + if times == 0 { + nn = append(nn, value) + } + } + return nn +} diff --git a/pkg/util/io.go b/pkg/util/io.go new file mode 100644 index 0000000..fe3bd9a --- /dev/null +++ b/pkg/util/io.go @@ -0,0 +1,46 @@ +package util + +import ( + "io" + "os" + "path/filepath" +) + +// Exists reports whether the named file or directory exists. 
+func Exists(name string) bool { + if _, err := os.Stat(name); err != nil { + if os.IsNotExist(err) { + return false + } + } + return true +} + +// CreatNestedFile 给定path创建文件,如果目录不存在就递归创建 +func CreatNestedFile(path string) (*os.File, error) { + basePath := filepath.Dir(path) + if !Exists(basePath) { + err := os.MkdirAll(basePath, 0700) + if err != nil { + Log().Warning("Failed to create directory: %s", err) + return nil, err + } + } + + return os.Create(path) +} + +// IsEmpty 返回给定目录是否为空目录 +func IsEmpty(name string) (bool, error) { + f, err := os.Open(name) + if err != nil { + return false, err + } + defer f.Close() + + _, err = f.Readdirnames(1) // Or f.Readdir(1) + if err == io.EOF { + return true, nil + } + return false, err // Either not empty or error, suits both cases +} diff --git a/pkg/util/logger.go b/pkg/util/logger.go new file mode 100644 index 0000000..107ec71 --- /dev/null +++ b/pkg/util/logger.go @@ -0,0 +1,150 @@ +package util + +import ( + "fmt" + "github.com/fatih/color" + "sync" + "time" +) + +const ( + // LevelError 错误 + LevelError = iota + // LevelWarning 警告 + LevelWarning + // LevelInformational 提示 + LevelInformational + // LevelDebug 除错 + LevelDebug +) + +var GloablLogger *Logger +var Level = LevelDebug + +// Logger 日志 +type Logger struct { + level int + mu sync.Mutex +} + +// 日志颜色 +var colors = map[string]func(a ...interface{}) string{ + "Warning": color.New(color.FgYellow).Add(color.Bold).SprintFunc(), + "Panic": color.New(color.BgRed).Add(color.Bold).SprintFunc(), + "Error": color.New(color.FgRed).Add(color.Bold).SprintFunc(), + "Info": color.New(color.FgCyan).Add(color.Bold).SprintFunc(), + "Debug": color.New(color.FgWhite).Add(color.Bold).SprintFunc(), +} + +// 不同级别前缀与时间的间隔,保持宽度一致 +var spaces = map[string]string{ + "Warning": "", + "Panic": " ", + "Error": " ", + "Info": " ", + "Debug": " ", +} + +// Println 打印 +func (ll *Logger) Println(prefix string, msg string) { + // TODO Release时去掉 + // color.NoColor = false + + c := color.New() + + 
ll.mu.Lock() + defer ll.mu.Unlock() + + _, _ = c.Printf( + "%s%s %s %s\n", + colors[prefix]("["+prefix+"]"), + spaces[prefix], + time.Now().Format("2006-01-02 15:04:05"), + msg, + ) +} + +// Panic 极端错误 +func (ll *Logger) Panic(format string, v ...interface{}) { + if LevelError > ll.level { + return + } + msg := fmt.Sprintf(format, v...) + ll.Println("Panic", msg) + panic(msg) +} + +// Error 错误 +func (ll *Logger) Error(format string, v ...interface{}) { + if LevelError > ll.level { + return + } + msg := fmt.Sprintf(format, v...) + ll.Println("Error", msg) +} + +// Warning 警告 +func (ll *Logger) Warning(format string, v ...interface{}) { + if LevelWarning > ll.level { + return + } + msg := fmt.Sprintf(format, v...) + ll.Println("Warning", msg) +} + +// Info 信息 +func (ll *Logger) Info(format string, v ...interface{}) { + if LevelInformational > ll.level { + return + } + msg := fmt.Sprintf(format, v...) + ll.Println("Info", msg) +} + +// Debug 校验 +func (ll *Logger) Debug(format string, v ...interface{}) { + if LevelDebug > ll.level { + return + } + msg := fmt.Sprintf(format, v...) + ll.Println("Debug", msg) +} + +// Print GORM 的 Logger实现 +//func (ll *Logger) Print(v ...interface{}) { +// if LevelDebug > ll.level { +// return +// } +// msg := fmt.Sprintf("[SQL] %s", v...) 
+// ll.Println(msg) +//} + +// BuildLogger 构建logger +func BuildLogger(level string) { + intLevel := LevelError + switch level { + case "error": + intLevel = LevelError + case "warning": + intLevel = LevelWarning + case "info": + intLevel = LevelInformational + case "debug": + intLevel = LevelDebug + } + l := Logger{ + level: intLevel, + } + GloablLogger = &l +} + +// Log 返回日志对象 +func Log() *Logger { + if GloablLogger == nil { + l := Logger{ + level: Level, + } + GloablLogger = &l + } + return GloablLogger +} diff --git a/pkg/util/path.go b/pkg/util/path.go new file mode 100644 index 0000000..ff51d57 --- /dev/null +++ b/pkg/util/path.go @@ -0,0 +1,58 @@ +package util + +import ( + "os" + "path" + "path/filepath" + "strings" +) + +// DotPathToStandardPath 将","分割的路径转换为标准路径 +func DotPathToStandardPath(path string) string { + return "/" + strings.Replace(path, ",", "/", -1) +} + +// FillSlash 给路径补全`/` +func FillSlash(path string) string { + if path == "/" { + return path + } + return path + "/" +} + +// RemoveSlash 移除路径最后的`/` +func RemoveSlash(path string) string { + if len(path) > 1 { + return strings.TrimSuffix(path, "/") + } + return path +} + +// SplitPath 分割路径为列表 +func SplitPath(path string) []string { + if len(path) == 0 || path[0] != '/' { + return []string{} + } + + if path == "/" { + return []string{"/"} + } + + pathSplit := strings.Split(path, "/") + pathSplit[0] = "/" + return pathSplit +} + +// FormSlash 将path中的反斜杠'\'替换为'/' +func FormSlash(old string) string { + return path.Clean(strings.ReplaceAll(old, "\\", "/")) +} + +// RelativePath 获取相对可执行文件的路径 +func RelativePath(name string) string { + if filepath.IsAbs(name) { + return name + } + e, _ := os.Executable() + return filepath.Join(filepath.Dir(e), name) +} diff --git a/pkg/util/session.go b/pkg/util/session.go new file mode 100644 index 0000000..705eee1 --- /dev/null +++ b/pkg/util/session.go @@ -0,0 +1,39 @@ +package util + +import ( + "github.com/gin-contrib/sessions" + "github.com/gin-gonic/gin" +) + 
+// SetSession 设置session +func SetSession(c *gin.Context, list map[string]interface{}) { + s := sessions.Default(c) + for key, value := range list { + s.Set(key, value) + } + + err := s.Save() + if err != nil { + Log().Warning("无法设置 Session 值:%s", err) + } +} + +// GetSession 获取session +func GetSession(c *gin.Context, key string) interface{} { + s := sessions.Default(c) + return s.Get(key) +} + +// DeleteSession 删除session +func DeleteSession(c *gin.Context, key string) { + s := sessions.Default(c) + s.Delete(key) + s.Save() +} + +// ClearSession 清空session +func ClearSession(c *gin.Context) { + s := sessions.Default(c) + s.Clear() + s.Save() +} diff --git a/pkg/util/ztool.go b/pkg/util/ztool.go new file mode 100644 index 0000000..9733b6f --- /dev/null +++ b/pkg/util/ztool.go @@ -0,0 +1,35 @@ +package util + +import "strings" + +type ( + // 可取长度类型 + LenAble interface{ string | []any | chan any } +) + +// 计算切片元素总长度 +/* + 传入字符串切片, 返回其所有元素长度之和 + e.g. LenArray({`ele3`,`ele2`,`ele1`}) => 12 +*/ +func LenArray[T LenAble](a []T) int { + var o int + for i, r := 0, len(a); i < r; i++ { + o += len(a[i]) + } + return o +} + +// 字符串快速拼接 +/* + 传入多个字符串参数, 返回拼接后的结果 + e.g: StrConcat("str1", "str2", "str3") => "str1str2str3" +*/ +func StrConcat(a ...string) string { + var b strings.Builder + b.Grow(LenArray(a)) + for i, r := 0, len(a); i < r; i++ { + b.WriteString(a[i]) + } + return b.String() +} diff --git a/pkg/vol/vol.go b/pkg/vol/vol.go new file mode 100644 index 0000000..acc44e3 --- /dev/null +++ b/pkg/vol/vol.go @@ -0,0 +1,46 @@ +package vol + +import ( + // "fmt" + "github.com/cloudreve/Cloudreve/v3/pkg/request" + // "net/http" +) + +var ClientSecret = "" + +const CRMSite = "https://pro.cloudreve.org/crm/api/vol/" + +type Client interface { + // Sync VOL from CRM, return content (base64 encoded) and signature. 
+ Sync() (string, string, error) +} + +type VolClient struct { + secret string + client request.Client +} + +func New(secret string) Client { + return &VolClient{secret: secret, client: request.NewClient()} +} + +// func (c *VolClient) Sync() (string, string, error) { +// res, err := c.client.Request("GET", CRMSite+c.secret, nil).CheckHTTPResponse(http.StatusOK).DecodeResponse() +// if err != nil { +// return "", "", fmt.Errorf("failed to get VOL from CRM: %w", err) +// } + +// if res.Code != 0 { +// return "", "", fmt.Errorf("CRM return error: %s", res.Msg) +// } + +// vol := res.Data.(map[string]interface{}) +// return vol["content"].(string), vol["signature"].(string), nil +// } + +// Sync 同步Token <> +func (c *VolClient) Sync() (string, string, error) { + return `eyJkb21haW4iOiJjbG91ZHJldmUub3JnIiwicHVyY2hhc2VfZGF0ZSI6MTY3MDMyOTI3OX0=`, + `UzVBwjfFNTU1bSQV8OTgbMvTdRO7FwNYyMdTu4/phmyUltc6MrluUItiK0v+Uq6yX05L4ZnhTlojVLgi3zXWNq0Tjo3zW3CffZVwj7FCrmG72PBuQp4hV3+b/eMpUbYcTTT9zEt2mneSpGJBOsxDgaf9isVzP+J+YwynPJy1UMa1ckYlc/rEExcxqZxH1tiSHfkyuelIENDiwiggOZl7J2opM5jbxH9oTiAhxl6MN1dbY6DH9bydTibcylSXoQASCse6P/i6JmEWPSRDY22Ofkw3cqTzQcxuMSJjYYVkdAHdeqoDYi4ywmAr1tAJnlDyNNU/KmLQzufgAWjdGKTPNA==`, + nil +} diff --git a/pkg/webdav/file.go b/pkg/webdav/file.go new file mode 100644 index 0000000..a0e589b --- /dev/null +++ b/pkg/webdav/file.go @@ -0,0 +1,206 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +import ( + "context" + "net/http" + "path" + "path/filepath" + "strconv" + "time" + + model "github.com/cloudreve/Cloudreve/v3/models" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem" + "github.com/cloudreve/Cloudreve/v3/pkg/filesystem/fsctx" +) + +// slashClean is equivalent to but slightly more efficient than +// path.Clean("/" + name). 
+func slashClean(name string) string { + if name == "" || name[0] != '/' { + name = "/" + name + } + return path.Clean(name) +} + +// 更新Copy或Move后的修改时间 +func updateCopyMoveModtime(req *http.Request, fs *filesystem.FileSystem, dst string) error { + var modtime time.Time + if timeVal := req.Header.Get("X-OC-Mtime"); timeVal != "" { + timeUnix, err := strconv.ParseInt(timeVal, 10, 64) + if err == nil { + modtime = time.Unix(timeUnix, 0) + } + } + + if modtime.IsZero() { + return nil + } + + ok, fi := isPathExist(req.Context(), fs, dst) + if !ok { + return nil + } + + if fi.IsDir() { + return model.DB.Model(fi.(*model.Folder)).UpdateColumn("updated_at", modtime).Error + } + return model.DB.Model(fi.(*model.File)).UpdateColumn("updated_at", modtime).Error +} + +// moveFiles moves files and/or directories from src to dst. +// +// See section 9.9.4 for when various HTTP status codes apply. +func moveFiles(ctx context.Context, fs *filesystem.FileSystem, src FileInfo, dst string, overwrite bool) (status int, err error) { + + var ( + fileIDs []uint + folderIDs []uint + ) + if src.IsDir() { + folderIDs = []uint{src.(*model.Folder).ID} + } else { + fileIDs = []uint{src.(*model.File).ID} + } + + if overwrite { + if err := _checkOverwriteFile(ctx, fs, src, dst); err != nil { + return http.StatusInternalServerError, err + } + } + + // 判断是否需要移动 + if src.GetPosition() != path.Dir(dst) { + err = fs.Move( + context.WithValue(ctx, fsctx.WebdavDstName, path.Base(dst)), + folderIDs, + fileIDs, + src.GetPosition(), + path.Dir(dst), + ) + } else if src.GetName() != path.Base(dst) { + // 判断是否需要重命名 + err = fs.Rename( + ctx, + folderIDs, + fileIDs, + path.Base(dst), + ) + } + + if err != nil { + return http.StatusInternalServerError, err + } + return http.StatusNoContent, nil +} + +// copyFiles copies files and/or directories from src to dst. +// +// See section 9.8.5 for when various HTTP status codes apply. 
+func copyFiles(ctx context.Context, fs *filesystem.FileSystem, src FileInfo, dst string, overwrite bool, depth int, recursion int) (status int, err error) { + if recursion == 1000 { + return http.StatusInternalServerError, errRecursionTooDeep + } + recursion++ + + var ( + fileIDs []uint + folderIDs []uint + ) + + if overwrite { + if err := _checkOverwriteFile(ctx, fs, src, dst); err != nil { + return http.StatusInternalServerError, err + } + } + + if src.IsDir() { + folderIDs = []uint{src.(*model.Folder).ID} + } else { + fileIDs = []uint{src.(*model.File).ID} + } + + err = fs.Copy( + context.WithValue(ctx, fsctx.WebdavDstName, path.Base(dst)), + folderIDs, + fileIDs, + src.GetPosition(), + path.Dir(dst), + ) + if err != nil { + return http.StatusInternalServerError, err + } + + return http.StatusNoContent, nil +} + +// 判断目标 文件/夹 是否已经存在,存在则先删除目标文件/夹 +func _checkOverwriteFile(ctx context.Context, fs *filesystem.FileSystem, src FileInfo, dst string) error { + if src.IsDir() { + ok, folder := fs.IsPathExist(dst) + if ok { + return fs.Delete(ctx, []uint{folder.ID}, []uint{}, false, false) + } + } else { + ok, file := fs.IsFileExist(dst) + if ok { + return fs.Delete(ctx, []uint{}, []uint{file.ID}, false, false) + } + } + return nil +} + +// walkFS traverses filesystem fs starting at name up to depth levels. +// +// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node, +// walkFS calls walkFn. If a visited file system node is a directory and +// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node. +func walkFS( + ctx context.Context, + fs *filesystem.FileSystem, + depth int, + name string, + info FileInfo, + walkFn func(reqPath string, info FileInfo, err error) error) error { + // This implementation is based on Walk's code in the standard path/filepath package. 
+ err := walkFn(name, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + if !info.IsDir() || depth == 0 { + return nil + } + if depth == 1 { + depth = 0 + } + + dirs, _ := info.(*model.Folder).GetChildFolder() + files, _ := info.(*model.Folder).GetChildFiles() + + for _, fileInfo := range files { + filename := path.Join(name, fileInfo.Name) + err = walkFS(ctx, fs, depth, filename, &fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + + for _, fileInfo := range dirs { + filename := path.Join(name, fileInfo.Name) + err = walkFS(ctx, fs, depth, filename, &fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + return nil +} diff --git a/pkg/webdav/if.go b/pkg/webdav/if.go new file mode 100644 index 0000000..a98dac2 --- /dev/null +++ b/pkg/webdav/if.go @@ -0,0 +1,173 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package webdav + +// The If header is covered by Section 10.4. +// http://www.webdav.org/specs/rfc4918.html#HEADER_If + +import ( + "strings" +) + +// ifHeader is a disjunction (OR) of ifLists. +type ifHeader struct { + lists []ifList +} + +// ifList is a conjunction (AND) of Conditions, and an optional resource tag. +type ifList struct { + resourceTag string + conditions []Condition +} + +// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string +// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is +// returned by req.Header.Get("If") for a http.Request req. 
+func parseIfHeader(httpHeader string) (h ifHeader, ok bool) { + s := strings.TrimSpace(httpHeader) + switch tokenType, _, _ := lex(s); tokenType { + case '(': + return parseNoTagLists(s) + case angleTokenType: + return parseTaggedLists(s) + default: + return ifHeader{}, false + } +} + +func parseNoTagLists(s string) (h ifHeader, ok bool) { + for { + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + } +} + +func parseTaggedLists(s string) (h ifHeader, ok bool) { + resourceTag, n := "", 0 + for first := true; ; first = false { + tokenType, tokenStr, remaining := lex(s) + switch tokenType { + case angleTokenType: + if !first && n == 0 { + return ifHeader{}, false + } + resourceTag, n = tokenStr, 0 + s = remaining + case '(': + n++ + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + l.resourceTag = resourceTag + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + default: + return ifHeader{}, false + } + } +} + +func parseList(s string) (l ifList, remaining string, ok bool) { + tokenType, _, s := lex(s) + if tokenType != '(' { + return ifList{}, "", false + } + for { + tokenType, _, remaining = lex(s) + if tokenType == ')' { + if len(l.conditions) == 0 { + return ifList{}, "", false + } + return l, remaining, true + } + c, remaining, ok := parseCondition(s) + if !ok { + return ifList{}, "", false + } + l.conditions = append(l.conditions, c) + s = remaining + } +} + +func parseCondition(s string) (c Condition, remaining string, ok bool) { + tokenType, tokenStr, s := lex(s) + if tokenType == notTokenType { + c.Not = true + tokenType, tokenStr, s = lex(s) + } + switch tokenType { + case strTokenType, angleTokenType: + c.Token = tokenStr + case squareTokenType: + c.ETag = tokenStr + default: + return Condition{}, "", false + } + return c, s, true +} + +// Single-rune tokens like '(' or ')' 
have a token type equal to their rune. +// All other tokens have a negative token type. +const ( + errTokenType = rune(-1) + eofTokenType = rune(-2) + strTokenType = rune(-3) + notTokenType = rune(-4) + angleTokenType = rune(-5) + squareTokenType = rune(-6) +) + +func lex(s string) (tokenType rune, tokenStr string, remaining string) { + // The net/textproto Data that parses the HTTP header will collapse + // Linear White Space that spans multiple "\r\n" lines to a single " ", + // so we don't need to look for '\r' or '\n'. + for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') { + s = s[1:] + } + if len(s) == 0 { + return eofTokenType, "", "" + } + i := 0 +loop: + for ; i < len(s); i++ { + switch s[i] { + case '\t', ' ', '(', ')', '<', '>', '[', ']': + break loop + } + } + + if i != 0 { + tokenStr, remaining = s[:i], s[i:] + if tokenStr == "Not" { + return notTokenType, "", remaining + } + return strTokenType, tokenStr, remaining + } + + j := 0 + switch s[0] { + case '<': + j, tokenType = strings.IndexByte(s, '>'), angleTokenType + case '[': + j, tokenType = strings.IndexByte(s, ']'), squareTokenType + default: + return rune(s[0]), "", s[1:] + } + if j < 0 { + return errTokenType, "", "" + } + return tokenType, s[1:j], s[j+1:] +} diff --git a/pkg/webdav/internal/xml/README b/pkg/webdav/internal/xml/README new file mode 100644 index 0000000..89656f4 --- /dev/null +++ b/pkg/webdav/internal/xml/README @@ -0,0 +1,11 @@ +This is a fork of the encoding/xml package at ca1d6c4, the last commit before +https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name +space behavior" made late in the lead-up to the Go 1.5 release. + +The list of encoding/xml changes is at +https://go.googlesource.com/go/+log/master/src/encoding/xml + +This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is +released. 
+ +See http://golang.org/issue/11841 diff --git a/pkg/webdav/internal/xml/marshal.go b/pkg/webdav/internal/xml/marshal.go new file mode 100644 index 0000000..cb82ec2 --- /dev/null +++ b/pkg/webdav/internal/xml/marshal.go @@ -0,0 +1,1223 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bufio" + "bytes" + "encoding" + "fmt" + "io" + "reflect" + "strconv" + "strings" +) + +const ( + // A generic XML header suitable for use with the output of Marshal. + // This is not automatically added to any output of this package, + // it is provided as a convenience. + Header = `` + "\n" +) + +// Marshal returns the XML encoding of v. +// +// Marshal handles an array or slice by marshalling each of the elements. +// Marshal handles a pointer by marshalling the value it points at or, if the +// pointer is nil, by writing nothing. Marshal handles an interface value by +// marshalling the value it contains or, if the interface value is nil, by +// writing nothing. Marshal handles all other data by writing one or more XML +// elements containing the data. +// +// The name for the XML elements is taken from, in order of preference: +// - the tag on the XMLName field, if the data is a struct +// - the value of the XMLName field of type xml.Name +// - the tag of the struct field used to obtain the data +// - the name of the struct field used to obtain the data +// - the name of the marshalled type +// +// The XML element for a struct contains marshalled elements for each of the +// exported fields of the struct, with these exceptions: +// - the XMLName field, described above, is omitted. +// - a field with tag "-" is omitted. +// - a field with tag "name,attr" becomes an attribute with +// the given name in the XML element. +// - a field with tag ",attr" becomes an attribute with the +// field name in the XML element. 
+// - a field with tag ",chardata" is written as character data, +// not as an XML element. +// - a field with tag ",innerxml" is written verbatim, not subject +// to the usual marshalling procedure. +// - a field with tag ",comment" is written as an XML comment, not +// subject to the usual marshalling procedure. It must not contain +// the "--" string within it. +// - a field with a tag including the "omitempty" option is omitted +// if the field value is empty. The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or +// string of length zero. +// - an anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// If a field uses a tag "a>b>c", then the element c will be nested inside +// parent elements a and b. Fields that appear next to each other that name +// the same parent will be enclosed in one XML element. +// +// See MarshalIndent for an example. +// +// Marshal will return an error if asked to marshal a channel, function, or map. +func Marshal(v interface{}) ([]byte, error) { + var b bytes.Buffer + if err := NewEncoder(&b).Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Marshaler is the interface implemented by objects that can marshal +// themselves into valid XML elements. +// +// MarshalXML encodes the receiver as zero or more XML elements. +// By convention, arrays or slices are typically encoded as a sequence +// of elements, one per entry. +// Using start as the element tag is not required, but doing so +// will enable Unmarshal to match the XML elements to the correct +// struct field. +// One common implementation strategy is to construct a separate +// value with a layout corresponding to the desired XML and then +// to encode it using e.EncodeElement. +// Another common strategy is to use repeated calls to e.EncodeToken +// to generate the XML output one token at a time. 
+// The sequence of encoded tokens must make up zero or more valid +// XML elements. +type Marshaler interface { + MarshalXML(e *Encoder, start StartElement) error +} + +// MarshalerAttr is the interface implemented by objects that can marshal +// themselves into valid XML attributes. +// +// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver. +// Using name as the attribute name is not required, but doing so +// will enable Unmarshal to match the attribute to the correct +// struct field. +// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute +// will be generated in the output. +// MarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type MarshalerAttr interface { + MarshalXMLAttr(name Name) (Attr, error) +} + +// MarshalIndent works like Marshal, but each XML element begins on a new +// indented line that starts with prefix and is followed by one or more +// copies of indent according to the nesting depth. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + var b bytes.Buffer + enc := NewEncoder(&b) + enc.Indent(prefix, indent) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// An Encoder writes XML data to an output stream. +type Encoder struct { + p printer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{printer{Writer: bufio.NewWriter(w)}} + e.p.encoder = e + return e +} + +// Indent sets the encoder to generate XML in which each element +// begins on a new indented line that starts with prefix and is followed by +// one or more copies of indent according to the nesting depth. +func (enc *Encoder) Indent(prefix, indent string) { + enc.p.prefix = prefix + enc.p.indent = indent +} + +// Encode writes the XML encoding of v to the stream. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. 
+// +// Encode calls Flush before returning. +func (enc *Encoder) Encode(v interface{}) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil) + if err != nil { + return err + } + return enc.p.Flush() +} + +// EncodeElement writes the XML encoding of v to the stream, +// using start as the outermost tag in the encoding. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// EncodeElement calls Flush before returning. +func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start) + if err != nil { + return err + } + return enc.p.Flush() +} + +var ( + begComment = []byte("") + endProcInst = []byte("?>") + endDirective = []byte(">") +) + +// EncodeToken writes the given XML token to the stream. +// It returns an error if StartElement and EndElement tokens are not +// properly matched. +// +// EncodeToken does not call Flush, because usually it is part of a +// larger operation such as Encode or EncodeElement (or a custom +// Marshaler's MarshalXML invoked during those), and those will call +// Flush when finished. Callers that create an Encoder and then invoke +// EncodeToken directly, without using Encode or EncodeElement, need to +// call Flush when finished to ensure that the XML is written to the +// underlying writer. +// +// EncodeToken allows writing a ProcInst with Target set to "xml" only +// as the first token in the stream. +// +// When encoding a StartElement holding an XML namespace prefix +// declaration for a prefix that is not already declared, contained +// elements (including the StartElement itself) will use the declared +// prefix when encoding names with matching namespace URIs. 
+func (enc *Encoder) EncodeToken(t Token) error { + + p := &enc.p + switch t := t.(type) { + case StartElement: + if err := p.writeStart(&t); err != nil { + return err + } + case EndElement: + if err := p.writeEnd(t.Name); err != nil { + return err + } + case CharData: + escapeText(p, t, false) + case Comment: + if bytes.Contains(t, endComment) { + return fmt.Errorf("xml: EncodeToken of Comment containing --> marker") + } + p.WriteString("") + return p.cachedWriteError() + case ProcInst: + // First token to be encoded which is also a ProcInst with target of xml + // is the xml declaration. The only ProcInst where target of xml is allowed. + if t.Target == "xml" && p.Buffered() != 0 { + return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded") + } + if !isNameString(t.Target) { + return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target") + } + if bytes.Contains(t.Inst, endProcInst) { + return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker") + } + p.WriteString(" 0 { + p.WriteByte(' ') + p.Write(t.Inst) + } + p.WriteString("?>") + case Directive: + if !isValidDirective(t) { + return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers") + } + p.WriteString("") + default: + return fmt.Errorf("xml: EncodeToken of invalid token type") + + } + return p.cachedWriteError() +} + +// isValidDirective reports whether dir is a valid directive text, +// meaning angle brackets are matched, ignoring comments and strings. 
func isValidDirective(dir Directive) bool {
	var (
		depth     int  // unmatched '<' nesting level
		inquote   uint8 // current quote char (' or "), 0 when outside quotes
		incomment bool // inside a <!-- --> comment
	)
	for i, c := range dir {
		switch {
		case incomment:
			if c == '>' {
				if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) {
					incomment = false
				}
			}
			// Just ignore anything in comment
		case inquote != 0:
			if c == inquote {
				inquote = 0
			}
			// Just ignore anything within quotes
		case c == '\'' || c == '"':
			inquote = c
		case c == '<':
			if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) {
				incomment = true
			} else {
				depth++
			}
		case c == '>':
			if depth == 0 {
				return false
			}
			depth--
		}
	}
	// Valid only if every '<' was closed and no quote/comment is left open.
	return depth == 0 && inquote == 0 && !incomment
}

// Flush flushes any buffered XML to the underlying writer.
// See the EncodeToken documentation for details about when it is necessary.
func (enc *Encoder) Flush() error {
	return enc.p.Flush()
}

// printer is the low-level output state shared by Encoder: a buffered
// writer plus indentation bookkeeping, the stack of open tags, and the
// namespace-prefix tables used when writing qualified names.
type printer struct {
	*bufio.Writer
	encoder    *Encoder
	seq        int    // sequence number used to generate unique ns prefixes
	indent     string
	prefix     string
	depth      int    // current element nesting depth (for indentation)
	indentedIn bool
	putNewline bool
	defaultNS  string // URL of the current default name space
	attrNS     map[string]string // map prefix -> name space
	attrPrefix map[string]string // map name space -> prefix
	prefixes   []printerPrefix   // undo stack of namespace definitions
	tags       []Name            // stack of open element names
}

// printerPrefix holds a namespace undo record.
// When an element is popped, the prefix record
// is set back to the recorded URL. The empty
// prefix records the URL for the default name space.
//
// The start of an element is recorded with an element
// that has mark=true.
type printerPrefix struct {
	prefix string
	url    string
	mark   bool
}

// prefixForNS returns the prefix to use when writing a name in the given
// name space ("" means the default name space suffices, i.e. no prefix).
func (p *printer) prefixForNS(url string, isAttr bool) string {
	// The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml"
	// and must be referred to that way.
	// (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns",
	// but users should not be trying to use that one directly - that's our job.)
	if url == xmlURL {
		return "xml"
	}
	if !isAttr && url == p.defaultNS {
		// We can use the default name space.
		return ""
	}
	return p.attrPrefix[url]
}

// defineNS pushes any namespace definition found in the given attribute.
// If ignoreNonEmptyDefault is true, an xmlns="nonempty"
// attribute will be ignored.
func (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error {
	var prefix string
	if attr.Name.Local == "xmlns" {
		if attr.Name.Space != "" && attr.Name.Space != "xml" && attr.Name.Space != xmlURL {
			return fmt.Errorf("xml: cannot redefine xmlns attribute prefix")
		}
	} else if attr.Name.Space == "xmlns" && attr.Name.Local != "" {
		prefix = attr.Name.Local
		if attr.Value == "" {
			// Technically, an empty XML namespace is allowed for an attribute.
			// From http://www.w3.org/TR/xml-names11/#scoping-defaulting:
			//
			// 	The attribute value in a namespace declaration for a prefix may be
			//	empty. This has the effect, within the scope of the declaration, of removing
			//	any association of the prefix with a namespace name.
			//
			// However our namespace prefixes here are used only as hints. There's
			// no need to respect the removal of a namespace prefix, so we ignore it.
			return nil
		}
	} else {
		// Ignore: it's not a namespace definition
		return nil
	}
	if prefix == "" {
		if attr.Value == p.defaultNS {
			// No need for redefinition.
			return nil
		}
		if attr.Value != "" && ignoreNonEmptyDefault {
			// We have an xmlns="..." value but
			// it can't define a name space in this context,
			// probably because the element has an empty
			// name space. In this case, we just ignore
			// the name space declaration.
			return nil
		}
	} else if _, ok := p.attrPrefix[attr.Value]; ok {
		// There's already a prefix for the given name space,
		// so use that. This prevents us from
		// having two prefixes for the same name space
		// so attrNS and attrPrefix can remain bijective.
		return nil
	}
	p.pushPrefix(prefix, attr.Value)
	return nil
}

// createNSPrefix creates a name space prefix attribute
// to use for the given name space, defining a new prefix
// if necessary.
// If isAttr is true, the prefix is to be created for an attribute
// prefix, which means that the default name space cannot
// be used.
func (p *printer) createNSPrefix(url string, isAttr bool) {
	if _, ok := p.attrPrefix[url]; ok {
		// We already have a prefix for the given URL.
		return
	}
	switch {
	case !isAttr && url == p.defaultNS:
		// We can use the default name space.
		return
	case url == "":
		// The only way we can encode names in the empty
		// name space is by using the default name space,
		// so we must use that.
		if p.defaultNS != "" {
			// The default namespace is non-empty, so we
			// need to set it to empty.
			p.pushPrefix("", "")
		}
		return
	case url == xmlURL:
		return
	}
	// TODO If the URL is an existing prefix, we could
	// use it as is. That would enable the
	// marshaling of elements that had been unmarshaled
	// and with a name space prefix that was not found.
	// although technically it would be incorrect.

	// Pick a name. We try to use the final element of the path
	// but fall back to _.
	prefix := strings.TrimRight(url, "/")
	if i := strings.LastIndex(prefix, "/"); i >= 0 {
		prefix = prefix[i+1:]
	}
	if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") {
		prefix = "_"
	}
	if strings.HasPrefix(prefix, "xml") {
		// xmlanything is reserved.
		prefix = "_" + prefix
	}
	if p.attrNS[prefix] != "" {
		// Name is taken. Find a better one.
		for p.seq++; ; p.seq++ {
			if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" {
				prefix = id
				break
			}
		}
	}

	p.pushPrefix(prefix, url)
}

// writeNamespaces writes xmlns attributes for all the
// namespace prefixes that have been defined in
// the current element.
func (p *printer) writeNamespaces() {
	// Walk the undo stack backwards until the current element's mark:
	// everything above the mark was defined by this element's attributes.
	for i := len(p.prefixes) - 1; i >= 0; i-- {
		prefix := p.prefixes[i]
		if prefix.mark {
			return
		}
		p.WriteString(" ")
		if prefix.prefix == "" {
			// Default name space.
			p.WriteString(`xmlns="`)
		} else {
			p.WriteString("xmlns:")
			p.WriteString(prefix.prefix)
			p.WriteString(`="`)
		}
		EscapeText(p, []byte(p.nsForPrefix(prefix.prefix)))
		p.WriteString(`"`)
	}
}

// pushPrefix pushes a new prefix on the prefix stack
// without checking to see if it is already defined.
func (p *printer) pushPrefix(prefix, url string) {
	// Record the *previous* URL for this prefix so popPrefix can undo.
	p.prefixes = append(p.prefixes, printerPrefix{
		prefix: prefix,
		url:    p.nsForPrefix(prefix),
	})
	p.setAttrPrefix(prefix, url)
}

// nsForPrefix returns the name space for the given
// prefix. Note that this is not valid for the
// empty attribute prefix, which always has an empty
// name space.
func (p *printer) nsForPrefix(prefix string) string {
	if prefix == "" {
		return p.defaultNS
	}
	return p.attrNS[prefix]
}

// markPrefix marks the start of an element on the prefix
// stack.
func (p *printer) markPrefix() {
	p.prefixes = append(p.prefixes, printerPrefix{
		mark: true,
	})
}

// popPrefix pops all defined prefixes for the current
// element.
func (p *printer) popPrefix() {
	for len(p.prefixes) > 0 {
		prefix := p.prefixes[len(p.prefixes)-1]
		p.prefixes = p.prefixes[:len(p.prefixes)-1]
		if prefix.mark {
			break
		}
		// Restore the URL the prefix had before this element.
		p.setAttrPrefix(prefix.prefix, prefix.url)
	}
}

// setAttrPrefix sets an attribute name space prefix.
// If url is empty, the attribute is removed.
// If prefix is empty, the default name space is set.
func (p *printer) setAttrPrefix(prefix, url string) {
	if prefix == "" {
		p.defaultNS = url
		return
	}
	if url == "" {
		delete(p.attrPrefix, p.attrNS[prefix])
		delete(p.attrNS, prefix)
		return
	}
	if p.attrPrefix == nil {
		// Need to define a new name space.
		p.attrPrefix = make(map[string]string)
		p.attrNS = make(map[string]string)
	}
	// Remove any old prefix value. This is OK because we maintain a
	// strict one-to-one mapping between prefix and URL (see
	// defineNS)
	delete(p.attrPrefix, p.attrNS[prefix])
	p.attrPrefix[url] = prefix
	p.attrNS[prefix] = url
}

var (
	marshalerType     = reflect.TypeOf((*Marshaler)(nil)).Elem()
	marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem()
	textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
)

// marshalValue writes one or more XML elements representing val.
// If val was obtained from a struct field, finfo must have its details.
func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error {
	if startTemplate != nil && startTemplate.Name.Local == "" {
		return fmt.Errorf("xml: EncodeElement of StartElement with missing name")
	}

	if !val.IsValid() {
		return nil
	}
	if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) {
		return nil
	}

	// Drill into interfaces and pointers.
	// This can turn into an infinite loop given a cyclic chain,
	// but it matches the Go 1 behavior.
	for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr {
		if val.IsNil() {
			return nil
		}
		val = val.Elem()
	}

	kind := val.Kind()
	typ := val.Type()

	// Check for marshaler.
	if val.CanInterface() && typ.Implements(marshalerType) {
		return p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate))
	}
	if val.CanAddr() {
		pv := val.Addr()
		if pv.CanInterface() && pv.Type().Implements(marshalerType) {
			return p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate))
		}
	}

	// Check for text marshaler.
	if val.CanInterface() && typ.Implements(textMarshalerType) {
		return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate))
	}
	if val.CanAddr() {
		pv := val.Addr()
		if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
			return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate))
		}
	}

	// Slices and arrays iterate over the elements. They do not have an enclosing tag.
	if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 {
		for i, n := 0, val.Len(); i < n; i++ {
			if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil {
				return err
			}
		}
		return nil
	}

	tinfo, err := getTypeInfo(typ)
	if err != nil {
		return err
	}

	// Create start element.
	// Precedence for the XML element name is:
	// 0. startTemplate
	// 1. XMLName field in underlying struct;
	// 2. field name/tag in the struct field; and
	// 3. type name
	var start StartElement

	// explicitNS records whether the element's name space has been
	// explicitly set (for example an XMLName field).
	explicitNS := false

	if startTemplate != nil {
		start.Name = startTemplate.Name
		explicitNS = true
		start.Attr = append(start.Attr, startTemplate.Attr...)
	} else if tinfo.xmlname != nil {
		xmlname := tinfo.xmlname
		if xmlname.name != "" {
			start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name
		} else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" {
			start.Name = v
		}
		explicitNS = true
	}
	if start.Name.Local == "" && finfo != nil {
		start.Name.Local = finfo.name
		if finfo.xmlns != "" {
			start.Name.Space = finfo.xmlns
			explicitNS = true
		}
	}
	if start.Name.Local == "" {
		name := typ.Name()
		if name == "" {
			return &UnsupportedTypeError{typ}
		}
		start.Name.Local = name
	}

	// defaultNS records the default name space as set by a xmlns="..."
	// attribute. We don't set p.defaultNS because we want to let
	// the attribute writing code (in p.defineNS) be solely responsible
	// for maintaining that.
	defaultNS := p.defaultNS

	// Attributes
	for i := range tinfo.fields {
		finfo := &tinfo.fields[i]
		if finfo.flags&fAttr == 0 {
			continue
		}
		attr, err := p.fieldAttr(finfo, val)
		if err != nil {
			return err
		}
		if attr.Name.Local == "" {
			continue
		}
		start.Attr = append(start.Attr, attr)
		if attr.Name.Space == "" && attr.Name.Local == "xmlns" {
			defaultNS = attr.Value
		}
	}
	if !explicitNS {
		// Historic behavior: elements use the default name space
		// they are contained in by default.
		start.Name.Space = defaultNS
	}
	// Historic behaviour: an element that's in a namespace sets
	// the default namespace for all elements contained within it.
	start.setDefaultNamespace()

	if err := p.writeStart(&start); err != nil {
		return err
	}

	if val.Kind() == reflect.Struct {
		err = p.marshalStruct(tinfo, val)
	} else {
		s, b, err1 := p.marshalSimple(typ, val)
		if err1 != nil {
			err = err1
		} else if b != nil {
			EscapeText(p, b)
		} else {
			p.EscapeString(s)
		}
	}
	if err != nil {
		return err
	}

	if err := p.writeEnd(start.Name); err != nil {
		return err
	}

	return p.cachedWriteError()
}

// fieldAttr returns the attribute of the given field.
// If the returned attribute has an empty Name.Local,
// it should not be used.
// The given value holds the value containing the field.
func (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) {
	fv := finfo.value(val)
	name := Name{Space: finfo.xmlns, Local: finfo.name}
	if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) {
		return Attr{}, nil
	}
	if fv.Kind() == reflect.Interface && fv.IsNil() {
		return Attr{}, nil
	}
	// Custom attribute marshalers take precedence, on the value and
	// then on its address.
	if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) {
		attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name)
		return attr, err
	}
	if fv.CanAddr() {
		pv := fv.Addr()
		if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) {
			attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name)
			return attr, err
		}
	}
	// Then encoding.TextMarshaler, again value first then address.
	if fv.CanInterface() && fv.Type().Implements(textMarshalerType) {
		text, err := fv.Interface().(encoding.TextMarshaler).MarshalText()
		if err != nil {
			return Attr{}, err
		}
		return Attr{name, string(text)}, nil
	}
	if fv.CanAddr() {
		pv := fv.Addr()
		if pv.CanInterface() && pv.Type().Implements(textMarshalerType) {
			text, err := pv.Interface().(encoding.TextMarshaler).MarshalText()
			if err != nil {
				return Attr{}, err
			}
			return Attr{name, string(text)}, nil
		}
	}
	// Dereference or skip nil pointer, interface values.
	switch fv.Kind() {
	case reflect.Ptr, reflect.Interface:
		if fv.IsNil() {
			return Attr{}, nil
		}
		fv = fv.Elem()
	}
	s, b, err := p.marshalSimple(fv.Type(), fv)
	if err != nil {
		return Attr{}, err
	}
	if b != nil {
		s = string(b)
	}
	return Attr{name, s}, nil
}

// defaultStart returns the default start element to use,
// given the reflect type, field info, and start template.
func (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement {
	var start StartElement
	// Precedence for the XML element name is as above,
	// except that we do not look inside structs for the first field.
	if startTemplate != nil {
		start.Name = startTemplate.Name
		start.Attr = append(start.Attr, startTemplate.Attr...)
	} else if finfo != nil && finfo.name != "" {
		start.Name.Local = finfo.name
		start.Name.Space = finfo.xmlns
	} else if typ.Name() != "" {
		start.Name.Local = typ.Name()
	} else {
		// Must be a pointer to a named type,
		// since it has the Marshaler methods.
		start.Name.Local = typ.Elem().Name()
	}
	// Historic behaviour: elements use the name space of
	// the element they are contained in by default.
	if start.Name.Space == "" {
		start.Name.Space = p.defaultNS
	}
	start.setDefaultNamespace()
	return start
}

// marshalInterface marshals a Marshaler interface value.
func (p *printer) marshalInterface(val Marshaler, start StartElement) error {
	// Push a marker onto the tag stack so that MarshalXML
	// cannot close the XML tags that it did not open.
	p.tags = append(p.tags, Name{})
	n := len(p.tags)

	err := val.MarshalXML(p.encoder, start)
	if err != nil {
		return err
	}

	// Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark.
	if len(p.tags) > n {
		return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local)
	}
	p.tags = p.tags[:n-1]
	return nil
}

// marshalTextInterface marshals a TextMarshaler interface value.
func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error {
	if err := p.writeStart(&start); err != nil {
		return err
	}
	text, err := val.MarshalText()
	if err != nil {
		return err
	}
	EscapeText(p, text)
	return p.writeEnd(start.Name)
}

// writeStart writes the given start element.
func (p *printer) writeStart(start *StartElement) error {
	if start.Name.Local == "" {
		return fmt.Errorf("xml: start tag with no name")
	}

	p.tags = append(p.tags, start.Name)
	p.markPrefix()
	// Define any name spaces explicitly declared in the attributes.
	// We do this as a separate pass so that explicitly declared prefixes
	// will take precedence over implicitly declared prefixes
	// regardless of the order of the attributes.
	ignoreNonEmptyDefault := start.Name.Space == ""
	for _, attr := range start.Attr {
		if err := p.defineNS(attr, ignoreNonEmptyDefault); err != nil {
			return err
		}
	}
	// Define any new name spaces implied by the attributes.
	for _, attr := range start.Attr {
		name := attr.Name
		// From http://www.w3.org/TR/xml-names11/#defaulting
		// "Default namespace declarations do not apply directly
		// to attribute names; the interpretation of unprefixed
		// attributes is determined by the element on which they
		// appear."
		// This means we don't need to create a new namespace
		// when an attribute name space is empty.
		if name.Space != "" && !name.isNamespace() {
			p.createNSPrefix(name.Space, true)
		}
	}
	p.createNSPrefix(start.Name.Space, false)

	p.writeIndent(1)
	p.WriteByte('<')
	p.writeName(start.Name, false)
	p.writeNamespaces()
	for _, attr := range start.Attr {
		name := attr.Name
		if name.Local == "" || name.isNamespace() {
			// Namespaces have already been written by writeNamespaces above.
			continue
		}
		p.WriteByte(' ')
		p.writeName(name, true)
		p.WriteString(`="`)
		p.EscapeString(attr.Value)
		p.WriteByte('"')
	}
	p.WriteByte('>')
	return nil
}

// writeName writes the given name. It assumes
// that p.createNSPrefix(name) has already been called.
+func (p *printer) writeName(name Name, isAttr bool) { + if prefix := p.prefixForNS(name.Space, isAttr); prefix != "" { + p.WriteString(prefix) + p.WriteByte(':') + } + p.WriteString(name.Local) +} + +func (p *printer) writeEnd(name Name) error { + if name.Local == "" { + return fmt.Errorf("xml: end tag with no name") + } + if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" { + return fmt.Errorf("xml: end tag without start tag", name.Local) + } + if top := p.tags[len(p.tags)-1]; top != name { + if top.Local != name.Local { + return fmt.Errorf("xml: end tag does not match start tag <%s>", name.Local, top.Local) + } + return fmt.Errorf("xml: end tag in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space) + } + p.tags = p.tags[:len(p.tags)-1] + + p.writeIndent(-1) + p.WriteByte('<') + p.WriteByte('/') + p.writeName(name, false) + p.WriteByte('>') + p.popPrefix() + return nil +} + +func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) { + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(val.Int(), 10), nil, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(val.Uint(), 10), nil, nil + case reflect.Float32, reflect.Float64: + return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil + case reflect.String: + return val.String(), nil, nil + case reflect.Bool: + return strconv.FormatBool(val.Bool()), nil, nil + case reflect.Array: + if typ.Elem().Kind() != reflect.Uint8 { + break + } + // [...]byte + var bytes []byte + if val.CanAddr() { + bytes = val.Slice(0, val.Len()).Bytes() + } else { + bytes = make([]byte, val.Len()) + reflect.Copy(reflect.ValueOf(bytes), val) + } + return "", bytes, nil + case reflect.Slice: + if typ.Elem().Kind() != reflect.Uint8 { + break + } + // []byte + return "", 
val.Bytes(), nil + } + return "", nil, &UnsupportedTypeError{typ} +} + +var ddBytes = []byte("--") + +func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { + s := parentStack{p: p} + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr != 0 { + continue + } + vf := finfo.value(val) + + // Dereference or skip nil pointer, interface values. + switch vf.Kind() { + case reflect.Ptr, reflect.Interface: + if !vf.IsNil() { + vf = vf.Elem() + } + } + + switch finfo.flags & fMode { + case fCharData: + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + if vf.CanInterface() && vf.Type().Implements(textMarshalerType) { + data, err := vf.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + Escape(p, data) + continue + } + if vf.CanAddr() { + pv := vf.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + data, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + Escape(p, data) + continue + } + } + var scratch [64]byte + switch vf.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10)) + case reflect.Float32, reflect.Float64: + Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits())) + case reflect.Bool: + Escape(p, strconv.AppendBool(scratch[:0], vf.Bool())) + case reflect.String: + if err := EscapeText(p, []byte(vf.String())); err != nil { + return err + } + case reflect.Slice: + if elem, ok := vf.Interface().([]byte); ok { + if err := EscapeText(p, elem); err != nil { + return err + } + } + } + continue + + case fComment: + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + k := vf.Kind() + 
if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) { + return fmt.Errorf("xml: bad type for comment field of %s", val.Type()) + } + if vf.Len() == 0 { + continue + } + p.writeIndent(0) + p.WriteString("" is invalid grammar. Make it "- -->" + p.WriteByte(' ') + } + p.WriteString("-->") + continue + + case fInnerXml: + iface := vf.Interface() + switch raw := iface.(type) { + case []byte: + p.Write(raw) + continue + case string: + p.WriteString(raw) + continue + } + + case fElement, fElement | fAny: + if err := s.setParents(finfo, vf); err != nil { + return err + } + } + if err := p.marshalValue(vf, finfo, nil); err != nil { + return err + } + } + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + return p.cachedWriteError() +} + +var noField fieldInfo + +// return the bufio Writer's cached write error +func (p *printer) cachedWriteError() error { + _, err := p.Write(nil) + return err +} + +func (p *printer) writeIndent(depthDelta int) { + if len(p.prefix) == 0 && len(p.indent) == 0 { + return + } + if depthDelta < 0 { + p.depth-- + if p.indentedIn { + p.indentedIn = false + return + } + p.indentedIn = false + } + if p.putNewline { + p.WriteByte('\n') + } else { + p.putNewline = true + } + if len(p.prefix) > 0 { + p.WriteString(p.prefix) + } + if len(p.indent) > 0 { + for i := 0; i < p.depth; i++ { + p.WriteString(p.indent) + } + } + if depthDelta > 0 { + p.depth++ + p.indentedIn = true + } +} + +type parentStack struct { + p *printer + xmlns string + parents []string +} + +// setParents sets the stack of current parents to those found in finfo. +// It only writes the start elements if vf holds a non-nil value. +// If finfo is &noField, it pops all elements. 
func (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error {
	// The field's name space: explicit xmlns from the tag, else the
	// printer's current default.
	xmlns := s.p.defaultNS
	if finfo.xmlns != "" {
		xmlns = finfo.xmlns
	}
	// Count how many leading parents the new field shares with the
	// currently open chain; only valid when the name space matches.
	commonParents := 0
	if xmlns == s.xmlns {
		for ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ {
			if finfo.parents[commonParents] != s.parents[commonParents] {
				break
			}
		}
	}
	// Pop off any parents that aren't in common with the previous field.
	for i := len(s.parents) - 1; i >= commonParents; i-- {
		if err := s.p.writeEnd(Name{
			Space: s.xmlns,
			Local: s.parents[i],
		}); err != nil {
			return err
		}
	}
	s.parents = finfo.parents
	s.xmlns = xmlns
	if commonParents >= len(s.parents) {
		// No new elements to push.
		return nil
	}
	if (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() {
		// The element is nil, so no need for the start elements.
		s.parents = s.parents[:commonParents]
		return nil
	}
	// Push any new parents required.
	for _, name := range s.parents[commonParents:] {
		start := &StartElement{
			Name: Name{
				Space: s.xmlns,
				Local: name,
			},
		}
		// Set the default name space for parent elements
		// to match what we do with other elements.
		if s.xmlns != s.p.defaultNS {
			start.setDefaultNamespace()
		}
		if err := s.p.writeStart(start); err != nil {
			return err
		}
	}
	return nil
}

// An UnsupportedTypeError is returned when Marshal encounters a type
// that cannot be converted into XML.
type UnsupportedTypeError struct {
	Type reflect.Type
}

func (e *UnsupportedTypeError) Error() string {
	return "xml: unsupported type: " + e.Type.String()
}

// isEmptyValue reports whether v is the zero value for "omitempty"
// purposes: false, 0, a nil pointer/interface, or a zero-length
// array, map, slice, or string.
func isEmptyValue(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Interface, reflect.Ptr:
		return v.IsNil()
	}
	return false
}
diff --git a/pkg/webdav/internal/xml/read.go b/pkg/webdav/internal/xml/read.go
new file mode 100644
index 0000000..4089056
--- /dev/null
+++ b/pkg/webdav/internal/xml/read.go
@@ -0,0 +1,692 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xml

import (
	"bytes"
	"encoding"
	"errors"
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// BUG(rsc): Mapping between XML elements and data structures is inherently flawed:
// an XML element is an order-dependent collection of anonymous
// values, while a data structure is an order-independent collection
// of named values.
// See package json for a textual representation more suitable
// to data structures.

// Unmarshal parses the XML-encoded data and stores the result in
// the value pointed to by v, which must be an arbitrary struct,
// slice, or string. Well-formed data that does not fit into v is
// discarded.
//
// Because Unmarshal uses the reflect package, it can only assign
// to exported (upper case) fields. Unmarshal uses a case-sensitive
// comparison to match XML element names to tag values and struct
// field names.
+// +// Unmarshal maps an XML element to a struct using the following rules. +// In the rules, the tag of a field refers to the value associated with the +// key 'xml' in the struct field's tag (see the example above). +// +// * If the struct has a field of type []byte or string with tag +// ",innerxml", Unmarshal accumulates the raw XML nested inside the +// element in that field. The rest of the rules still apply. +// +// * If the struct has a field named XMLName of type xml.Name, +// Unmarshal records the element name in that field. +// +// * If the XMLName field has an associated tag of the form +// "name" or "namespace-URL name", the XML element must have +// the given name (and, optionally, name space) or else Unmarshal +// returns an error. +// +// * If the XML element has an attribute whose name matches a +// struct field name with an associated tag containing ",attr" or +// the explicit name in a struct field tag of the form "name,attr", +// Unmarshal records the attribute value in that field. +// +// * If the XML element contains character data, that data is +// accumulated in the first struct field that has tag ",chardata". +// The struct field may have type []byte or string. +// If there is no such field, the character data is discarded. +// +// * If the XML element contains comments, they are accumulated in +// the first struct field that has tag ",comment". The struct +// field may have type []byte or string. If there is no such +// field, the comments are discarded. +// +// * If the XML element contains a sub-element whose name matches +// the prefix of a tag formatted as "a" or "a>b>c", unmarshal +// will descend into the XML structure looking for elements with the +// given names, and will map the innermost elements to that struct +// field. A tag starting with ">" is equivalent to one starting +// with the field name followed by ">". 
+// +// * If the XML element contains a sub-element whose name matches +// a struct field's XMLName tag and the struct field has no +// explicit name tag as per the previous rule, unmarshal maps +// the sub-element to that struct field. +// +// * If the XML element contains a sub-element whose name matches a +// field without any mode flags (",attr", ",chardata", etc), Unmarshal +// maps the sub-element to that struct field. +// +// * If the XML element contains a sub-element that hasn't matched any +// of the above rules and the struct has a field with tag ",any", +// unmarshal maps the sub-element to that struct field. +// +// * An anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// * A struct field with tag "-" is never unmarshalled into. +// +// Unmarshal maps an XML element to a string or []byte by saving the +// concatenation of that element's character data in the string or +// []byte. The saved []byte is never nil. +// +// Unmarshal maps an attribute value to a string or []byte by saving +// the value in the string or slice. +// +// Unmarshal maps an XML element to a slice by extending the length of +// the slice and mapping the element to the newly created value. +// +// Unmarshal maps an XML element or attribute value to a bool by +// setting it to the boolean value represented by the string. +// +// Unmarshal maps an XML element or attribute value to an integer or +// floating-point field by setting the field to the result of +// interpreting the string value in decimal. There is no check for +// overflow. +// +// Unmarshal maps an XML element to an xml.Name by recording the +// element name. +// +// Unmarshal maps an XML element to a pointer by setting the pointer +// to a freshly allocated value and then mapping the element to that value. 
+// +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +// Decode works like xml.Unmarshal, except it reads the decoder +// stream to find the start element. +func (d *Decoder) Decode(v interface{}) error { + return d.DecodeElement(v, nil) +} + +// DecodeElement works like xml.Unmarshal except that it takes +// a pointer to the start XML element to decode into v. +// It is useful when a client reads some raw XML tokens itself +// but also wants to defer to Unmarshal for some elements. +func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return errors.New("non-pointer passed to Unmarshal") + } + return d.unmarshal(val.Elem(), start) +} + +// An UnmarshalError represents an error in the unmarshalling process. +type UnmarshalError string + +func (e UnmarshalError) Error() string { return string(e) } + +// Unmarshaler is the interface implemented by objects that can unmarshal +// an XML element description of themselves. +// +// UnmarshalXML decodes a single XML element +// beginning with the given start element. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXML must consume exactly one XML element. +// One common implementation strategy is to unmarshal into +// a separate value with a layout matching the expected XML +// using d.DecodeElement, and then to copy the data from +// that value into the receiver. +// Another common strategy is to use d.Token to process the +// XML object one token at a time. +// UnmarshalXML may not use d.RawToken. +type Unmarshaler interface { + UnmarshalXML(d *Decoder, start StartElement) error +} + +// UnmarshalerAttr is the interface implemented by objects that can unmarshal +// an XML attribute description of themselves. +// +// UnmarshalXMLAttr decodes a single XML attribute. 
+// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type UnmarshalerAttr interface { + UnmarshalXMLAttr(attr Attr) error +} + +// receiverType returns the receiver type to use in an expression like "%s.MethodName". +func receiverType(val interface{}) string { + t := reflect.TypeOf(val) + if t.Name() != "" { + return t.String() + } + return "(" + t.String() + ")" +} + +// unmarshalInterface unmarshals a single XML element into val. +// start is the opening tag of the element. +func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { + // Record that decoder must stop at end tag corresponding to start. + p.pushEOF() + + p.unmarshalDepth++ + err := val.UnmarshalXML(p, *start) + p.unmarshalDepth-- + if err != nil { + p.popEOF() + return err + } + + if !p.popEOF() { + return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) + } + + return nil +} + +// unmarshalTextInterface unmarshals a single XML element into val. +// The chardata contained in the element (but not its children) +// is passed to the text unmarshaler. +func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { + var buf []byte + depth := 1 + for depth > 0 { + t, err := p.Token() + if err != nil { + return err + } + switch t := t.(type) { + case CharData: + if depth == 1 { + buf = append(buf, t...) + } + case StartElement: + depth++ + case EndElement: + depth-- + } + } + return val.UnmarshalText(buf) +} + +// unmarshalAttr unmarshals a single XML attribute into val. 
+func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { + return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + } + + // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + } + + copyValue(val, []byte(attr.Value)) + return nil +} + +var ( + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Unmarshal a single XML element into val. +func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { + // Find start element if we need it. + if start == nil { + for { + tok, err := p.Token() + if err != nil { + return err + } + if t, ok := tok.(StartElement); ok { + start = &t + break + } + } + } + + // Load value from interface, but only if the result will be + // usefully addressable. 
+ if val.Kind() == reflect.Interface && !val.IsNil() { + e := val.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() { + val = e + } + } + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return p.unmarshalInterface(val.Interface().(Unmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { + return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) + } + } + + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) + } + } + + var ( + data []byte + saveData reflect.Value + comment []byte + saveComment reflect.Value + saveXML reflect.Value + saveXMLIndex int + saveXMLData []byte + saveAny reflect.Value + sv reflect.Value + tinfo *typeInfo + err error + ) + + switch v := val; v.Kind() { + default: + return errors.New("unknown type " + v.Type().String()) + + case reflect.Interface: + // TODO: For now, simply ignore the field. In the near + // future we may choose to unmarshal the start + // element on it, if not nil. + return p.Skip() + + case reflect.Slice: + typ := v.Type() + if typ.Elem().Kind() == reflect.Uint8 { + // []byte + saveData = v + break + } + + // Slice of element values. + // Grow slice. + n := v.Len() + if n >= v.Cap() { + ncap := 2 * n + if ncap < 4 { + ncap = 4 + } + new := reflect.MakeSlice(typ, n, ncap) + reflect.Copy(new, v) + v.Set(new) + } + v.SetLen(n + 1) + + // Recur to read element into slice. 
+ if err := p.unmarshal(v.Index(n), start); err != nil { + v.SetLen(n) + return err + } + return nil + + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: + saveData = v + + case reflect.Struct: + typ := v.Type() + if typ == nameType { + v.Set(reflect.ValueOf(start.Name)) + break + } + + sv = v + tinfo, err = getTypeInfo(typ) + if err != nil { + return err + } + + // Validate and assign element name. + if tinfo.xmlname != nil { + finfo := tinfo.xmlname + if finfo.name != "" && finfo.name != start.Name.Local { + return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") + } + if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " + if start.Name.Space == "" { + e += "no name space" + } else { + e += start.Name.Space + } + return UnmarshalError(e) + } + fv := finfo.value(sv) + if _, ok := fv.Interface().(Name); ok { + fv.Set(reflect.ValueOf(start.Name)) + } + } + + // Assign attributes. + // Also, determine whether we need to save character data or comments. + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fAttr: + strv := finfo.value(sv) + // Look for attribute. 
+ for _, a := range start.Attr { + if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { + if err := p.unmarshalAttr(strv, a); err != nil { + return err + } + break + } + } + + case fCharData: + if !saveData.IsValid() { + saveData = finfo.value(sv) + } + + case fComment: + if !saveComment.IsValid() { + saveComment = finfo.value(sv) + } + + case fAny, fAny | fElement: + if !saveAny.IsValid() { + saveAny = finfo.value(sv) + } + + case fInnerXml: + if !saveXML.IsValid() { + saveXML = finfo.value(sv) + if p.saved == nil { + saveXMLIndex = 0 + p.saved = new(bytes.Buffer) + } else { + saveXMLIndex = p.savedOffset() + } + } + } + } + } + + // Find end element. + // Process sub-elements along the way. +Loop: + for { + var savedOffset int + if saveXML.IsValid() { + savedOffset = p.savedOffset() + } + tok, err := p.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case StartElement: + consumed := false + if sv.IsValid() { + consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) + if err != nil { + return err + } + if !consumed && saveAny.IsValid() { + consumed = true + if err := p.unmarshal(saveAny, &t); err != nil { + return err + } + } + } + if !consumed { + if err := p.Skip(); err != nil { + return err + } + } + + case EndElement: + if saveXML.IsValid() { + saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] + if saveXMLIndex == 0 { + p.saved = nil + } + } + break Loop + + case CharData: + if saveData.IsValid() { + data = append(data, t...) + } + + case Comment: + if saveComment.IsValid() { + comment = append(comment, t...) 
+ } + } + } + + if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { + if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + + if saveData.IsValid() && saveData.CanAddr() { + pv := saveData.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + } + + if err := copyValue(saveData, data); err != nil { + return err + } + + switch t := saveComment; t.Kind() { + case reflect.String: + t.SetString(string(comment)) + case reflect.Slice: + t.Set(reflect.ValueOf(comment)) + } + + switch t := saveXML; t.Kind() { + case reflect.String: + t.SetString(string(saveXMLData)) + case reflect.Slice: + t.Set(reflect.ValueOf(saveXMLData)) + } + + return nil +} + +func copyValue(dst reflect.Value, src []byte) (err error) { + dst0 := dst + + if dst.Kind() == reflect.Ptr { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + // Save accumulated data. + switch dst.Kind() { + case reflect.Invalid: + // Probably a comment. 
+ default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetInt(itmp) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetUint(utmp) + case reflect.Float32, reflect.Float64: + ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) + if err != nil { + return err + } + dst.SetFloat(ftmp) + case reflect.Bool: + value, err := strconv.ParseBool(strings.TrimSpace(string(src))) + if err != nil { + return err + } + dst.SetBool(value) + case reflect.String: + dst.SetString(string(src)) + case reflect.Slice: + if len(src) == 0 { + // non-nil to flag presence + src = []byte{} + } + dst.SetBytes(src) + } + return nil +} + +// unmarshalPath walks down an XML structure looking for wanted +// paths, and calls unmarshal on them. +// The consumed result tells whether XML elements have been consumed +// from the Decoder until start's matching end element, or if it's +// still untouched because start is uninteresting for sv's fields. +func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { + recurse := false +Loop: + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + continue + } + for j := range parents { + if parents[j] != finfo.parents[j] { + continue Loop + } + } + if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { + // It's a perfect match, unmarshal the field. 
+ return true, p.unmarshal(finfo.value(sv), start) + } + if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { + // It's a prefix for the field. Break and recurse + // since it's not ok for one field path to be itself + // the prefix for another field path. + recurse = true + + // We can reuse the same slice as long as we + // don't try to append to it. + parents = finfo.parents[:len(parents)+1] + break + } + } + if !recurse { + // We have no business with this element. + return false, nil + } + // The element is not a perfect match for any field, but one + // or more fields have the path to this element as a parent + // prefix. Recurse and attempt to match these. + for { + var tok Token + tok, err = p.Token() + if err != nil { + return true, err + } + switch t := tok.(type) { + case StartElement: + consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) + if err != nil { + return true, err + } + if !consumed2 { + if err := p.Skip(); err != nil { + return true, err + } + } + case EndElement: + return true, nil + } + } +} + +// Skip reads tokens until it has consumed the end element +// matching the most recent start element already consumed. +// It recurs if it encounters a start element, so it can be used to +// skip nested structures. +// It returns nil if it finds an end element matching the start +// element; otherwise it returns an error describing the problem. +func (d *Decoder) Skip() error { + for { + tok, err := d.Token() + if err != nil { + return err + } + switch tok.(type) { + case StartElement: + if err := d.Skip(); err != nil { + return err + } + case EndElement: + return nil + } + } +} diff --git a/pkg/webdav/internal/xml/typeinfo.go b/pkg/webdav/internal/xml/typeinfo.go new file mode 100644 index 0000000..fdde288 --- /dev/null +++ b/pkg/webdav/internal/xml/typeinfo.go @@ -0,0 +1,371 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// typeInfo holds details for the xml representation of a type. +type typeInfo struct { + xmlname *fieldInfo + fields []fieldInfo +} + +// fieldInfo holds details for the xml representation of a single field. +type fieldInfo struct { + idx []int + name string + xmlns string + flags fieldFlags + parents []string +} + +type fieldFlags int + +const ( + fElement fieldFlags = 1 << iota + fAttr + fCharData + fInnerXml + fComment + fAny + + fOmitEmpty + + fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny +) + +var tinfoMap = make(map[reflect.Type]*typeInfo) +var tinfoLock sync.RWMutex + +var nameType = reflect.TypeOf(Name{}) + +// getTypeInfo returns the typeInfo structure with details necessary +// for marshalling and unmarshalling typ. +func getTypeInfo(typ reflect.Type) (*typeInfo, error) { + tinfoLock.RLock() + tinfo, ok := tinfoMap[typ] + tinfoLock.RUnlock() + if ok { + return tinfo, nil + } + tinfo = &typeInfo{} + if typ.Kind() == reflect.Struct && typ != nameType { + n := typ.NumField() + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.PkgPath != "" || f.Tag.Get("xml") == "-" { + continue // Private field + } + + // For embedded structs, embed its fields. + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Struct { + inner, err := getTypeInfo(t) + if err != nil { + return nil, err + } + if tinfo.xmlname == nil { + tinfo.xmlname = inner.xmlname + } + for _, finfo := range inner.fields { + finfo.idx = append([]int{i}, finfo.idx...) 
+ if err := addFieldInfo(typ, tinfo, &finfo); err != nil { + return nil, err + } + } + continue + } + } + + finfo, err := structFieldInfo(typ, &f) + if err != nil { + return nil, err + } + + if f.Name == "XMLName" { + tinfo.xmlname = finfo + continue + } + + // Add the field if it doesn't conflict with other fields. + if err := addFieldInfo(typ, tinfo, finfo); err != nil { + return nil, err + } + } + } + tinfoLock.Lock() + tinfoMap[typ] = tinfo + tinfoLock.Unlock() + return tinfo, nil +} + +// structFieldInfo builds and returns a fieldInfo for f. +func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { + finfo := &fieldInfo{idx: f.Index} + + // Split the tag from the xml namespace if necessary. + tag := f.Tag.Get("xml") + if i := strings.Index(tag, " "); i >= 0 { + finfo.xmlns, tag = tag[:i], tag[i+1:] + } + + // Parse flags. + tokens := strings.Split(tag, ",") + if len(tokens) == 1 { + finfo.flags = fElement + } else { + tag = tokens[0] + for _, flag := range tokens[1:] { + switch flag { + case "attr": + finfo.flags |= fAttr + case "chardata": + finfo.flags |= fCharData + case "innerxml": + finfo.flags |= fInnerXml + case "comment": + finfo.flags |= fComment + case "any": + finfo.flags |= fAny + case "omitempty": + finfo.flags |= fOmitEmpty + } + } + + // Validate the flags used. + valid := true + switch mode := finfo.flags & fMode; mode { + case 0: + finfo.flags |= fElement + case fAttr, fCharData, fInnerXml, fComment, fAny: + if f.Name == "XMLName" || tag != "" && mode != fAttr { + valid = false + } + default: + // This will also catch multiple modes in a single field. + valid = false + } + if finfo.flags&fMode == fAny { + finfo.flags |= fElement + } + if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 { + valid = false + } + if !valid { + return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + } + + // Use of xmlns without a name is not allowed. 
+ if finfo.xmlns != "" && tag == "" { + return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + + if f.Name == "XMLName" { + // The XMLName field records the XML element name. Don't + // process it as usual because its name should default to + // empty rather than to the field name. + finfo.name = tag + return finfo, nil + } + + if tag == "" { + // If the name part of the tag is completely empty, get + // default from XMLName of underlying struct if feasible, + // or field name otherwise. + if xmlname := lookupXMLName(f.Type); xmlname != nil { + finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name + } else { + finfo.name = f.Name + } + return finfo, nil + } + + if finfo.xmlns == "" && finfo.flags&fAttr == 0 { + // If it's an element no namespace specified, get the default + // from the XMLName of enclosing struct if possible. + if xmlname := lookupXMLName(typ); xmlname != nil { + finfo.xmlns = xmlname.xmlns + } + } + + // Prepare field name and parents. + parents := strings.Split(tag, ">") + if parents[0] == "" { + parents[0] = f.Name + } + if parents[len(parents)-1] == "" { + return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ) + } + finfo.name = parents[len(parents)-1] + if len(parents) > 1 { + if (finfo.flags & fElement) == 0 { + return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ",")) + } + finfo.parents = parents[:len(parents)-1] + } + + // If the field type has an XMLName field, the names must match + // so that the behavior of both marshalling and unmarshalling + // is straightforward and unambiguous. 
+ if finfo.flags&fElement != 0 { + ftyp := f.Type + xmlname := lookupXMLName(ftyp) + if xmlname != nil && xmlname.name != finfo.name { + return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName", + finfo.name, typ, f.Name, xmlname.name, ftyp) + } + } + return finfo, nil +} + +// lookupXMLName returns the fieldInfo for typ's XMLName field +// in case it exists and has a valid xml field tag, otherwise +// it returns nil. +func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + if typ.Kind() != reflect.Struct { + return nil + } + for i, n := 0, typ.NumField(); i < n; i++ { + f := typ.Field(i) + if f.Name != "XMLName" { + continue + } + finfo, err := structFieldInfo(typ, &f) + if finfo.name != "" && err == nil { + return finfo + } + // Also consider errors as a non-existent field tag + // and let getTypeInfo itself report the error. + break + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +// addFieldInfo adds finfo to tinfo.fields if there are no +// conflicts, or if conflicts arise from previous fields that were +// obtained from deeper embedded structures than finfo. In the latter +// case, the conflicting entries are dropped. +// A conflict occurs when the path (parent + name) to a field is +// itself a prefix of another path, or when two paths match exactly. +// It is okay for field paths to share a common, shorter prefix. +func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { + var conflicts []int +Loop: + // First, figure all conflicts. Most working code will have none. 
+ for i := range tinfo.fields { + oldf := &tinfo.fields[i] + if oldf.flags&fMode != newf.flags&fMode { + continue + } + if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns { + continue + } + minl := min(len(newf.parents), len(oldf.parents)) + for p := 0; p < minl; p++ { + if oldf.parents[p] != newf.parents[p] { + continue Loop + } + } + if len(oldf.parents) > len(newf.parents) { + if oldf.parents[len(newf.parents)] == newf.name { + conflicts = append(conflicts, i) + } + } else if len(oldf.parents) < len(newf.parents) { + if newf.parents[len(oldf.parents)] == oldf.name { + conflicts = append(conflicts, i) + } + } else { + if newf.name == oldf.name { + conflicts = append(conflicts, i) + } + } + } + // Without conflicts, add the new field and return. + if conflicts == nil { + tinfo.fields = append(tinfo.fields, *newf) + return nil + } + + // If any conflict is shallower, ignore the new field. + // This matches the Go field resolution on embedding. + for _, i := range conflicts { + if len(tinfo.fields[i].idx) < len(newf.idx) { + return nil + } + } + + // Otherwise, if any of them is at the same depth level, it's an error. + for _, i := range conflicts { + oldf := &tinfo.fields[i] + if len(oldf.idx) == len(newf.idx) { + f1 := typ.FieldByIndex(oldf.idx) + f2 := typ.FieldByIndex(newf.idx) + return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")} + } + } + + // Otherwise, the new field is shallower, and thus takes precedence, + // so drop the conflicting fields from tinfo and append the new one. + for c := len(conflicts) - 1; c >= 0; c-- { + i := conflicts[c] + copy(tinfo.fields[i:], tinfo.fields[i+1:]) + tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] + } + tinfo.fields = append(tinfo.fields, *newf) + return nil +} + +// A TagPathError represents an error in the unmarshalling process +// caused by the use of field tags with conflicting paths. 
+type TagPathError struct { + Struct reflect.Type + Field1, Tag1 string + Field2, Tag2 string +} + +func (e *TagPathError) Error() string { + return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) +} + +// value returns v's field value corresponding to finfo. +// It's equivalent to v.FieldByIndex(finfo.idx), but initializes +// and dereferences pointers as necessary. +func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { + for i, x := range finfo.idx { + if i > 0 { + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} diff --git a/pkg/webdav/internal/xml/xml.go b/pkg/webdav/internal/xml/xml.go new file mode 100644 index 0000000..be282bb --- /dev/null +++ b/pkg/webdav/internal/xml/xml.go @@ -0,0 +1,1998 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xml implements a simple XML 1.0 parser that +// understands XML name spaces. +package xml + +// References: +// Annotated XML spec: http://www.xml.com/axml/testaxml.htm +// XML name spaces: http://www.w3.org/TR/REC-xml-names/ + +// TODO(rsc): +// Test error handling. + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A SyntaxError represents a syntax error in the XML input stream. +type SyntaxError struct { + Msg string + Line int +} + +func (e *SyntaxError) Error() string { + return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg +} + +// A Name represents an XML name (Local) annotated with a name space +// identifier (Space). In tokens returned by Decoder.Token, the Space +// identifier is given as a canonical URL, not the short prefix used in +// the document being parsed. 
+// +// As a special case, XML namespace declarations will use the literal +// string "xmlns" for the Space field instead of the fully resolved URL. +// See Encoder.EncodeToken for more information on namespace encoding +// behaviour. +type Name struct { + Space, Local string +} + +// isNamespace reports whether the name is a namespace-defining name. +func (name Name) isNamespace() bool { + return name.Local == "xmlns" || name.Space == "xmlns" +} + +// An Attr represents an attribute in an XML element (Name=Value). +type Attr struct { + Name Name + Value string +} + +// A Token is an interface holding one of the token types: +// StartElement, EndElement, CharData, Comment, ProcInst, or Directive. +type Token interface{} + +// A StartElement represents an XML start element. +type StartElement struct { + Name Name + Attr []Attr +} + +func (e StartElement) Copy() StartElement { + attrs := make([]Attr, len(e.Attr)) + copy(attrs, e.Attr) + e.Attr = attrs + return e +} + +// End returns the corresponding XML end element. +func (e StartElement) End() EndElement { + return EndElement{e.Name} +} + +// setDefaultNamespace sets the namespace of the element +// as the default for all elements contained within it. +func (e *StartElement) setDefaultNamespace() { + if e.Name.Space == "" { + // If there's no namespace on the element, don't + // set the default. Strictly speaking this might be wrong, as + // we can't tell if the element had no namespace set + // or was just using the default namespace. + return + } + // Don't add a default name space if there's already one set. + for _, attr := range e.Attr { + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + return + } + } + e.Attr = append(e.Attr, Attr{ + Name: Name{ + Local: "xmlns", + }, + Value: e.Name.Space, + }) +} + +// An EndElement represents an XML end element. 
+type EndElement struct {
+	Name Name
+}
+
+// A CharData represents XML character data (raw text),
+// in which XML escape sequences have been replaced by
+// the characters they represent.
+type CharData []byte
+
+// makeCopy returns a fresh copy of b, so the caller may retain it
+// beyond the next call to Token (which reuses the decoder's buffer).
+func makeCopy(b []byte) []byte {
+	b1 := make([]byte, len(b))
+	copy(b1, b)
+	return b1
+}
+
+// Copy creates a new copy of CharData.
+func (c CharData) Copy() CharData { return CharData(makeCopy(c)) }
+
+// A Comment represents an XML comment of the form <!--comment-->.
+// The bytes do not include the <!-- and --> comment markers.
+type Comment []byte
+
+// Copy creates a new copy of Comment.
+func (c Comment) Copy() Comment { return Comment(makeCopy(c)) }
+
+// A ProcInst represents an XML processing instruction of the form <?target inst?>
+type ProcInst struct {
+	Target string
+	Inst   []byte
+}
+
+// Copy creates a new copy of ProcInst.
+func (p ProcInst) Copy() ProcInst {
+	p.Inst = makeCopy(p.Inst)
+	return p
+}
+
+// A Directive represents an XML directive of the form <!text>.
+// The bytes do not include the <! and > markers.
+type Directive []byte
+
+// Copy creates a new copy of Directive.
+func (d Directive) Copy() Directive { return Directive(makeCopy(d)) }
+
+// CopyToken returns a copy of a Token.
+func CopyToken(t Token) Token {
+	switch v := t.(type) {
+	case CharData:
+		return v.Copy()
+	case Comment:
+		return v.Copy()
+	case Directive:
+		return v.Copy()
+	case ProcInst:
+		return v.Copy()
+	case StartElement:
+		return v.Copy()
+	}
+	return t
+}
+
+// A Decoder represents an XML parser reading a particular input stream.
+// The parser assumes that its input is encoded in UTF-8.
+type Decoder struct {
+	// Strict defaults to true, enforcing the requirements
+	// of the XML specification.
+	// If set to false, the parser allows input containing common
+	// mistakes:
+	//	* If an element is missing an end tag, the parser invents
+	//	  end tags as necessary to keep the return values from Token
+	//	  properly balanced.
+	//	* In attribute values and character data, unknown or malformed
+	//	  character entities (sequences beginning with &) are left alone.
+	//
+	// Setting:
+	//
+	//	d.Strict = false;
+	//	d.AutoClose = HTMLAutoClose;
+	//	d.Entity = HTMLEntity
+	//
+	// creates a parser that can handle typical HTML.
+	//
+	// Strict mode does not enforce the requirements of the XML name spaces TR.
+	// In particular it does not reject name space tags using undefined prefixes.
+	// Such tags are recorded with the unknown prefix as the name space URL.
+	Strict bool
+
+	// When Strict == false, AutoClose indicates a set of elements to
+	// consider closed immediately after they are opened, regardless
+	// of whether an end element is present.
+	AutoClose []string
+
+	// Entity can be used to map non-standard entity names to string replacements.
+	// The parser behaves as if these standard mappings are present in the map,
+	// regardless of the actual map content:
+	//
+	//	"lt": "<",
+	//	"gt": ">",
+	//	"amp": "&",
+	//	"apos": "'",
+	//	"quot": `"`,
+	Entity map[string]string
+
+	// CharsetReader, if non-nil, defines a function to generate
+	// charset-conversion readers, converting from the provided
+	// non-UTF-8 charset into UTF-8. If CharsetReader is nil or
+	// returns an error, parsing stops with an error. One of the
+	// CharsetReader's result values must be non-nil.
+	CharsetReader func(charset string, input io.Reader) (io.Reader, error)
+
+	// DefaultSpace sets the default name space used for unadorned tags,
+	// as if the entire XML stream were wrapped in an element containing
+	// the attribute xmlns="DefaultSpace".
+	DefaultSpace string
+
+	// Internal parser state; see Token, rawToken and the stack helpers.
+	r              io.ByteReader
+	buf            bytes.Buffer
+	saved          *bytes.Buffer
+	stk            *stack
+	free           *stack
+	needClose      bool
+	toClose        Name
+	nextToken      Token
+	nextByte       int
+	ns             map[string]string
+	err            error
+	line           int
+	offset         int64
+	unmarshalDepth int
+}
+
+// NewDecoder creates a new XML parser reading from r.
+// If r does not implement io.ByteReader, NewDecoder will
+// do its own buffering.
+func NewDecoder(r io.Reader) *Decoder {
+	d := &Decoder{
+		ns:       make(map[string]string),
+		nextByte: -1,
+		line:     1,
+		Strict:   true,
+	}
+	d.switchToReader(r)
+	return d
+}
+
+// Token returns the next XML token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Slices of bytes in the returned token data refer to the
+// parser's internal buffer and remain valid only until the next
+// call to Token. To acquire a copy of the bytes, call CopyToken
+// or the token's Copy method.
+//
+// Token expands self-closing elements such as <br/>
+// into separate start and end elements returned by successive calls.
+//
+// Token guarantees that the StartElement and EndElement
+// tokens it returns are properly nested and matched:
+// if Token encounters an unexpected end element,
+// it will return an error.
+//
+// Token implements XML name spaces as described by
+// http://www.w3.org/TR/REC-xml-names/. Each of the
+// Name structures contained in the Token has the Space
+// set to the URL identifying its name space when known.
+// If Token encounters an unrecognized name space prefix,
+// it uses the prefix as the Space rather than report an error.
+func (d *Decoder) Token() (t Token, err error) {
+	if d.stk != nil && d.stk.kind == stkEOF {
+		err = io.EOF
+		return
+	}
+	if d.nextToken != nil {
+		t = d.nextToken
+		d.nextToken = nil
+	} else if t, err = d.rawToken(); err != nil {
+		return
+	}
+
+	if !d.Strict {
+		// Non-strict mode may invent an end tag; save the real token
+		// for the next call and return the invented one first.
+		if t1, ok := d.autoClose(t); ok {
+			d.nextToken = t
+			t = t1
+		}
+	}
+	switch t1 := t.(type) {
+	case StartElement:
+		// In XML name spaces, the translations listed in the
+		// attributes apply to the element name and
+		// to the other attribute names, so process
+		// the translations first.
+		for _, a := range t1.Attr {
+			if a.Name.Space == "xmlns" {
+				v, ok := d.ns[a.Name.Local]
+				d.pushNs(a.Name.Local, v, ok)
+				d.ns[a.Name.Local] = a.Value
+			}
+			if a.Name.Space == "" && a.Name.Local == "xmlns" {
+				// Default space for untagged names
+				v, ok := d.ns[""]
+				d.pushNs("", v, ok)
+				d.ns[""] = a.Value
+			}
+		}
+
+		d.translate(&t1.Name, true)
+		for i := range t1.Attr {
+			d.translate(&t1.Attr[i].Name, false)
+		}
+		d.pushElement(t1.Name)
+		t = t1
+
+	case EndElement:
+		d.translate(&t1.Name, true)
+		if !d.popElement(&t1) {
+			return nil, d.err
+		}
+		t = t1
+	}
+	return
+}
+
+const xmlURL = "http://www.w3.org/XML/1998/namespace"
+
+// Apply name space translation to name n.
+// The default name space (for Space=="")
+// applies only to element names, not to attribute names.
+func (d *Decoder) translate(n *Name, isElementName bool) {
+	switch {
+	case n.Space == "xmlns":
+		return
+	case n.Space == "" && !isElementName:
+		return
+	case n.Space == "xml":
+		n.Space = xmlURL
+	case n.Space == "" && n.Local == "xmlns":
+		return
+	}
+	if v, ok := d.ns[n.Space]; ok {
+		n.Space = v
+	} else if n.Space == "" {
+		n.Space = d.DefaultSpace
+	}
+}
+
+// switchToReader sets the decoder's input to r.
+func (d *Decoder) switchToReader(r io.Reader) {
+	// Get efficient byte at a time reader.
+	// Assume that if reader has its own
+	// ReadByte, it's efficient enough.
+	// Otherwise, use bufio.
+	if rb, ok := r.(io.ByteReader); ok {
+		d.r = rb
+	} else {
+		d.r = bufio.NewReader(r)
+	}
+}
+
+// Parsing state - stack holds old name space translations
+// and the current set of open elements. The translations to pop when
+// ending a given tag are *below* it on the stack, which is
+// more work but forced on us by XML.
+type stack struct {
+	next *stack
+	kind int
+	name Name
+	ok   bool
+}
+
+const (
+	stkStart = iota
+	stkNs
+	stkEOF
+)
+
+// push adds a new entry of the given kind to the top of the stack,
+// reusing an entry from the free list when one is available.
+func (d *Decoder) push(kind int) *stack {
+	s := d.free
+	if s != nil {
+		d.free = s.next
+	} else {
+		s = new(stack)
+	}
+	s.next = d.stk
+	s.kind = kind
+	d.stk = s
+	return s
+}
+
+// pop removes the top entry from the stack and moves it to the
+// free list for reuse; it returns nil when the stack is empty.
+func (d *Decoder) pop() *stack {
+	s := d.stk
+	if s != nil {
+		d.stk = s.next
+		s.next = d.free
+		d.free = s
+	}
+	return s
+}
+
+// Record that after the current element is finished
+// (that element is already pushed on the stack)
+// Token should return EOF until popEOF is called.
+func (d *Decoder) pushEOF() {
+	// Walk down stack to find Start.
+	// It might not be the top, because there might be stkNs
+	// entries above it.
+	start := d.stk
+	for start.kind != stkStart {
+		start = start.next
+	}
+	// The stkNs entries below a start are associated with that
+	// element too; skip over them.
+	for start.next != nil && start.next.kind == stkNs {
+		start = start.next
+	}
+	// Insert the stkEOF entry below start (and its stkNs entries),
+	// taking a node from the free list when possible.
+	s := d.free
+	if s != nil {
+		d.free = s.next
+	} else {
+		s = new(stack)
+	}
+	s.kind = stkEOF
+	s.next = start.next
+	start.next = s
+}
+
+// Undo a pushEOF.
+// The element must have been finished, so the EOF should be at the top of the stack.
+func (d *Decoder) popEOF() bool {
+	if d.stk == nil || d.stk.kind != stkEOF {
+		return false
+	}
+	d.pop()
+	return true
+}
+
+// Record that we are starting an element with the given name.
+func (d *Decoder) pushElement(name Name) {
+	s := d.push(stkStart)
+	s.name = name
+}
+
+// Record that we are changing the value of ns[local].
+// The old value is url, ok.
+func (d *Decoder) pushNs(local string, url string, ok bool) {
+	s := d.push(stkNs)
+	s.name.Local = local
+	s.name.Space = url
+	s.ok = ok
+}
+
+// Creates a SyntaxError with the current line number.
+func (d *Decoder) syntaxError(msg string) error {
+	return &SyntaxError{Msg: msg, Line: d.line}
+}
+
+// Record that we are ending an element with the given name.
+// The name must match the record at the top of the stack,
+// which must be a pushElement record.
+// After popping the element, apply any undo records from
+// the stack to restore the name translations that existed
+// before we saw this element.
+func (d *Decoder) popElement(t *EndElement) bool { + s := d.pop() + name := t.Name + switch { + case s == nil || s.kind != stkStart: + d.err = d.syntaxError("unexpected end element ") + return false + case s.name.Local != name.Local: + if !d.Strict { + d.needClose = true + d.toClose = t.Name + t.Name = s.name + return true + } + d.err = d.syntaxError("element <" + s.name.Local + "> closed by ") + return false + case s.name.Space != name.Space: + d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space + + "closed by in space " + name.Space) + return false + } + + // Pop stack until a Start or EOF is on the top, undoing the + // translations that were associated with the element we just closed. + for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF { + s := d.pop() + if s.ok { + d.ns[s.name.Local] = s.name.Space + } else { + delete(d.ns, s.name.Local) + } + } + + return true +} + +// If the top element on the stack is autoclosing and +// t is not the end tag, invent the end tag. +func (d *Decoder) autoClose(t Token) (Token, bool) { + if d.stk == nil || d.stk.kind != stkStart { + return nil, false + } + name := strings.ToLower(d.stk.name.Local) + for _, s := range d.AutoClose { + if strings.ToLower(s) == name { + // This one should be auto closed if t doesn't close it. + et, ok := t.(EndElement) + if !ok || et.Name.Local != name { + return EndElement{d.stk.name}, true + } + break + } + } + return nil, false +} + +var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method") + +// RawToken is like Token but does not verify that +// start and end elements match and does not translate +// name space prefixes to their corresponding URLs. 
+func (d *Decoder) RawToken() (Token, error) { + if d.unmarshalDepth > 0 { + return nil, errRawToken + } + return d.rawToken() +} + +func (d *Decoder) rawToken() (Token, error) { + if d.err != nil { + return nil, d.err + } + if d.needClose { + // The last element we read was self-closing and + // we returned just the StartElement half. + // Return the EndElement half now. + d.needClose = false + return EndElement{d.toClose}, nil + } + + b, ok := d.getc() + if !ok { + return nil, d.err + } + + if b != '<' { + // Text section. + d.ungetc(b) + data := d.text(-1, false) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + switch b { + case '/': + // ' { + d.err = d.syntaxError("invalid characters between ") + return nil, d.err + } + return EndElement{name}, nil + + case '?': + // ' { + break + } + b0 = b + } + data := d.buf.Bytes() + data = data[0 : len(data)-2] // chop ?> + + if target == "xml" { + content := string(data) + ver := procInst("version", content) + if ver != "" && ver != "1.0" { + d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver) + return nil, d.err + } + enc := procInst("encoding", content) + if enc != "" && enc != "utf-8" && enc != "UTF-8" { + if d.CharsetReader == nil { + d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc) + return nil, d.err + } + newr, err := d.CharsetReader(enc, d.r.(io.Reader)) + if err != nil { + d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err) + return nil, d.err + } + if newr == nil { + panic("CharsetReader returned a nil Reader for charset " + enc) + } + d.switchToReader(newr) + } + } + return ProcInst{target, data}, nil + + case '!': + // ' { + break + } + b0, b1 = b1, b + } + data := d.buf.Bytes() + data = data[0 : len(data)-3] // chop --> + return Comment(data), nil + + case '[': // . 
+ data := d.text(-1, true) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + // Probably a directive: , , etc. + // We don't care, but accumulate for caller. Quoted angle + // brackets do not count for nesting. + d.buf.Reset() + d.buf.WriteByte(b) + inquote := uint8(0) + depth := 0 + for { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if inquote == 0 && b == '>' && depth == 0 { + break + } + HandleB: + d.buf.WriteByte(b) + switch { + case b == inquote: + inquote = 0 + + case inquote != 0: + // in quotes, no special action + + case b == '\'' || b == '"': + inquote = b + + case b == '>' && inquote == 0: + depth-- + + case b == '<' && inquote == 0: + // Look for