From 7862cfa25da7896b79bfff8d08834d730edfbec8 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Mon, 27 May 2024 15:50:30 +0200
Subject: [PATCH 001/203] Fix nomination-election process for cluster leader (#948)

When the cluster leader is nominated and should be elected for backup
execution (e.g. due to having the highest priority), the current
implementation blocks the election goroutine. This fix removes the block
and allows the cluster leader to accept the nomination.
---
 cmd/pbm-agent/backup.go | 22 ++++++----------------
 1 file changed, 6 insertions(+), 16 deletions(-)

diff --git a/cmd/pbm-agent/backup.go b/cmd/pbm-agent/backup.go
index daae0bede..f0f683844 100644
--- a/cmd/pbm-agent/backup.go
+++ b/cmd/pbm-agent/backup.go
@@ -4,8 +4,6 @@ import (
 	"context"
 	"time"
 
-	"golang.org/x/sync/errgroup"
-
 	"github.com/percona/percona-backup-mongodb/pbm/backup"
 	"github.com/percona/percona-backup-mongodb/pbm/config"
 	"github.com/percona/percona-backup-mongodb/pbm/ctrl"
@@ -185,20 +183,12 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID,
 		return
 	}
 
-	errGrp, grpCtx := errgroup.WithContext(ctx)
-	for i := range shards {
-		rs := shards[i].RS
-
-		errGrp.Go(func() error {
-			err := a.nominateRS(grpCtx, cmd.Name, rs, nodes.RS(rs))
-			return errors.Wrapf(err, "nodes nomination for %s", rs)
-		})
-	}
-
-	err = errGrp.Wait()
-	if err != nil {
-		l.Error(err.Error())
-		return
+	for _, sh := range shards {
+		go func(rs string) {
+			if err := a.nominateRS(ctx, cmd.Name, rs, nodes.RS(rs)); err != nil {
+				l.Error("nodes nomination error for %s: %v", rs, err)
+			}
+		}(sh.RS)
 	}
 }

From e321a23d59a34529f5d86f7ee64022c9b4f8743c Mon Sep 17 00:00:00 2001
From: Ivan Groenewold <9805809+igroene@users.noreply.github.com>
Date: Tue, 28 May 2024 05:18:37 -0300
Subject: [PATCH 002/203] Improve logging when not the primary (#947)

---
 cmd/pbm-agent/restore.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/pbm-agent/restore.go b/cmd/pbm-agent/restore.go
index 6ba38f150..2329380c3 100644
--- a/cmd/pbm-agent/restore.go
+++ b/cmd/pbm-agent/restore.go
@@ -404,7 +404,7 @@ func (a *Agent) Restore(ctx context.Context, r *ctrl.RestoreCmd, opid ctrl.OPID,
 	switch bcpType {
 	case defs.LogicalBackup:
 		if !nodeInfo.IsPrimary {
-			l.Info("Node in not suitable for restore")
+			l.Info("This node is not the primary. 
Check pbm agent on the primary for restore progress") return } if r.OplogTS.IsZero() { From 2cb2d414242a011fbaf8113b5ebb6a44c72a7670 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 27 May 2024 08:31:20 +0200 Subject: [PATCH 003/203] update vscode settings --- .gitignore | 1 + .vscode/extensions.json | 3 +++ .vscode/settings.json | 60 +++++++++++++++++++++-------------------- 3 files changed, 35 insertions(+), 29 deletions(-) create mode 100644 .vscode/extensions.json diff --git a/.gitignore b/.gitignore index de8ab492f..73fa2eb3c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ .env .vscode/* !.vscode/settings.json +!.vscode/extensions.json .idea/ /bin/ /.dev diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 000000000..12c44d319 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,3 @@ +{ + "recommendations": ["golang.go", "aleksandra.go-group-imports"] +} diff --git a/.vscode/settings.json b/.vscode/settings.json index 4dfb78533..9eafda9cb 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,32 +1,34 @@ { - "[go]": { - "editor.defaultFormatter": "golang.go", - "editor.insertSpaces": false, - "editor.tabSize": 4, - }, - "[json][jsonc][yaml]": { - "editor.insertSpaces": true, - "editor.tabSize": 2, - }, - "[shellscript]": { - "editor.insertSpaces": false, - "editor.tabSize": 4, - }, - "files.eol": "\n", - "files.insertFinalNewline": true, - "files.trimFinalNewlines": true, - "files.trimTrailingWhitespace": true, - "go.formatTool": "gofumpt", - "go.lintTool": "golangci-lint", - "go.useLanguageServer": true, - "gopls": { - "analyses": { - "composites": false, - "deepequalerrors": false + "[go]": { + "editor.defaultFormatter": "golang.go", + "editor.insertSpaces": false, + "editor.tabSize": 4 }, - "formatting.gofumpt": true, - "formatting.local": "github.com/percona" - }, - "groupImports.onSave": true, - "shellformat.flag": "-bn -ci -s" + "[json][jsonc][yaml]": { + "editor.insertSpaces": true, + "editor.tabSize": 2 + }, + "[shellscript]": { + "editor.insertSpaces": false, + "editor.tabSize": 4 + }, + "files.encoding": "utf8", + "files.eol": "\n", + "files.insertFinalNewline": true, + "files.trimFinalNewlines": true, + "files.trimTrailingWhitespace": true, + "go.formatFlags": ["-extra"], + "go.lintTool": "golangci-lint", + "go.useLanguageServer": true, + "gopls": { + "analyses": { + "composites": false, + "deepequalerrors": false, + "fieldalignment": false + }, + "formatting.gofumpt": true, + "formatting.local": "github.com/percona" + }, + "groupImports.onSave": true, + "shellformat.flag": "-bn -ci -s" } From d89ae8956ec6b433a4812966ae8d820a37fc903d Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 27 May 2024 09:15:37 +0200 Subject: [PATCH 004/203] check oplog availability on agent start --- cmd/pbm-agent/agent.go | 6 ++++++ cmd/pbm-agent/backup.go | 7 ------- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 6e4dfcf57..54672377b 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -82,6 +82,12 @@ func (a *Agent) CanStart(ctx context.Context) error { return errors.Wrap(err, "get node info") } + if info.IsStandalone() { + return errors.New("mongod node can not be used to fetch a consistent " + + "backup because it has no oplog. 
Please restart it as a primary " + + "in a single-node replicaset to make it compatible with PBM's " + + "backup method using the oplog") + } if info.Msg == "isdbgrid" { return errors.New("mongos is not supported") } diff --git a/cmd/pbm-agent/backup.go b/cmd/pbm-agent/backup.go index f0f683844..c730cad1e 100644 --- a/cmd/pbm-agent/backup.go +++ b/cmd/pbm-agent/backup.go @@ -58,13 +58,6 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, l.Error("get node info: %v", err) return } - // TODO: do the check on the agent start only - if nodeInfo.IsStandalone() { - l.Error("mongod node can not be used to fetch a consistent backup because it has no oplog. " + - "Please restart it as a primary in a single-node replicaset " + - "to make it compatible with PBM's backup method using the oplog") - return - } isClusterLeader := nodeInfo.IsClusterLeader() canRunBackup, err := topo.NodeSuitsExt(ctx, a.nodeConn, nodeInfo, cmd.Type) From 78a5845a0f0670ad91ee02ff3e5533225a7dd8cf Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 27 May 2024 11:05:29 +0200 Subject: [PATCH 005/203] improve storage checks and init --- cmd/pbm-agent/agent.go | 17 ++++---- pbm/backup/backup.go | 16 ++++++++ pbm/resync/rsync.go | 21 ++++++---- pbm/storage/storage.go | 93 ++++++++++++++++++++++++++++++++++++++---- sdk/impl.go | 7 +--- 5 files changed, 125 insertions(+), 29 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 54672377b..01247513b 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -397,16 +397,13 @@ func (a *Agent) storStatus(ctx context.Context, log log.LogEvent, forceCheckStor return topo.SubsysStatus{Err: fmt.Sprintf("unable to get storage: %v", err)} } - _, err = stg.FileStat(defs.StorInitFile) - if errors.Is(err, storage.ErrNotExist) { - err := stg.Save(defs.StorInitFile, bytes.NewBufferString(version.Current().Version), 0) - if err != nil { - return topo.SubsysStatus{ - Err: fmt.Sprintf("storage: no init file, attempt to create failed: %v", err), - } - } - } else if err != nil { - return topo.SubsysStatus{Err: fmt.Sprintf("storage check failed with: %v", err)} + ok, err := storage.IsStorageInitialized(ctx, stg) + if err != nil { + errStr := fmt.Sprintf("storage check failed with: %v", err) + return topo.SubsysStatus{Err: errStr} + } + if !ok { + return topo.SubsysStatus{Err: "storage is not initialized"} } return topo.SubsysStatus{OK: true} diff --git a/pbm/backup/backup.go b/pbm/backup/backup.go index f2f614ecf..cb397ab75 100644 --- a/pbm/backup/backup.go +++ b/pbm/backup/backup.go @@ -266,7 +266,23 @@ func (b *Backup) Run(ctx context.Context, bcp *ctrl.BackupCmd, opid ctrl.OPID, l } } }() + } + + err = storage.HasReadAccess(ctx, stg) + if err != nil { + if !errors.Is(err, storage.ErrUninitialized) { + return errors.Wrap(err, "check read access") + } + + if inf.IsLeader() { + err = storage.InitStorage(ctx, stg) + if err != nil { + return errors.Wrap(err, "init storage") + } + } + } + if inf.IsSharded() && inf.IsLeader() { if bcpm.BalancerStatus == topo.BalancerModeOn { err = topo.SetBalancerStatus(ctx, b.leadConn, topo.BalancerModeOff) if err != nil { diff --git a/pbm/resync/rsync.go b/pbm/resync/rsync.go index 3b1df92ab..d78380184 100644 --- a/pbm/resync/rsync.go +++ b/pbm/resync/rsync.go @@ -1,7 +1,6 @@ package resync import ( - "bytes" "context" "encoding/json" "strings" @@ -18,7 +17,6 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/restore" "github.com/percona/percona-backup-mongodb/pbm/storage" 
"github.com/percona/percona-backup-mongodb/pbm/util" - "github.com/percona/percona-backup-mongodb/pbm/version" ) // ResyncStorage updates PBM metadata (snapshots and pitr) according to the data in the storage @@ -28,12 +26,21 @@ func ResyncStorage(ctx context.Context, m connect.Client, l log.LogEvent) error return errors.Wrap(err, "unable to get backup store") } - _, err = stg.FileStat(defs.StorInitFile) - if errors.Is(err, storage.ErrNotExist) { - err = stg.Save(defs.StorInitFile, bytes.NewBufferString(version.Current().Version), 0) - } + err = storage.HasReadAccess(ctx, stg) if err != nil { - return errors.Wrap(err, "init storage") + if !errors.Is(err, storage.ErrUninitialized) { + return errors.Wrap(err, "check read access") + } + + err = storage.InitStorage(ctx, stg) + if err != nil { + return errors.Wrap(err, "init storage") + } + } else { + err = storage.ReinitStorage(ctx, stg) + if err != nil { + return errors.Wrap(err, "reinit storage") + } } rstrs, err := stg.List(defs.PhysRestoresDir, ".json") diff --git a/pbm/storage/storage.go b/pbm/storage/storage.go index 30a0c428e..145c6f939 100644 --- a/pbm/storage/storage.go +++ b/pbm/storage/storage.go @@ -3,16 +3,20 @@ package storage import ( "context" "io" + "strings" "github.com/percona/percona-backup-mongodb/pbm/compress" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/log" + "github.com/percona/percona-backup-mongodb/pbm/version" ) var ( // ErrNotExist is an error for file doesn't exists on storage - ErrNotExist = errors.New("no such file") - ErrEmpty = errors.New("file is empty") + ErrNotExist = errors.New("no such file") + ErrEmpty = errors.New("file is empty") + ErrUninitialized = errors.New("uninitialized") ) // Type represents a type of the destination storage for backups @@ -63,12 +67,87 @@ func ParseType(s string) Type { } } -// HasReadAccess checks if the storage has read access to the specified file. -// It returns true if read access is available, otherwise it returns false. -// If an error occurs during the check, it returns the error. -func HasReadAccess(ctx context.Context, stg Storage) (bool, error) { +// IsStorageInitialized checks if there is PBM init file on the storage. +func IsStorageInitialized(ctx context.Context, stg Storage) (bool, error) { _, err := stg.FileStat(defs.StorInitFile) - return err == nil, err + if err != nil { + if errors.Is(err, ErrNotExist) { + return false, nil + } + + return false, errors.Wrap(err, "file stat") + } + + return true, nil +} + +// HasReadAccess checks if the provided storage allows the reading of file content. +// +// It gets the size (stat) and reads the content of the PBM init file. +// +// ErrUninitialized is returned if there is no init file. +func HasReadAccess(ctx context.Context, stg Storage) error { + stat, err := stg.FileStat(defs.StorInitFile) + if err != nil { + if errors.Is(err, ErrNotExist) { + return ErrUninitialized + } + + return errors.Wrap(err, "file stat") + } + + r, err := stg.SourceReader(defs.StorInitFile) + if err != nil { + return errors.Wrap(err, "open file") + } + defer func() { + err := r.Close() + if err != nil { + log.LogEventFromContext(ctx). 
+				Error("HasReadAccess(): close file: %v", err)
+		}
+	}()
+
+	const MaxCount = 10 // for "v999.99.99"
+	var buf [MaxCount]byte
+	n, err := r.Read(buf[:])
+	if err != nil && !errors.Is(err, io.EOF) {
+		return errors.Wrap(err, "read file")
+	}
+
+	expect := MaxCount
+	if stat.Size < int64(expect) {
+		expect = int(stat.Size)
+	}
+	if n != expect {
+		return errors.Errorf("short read (%d of %d)", n, expect)
+	}
+
+	return nil
+}
+
+// InitStorage writes the current PBM version to the PBM init file.
+//
+// It does not handle the "file already exists" error.
+func InitStorage(ctx context.Context, stg Storage) error {
+	err := stg.Save(defs.StorInitFile, strings.NewReader(version.Current().Version), 0)
+	if err != nil {
+		return errors.Wrap(err, "write init file")
+	}
+
+	return nil
+}
+
+// ReinitStorage deletes the existing PBM init file and creates a new one with the current PBM version.
+//
+// It expects that the file exists.
+func ReinitStorage(ctx context.Context, stg Storage) error {
+	err := stg.Delete(defs.StorInitFile)
+	if err != nil {
+		return errors.Wrap(err, "delete init file")
+	}
+
+	return InitStorage(ctx, stg)
+}
 
 // rwError multierror for the read/compress/write-to-store operations set
diff --git a/sdk/impl.go b/sdk/impl.go
index c515679ed..57531a041 100644
--- a/sdk/impl.go
+++ b/sdk/impl.go
@@ -207,13 +207,10 @@ func getStorageForRead(ctx context.Context, cc connect.Client) (storage.Storage,
 	if err != nil {
 		return nil, errors.Wrap(err, "get storage")
 	}
-	ok, err := storage.HasReadAccess(ctx, stg)
-	if err != nil {
+	err = storage.HasReadAccess(ctx, stg)
+	if err != nil && !errors.Is(err, storage.ErrUninitialized) {
 		return nil, errors.Wrap(err, "check storage access")
 	}
-	if !ok {
-		return nil, errors.New("no read permission for configured storage")
-	}
 
 	return stg, nil
 }

From 33c2df2a4f406117eaddb6066d950e87184e54e4 Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Mon, 27 May 2024 11:10:47 +0200
Subject: [PATCH 006/203] fix lint warnings

---
 e2e-tests/cmd/ensure-oplog/main.go                |  3 +--
 e2e-tests/pkg/pbm/pbm_ctl.go                      | 12 ++++++------
 .../pkg/tests/sharded/test_backup_cancellation.go |  1 +
 pbm/backup/backup.go                              |  2 +-
 pbm/backup/query.go                               |  2 +-
 pbm/config/config.go                              |  7 +------
 pbm/connect/connect.go                            |  4 ++--
 pbm/util/sel.go                                   |  8 +++-----
 sdk/sdk.go                                        |  2 +-
 9 files changed, 17 insertions(+), 24 deletions(-)

diff --git a/e2e-tests/cmd/ensure-oplog/main.go b/e2e-tests/cmd/ensure-oplog/main.go
index 19e1e1f6a..a0e742251 100644
--- a/e2e-tests/cmd/ensure-oplog/main.go
+++ b/e2e-tests/cmd/ensure-oplog/main.go
@@ -250,8 +250,7 @@ func ensureReplsetOplog(ctx context.Context, uri string, from, till primitive.Ti
 		return errors.Wrap(err, "get config")
 	}
 
-	stg, err := util.StorageFromConfig(cfg.Storage,
-		log.FromContext(ctx).NewDefaultEvent())
+	stg, err := util.StorageFromConfig(&cfg.Storage, log.FromContext(ctx).NewDefaultEvent())
 	if err != nil {
 		return errors.Wrap(err, "get storage")
 	}
diff --git a/e2e-tests/pkg/pbm/pbm_ctl.go b/e2e-tests/pkg/pbm/pbm_ctl.go
index 6798721e1..94e3e0407 100644
--- a/e2e-tests/pkg/pbm/pbm_ctl.go
+++ b/e2e-tests/pkg/pbm/pbm_ctl.go
@@ -150,12 +150,12 @@ func skipCtl(str string) []byte {
 
 func (c *Ctl) CheckRestore(bcpName string, waitFor time.Duration) error {
 	type rlist struct {
-		Start    int
-		Status   defs.Status
-		Type     string
-		Name     string
-		Snapshot string
-		Error    string
+		Start    int         `json:"Start"`
+		Status   defs.Status `json:"Status"`
+		Type     string      `json:"Type"`
+		Name     string      `json:"Name"`
+		Snapshot string      `json:"Snapshot"`
+		Error    string      `json:"Error"`
 	}
 	tmr := time.NewTimer(waitFor)
 	tkr 
:= time.NewTicker(500 * time.Millisecond) diff --git a/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go b/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go index 16539dde4..7cb70449e 100644 --- a/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go +++ b/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go @@ -48,6 +48,7 @@ func (c *Cluster) BackupCancellation(storage string) { } } +//nolint:unused func checkNoBackupFiles(backupName, conf string) { log.Println("check no artifacts left for backup", backupName) buf, err := os.ReadFile(conf) diff --git a/pbm/backup/backup.go b/pbm/backup/backup.go index cb397ab75..f39f5aa98 100644 --- a/pbm/backup/backup.go +++ b/pbm/backup/backup.go @@ -381,7 +381,7 @@ Loop: } } - if bs != nil { + if bs == nil { return topo.BalancerMode("") } diff --git a/pbm/backup/query.go b/pbm/backup/query.go index 99ecdfe3c..de3c5efc1 100644 --- a/pbm/backup/query.go +++ b/pbm/backup/query.go @@ -25,7 +25,7 @@ type dbMangerImpl struct { conn connect.Client } -func NewDBManager(conn connect.Client) Manager { +func NewDBManager(conn connect.Client) *dbMangerImpl { return &dbMangerImpl{conn: conn} } diff --git a/pbm/config/config.go b/pbm/config/config.go index 96e906499..70ab33565 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -31,12 +31,7 @@ var errMissedConfig = errors.New("missed config") type confMap map[string]reflect.Kind // _confmap is a list of config's valid keys and its types -var _confmap confMap - -//nolint:gochecknoinits -func init() { - _confmap = keys(reflect.TypeOf(Config{})) -} +var _confmap confMap = keys(reflect.TypeOf(Config{})) func keys(t reflect.Type) confMap { v := make(confMap) diff --git a/pbm/connect/connect.go b/pbm/connect/connect.go index 728539858..ccd6455c9 100644 --- a/pbm/connect/connect.go +++ b/pbm/connect/connect.go @@ -183,7 +183,7 @@ type clientImpl struct { options *options.ClientOptions } -func UnsafeClient(m *mongo.Client) Client { +func UnsafeClient(m *mongo.Client) *clientImpl { return &clientImpl{ client: m, options: options.Client(), @@ -193,7 +193,7 @@ func UnsafeClient(m *mongo.Client) Client { // Connect resolves MongoDB connection to Primary member and wraps it within Client object. // In case of replica set it returns connection to Primary member, // while in case of sharded cluster it returns connection to Config RS Primary member. 
-func Connect(ctx context.Context, uri, appName string) (Client, error) { +func Connect(ctx context.Context, uri, appName string) (*clientImpl, error) { client, opts, err := MongoConnectWithOpts(ctx, uri, AppName(appName)) if err != nil { return nil, errors.Wrap(err, "create mongo connection") diff --git a/pbm/util/sel.go b/pbm/util/sel.go index 80d584b64..737dacf01 100644 --- a/pbm/util/sel.go +++ b/pbm/util/sel.go @@ -44,9 +44,7 @@ func ContainsColl(ns string) bool { // ContainsSpecifiedColl inspects if any collection exists for multi-ns func ContainsSpecifiedColl(nss []string) bool { - return slices.ContainsFunc(nss, func(ns string) bool { - return ContainsColl(ns) - }) + return slices.ContainsFunc(nss, ContainsColl) } func MakeSelectedPred(nss []string) archive.NSFilterFn { @@ -88,7 +86,7 @@ type ChunkSelector interface { type nsChunkMap map[string]struct{} -func NewNSChunkSelector() ChunkSelector { +func NewNSChunkSelector() nsChunkMap { return make(nsChunkMap) } @@ -114,7 +112,7 @@ func (s nsChunkMap) BuildFilter() bson.D { type uuidChunkMap map[string]struct{} -func NewUUIDChunkSelector() ChunkSelector { +func NewUUIDChunkSelector() uuidChunkMap { return make(uuidChunkMap) } diff --git a/sdk/sdk.go b/sdk/sdk.go index 9819950e7..67b9d9c0f 100644 --- a/sdk/sdk.go +++ b/sdk/sdk.go @@ -129,7 +129,7 @@ type Client interface { SyncFromStorage(ctx context.Context) (CommandID, error) } -func NewClient(ctx context.Context, uri string) (Client, error) { +func NewClient(ctx context.Context, uri string) (*clientImpl, error) { conn, err := connect.Connect(ctx, uri, "sdk") if err != nil { return nil, err From a00061a130d495e450c9112fa146861038b22ece Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 27 May 2024 14:38:16 +0200 Subject: [PATCH 007/203] move the blackhole storage to pbm-speed-test --- {pbm/storage/blackhole => cmd/pbm-speed-test}/blackhole.go | 6 +++--- cmd/pbm-speed-test/main.go | 3 +-- e2e-tests/docker/conf/bh.yaml | 2 -- pbm/config/config.go | 4 ---- pbm/storage/storage.go | 3 --- pbm/util/storage.go | 3 --- 6 files changed, 4 insertions(+), 17 deletions(-) rename {pbm/storage/blackhole => cmd/pbm-speed-test}/blackhole.go (93%) delete mode 100644 e2e-tests/docker/conf/bh.yaml diff --git a/pbm/storage/blackhole/blackhole.go b/cmd/pbm-speed-test/blackhole.go similarity index 93% rename from pbm/storage/blackhole/blackhole.go rename to cmd/pbm-speed-test/blackhole.go index fe0d94276..4d5ccb541 100644 --- a/pbm/storage/blackhole/blackhole.go +++ b/cmd/pbm-speed-test/blackhole.go @@ -1,4 +1,4 @@ -package blackhole +package main import ( "io" @@ -8,12 +8,12 @@ import ( type Blackhole struct{} -func New() *Blackhole { +func newBlackhole() *Blackhole { return &Blackhole{} } func (*Blackhole) Type() storage.Type { - return storage.BlackHole + return "blackhole" } func (*Blackhole) Save(_ string, data io.Reader, _ int64) error { diff --git a/cmd/pbm-speed-test/main.go b/cmd/pbm-speed-test/main.go index 833b8a088..505947024 100644 --- a/cmd/pbm-speed-test/main.go +++ b/cmd/pbm-speed-test/main.go @@ -13,7 +13,6 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/compress" "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/log" - "github.com/percona/percona-backup-mongodb/pbm/storage/blackhole" "github.com/percona/percona-backup-mongodb/pbm/util" "github.com/percona/percona-backup-mongodb/pbm/version" ) @@ -93,7 +92,7 @@ func testCompression(mURL string, compression compress.CompressionType, level *i defer cn.Disconnect(ctx) 
//nolint:errcheck } - stg := blackhole.New() + stg := newBlackhole() done := make(chan struct{}) go printw(done) diff --git a/e2e-tests/docker/conf/bh.yaml b/e2e-tests/docker/conf/bh.yaml deleted file mode 100644 index 9c6a83acc..000000000 --- a/e2e-tests/docker/conf/bh.yaml +++ /dev/null @@ -1,2 +0,0 @@ -storage: - type: blackhole \ No newline at end of file diff --git a/pbm/config/config.go b/pbm/config/config.go index 70ab33565..14424e2e4 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -147,8 +147,6 @@ func (s *StorageConf) Typ() string { return "Azure" case storage.Filesystem: return "FS" - case storage.BlackHole: - return "BlackHole" case storage.Undef: fallthrough default: @@ -179,8 +177,6 @@ func (s *StorageConf) Path() string { } case storage.Filesystem: path = s.Filesystem.Path - case storage.BlackHole: - path = "BlackHole" } return path diff --git a/pbm/storage/storage.go b/pbm/storage/storage.go index 145c6f939..7b246ed20 100644 --- a/pbm/storage/storage.go +++ b/pbm/storage/storage.go @@ -27,7 +27,6 @@ const ( S3 Type = "s3" Azure Type = "azure" Filesystem Type = "filesystem" - BlackHole Type = "blackhole" ) type FileInfo struct { @@ -60,8 +59,6 @@ func ParseType(s string) Type { return Azure case string(Filesystem): return Filesystem - case string(BlackHole): - return BlackHole default: return Undef } diff --git a/pbm/util/storage.go b/pbm/util/storage.go index 8544e4243..4c9120b50 100644 --- a/pbm/util/storage.go +++ b/pbm/util/storage.go @@ -9,7 +9,6 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/log" "github.com/percona/percona-backup-mongodb/pbm/storage" "github.com/percona/percona-backup-mongodb/pbm/storage/azure" - "github.com/percona/percona-backup-mongodb/pbm/storage/blackhole" "github.com/percona/percona-backup-mongodb/pbm/storage/fs" "github.com/percona/percona-backup-mongodb/pbm/storage/s3" ) @@ -26,8 +25,6 @@ func StorageFromConfig(cfg config.StorageConf, l log.LogEvent) (storage.Storage, return azure.New(cfg.Azure, l) case storage.Filesystem: return fs.New(cfg.Filesystem) - case storage.BlackHole: - return blackhole.New(), nil case storage.Undef: return nil, ErrStorageUndefined default: From 14f18d3f542024b30b85b1d2df904b764c6b9008 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 27 May 2024 15:04:46 +0200 Subject: [PATCH 008/203] in config, keep selected storage only --- cmd/pbm-agent/agent.go | 1 - cmd/pbm-agent/backup.go | 2 +- cmd/pbm-agent/restore.go | 2 +- cmd/pbm/config.go | 4 +-- cmd/pbm/restore.go | 2 +- pbm/backup/backup.go | 7 ++-- pbm/backup/logical.go | 2 +- pbm/backup/storage.go | 4 +-- pbm/backup/types.go | 6 +++- pbm/config/config.go | 66 ++++++++++++++++++++------------------ pbm/restore/logical.go | 2 +- pbm/restore/physical.go | 2 +- pbm/storage/azure/azure.go | 6 ++-- pbm/storage/fs/fs.go | 6 ++-- pbm/storage/s3/download.go | 4 +-- pbm/storage/s3/s3.go | 8 ++--- pbm/storage/storage.go | 4 +-- pbm/util/storage.go | 6 ++-- 18 files changed, 70 insertions(+), 64 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 01247513b..d69f0d6c4 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -1,7 +1,6 @@ package main import ( - "bytes" "context" "fmt" "sync" diff --git a/cmd/pbm-agent/backup.go b/cmd/pbm-agent/backup.go index c730cad1e..efe96df1e 100644 --- a/cmd/pbm-agent/backup.go +++ b/cmd/pbm-agent/backup.go @@ -98,7 +98,7 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, l.Error("unable to get PBM config settings: " + err.Error()) return 
} - if storage.ParseType(string(cfg.Storage.Type)) == storage.Undef { + if storage.ParseType(string(cfg.Storage.Type)) == storage.Undefined { l.Error("backups cannot be saved because PBM storage configuration hasn't been set yet") return } diff --git a/cmd/pbm-agent/restore.go b/cmd/pbm-agent/restore.go index 2329380c3..64f4aacbe 100644 --- a/cmd/pbm-agent/restore.go +++ b/cmd/pbm-agent/restore.go @@ -220,7 +220,7 @@ func (a *Agent) pitr(ctx context.Context) error { return nil } - stg, err := util.StorageFromConfig(cfg.Storage, l) + stg, err := util.StorageFromConfig(&cfg.Storage, l) if err != nil { return errors.Wrap(err, "unable to get storage configuration") } diff --git a/cmd/pbm/config.go b/cmd/pbm/config.go index 30e38c92a..192644cd3 100644 --- a/cmd/pbm/config.go +++ b/cmd/pbm/config.go @@ -113,7 +113,7 @@ func runConfig(ctx context.Context, conn connect.Client, pbm sdk.Client, c *conf return nil, errors.Wrap(err, "unable to read config file") } - var newCfg config.Config + var newCfg *config.Config err = yaml.UnmarshalStrict(buf, &newCfg) if err != nil { return nil, errors.Wrap(err, "unable to unmarshal config file") @@ -127,7 +127,7 @@ func runConfig(ctx context.Context, conn connect.Client, pbm sdk.Client, c *conf oldCfg = &config.Config{} } - if err := config.SetConfig(ctx, conn, &newCfg); err != nil { + if err := config.SetConfig(ctx, conn, newCfg); err != nil { return nil, errors.Wrap(err, "unable to set config: write to db") } diff --git a/cmd/pbm/restore.go b/cmd/pbm/restore.go index 27935d515..45feacff8 100644 --- a/cmd/pbm/restore.go +++ b/cmd/pbm/restore.go @@ -573,7 +573,7 @@ func getRestoreMetaStg(cfgPath string) (storage.Storage, error) { } l := log.New(nil, "cli", "").NewEvent("", "", "", primitive.Timestamp{}) - return util.StorageFromConfig(cfg.Storage, l) + return util.StorageFromConfig(&cfg.Storage, l) } func describeRestore(ctx context.Context, conn connect.Client, o descrRestoreOpts) (fmt.Stringer, error) { diff --git a/pbm/backup/backup.go b/pbm/backup/backup.go index f39f5aa98..a526678b4 100644 --- a/pbm/backup/backup.go +++ b/pbm/backup/backup.go @@ -98,7 +98,7 @@ func (b *Backup) Init( bcp *ctrl.BackupCmd, opid ctrl.OPID, inf *topo.NodeInfo, - store config.StorageConf, + store config.Storage, balancer topo.BalancerMode, l log.LogEvent, ) error { @@ -113,7 +113,6 @@ func (b *Backup) Init( Name: bcp.Name, Namespaces: bcp.Namespaces, Compression: bcp.Compression, - Store: store, StartTS: time.Now().Unix(), Status: defs.StatusStarting, Replsets: []BackupReplset{}, @@ -132,11 +131,11 @@ func (b *Backup) Init( if err != nil { return errors.Wrap(err, "unable to get PBM config settings") } - _, err = util.StorageFromConfig(cfg.Storage, l) + _, err = util.StorageFromConfig(&cfg.Storage, l) if errors.Is(err, util.ErrStorageUndefined) { return errors.New("backups cannot be saved because PBM storage configuration hasn't been set yet") } - meta.Store = cfg.Storage + meta.Store = Storage{cfg.Storage} fcv, err := version.GetFCV(ctx, b.nodeConn) if err != nil { diff --git a/pbm/backup/logical.go b/pbm/backup/logical.go index 88d38a3e9..415eceedd 100644 --- a/pbm/backup/logical.go +++ b/pbm/backup/logical.go @@ -155,7 +155,7 @@ func (b *Backup) doLogical( snapshotSize, err := snapshot.UploadDump(ctx, dump, func(ns, ext string, r io.Reader) error { - stg, err := util.StorageFromConfig(cfg.Storage, l) + stg, err := util.StorageFromConfig(&cfg.Storage, l) if err != nil { return errors.Wrap(err, "get storage") } diff --git a/pbm/backup/storage.go b/pbm/backup/storage.go index 
075264f8f..5a72c7d6f 100644 --- a/pbm/backup/storage.go +++ b/pbm/backup/storage.go @@ -24,11 +24,11 @@ type StorageManager interface { } type storageManagerImpl struct { - cfg config.StorageConf + cfg *config.Storage stg storage.Storage } -func NewStorageManager(ctx context.Context, cfg config.StorageConf) (*storageManagerImpl, error) { +func NewStorageManager(ctx context.Context, cfg *config.Storage) (*storageManagerImpl, error) { stg, err := util.StorageFromConfig(cfg, log.LogEventFromContext(ctx)) if err != nil { return nil, errors.Wrap(err, "unable to get backup store") diff --git a/pbm/backup/types.go b/pbm/backup/types.go index 8012254e8..0d65a7929 100644 --- a/pbm/backup/types.go +++ b/pbm/backup/types.go @@ -41,7 +41,7 @@ type BackupMeta struct { Namespaces []string `bson:"nss,omitempty" json:"nss,omitempty"` Replsets []BackupReplset `bson:"replsets" json:"replsets"` Compression compress.CompressionType `bson:"compression" json:"compression"` - Store config.StorageConf `bson:"store" json:"store"` + Store Storage `bson:"store" json:"store"` Size int64 `bson:"size" json:"size"` MongoVersion string `bson:"mongodb_version" json:"mongodb_version"` FCV string `bson:"fcv" json:"fcv"` @@ -86,6 +86,10 @@ func (b *BackupMeta) RS(name string) *BackupReplset { return nil } +type Storage struct { + config.Storage `bson:",inline" json:",inline"` +} + // BackupRsNomination is used to choose (nominate and elect) nodes for the backup // within a replica set type BackupRsNomination struct { diff --git a/pbm/config/config.go b/pbm/config/config.go index 14424e2e4..5e12ee22d 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -62,34 +62,38 @@ func validateConfigKey(k string) bool { // Config is a pbm config type Config struct { PITR PITRConf `bson:"pitr" json:"pitr" yaml:"pitr"` - Storage StorageConf `bson:"storage" json:"storage" yaml:"storage"` Restore RestoreConf `bson:"restore" json:"restore,omitempty" yaml:"restore,omitempty"` Backup BackupConf `bson:"backup" json:"backup,omitempty" yaml:"backup,omitempty"` + Storage Storage `bson:"storage" json:"storage" yaml:"storage"` Epoch primitive.Timestamp `bson:"epoch" json:"-" yaml:"-"` } -func (c Config) String() string { - if c.Storage.S3.Credentials.AccessKeyID != "" { - c.Storage.S3.Credentials.AccessKeyID = "***" - } - if c.Storage.S3.Credentials.SecretAccessKey != "" { - c.Storage.S3.Credentials.SecretAccessKey = "***" - } - if c.Storage.S3.Credentials.SessionToken != "" { - c.Storage.S3.Credentials.SessionToken = "***" - } - if c.Storage.S3.Credentials.Vault.Secret != "" { - c.Storage.S3.Credentials.Vault.Secret = "***" - } - if c.Storage.S3.Credentials.Vault.Token != "" { - c.Storage.S3.Credentials.Vault.Token = "***" - } - if c.Storage.S3.ServerSideEncryption != nil && - c.Storage.S3.ServerSideEncryption.SseCustomerKey != "" { - c.Storage.S3.ServerSideEncryption.SseCustomerKey = "***" +func (c *Config) String() string { + if c.Storage.S3 != nil { + if c.Storage.S3.Credentials.AccessKeyID != "" { + c.Storage.S3.Credentials.AccessKeyID = "***" + } + if c.Storage.S3.Credentials.SecretAccessKey != "" { + c.Storage.S3.Credentials.SecretAccessKey = "***" + } + if c.Storage.S3.Credentials.SessionToken != "" { + c.Storage.S3.Credentials.SessionToken = "***" + } + if c.Storage.S3.Credentials.Vault.Secret != "" { + c.Storage.S3.Credentials.Vault.Secret = "***" + } + if c.Storage.S3.Credentials.Vault.Token != "" { + c.Storage.S3.Credentials.Vault.Token = "***" + } + if c.Storage.S3.ServerSideEncryption != nil && + 
c.Storage.S3.ServerSideEncryption.SseCustomerKey != "" { + c.Storage.S3.ServerSideEncryption.SseCustomerKey = "***" + } } - if c.Storage.Azure.Credentials.Key != "" { - c.Storage.Azure.Credentials.Key = "***" + if c.Storage.Azure != nil { + if c.Storage.Azure.Credentials.Key != "" { + c.Storage.Azure.Credentials.Key = "***" + } } b, err := yaml.Marshal(c) @@ -131,15 +135,15 @@ type PITRConf struct { CompressionLevel *int `bson:"compressionLevel,omitempty" json:"compressionLevel,omitempty" yaml:"compressionLevel,omitempty"` } -// StorageConf is a configuration of the backup storage -type StorageConf struct { - Type storage.Type `bson:"type" json:"type" yaml:"type"` - S3 s3.Conf `bson:"s3,omitempty" json:"s3,omitempty" yaml:"s3,omitempty"` - Azure azure.Conf `bson:"azure,omitempty" json:"azure,omitempty" yaml:"azure,omitempty"` - Filesystem fs.Conf `bson:"filesystem,omitempty" json:"filesystem,omitempty" yaml:"filesystem,omitempty"` +// Storage is a configuration of the backup storage +type Storage struct { + Type storage.Type `bson:"type" json:"type" yaml:"type"` + S3 *s3.Config `bson:"s3,omitempty" json:"s3,omitempty" yaml:"s3,omitempty"` + Azure *azure.Config `bson:"azure,omitempty" json:"azure,omitempty" yaml:"azure,omitempty"` + Filesystem *fs.Config `bson:"filesystem,omitempty" json:"filesystem,omitempty" yaml:"filesystem,omitempty"` } -func (s *StorageConf) Typ() string { +func (s *Storage) Typ() string { switch s.Type { case storage.S3: return "S3" @@ -147,14 +151,14 @@ func (s *StorageConf) Typ() string { return "Azure" case storage.Filesystem: return "FS" - case storage.Undef: + case storage.Undefined: fallthrough default: return "Unknown" } } -func (s *StorageConf) Path() string { +func (s *Storage) Path() string { path := "" switch s.Type { case storage.S3: diff --git a/pbm/restore/logical.go b/pbm/restore/logical.go index 04e35e7a5..ed613aaa6 100644 --- a/pbm/restore/logical.go +++ b/pbm/restore/logical.go @@ -736,7 +736,7 @@ func (r *Restore) RunSnapshot( rdr, err = snapshot.DownloadDump( func(ns string) (io.ReadCloser, error) { - stg, err := util.StorageFromConfig(cfg.Storage, r.log) + stg, err := util.StorageFromConfig(&cfg.Storage, r.log) if err != nil { return nil, errors.Wrap(err, "get storage") } diff --git a/pbm/restore/physical.go b/pbm/restore/physical.go index 7331dd3eb..9db7baf55 100644 --- a/pbm/restore/physical.go +++ b/pbm/restore/physical.go @@ -1777,7 +1777,7 @@ func (r *PhysRestore) init(ctx context.Context, name string, opid ctrl.OPID, l l return errors.Wrap(err, "get pbm config") } - r.stg, err = util.StorageFromConfig(cfg.Storage, l) + r.stg, err = util.StorageFromConfig(&cfg.Storage, l) if err != nil { return errors.Wrap(err, "get storage") } diff --git a/pbm/storage/azure/azure.go b/pbm/storage/azure/azure.go index ec5bdc41f..e1a63e4cf 100644 --- a/pbm/storage/azure/azure.go +++ b/pbm/storage/azure/azure.go @@ -31,7 +31,7 @@ const ( maxBlocks = 50_000 ) -type Conf struct { +type Config struct { Account string `bson:"account" json:"account,omitempty" yaml:"account,omitempty"` Container string `bson:"container" json:"container,omitempty" yaml:"container,omitempty"` EndpointURL string `bson:"endpointUrl" json:"endpointUrl,omitempty" yaml:"endpointUrl,omitempty"` @@ -44,13 +44,13 @@ type Credentials struct { } type Blob struct { - opts Conf + opts *Config log log.LogEvent // url *url.URL c *azblob.Client } -func New(opts Conf, l log.LogEvent) (*Blob, error) { +func New(opts *Config, l log.LogEvent) (*Blob, error) { if l == nil { l = log.DiscardEvent } diff 
--git a/pbm/storage/fs/fs.go b/pbm/storage/fs/fs.go index 5f25b6b8b..f5396b49c 100644 --- a/pbm/storage/fs/fs.go +++ b/pbm/storage/fs/fs.go @@ -11,11 +11,11 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/storage" ) -type Conf struct { +type Config struct { Path string `bson:"path" json:"path" yaml:"path"` } -func (c *Conf) Cast() error { +func (c *Config) Cast() error { if c.Path == "" { return errors.New("path can't be empty") } @@ -27,7 +27,7 @@ type FS struct { root string } -func New(opts Conf) (*FS, error) { +func New(opts *Config) (*FS, error) { info, err := os.Lstat(opts.Path) if err != nil { if os.IsNotExist(err) { diff --git a/pbm/storage/s3/download.go b/pbm/storage/s3/download.go index c6b2369da..733c766e6 100644 --- a/pbm/storage/s3/download.go +++ b/pbm/storage/s3/download.go @@ -190,7 +190,7 @@ type partReader struct { getSess func() (*s3.S3, error) l log.LogEvent - opts *Conf + opts *Config buf []byte // preallocated buf for io.Copy taskq chan chunkMeta @@ -203,7 +203,7 @@ func (s *S3) newPartReader(fname string, fsize int64, chunkSize int) *partReader return &partReader{ l: s.log, buf: make([]byte, 32*1024), - opts: &s.opts, + opts: s.opts, fname: fname, fsize: fsize, chunkSize: int64(chunkSize), diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go index 51fec7ca7..fc714d746 100644 --- a/pbm/storage/s3/s3.go +++ b/pbm/storage/s3/s3.go @@ -42,7 +42,7 @@ const ( ) //nolint:lll -type Conf struct { +type Config struct { Provider S3Provider `bson:"provider,omitempty" json:"provider,omitempty" yaml:"provider,omitempty"` Region string `bson:"region" json:"region" yaml:"region"` EndpointURL string `bson:"endpointUrl,omitempty" json:"endpointUrl" yaml:"endpointUrl,omitempty"` @@ -129,7 +129,7 @@ type AWSsse struct { SseCustomerKey string `bson:"sseCustomerKey" json:"sseCustomerKey" yaml:"sseCustomerKey"` } -func (c *Conf) Cast() error { +func (c *Config) Cast() error { if c.Region == "" { c.Region = defaultS3Region } @@ -222,14 +222,14 @@ const ( ) type S3 struct { - opts Conf + opts *Config log log.LogEvent s3s *s3.S3 d *Download // default downloader for small files } -func New(opts Conf, l log.LogEvent) (*S3, error) { +func New(opts *Config, l log.LogEvent) (*S3, error) { err := opts.Cast() if err != nil { return nil, errors.Wrap(err, "cast options") diff --git a/pbm/storage/storage.go b/pbm/storage/storage.go index 7b246ed20..376efe1b4 100644 --- a/pbm/storage/storage.go +++ b/pbm/storage/storage.go @@ -23,7 +23,7 @@ var ( type Type string const ( - Undef Type = "" + Undefined Type = "" S3 Type = "s3" Azure Type = "azure" Filesystem Type = "filesystem" @@ -60,7 +60,7 @@ func ParseType(s string) Type { case string(Filesystem): return Filesystem default: - return Undef + return Undefined } } diff --git a/pbm/util/storage.go b/pbm/util/storage.go index 4c9120b50..1d3447a98 100644 --- a/pbm/util/storage.go +++ b/pbm/util/storage.go @@ -17,7 +17,7 @@ import ( var ErrStorageUndefined = errors.New("storage undefined") // StorageFromConfig creates and returns a storage object based on a given config -func StorageFromConfig(cfg config.StorageConf, l log.LogEvent) (storage.Storage, error) { +func StorageFromConfig(cfg *config.Storage, l log.LogEvent) (storage.Storage, error) { switch cfg.Type { case storage.S3: return s3.New(cfg.S3, l) @@ -25,7 +25,7 @@ func StorageFromConfig(cfg config.StorageConf, l log.LogEvent) (storage.Storage, return azure.New(cfg.Azure, l) case storage.Filesystem: return fs.New(cfg.Filesystem) - case storage.Undef: + case storage.Undefined: return 
nil, ErrStorageUndefined default: return nil, errors.Errorf("unknown storage type %s", cfg.Type) @@ -40,5 +40,5 @@ func GetStorage(ctx context.Context, m connect.Client, l log.LogEvent) (storage. return nil, errors.Wrap(err, "get config") } - return StorageFromConfig(c.Storage, l) + return StorageFromConfig(&c.Storage, l) } From 01b608643ca0736fc343b249014c743f78ec1c4a Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 27 May 2024 15:45:47 +0200 Subject: [PATCH 009/203] in config, keep pitr field if it is configured --- cmd/pbm-agent/restore.go | 14 ++++---- e2e-tests/cmd/ensure-oplog/main.go | 10 ++++-- pbm/backup/backup.go | 2 +- pbm/config/config.go | 51 +++++++++++++++++------------- pbm/defs/defs.go | 12 +++++-- pbm/slicer/slicer.go | 8 +++-- 6 files changed, 61 insertions(+), 36 deletions(-) diff --git a/cmd/pbm-agent/restore.go b/cmd/pbm-agent/restore.go index 64f4aacbe..958e8ad4e 100644 --- a/cmd/pbm-agent/restore.go +++ b/cmd/pbm-agent/restore.go @@ -132,12 +132,14 @@ func (a *Agent) pitr(ctx context.Context) error { if !errors.Is(err, mongo.ErrNoDocuments) { return errors.Wrap(err, "get conf") } - cfg = &config.Config{} + cfg = &config.Config{ + Oplog: &config.GlobalSlicer{}, + } } - a.stopPitrOnOplogOnlyChange(cfg.PITR.OplogOnly) + a.stopPitrOnOplogOnlyChange(cfg.Oplog.OplogOnly) - if !cfg.PITR.Enabled { + if cfg.Oplog == nil || !cfg.Oplog.Enabled { a.removePitr() return nil } @@ -228,7 +230,7 @@ func (a *Agent) pitr(ctx context.Context) error { ibcp := slicer.NewSlicer(a.brief.SetName, a.leadConn, a.nodeConn, stg, cfg, log.FromContext(ctx)) ibcp.SetSpan(slicerInterval) - if cfg.PITR.OplogOnly { + if cfg.Oplog.OplogOnly { err = ibcp.OplogOnlyCatchup(ctx) } else { err = ibcp.Catchup(ctx) @@ -261,8 +263,8 @@ func (a *Agent) pitr(ctx context.Context) error { streamErr := ibcp.Stream(ctx, stopC, w, - cfg.PITR.Compression, - cfg.PITR.CompressionLevel, + cfg.Oplog.Compression, + cfg.Oplog.CompressionLevel, cfg.Backup.Timeouts) if streamErr != nil { out := l.Error diff --git a/e2e-tests/cmd/ensure-oplog/main.go b/e2e-tests/cmd/ensure-oplog/main.go index a0e742251..e7897ef5e 100644 --- a/e2e-tests/cmd/ensure-oplog/main.go +++ b/e2e-tests/cmd/ensure-oplog/main.go @@ -19,6 +19,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/compress" "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/connect" + "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/log" "github.com/percona/percona-backup-mongodb/pbm/oplog" @@ -255,7 +256,12 @@ func ensureReplsetOplog(ctx context.Context, uri string, from, till primitive.Ti return errors.Wrap(err, "get storage") } - compression := compress.CompressionType(cfg.PITR.Compression) + compression := defs.DefaultCompression + compressionLevel := (*int)(nil) + if cfg.Oplog != nil { + compression = compress.CompressionType(cfg.Oplog.Compression) + compressionLevel = cfg.Oplog.CompressionLevel + } for _, t := range missedChunks { logger.Printf("[%s] ensure missed chunk: %s - %s", @@ -265,7 +271,7 @@ func ensureReplsetOplog(ctx context.Context, uri string, from, till primitive.Ti o := oplog.NewOplogBackup(m) o.SetTailingSpan(t.from, t.till) - n, err := storage.Upload(ctx, o, stg, compression, cfg.PITR.CompressionLevel, filename, -1) + n, err := storage.Upload(ctx, o, stg, compression, compressionLevel, filename, -1) if err != nil { return errors.Wrapf(err, "failed to upload %s - %s chunk", 
formatTimestamp(t.from), formatTimestamp(t.till)) diff --git a/pbm/backup/backup.go b/pbm/backup/backup.go index a526678b4..297cab867 100644 --- a/pbm/backup/backup.go +++ b/pbm/backup/backup.go @@ -87,7 +87,7 @@ func (b *Backup) SetSlicerInterval(d time.Duration) { func (b *Backup) SlicerInterval() time.Duration { if b.oplogSlicerInterval == 0 { - return defs.PITRdefaultSpan + return defs.DefaultPITRInterval } return b.oplogSlicerInterval diff --git a/pbm/config/config.go b/pbm/config/config.go index 5e12ee22d..c14c868d0 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -61,10 +61,10 @@ func validateConfigKey(k string) bool { // Config is a pbm config type Config struct { - PITR PITRConf `bson:"pitr" json:"pitr" yaml:"pitr"` Restore RestoreConf `bson:"restore" json:"restore,omitempty" yaml:"restore,omitempty"` Backup BackupConf `bson:"backup" json:"backup,omitempty" yaml:"backup,omitempty"` Storage Storage `bson:"storage" json:"storage" yaml:"storage"` + Oplog *GlobalSlicer `bson:"pitr,omitempty" json:"pitr,omitempty" yaml:"pitr,omitempty"` Epoch primitive.Timestamp `bson:"epoch" json:"-" yaml:"-"` } @@ -106,12 +106,12 @@ func (c *Config) String() string { // OplogSlicerInterval returns interval for general oplog slicer routine. // If it is not configured, the function returns default (hardcoded) value 10 mins. -func (c Config) OplogSlicerInterval() time.Duration { - if c.PITR.OplogSpanMin == 0 { - return defs.PITRdefaultSpan +func (c *Config) OplogSlicerInterval() time.Duration { + if c.Oplog == nil || c.Oplog.Interval == 0 { + return defs.DefaultPITRInterval } - return time.Duration(c.PITR.OplogSpanMin * float64(time.Minute)) + return time.Duration(c.Oplog.Interval * float64(time.Minute)) } // BackupSlicerInterval returns interval for backup slicer routine. 
@@ -124,12 +124,12 @@ func (c Config) BackupSlicerInterval() time.Duration { return time.Duration(c.Backup.OplogSpanMin * float64(time.Minute)) } -// PITRConf is a Point-In-Time Recovery options +// GlobalSlicer is a Point-In-Time Recovery options // //nolint:lll -type PITRConf struct { +type GlobalSlicer struct { Enabled bool `bson:"enabled" json:"enabled" yaml:"enabled"` - OplogSpanMin float64 `bson:"oplogSpanMin" json:"oplogSpanMin" yaml:"oplogSpanMin"` + Interval float64 `bson:"oplogSpanMin" json:"oplogSpanMin" yaml:"oplogSpanMin"` OplogOnly bool `bson:"oplogOnly,omitempty" json:"oplogOnly,omitempty" yaml:"oplogOnly,omitempty"` Compression compress.CompressionType `bson:"compression,omitempty" json:"compression,omitempty" yaml:"compression,omitempty"` CompressionLevel *int `bson:"compressionLevel,omitempty" json:"compressionLevel,omitempty" yaml:"compressionLevel,omitempty"` @@ -245,14 +245,17 @@ func GetConfig(ctx context.Context, m connect.Client) (*Config, error) { return nil, errors.Wrap(err, "decode") } + if cfg.Oplog == nil { + cfg.Oplog = &GlobalSlicer{} + } if cfg.Backup.Compression == "" { cfg.Backup.Compression = compress.CompressionTypeS2 } - if cfg.PITR.Compression == "" { - cfg.PITR.Compression = cfg.Backup.Compression + if cfg.Oplog.Compression == "" { + cfg.Oplog.Compression = cfg.Backup.Compression } - if cfg.PITR.CompressionLevel == nil { - cfg.PITR.CompressionLevel = cfg.Backup.CompressionLevel + if cfg.Oplog.CompressionLevel == nil { + cfg.Oplog.CompressionLevel = cfg.Backup.CompressionLevel } return cfg, nil @@ -279,8 +282,10 @@ func SetConfig(ctx context.Context, m connect.Client, cfg *Config) error { } } - if c := string(cfg.PITR.Compression); c != "" && !compress.IsValidCompressionType(c) { - return errors.Errorf("unsupported compression type: %q", c) + if cfg.Oplog != nil { + if c := string(cfg.Oplog.Compression); c != "" && !compress.IsValidCompressionType(c) { + return errors.Errorf("unsupported compression type: %q", c) + } } ct, err := topo.GetClusterTime(ctx, m) @@ -343,7 +348,7 @@ func SetConfigVar(ctx context.Context, m connect.Client, key, val string) error // TODO: how to be with special case options like pitr.enabled switch key { case "pitr.enabled": - return errors.Wrap(confSetPITR(ctx, m, key, v.(bool)), "write to db") + return errors.Wrap(confSetPITR(ctx, m, v.(bool)), "write to db") case "pitr.compression": if c := v.(string); c != "" && !compress.IsValidCompressionType(c) { return errors.Errorf("unsupported compression type: %q", c) @@ -365,16 +370,18 @@ func SetConfigVar(ctx context.Context, m connect.Client, key, val string) error return errors.Wrap(err, "write to db") } -func confSetPITR(ctx context.Context, m connect.Client, k string, v bool) error { +func confSetPITR(ctx context.Context, m connect.Client, value bool) error { ct, err := topo.GetClusterTime(ctx, m) if err != nil { return errors.Wrap(err, "get cluster time") } - _, err = m.ConfigCollection().UpdateOne( - ctx, - bson.D{}, - bson.M{"$set": bson.M{k: v, "pitr.changed": time.Now().Unix(), "epoch": ct}}, - ) + + _, err = m.ConfigCollection().UpdateOne(ctx, + bson.D{{"profile", nil}}, + bson.M{"$set": bson.M{ + "pitr.enabled": value, + "epoch": ct, + }}) return err } @@ -442,7 +449,7 @@ func IsPITREnabled(ctx context.Context, m connect.Client) (bool, bool, error) { return false, false, errors.Wrap(err, "get config") } - return cfg.PITR.Enabled, cfg.PITR.OplogOnly, nil + return cfg.Oplog.Enabled, cfg.Oplog.OplogOnly, nil } type Epoch primitive.Timestamp diff --git a/pbm/defs/defs.go 
b/pbm/defs/defs.go index 575d26098..d4c278cd9 100644 --- a/pbm/defs/defs.go +++ b/pbm/defs/defs.go @@ -1,6 +1,10 @@ package defs -import "time" +import ( + "time" + + "github.com/percona/percona-backup-mongodb/pbm/compress" +) const ( // DB is a name of the PBM database @@ -139,8 +143,10 @@ const ( ) const ( - // PITRdefaultSpan oplog slicing time span - PITRdefaultSpan = time.Minute * 10 + // DefaultPITRInterval oplog slicing time span + DefaultPITRInterval = time.Minute * 10 // PITRfsPrefix is a prefix (folder) for PITR chunks on the storage PITRfsPrefix = "pbmPitr" ) + +const DefaultCompression = compress.CompressionTypeS2 diff --git a/pbm/slicer/slicer.go b/pbm/slicer/slicer.go index 108f427eb..740510e22 100644 --- a/pbm/slicer/slicer.go +++ b/pbm/slicer/slicer.go @@ -51,7 +51,7 @@ func NewSlicer( leadClient: cn, node: node, rs: rs, - span: int64(defs.PITRdefaultSpan), + span: int64(defs.DefaultPITRInterval), storage: to, oplog: oplog.NewOplogBackup(node), cfg: cfg, @@ -138,7 +138,11 @@ func (s *Slicer) Catchup(ctx context.Context) error { return errors.Wrap(err, "get config") } - err = s.upload(ctx, lastChunk.EndTS, rs.FirstWriteTS, cfg.PITR.Compression, cfg.PITR.CompressionLevel) + err = s.upload(ctx, + lastChunk.EndTS, + rs.FirstWriteTS, + cfg.Oplog.Compression, + cfg.Oplog.CompressionLevel) if err != nil { return err } From 6ca66ebabf8d46aed4d857c8bbc7247ac88db39f Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 27 May 2024 16:01:18 +0200 Subject: [PATCH 010/203] in config, keep backup field if it is configured --- pbm/config/config.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/pbm/config/config.go b/pbm/config/config.go index c14c868d0..c44b14f55 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -62,9 +62,9 @@ func validateConfigKey(k string) bool { // Config is a pbm config type Config struct { Restore RestoreConf `bson:"restore" json:"restore,omitempty" yaml:"restore,omitempty"` - Backup BackupConf `bson:"backup" json:"backup,omitempty" yaml:"backup,omitempty"` Storage Storage `bson:"storage" json:"storage" yaml:"storage"` Oplog *GlobalSlicer `bson:"pitr,omitempty" json:"pitr,omitempty" yaml:"pitr,omitempty"` + Backup *Backup `bson:"backup,omitempty" json:"backup,omitempty" yaml:"backup,omitempty"` Epoch primitive.Timestamp `bson:"epoch" json:"-" yaml:"-"` } @@ -116,12 +116,12 @@ func (c *Config) OplogSlicerInterval() time.Duration { // BackupSlicerInterval returns interval for backup slicer routine. // If it is not confugured, the function returns general oplog slicer interval. 
-func (c Config) BackupSlicerInterval() time.Duration { - if c.Backup.OplogSpanMin == 0 { +func (c *Config) BackupSlicerInterval() time.Duration { + if c.Backup == nil || c.Backup.SlicingInterval == 0 { return c.OplogSlicerInterval() } - return time.Duration(c.Backup.OplogSpanMin * float64(time.Minute)) + return time.Duration(c.Backup.SlicingInterval * float64(time.Minute)) } // GlobalSlicer is a Point-In-Time Recovery options @@ -211,8 +211,8 @@ type RestoreConf struct { } //nolint:lll -type BackupConf struct { - OplogSpanMin float64 `bson:"oplogSpanMin" json:"oplogSpanMin" yaml:"oplogSpanMin"` +type Backup struct { + SlicingInterval float64 `bson:"oplogSpanMin" json:"oplogSpanMin" yaml:"oplogSpanMin"` Priority map[string]float64 `bson:"priority,omitempty" json:"priority,omitempty" yaml:"priority,omitempty"` Timeouts *BackupTimeouts `bson:"timeouts,omitempty" json:"timeouts,omitempty" yaml:"timeouts,omitempty"` Compression compress.CompressionType `bson:"compression,omitempty" json:"compression,omitempty" yaml:"compression,omitempty"` @@ -248,8 +248,11 @@ func GetConfig(ctx context.Context, m connect.Client) (*Config, error) { if cfg.Oplog == nil { cfg.Oplog = &GlobalSlicer{} } + if cfg.Backup == nil { + cfg.Backup = &Backup{} + } if cfg.Backup.Compression == "" { - cfg.Backup.Compression = compress.CompressionTypeS2 + cfg.Backup.Compression = defs.DefaultCompression } if cfg.Oplog.Compression == "" { cfg.Oplog.Compression = cfg.Backup.Compression From 79d9ec7e3ea4d25905443004208e7a5ced184127 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 27 May 2024 16:03:47 +0200 Subject: [PATCH 011/203] in config, keep restore field if it is configured --- pbm/config/config.go | 10 +++++++--- pbm/restore/physical.go | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/pbm/config/config.go b/pbm/config/config.go index c44b14f55..c5a9fd27f 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -61,10 +61,10 @@ func validateConfigKey(k string) bool { // Config is a pbm config type Config struct { - Restore RestoreConf `bson:"restore" json:"restore,omitempty" yaml:"restore,omitempty"` Storage Storage `bson:"storage" json:"storage" yaml:"storage"` Oplog *GlobalSlicer `bson:"pitr,omitempty" json:"pitr,omitempty" yaml:"pitr,omitempty"` Backup *Backup `bson:"backup,omitempty" json:"backup,omitempty" yaml:"backup,omitempty"` + Restore *Restore `bson:"restore,omitempty" json:"restore,omitempty" yaml:"restore,omitempty"` Epoch primitive.Timestamp `bson:"epoch" json:"-" yaml:"-"` } @@ -186,10 +186,10 @@ func (s *Storage) Path() string { return path } -// RestoreConf is config options for the restore +// Restore is config options for the restore // //nolint:lll -type RestoreConf struct { +type Restore struct { // Logical restore // // num of documents to buffer @@ -251,6 +251,10 @@ func GetConfig(ctx context.Context, m connect.Client) (*Config, error) { if cfg.Backup == nil { cfg.Backup = &Backup{} } + if cfg.Restore == nil { + cfg.Restore = &Restore{} + } + if cfg.Backup.Compression == "" { cfg.Backup.Compression = defs.DefaultCompression } diff --git a/pbm/restore/physical.go b/pbm/restore/physical.go index 9db7baf55..891fb1510 100644 --- a/pbm/restore/physical.go +++ b/pbm/restore/physical.go @@ -86,7 +86,7 @@ type PhysRestore struct { files []files restoreTS primitive.Timestamp - confOpts config.RestoreConf + confOpts *config.Restore mongod string // location of mongod used for internal restarts From e08c92ced77cb729a2efc0d0067debf159fdda77 Mon Sep 17 00:00:00 2001 From: Dmytro 
Zghoba
Date: Mon, 27 May 2024 16:10:05 +0200
Subject: [PATCH 012/203] improve config parsing

---
 cmd/pbm/config.go    | 31 +++++++++++++++-----------
 pbm/config/config.go | 48 ++++++++++++++++++++++++++++++++++----------
 2 files changed, 56 insertions(+), 23 deletions(-)

diff --git a/cmd/pbm/config.go b/cmd/pbm/config.go
index 192644cd3..ded50a133 100644
--- a/cmd/pbm/config.go
+++ b/cmd/pbm/config.go
@@ -3,14 +3,13 @@ package main
 import (
 	"context"
 	"fmt"
-	"io"
+	"log"
 	"os"
 	"reflect"
 	"strings"
 	"time"
 
 	"go.mongodb.org/mongo-driver/mongo"
-	"gopkg.in/yaml.v2"
 
 	"github.com/percona/percona-backup-mongodb/pbm/config"
 	"github.com/percona/percona-backup-mongodb/pbm/connect"
@@ -101,22 +100,16 @@ func runConfig(ctx context.Context, conn connect.Client, pbm sdk.Client, c *conf
 		return outMsg{"Storage resync finished"}, nil
 	case len(c.file) > 0:
-		var buf []byte
 		var err error
+		var newCfg *config.Config
 		if c.file == "-" {
-			buf, err = io.ReadAll(os.Stdin)
+			newCfg, err = config.Parse(os.Stdin)
 		} else {
-			buf, err = os.ReadFile(c.file)
-		}
-		if err != nil {
-			return nil, errors.Wrap(err, "unable to read config file")
+			newCfg, err = readConfigFromFile(c.file)
 		}
-
-		var newCfg *config.Config
-		err = yaml.UnmarshalStrict(buf, &newCfg)
 		if err != nil {
-			return nil, errors.Wrap(err, "unable to unmarshal config file")
+			return nil, errors.Wrap(err, "unable to get new config")
 		}
 
 		oldCfg, err := pbm.GetConfig(ctx)
@@ -143,3 +136,17 @@ func runConfig(ctx context.Context, conn connect.Client, pbm sdk.Client, c *conf
 
 	return pbm.GetConfig(ctx)
 }
+
+func readConfigFromFile(filename string) (*config.Config, error) {
+	file, err := os.Open(filename)
+	if err != nil {
+		return nil, errors.Wrapf(err, "open %q", filename)
+	}
+	defer func() {
+		if err := file.Close(); err != nil {
+			log.Printf("close: %v", err)
+		}
+	}()
+
+	return config.Parse(file)
+}
diff --git a/pbm/config/config.go b/pbm/config/config.go
index c5a9fd27f..226893dab 100644
--- a/pbm/config/config.go
+++ b/pbm/config/config.go
@@ -3,6 +3,7 @@ package config
 import (
 	"context"
 	"fmt"
+	"io"
 	"os"
 	"reflect"
 	"strconv"
@@ -26,6 +27,8 @@ import (
 	"github.com/percona/percona-backup-mongodb/pbm/topo"
 )
 
+var ErrUnkownStorageType = errors.New("unknown storage type")
+
 var errMissedConfig = errors.New("missed config")
 
 type confMap map[string]reflect.Kind
@@ -68,6 +71,24 @@ type Config struct {
 	Epoch primitive.Timestamp `bson:"epoch" json:"-" yaml:"-"`
 }
 
+func Parse(r io.Reader) (*Config, error) {
+	cfg := &Config{}
+
+	dec := yaml.NewDecoder(r)
+	dec.SetStrict(true)
+	err := dec.Decode(cfg)
+	if err != nil {
+		return nil, errors.Wrap(err, "decode")
+	}
+
+	err = cfg.Storage.Cast()
+	if err != nil {
+		return nil, errors.Wrap(err, "storage cast")
+	}
+
+	return cfg, nil
+}
+
 func (c *Config) String() string {
 	if c.Storage.S3 != nil {
 		if c.Storage.S3.Credentials.AccessKeyID != "" {
@@ -143,6 +164,18 @@ type Storage struct {
 	Filesystem *fs.Config `bson:"filesystem,omitempty" json:"filesystem,omitempty" yaml:"filesystem,omitempty"`
 }
 
+func (s *Storage) Cast() error {
+	switch s.Type {
+	case storage.Filesystem:
+		return s.Filesystem.Cast()
+	case storage.S3:
+		return s.S3.Cast()
+	case storage.Azure: return nil // noop, but a known type
+	}
+
+	return errors.Wrap(ErrUnkownStorageType, string(s.Type))
+}
+
 func (s *Storage) Typ() string {
 	switch s.Type {
 	case storage.S3:
 		return "S3"
@@ -272,21 +305,14 @@ func GetConfig(ctx context.Context, m connect.Client) (*Config, error) {
 // It also applies default storage parameters depending on the type of storage
 // and assigns those possible default values to the cfg 
parameter. func SetConfig(ctx context.Context, m connect.Client, cfg *Config) error { - switch cfg.Storage.Type { - case storage.S3: - err := cfg.Storage.S3.Cast() - if err != nil { - return errors.Wrap(err, "cast storage") - } + if err := cfg.Storage.Cast(); err != nil { + return errors.Wrap(err, "cast storage") + } + if cfg.Storage.Type == storage.S3 { // call the function for notification purpose. // warning about unsupported levels will be printed s3.SDKLogLevel(cfg.Storage.S3.DebugLogLevels, os.Stderr) - case storage.Filesystem: - err := cfg.Storage.Filesystem.Cast() - if err != nil { - return errors.Wrap(err, "check config") - } } if cfg.Oplog != nil { From b84ac6380235f72bdc821199e4a8aa680778554e Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 27 May 2024 16:15:09 +0200 Subject: [PATCH 013/203] add Clone() to config --- pbm/config/config.go | 96 ++++++++++++++++++++++++++++++++++++++ pbm/storage/azure/azure.go | 9 ++++ pbm/storage/fs/fs.go | 8 ++++ pbm/storage/s3/s3.go | 22 +++++++++ 4 files changed, 135 insertions(+) diff --git a/pbm/config/config.go b/pbm/config/config.go index 226893dab..87f0fe955 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -89,7 +89,25 @@ func Parse(r io.Reader) (*Config, error) { return cfg, nil } +func (c *Config) Clone() *Config { + if c == nil { + return nil + } + + rv := &Config{ + Storage: *c.Storage.Clone(), + Oplog: c.Oplog.Clone(), + Restore: c.Restore.Clone(), + Backup: c.Backup.Clone(), + Epoch: c.Epoch, + } + + return rv +} + func (c *Config) String() string { + c = c.Clone() + if c.Storage.S3 != nil { if c.Storage.S3.Credentials.AccessKeyID != "" { c.Storage.S3.Credentials.AccessKeyID = "***" @@ -156,6 +174,20 @@ type GlobalSlicer struct { CompressionLevel *int `bson:"compressionLevel,omitempty" json:"compressionLevel,omitempty" yaml:"compressionLevel,omitempty"` } +func (cfg *GlobalSlicer) Clone() *GlobalSlicer { + if cfg == nil { + return nil + } + + rv := *cfg + if cfg.CompressionLevel != nil { + a := *cfg.CompressionLevel + rv.CompressionLevel = &a + } + + return &rv +} + // Storage is a configuration of the backup storage type Storage struct { Type storage.Type `bson:"type" json:"type" yaml:"type"` @@ -164,6 +196,27 @@ type Storage struct { Filesystem *fs.Config `bson:"filesystem,omitempty" json:"filesystem,omitempty" yaml:"filesystem,omitempty"` } +func (s *Storage) Clone() *Storage { + if s == nil { + return nil + } + + rv := &Storage{ + Type: s.Type, + } + + switch s.Type { + case storage.Filesystem: + rv.Filesystem = s.Filesystem.Clone() + case storage.S3: + rv.S3 = s.S3.Clone() + case storage.Azure: + rv.Azure = s.Azure.Clone() + } + + return rv +} + func (s *Storage) Cast() error { switch s.Type { case storage.Filesystem: @@ -243,6 +296,22 @@ type Restore struct { MongodLocationMap map[string]string `bson:"mongodLocationMap" json:"mongodLocationMap,omitempty" yaml:"mongodLocationMap,omitempty"` } +func (cfg *Restore) Clone() *Restore { + if cfg == nil { + return nil + } + + rv := *cfg + if len(cfg.MongodLocationMap) != 0 { + rv.MongodLocationMap = make(map[string]string, len(cfg.MongodLocationMap)) + for k, v := range cfg.MongodLocationMap { + rv.MongodLocationMap[k] = v + } + } + + return &rv +} + //nolint:lll type Backup struct { SlicingInterval float64 `bson:"oplogSpanMin" json:"oplogSpanMin" yaml:"oplogSpanMin"` @@ -252,6 +321,33 @@ type Backup struct { CompressionLevel *int `bson:"compressionLevel,omitempty" json:"compressionLevel,omitempty" yaml:"compressionLevel,omitempty"` } +func (cfg *Backup) Clone() 
*Backup { + if cfg == nil { + return nil + } + + rv := *cfg + if len(cfg.Priority) != 0 { + rv.Priority = make(map[string]float64, len(cfg.Priority)) + for k, v := range cfg.Priority { + rv.Priority[k] = v + } + } + if cfg.Timeouts != nil { + if cfg.Timeouts.Starting != nil { + rv.Timeouts = &BackupTimeouts{ + Starting: cfg.Timeouts.Starting, + } + } + } + if cfg.CompressionLevel != nil { + a := *cfg.CompressionLevel + rv.CompressionLevel = &a + } + + return &rv +} + type BackupTimeouts struct { // Starting is timeout (in seconds) to wait for a backup to start. Starting *uint32 `bson:"startingStatus,omitempty" json:"startingStatus,omitempty" yaml:"startingStatus,omitempty"` diff --git a/pbm/storage/azure/azure.go b/pbm/storage/azure/azure.go index e1a63e4cf..6f949cdac 100644 --- a/pbm/storage/azure/azure.go +++ b/pbm/storage/azure/azure.go @@ -39,6 +39,15 @@ type Config struct { Credentials Credentials `bson:"credentials" json:"-" yaml:"credentials"` } +func (cfg *Config) Clone() *Config { + if cfg == nil { + return nil + } + + rv := *cfg + return &rv +} + type Credentials struct { Key string `bson:"key" json:"key,omitempty" yaml:"key,omitempty"` } diff --git a/pbm/storage/fs/fs.go b/pbm/storage/fs/fs.go index f5396b49c..dc0168143 100644 --- a/pbm/storage/fs/fs.go +++ b/pbm/storage/fs/fs.go @@ -15,6 +15,14 @@ type Config struct { Path string `bson:"path" json:"path" yaml:"path"` } +func (c *Config) Clone() *Config { + if c == nil { + return nil + } + + return &Config{Path: c.Path} +} + func (c *Config) Cast() error { if c.Path == "" { return errors.New("path can't be empty") diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go index fc714d746..0d5fb9714 100644 --- a/pbm/storage/s3/s3.go +++ b/pbm/storage/s3/s3.go @@ -71,6 +71,28 @@ type Config struct { Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"` } +func (c *Config) Clone() *Config { + if c == nil { + return nil + } + + rv := *c + if c.ForcePathStyle != nil { + a := *c.ForcePathStyle + rv.ForcePathStyle = &a + } + if c.ServerSideEncryption != nil { + a := *c.ServerSideEncryption + rv.ServerSideEncryption = &a + } + if c.Retryer != nil { + a := *c.Retryer + rv.Retryer = &a + } + + return &rv +} + type Retryer struct { // Num max Retries is the number of max retries that will be performed. // https://pkg.go.dev/github.com/aws/aws-sdk-go/aws/client#DefaultRetryer.NumMaxRetries From 14af78c54e73f2078b55df78cc7e1bb16e02041c Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 27 May 2024 16:17:40 +0200 Subject: [PATCH 014/203] return message on pbm delete commands --- cmd/pbm/delete.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/pbm/delete.go b/cmd/pbm/delete.go index 652b8ef94..e77f32586 100644 --- a/cmd/pbm/delete.go +++ b/cmd/pbm/delete.go @@ -59,7 +59,7 @@ func deleteBackup( } if d.dryRun { - return nil, nil + return &outMsg{"running an agent"}, nil } return waitForDelete(ctx, conn, pbm, cid) @@ -198,7 +198,7 @@ func deletePITR( printDeleteInfoTo(os.Stdout, nil, chunks) if d.dryRun { - return nil, nil + return &outMsg{"running an agent"}, nil } if !d.yes { q := "Are you sure you want to delete chunks?" 
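// Editor's note — illustrative sketch, not part of the patch: the dry-run
// branches above now return a non-nil fmt.Stringer, so the CLI prints
// feedback instead of exiting silently. Assumed shape of the outMsg helper
// (its actual definition lives elsewhere in cmd/pbm and may differ):

type outMsg struct {
	Msg string `json:"msg"`
}

// String implements fmt.Stringer, letting every command handler return a
// (fmt.Stringer, error) pair that the output layer renders as text or JSON.
func (m outMsg) String() string { return m.Msg }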
@@ -254,7 +254,7 @@ func doCleanup(ctx context.Context, conn connect.Client, pbm sdk.Client, d *clea printDeleteInfoTo(os.Stdout, info.Backups, info.Chunks) if d.dryRun { - return nil, nil + return &outMsg{"running an agent"}, nil } if !d.yes { if err := askConfirmation("Are you sure you want to delete?"); err != nil { From 9cdd6c2c131ac3b297781308b7920f903f795268 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 27 May 2024 16:21:45 +0200 Subject: [PATCH 015/203] misc --- cmd/pbm-agent/agent.go | 3 ++- cmd/pbm-agent/backup.go | 20 ++++++--------- cmd/pbm-agent/priority.go | 22 +++-------------- pbm/backup/backup.go | 8 ++---- pbm/backup/delete.go | 5 ++-- pbm/backup/logical.go | 3 ++- pbm/backup/physical.go | 4 ++- pbm/config/config.go | 52 +++++++++++++++++---------------------- pbm/restore/physical.go | 2 +- pbm/topo/node.go | 1 + pbm/util/util.go | 5 ++++ 11 files changed, 54 insertions(+), 71 deletions(-) create mode 100644 pbm/util/util.go diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index d69f0d6c4..e72d11cbc 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -68,6 +68,7 @@ func newAgent(ctx context.Context, leadConn connect.Client, uri string, dumpConn URI: uri, SetName: info.SetName, Me: info.Me, + Sharded: info.IsSharded(), }, mongoVersion: mongoVersion, dumpConns: dumpConns, @@ -76,7 +77,7 @@ func newAgent(ctx context.Context, leadConn connect.Client, uri string, dumpConn } func (a *Agent) CanStart(ctx context.Context) error { - info, err := topo.GetNodeInfoExt(ctx, a.nodeConn) + info, err := topo.GetNodeInfo(ctx, a.nodeConn) if err != nil { return errors.Wrap(err, "get node info") } diff --git a/cmd/pbm-agent/backup.go b/cmd/pbm-agent/backup.go index efe96df1e..0c93e08fc 100644 --- a/cmd/pbm-agent/backup.go +++ b/cmd/pbm-agent/backup.go @@ -109,7 +109,7 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, if isClusterLeader { balancer := topo.BalancerModeOff - if nodeInfo.IsSharded() { + if a.brief.Sharded { bs, err := topo.GetBalancerStatus(ctx, a.leadConn) if err != nil { l.Error("get balancer status: %v", err) @@ -165,11 +165,7 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, validCandidates = append(validCandidates, s) } - nodes, err := BcpNodesPriority(ctx, a.leadConn, c, validCandidates) - if err != nil { - l.Error("get nodes priority: %v", err) - return - } + nodes := BcpNodesPriority(cfg.Backup.Priority, c, validCandidates) shards, err := topo.ClusterMembers(ctx, a.leadConn.MongoClient()) if err != nil { l.Error("get cluster members: %v", err) @@ -185,7 +181,7 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, } } - nominated, err := a.waitNomination(ctx, cmd.Name, nodeInfo.SetName, nodeInfo.Me) + nominated, err := a.waitNomination(ctx, cmd.Name) if err != nil { l.Error("wait for nomination: %v", err) } @@ -197,8 +193,8 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, epoch := ep.TS() lck := lock.NewLock(a.leadConn, lock.LockHeader{ Type: ctrl.CmdBackup, - Replset: nodeInfo.SetName, - Node: nodeInfo.Me, + Replset: a.brief.SetName, + Node: a.brief.Me, OPID: opid.String(), Epoch: &epoch, }) @@ -282,7 +278,7 @@ func (a *Agent) nominateRS(ctx context.Context, bcp, rs string, nodes [][]string return nil } -func (a *Agent) waitNomination(ctx context.Context, bcp, rs, node string) (bool, error) { +func (a *Agent) waitNomination(ctx context.Context, bcp string) (bool, error) { l := 
log.LogEventFromContext(ctx) tk := time.NewTicker(time.Millisecond * 500) @@ -294,7 +290,7 @@ func (a *Agent) waitNomination(ctx context.Context, bcp, rs, node string) (bool, for { select { case <-tk.C: - nm, err := backup.GetRSNominees(ctx, a.leadConn, bcp, rs) + nm, err := backup.GetRSNominees(ctx, a.leadConn, bcp, a.brief.SetName) if err != nil { if errors.Is(err, errors.ErrNotFound) { continue @@ -305,7 +301,7 @@ func (a *Agent) waitNomination(ctx context.Context, bcp, rs, node string) (bool, return false, nil } for _, n := range nm.Nodes { - if n == node { + if n == a.brief.Me { return true, nil } } diff --git a/cmd/pbm-agent/priority.go b/cmd/pbm-agent/priority.go index aa35a5c3e..99ef13291 100644 --- a/cmd/pbm-agent/priority.go +++ b/cmd/pbm-agent/priority.go @@ -1,13 +1,9 @@ package main import ( - "context" "sort" - "github.com/percona/percona-backup-mongodb/pbm/config" - "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/defs" - "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/topo" ) @@ -45,17 +41,7 @@ type agentScore func(topo.AgentStat) float64 // in descended order. First are nodes with the highest priority. // Custom coefficients might be passed. These will be ignored though // if the config is set. -func BcpNodesPriority( - ctx context.Context, - m connect.Client, - c map[string]float64, - agents []topo.AgentStat, -) (*NodesPriority, error) { - cfg, err := config.GetConfig(ctx, m) - if err != nil { - return nil, errors.Wrap(err, "get config") - } - +func BcpNodesPriority(priority map[string]float64, c map[string]float64, agents []topo.AgentStat) *NodesPriority { // if cfg.Backup.Priority doesn't set apply defaults f := func(a topo.AgentStat) float64 { if coeff, ok := c[a.Node]; ok && c != nil { @@ -68,9 +54,9 @@ func BcpNodesPriority( return defaultScore } - if cfg.Backup.Priority != nil || len(cfg.Backup.Priority) > 0 { + if len(priority) != 0 { f = func(a topo.AgentStat) float64 { - sc, ok := cfg.Backup.Priority[a.Node] + sc, ok := priority[a.Node] if !ok || sc < 0 { return defaultScore } @@ -79,7 +65,7 @@ func BcpNodesPriority( } } - return bcpNodesPriority(agents, f), nil + return bcpNodesPriority(agents, f) } func bcpNodesPriority(agents []topo.AgentStat, f agentScore) *NodesPriority { diff --git a/pbm/backup/backup.go b/pbm/backup/backup.go index 297cab867..01289aedd 100644 --- a/pbm/backup/backup.go +++ b/pbm/backup/backup.go @@ -143,7 +143,7 @@ func (b *Backup) Init( } meta.FCV = fcv - if inf.IsSharded() { + if b.brief.Sharded { ss, err := topo.ClusterMembers(ctx, b.leadConn.MongoClient()) if err != nil { return errors.Wrap(err, "get shards") @@ -296,7 +296,7 @@ func (b *Backup) Run(ctx context.Context, bcp *ctrl.BackupCmd, opid ctrl.OPID, l // Waiting for StatusStarting to move further. // In case some preparations has to be done before backup. 
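// Editor's note — illustrative, not part of the patch: waitForStatus polls
// the backup metadata until the wanted status appears or the timeout fires.
// A rough sketch of the assumed mechanics (names, the poll interval, and the
// getter are illustrative only):
//
//	tk := time.NewTicker(time.Second)
//	defer tk.Stop()
//	tmo := time.After(timeout)
//	for {
//		select {
//		case <-tk.C:
//			s, err := getBackupStatus(ctx, name) // hypothetical getter
//			if err != nil {
//				return errors.Wrap(err, "get backup status")
//			}
//			if s == want {
//				return nil
//			}
//		case <-tmo:
//			return errors.Errorf("timeout waiting for status %s", want)
//		case <-ctx.Done():
//			return ctx.Err()
//		}
//	}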
-	err = b.waitForStatus(ctx, bcp.Name, defs.StatusStarting, ref(b.timeouts.StartingStatus()))
+	err = b.waitForStatus(ctx, bcp.Name, defs.StatusStarting, util.Ref(b.timeouts.StartingStatus()))
 	if err != nil {
 		return errors.Wrap(err, "waiting for start")
 	}
@@ -747,7 +747,3 @@ func condAll[T any, Cond func(*T) bool](ts []T, ok Cond) bool {
 
 	return true
 }
-
-func ref[T any](v T) *T {
-	return &v
-}
diff --git a/pbm/backup/delete.go b/pbm/backup/delete.go
index 22adb151c..19a34bbab 100644
--- a/pbm/backup/delete.go
+++ b/pbm/backup/delete.go
@@ -352,11 +352,12 @@ func ListDeleteBackupBefore(
 	}
 
 	pred := func(m *BackupMeta) bool { return m.Type == bcpType }
-	if bcpType == defs.LogicalBackup {
+	switch bcpType {
+	case defs.LogicalBackup:
 		pred = func(m *BackupMeta) bool {
 			return m.Type == defs.LogicalBackup && !util.IsSelective(m.Namespaces)
 		}
-	} else if bcpType == SelectiveBackup {
+	case SelectiveBackup:
 		pred = func(m *BackupMeta) bool { return util.IsSelective(m.Namespaces) }
 	}
 
diff --git a/pbm/backup/logical.go b/pbm/backup/logical.go
index 415eceedd..400f3a07c 100644
--- a/pbm/backup/logical.go
+++ b/pbm/backup/logical.go
@@ -67,7 +67,8 @@ func (b *Backup) doLogical(
 	}
 
 	if inf.IsLeader() {
-		err := b.reconcileStatus(ctx, bcp.Name, opid.String(), defs.StatusRunning, ref(b.timeouts.StartingStatus()))
+		err := b.reconcileStatus(ctx,
+			bcp.Name, opid.String(), defs.StatusRunning, util.Ref(b.timeouts.StartingStatus()))
 		if err != nil {
 			if errors.Is(err, errConvergeTimeOut) {
 				return errors.Wrap(err, "couldn't get response from all shards")
diff --git a/pbm/backup/physical.go b/pbm/backup/physical.go
index c886e3b1e..23a3703f2 100644
--- a/pbm/backup/physical.go
+++ b/pbm/backup/physical.go
@@ -25,6 +25,7 @@ import (
 	"github.com/percona/percona-backup-mongodb/pbm/log"
 	"github.com/percona/percona-backup-mongodb/pbm/storage"
 	"github.com/percona/percona-backup-mongodb/pbm/topo"
+	"github.com/percona/percona-backup-mongodb/pbm/util"
 )
 
 const cursorCreateRetries = 10
@@ -306,7 +307,8 @@ func (b *Backup) doPhysical(
 	}
 
 	if inf.IsLeader() {
-		err := b.reconcileStatus(ctx, bcp.Name, opid.String(), defs.StatusRunning, ref(b.timeouts.StartingStatus()))
+		err := b.reconcileStatus(ctx,
+			bcp.Name, opid.String(), defs.StatusRunning, util.Ref(b.timeouts.StartingStatus()))
 		if err != nil {
 			if errors.Is(err, errConvergeTimeOut) {
 				return errors.Wrap(err, "couldn't get response from all shards")
diff --git a/pbm/config/config.go b/pbm/config/config.go
index 87f0fe955..b9f7b50fb 100644
--- a/pbm/config/config.go
+++ b/pbm/config/config.go
@@ -515,28 +515,6 @@ func confSetPITR(ctx context.Context, m connect.Client, value bool) error {
 	return err
 }
 
-func DeleteConfigVar(ctx context.Context, m connect.Client, key string) error {
-	if !validateConfigKey(key) {
-		return errors.New("invalid config key")
-	}
-
-	_, err := GetConfig(ctx, m)
-	if err != nil {
-		if errors.Is(err, mongo.ErrNoDocuments) {
-			return errors.New("config is not set")
-		}
-		return err
-	}
-
-	_, err = m.ConfigCollection().UpdateOne(
-		ctx,
-		bson.D{},
-		bson.M{"$unset": bson.M{key: 1}},
-	)
-
-	return errors.Wrap(err, "write to db")
-}
-
 // GetConfigVar returns value of given config variable
 func GetConfigVar(ctx context.Context, m connect.Client, key string) (interface{}, error) {
 	if !validateConfigKey(key) {
@@ -583,13 +561,33 @@ func IsPITREnabled(ctx context.Context, m connect.Client) (bool, bool, error) {
 
 type Epoch primitive.Timestamp
 
+func (e Epoch) TS() primitive.Timestamp {
+	return primitive.Timestamp(e)
+}
+
 func GetEpoch(ctx context.Context, m connect.Client) (Epoch, error) {
-	c, err := GetConfig(ctx, m)
+	opts := options.FindOne().SetProjection(bson.D{{"_id", 0}, {"epoch", 1}})
+	res := m.ConfigCollection().FindOne(ctx, bson.D{{"profile", nil}}, opts)
+	if err := res.Err(); err != nil {
+		return Epoch{}, errors.Wrap(err, "query")
+	}
+
+	raw, err := res.Raw()
 	if err != nil {
-		return Epoch{}, errors.Wrap(err, "get config")
+		return Epoch{}, errors.Wrap(err, "read raw")
 	}
 
-	return Epoch(c.Epoch), nil
+	val, err := raw.LookupErr("epoch")
+	if err != nil {
+		return Epoch{}, errors.Wrap(err, "lookup")
+	}
+
+	t, i, ok := val.TimestampOK()
+	if !ok {
+		return Epoch{}, errors.New("not a timestamp")
+	}
+
+	return Epoch{T: t, I: i}, nil
 }
 
 func ResetEpoch(ctx context.Context, m connect.Client) (Epoch, error) {
@@ -605,7 +603,3 @@ func ResetEpoch(ctx context.Context, m connect.Client) (Epoch, error) {
 
 	return Epoch(ct), err
 }
-
-func (e Epoch) TS() primitive.Timestamp {
-	return primitive.Timestamp(e)
-}
diff --git a/pbm/restore/physical.go b/pbm/restore/physical.go
index 891fb1510..7108ec348 100644
--- a/pbm/restore/physical.go
+++ b/pbm/restore/physical.go
@@ -1037,7 +1037,7 @@ func (r *PhysRestore) dumpMeta(meta *RestoreMeta, s defs.Status, msg string) err
 		r.log.Warning("meta `%s` already exists, trying write %s status with '%s'", name, s, msg)
 		return nil
 	}
-	if err != nil && !errors.Is(err, storage.ErrNotExist) {
+	if !errors.Is(err, storage.ErrNotExist) {
 		return errors.Wrapf(err, "check restore meta `%s`", name)
 	}
 
diff --git a/pbm/topo/node.go b/pbm/topo/node.go
index f52bb96c2..62d48b09c 100644
--- a/pbm/topo/node.go
+++ b/pbm/topo/node.go
@@ -50,6 +50,7 @@ type NodeBrief struct {
 	URI     string
 	SetName string
 	Me      string
+	Sharded bool
 }
 
 // NodeInfo represents the mongo's node info
diff --git a/pbm/util/util.go b/pbm/util/util.go
new file mode 100644
index 000000000..93bef73cb
--- /dev/null
+++ b/pbm/util/util.go
@@ -0,0 +1,5 @@
+package util
+
+func Ref[T any](v T) *T {
+	return &v
+}

From 5ed75c4927e7d9ba2c524ca540073e60d684947e Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Mon, 27 May 2024 16:35:31 +0200
Subject: [PATCH 016/203] add initial config profile support

---
 cmd/pbm-agent/agent.go   |   4 +
 cmd/pbm-agent/backup.go  |  52 ++++++++---
 cmd/pbm-agent/profile.go | 187 +++++++++++++++++++++++++++++++++++++++
 cmd/pbm-agent/restore.go |   6 +-
 cmd/pbm/backup.go        |   2 +
 cmd/pbm/main.go          |  75 +++++++++++++---
 cmd/pbm/profile.go       | 116 ++++++++++++++++++++++++
 pbm/backup/backup.go     |  31 +++----
 pbm/backup/logical.go    |   8 +-
 pbm/backup/types.go      |   3 +
 pbm/config/config.go     |  65 +++++++++-----
 pbm/config/profile.go    |  97 ++++++++++++++++++++
 pbm/ctrl/cmd.go          |  35 +++++---
 pbm/ctrl/send.go         |  28 ++++++
 sdk/impl.go              |  18 +++-
 sdk/sdk.go               |   4 +
 16 files changed, 643 insertions(+), 88 deletions(-)
 create mode 100644 cmd/pbm-agent/profile.go
 create mode 100644 cmd/pbm/profile.go
 create mode 100644 pbm/config/profile.go

diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go
index e72d11cbc..e1aa9572f 100644
--- a/cmd/pbm-agent/agent.go
+++ b/cmd/pbm-agent/agent.go
@@ -136,6 +136,10 @@ func (a *Agent) Start(ctx context.Context) error {
 		logger.Printf("got epoch %v", ep)
 
 		switch cmd.Cmd {
+		case ctrl.CmdAddConfigProfile:
+			a.handleAddConfigProfile(ctx, cmd.Profile, cmd.OPID, ep)
+		case ctrl.CmdRemoveConfigProfile:
+			a.handleRemoveConfigProfile(ctx, cmd.Profile, cmd.OPID, ep)
 		case ctrl.CmdBackup:
 			// backup runs in the go-routine so it can be canceled
 			go a.Backup(ctx, cmd.Backup, cmd.OPID, ep)
diff --git a/cmd/pbm-agent/backup.go
b/cmd/pbm-agent/backup.go index 0c93e08fc..b603541ad 100644 --- a/cmd/pbm-agent/backup.go +++ b/cmd/pbm-agent/backup.go @@ -6,6 +6,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/backup" "github.com/percona/percona-backup-mongodb/pbm/config" + "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/ctrl" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" @@ -79,6 +80,12 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, go a.sliceNow(opid) } + cfg, err := getMergedConfig(ctx, a.leadConn, cmd.Profile) + if err != nil { + l.Error("get merged config: %v", err) + return + } + var bcp *backup.Backup switch cmd.Type { case defs.PhysicalBackup: @@ -93,16 +100,7 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, bcp = backup.New(a.leadConn, a.nodeConn, a.brief, a.dumpConns) } - cfg, err := config.GetConfig(ctx, a.leadConn) - if err != nil { - l.Error("unable to get PBM config settings: " + err.Error()) - return - } - if storage.ParseType(string(cfg.Storage.Type)) == storage.Undefined { - l.Error("backups cannot be saved because PBM storage configuration hasn't been set yet") - return - } - + bcp.SetConfig(cfg) bcp.SetMongoVersion(a.mongoVersion.VersionString) bcp.SetSlicerInterval(cfg.BackupSlicerInterval()) bcp.SetTimeouts(cfg.Backup.Timeouts) @@ -119,7 +117,7 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, balancer = topo.BalancerModeOn } } - err = bcp.Init(ctx, cmd, opid, nodeInfo, cfg.Storage, balancer, l) + err = bcp.Init(ctx, cmd, opid, balancer) if err != nil { l.Error("init meta: %v", err) return @@ -240,6 +238,38 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, } } +func getMergedConfig( + ctx context.Context, + conn connect.Client, + profileName string, +) (*config.Config, error) { + cfg, err := config.GetConfig(ctx, conn) + if err != nil { + return nil, errors.Wrap(err, "get main config") + } + + if profileName != "" { + custom, err := config.GetProfile(ctx, conn, profileName) + if err != nil { + return nil, errors.Wrap(err, "get config profile") + } + if err := custom.Storage.Cast(); err != nil { + return nil, errors.Wrap(err, "storage cast") + } + + // use storage config only + cfg.Storage = custom.Storage + cfg.Name = custom.Name + cfg.IsProfile = true + } + + if storage.ParseType(string(cfg.Storage.Type)) == storage.Undefined { + return nil, errors.New("backups cannot be saved because PBM storage configuration hasn't been set yet") + } + + return cfg, nil +} + const renominationFrame = 5 * time.Second func (a *Agent) nominateRS(ctx context.Context, bcp, rs string, nodes [][]string) error { diff --git a/cmd/pbm-agent/profile.go b/cmd/pbm-agent/profile.go new file mode 100644 index 000000000..780a2fa9c --- /dev/null +++ b/cmd/pbm-agent/profile.go @@ -0,0 +1,187 @@ +package main + +import ( + "context" + + "github.com/percona/percona-backup-mongodb/pbm/config" + "github.com/percona/percona-backup-mongodb/pbm/ctrl" + "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/lock" + "github.com/percona/percona-backup-mongodb/pbm/log" + "github.com/percona/percona-backup-mongodb/pbm/storage" + "github.com/percona/percona-backup-mongodb/pbm/topo" + "github.com/percona/percona-backup-mongodb/pbm/util" +) + +func (a *Agent) handleAddConfigProfile( + ctx context.Context, + cmd *ctrl.ProfileCmd, + 
opid ctrl.OPID, + epoch config.Epoch, +) { + logger := log.FromContext(ctx) + + if cmd == nil { + l := logger.NewEvent(string(ctrl.CmdAddConfigProfile), "", opid.String(), epoch.TS()) + l.Error("missed command") + return + } + if cmd.Name == "" { + l := logger.NewEvent(string(ctrl.CmdAddConfigProfile), "", opid.String(), epoch.TS()) + l.Error("missed config profile name") + return + } + + l := logger.NewEvent(string(ctrl.CmdAddConfigProfile), cmd.Name, opid.String(), epoch.TS()) + ctx = log.SetLogEventToContext(ctx, l) + + var err error + defer func() { + if err != nil { + l.Error("failed to add config profile: %v", err) + } + }() + + nodeInfo, err := topo.GetNodeInfo(ctx, a.nodeConn) + if err != nil { + err = errors.Wrap(err, "get node info") + return + } + if !nodeInfo.IsClusterLeader() { + l.Debug("not leader. skip") + return + } + + lck := lock.NewLock(a.leadConn, lock.LockHeader{ + Type: ctrl.CmdAddConfigProfile, + Replset: a.brief.SetName, + Node: a.brief.Me, + OPID: opid.String(), + Epoch: util.Ref(epoch.TS()), + }) + + got, err := a.acquireLock(ctx, lck, l) + if err != nil { + l.Error("acquiring lock: %v", err) + return + } + if !got { + l.Error("lock not acquired") + return + } + defer func() { + l.Debug("releasing lock") + err = lck.Release() + if err != nil { + l.Error("unable to release lock %v: %v", lck, err) + } + }() + + err = cmd.Storage.Cast() + if err != nil { + l.Error("storage cast: %v", err) + return + } + + stg, err := util.StorageFromConfig(&cmd.Storage, log.LogEventFromContext(ctx)) + if err != nil { + err = errors.Wrap(err, "storage from config") + return + } + + err = storage.HasReadAccess(ctx, stg) + if err != nil { + if !errors.Is(err, storage.ErrUninitialized) { + err = errors.Wrap(err, "check read access") + return + } + + err = storage.InitStorage(ctx, stg) + if err != nil { + err = errors.Wrap(err, "init storage") + return + } + } + + profile := &config.Config{ + Name: cmd.Name, + IsProfile: true, + Storage: cmd.Storage, + } + err = config.AddProfile(ctx, a.leadConn, profile) + if err != nil { + err = errors.Wrap(err, "add profile config") + return + } +} + +func (a *Agent) handleRemoveConfigProfile( + ctx context.Context, + cmd *ctrl.ProfileCmd, + opid ctrl.OPID, + epoch config.Epoch, +) { + logger := log.FromContext(ctx) + + if cmd == nil { + l := logger.NewEvent(string(ctrl.CmdRemoveConfigProfile), "", opid.String(), epoch.TS()) + l.Error("missed command") + return + } + if cmd.Name == "" { + l := logger.NewEvent(string(ctrl.CmdRemoveConfigProfile), "", opid.String(), epoch.TS()) + l.Error("missed config profile name") + return + } + + l := logger.NewEvent(string(ctrl.CmdRemoveConfigProfile), cmd.Name, opid.String(), epoch.TS()) + ctx = log.SetLogEventToContext(ctx, l) + + var err error + defer func() { + if err != nil { + l.Error("failed to remove config profile: %v", err) + } + }() + + nodeInfo, err := topo.GetNodeInfo(ctx, a.nodeConn) + if err != nil { + err = errors.Wrap(err, "get node info") + return + } + if !nodeInfo.IsClusterLeader() { + l.Debug("not leader. 
skip") + return + } + + lck := lock.NewLock(a.leadConn, lock.LockHeader{ + Type: ctrl.CmdRemoveConfigProfile, + Replset: a.brief.SetName, + Node: a.brief.Me, + OPID: opid.String(), + Epoch: util.Ref(epoch.TS()), + }) + + got, err := a.acquireLock(ctx, lck, l) + if err != nil { + l.Error("acquiring lock: %v", err) + return + } + if !got { + l.Error("lock not acquired") + return + } + defer func() { + l.Debug("releasing lock") + err = lck.Release() + if err != nil { + l.Error("unable to release lock %v: %v", lck, err) + } + }() + + err = config.RemoveProfile(ctx, a.leadConn, cmd.Name) + if err != nil { + l.Error("delete document", err) + return + } +} diff --git a/cmd/pbm-agent/restore.go b/cmd/pbm-agent/restore.go index 958e8ad4e..afa618588 100644 --- a/cmd/pbm-agent/restore.go +++ b/cmd/pbm-agent/restore.go @@ -100,7 +100,7 @@ func (a *Agent) stopPitrOnOplogOnlyChange(currOO bool) { // canSlicingNow returns lock.ConcurrentOpError if there is a parallel operation. // Only physical backups (full, incremental, external) is allowed. -func canSlicingNow(ctx context.Context, conn connect.Client) error { +func canSlicingNow(ctx context.Context, conn connect.Client, cfg *config.Storage) error { locks, err := lock.GetLocks(ctx, conn, &lock.LockHeader{}) if err != nil { return errors.Wrap(err, "get locks data") @@ -118,7 +118,7 @@ func canSlicingNow(ctx context.Context, conn connect.Client) error { return errors.Wrap(err, "get backup metadata") } - if bcp.Type == defs.LogicalBackup { + if bcp.Type == defs.LogicalBackup && bcp.Store.Equal(cfg) { return lock.ConcurrentOpError{l.LockHeader} } } @@ -148,7 +148,7 @@ func (a *Agent) pitr(ctx context.Context) error { l := log.FromContext(ctx).NewEvent(string(ctrl.CmdPITR), "", "", ep.TS()) ctx = log.SetLogEventToContext(ctx, l) - if err := canSlicingNow(ctx, a.leadConn); err != nil { + if err := canSlicingNow(ctx, a.leadConn, &cfg.Storage); err != nil { e := lock.ConcurrentOpError{} if errors.As(err, &e) { l.Info("oplog slicer is paused for lock [%s, opid: %s]", e.Lock.Type, e.Lock.OPID) diff --git a/cmd/pbm/backup.go b/cmd/pbm/backup.go index 73148ee7b..912519ef7 100644 --- a/cmd/pbm/backup.go +++ b/cmd/pbm/backup.go @@ -33,6 +33,7 @@ type backupOpts struct { base bool compression string compressionLevel []int + profile string ns string wait bool externList bool @@ -139,6 +140,7 @@ func runBackup( Compression: compression, CompressionLevel: level, Filelist: b.externList, + Profile: b.profile, }, }) if err != nil { diff --git a/cmd/pbm/main.go b/cmd/pbm/main.go index 312de2daf..be7676987 100644 --- a/cmd/pbm/main.go +++ b/cmd/pbm/main.go @@ -99,6 +99,45 @@ func main() { Short('w'). BoolVar(&cfg.wait) + configProfileCmd := pbmCmd. + Command("profile", "Configuration profiles") + + listConfigProfileCmd := configProfileCmd. + Command("list", "List configuration profiles"). + Default() + + descConfigProfileOpts := descConfigProfileOptions{} + descConfigProfileCmd := configProfileCmd. + Command("describe", "Describe configuration profile") + descConfigProfileCmd. + Arg("name", "Profile name"). + Required(). + StringVar(&descConfigProfileOpts.name) + + addConfigProfileOpts := addConfigProfileOptions{} + addConfigProfileCmd := configProfileCmd. + Command("add", "Save configuration profile") + addConfigProfileCmd. + Flag("name", "Profile name"). + Required(). + StringVar(&addConfigProfileOpts.name) + addConfigProfileCmd. + Arg("file", "Path to configuration file"). + Required(). 
+		StringVar(&addConfigProfileOpts.file)
+
+	removeConfigProfileOpts := removeConfigProfileOptions{}
+	removeConfigProfileCmd := configProfileCmd.
+		Command("remove", "Remove configuration profile")
+	removeConfigProfileCmd.
+		Arg("profile-name", "Profile name").
+		Required().
+		StringVar(&removeConfigProfileOpts.name)
+	removeConfigProfileCmd.
+		Flag("wait", "Wait for done by agents").
+		Short('w').
+		BoolVar(&removeConfigProfileOpts.wait)
+
 	backupCmd := pbmCmd.Command("backup", "Make backup")
 	backupOptions := backupOpts{}
 	backupCmd.Flag("compression", "Compression type <none>/<gzip>/<snappy>/<lz4>/<s2>/<pgzip>/<zstd>").
@@ -125,6 +164,7 @@ func main() {
 			string(defs.ExternalBackup))
 	backupCmd.Flag("base", "Is this a base for incremental backups").
 		BoolVar(&backupOptions.base)
+	backupCmd.Flag("profile", "Config profile name").StringVar(&backupOptions.profile)
 	backupCmd.Flag("compression-level", "Compression level (specific to the compression type)").
 		IntsVar(&backupOptions.compressionLevel)
 	backupCmd.Flag("ns", `Namespaces to backup (e.g. "db.*", "db.collection"). If not set, backup all ("*.*")`).
@@ -409,6 +449,14 @@ func main() {
 		switch cmd {
 		case configCmd.FullCommand():
 			out, err = runConfig(ctx, conn, pbm, &cfg)
+		case listConfigProfileCmd.FullCommand():
+			out, err = handleListConfigProfiles(ctx, pbm)
+		case descConfigProfileCmd.FullCommand():
+			out, err = handleDescibeConfigProfiles(ctx, pbm, descConfigProfileOpts)
+		case addConfigProfileCmd.FullCommand():
+			out, err = handleAddConfigProfile(ctx, pbm, addConfigProfileOpts)
+		case removeConfigProfileCmd.FullCommand():
+			out, err = handleRemoveConfigProfile(ctx, pbm, removeConfigProfileOpts)
 		case backupCmd.FullCommand():
 			backupOptions.name = time.Now().UTC().Format(time.RFC3339)
 			out, err = runBackup(ctx, conn, pbm, &backupOptions, pbmOutF)
@@ -567,9 +615,10 @@ func followLogs(ctx context.Context, conn connect.Client, r *log.LogRequest, sho
 	outC, errC := log.Follow(ctx, conn, r, false)
 
 	var enc *json.Encoder
-	if f == outJSON {
+	switch f {
+	case outJSON:
 		enc = json.NewEncoder(os.Stdout)
-	} else if f == outJSONpretty {
+	case outJSONpretty:
 		enc = json.NewEncoder(os.Stdout)
 		enc.SetIndent("", "  ")
 	}
@@ -707,13 +756,11 @@ func findLock(ctx context.Context, conn connect.Client, fn findLockFn) (*lock.Lo
 	// But chances for that are quite low and on the next run of `pbm status` everything
 	// would be ok. So no reason to complicate code to avoid that.
 	if lck != nil && l.OPID != lck.OPID {
-		if err != nil {
-			return nil, errors.Errorf("conflicting ops running: [%s/%s::%s-%s] [%s/%s::%s-%s]. "+
-				"This conflict may naturally resolve after 10 seconds",
-				l.Replset, l.Node, l.Type, l.OPID,
-				lck.Replset, lck.Node, lck.Type, lck.OPID,
-			)
-		}
+		return nil, errors.Errorf("conflicting ops running: [%s/%s::%s-%s] [%s/%s::%s-%s]. 
"+ + "This conflict may naturally resolve after 10 seconds", + l.Replset, l.Node, l.Type, l.OPID, + lck.Replset, lck.Node, lck.Type, lck.OPID, + ) } l := l @@ -727,16 +774,16 @@ type concurentOpError struct { op *lock.LockHeader } -func (e concurentOpError) Error() string { +func (e *concurentOpError) Error() string { return fmt.Sprintf("another operation in progress, %s/%s [%s/%s]", e.op.Type, e.op.OPID, e.op.Replset, e.op.Node) } -func (e concurentOpError) As(err any) bool { +func (e *concurentOpError) As(err any) bool { if err == nil { return false } - er, ok := err.(concurentOpError) + er, ok := err.(*concurentOpError) if !ok { return false } @@ -745,7 +792,7 @@ func (e concurentOpError) As(err any) bool { return true } -func (e concurentOpError) MarshalJSON() ([]byte, error) { +func (e *concurentOpError) MarshalJSON() ([]byte, error) { s := make(map[string]interface{}) s["error"] = "another operation in progress" s["operation"] = e.op @@ -768,7 +815,7 @@ func checkConcurrentOp(ctx context.Context, conn connect.Client) error { // and leave it for agents to deal with. for _, l := range locks { if l.Heartbeat.T+defs.StaleFrameSec >= ts.T { - return concurentOpError{&l.LockHeader} + return &concurentOpError{&l.LockHeader} } } diff --git a/cmd/pbm/profile.go b/cmd/pbm/profile.go new file mode 100644 index 000000000..f8a6047ca --- /dev/null +++ b/cmd/pbm/profile.go @@ -0,0 +1,116 @@ +package main + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/percona/percona-backup-mongodb/pbm/config" + "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/sdk" +) + +type descConfigProfileOptions struct { + name string +} + +type addConfigProfileOptions struct { + name string + file string +} + +type removeConfigProfileOptions struct { + name string + wait bool +} + +type configProfileList struct { + configs []config.Config +} + +func (l configProfileList) String() string { + if len(l.configs) == 0 { + return "" + } + + sb := strings.Builder{} + sb.WriteString(l.configs[0].String()) + for _, profile := range l.configs[1:] { + sb.WriteString("---\n") + sb.WriteString(profile.String()) + } + + return sb.String() +} + +func handleListConfigProfiles(ctx context.Context, pbm sdk.Client) (fmt.Stringer, error) { + profiles, err := pbm.ListConfigProfiles(ctx) + if err != nil { + return nil, err + } + + return configProfileList{profiles}, nil +} + +func handleDescibeConfigProfiles( + ctx context.Context, + pbm sdk.Client, + opts descConfigProfileOptions, +) (fmt.Stringer, error) { + if opts.name == "" { + return nil, errors.New("name is required") + } + + profile, err := pbm.GetConfigProfile(ctx, opts.name) + if err != nil { + return nil, err + } + + return profile, nil +} + +func handleAddConfigProfile( + ctx context.Context, + pbm sdk.Client, + opts addConfigProfileOptions, +) (fmt.Stringer, error) { + if opts.name == "" { + return nil, errors.New("name is required") + } + + var err error + var cfg *config.Config + if opts.file == "-" { + cfg, err = config.Parse(os.Stdin) + } else { + cfg, err = readConfigFromFile(opts.file) + } + if err != nil { + return nil, errors.Wrap(err, "unable to get new config") + } + + _, err = pbm.AddConfigProfile(ctx, opts.name, cfg) + if err != nil { + return nil, errors.Wrap(err, "add config profile") + } + + return &outMsg{"OK"}, nil +} + +func handleRemoveConfigProfile( + ctx context.Context, + pbm sdk.Client, + opts removeConfigProfileOptions, +) (fmt.Stringer, error) { + if opts.name == "" { + return nil, 
errors.New("name is required") + } + + _, err := pbm.RemoveConfigProfile(ctx, opts.name) + if err != nil { + return nil, errors.Wrap(err, "sdk: remove config profile") + } + + return &outMsg{"OK"}, nil +} diff --git a/pbm/backup/backup.go b/pbm/backup/backup.go index 01289aedd..7a39b79b9 100644 --- a/pbm/backup/backup.go +++ b/pbm/backup/backup.go @@ -27,6 +27,7 @@ type Backup struct { leadConn connect.Client nodeConn *mongo.Client brief topo.NodeBrief + config *config.Config mongoVersion string typ defs.BackupType incrBase bool @@ -73,6 +74,10 @@ func NewIncremental(leadConn connect.Client, conn *mongo.Client, brief topo.Node } } +func (b *Backup) SetConfig(cfg *config.Config) { + b.config = cfg +} + func (b *Backup) SetMongoVersion(v string) { b.mongoVersion = v } @@ -97,10 +102,7 @@ func (b *Backup) Init( ctx context.Context, bcp *ctrl.BackupCmd, opid ctrl.OPID, - inf *topo.NodeInfo, - store config.Storage, balancer topo.BalancerMode, - l log.LogEvent, ) error { ts, err := topo.GetClusterTime(ctx, b.leadConn) if err != nil { @@ -113,9 +115,14 @@ func (b *Backup) Init( Name: bcp.Name, Namespaces: bcp.Namespaces, Compression: bcp.Compression, - StartTS: time.Now().Unix(), - Status: defs.StatusStarting, - Replsets: []BackupReplset{}, + Store: Storage{ + Name: b.config.Name, + IsProfile: b.config.IsProfile, + Storage: b.config.Storage, + }, + StartTS: time.Now().Unix(), + Status: defs.StatusStarting, + Replsets: []BackupReplset{}, // the driver (mongo?) sets TS to the current wall clock if TS was 0, so have to init with 1 LastWriteTS: primitive.Timestamp{T: 1, I: 1}, // the driver (mongo?) sets TS to the current wall clock if TS was 0, so have to init with 1 @@ -127,16 +134,6 @@ func (b *Backup) Init( Hb: ts, } - cfg, err := config.GetConfig(ctx, b.leadConn) - if err != nil { - return errors.Wrap(err, "unable to get PBM config settings") - } - _, err = util.StorageFromConfig(&cfg.Storage, l) - if errors.Is(err, util.ErrStorageUndefined) { - return errors.New("backups cannot be saved because PBM storage configuration hasn't been set yet") - } - meta.Store = Storage{cfg.Storage} - fcv, err := version.GetFCV(ctx, b.nodeConn) if err != nil { return errors.Wrap(err, "get featureCompatibilityVersion") @@ -192,7 +189,7 @@ func (b *Backup) Run(ctx context.Context, bcp *ctrl.BackupCmd, opid ctrl.OPID, l rsMeta.IsConfigSvr = &v } - stg, err := util.GetStorage(ctx, b.leadConn, l) + stg, err := util.StorageFromConfig(&b.config.Storage, l) if err != nil { return errors.Wrap(err, "unable to get PBM storage configuration settings") } diff --git a/pbm/backup/logical.go b/pbm/backup/logical.go index 400f3a07c..ef9a51f59 100644 --- a/pbm/backup/logical.go +++ b/pbm/backup/logical.go @@ -15,7 +15,6 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/archive" "github.com/percona/percona-backup-mongodb/pbm/compress" - "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/ctrl" "github.com/percona/percona-backup-mongodb/pbm/defs" @@ -136,11 +135,6 @@ func (b *Backup) doLogical( } } - cfg, err := config.GetConfig(ctx, b.leadConn) - if err != nil { - return errors.Wrap(err, "get config") - } - nsFilter := archive.DefaultNSFilter docFilter := archive.DefaultDocFilter if inf.IsConfigSrv() && util.IsSelective(bcp.Namespaces) { @@ -156,7 +150,7 @@ func (b *Backup) doLogical( snapshotSize, err := snapshot.UploadDump(ctx, dump, func(ns, ext string, r io.Reader) error { - stg, err := 
util.StorageFromConfig(&cfg.Storage, l) + stg, err := util.StorageFromConfig(&b.config.Storage, l) if err != nil { return errors.Wrap(err, "get storage") } diff --git a/pbm/backup/types.go b/pbm/backup/types.go index 0d65a7929..821d808de 100644 --- a/pbm/backup/types.go +++ b/pbm/backup/types.go @@ -87,6 +87,9 @@ func (b *BackupMeta) RS(name string) *BackupReplset { } type Storage struct { + Name string `bson:"name,omitempty" json:"name,omitempty"` + IsProfile bool `bson:"profile,omitempty" json:"profile,omitempty"` + config.Storage `bson:",inline" json:",inline"` } diff --git a/pbm/config/config.go b/pbm/config/config.go index b9f7b50fb..c83f8957b 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -64,6 +64,9 @@ func validateConfigKey(k string) bool { // Config is a pbm config type Config struct { + Name string `bson:"name,omitempty" json:"name,omitempty" yaml:"name,omitempty"` + IsProfile bool `bson:"profile,omitempty" json:"profile,omitempty" yaml:"profile,omitempty"` + Storage Storage `bson:"storage" json:"storage" yaml:"storage"` Oplog *GlobalSlicer `bson:"pitr,omitempty" json:"pitr,omitempty" yaml:"pitr,omitempty"` Backup *Backup `bson:"backup,omitempty" json:"backup,omitempty" yaml:"backup,omitempty"` @@ -95,11 +98,13 @@ func (c *Config) Clone() *Config { } rv := &Config{ - Storage: *c.Storage.Clone(), - Oplog: c.Oplog.Clone(), - Restore: c.Restore.Clone(), - Backup: c.Backup.Clone(), - Epoch: c.Epoch, + Name: c.Name, + IsProfile: c.IsProfile, + Storage: *c.Storage.Clone(), + Oplog: c.Oplog.Clone(), + Restore: c.Restore.Clone(), + Backup: c.Backup.Clone(), + Epoch: c.Epoch, } return rv @@ -217,6 +222,23 @@ func (s *Storage) Clone() *Storage { return rv } +func (s *Storage) Equal(other *Storage) bool { + if s.Type != other.Type { + return false + } + + switch s.Type { + case storage.S3: + return reflect.DeepEqual(s.S3, other.S3) + case storage.Azure: + return reflect.DeepEqual(s.Azure, other.Azure) + case storage.Filesystem: + return reflect.DeepEqual(s.Filesystem, other.Filesystem) + } + + return false +} + func (s *Storage) Cast() error { switch s.Type { case storage.Filesystem: @@ -364,7 +386,7 @@ func (t *BackupTimeouts) StartingStatus() time.Duration { } func GetConfig(ctx context.Context, m connect.Client) (*Config, error) { - res := m.ConfigCollection().FindOne(ctx, bson.D{}) + res := m.ConfigCollection().FindOne(ctx, bson.D{{"profile", nil}}) if err := res.Err(); err != nil { return nil, errors.Wrap(err, "get") } @@ -428,12 +450,10 @@ func SetConfig(ctx context.Context, m connect.Client, cfg *Config) error { // TODO: struct tags to config opts `pbm:"resync,epoch"`? 
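// Editor's note — illustrative, not part of the patch: the
// bson.D{{"profile", nil}} filter used throughout this patch selects the
// main config document only. Profiles are stored with profile:true, while
// the main config omits the field entirely, and in MongoDB a {field: null}
// filter matches documents where the field is null or missing. Spelled out
// as hypothetical helpers:
//
//	func mainConfigFilter() bson.D {
//		return bson.D{{"profile", nil}} // the single main config (field absent)
//	}
//
//	func profileFilter(name string) bson.D {
//		return bson.D{{"profile", true}, {"name", name}} // one named profile
//	}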
_, _ = GetConfig(ctx, m) - _, err = m.ConfigCollection().UpdateOne( - ctx, - bson.D{}, - bson.M{"$set": *cfg}, - options.Update().SetUpsert(true), - ) + _, err = m.ConfigCollection().ReplaceOne(ctx, + bson.D{{"profile", nil}}, + cfg, + options.Replace().SetUpsert(true)) return errors.Wrap(err, "mongo defs.ConfigCollection UpdateOne") } @@ -490,12 +510,9 @@ func SetConfigVar(ctx context.Context, m connect.Client, key, val string) error s3.SDKLogLevel(v.(string), os.Stderr) } - _, err = m.ConfigCollection().UpdateOne( - ctx, - bson.D{}, - bson.M{"$set": bson.M{key: v}}, - ) - + _, err = m.ConfigCollection().UpdateOne(ctx, + bson.D{{"profile", nil}}, + bson.M{"$set": bson.M{key: v}}) return errors.Wrap(err, "write to db") } @@ -521,7 +538,9 @@ func GetConfigVar(ctx context.Context, m connect.Client, key string) (interface{ return nil, errors.New("invalid config key") } - bts, err := m.ConfigCollection().FindOne(ctx, bson.D{}).Raw() + bts, err := m.ConfigCollection(). + FindOne(ctx, bson.D{{"profile", nil}}). + Raw() if err != nil { return nil, errors.Wrap(err, "get from db") } @@ -595,11 +614,9 @@ func ResetEpoch(ctx context.Context, m connect.Client) (Epoch, error) { if err != nil { return Epoch{}, errors.Wrap(err, "get cluster time") } - _, err = m.ConfigCollection().UpdateOne( - ctx, - bson.D{}, - bson.M{"$set": bson.M{"epoch": ct}}, - ) + _, err = m.ConfigCollection().UpdateOne(ctx, + bson.D{{"profile", nil}}, + bson.M{"$set": bson.M{"epoch": ct}}) return Epoch(ct), err } diff --git a/pbm/config/profile.go b/pbm/config/profile.go new file mode 100644 index 000000000..8f6e5a6e4 --- /dev/null +++ b/pbm/config/profile.go @@ -0,0 +1,97 @@ +package config + +import ( + "context" + "os" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo/options" + + "github.com/percona/percona-backup-mongodb/pbm/connect" + "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/storage" + "github.com/percona/percona-backup-mongodb/pbm/storage/s3" +) + +func ListProfiles(ctx context.Context, m connect.Client) ([]Config, error) { + cur, err := m.ConfigCollection().Find(ctx, bson.D{ + {"profile", true}, + }) + if err != nil { + return nil, errors.Wrap(err, "query") + } + + var profiles []Config + err = cur.All(ctx, &profiles) + if err != nil { + return nil, errors.Wrap(err, "decode") + } + + return profiles, nil +} + +func GetProfile(ctx context.Context, m connect.Client, name string) (*Config, error) { + res := m.ConfigCollection().FindOne(ctx, bson.D{ + {"profile", true}, + {"name", name}, + }) + if err := res.Err(); err != nil { + return nil, errors.Wrap(err, "query") + } + + var profile *Config + err := res.Decode(&profile) + if err != nil { + return nil, errors.Wrap(err, "decode") + } + + return profile, nil +} + +func AddProfile(ctx context.Context, m connect.Client, profile *Config) error { + if !profile.IsProfile { + return errors.New("not a profile") + } + if profile.Name == "" { + return errors.New("name is required") + } + + if err := profile.Storage.Cast(); err != nil { + return errors.Wrap(err, "cast storage") + } + + if profile.Storage.Type == storage.S3 { + // call the function for notification purpose. 
+ // warning about unsupported levels will be printed + s3.SDKLogLevel(profile.Storage.S3.DebugLogLevels, os.Stderr) + } + + _, err := m.ConfigCollection().ReplaceOne(ctx, + bson.D{ + {"profile", true}, + {"name", profile.Name}, + }, + profile, + options.Replace().SetUpsert(true)) + if err != nil { + return errors.Wrap(err, "save profile") + } + + return nil +} + +func RemoveProfile(ctx context.Context, m connect.Client, name string) error { + if name == "" { + return errors.New("name is required") + } + + _, err := m.ConfigCollection().DeleteOne(ctx, bson.D{ + {"profile", true}, + {"name", name}, + }) + if err != nil { + return errors.Wrap(err, "query") + } + + return nil +} diff --git a/pbm/ctrl/cmd.go b/pbm/ctrl/cmd.go index d9dcf3295..4a87286a0 100644 --- a/pbm/ctrl/cmd.go +++ b/pbm/ctrl/cmd.go @@ -8,6 +8,7 @@ import ( "go.mongodb.org/mongo-driver/bson/primitive" "github.com/percona/percona-backup-mongodb/pbm/compress" + "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/topo" ) @@ -16,20 +17,26 @@ import ( type Command string const ( - CmdUndefined Command = "" - CmdBackup Command = "backup" - CmdRestore Command = "restore" - CmdReplay Command = "replay" - CmdCancelBackup Command = "cancelBackup" - CmdResync Command = "resync" - CmdPITR Command = "pitr" - CmdDeleteBackup Command = "delete" - CmdDeletePITR Command = "deletePitr" - CmdCleanup Command = "cleanup" + CmdUndefined Command = "" + CmdAddConfigProfile Command = "addConfigProfile" + CmdRemoveConfigProfile Command = "removeConfigProfile" + CmdBackup Command = "backup" + CmdRestore Command = "restore" + CmdReplay Command = "replay" + CmdCancelBackup Command = "cancelBackup" + CmdResync Command = "resync" + CmdPITR Command = "pitr" + CmdDeleteBackup Command = "delete" + CmdDeletePITR Command = "deletePitr" + CmdCleanup Command = "cleanup" ) func (c Command) String() string { switch c { + case CmdAddConfigProfile: + return "Add Config Profile" + case CmdRemoveConfigProfile: + return "Remove Config Profile" case CmdBackup: return "Snapshot backup" case CmdRestore: @@ -75,6 +82,7 @@ func (o OPID) Obj() primitive.ObjectID { type Cmd struct { Cmd Command `bson:"cmd"` + Profile *ProfileCmd `bson:"profile,omitempty"` Backup *BackupCmd `bson:"backup,omitempty"` Restore *RestoreCmd `bson:"restore,omitempty"` Replay *ReplayCmd `bson:"replay,omitempty"` @@ -105,6 +113,12 @@ func (c Cmd) String() string { return buf.String() } +type ProfileCmd struct { + Name string `bson:"name"` + IsProfile bool `bson:"profile"` + Storage config.Storage `bson:"storage"` +} + type BackupCmd struct { Type defs.BackupType `bson:"type"` IncrBase bool `bson:"base"` @@ -113,6 +127,7 @@ type BackupCmd struct { Compression compress.CompressionType `bson:"compression"` CompressionLevel *int `bson:"level,omitempty"` Filelist bool `bson:"filelist,omitempty"` + Profile string `bson:"profile,omitempty"` } func (b BackupCmd) String() string { diff --git a/pbm/ctrl/send.go b/pbm/ctrl/send.go index 5eb3b2377..dfb78cb89 100644 --- a/pbm/ctrl/send.go +++ b/pbm/ctrl/send.go @@ -6,6 +6,7 @@ import ( "go.mongodb.org/mongo-driver/bson/primitive" + "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" @@ -65,6 +66,33 @@ func SendCleanup( return sendCommand(ctx, m, cmd) } +func SendAddConfigProfile( + ctx 
context.Context, + m connect.Client, + name string, + storage config.Storage, +) (OPID, error) { + cmd := Cmd{ + Cmd: CmdAddConfigProfile, + Profile: &ProfileCmd{ + Name: name, + IsProfile: true, + Storage: storage, + }, + } + return sendCommand(ctx, m, cmd) +} + +func SendRemoveConfigProfile(ctx context.Context, m connect.Client, name string) (OPID, error) { + cmd := Cmd{ + Cmd: CmdRemoveConfigProfile, + Profile: &ProfileCmd{ + Name: name, + }, + } + return sendCommand(ctx, m, cmd) +} + func SendResync(ctx context.Context, m connect.Client) (OPID, error) { return sendCommand(ctx, m, Cmd{Cmd: CmdResync}) } diff --git a/sdk/impl.go b/sdk/impl.go index 57531a041..e9c5f5a85 100644 --- a/sdk/impl.go +++ b/sdk/impl.go @@ -78,8 +78,22 @@ func (c *clientImpl) GetConfig(ctx context.Context) (*Config, error) { return config.GetConfig(ctx, c.conn) } -func (c *clientImpl) SetConfig(ctx context.Context, cfg Config) (CommandID, error) { - return NoOpID, config.SetConfig(ctx, c.conn, &cfg) +func (c *clientImpl) ListConfigProfiles(ctx context.Context) ([]config.Config, error) { + return config.ListProfiles(ctx, c.conn) +} + +func (c *clientImpl) GetConfigProfile(ctx context.Context, name string) (*config.Config, error) { + return config.GetProfile(ctx, c.conn, name) +} + +func (c *clientImpl) AddConfigProfile(ctx context.Context, name string, cfg *Config) (CommandID, error) { + opid, err := ctrl.SendAddConfigProfile(ctx, c.conn, name, cfg.Storage) + return CommandID(opid.String()), err +} + +func (c *clientImpl) RemoveConfigProfile(ctx context.Context, name string) (CommandID, error) { + opid, err := ctrl.SendRemoveConfigProfile(ctx, c.conn, name) + return CommandID(opid.String()), err } func (c *clientImpl) GetAllBackups(ctx context.Context) ([]BackupMetadata, error) { diff --git a/sdk/sdk.go b/sdk/sdk.go index 67b9d9c0f..3ba679e7b 100644 --- a/sdk/sdk.go +++ b/sdk/sdk.go @@ -109,6 +109,10 @@ type Client interface { CommandInfo(ctx context.Context, id CommandID) (*Command, error) GetConfig(ctx context.Context) (*Config, error) + ListConfigProfiles(ctx context.Context) ([]config.Config, error) + GetConfigProfile(ctx context.Context, name string) (*config.Config, error) + AddConfigProfile(ctx context.Context, name string, cfg *config.Config) (CommandID, error) + RemoveConfigProfile(ctx context.Context, name string) (CommandID, error) GetAllBackups(ctx context.Context) ([]BackupMetadata, error) GetBackupByName(ctx context.Context, name string, options GetBackupByNameOptions) (*BackupMetadata, error) From 1e8233a9dc4949451bf9814475444a3a85852fee Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 27 May 2024 16:53:44 +0200 Subject: [PATCH 017/203] rename init storage related functions --- cmd/pbm-agent/agent.go | 2 +- cmd/pbm-agent/profile.go | 2 +- pbm/backup/backup.go | 2 +- pbm/resync/rsync.go | 4 ++-- pbm/storage/storage.go | 14 +++++++------- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index e1aa9572f..ad79d0eae 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -401,7 +401,7 @@ func (a *Agent) storStatus(ctx context.Context, log log.LogEvent, forceCheckStor return topo.SubsysStatus{Err: fmt.Sprintf("unable to get storage: %v", err)} } - ok, err := storage.IsStorageInitialized(ctx, stg) + ok, err := storage.IsInitialized(ctx, stg) if err != nil { errStr := fmt.Sprintf("storage check failed with: %v", err) return topo.SubsysStatus{Err: errStr} diff --git a/cmd/pbm-agent/profile.go b/cmd/pbm-agent/profile.go index 
780a2fa9c..04a311adf 100644 --- a/cmd/pbm-agent/profile.go +++ b/cmd/pbm-agent/profile.go @@ -96,7 +96,7 @@ func (a *Agent) handleAddConfigProfile( return } - err = storage.InitStorage(ctx, stg) + err = storage.Initialize(ctx, stg) if err != nil { err = errors.Wrap(err, "init storage") return diff --git a/pbm/backup/backup.go b/pbm/backup/backup.go index 7a39b79b9..e0da6f9dd 100644 --- a/pbm/backup/backup.go +++ b/pbm/backup/backup.go @@ -271,7 +271,7 @@ func (b *Backup) Run(ctx context.Context, bcp *ctrl.BackupCmd, opid ctrl.OPID, l } if inf.IsLeader() { - err = storage.InitStorage(ctx, stg) + err = storage.Initialize(ctx, stg) if err != nil { return errors.Wrap(err, "init storage") } diff --git a/pbm/resync/rsync.go b/pbm/resync/rsync.go index d78380184..aee6e0272 100644 --- a/pbm/resync/rsync.go +++ b/pbm/resync/rsync.go @@ -32,12 +32,12 @@ func ResyncStorage(ctx context.Context, m connect.Client, l log.LogEvent) error return errors.Wrap(err, "check read access") } - err = storage.InitStorage(ctx, stg) + err = storage.Initialize(ctx, stg) if err != nil { return errors.Wrap(err, "init storage") } } else { - err = storage.ReinitStorage(ctx, stg) + err = storage.Reinitialize(ctx, stg) if err != nil { return errors.Wrap(err, "reinit storage") } diff --git a/pbm/storage/storage.go b/pbm/storage/storage.go index 376efe1b4..c94de5f7f 100644 --- a/pbm/storage/storage.go +++ b/pbm/storage/storage.go @@ -64,8 +64,8 @@ func ParseType(s string) Type { } } -// IsStorageInitialized checks if there is PBM init file on the storage. -func IsStorageInitialized(ctx context.Context, stg Storage) (bool, error) { +// IsInitialized checks if there is PBM init file on the storage. +func IsInitialized(ctx context.Context, stg Storage) (bool, error) { _, err := stg.FileStat(defs.StorInitFile) if err != nil { if errors.Is(err, ErrNotExist) { @@ -123,10 +123,10 @@ func HasReadAccess(ctx context.Context, stg Storage) error { return nil } -// InitStorage write current PBM version to PBM init file. +// Initialize write current PBM version to PBM init file. // // It does not handle "file already exists" error. -func InitStorage(ctx context.Context, stg Storage) error { +func Initialize(ctx context.Context, stg Storage) error { err := stg.Save(defs.StorInitFile, strings.NewReader(version.Current().Version), 0) if err != nil { return errors.Wrap(err, "write init file") @@ -135,16 +135,16 @@ func InitStorage(ctx context.Context, stg Storage) error { return nil } -// ReinitStorage delete existing PBM init file and create new once with current PBM version. +// Reinitialize delete existing PBM init file and create new once with current PBM version. // // It expects that the file exists. 
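// Editor's note — illustrative sketch, not part of the patch: a typical
// caller after this rename checks for the init file first, then either
// initializes a fresh storage or reinitializes an existing one (which also
// verifies write access and stamps the current PBM version):

func ensureInitializedSketch(ctx context.Context, stg storage.Storage) error {
	ok, err := storage.IsInitialized(ctx, stg)
	if err != nil {
		return errors.Wrap(err, "check init file")
	}
	if !ok {
		return storage.Initialize(ctx, stg)
	}
	return storage.Reinitialize(ctx, stg)
}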
-func ReinitStorage(ctx context.Context, stg Storage) error { +func Reinitialize(ctx context.Context, stg Storage) error { err := stg.Delete(defs.StorInitFile) if err != nil { return errors.Wrap(err, "delete init file") } - return InitStorage(ctx, stg) + return Initialize(ctx, stg) } // rwError multierror for the read/compress/write-to-store operations set From 842c547cc237d1742af92747b52f8f38c9cae3af Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 27 May 2024 23:23:13 +0200 Subject: [PATCH 018/203] decompose resync package --- cmd/pbm-agent/agent.go | 9 +- cmd/pbm-agent/delete.go | 2 +- e2e-tests/pkg/pbm/mongo_pbm.go | 8 +- pbm/resync/rsync.go | 230 ++++++++++++++++++++++++--------- 4 files changed, 182 insertions(+), 67 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index ad79d0eae..5de16f4a3 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -220,7 +220,14 @@ func (a *Agent) Resync(ctx context.Context, opid ctrl.OPID, ep config.Epoch) { }() l.Info("started") - err = resync.ResyncStorage(ctx, a.leadConn, l) + + stg, err := util.GetStorage(ctx, a.leadConn, l) + if err != nil { + l.Error("unable to get backup store: %v", err) + return + } + + err = resync.Resync(ctx, a.leadConn, stg) if err != nil { l.Error("%v", err) return diff --git a/cmd/pbm-agent/delete.go b/cmd/pbm-agent/delete.go index 6306fbd19..5f02a1ad0 100644 --- a/cmd/pbm-agent/delete.go +++ b/cmd/pbm-agent/delete.go @@ -293,7 +293,7 @@ func (a *Agent) Cleanup(ctx context.Context, d *ctrl.CleanupCmd, opid ctrl.OPID, l.Error(err.Error()) } - err = resync.ResyncStorage(ctx, a.leadConn, l) + err = resync.Resync(ctx, a.leadConn, stg) if err != nil { l.Error("storage resync: " + err.Error()) } diff --git a/e2e-tests/pkg/pbm/mongo_pbm.go b/e2e-tests/pkg/pbm/mongo_pbm.go index 896d8a625..9f6bffb9f 100644 --- a/e2e-tests/pkg/pbm/mongo_pbm.go +++ b/e2e-tests/pkg/pbm/mongo_pbm.go @@ -63,7 +63,13 @@ func (m *MongoPBM) Storage(ctx context.Context) (storage.Storage, error) { func (m *MongoPBM) StoreResync(ctx context.Context) error { l := log.FromContext(ctx). NewEvent(string(ctrl.CmdResync), "", "", primitive.Timestamp{}) - return resync.ResyncStorage(ctx, m.conn, l) + + stg, err := util.GetStorage(ctx, m.conn, l) + if err != nil { + return errors.Wrap(err, "unable to get backup store") + } + + return resync.Resync(ctx, m.conn, stg) } func (m *MongoPBM) Conn() connect.Client { diff --git a/pbm/resync/rsync.go b/pbm/resync/rsync.go index aee6e0272..4ec8e9c4e 100644 --- a/pbm/resync/rsync.go +++ b/pbm/resync/rsync.go @@ -6,7 +6,6 @@ import ( "strings" "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/mongo/options" "github.com/percona/percona-backup-mongodb/pbm/backup" "github.com/percona/percona-backup-mongodb/pbm/connect" @@ -16,17 +15,16 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/oplog" "github.com/percona/percona-backup-mongodb/pbm/restore" "github.com/percona/percona-backup-mongodb/pbm/storage" - "github.com/percona/percona-backup-mongodb/pbm/util" ) -// ResyncStorage updates PBM metadata (snapshots and pitr) according to the data in the storage -func ResyncStorage(ctx context.Context, m connect.Client, l log.LogEvent) error { - stg, err := util.GetStorage(ctx, m, l) - if err != nil { - return errors.Wrap(err, "unable to get backup store") - } +// Resync sync oplog, backup, and restore meta from provided storage. +// +// It checks for read and write permissions, drops all meta from the database +// and populate it again by reading meta from the storage. 
+func Resync(ctx context.Context, conn connect.Client, stg storage.Storage) error { + l := log.LogEventFromContext(ctx) - err = storage.HasReadAccess(ctx, stg) + err := storage.HasReadAccess(ctx, stg) if err != nil { if !errors.Is(err, storage.ErrUninitialized) { return errors.Wrap(err, "check read access") @@ -37,83 +35,79 @@ func ResyncStorage(ctx context.Context, m connect.Client, l log.LogEvent) error return errors.Wrap(err, "init storage") } } else { + // check write permission and update PBM version err = storage.Reinitialize(ctx, stg) if err != nil { return errors.Wrap(err, "reinit storage") } } - rstrs, err := stg.List(defs.PhysRestoresDir, ".json") + err = resyncPhysicalRestores(ctx, conn, stg) if err != nil { - return errors.Wrap(err, "get physical restores list from the storage") + l.Error("resync physical restore metadata") } - l.Debug("got physical restores list: %v", len(rstrs)) - for _, rs := range rstrs { - rname := strings.TrimSuffix(rs.Name, ".json") - rmeta, err := restore.GetPhysRestoreMeta(rname, stg, l) - if err != nil { - l.Error("get meta for restore %s: %v", rs.Name, err) - if rmeta == nil { - continue - } - } - _, err = m.RestoresCollection().ReplaceOne( - ctx, - bson.D{{"name", rmeta.Name}}, - rmeta, - options.Replace().SetUpsert(true), - ) - if err != nil { - return errors.Wrapf(err, "upsert restore %s/%s", rmeta.Name, rmeta.Backup) - } + err = resyncBackupList(ctx, conn, stg) + if err != nil { + l.Error("resync backup metadata") } - bcps, err := stg.List("", defs.MetadataFileSuffix) + err = resyncOplogRange(ctx, conn, stg) if err != nil { - return errors.Wrap(err, "get a backups list from the storage") + l.Error("resync oplog range") } - l.Debug("got backups list: %v", len(bcps)) - _, err = m.BcpCollection().DeleteMany(ctx, bson.M{}) + return nil +} + +func resyncBackupList( + ctx context.Context, + conn connect.Client, + stg storage.Storage, +) error { + l := log.LogEventFromContext(ctx) + + backupList, err := getAllBackupMetaFromStorage(ctx, stg) if err != nil { - return errors.Wrapf(err, "clean up %s", defs.BcpCollection) + return errors.Wrap(err, "get all backups meta from the storage") } - _, err = m.PITRChunksCollection().DeleteMany(ctx, bson.M{}) - if err != nil { - return errors.Wrapf(err, "clean up %s", defs.PITRChunksCollection) + l.Debug("got backups list: %v", len(backupList)) + + if len(backupList) == 0 { + return nil } - var ins []interface{} - for _, b := range bcps { - l.Debug("bcp: %v", b.Name) + docs := make([]any, len(backupList)) + for i, m := range backupList { + l.Debug("bcp: %v", m.Name) - d, err := stg.SourceReader(b.Name) - if err != nil { - return errors.Wrapf(err, "read meta for %v", b.Name) - } + docs[i] = m + } - v := backup.BackupMeta{} - err = json.NewDecoder(d).Decode(&v) - d.Close() - if err != nil { - return errors.Wrapf(err, "unmarshal backup meta [%s]", b.Name) - } - err = backup.CheckBackupFiles(ctx, &v, stg) - if err != nil { - l.Warning("skip snapshot %s: %v", v.Name, err) - v.Status = defs.StatusError - v.Err = err.Error() - } - ins = append(ins, v) + _, err = conn.BcpCollection().DeleteMany(ctx, bson.M{}) + if err != nil { + return errors.Wrapf(err, "delete all backup meta from db") } - if len(ins) != 0 { - _, err = m.BcpCollection().InsertMany(ctx, ins) - if err != nil { - return errors.Wrap(err, "insert retrieved backups meta") - } + _, err = conn.BcpCollection().InsertMany(ctx, docs) + if err != nil { + return errors.Wrap(err, "insert backups meta into db") + } + + return nil +} + +func resyncOplogRange( + ctx 
context.Context, + conn connect.Client, + stg storage.Storage, +) error { + l := log.LogEventFromContext(ctx) + + _, err := conn.PITRChunksCollection().DeleteMany(ctx, bson.M{}) + if err != nil { + return errors.Wrapf(err, "clean up %s", defs.PITRChunksCollection) } pitrf, err := stg.List(defs.PITRfsPrefix, "") @@ -142,10 +136,118 @@ func ResyncStorage(ctx context.Context, m connect.Client, l log.LogEvent) error return nil } - _, err = m.PITRChunksCollection().InsertMany(ctx, pitr) + _, err = conn.PITRChunksCollection().InsertMany(ctx, pitr) if err != nil { return errors.Wrap(err, "insert retrieved pitr meta") } return nil } + +func resyncPhysicalRestores( + ctx context.Context, + conn connect.Client, + stg storage.Storage, +) error { + restoreFiles, err := stg.List(defs.PhysRestoresDir, ".json") + if err != nil { + return errors.Wrap(err, "get physical restores list from the storage") + } + + log.LogEventFromContext(ctx). + Debug("got physical restores list: %v", len(restoreFiles)) + + if len(restoreFiles) == 0 { + return nil + } + + restoreMeta, err := getAllRestoreMetaFromStorage(ctx, stg) + if err != nil { + return errors.Wrap(err, "get all restore meta from storage") + } + + docs := make([]any, len(restoreMeta)) + for i, m := range restoreMeta { + docs[i] = m + } + + _, err = conn.RestoresCollection().DeleteMany(ctx, bson.D{}) + if err != nil { + return errors.Wrap(err, "delete all documents") + } + + _, err = conn.RestoresCollection().InsertMany(ctx, docs) + if err != nil { + return errors.Wrap(err, "insert restore meta into db") + } + + return nil +} + +func getAllBackupMetaFromStorage( + ctx context.Context, + stg storage.Storage, +) ([]*backup.BackupMeta, error) { + l := log.LogEventFromContext(ctx) + + backupFiles, err := stg.List("", defs.MetadataFileSuffix) + if err != nil { + return nil, errors.Wrap(err, "get a backups list from the storage") + } + + backupMeta := make([]*backup.BackupMeta, 0, len(backupFiles)) + for _, b := range backupFiles { + d, err := stg.SourceReader(b.Name) + if err != nil { + l.Error("read meta for %v", b.Name) + continue + } + + var meta *backup.BackupMeta + err = json.NewDecoder(d).Decode(&meta) + d.Close() + if err != nil { + l.Error("unmarshal backup meta [%s]", b.Name) + continue + } + + err = backup.CheckBackupFiles(ctx, meta, stg) + if err != nil { + l.Warning("skip snapshot %s: %v", meta.Name, err) + meta.Status = defs.StatusError + meta.Err = err.Error() + } + + backupMeta = append(backupMeta, meta) + } + + return backupMeta, nil +} + +func getAllRestoreMetaFromStorage( + ctx context.Context, + stg storage.Storage, +) ([]*restore.RestoreMeta, error) { + l := log.LogEventFromContext(ctx) + + restoreMeta, err := stg.List(defs.PhysRestoresDir, ".json") + if err != nil { + return nil, errors.Wrap(err, "get physical restores list from the storage") + } + + rv := make([]*restore.RestoreMeta, 0, len(restoreMeta)) + for _, file := range restoreMeta { + filename := strings.TrimSuffix(file.Name, ".json") + meta, err := restore.GetPhysRestoreMeta(filename, stg, l) + if err != nil { + l.Error("get restore meta from storage: %s: %v", file.Name, err) + if meta == nil { + continue + } + } + + rv = append(rv, meta) + } + + return rv, nil +} From 9b7c0e2cbdb96f5276ae1e2399c7d515b4346a68 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 30 May 2024 14:10:27 +0200 Subject: [PATCH 019/203] profile sync --- cmd/pbm-agent/agent.go | 86 +++++++++++++++++++-------- cmd/pbm-agent/delete.go | 9 ++- cmd/pbm-agent/profile.go | 84 +++++++++++++++++++++++++-- 
cmd/pbm/main.go | 32 +++++++++- cmd/pbm/profile.go | 100 +++++++++++++++++++++++++------- e2e-tests/pkg/pbm/mongo_pbm.go | 8 ++- pbm/config/config.go | 2 +- pbm/ctrl/cmd.go | 7 +++ pbm/ctrl/send.go | 21 +++++++ pbm/resync/rsync.go | 103 +++++++++++++++++++++++---------- sdk/impl.go | 5 ++ sdk/sdk.go | 11 ++++ 12 files changed, 378 insertions(+), 90 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 5de16f4a3..92ff67a97 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -136,10 +136,6 @@ func (a *Agent) Start(ctx context.Context) error { logger.Printf("got epoch %v", ep) switch cmd.Cmd { - case ctrl.CmdAddConfigProfile: - a.handleAddConfigProfile(ctx, cmd.Profile, cmd.OPID, ep) - case ctrl.CmdRemoveConfigProfile: - a.handleRemoveConfigProfile(ctx, cmd.Profile, cmd.OPID, ep) case ctrl.CmdBackup: // backup runs in the go-routine so it can be canceled go a.Backup(ctx, cmd.Backup, cmd.OPID, ep) @@ -149,8 +145,12 @@ func (a *Agent) Start(ctx context.Context) error { a.Restore(ctx, cmd.Restore, cmd.OPID, ep) case ctrl.CmdReplay: a.OplogReplay(ctx, cmd.Replay, cmd.OPID, ep) + case ctrl.CmdAddConfigProfile: + a.handleAddConfigProfile(ctx, cmd.Profile, cmd.OPID, ep) + case ctrl.CmdRemoveConfigProfile: + a.handleRemoveConfigProfile(ctx, cmd.Profile, cmd.OPID, ep) case ctrl.CmdResync: - a.Resync(ctx, cmd.OPID, ep) + a.Resync(ctx, cmd.Resync, cmd.OPID, ep) case ctrl.CmdDeleteBackup: a.Delete(ctx, cmd.Delete, cmd.OPID, ep) case ctrl.CmdDeletePITR: @@ -175,7 +175,11 @@ func (a *Agent) Start(ctx context.Context) error { } // Resync uploads a backup list from the remote store -func (a *Agent) Resync(ctx context.Context, opid ctrl.OPID, ep config.Epoch) { +func (a *Agent) Resync(ctx context.Context, cmd *ctrl.ResyncCmd, opid ctrl.OPID, ep config.Epoch) { + if cmd == nil { + cmd = &ctrl.ResyncCmd{} + } + logger := log.FromContext(ctx) l := logger.NewEvent(string(ctrl.CmdResync), "", opid.String(), ep.TS()) ctx = log.SetLogEventToContext(ctx, l) @@ -194,13 +198,12 @@ func (a *Agent) Resync(ctx context.Context, opid ctrl.OPID, ep config.Epoch) { return } - epts := ep.TS() lock := lock.NewLock(a.leadConn, lock.LockHeader{ Type: ctrl.CmdResync, Replset: nodeInfo.SetName, Node: nodeInfo.Me, OPID: opid.String(), - Epoch: &epts, + Epoch: util.Ref(ep.TS()), }) got, err := a.acquireLock(ctx, lock, l) @@ -221,26 +224,61 @@ func (a *Agent) Resync(ctx context.Context, opid ctrl.OPID, ep config.Epoch) { l.Info("started") - stg, err := util.GetStorage(ctx, a.leadConn, l) - if err != nil { - l.Error("unable to get backup store: %v", err) - return - } + if cmd.All { + profiles, err := config.ListProfiles(ctx, a.leadConn) + if err != nil { + l.Error("get config profiles: %v", err) + return + } - err = resync.Resync(ctx, a.leadConn, stg) - if err != nil { - l.Error("%v", err) - return - } - l.Info("succeed") + err = resync.ClearAllBackupMetaFromExternal(ctx, a.leadConn) + if err != nil { + l.Error("clear all backup meta from external storages: %v", err) + return + } - epch, err := config.ResetEpoch(ctx, a.leadConn) - if err != nil { - l.Error("reset epoch: %v", err) - return + for i := range profiles { + cfg := profiles[i] + err = resync.SyncBackupList(ctx, a.leadConn, &cfg.Storage, cfg.Name) + if err != nil { + l.Error("sync backup list from external storage %q: %v", cfg.Name, err) + return + } + } + } else if !cmd.All && cmd.Name != "" { + cfg, err := config.GetProfile(ctx, a.leadConn, cmd.Name) + if err != nil { + l.Error("get config profile: %v", err) + return + } + + err = 
resync.SyncBackupList(ctx, a.leadConn, &cfg.Storage, cfg.Name) + if err != nil { + l.Error("sync backup list from external storage %q: %v", cfg.Name, err) + return + } + } else if !cmd.All && cmd.Name == "" { + cfg, err := config.GetConfig(ctx, a.leadConn) + if err != nil { + l.Error("get config: %v", err) + return + } + + err = resync.Resync(ctx, a.leadConn, &cfg.Storage) + if err != nil { + l.Error("resync from main storage: %v", err) + return + } + + epch, err := config.ResetEpoch(ctx, a.leadConn) + if err != nil { + l.Error("reset epoch: %v", err) + return + } + l.Debug("epoch set to %v", epch) } - l.Debug("epoch set to %v", epch) + l.Info("succeed") } // acquireLock tries to acquire the lock. If there is a stale lock diff --git a/cmd/pbm-agent/delete.go b/cmd/pbm-agent/delete.go index 5f02a1ad0..7831e003e 100644 --- a/cmd/pbm-agent/delete.go +++ b/cmd/pbm-agent/delete.go @@ -255,7 +255,12 @@ func (a *Agent) Cleanup(ctx context.Context, d *ctrl.CleanupCmd, opid ctrl.OPID, return } - stg, err := util.GetStorage(ctx, a.leadConn, l) + cfg, err := config.GetConfig(ctx, a.leadConn) + if err != nil { + l.Error("get config: %v", err) + } + + stg, err := util.StorageFromConfig(&cfg.Storage, l) if err != nil { l.Error("get storage: " + err.Error()) } @@ -293,7 +298,7 @@ func (a *Agent) Cleanup(ctx context.Context, d *ctrl.CleanupCmd, opid ctrl.OPID, l.Error(err.Error()) } - err = resync.Resync(ctx, a.leadConn, stg) + err = resync.Resync(ctx, a.leadConn, &cfg.Storage) if err != nil { l.Error("storage resync: " + err.Error()) } diff --git a/cmd/pbm-agent/profile.go b/cmd/pbm-agent/profile.go index 04a311adf..b9e2113e5 100644 --- a/cmd/pbm-agent/profile.go +++ b/cmd/pbm-agent/profile.go @@ -8,6 +8,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/lock" "github.com/percona/percona-backup-mongodb/pbm/log" + "github.com/percona/percona-backup-mongodb/pbm/resync" "github.com/percona/percona-backup-mongodb/pbm/storage" "github.com/percona/percona-backup-mongodb/pbm/topo" "github.com/percona/percona-backup-mongodb/pbm/util" @@ -26,13 +27,13 @@ func (a *Agent) handleAddConfigProfile( l.Error("missed command") return } + + l := logger.NewEvent(string(ctrl.CmdAddConfigProfile), cmd.Name, opid.String(), epoch.TS()) if cmd.Name == "" { - l := logger.NewEvent(string(ctrl.CmdAddConfigProfile), "", opid.String(), epoch.TS()) l.Error("missed config profile name") return } - l := logger.NewEvent(string(ctrl.CmdAddConfigProfile), cmd.Name, opid.String(), epoch.TS()) ctx = log.SetLogEventToContext(ctx, l) var err error @@ -47,7 +48,7 @@ func (a *Agent) handleAddConfigProfile( err = errors.Wrap(err, "get node info") return } - if !nodeInfo.IsClusterLeader() { + if !nodeInfo.IsLeader() { l.Debug("not leader. skip") return } @@ -128,13 +129,13 @@ func (a *Agent) handleRemoveConfigProfile( l.Error("missed command") return } + + l := logger.NewEvent(string(ctrl.CmdRemoveConfigProfile), cmd.Name, opid.String(), epoch.TS()) if cmd.Name == "" { - l := logger.NewEvent(string(ctrl.CmdRemoveConfigProfile), "", opid.String(), epoch.TS()) l.Error("missed config profile name") return } - l := logger.NewEvent(string(ctrl.CmdRemoveConfigProfile), cmd.Name, opid.String(), epoch.TS()) ctx = log.SetLogEventToContext(ctx, l) var err error @@ -149,7 +150,7 @@ func (a *Agent) handleRemoveConfigProfile( err = errors.Wrap(err, "get node info") return } - if !nodeInfo.IsClusterLeader() { + if !nodeInfo.IsLeader() { l.Debug("not leader. 
skip") return } @@ -179,9 +180,80 @@ func (a *Agent) handleRemoveConfigProfile( } }() + err = resync.ClearBackupList(ctx, a.leadConn, cmd.Name) + if err != nil { + l.Error("clear backup list: %v", err) + return + } + err = config.RemoveProfile(ctx, a.leadConn, cmd.Name) if err != nil { l.Error("delete document", err) return } } + +func (a *Agent) handleSyncMetaFrom( + ctx context.Context, + cmd *ctrl.ResyncCmd, + opid ctrl.OPID, + epoch config.Epoch, +) { + logger := log.FromContext(ctx) + l := logger.NewEvent(string(ctrl.CmdResync), "", opid.String(), epoch.TS()) + ctx = log.SetLogEventToContext(ctx, l) + + var err error + defer func() { + if err != nil { + l.Error("failed to add config profile: %v", err) + } + }() + + nodeInfo, err := topo.GetNodeInfo(ctx, a.nodeConn) + if err != nil { + err = errors.Wrap(err, "get node info") + return + } + if !nodeInfo.IsClusterLeader() { + l.Debug("not leader. skip") + return + } + + lck := lock.NewLock(a.leadConn, lock.LockHeader{ + Type: ctrl.CmdAddConfigProfile, + Replset: a.brief.SetName, + Node: a.brief.Me, + OPID: opid.String(), + Epoch: util.Ref(epoch.TS()), + }) + + got, err := a.acquireLock(ctx, lck, l) + if err != nil { + l.Error("acquiring lock: %v", err) + return + } + if !got { + l.Error("lock not acquired") + return + } + defer func() { + l.Debug("releasing lock") + err = lck.Release() + if err != nil { + l.Error("unable to release lock %v: %v", lck, err) + } + }() + + cfg, err := config.GetConfig(ctx, a.leadConn) + if err != nil { + err = errors.Wrap(err, "get storage config") + return + } + + err = resync.SyncBackupList(ctx, a.leadConn, &cfg.Storage, cmd.Name) + if err != nil { + err = errors.Wrap(err, "sync backup list") + return + } +} diff --git a/cmd/pbm/main.go b/cmd/pbm/main.go index be7676987..8492cb7cb 100644 --- a/cmd/pbm/main.go +++ b/cmd/pbm/main.go @@ -118,19 +118,26 @@ func main() { addConfigProfileCmd := configProfileCmd. Command("add", "Save configuration profile") addConfigProfileCmd. - Flag("name", "Profile name"). + Arg("name", "Profile name"). Required(). StringVar(&addConfigProfileOpts.name) addConfigProfileCmd. Arg("file", "Path to configuration file"). Required(). - StringVar(&addConfigProfileOpts.file) + FileVar(&addConfigProfileOpts.file) + addConfigProfileCmd. + Flag("sync", "Sync from the external storage"). + BoolVar(&addConfigProfileOpts.sync) + addConfigProfileCmd. + Flag("wait", "Wait for done by agents"). + Short('w'). + BoolVar(&addConfigProfileOpts.wait) removeConfigProfileOpts := removeConfigProfileOptions{} removeConfigProfileCmd := configProfileCmd. Command("remove", "Remove configuration profile") removeConfigProfileCmd. - Arg("profile-name", "Profile name"). + Arg("name", "Profile name"). Required(). StringVar(&removeConfigProfileOpts.name) removeConfigProfileCmd. @@ -138,6 +145,23 @@ func main() { Short('w'). BoolVar(&removeConfigProfileOpts.wait) + syncConfigProfileOpts := syncConfigProfileOptions{} + syncConfigProfileCmd := configProfileCmd. + Command("sync", "Sync backup list from configuration profile") + syncConfigProfileCmd. + Arg("profile", "Profile name"). + StringVar(&syncConfigProfileOpts.name) + syncConfigProfileCmd. + Flag("all", "Sync from all external storages"). + BoolVar(&syncConfigProfileOpts.all) + syncConfigProfileCmd. + Flag("clear", "Clear backup list (can be used with profile name or --all)"). + BoolVar(&syncConfigProfileOpts.clear) + syncConfigProfileCmd. + Flag("wait", "Wait for done by agents"). + Short('w'). 
+ BoolVar(&syncConfigProfileOpts.wait) + backupCmd := pbmCmd.Command("backup", "Make backup") backupOptions := backupOpts{} backupCmd.Flag("compression", "Compression type //////"). @@ -457,6 +481,8 @@ func main() { out, err = handleAddConfigProfile(ctx, pbm, addConfigProfileOpts) case removeConfigProfileCmd.FullCommand(): out, err = handleRemoveConfigProfile(ctx, pbm, removeConfigProfileOpts) + case syncConfigProfileCmd.FullCommand(): + out, err = handleSyncConfigProfile(ctx, pbm, syncConfigProfileOpts) case backupCmd.FullCommand(): backupOptions.name = time.Now().UTC().Format(time.RFC3339) out, err = runBackup(ctx, conn, pbm, &backupOptions, pbmOutF) diff --git a/cmd/pbm/profile.go b/cmd/pbm/profile.go index f8a6047ca..b68c5ed61 100644 --- a/cmd/pbm/profile.go +++ b/cmd/pbm/profile.go @@ -16,8 +16,11 @@ type descConfigProfileOptions struct { } type addConfigProfileOptions struct { - name string - file string + name string + file *os.File + force bool + sync bool + wait bool } type removeConfigProfileOptions struct { @@ -25,6 +28,13 @@ type removeConfigProfileOptions struct { wait bool } +type syncConfigProfileOptions struct { + name string + all bool + clear bool + wait bool +} + type configProfileList struct { configs []config.Config } @@ -59,15 +69,10 @@ func handleDescibeConfigProfiles( opts descConfigProfileOptions, ) (fmt.Stringer, error) { if opts.name == "" { - return nil, errors.New("name is required") - } - - profile, err := pbm.GetConfigProfile(ctx, opts.name) - if err != nil { - return nil, err + return nil, errors.New("argument `name` should not be empty") } - return profile, nil + return pbm.GetConfigProfile(ctx, opts.name) } func handleAddConfigProfile( @@ -76,25 +81,43 @@ func handleAddConfigProfile( opts addConfigProfileOptions, ) (fmt.Stringer, error) { if opts.name == "" { - return nil, errors.New("name is required") + return nil, errors.New("argument `name` should not be empty") } - - var err error - var cfg *config.Config - if opts.file == "-" { - cfg, err = config.Parse(os.Stdin) - } else { - cfg, err = readConfigFromFile(opts.file) + if opts.file == nil { + return nil, errors.New("missed file: nil value") } + + cfg, err := config.Parse(opts.file) if err != nil { - return nil, errors.Wrap(err, "unable to get new config") + return nil, errors.Wrap(err, "parse config") } - _, err = pbm.AddConfigProfile(ctx, opts.name, cfg) + cid, err := pbm.AddConfigProfile(ctx, opts.name, cfg) if err != nil { return nil, errors.Wrap(err, "add config profile") } + if opts.wait { + err = sdk.WaitForAddProfile(ctx, pbm, cid) + if err != nil { + return nil, errors.Wrap(err, "wait") + } + } + + if opts.sync { + cid, err := pbm.SyncFromExternalStorage(ctx, opts.name) + if err != nil { + return nil, errors.Wrap(err, "sync") + } + + if opts.wait { + err = sdk.WaitForResync(ctx, pbm, cid) + if err != nil { + return nil, errors.Wrap(err, "wait") + } + } + } + return &outMsg{"OK"}, nil } @@ -104,13 +127,48 @@ func handleRemoveConfigProfile( opts removeConfigProfileOptions, ) (fmt.Stringer, error) { if opts.name == "" { - return nil, errors.New("name is required") + return nil, errors.New("argument `name` should not be empty") } - _, err := pbm.RemoveConfigProfile(ctx, opts.name) + cid, err := pbm.RemoveConfigProfile(ctx, opts.name) if err != nil { return nil, errors.Wrap(err, "sdk: remove config profile") } + if opts.wait { + err = sdk.WaitForRemoveProfile(ctx, pbm, cid) + if err != nil { + return nil, errors.Wrap(err, "wait") + } + } + + return &outMsg{"OK"}, nil +} + +func 
handleSyncConfigProfile( + ctx context.Context, + pbm sdk.Client, + opts syncConfigProfileOptions, +) (fmt.Stringer, error) { + if opts.name == "" && !opts.all { + return nil, errors.New("--profile or --all is required") + } + // TODO: finish here + if opts.name == "" { + return nil, errors.New("argument `name` should not be empty") + } + + cid, err := pbm.SyncFromExternalStorage(ctx, opts.name) + if err != nil { + return nil, errors.Wrap(err, "sync from storage") + } + + if opts.wait { + err = sdk.WaitForResync(ctx, pbm, cid) + if err != nil { + return nil, errors.Wrap(err, "wait") + } + } + return &outMsg{"OK"}, nil } diff --git a/e2e-tests/pkg/pbm/mongo_pbm.go b/e2e-tests/pkg/pbm/mongo_pbm.go index 9f6bffb9f..e8d7ea22a 100644 --- a/e2e-tests/pkg/pbm/mongo_pbm.go +++ b/e2e-tests/pkg/pbm/mongo_pbm.go @@ -8,6 +8,7 @@ import ( "go.mongodb.org/mongo-driver/mongo" "github.com/percona/percona-backup-mongodb/pbm/backup" + "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/ctrl" "github.com/percona/percona-backup-mongodb/pbm/defs" @@ -63,13 +64,14 @@ func (m *MongoPBM) Storage(ctx context.Context) (storage.Storage, error) { func (m *MongoPBM) StoreResync(ctx context.Context) error { l := log.FromContext(ctx). NewEvent(string(ctrl.CmdResync), "", "", primitive.Timestamp{}) + ctx = log.SetLogEventToContext(ctx, l) - stg, err := util.GetStorage(ctx, m.conn, l) + cfg, err := config.GetConfig(ctx, m.conn) if err != nil { - return errors.Wrap(err, "unable to get backup store") + return errors.Wrap(err, "get config") } - return resync.Resync(ctx, m.conn, stg) + return resync.Resync(ctx, m.conn, &cfg.Storage) } func (m *MongoPBM) Conn() connect.Client { diff --git a/pbm/config/config.go b/pbm/config/config.go index c83f8957b..50205d765 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -173,7 +173,7 @@ func (c *Config) BackupSlicerInterval() time.Duration { //nolint:lll type GlobalSlicer struct { Enabled bool `bson:"enabled" json:"enabled" yaml:"enabled"` - Interval float64 `bson:"oplogSpanMin" json:"oplogSpanMin" yaml:"oplogSpanMin"` + Interval float64 `bson:"oplogSpanMin,omitempty" json:"oplogSpanMin,omitempty" yaml:"oplogSpanMin,omitempty"` OplogOnly bool `bson:"oplogOnly,omitempty" json:"oplogOnly,omitempty" yaml:"oplogOnly,omitempty"` Compression compress.CompressionType `bson:"compression,omitempty" json:"compression,omitempty" yaml:"compression,omitempty"` CompressionLevel *int `bson:"compressionLevel,omitempty" json:"compressionLevel,omitempty" yaml:"compressionLevel,omitempty"` diff --git a/pbm/ctrl/cmd.go b/pbm/ctrl/cmd.go index 4a87286a0..2eb7542d6 100644 --- a/pbm/ctrl/cmd.go +++ b/pbm/ctrl/cmd.go @@ -82,6 +82,7 @@ func (o OPID) Obj() primitive.ObjectID { type Cmd struct { Cmd Command `bson:"cmd"` + Resync *ResyncCmd `bson:"resync,omitempty"` Profile *ProfileCmd `bson:"profile,omitempty"` Backup *BackupCmd `bson:"backup,omitempty"` Restore *RestoreCmd `bson:"restore,omitempty"` @@ -119,6 +120,12 @@ type ProfileCmd struct { Storage config.Storage `bson:"storage"` } +type ResyncCmd struct { + Name string `bson:"name,omitempty"` + All bool `bson:"all,omitempty"` + Clear bool `bson:"clear,omitempty"` +} + type BackupCmd struct { Type defs.BackupType `bson:"type"` IncrBase bool `bson:"base"` diff --git a/pbm/ctrl/send.go b/pbm/ctrl/send.go index dfb78cb89..1fc3fbef4 100644 --- a/pbm/ctrl/send.go +++ b/pbm/ctrl/send.go @@ -93,6 +93,27 @@ func SendRemoveConfigProfile(ctx 
context.Context, m connect.Client, name string) return sendCommand(ctx, m, cmd) } +func SendSyncMetaFrom(ctx context.Context, m connect.Client, name string) (OPID, error) { + cmd := Cmd{ + Cmd: CmdResync, + Resync: &ResyncCmd{ + Name: name, + }, + } + return sendCommand(ctx, m, cmd) +} + +func SendClearMetaFrom(ctx context.Context, m connect.Client, name string) (OPID, error) { + cmd := Cmd{ + Cmd: CmdResync, + Resync: &ResyncCmd{ + Name: name, + Clear: true, + }, + } + return sendCommand(ctx, m, cmd) +} + func SendResync(ctx context.Context, m connect.Client) (OPID, error) { return sendCommand(ctx, m, Cmd{Cmd: CmdResync}) } diff --git a/pbm/resync/rsync.go b/pbm/resync/rsync.go index 4ec8e9c4e..91ef14e6b 100644 --- a/pbm/resync/rsync.go +++ b/pbm/resync/rsync.go @@ -8,6 +8,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "github.com/percona/percona-backup-mongodb/pbm/backup" + "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" @@ -15,16 +16,22 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/oplog" "github.com/percona/percona-backup-mongodb/pbm/restore" "github.com/percona/percona-backup-mongodb/pbm/storage" + "github.com/percona/percona-backup-mongodb/pbm/util" ) // Resync sync oplog, backup, and restore meta from provided storage. // // It checks for read and write permissions, drops all meta from the database // and populate it again by reading meta from the storage. -func Resync(ctx context.Context, conn connect.Client, stg storage.Storage) error { +func Resync(ctx context.Context, conn connect.Client, cfg *config.Storage) error { l := log.LogEventFromContext(ctx) - err := storage.HasReadAccess(ctx, stg) + stg, err := util.StorageFromConfig(cfg, l) + if err != nil { + return errors.Wrap(err, "unable to get backup store") + } + + err = storage.HasReadAccess(ctx, stg) if err != nil { if !errors.Is(err, storage.ErrUninitialized) { return errors.Wrap(err, "check read access") @@ -47,7 +54,7 @@ func Resync(ctx context.Context, conn connect.Client, stg storage.Storage) error l.Error("resync physical restore metadata") } - err = resyncBackupList(ctx, conn, stg) + err = SyncBackupList(ctx, conn, cfg, "") if err != nil { l.Error("resync backup metadata") } @@ -60,13 +67,48 @@ func Resync(ctx context.Context, conn connect.Client, stg storage.Storage) error return nil } -func resyncBackupList( +func ClearAllBackupMetaFromExternal(ctx context.Context, conn connect.Client) error { + _, err := conn.BcpCollection().DeleteMany(ctx, bson.D{{"profile", true}}) + if err != nil { + return errors.Wrapf(err, "delete all backup meta from db") + } + + return nil +} + +func ClearBackupList(ctx context.Context, conn connect.Client, profile string) error { + storeFilter := bson.M{"profile": nil} + if profile != "" { + storeFilter["profile"] = true + storeFilter["name"] = profile + } + + _, err := conn.BcpCollection().DeleteMany(ctx, bson.D{{"store", storeFilter}}) + if err != nil { + return errors.Wrapf(err, "delete all backup meta from db") + } + + return nil +} + +func SyncBackupList( ctx context.Context, conn connect.Client, - stg storage.Storage, + cfg *config.Storage, + profile string, ) error { l := log.LogEventFromContext(ctx) + stg, err := util.StorageFromConfig(cfg, l) + if err != nil { + return errors.Wrap(err, "storage from config") + } + + err = ClearBackupList(ctx, conn, profile) + if err != nil { + return errors.Wrapf(err, 
"clear backup list") + } + backupList, err := getAllBackupMetaFromStorage(ctx, stg) if err != nil { return errors.Wrap(err, "get all backups meta from the storage") @@ -78,21 +120,24 @@ func resyncBackupList( return nil } + backupStore := backup.Storage{ + Name: profile, + IsProfile: profile != "", + Storage: *cfg, + } + docs := make([]any, len(backupList)) for i, m := range backupList { l.Debug("bcp: %v", m.Name) + // overwriting config allows PBM to download files from the current deployment + m.Store = backupStore docs[i] = m } - _, err = conn.BcpCollection().DeleteMany(ctx, bson.M{}) - if err != nil { - return errors.Wrapf(err, "delete all backup meta from db") - } - _, err = conn.BcpCollection().InsertMany(ctx, docs) if err != nil { - return errors.Wrap(err, "insert backups meta into db") + return errors.Wrap(err, "write backups meta into db") } return nil @@ -110,33 +155,31 @@ func resyncOplogRange( return errors.Wrapf(err, "clean up %s", defs.PITRChunksCollection) } - pitrf, err := stg.List(defs.PITRfsPrefix, "") + chunkFiles, err := stg.List(defs.PITRfsPrefix, "") if err != nil { return errors.Wrap(err, "get list of pitr chunks") } - if len(pitrf) == 0 { - return nil - } - var pitr []interface{} - for _, f := range pitrf { - stat, err := stg.FileStat(defs.PITRfsPrefix + "/" + f.Name) + var chunks []any + for _, file := range chunkFiles { + info, err := stg.FileStat(defs.PITRfsPrefix + "/" + file.Name) if err != nil { - l.Warning("skip pitr chunk %s/%s because of %v", defs.PITRfsPrefix, f.Name, err) + l.Warning("skip pitr chunk %s/%s because of %v", defs.PITRfsPrefix, file.Name, err) continue } - chnk := oplog.MakeChunkMetaFromFilepath(f.Name) - if chnk != nil { - chnk.Size = stat.Size - pitr = append(pitr, chnk) + + chunk := oplog.MakeChunkMetaFromFilepath(file.Name) + if chunk != nil { + chunk.Size = info.Size + chunks = append(chunks, chunk) } } - if len(pitr) == 0 { + if len(chunks) == 0 { return nil } - _, err = conn.PITRChunksCollection().InsertMany(ctx, pitr) + _, err = conn.PITRChunksCollection().InsertMany(ctx, chunks) if err != nil { return errors.Wrap(err, "insert retrieved pitr meta") } @@ -149,6 +192,11 @@ func resyncPhysicalRestores( conn connect.Client, stg storage.Storage, ) error { + _, err := conn.RestoresCollection().DeleteMany(ctx, bson.D{}) + if err != nil { + return errors.Wrap(err, "delete all documents") + } + restoreFiles, err := stg.List(defs.PhysRestoresDir, ".json") if err != nil { return errors.Wrap(err, "get physical restores list from the storage") @@ -171,11 +219,6 @@ func resyncPhysicalRestores( docs[i] = m } - _, err = conn.RestoresCollection().DeleteMany(ctx, bson.D{}) - if err != nil { - return errors.Wrap(err, "delete all documents") - } - _, err = conn.RestoresCollection().InsertMany(ctx, docs) if err != nil { return errors.Wrap(err, "insert restore meta into db") diff --git a/sdk/impl.go b/sdk/impl.go index e9c5f5a85..cd293500f 100644 --- a/sdk/impl.go +++ b/sdk/impl.go @@ -254,6 +254,11 @@ func (c *clientImpl) SyncFromStorage(ctx context.Context) (CommandID, error) { return CommandID(opid.String()), err } +func (c *clientImpl) SyncFromExternalStorage(ctx context.Context, name string) (CommandID, error) { + opid, err := ctrl.SendSyncMetaFrom(ctx, c.conn, name) + return CommandID(opid.String()), err +} + func (c *clientImpl) DeleteBackupByName(ctx context.Context, name string) (CommandID, error) { opts := GetBackupByNameOptions{FetchIncrements: true} bcp, err := c.GetBackupByName(ctx, name, opts) diff --git a/sdk/sdk.go b/sdk/sdk.go index 
3ba679e7b..54f51920d 100644 --- a/sdk/sdk.go +++ b/sdk/sdk.go @@ -131,6 +131,7 @@ type Client interface { RunCleanup(ctx context.Context, beforeTS Timestamp) (CommandID, error) SyncFromStorage(ctx context.Context) (CommandID, error) + SyncFromExternalStorage(ctx context.Context, name string) (CommandID, error) } func NewClient(ctx context.Context, uri string) (*clientImpl, error) { @@ -142,6 +143,16 @@ func NewClient(ctx context.Context, uri string) (*clientImpl, error) { return &clientImpl{conn: conn}, nil } +func WaitForAddProfile(ctx context.Context, client Client, cid CommandID) error { + lck := &lock.LockHeader{Type: ctrl.CmdAddConfigProfile, OPID: string(cid)} + return waitOp(ctx, client.(*clientImpl).conn, lck) +} + +func WaitForRemoveProfile(ctx context.Context, client Client, cid CommandID) error { + lck := &lock.LockHeader{Type: ctrl.CmdRemoveConfigProfile, OPID: string(cid)} + return waitOp(ctx, client.(*clientImpl).conn, lck) +} + func WaitForCleanup(ctx context.Context, client Client) error { lck := &lock.LockHeader{Type: ctrl.CmdCleanup} return waitOp(ctx, client.(*clientImpl).conn, lck) From be024657a4976057cb159750c4cf51022b4a4cdf Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 3 Jun 2024 11:57:52 +0200 Subject: [PATCH 020/203] fix `profile add` params error --- cmd/pbm/profile.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/cmd/pbm/profile.go b/cmd/pbm/profile.go index b68c5ed61..afb432f04 100644 --- a/cmd/pbm/profile.go +++ b/cmd/pbm/profile.go @@ -150,12 +150,11 @@ func handleSyncConfigProfile( pbm sdk.Client, opts syncConfigProfileOptions, ) (fmt.Stringer, error) { - if opts.name == "" && !opts.all { + if !opts.all && opts.name == "" { return nil, errors.New("--profile or --all is required") } - // TODO: finish here - if opts.name == "" { - return nil, errors.New("argument `name` should not be empty") + if opts.all && opts.name != "" { + return nil, errors.New("ambiguous params: --profile and --all are set") } cid, err := pbm.SyncFromExternalStorage(ctx, opts.name) From 4de36f209e3a4be64c811d8571e92384497f9f34 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 3 Jun 2024 11:58:48 +0200 Subject: [PATCH 021/203] move backup.getMergedConfig to config.GetProfiledConfig --- cmd/pbm-agent/backup.go | 37 ++----------------------------------- pbm/config/util.go | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 35 deletions(-) create mode 100644 pbm/config/util.go diff --git a/cmd/pbm-agent/backup.go b/cmd/pbm-agent/backup.go index b603541ad..cc4c34a55 100644 --- a/cmd/pbm-agent/backup.go +++ b/cmd/pbm-agent/backup.go @@ -6,7 +6,6 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/backup" "github.com/percona/percona-backup-mongodb/pbm/config" - "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/ctrl" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" @@ -80,9 +79,9 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, go a.sliceNow(opid) } - cfg, err := getMergedConfig(ctx, a.leadConn, cmd.Profile) + cfg, err := config.GetProfiledConfig(ctx, a.leadConn, cmd.Profile) if err != nil { - l.Error("get merged config: %v", err) + l.Error("get profiled config: %v", err) return } @@ -238,38 +237,6 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, } } -func getMergedConfig( - ctx context.Context, - conn connect.Client, - profileName 
string, -) (*config.Config, error) { - cfg, err := config.GetConfig(ctx, conn) - if err != nil { - return nil, errors.Wrap(err, "get main config") - } - - if profileName != "" { - custom, err := config.GetProfile(ctx, conn, profileName) - if err != nil { - return nil, errors.Wrap(err, "get config profile") - } - if err := custom.Storage.Cast(); err != nil { - return nil, errors.Wrap(err, "storage cast") - } - - // use storage config only - cfg.Storage = custom.Storage - cfg.Name = custom.Name - cfg.IsProfile = true - } - - if storage.ParseType(string(cfg.Storage.Type)) == storage.Undefined { - return nil, errors.New("backups cannot be saved because PBM storage configuration hasn't been set yet") - } - - return cfg, nil -} - const renominationFrame = 5 * time.Second func (a *Agent) nominateRS(ctx context.Context, bcp, rs string, nodes [][]string) error { diff --git a/pbm/config/util.go b/pbm/config/util.go new file mode 100644 index 000000000..e03c5a769 --- /dev/null +++ b/pbm/config/util.go @@ -0,0 +1,41 @@ +package config + +import ( + "context" + + "github.com/percona/percona-backup-mongodb/pbm/connect" + "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/storage" +) + +func GetProfiledConfig( + ctx context.Context, + conn connect.Client, + profileName string, +) (*Config, error) { + cfg, err := GetConfig(ctx, conn) + if err != nil { + return nil, errors.Wrap(err, "get main config") + } + + if profileName != "" { + custom, err := GetProfile(ctx, conn, profileName) + if err != nil { + return nil, errors.Wrap(err, "get config profile") + } + if err := custom.Storage.Cast(); err != nil { + return nil, errors.Wrap(err, "storage cast") + } + + // use storage config only + cfg.Storage = custom.Storage + cfg.Name = custom.Name + cfg.IsProfile = true + } + + if storage.ParseType(string(cfg.Storage.Type)) == storage.Undefined { + return nil, errors.New("backups cannot be saved because PBM storage configuration hasn't been set yet") + } + + return cfg, nil +} From 262eb841c2e7b8a71510e38c4914e8029ab24a65 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 3 Jun 2024 11:59:09 +0200 Subject: [PATCH 022/203] decompose Agent.Resync() --- cmd/pbm-agent/agent.go | 87 +++++++++++++++++++++++++----------------- pbm/resync/rsync.go | 9 ----- 2 files changed, 52 insertions(+), 44 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 92ff67a97..9737e33e4 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -225,48 +225,21 @@ func (a *Agent) Resync(ctx context.Context, cmd *ctrl.ResyncCmd, opid ctrl.OPID, l.Info("started") if cmd.All { - profiles, err := config.ListProfiles(ctx, a.leadConn) + err = syncAllProfile(ctx, a.leadConn) if err != nil { - l.Error("get config profiles: %v", err) + l.Error("sync all profiles: %v", err) return } - - err = resync.ClearAllBackupMetaFromExternal(ctx, a.leadConn) - if err != nil { - l.Error("clear all backup meta from external storages: %v", err) - return - } - - for i := range profiles { - cfg := profiles[i] - err = resync.SyncBackupList(ctx, a.leadConn, &cfg.Storage, cfg.Name) - if err != nil { - l.Error("sync backup list from external storage %q: %v", cfg.Name, err) - return - } - } - } else if !cmd.All && cmd.Name != "" { - cfg, err := config.GetProfile(ctx, a.leadConn, cmd.Name) - if err != nil { - l.Error("get config profile: %v", err) - return - } - - err = resync.SyncBackupList(ctx, a.leadConn, &cfg.Storage, cfg.Name) - if err != nil { - l.Error("sync backup list from 
external storage %q: %v", cfg.Name, err) - return - } - } else if !cmd.All && cmd.Name == "" { - cfg, err := config.GetConfig(ctx, a.leadConn) + } else if cmd.Name != "" { + err = syncProfile(ctx, a.leadConn, cmd.Name) if err != nil { - l.Error("get config: %v", err) + l.Error("sync profile %q: %v", cmd.Name, err) return } - - err = resync.Resync(ctx, a.leadConn, &cfg.Storage) + } else { + err = syncMain(ctx, a.leadConn) if err != nil { - l.Error("resync from main storage: %v", err) + l.Error(err.Error()) return } @@ -281,6 +254,50 @@ func (a *Agent) Resync(ctx context.Context, cmd *ctrl.ResyncCmd, opid ctrl.OPID, l.Info("succeed") } +func syncProfile(ctx context.Context, conn connect.Client, profileName string) error { + cfg, err := config.GetProfile(ctx, conn, profileName) + if err != nil { + return errors.Wrap(err, "get config profile") + } + + err = resync.SyncBackupList(ctx, conn, &cfg.Storage, cfg.Name) + if err != nil { + return errors.Wrap(err, "sync backup list") + } + + return nil +} + +func syncAllProfile(ctx context.Context, conn connect.Client) error { + profiles, err := config.ListProfiles(ctx, conn) + if err != nil { + return errors.Wrap(err, "get config profiles") + } + + for i := range profiles { + err = syncProfile(ctx, conn, profiles[i].Name) + if err != nil { + return errors.Wrapf(err, "sync profile %q", profiles[i].Name) + } + } + + return nil +} + +func syncMain(ctx context.Context, conn connect.Client) error { + cfg, err := config.GetConfig(ctx, conn) + if err != nil { + return errors.Wrap(err, "get config") + } + + err = resync.Resync(ctx, conn, &cfg.Storage) + if err != nil { + return errors.Wrap(err, "resync") + } + + return nil +} + // acquireLock tries to acquire the lock. If there is a stale lock // it tries to mark op that held the lock (backup, [pitr]restore) as failed. 
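 //
 // A sketch of the calling pattern used by the command handlers in this
 // series (LockHeader fields as in Agent.Resync above; logging and error
 // handling trimmed):
 //
 //	lck := lock.NewLock(a.leadConn, lock.LockHeader{
 //		Type:    ctrl.CmdResync,
 //		Replset: nodeInfo.SetName,
 //		Node:    nodeInfo.Me,
 //		OPID:    opid.String(),
 //		Epoch:   util.Ref(ep.TS()),
 //	})
 //	got, err := a.acquireLock(ctx, lck, l)
 //	if err != nil || !got {
 //		return // error, or the lock is held by another op
 //	}
 //	defer func() {
 //		if err := lck.Release(); err != nil {
 //			l.Error("unable to release lock %v: %v", lck, err)
 //		}
 //	}()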
func (a *Agent) acquireLock(ctx context.Context, l *lock.Lock, lg log.LogEvent) (bool, error) { diff --git a/pbm/resync/rsync.go b/pbm/resync/rsync.go index 91ef14e6b..2f3cfdcb1 100644 --- a/pbm/resync/rsync.go +++ b/pbm/resync/rsync.go @@ -67,15 +67,6 @@ func Resync(ctx context.Context, conn connect.Client, cfg *config.Storage) error return nil } -func ClearAllBackupMetaFromExternal(ctx context.Context, conn connect.Client) error { - _, err := conn.BcpCollection().DeleteMany(ctx, bson.D{{"profile", true}}) - if err != nil { - return errors.Wrapf(err, "delete all backup meta from db") - } - - return nil -} - func ClearBackupList(ctx context.Context, conn connect.Client, profile string) error { storeFilter := bson.M{"profile": nil} if profile != "" { From 8f3a9c263f13e589d1b51eaf35f1572fd96d9724 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 3 Jun 2024 20:38:51 +0200 Subject: [PATCH 023/203] wip --- cmd/pbm-agent/agent.go | 101 +++++++++++++++++++-------------------- cmd/pbm-agent/profile.go | 65 ------------------------- cmd/pbm/backup.go | 15 ++++-- cmd/pbm/list.go | 4 ++ cmd/pbm/main.go | 9 ++-- cmd/pbm/profile.go | 38 +++++++++++---- cmd/pbm/status.go | 4 ++ pbm/backup/query.go | 15 ++++-- pbm/backup/types.go | 9 +++- pbm/config/config.go | 21 ++++---- pbm/config/util.go | 21 ++++---- pbm/ctrl/send.go | 35 +++++++++----- pbm/resync/rsync.go | 17 +++++-- sdk/impl.go | 23 +++++++++ sdk/sdk.go | 3 ++ 15 files changed, 206 insertions(+), 174 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 9737e33e4..0b9b905e1 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -225,21 +225,64 @@ func (a *Agent) Resync(ctx context.Context, cmd *ctrl.ResyncCmd, opid ctrl.OPID, l.Info("started") if cmd.All { - err = syncAllProfile(ctx, a.leadConn) + profiles, err := config.ListProfiles(ctx, a.leadConn) if err != nil { - l.Error("sync all profiles: %v", err) + l.Error("get config profiles: %v", err) return } + + if cmd.Clear { + l.Debug("clearing backup list for %d config profiles", len(profiles)) + for i := range profiles { + name := profiles[i].Name + err = resync.ClearBackupList(ctx, a.leadConn, name) + if err != nil { + l.Error("clear backup list for %q: %v", name, err) + } + } + } else { + l.Debug("syncing backup list for %d config profiles", len(profiles)) + for i := range profiles { + profile := &profiles[i] + err = resync.SyncBackupList(ctx, a.leadConn, &profile.Storage, profile.Name) + if err != nil { + l.Error("sync backup list for %q: %v", profile.Name, err) + return + } + } + } } else if cmd.Name != "" { - err = syncProfile(ctx, a.leadConn, cmd.Name) + profile, err := config.GetProfile(ctx, a.leadConn, cmd.Name) + if err != nil { + l.Error("get config profile: %v", err) + return + } + + if cmd.Clear { + l.Debug("clearing backup list for %q", profile.Name) + err = resync.ClearBackupList(ctx, a.leadConn, profile.Name) + if err != nil { + l.Error("clear backup list for %q: %v", profile.Name, err) + } + } else { + l.Debug("syncing backup list for %q", profile.Name) + err = resync.SyncBackupList(ctx, a.leadConn, &profile.Storage, profile.Name) + if err != nil { + l.Error("sync backup list for %q: %v", profile.Name, err) + return + } + } + } else { // resync main storage only + l.Debug("resync from main storage") + cfg, err := config.GetConfig(ctx, a.leadConn) if err != nil { - l.Error("sync profile %q: %v", cmd.Name, err) + l.Error("get config: %v", err) return } - } else { - err = syncMain(ctx, a.leadConn) + + err = resync.Resync(ctx, a.leadConn, 
&cfg.Storage) if err != nil { - l.Error(err.Error()) + l.Error("resync: %v", err) return } @@ -254,50 +297,6 @@ func (a *Agent) Resync(ctx context.Context, cmd *ctrl.ResyncCmd, opid ctrl.OPID, l.Info("succeed") } -func syncProfile(ctx context.Context, conn connect.Client, profileName string) error { - cfg, err := config.GetProfile(ctx, conn, profileName) - if err != nil { - return errors.Wrap(err, "get config profile") - } - - err = resync.SyncBackupList(ctx, conn, &cfg.Storage, cfg.Name) - if err != nil { - return errors.Wrap(err, "sync backup list") - } - - return nil -} - -func syncAllProfile(ctx context.Context, conn connect.Client) error { - profiles, err := config.ListProfiles(ctx, conn) - if err != nil { - return errors.Wrap(err, "get config profiles") - } - - for i := range profiles { - err = syncProfile(ctx, conn, profiles[i].Name) - if err != nil { - return errors.Wrapf(err, "sync profile %q", profiles[i].Name) - } - } - - return nil -} - -func syncMain(ctx context.Context, conn connect.Client) error { - cfg, err := config.GetConfig(ctx, conn) - if err != nil { - return errors.Wrap(err, "get config") - } - - err = resync.Resync(ctx, conn, &cfg.Storage) - if err != nil { - return errors.Wrap(err, "resync") - } - - return nil -} - // acquireLock tries to acquire the lock. If there is a stale lock // it tries to mark op that held the lock (backup, [pitr]restore) as failed. func (a *Agent) acquireLock(ctx context.Context, l *lock.Lock, lg log.LogEvent) (bool, error) { diff --git a/cmd/pbm-agent/profile.go b/cmd/pbm-agent/profile.go index b9e2113e5..b3d2a827a 100644 --- a/cmd/pbm-agent/profile.go +++ b/cmd/pbm-agent/profile.go @@ -192,68 +192,3 @@ func (a *Agent) handleRemoveConfigProfile( return } } - -func (a *Agent) handleSyncMetaFrom( - ctx context.Context, - cmd *ctrl.ResyncCmd, - opid ctrl.OPID, - epoch config.Epoch, -) { - logger := log.FromContext(ctx) - l := logger.NewEvent(string(ctrl.CmdResync), "", opid.String(), epoch.TS()) - ctx = log.SetLogEventToContext(ctx, l) - - var err error - defer func() { - if err != nil { - l.Error("failed to add config profile: %v", err) - } - }() - - nodeInfo, err := topo.GetNodeInfo(ctx, a.nodeConn) - if err != nil { - err = errors.Wrap(err, "get node info") - return - } - if !nodeInfo.IsClusterLeader() { - l.Debug("not leader. 
skip") - return - } - - lck := lock.NewLock(a.leadConn, lock.LockHeader{ - Type: ctrl.CmdAddConfigProfile, - Replset: a.brief.SetName, - Node: a.brief.Me, - OPID: opid.String(), - Epoch: util.Ref(epoch.TS()), - }) - - got, err := a.acquireLock(ctx, lck, l) - if err != nil { - l.Error("acquiring lock: %v", err) - return - } - if !got { - l.Error("lock not acquired") - return - } - defer func() { - l.Debug("releasing lock") - err = lck.Release() - if err != nil { - l.Error("unable to release lock %v: %v", lck, err) - } - }() - - cfg, err := config.GetConfig(ctx, a.leadConn) - if err != nil { - err = errors.Wrap(err, "get storage config") - return - } - - err = resync.SyncBackupList(ctx, a.leadConn, &cfg.Storage, cmd.Name) - if err != nil { - err = errors.Wrap(err, "sync backup list") - return - } -} diff --git a/cmd/pbm/backup.go b/cmd/pbm/backup.go index 912519ef7..c92b3b1d0 100644 --- a/cmd/pbm/backup.go +++ b/cmd/pbm/backup.go @@ -8,13 +8,13 @@ import ( "strings" "time" - "go.mongodb.org/mongo-driver/mongo" "golang.org/x/mod/semver" "gopkg.in/yaml.v2" "github.com/percona/percona-backup-mongodb/pbm/archive" "github.com/percona/percona-backup-mongodb/pbm/backup" "github.com/percona/percona-backup-mongodb/pbm/compress" + "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/ctrl" "github.com/percona/percona-backup-mongodb/pbm/defs" @@ -112,12 +112,15 @@ func runBackup( } } - cfg, err := pbm.GetConfig(ctx) + cfg, err := config.GetProfiledConfig(ctx, conn, b.profile) if err != nil { - if errors.Is(err, mongo.ErrNoDocuments) { - return nil, errors.New("no store set. Set remote store with ") + if errors.Is(err, config.ErrMissedConfig) { + return nil, errors.New("no config set. Set config with ") } - return nil, errors.Wrap(err, "get remote-store") + if errors.Is(err, config.ErrMissedConfigProfile) { + return nil, errors.Errorf("invalid profile: %v", b.profile) + } + return nil, errors.Wrap(err, "get config") } compression := cfg.Backup.Compression @@ -306,6 +309,7 @@ type bcpDesc struct { Status defs.Status `json:"status" yaml:"status"` Size int64 `json:"size" yaml:"-"` HSize string `json:"size_h" yaml:"size_h"` + StorageName string `json:"storage_name,omitempty" yaml:"storage_name,omitempty"` Err *string `json:"error,omitempty" yaml:"error,omitempty"` Replsets []bcpReplDesc `json:"replsets" yaml:"replsets"` } @@ -386,6 +390,7 @@ func describeBackup(ctx context.Context, conn connect.Client, pbm sdk.Client, b Status: bcp.Status, Size: bcp.Size, HSize: byteCountIEC(bcp.Size), + StorageName: bcp.Store.Name, } if bcp.Err != "" { rv.Err = &bcp.Err diff --git a/cmd/pbm/list.go b/cmd/pbm/list.go index 1d15bb7ed..84f5ba0e5 100644 --- a/cmd/pbm/list.go +++ b/cmd/pbm/list.go @@ -175,6 +175,9 @@ func (bl backupListOut) String() string { } else if b.Type == defs.IncrementalBackup && b.SrcBackup == "" { t += ", base" } + if b.StoreName != "" { + t += ", *" + } s += fmt.Sprintf(" %s <%s> [restore_to_time: %s]\n", b.Name, t, fmtTS(int64(b.RestoreTS))) } if bl.PITR.On { @@ -280,6 +283,7 @@ func getSnapshotList( PBMVersion: b.PBMVersion, Type: b.Type, SrcBackup: b.SrcBackup, + StoreName: b.Store.Name, }) } diff --git a/cmd/pbm/main.go b/cmd/pbm/main.go index 8492cb7cb..4b4d49262 100644 --- a/cmd/pbm/main.go +++ b/cmd/pbm/main.go @@ -110,7 +110,7 @@ func main() { descConfigProfileCmd := configProfileCmd. Command("describe", "Describe configuration profile") descConfigProfileCmd. - Arg("name", "Profile name"). 
+ Arg("profile-name", "Profile name"). Required(). StringVar(&descConfigProfileOpts.name) @@ -118,7 +118,7 @@ func main() { addConfigProfileCmd := configProfileCmd. Command("add", "Save configuration profile") addConfigProfileCmd. - Arg("name", "Profile name"). + Arg("profile-name", "Profile name"). Required(). StringVar(&addConfigProfileOpts.name) addConfigProfileCmd. @@ -137,7 +137,7 @@ func main() { removeConfigProfileCmd := configProfileCmd. Command("remove", "Remove configuration profile") removeConfigProfileCmd. - Arg("name", "Profile name"). + Arg("profile-name", "Profile name"). Required(). StringVar(&removeConfigProfileOpts.name) removeConfigProfileCmd. @@ -149,7 +149,7 @@ func main() { syncConfigProfileCmd := configProfileCmd. Command("sync", "Sync backup list from configuration profile") syncConfigProfileCmd. - Arg("profile", "Profile name"). + Arg("profile-name", "Profile name"). StringVar(&syncConfigProfileOpts.name) syncConfigProfileCmd. Flag("all", "Sync from all external storages"). @@ -689,6 +689,7 @@ type snapshotStat struct { PBMVersion string `json:"pbmVersion"` Type defs.BackupType `json:"type"` SrcBackup string `json:"src"` + StoreName string `json:"storage,omitempty"` } type pitrRange struct { diff --git a/cmd/pbm/profile.go b/cmd/pbm/profile.go index afb432f04..ce191c597 100644 --- a/cmd/pbm/profile.go +++ b/cmd/pbm/profile.go @@ -6,6 +6,8 @@ import ( "os" "strings" + "go.mongodb.org/mongo-driver/mongo" + "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/sdk" @@ -69,7 +71,7 @@ func handleDescibeConfigProfiles( opts descConfigProfileOptions, ) (fmt.Stringer, error) { if opts.name == "" { - return nil, errors.New("argument `name` should not be empty") + return nil, errors.New("argument `profile-name` should not be empty") } return pbm.GetConfigProfile(ctx, opts.name) @@ -81,10 +83,15 @@ func handleAddConfigProfile( opts addConfigProfileOptions, ) (fmt.Stringer, error) { if opts.name == "" { - return nil, errors.New("argument `name` should not be empty") + return nil, errors.New("argument `profile-name` should not be empty") } - if opts.file == nil { - return nil, errors.New("missed file: nil value") + + _, err := pbm.GetConfig(ctx) + if err != nil { + if errors.Is(err, mongo.ErrNoDocuments) { + return nil, errors.New("PBM is not configured") + } + return nil, errors.Wrap(err, "get config") } cfg, err := config.Parse(opts.file) @@ -127,7 +134,7 @@ func handleRemoveConfigProfile( opts removeConfigProfileOptions, ) (fmt.Stringer, error) { if opts.name == "" { - return nil, errors.New("argument `name` should not be empty") + return nil, errors.New("argument `profile-name` should not be empty") } cid, err := pbm.RemoveConfigProfile(ctx, opts.name) @@ -151,13 +158,28 @@ func handleSyncConfigProfile( opts syncConfigProfileOptions, ) (fmt.Stringer, error) { if !opts.all && opts.name == "" { - return nil, errors.New("--profile or --all is required") + return nil, errors.New(" or --all must be provided") } if opts.all && opts.name != "" { - return nil, errors.New("ambiguous params: --profile and --all are set") + return nil, errors.New("ambiguous: and --all are provided") } - cid, err := pbm.SyncFromExternalStorage(ctx, opts.name) + var err error + var cid sdk.CommandID + + if opts.clear { + if opts.all { + cid, err = pbm.ClearSyncFromAllExternalStorages(ctx) + } else { + cid, err = pbm.ClearSyncFromExternalStorage(ctx, opts.name) + } + } else { + if opts.all { + cid, err = 
pbm.SyncFromAllExternalStorages(ctx) + } else { + cid, err = pbm.SyncFromExternalStorage(ctx, opts.name) + } + } if err != nil { return nil, errors.Wrap(err, "sync from storage") } diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go index b873f2fb2..c8c1a3347 100644 --- a/cmd/pbm/status.go +++ b/cmd/pbm/status.go @@ -598,6 +598,9 @@ func (s storageStat) String() string { } else if ss.Type == defs.IncrementalBackup && ss.SrcBackup == "" { t += ", base" } + if ss.StoreName != "" { + t += ", *" + } ret += fmt.Sprintf(" %s %s <%s> %s\n", ss.Name, fmtSize(ss.Size), t, status) } @@ -695,6 +698,7 @@ func getStorageStat( PBMVersion: bcp.PBMVersion, Type: bcp.Type, SrcBackup: bcp.SrcBackup, + StoreName: bcp.Store.Name, } if err := bcp.Error(); err != nil { snpsht.Err = err diff --git a/pbm/backup/query.go b/pbm/backup/query.go index de3c5efc1..8d614f0e6 100644 --- a/pbm/backup/query.go +++ b/pbm/backup/query.go @@ -228,13 +228,19 @@ func LastIncrementalBackup(ctx context.Context, conn connect.Client) (*BackupMet // or nil if there is no such backup yet. If ts isn't nil it will // search for the most recent backup that finished before specified timestamp func GetLastBackup(ctx context.Context, conn connect.Client, before *primitive.Timestamp) (*BackupMeta, error) { - return getRecentBackup(ctx, conn, nil, before, -1, - bson.D{{"nss", nil}, {"type", bson.M{"$ne": defs.ExternalBackup}}}) + return getRecentBackup(ctx, conn, nil, before, -1, bson.D{ + {"nss", nil}, + {"type", bson.M{"$ne": defs.ExternalBackup}}, + {"store.profile", nil}, + }) } func GetFirstBackup(ctx context.Context, conn connect.Client, after *primitive.Timestamp) (*BackupMeta, error) { - return getRecentBackup(ctx, conn, after, nil, 1, - bson.D{{"nss", nil}, {"type", bson.M{"$ne": defs.ExternalBackup}}}) + return getRecentBackup(ctx, conn, after, nil, 1, bson.D{ + {"nss", nil}, + {"type", bson.M{"$ne": defs.ExternalBackup}}, + {"store.profile", nil}, + }) } func getRecentBackup( @@ -297,6 +303,7 @@ func findBaseSnapshotLWImpl( f := bson.D{ {"nss", nil}, {"type", bson.M{"$ne": defs.ExternalBackup}}, + {"store.profile", nil}, {"last_write_ts", lwCond}, {"status", defs.StatusDone}, } diff --git a/pbm/backup/types.go b/pbm/backup/types.go index 821d808de..c22bd55cb 100644 --- a/pbm/backup/types.go +++ b/pbm/backup/types.go @@ -86,9 +86,14 @@ func (b *BackupMeta) RS(name string) *BackupReplset { return nil } +// Storage keeps storage configuration used during backup. +// +// If external configuration is used, IsProfile is `true` and Name is set. type Storage struct { - Name string `bson:"name,omitempty" json:"name,omitempty"` - IsProfile bool `bson:"profile,omitempty" json:"profile,omitempty"` + // Name is config profile name. + Name string `bson:"name,omitempty" json:"name,omitempty"` + // IsProfile is true when storage is non-main (external). 
+ IsProfile bool `bson:"profile,omitempty" json:"profile,omitempty"` config.Storage `bson:",inline" json:",inline"` } diff --git a/pbm/config/config.go b/pbm/config/config.go index 50205d765..3aab07884 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -27,9 +27,11 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/topo" ) -var ErrUnkownStorageType = errors.New("unknown storage type") - -var errMissedConfig = errors.New("missed config") +var ( + ErrUnkownStorageType = errors.New("unknown storage type") + ErrMissedConfig = errors.New("missed config") + ErrMissedConfigProfile = errors.New("missed config profile") +) type confMap map[string]reflect.Kind @@ -67,11 +69,12 @@ type Config struct { Name string `bson:"name,omitempty" json:"name,omitempty" yaml:"name,omitempty"` IsProfile bool `bson:"profile,omitempty" json:"profile,omitempty" yaml:"profile,omitempty"` - Storage Storage `bson:"storage" json:"storage" yaml:"storage"` - Oplog *GlobalSlicer `bson:"pitr,omitempty" json:"pitr,omitempty" yaml:"pitr,omitempty"` - Backup *Backup `bson:"backup,omitempty" json:"backup,omitempty" yaml:"backup,omitempty"` - Restore *Restore `bson:"restore,omitempty" json:"restore,omitempty" yaml:"restore,omitempty"` - Epoch primitive.Timestamp `bson:"epoch" json:"-" yaml:"-"` + Storage Storage `bson:"storage" json:"storage" yaml:"storage"` + Oplog *GlobalSlicer `bson:"pitr,omitempty" json:"pitr,omitempty" yaml:"pitr,omitempty"` + Backup *Backup `bson:"backup,omitempty" json:"backup,omitempty" yaml:"backup,omitempty"` + Restore *Restore `bson:"restore,omitempty" json:"restore,omitempty" yaml:"restore,omitempty"` + + Epoch primitive.Timestamp `bson:"epoch" json:"-" yaml:"-"` } func Parse(r io.Reader) (*Config, error) { @@ -569,7 +572,7 @@ func IsPITREnabled(ctx context.Context, m connect.Client) (bool, bool, error) { cfg, err := GetConfig(ctx, m) if err != nil { if errors.Is(err, mongo.ErrNoDocuments) { - err = errMissedConfig + err = ErrMissedConfig } return false, false, errors.Wrap(err, "get config") diff --git a/pbm/config/util.go b/pbm/config/util.go index e03c5a769..219477a37 100644 --- a/pbm/config/util.go +++ b/pbm/config/util.go @@ -3,24 +3,28 @@ package config import ( "context" + "go.mongodb.org/mongo-driver/mongo" + "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/storage" ) -func GetProfiledConfig( - ctx context.Context, - conn connect.Client, - profileName string, -) (*Config, error) { +func GetProfiledConfig(ctx context.Context, conn connect.Client, profile string) (*Config, error) { cfg, err := GetConfig(ctx, conn) if err != nil { + if errors.Is(err, mongo.ErrNoDocuments) { + err = ErrMissedConfig + } return nil, errors.Wrap(err, "get main config") } - if profileName != "" { - custom, err := GetProfile(ctx, conn, profileName) + if profile != "" { + custom, err := GetProfile(ctx, conn, profile) if err != nil { + if errors.Is(err, mongo.ErrNoDocuments) { + err = ErrMissedConfigProfile + } return nil, errors.Wrap(err, "get config profile") } if err := custom.Storage.Cast(); err != nil { @@ -34,7 +38,8 @@ func GetProfiledConfig( } if storage.ParseType(string(cfg.Storage.Type)) == storage.Undefined { - return nil, errors.New("backups cannot be saved because PBM storage configuration hasn't been set yet") + return nil, errors.New( + "backups cannot be saved because PBM storage configuration hasn't been set yet") } return cfg, nil diff --git a/pbm/ctrl/send.go 
b/pbm/ctrl/send.go index 1fc3fbef4..4bccaa1d8 100644 --- a/pbm/ctrl/send.go +++ b/pbm/ctrl/send.go @@ -93,31 +93,40 @@ func SendRemoveConfigProfile(ctx context.Context, m connect.Client, name string) return sendCommand(ctx, m, cmd) } +func SendResync(ctx context.Context, m connect.Client) (OPID, error) { + return sendCommand(ctx, m, Cmd{Cmd: CmdResync}) +} + func SendSyncMetaFrom(ctx context.Context, m connect.Client, name string) (OPID, error) { + opts := &ResyncCmd{} + if name != "" { + opts.Name = name + } else { + opts.All = true + } + cmd := Cmd{ - Cmd: CmdResync, - Resync: &ResyncCmd{ - Name: name, - }, + Cmd: CmdResync, + Resync: opts, } return sendCommand(ctx, m, cmd) } func SendClearMetaFrom(ctx context.Context, m connect.Client, name string) (OPID, error) { + opts := &ResyncCmd{Clear: true} + if name != "" { + opts.Name = name + } else { + opts.All = true + } + cmd := Cmd{ - Cmd: CmdResync, - Resync: &ResyncCmd{ - Name: name, - Clear: true, - }, + Cmd: CmdResync, + Resync: opts, } return sendCommand(ctx, m, cmd) } -func SendResync(ctx context.Context, m connect.Client) (OPID, error) { - return sendCommand(ctx, m, Cmd{Cmd: CmdResync}) -} - func SendCancelBackup(ctx context.Context, m connect.Client) (OPID, error) { return sendCommand(ctx, m, Cmd{Cmd: CmdCancelBackup}) } diff --git a/pbm/resync/rsync.go b/pbm/resync/rsync.go index 2f3cfdcb1..3b1f8cb75 100644 --- a/pbm/resync/rsync.go +++ b/pbm/resync/rsync.go @@ -68,13 +68,20 @@ func Resync(ctx context.Context, conn connect.Client, cfg *config.Storage) error } func ClearBackupList(ctx context.Context, conn connect.Client, profile string) error { - storeFilter := bson.M{"profile": nil} - if profile != "" { - storeFilter["profile"] = true - storeFilter["name"] = profile + var filter bson.D + if profile == "" { + // from main storage + filter = bson.D{ + {"store.profile", nil}, + } + } else { + filter = bson.D{ + {"store.profile", true}, + {"store.name", profile}, + } } - _, err := conn.BcpCollection().DeleteMany(ctx, bson.D{{"store", storeFilter}}) + _, err := conn.BcpCollection().DeleteMany(ctx, filter) if err != nil { return errors.Wrapf(err, "delete all backup meta from db") } diff --git a/sdk/impl.go b/sdk/impl.go index cd293500f..2689ffb9b 100644 --- a/sdk/impl.go +++ b/sdk/impl.go @@ -255,10 +255,33 @@ func (c *clientImpl) SyncFromStorage(ctx context.Context) (CommandID, error) { } func (c *clientImpl) SyncFromExternalStorage(ctx context.Context, name string) (CommandID, error) { + if name == "" { + return NoOpID, errors.New("name is not provided") + } + opid, err := ctrl.SendSyncMetaFrom(ctx, c.conn, name) return CommandID(opid.String()), err } +func (c *clientImpl) SyncFromAllExternalStorages(ctx context.Context) (CommandID, error) { + opid, err := ctrl.SendSyncMetaFrom(ctx, c.conn, "") + return CommandID(opid.String()), err +} + +func (c *clientImpl) ClearSyncFromExternalStorage(ctx context.Context, name string) (CommandID, error) { + if name == "" { + return NoOpID, errors.New("name is not provided") + } + + opid, err := ctrl.SendClearMetaFrom(ctx, c.conn, name) + return CommandID(opid.String()), err +} + +func (c *clientImpl) ClearSyncFromAllExternalStorages(ctx context.Context) (CommandID, error) { + opid, err := ctrl.SendClearMetaFrom(ctx, c.conn, "") + return CommandID(opid.String()), err +} + func (c *clientImpl) DeleteBackupByName(ctx context.Context, name string) (CommandID, error) { opts := GetBackupByNameOptions{FetchIncrements: true} bcp, err := c.GetBackupByName(ctx, name, opts) diff --git a/sdk/sdk.go 
b/sdk/sdk.go index 54f51920d..e0fc3a069 100644 --- a/sdk/sdk.go +++ b/sdk/sdk.go @@ -132,6 +132,9 @@ type Client interface { SyncFromStorage(ctx context.Context) (CommandID, error) SyncFromExternalStorage(ctx context.Context, name string) (CommandID, error) + SyncFromAllExternalStorages(ctx context.Context) (CommandID, error) + ClearSyncFromExternalStorage(ctx context.Context, name string) (CommandID, error) + ClearSyncFromAllExternalStorages(ctx context.Context) (CommandID, error) } func NewClient(ctx context.Context, uri string) (*clientImpl, error) { From 1ce9a456793e7b8e9e0665d790720910f41ac8e2 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 4 Jun 2024 11:05:33 +0200 Subject: [PATCH 024/203] fix typo --- cmd/pbm/restore.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/pbm/restore.go b/cmd/pbm/restore.go index 45feacff8..bd71f4d0f 100644 --- a/cmd/pbm/restore.go +++ b/cmd/pbm/restore.go @@ -103,7 +103,7 @@ func runRestore(ctx context.Context, conn connect.Client, o *restoreOpts, outf o return nil, errors.Wrap(err, "parse --ns option") } if err := validateRestoreUsersAndRoles(o.usersAndRoles, nss); err != nil { - return nil, errors.Wrap(err, "parse --with-users-and-roles-option") + return nil, errors.Wrap(err, "parse --with-users-and-roles option") } rsMap, err := parseRSNamesMapping(o.rsMap) From 389f7349482cbd45b9999a98d4435b59101b80b7 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 4 Jun 2024 11:07:04 +0200 Subject: [PATCH 025/203] use slices.Contains() --- pbm/restore/logical.go | 4 ++-- pbm/restore/util.go | 11 ----------- 2 files changed, 2 insertions(+), 13 deletions(-) delete mode 100644 pbm/restore/util.go diff --git a/pbm/restore/logical.go b/pbm/restore/logical.go index ed613aaa6..47cce3479 100644 --- a/pbm/restore/logical.go +++ b/pbm/restore/logical.go @@ -292,7 +292,7 @@ func (r *Restore) PITR(ctx context.Context, cmd *ctrl.RestoreCmd, opid ctrl.OPID bcpShards[i] = bcp.Replsets[i].Name } - if !Contains(bcpShards, util.MakeReverseRSMapFunc(r.rsMap)(r.nodeInfo.SetName)) { + if !slices.Contains(bcpShards, util.MakeReverseRSMapFunc(r.rsMap)(r.nodeInfo.SetName)) { return r.Done(ctx) // skip. no backup for current rs } @@ -382,7 +382,7 @@ func (r *Restore) ReplayOplog(ctx context.Context, cmd *ctrl.ReplayCmd, opid ctr return errors.Wrap(err, "topology") } - if !Contains(oplogShards, util.MakeReverseRSMapFunc(r.rsMap)(r.nodeInfo.SetName)) { + if !slices.Contains(oplogShards, util.MakeReverseRSMapFunc(r.rsMap)(r.nodeInfo.SetName)) { return r.Done(ctx) // skip. 
no oplog for current rs } diff --git a/pbm/restore/util.go b/pbm/restore/util.go deleted file mode 100644 index 0301ea381..000000000 --- a/pbm/restore/util.go +++ /dev/null @@ -1,11 +0,0 @@ -package restore - -func Contains[T comparable](ss []T, s T) bool { - for _, e := range ss { - if e == s { - return true - } - } - - return false -} From b8d2fa60cf65a64d07c03aa2b8268406d51a1c88 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 5 Jun 2024 22:07:21 +0200 Subject: [PATCH 026/203] wip --- cmd/pbm-agent/restore.go | 16 +-- cmd/pbm/backup.go | 26 ++--- cmd/pbm/restore.go | 7 +- e2e-tests/docker/pbm.dockerfile | 2 +- e2e-tests/docker/tests.dockerfile | 2 +- packaging/scripts/mongodb-backup_builder.sh | 2 +- pbm/backup/delete.go | 13 ++- pbm/restore/logical.go | 120 +++++++++++++------- pbm/restore/physical.go | 39 ++++--- pbm/restore/restore.go | 46 ++++---- sdk/impl.go | 12 +- 11 files changed, 166 insertions(+), 119 deletions(-) diff --git a/cmd/pbm-agent/restore.go b/cmd/pbm-agent/restore.go index afa618588..7f0bfa0e8 100644 --- a/cmd/pbm-agent/restore.go +++ b/cmd/pbm-agent/restore.go @@ -374,20 +374,16 @@ func (a *Agent) Restore(ctx context.Context, r *ctrl.RestoreCmd, opid ctrl.OPID, a.removePitr() } - stg, err := util.GetStorage(ctx, a.leadConn, l) - if err != nil { - l.Error("get storage: %v", err) - return - } - var bcpType defs.BackupType - bcp := &backup.BackupMeta{} + var bcp *backup.BackupMeta if r.External && r.BackupName == "" { bcpType = defs.ExternalBackup } else { l.Info("backup: %s", r.BackupName) - bcp, err = restore.SnapshotMeta(ctx, a.leadConn, r.BackupName, stg) + + // XXX: why is backup searched on storage? + bcp, err = restore.LookupBackupMeta(ctx, a.leadConn, r.BackupName) if err != nil { l.Error("define base backup: %v", err) return @@ -410,9 +406,9 @@ func (a *Agent) Restore(ctx context.Context, r *ctrl.RestoreCmd, opid ctrl.OPID, return } if r.OplogTS.IsZero() { - err = restore.New(a.leadConn, a.nodeConn, a.brief, r.RSMap).Snapshot(ctx, r, opid, l) + err = restore.New(a.leadConn, a.nodeConn, a.brief, r.RSMap).Snapshot(ctx, r, opid, bcp) } else { - err = restore.New(a.leadConn, a.nodeConn, a.brief, r.RSMap).PITR(ctx, r, opid, l) + err = restore.New(a.leadConn, a.nodeConn, a.brief, r.RSMap).PITR(ctx, r, opid, bcp) } case defs.PhysicalBackup, defs.IncrementalBackup, defs.ExternalBackup: if lck != nil { diff --git a/cmd/pbm/backup.go b/cmd/pbm/backup.go index c92b3b1d0..fb0e75283 100644 --- a/cmd/pbm/backup.go +++ b/cmd/pbm/backup.go @@ -118,7 +118,7 @@ func runBackup( return nil, errors.New("no config set. 
Set config with ") } if errors.Is(err, config.ErrMissedConfigProfile) { - return nil, errors.Errorf("invalid profile: %v", b.profile) + return nil, errors.Errorf("profile %q is not found", b.profile) } return nil, errors.Wrap(err, "get config") } @@ -355,23 +355,23 @@ func byteCountIEC(b int64) string { } func describeBackup(ctx context.Context, conn connect.Client, pbm sdk.Client, b *descBcp) (fmt.Stringer, error) { - opts := sdk.GetBackupByNameOptions{} - bcp, err := pbm.GetBackupByName(ctx, b.name, opts) + bcp, err := pbm.GetBackupByName(ctx, b.name, sdk.GetBackupByNameOptions{}) if err != nil { - return nil, err + return nil, errors.Wrap(err, "get backup meta") } var stg storage.Storage - if b.coll { - l := log.FromContext(ctx).NewDefaultEvent() - stg, err = util.GetStorage(ctx, conn, l) + if b.coll || bcp.Size == 0 { + // to read backed up collection names + // or calculate size of files for legacy backups + stg, err = util.StorageFromConfig(&bcp.Store.Storage, log.LogEventFromContext(ctx)) if err != nil { return nil, errors.Wrap(err, "get storage") } - _, err = stg.FileStat(defs.StorInitFile) - if err != nil { - return nil, errors.Wrap(err, "check storage access") + err = storage.HasReadAccess(ctx, stg) + if err != nil && !errors.Is(err, storage.ErrUninitialized) { + return nil, errors.Wrap(err, "check read access") } } @@ -399,12 +399,6 @@ func describeBackup(ctx context.Context, conn connect.Client, pbm sdk.Client, b if bcp.Size == 0 { switch bcp.Status { case defs.StatusDone, defs.StatusCancelled, defs.StatusError: - l := log.FromContext(ctx).NewDefaultEvent() - stg, err := util.GetStorage(ctx, conn, l) - if err != nil { - return nil, errors.Wrap(err, "get storage") - } - rv.Size, err = getLegacySnapshotSize(bcp, stg) if errors.Is(err, errMissedFile) && bcp.Status != defs.StatusDone { // canceled/failed backup can be incomplete. 
ignore diff --git a/cmd/pbm/restore.go b/cmd/pbm/restore.go index bd71f4d0f..648dbf4eb 100644 --- a/cmd/pbm/restore.go +++ b/cmd/pbm/restore.go @@ -395,16 +395,15 @@ func doRestore( startCtx, cancel = context.WithTimeout(ctx, defs.WaitActionStart) } else { ep, _ := config.GetEpoch(ctx, conn) - logger := log.FromContext(ctx) - l := logger.NewEvent(string(ctrl.CmdRestore), bcp, "", ep.TS()) + l := log.FromContext(ctx).NewEvent(string(ctrl.CmdRestore), bcp, "", ep.TS()) + stg, err := util.GetStorage(ctx, conn, l) if err != nil { return nil, errors.Wrap(err, "get storage") } fn = func(_ context.Context, _ connect.Client, name string) (*restore.RestoreMeta, error) { - return restore.GetPhysRestoreMeta(name, stg, - logger.NewEvent(string(ctrl.CmdRestore), bcp, "", ep.TS())) + return restore.GetPhysRestoreMeta(name, stg, l) } startCtx, cancel = context.WithTimeout(ctx, waitPhysRestoreStart) } diff --git a/e2e-tests/docker/pbm.dockerfile b/e2e-tests/docker/pbm.dockerfile index 3487ac111..cc923c911 100644 --- a/e2e-tests/docker/pbm.dockerfile +++ b/e2e-tests/docker/pbm.dockerfile @@ -11,7 +11,7 @@ RUN mkdir -p /data/db COPY --from=mongo_image /bin/mongod /bin/ RUN dnf install epel-release && dnf update && dnf install make gcc krb5-devel iproute-tc libfaketime -RUN curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.2.linux-amd64.tar.gz && \ +RUN curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.4.linux-amd64.tar.gz && \ rm -rf /usr/local/go && tar -C /usr/local -xzf /tmp/golang.tar.gz && rm /tmp/golang.tar.gz ENV PATH=$PATH:/usr/local/go/bin diff --git a/e2e-tests/docker/tests.dockerfile b/e2e-tests/docker/tests.dockerfile index ca1b1c75f..005680db4 100644 --- a/e2e-tests/docker/tests.dockerfile +++ b/e2e-tests/docker/tests.dockerfile @@ -2,7 +2,7 @@ FROM oraclelinux:8 AS base-build WORKDIR /build RUN dnf update && dnf install make gcc krb5-devel -RUN curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.2.linux-amd64.tar.gz && \ +RUN curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.4.linux-amd64.tar.gz && \ rm -rf /usr/local/go && tar -C /usr/local -xzf /tmp/golang.tar.gz && rm /tmp/golang.tar.gz ENV PATH=$PATH:/usr/local/go/bin diff --git a/packaging/scripts/mongodb-backup_builder.sh b/packaging/scripts/mongodb-backup_builder.sh index b4f91cab0..cbf8e72d6 100644 --- a/packaging/scripts/mongodb-backup_builder.sh +++ b/packaging/scripts/mongodb-backup_builder.sh @@ -141,7 +141,7 @@ install_golang() { elif [ x"$ARCH" = "xaarch64" ]; then GO_ARCH="arm64" fi - wget https://go.dev/dl/go1.22.3.linux-${GO_ARCH}.tar.gz -O /tmp/go1.22.tar.gz + wget https://go.dev/dl/go1.22.4.linux-${GO_ARCH}.tar.gz -O /tmp/go1.22.tar.gz tar --transform=s,go,go1.22, -zxf /tmp/go1.22.tar.gz rm -rf /usr/local/go* mv go1.22 /usr/local/ diff --git a/pbm/backup/delete.go b/pbm/backup/delete.go index 19a34bbab..f0e29c8fd 100644 --- a/pbm/backup/delete.go +++ b/pbm/backup/delete.go @@ -74,7 +74,7 @@ func deleteBackupImpl(ctx context.Context, cc connect.Client, bcp *BackupMeta) e return err } - stg, err := util.GetStorage(ctx, cc, log.LogEventFromContext(ctx)) + stg, err := util.StorageFromConfig(&bcp.Store.Storage, log.LogEventFromContext(ctx)) if err != nil { return errors.Wrap(err, "get storage") } @@ -108,7 +108,7 @@ func deleteIncremetalChainImpl(ctx context.Context, cc connect.Client, bcp *Back all = append(all, bcps...) 
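Both hunks in this file serve the same goal: deletion becomes profile-aware because the storage client is rebuilt from the configuration snapshot recorded in the backup's own metadata instead of from the currently active main storage. A minimal sketch of the resulting pattern (the helper name and the placement of the read-access check are assumptions, not part of the patch):

func storageForBackup(ctx context.Context, bcp *BackupMeta, l log.LogEvent) (storage.Storage, error) {
	// bcp.Store.Storage is the storage config captured at backup time,
	// so files are removed from the same storage (main or profile)
	// they were written to
	stg, err := util.StorageFromConfig(&bcp.Store.Storage, l)
	if err != nil {
		return nil, errors.Wrap(err, "get storage from backup meta")
	}
	err = storage.HasReadAccess(ctx, stg)
	if err != nil && !errors.Is(err, storage.ErrUninitialized) {
		return nil, errors.Wrap(err, "check read access")
	}
	return stg, nil
}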
} - stg, err := util.GetStorage(ctx, cc, log.LogEventFromContext(ctx)) + stg, err := util.StorageFromConfig(&bcp.Store.Storage, log.LogEventFromContext(ctx)) if err != nil { return errors.Wrap(err, "get storage") } @@ -477,9 +477,14 @@ func MakeCleanupInfo(ctx context.Context, conn connect.Client, ts primitive.Time return CleanupInfo{Backups: backups, Chunks: chunks}, nil } -// listBackupsBefore returns backups with restore cluster time less than or equals to ts +// listBackupsBefore returns backups with restore cluster time less than or equals to ts. +// +// It does not include backups stored on an external storages. func listBackupsBefore(ctx context.Context, conn connect.Client, ts primitive.Timestamp) ([]BackupMeta, error) { - f := bson.D{{"last_write_ts", bson.M{"$lt": ts}}} + f := bson.D{ + {"store.profile", nil}, + {"last_write_ts", bson.M{"$lt": ts}}, + } o := options.Find().SetSort(bson.D{{"last_write_ts", 1}}) cur, err := conn.BcpCollection().Find(ctx, f, o) if err != nil { diff --git a/pbm/restore/logical.go b/pbm/restore/logical.go index 47cce3479..6ed35fff4 100644 --- a/pbm/restore/logical.go +++ b/pbm/restore/logical.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "path" + "slices" "strings" "time" @@ -64,6 +65,12 @@ type Restore struct { indexCatalog *idx.IndexCatalog } +type oplogRange struct { + chunks []oplog.OplogChunk + + storage storage.Storage +} + // PBM restore from temp collections (pbmRUsers/pbmRRoles)should be used type restoreUsersAndRolesOption bool @@ -141,17 +148,24 @@ func shouldRestoreUsersAndRoles(nssBackup, nssRestore []string, usingUsersAndRol // Snapshot do the snapshot's (mongo dump) restore // //nolint:nonamedreturns -func (r *Restore) Snapshot(ctx context.Context, cmd *ctrl.RestoreCmd, opid ctrl.OPID, l log.LogEvent) (err error) { +func (r *Restore) Snapshot( + ctx context.Context, + cmd *ctrl.RestoreCmd, + opid ctrl.OPID, + bcp *backup.BackupMeta, +) (err error) { + l := log.LogEventFromContext(ctx) + defer func() { r.exit(log.Copy(context.Background(), ctx), err) }() - bcp, err := SnapshotMeta(ctx, r.leadConn, cmd.BackupName, r.stg) + err = r.init(ctx, cmd.Name, opid, l) if err != nil { return err } - err = r.init(ctx, cmd.Name, opid, l) + r.stg, err = util.StorageFromConfig(&bcp.Store.Storage, r.log) if err != nil { - return err + return errors.Wrap(err, "get backup storage") } nss := resolveNamespace(bcp.Namespaces, cmd.Namespaces, cmd.UsersAndRoles) @@ -196,13 +210,15 @@ func (r *Restore) Snapshot(ctx context.Context, cmd *ctrl.RestoreCmd, opid ctrl. 
return err } + oplogRanges := []oplogRange{ + {chunks: chunks, storage: r.stg}, + } oplogOption := &applyOplogOption{end: &bcp.LastWriteTS, nss: nss} if r.nodeInfo.IsConfigSrv() && util.IsSelective(nss) { oplogOption.nss = []string{"config.databases"} oplogOption.filter = newConfigsvrOpFilter(nss) } - - err = r.applyOplog(ctx, chunks, oplogOption) + err = r.applyOplog(ctx, oplogRanges, oplogOption) if err != nil { return err } @@ -245,7 +261,14 @@ func newConfigsvrOpFilter(nss []string) oplog.OpFilter { // PITR do the Point-in-Time Recovery // //nolint:nonamedreturns -func (r *Restore) PITR(ctx context.Context, cmd *ctrl.RestoreCmd, opid ctrl.OPID, l log.LogEvent) (err error) { +func (r *Restore) PITR( + ctx context.Context, + cmd *ctrl.RestoreCmd, + opid ctrl.OPID, + bcp *backup.BackupMeta, +) (err error) { + l := log.LogEventFromContext(ctx) + defer func() { r.exit(log.Copy(context.Background(), ctx), err) }() err = r.init(ctx, cmd.Name, opid, l) @@ -253,15 +276,16 @@ func (r *Restore) PITR(ctx context.Context, cmd *ctrl.RestoreCmd, opid ctrl.OPID return err } - bcp, err := SnapshotMeta(ctx, r.leadConn, cmd.BackupName, r.stg) - if err != nil { - return errors.Wrap(err, "get base backup") - } if bcp.LastWriteTS.Compare(cmd.OplogTS) >= 0 { return errors.New("snapshot's last write is later than the target time. " + "Try to set an earlier snapshot. Or leave the snapshot empty so PBM will choose one.") } + r.stg, err = util.StorageFromConfig(&bcp.Store.Storage, r.log) + if err != nil { + return errors.Wrap(err, "get backup storage") + } + nss := resolveNamespace(bcp.Namespaces, cmd.Namespaces, cmd.UsersAndRoles) usersAndRolesOpt := shouldRestoreUsersAndRoles(bcp.Namespaces, cmd.Namespaces, cmd.UsersAndRoles) @@ -325,13 +349,21 @@ func (r *Restore) PITR(ctx context.Context, cmd *ctrl.RestoreCmd, opid ctrl.OPID return err } + oplogStorage, err := util.GetStorage(ctx, r.leadConn, l) + if err != nil { + return errors.Wrap(err, "get oplog storage") + } + + oplogRanges := []oplogRange{ + {chunks: bcpChunks, storage: r.stg}, + {chunks: chunks, storage: oplogStorage}, + } oplogOption := applyOplogOption{end: &cmd.OplogTS, nss: nss} if r.nodeInfo.IsConfigSrv() && util.IsSelective(nss) { oplogOption.nss = []string{"config.databases"} oplogOption.filter = newConfigsvrOpFilter(nss) } - - err = r.applyOplog(ctx, append(bcpChunks, chunks...), &oplogOption) + err = r.applyOplog(ctx, oplogRanges, &oplogOption) if err != nil { return err } @@ -396,12 +428,16 @@ func (r *Restore) ReplayOplog(ctx context.Context, cmd *ctrl.ReplayCmd, opid ctr return err } + oplogRanges := []oplogRange{ + {chunks: opChunks, storage: r.stg}, + } oplogOption := applyOplogOption{ start: &cmd.Start, end: &cmd.End, unsafe: true, } - if err = r.applyOplog(ctx, opChunks, &oplogOption); err != nil { + err = r.applyOplog(ctx, oplogRanges, &oplogOption) + if err != nil { return err } @@ -480,11 +516,6 @@ func (r *Restore) init(ctx context.Context, name string, opid ctrl.OPID, l log.L return errors.Wrap(err, "add shard's metadata") } - r.stg, err = util.GetStorage(ctx, r.leadConn, r.log) - if err != nil { - return errors.Wrap(err, "get backup storage") - } - return nil } @@ -520,18 +551,32 @@ func (r *Restore) chunks(ctx context.Context, from, to primitive.Timestamp) ([]o return chunks(ctx, r.leadConn, r.stg, from, to, r.nodeInfo.SetName, r.rsMap) } -func SnapshotMeta( +// LookupBackupMeta fetches backup metadata. +// +// It tries to find the metadata in database. 
If there is no such metadata in +// database, it tries to fetch from the main storage. +func LookupBackupMeta( ctx context.Context, conn connect.Client, backupName string, - stg storage.Storage, ) (*backup.BackupMeta, error) { bcp, err := backup.NewDBManager(conn).GetBackupByName(ctx, backupName) - if errors.Is(err, errors.ErrNotFound) { - bcp, err = GetMetaFromStore(stg, backupName) + if err == nil { + return bcp, nil + } + if !errors.Is(err, errors.ErrNotFound) { + return nil, errors.Wrap(err, "get backup metadata from db") + } + + var stg storage.Storage + stg, err = util.GetStorage(ctx, conn, log.LogEventFromContext(ctx)) + if err != nil { + return nil, errors.Wrap(err, "get storage") } + + bcp, err = GetMetaFromStore(stg, backupName) if err != nil { - return nil, errors.Wrap(err, "get backup metadata") + return nil, errors.Wrap(err, "get backup metadata from storage") } return bcp, nil @@ -724,19 +769,9 @@ func (r *Restore) RunSnapshot( // so we'll continue with selective restore } - var cfg *config.Config - // get pbm.Config for creating a storage.Storage later. - // while r.stg is already created storage for the restore, - // it triggers data race warnings during concurrent file downloading/reading. - // for that, it's better to create a new storage for each file - cfg, err = config.GetConfig(ctx, r.leadConn) - if err != nil { - return errors.Wrap(err, "get config") - } - rdr, err = snapshot.DownloadDump( func(ns string) (io.ReadCloser, error) { - stg, err := util.StorageFromConfig(&cfg.Storage, r.log) + stg, err := util.StorageFromConfig(&bcp.Store.Storage, r.log) if err != nil { return nil, errors.Wrap(err, "get storage") } @@ -1086,15 +1121,22 @@ func (r *Restore) getcommittedTxn(ctx context.Context) (map[string]primitive.Tim return txn, nil } -func (r *Restore) applyOplog(ctx context.Context, chunks []oplog.OplogChunk, options *applyOplogOption) error { +func (r *Restore) applyOplog(ctx context.Context, ranges []oplogRange, options *applyOplogOption) error { mgoV, err := version.GetMongoVersion(ctx, r.nodeConn) if err != nil || len(mgoV.Version) < 1 { return errors.Wrap(err, "define mongo version") } stat := phys.RestoreShardStat{} - partial, err := applyOplog(ctx, r.nodeConn, chunks, options, r.nodeInfo.IsSharded(), - r.indexCatalog, r.setcommittedTxn, r.getcommittedTxn, &stat.Txn, - &mgoV, r.stg, r.log) + partial, err := applyOplog(ctx, + r.nodeConn, + ranges, + options, + r.nodeInfo.IsSharded(), + r.indexCatalog, + r.setcommittedTxn, + r.getcommittedTxn, + &stat.Txn, + &mgoV) if err != nil { return errors.Wrap(err, "reply oplog") } diff --git a/pbm/restore/physical.go b/pbm/restore/physical.go index 7108ec348..88557afc4 100644 --- a/pbm/restore/physical.go +++ b/pbm/restore/physical.go @@ -35,7 +35,6 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/log" - "github.com/percona/percona-backup-mongodb/pbm/oplog" "github.com/percona/percona-backup-mongodb/pbm/restore/phys" "github.com/percona/percona-backup-mongodb/pbm/storage" "github.com/percona/percona-backup-mongodb/pbm/storage/s3" @@ -762,12 +761,14 @@ func (r *PhysRestore) Snapshot( meta.Type = r.bcp.Type } - var opChunks []oplog.OplogChunk + var oplogRanges []oplogRange if !pitr.IsZero() { - opChunks, err = chunks(ctx, r.leadConn, r.stg, r.restoreTS, pitr, r.rsConf.ID, r.rsMap) + chunks, err := chunks(ctx, r.leadConn, r.stg, r.restoreTS, pitr, r.rsConf.ID, r.rsMap) if err != nil { return err } + + 
oplogRanges = append(oplogRanges, oplogRange{chunks: chunks, storage: r.stg}) } if meta.Type == defs.IncrementalBackup { @@ -925,7 +926,7 @@ func (r *PhysRestore) Snapshot( if !pitr.IsZero() && r.nodeInfo.IsPrimary { l.Info("replaying pitr oplog") - err = r.replayOplog(r.bcp.LastWriteTS, pitr, opChunks, &stats) + err = r.replayOplog(r.bcp.LastWriteTS, pitr, oplogRanges, &stats) if err != nil { return errors.Wrap(err, "replay pitr oplog") } @@ -1292,8 +1293,9 @@ func (r *PhysRestore) recoverStandalone() error { } func (r *PhysRestore) replayOplog( - from, to primitive.Timestamp, - opChunks []oplog.OplogChunk, + from primitive.Timestamp, + to primitive.Timestamp, + oplogRanges []oplogRange, stat *phys.RestoreShardStat, ) error { err := r.startMongo("--dbpath", r.dbpath, @@ -1302,13 +1304,13 @@ func (r *PhysRestore) replayOplog( return errors.Wrap(err, "start mongo") } - c, err := tryConn(r.tmpPort, path.Join(r.dbpath, internalMongodLog)) + nodeConn, err := tryConn(r.tmpPort, path.Join(r.dbpath, internalMongodLog)) if err != nil { return errors.Wrap(err, "connect to mongo") } ctx := context.Background() - _, err = c.Database("local").Collection("system.replset").InsertOne(ctx, + _, err = nodeConn.Database("local").Collection("system.replset").InsertOne(ctx, topo.RSConfig{ ID: r.rsConf.ID, CSRS: r.nodeInfo.IsConfigSrv(), @@ -1326,7 +1328,7 @@ func (r *PhysRestore) replayOplog( return errors.Wrapf(err, "upate rs.member host to %s", r.nodeInfo.Me) } - err = shutdown(c, r.dbpath) + err = shutdown(nodeConn, r.dbpath) if err != nil { return errors.Wrap(err, "shutdown mongo") } @@ -1345,12 +1347,12 @@ func (r *PhysRestore) replayOplog( return errors.Wrap(err, "start mongo as rs") } - c, err = tryConn(r.tmpPort, path.Join(r.dbpath, internalMongodLog)) + nodeConn, err = tryConn(r.tmpPort, path.Join(r.dbpath, internalMongodLog)) if err != nil { return errors.Wrap(err, "connect to mongo rs") } - mgoV, err := version.GetMongoVersion(ctx, c) + mgoV, err := version.GetMongoVersion(ctx, nodeConn) if err != nil || len(mgoV.Version) < 1 { return errors.Wrap(err, "define mongo version") } @@ -1360,9 +1362,16 @@ func (r *PhysRestore) replayOplog( end: &to, unsafe: true, } - partial, err := applyOplog(ctx, c, opChunks, &oplogOption, r.nodeInfo.IsSharded(), - nil, r.setcommittedTxn, r.getcommittedTxn, &stat.Txn, - &mgoV, r.stg, r.log) + partial, err := applyOplog(ctx, + nodeConn, + oplogRanges, + &oplogOption, + r.nodeInfo.IsSharded(), + nil, + r.setcommittedTxn, + r.getcommittedTxn, + &stat.Txn, + &mgoV) if err != nil { return errors.Wrap(err, "reply oplog") } @@ -1383,7 +1392,7 @@ func (r *PhysRestore) replayOplog( } } - err = shutdown(c, r.dbpath) + err = shutdown(nodeConn, r.dbpath) if err != nil { return errors.Wrap(err, "shutdown mongo") } diff --git a/pbm/restore/restore.go b/pbm/restore/restore.go index 6811cd810..358c4ff8c 100644 --- a/pbm/restore/restore.go +++ b/pbm/restore/restore.go @@ -324,7 +324,7 @@ type ( func applyOplog( ctx context.Context, node *mongo.Client, - chunks []oplog.OplogChunk, + ranges []oplogRange, options *applyOplogOption, sharded bool, ic *idx.IndexCatalog, @@ -332,9 +332,8 @@ func applyOplog( getTxn getcommittedTxnFn, stat *phys.DistTxnStat, mgoV *version.MongoVersion, - stg storage.Storage, - log log.LogEvent, ) (partial []oplog.Txn, err error) { + log := log.LogEventFromContext(ctx) log.Info("starting oplog replay") var ( @@ -360,25 +359,28 @@ func applyOplog( oplogRestore.SetIncludeNS(options.nss) var lts primitive.Timestamp - for _, chnk := range chunks { - log.Debug("+ 
applying %v", chnk) - - // If the compression is Snappy and it failed we try S2. - // Up until v1.7.0 the compression of pitr chunks was always S2. - // But it was a mess in the code which lead to saving pitr chunk files - // with the `.snappy`` extension although it was S2 in fact. And during - // the restore, decompression treated .snappy as S2 ¯\_(ツ)_/¯ It wasn’t - // an issue since there was no choice. Now, Snappy produces `.snappy` files - // and S2 - `.s2` which is ok. But this means the old chunks (made by previous - // PBM versions) won’t be compatible - during the restore, PBM will treat such - // files as Snappy (judging by its suffix) but in fact, they are s2 files - // and restore will fail with snappy: corrupt input. So we try S2 in such a case. - lts, err = replayChunk(chnk.FName, oplogRestore, stg, chnk.Compression) - if err != nil && errors.Is(err, snappy.ErrCorrupt) { - lts, err = replayChunk(chnk.FName, oplogRestore, stg, compress.CompressionTypeS2) - } - if err != nil { - return nil, errors.Wrapf(err, "replay chunk %v.%v", chnk.StartTS.T, chnk.EndTS.T) + for _, oplogRange := range ranges { + stg := oplogRange.storage + for _, chnk := range oplogRange.chunks { + log.Debug("+ applying %v", chnk) + + // If the compression is Snappy and it failed we try S2. + // Up until v1.7.0 the compression of pitr chunks was always S2. + // But it was a mess in the code which lead to saving pitr chunk files + // with the `.snappy`` extension although it was S2 in fact. And during + // the restore, decompression treated .snappy as S2 ¯\_(ツ)_/¯ It wasn’t + // an issue since there was no choice. Now, Snappy produces `.snappy` files + // and S2 - `.s2` which is ok. But this means the old chunks (made by previous + // PBM versions) won’t be compatible - during the restore, PBM will treat such + // files as Snappy (judging by its suffix) but in fact, they are s2 files + // and restore will fail with snappy: corrupt input. So we try S2 in such a case. 
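			// (Illustrative note: stg below comes from the enclosing
			// oplogRange rather than from one restore-wide storage handle.
			// After the config-profile changes above, a PITR restore may
			// read the base snapshot's chunks from the storage recorded in
			// the backup metadata while the newer slicer chunks live on the
			// currently configured main storage -- see the two-element
			// oplogRanges slice built in (*Restore).PITR.)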
+ lts, err = replayChunk(chnk.FName, oplogRestore, stg, chnk.Compression) + if err != nil && errors.Is(err, snappy.ErrCorrupt) { + lts, err = replayChunk(chnk.FName, oplogRestore, stg, compress.CompressionTypeS2) + } + if err != nil { + return nil, errors.Wrapf(err, "replay chunk %v.%v", chnk.StartTS.T, chnk.EndTS.T) + } } } diff --git a/sdk/impl.go b/sdk/impl.go index 2689ffb9b..0ddb3d578 100644 --- a/sdk/impl.go +++ b/sdk/impl.go @@ -141,7 +141,7 @@ func (c *clientImpl) GetBackupByName( } if options.FetchFilelist { - err = fillFilelistForBackup(ctx, c.conn, bcp) + err = fillFilelistForBackup(ctx, bcp) if err != nil { return nil, errors.Wrap(err, "fetch filelist") } @@ -150,7 +150,7 @@ func (c *clientImpl) GetBackupByName( return bcp, nil } -func fillFilelistForBackup(ctx context.Context, cc connect.Client, bcp *BackupMetadata) error { +func fillFilelistForBackup(ctx context.Context, bcp *BackupMetadata) error { var err error var stg storage.Storage @@ -158,7 +158,7 @@ func fillFilelistForBackup(ctx context.Context, cc connect.Client, bcp *BackupMe eg.SetLimit(runtime.NumCPU()) if version.HasFilelistFile(bcp.PBMVersion) { - stg, err = util.GetStorage(ctx, cc, nil) + stg, err = util.StorageFromConfig(&bcp.Store.Storage, log.LogEventFromContext(ctx)) if err != nil { return errors.Wrap(err, "get storage") } @@ -191,7 +191,7 @@ func fillFilelistForBackup(ctx context.Context, cc connect.Client, bcp *BackupMe if stg == nil { // in case if it is the first backup made with filelist file - stg, err = getStorageForRead(ctx, cc) + stg, err = getStorageForRead(ctx, bcp) if err != nil { return errors.Wrap(err, "get storage") } @@ -216,8 +216,8 @@ func fillFilelistForBackup(ctx context.Context, cc connect.Client, bcp *BackupMe return eg.Wait() } -func getStorageForRead(ctx context.Context, cc connect.Client) (storage.Storage, error) { - stg, err := util.GetStorage(ctx, cc, nil) +func getStorageForRead(ctx context.Context, bcp *backup.BackupMeta) (storage.Storage, error) { + stg, err := util.StorageFromConfig(&bcp.Store.Storage, log.LogEventFromContext(ctx)) if err != nil { return nil, errors.Wrap(err, "get storage") } From fcb9cb425402903d6d12dfb6401208ce555be69c Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 10 Jun 2024 09:51:05 +0200 Subject: [PATCH 027/203] PBM-1043: PITR priorities refactoring (#950) * Extract priority logic into prio package * Rename function for getting nodes priorities: CalcNodesPriority. Previously the function has been used just for the Backup priorities, but now it'll be reused also for PITR priorities calculation. 
* Remove config dependency from priority calculation * Add tests for priority logic * Extract PITR agent's logic into separate file --- cmd/pbm-agent/backup.go | 3 +- cmd/pbm-agent/pitr.go | 310 +++++++++++++++++ cmd/pbm-agent/restore.go | 294 ---------------- {cmd/pbm-agent => pbm/prio}/priority.go | 29 +- pbm/prio/priority_test.go | 441 ++++++++++++++++++++++++ 5 files changed, 764 insertions(+), 313 deletions(-) create mode 100644 cmd/pbm-agent/pitr.go rename {cmd/pbm-agent => pbm/prio}/priority.go (72%) create mode 100644 pbm/prio/priority_test.go diff --git a/cmd/pbm-agent/backup.go b/cmd/pbm-agent/backup.go index f0f683844..b93c714ed 100644 --- a/cmd/pbm-agent/backup.go +++ b/cmd/pbm-agent/backup.go @@ -11,6 +11,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/lock" "github.com/percona/percona-backup-mongodb/pbm/log" + "github.com/percona/percona-backup-mongodb/pbm/prio" "github.com/percona/percona-backup-mongodb/pbm/storage" "github.com/percona/percona-backup-mongodb/pbm/topo" "github.com/percona/percona-backup-mongodb/pbm/version" @@ -172,7 +173,7 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, validCandidates = append(validCandidates, s) } - nodes, err := BcpNodesPriority(ctx, a.leadConn, c, validCandidates) + nodes, err := prio.CalcNodesPriority(ctx, c, cfg.Backup.Priority, validCandidates) if err != nil { l.Error("get nodes priority: %v", err) return diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go new file mode 100644 index 000000000..3dad41628 --- /dev/null +++ b/cmd/pbm-agent/pitr.go @@ -0,0 +1,310 @@ +package main + +import ( + "context" + "time" + + "go.mongodb.org/mongo-driver/mongo" + + "github.com/percona/percona-backup-mongodb/pbm/backup" + "github.com/percona/percona-backup-mongodb/pbm/config" + "github.com/percona/percona-backup-mongodb/pbm/connect" + "github.com/percona/percona-backup-mongodb/pbm/ctrl" + "github.com/percona/percona-backup-mongodb/pbm/defs" + "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/lock" + "github.com/percona/percona-backup-mongodb/pbm/log" + "github.com/percona/percona-backup-mongodb/pbm/slicer" + "github.com/percona/percona-backup-mongodb/pbm/topo" + "github.com/percona/percona-backup-mongodb/pbm/util" +) + +type currentPitr struct { + slicer *slicer.Slicer + w chan ctrl.OPID // to wake up a slicer on demand (not to wait for the tick) + cancel context.CancelFunc +} + +func (a *Agent) setPitr(p *currentPitr) { + a.mx.Lock() + defer a.mx.Unlock() + + if a.pitrjob != nil { + a.pitrjob.cancel() + } + + a.pitrjob = p +} + +func (a *Agent) removePitr() { + a.setPitr(nil) +} + +func (a *Agent) getPitr() *currentPitr { + a.mx.Lock() + defer a.mx.Unlock() + + return a.pitrjob +} + +func (a *Agent) sliceNow(opid ctrl.OPID) { + a.mx.Lock() + defer a.mx.Unlock() + + if a.pitrjob == nil { + return + } + + a.pitrjob.w <- opid +} + +const pitrCheckPeriod = time.Second * 15 + +// PITR starts PITR processing routine +func (a *Agent) PITR(ctx context.Context) { + l := log.FromContext(ctx) + l.Printf("starting PITR routine") + + for { + wait := pitrCheckPeriod + + err := a.pitr(ctx) + if err != nil { + // we need epoch just to log pitr err with an extra context + // so not much care if we get it or not + ep, _ := config.GetEpoch(ctx, a.leadConn) + l.Error(string(ctrl.CmdPITR), "", "", ep.TS(), "init: %v", err) + + // penalty to the failed node so healthy nodes would have priority on next try + 
wait *= 2 + } + + time.Sleep(wait) + } +} + +func (a *Agent) stopPitrOnOplogOnlyChange(currOO bool) { + if a.prevOO == nil { + a.prevOO = &currOO + return + } + + if *a.prevOO == currOO { + return + } + + a.prevOO = &currOO + a.removePitr() +} + +// canSlicingNow returns lock.ConcurrentOpError if there is a parallel operation. +// Only physical backups (full, incremental, external) is allowed. +func canSlicingNow(ctx context.Context, conn connect.Client) error { + locks, err := lock.GetLocks(ctx, conn, &lock.LockHeader{}) + if err != nil { + return errors.Wrap(err, "get locks data") + } + + for i := range locks { + l := &locks[i] + + if l.Type != ctrl.CmdBackup { + return lock.ConcurrentOpError{l.LockHeader} + } + + bcp, err := backup.GetBackupByOPID(ctx, conn, l.OPID) + if err != nil { + return errors.Wrap(err, "get backup metadata") + } + + if bcp.Type == defs.LogicalBackup { + return lock.ConcurrentOpError{l.LockHeader} + } + } + + return nil +} + +func (a *Agent) pitr(ctx context.Context) error { + cfg, err := config.GetConfig(ctx, a.leadConn) + if err != nil { + if !errors.Is(err, mongo.ErrNoDocuments) { + return errors.Wrap(err, "get conf") + } + cfg = &config.Config{} + } + + a.stopPitrOnOplogOnlyChange(cfg.PITR.OplogOnly) + + if !cfg.PITR.Enabled { + a.removePitr() + return nil + } + + ep := config.Epoch(cfg.Epoch) + l := log.FromContext(ctx).NewEvent(string(ctrl.CmdPITR), "", "", ep.TS()) + ctx = log.SetLogEventToContext(ctx, l) + + if err := canSlicingNow(ctx, a.leadConn); err != nil { + e := lock.ConcurrentOpError{} + if errors.As(err, &e) { + l.Info("oplog slicer is paused for lock [%s, opid: %s]", e.Lock.Type, e.Lock.OPID) + return nil + } + + return err + } + + slicerInterval := cfg.OplogSlicerInterval() + + if p := a.getPitr(); p != nil { + // already do the job + currInterval := p.slicer.GetSpan() + if currInterval != slicerInterval { + p.slicer.SetSpan(slicerInterval) + + // wake up slicer only if a new interval is smaller + if currInterval > slicerInterval { + a.sliceNow(ctrl.NilOPID) + } + } + + return nil + } + + // just a check before a real locking + // just trying to avoid redundant heavy operations + moveOn, err := a.pitrLockCheck(ctx) + if err != nil { + return errors.Wrap(err, "check if already run") + } + + if !moveOn { + return nil + } + + // should be after the lock pre-check + // + // if node failing, then some other agent with healthy node will hopefully catch up + // so this code won't be reached and will not pollute log with "pitr" errors while + // the other node does successfully slice + ninf, err := topo.GetNodeInfoExt(ctx, a.nodeConn) + if err != nil { + return errors.Wrap(err, "get node info") + } + q, err := topo.NodeSuits(ctx, a.nodeConn, ninf) + if err != nil { + return errors.Wrap(err, "node check") + } + + // node is not suitable for doing backup + if !q { + return nil + } + + epts := ep.TS() + lck := lock.NewOpLock(a.leadConn, lock.LockHeader{ + Replset: a.brief.SetName, + Node: a.brief.Me, + Type: ctrl.CmdPITR, + Epoch: &epts, + }) + + got, err := a.acquireLock(ctx, lck, l) + if err != nil { + return errors.Wrap(err, "acquiring lock") + } + if !got { + l.Debug("skip: lock not acquired") + return nil + } + + stg, err := util.StorageFromConfig(cfg.Storage, l) + if err != nil { + return errors.Wrap(err, "unable to get storage configuration") + } + + s := slicer.NewSlicer(a.brief.SetName, a.leadConn, a.nodeConn, stg, cfg, log.FromContext(ctx)) + s.SetSpan(slicerInterval) + + if cfg.PITR.OplogOnly { + err = s.OplogOnlyCatchup(ctx) + } else { + err = 
s.Catchup(ctx) + } + if err != nil { + if err := lck.Release(); err != nil { + l.Error("release lock: %v", err) + } + return errors.Wrap(err, "catchup") + } + + go func() { + stopSlicingCtx, stopSlicing := context.WithCancel(ctx) + defer stopSlicing() + stopC := make(chan struct{}) + + w := make(chan ctrl.OPID) + a.setPitr(¤tPitr{ + slicer: s, + cancel: stopSlicing, + w: w, + }) + + go func() { + <-stopSlicingCtx.Done() + close(stopC) + a.removePitr() + }() + + streamErr := s.Stream(ctx, + stopC, + w, + cfg.PITR.Compression, + cfg.PITR.CompressionLevel, + cfg.Backup.Timeouts) + if streamErr != nil { + out := l.Error + if errors.Is(streamErr, slicer.OpMovedError{}) { + out = l.Info + } + out("streaming oplog: %v", streamErr) + } + + if err := lck.Release(); err != nil { + l.Error("release lock: %v", err) + } + + // Penalty to the failed node so healthy nodes would have priority on next try. + // But lock has to be released first. Otherwise, healthy nodes would wait for the lock release + // and the penalty won't have any sense. + if streamErr != nil { + time.Sleep(pitrCheckPeriod * 2) + } + }() + + return nil +} + +func (a *Agent) pitrLockCheck(ctx context.Context) (bool, error) { + ts, err := topo.GetClusterTime(ctx, a.leadConn) + if err != nil { + return false, errors.Wrap(err, "read cluster time") + } + + tl, err := lock.GetOpLockData(ctx, a.leadConn, &lock.LockHeader{ + Replset: a.brief.SetName, + Type: ctrl.CmdPITR, + }) + if err != nil { + if errors.Is(err, mongo.ErrNoDocuments) { + // no lock. good to move on + return true, nil + } + + return false, errors.Wrap(err, "get lock") + } + + // stale lock means we should move on and clean it up during the lock.Acquire + return tl.Heartbeat.T+defs.StaleFrameSec < ts.T, nil +} diff --git a/cmd/pbm-agent/restore.go b/cmd/pbm-agent/restore.go index 2329380c3..3da81bd01 100644 --- a/cmd/pbm-agent/restore.go +++ b/cmd/pbm-agent/restore.go @@ -4,312 +4,18 @@ import ( "context" "time" - "go.mongodb.org/mongo-driver/mongo" - "github.com/percona/percona-backup-mongodb/pbm/backup" "github.com/percona/percona-backup-mongodb/pbm/config" - "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/ctrl" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/lock" "github.com/percona/percona-backup-mongodb/pbm/log" "github.com/percona/percona-backup-mongodb/pbm/restore" - "github.com/percona/percona-backup-mongodb/pbm/slicer" "github.com/percona/percona-backup-mongodb/pbm/topo" "github.com/percona/percona-backup-mongodb/pbm/util" ) -type currentPitr struct { - slicer *slicer.Slicer - w chan ctrl.OPID // to wake up a slicer on demand (not to wait for the tick) - cancel context.CancelFunc -} - -func (a *Agent) setPitr(p *currentPitr) { - a.mx.Lock() - defer a.mx.Unlock() - - if a.pitrjob != nil { - a.pitrjob.cancel() - } - - a.pitrjob = p -} - -func (a *Agent) removePitr() { - a.setPitr(nil) -} - -func (a *Agent) getPitr() *currentPitr { - a.mx.Lock() - defer a.mx.Unlock() - - return a.pitrjob -} - -func (a *Agent) sliceNow(opid ctrl.OPID) { - a.mx.Lock() - defer a.mx.Unlock() - - if a.pitrjob == nil { - return - } - - a.pitrjob.w <- opid -} - -const pitrCheckPeriod = time.Second * 15 - -// PITR starts PITR processing routine -func (a *Agent) PITR(ctx context.Context) { - l := log.FromContext(ctx) - l.Printf("starting PITR routine") - - for { - wait := pitrCheckPeriod - - err := a.pitr(ctx) - if err != nil { - // 
we need epoch just to log pitr err with an extra context - // so not much care if we get it or not - ep, _ := config.GetEpoch(ctx, a.leadConn) - l.Error(string(ctrl.CmdPITR), "", "", ep.TS(), "init: %v", err) - - // penalty to the failed node so healthy nodes would have priority on next try - wait *= 2 - } - - time.Sleep(wait) - } -} - -func (a *Agent) stopPitrOnOplogOnlyChange(currOO bool) { - if a.prevOO == nil { - a.prevOO = &currOO - return - } - - if *a.prevOO == currOO { - return - } - - a.prevOO = &currOO - a.removePitr() -} - -// canSlicingNow returns lock.ConcurrentOpError if there is a parallel operation. -// Only physical backups (full, incremental, external) is allowed. -func canSlicingNow(ctx context.Context, conn connect.Client) error { - locks, err := lock.GetLocks(ctx, conn, &lock.LockHeader{}) - if err != nil { - return errors.Wrap(err, "get locks data") - } - - for i := range locks { - l := &locks[i] - - if l.Type != ctrl.CmdBackup { - return lock.ConcurrentOpError{l.LockHeader} - } - - bcp, err := backup.GetBackupByOPID(ctx, conn, l.OPID) - if err != nil { - return errors.Wrap(err, "get backup metadata") - } - - if bcp.Type == defs.LogicalBackup { - return lock.ConcurrentOpError{l.LockHeader} - } - } - - return nil -} - -func (a *Agent) pitr(ctx context.Context) error { - cfg, err := config.GetConfig(ctx, a.leadConn) - if err != nil { - if !errors.Is(err, mongo.ErrNoDocuments) { - return errors.Wrap(err, "get conf") - } - cfg = &config.Config{} - } - - a.stopPitrOnOplogOnlyChange(cfg.PITR.OplogOnly) - - if !cfg.PITR.Enabled { - a.removePitr() - return nil - } - - ep := config.Epoch(cfg.Epoch) - l := log.FromContext(ctx).NewEvent(string(ctrl.CmdPITR), "", "", ep.TS()) - ctx = log.SetLogEventToContext(ctx, l) - - if err := canSlicingNow(ctx, a.leadConn); err != nil { - e := lock.ConcurrentOpError{} - if errors.As(err, &e) { - l.Info("oplog slicer is paused for lock [%s, opid: %s]", e.Lock.Type, e.Lock.OPID) - return nil - } - - return err - } - - slicerInterval := cfg.OplogSlicerInterval() - - if p := a.getPitr(); p != nil { - // already do the job - currInterval := p.slicer.GetSpan() - if currInterval != slicerInterval { - p.slicer.SetSpan(slicerInterval) - - // wake up slicer only if a new interval is smaller - if currInterval > slicerInterval { - a.sliceNow(ctrl.NilOPID) - } - } - - return nil - } - - // just a check before a real locking - // just trying to avoid redundant heavy operations - moveOn, err := a.pitrLockCheck(ctx) - if err != nil { - return errors.Wrap(err, "check if already run") - } - - if !moveOn { - return nil - } - - // should be after the lock pre-check - // - // if node failing, then some other agent with healthy node will hopefully catch up - // so this code won't be reached and will not pollute log with "pitr" errors while - // the other node does successfully slice - ninf, err := topo.GetNodeInfoExt(ctx, a.nodeConn) - if err != nil { - return errors.Wrap(err, "get node info") - } - q, err := topo.NodeSuits(ctx, a.nodeConn, ninf) - if err != nil { - return errors.Wrap(err, "node check") - } - - // node is not suitable for doing backup - if !q { - return nil - } - - epts := ep.TS() - lck := lock.NewOpLock(a.leadConn, lock.LockHeader{ - Replset: a.brief.SetName, - Node: a.brief.Me, - Type: ctrl.CmdPITR, - Epoch: &epts, - }) - - got, err := a.acquireLock(ctx, lck, l) - if err != nil { - return errors.Wrap(err, "acquiring lock") - } - if !got { - l.Debug("skip: lock not acquired") - return nil - } - - stg, err := util.StorageFromConfig(cfg.Storage, l) 
- if err != nil { - return errors.Wrap(err, "unable to get storage configuration") - } - - ibcp := slicer.NewSlicer(a.brief.SetName, a.leadConn, a.nodeConn, stg, cfg, log.FromContext(ctx)) - ibcp.SetSpan(slicerInterval) - - if cfg.PITR.OplogOnly { - err = ibcp.OplogOnlyCatchup(ctx) - } else { - err = ibcp.Catchup(ctx) - } - if err != nil { - if err := lck.Release(); err != nil { - l.Error("release lock: %v", err) - } - return errors.Wrap(err, "catchup") - } - - go func() { - stopSlicingCtx, stopSlicing := context.WithCancel(ctx) - defer stopSlicing() - stopC := make(chan struct{}) - - w := make(chan ctrl.OPID) - a.setPitr(¤tPitr{ - slicer: ibcp, - cancel: stopSlicing, - w: w, - }) - - go func() { - <-stopSlicingCtx.Done() - close(stopC) - a.removePitr() - }() - - streamErr := ibcp.Stream(ctx, - stopC, - w, - cfg.PITR.Compression, - cfg.PITR.CompressionLevel, - cfg.Backup.Timeouts) - if streamErr != nil { - out := l.Error - if errors.Is(streamErr, slicer.OpMovedError{}) { - out = l.Info - } - out("streaming oplog: %v", streamErr) - } - - if err := lck.Release(); err != nil { - l.Error("release lock: %v", err) - } - - // Penalty to the failed node so healthy nodes would have priority on next try. - // But lock has to be released first. Otherwise, healthy nodes would wait for the lock release - // and the penalty won't have any sense. - if streamErr != nil { - time.Sleep(pitrCheckPeriod * 2) - } - }() - - return nil -} - -func (a *Agent) pitrLockCheck(ctx context.Context) (bool, error) { - ts, err := topo.GetClusterTime(ctx, a.leadConn) - if err != nil { - return false, errors.Wrap(err, "read cluster time") - } - - tl, err := lock.GetOpLockData(ctx, a.leadConn, &lock.LockHeader{ - Replset: a.brief.SetName, - Type: ctrl.CmdPITR, - }) - if err != nil { - if errors.Is(err, mongo.ErrNoDocuments) { - // no lock. good to move on - return true, nil - } - - return false, errors.Wrap(err, "get lock") - } - - // stale lock means we should move on and clean it up during the lock.Acquire - return tl.Heartbeat.T+defs.StaleFrameSec < ts.T, nil -} - func (a *Agent) Restore(ctx context.Context, r *ctrl.RestoreCmd, opid ctrl.OPID, ep config.Epoch) { logger := log.FromContext(ctx) if r == nil { diff --git a/cmd/pbm-agent/priority.go b/pbm/prio/priority.go similarity index 72% rename from cmd/pbm-agent/priority.go rename to pbm/prio/priority.go index aa35a5c3e..38f4eac69 100644 --- a/cmd/pbm-agent/priority.go +++ b/pbm/prio/priority.go @@ -1,13 +1,10 @@ -package main +package prio import ( "context" "sort" - "github.com/percona/percona-backup-mongodb/pbm/config" - "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/defs" - "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/topo" ) @@ -41,22 +38,18 @@ func (n *NodesPriority) RS(rs string) [][]string { type agentScore func(topo.AgentStat) float64 -// BcpNodesPriority returns list nodes grouped by backup preferences -// in descended order. First are nodes with the highest priority. +// CalcNodesPriority calculates and returns list nodes grouped by +// backup/pitr preferences in descended order. +// First are nodes with the highest priority. // Custom coefficients might be passed. These will be ignored though // if the config is set. 
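// A usage sketch (the caller shape is assumed; it mirrors the call added
// in cmd/pbm-agent/backup.go above):
//
//	np, err := prio.CalcNodesPriority(ctx, nil, cfg.Backup.Priority, agents)
//	if err != nil {
//		return err
//	}
//	for _, tier := range np.RS("rs0") {
//		// nodes within a tier share one score; nominate from the first
//		// tier and fall back to the next one on failure
//	}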
-func BcpNodesPriority( +func CalcNodesPriority( ctx context.Context, - m connect.Client, c map[string]float64, + cfgPrio map[string]float64, agents []topo.AgentStat, ) (*NodesPriority, error) { - cfg, err := config.GetConfig(ctx, m) - if err != nil { - return nil, errors.Wrap(err, "get config") - } - - // if cfg.Backup.Priority doesn't set apply defaults + // if config level priorities (cfgPrio) aren't set, apply defaults f := func(a topo.AgentStat) float64 { if coeff, ok := c[a.Node]; ok && c != nil { return defaultScore * coeff @@ -68,9 +61,9 @@ func BcpNodesPriority( return defaultScore } - if cfg.Backup.Priority != nil || len(cfg.Backup.Priority) > 0 { + if cfgPrio != nil || len(cfgPrio) > 0 { f = func(a topo.AgentStat) float64 { - sc, ok := cfg.Backup.Priority[a.Node] + sc, ok := cfgPrio[a.Node] if !ok || sc < 0 { return defaultScore } @@ -79,10 +72,10 @@ func BcpNodesPriority( } } - return bcpNodesPriority(agents, f), nil + return calcNodesPriority(agents, f), nil } -func bcpNodesPriority(agents []topo.AgentStat, f agentScore) *NodesPriority { +func calcNodesPriority(agents []topo.AgentStat, f agentScore) *NodesPriority { scores := NewNodesPriority() for _, a := range agents { diff --git a/pbm/prio/priority_test.go b/pbm/prio/priority_test.go new file mode 100644 index 000000000..fb126ab1d --- /dev/null +++ b/pbm/prio/priority_test.go @@ -0,0 +1,441 @@ +package prio + +import ( + "context" + "reflect" + "testing" + + "github.com/percona/percona-backup-mongodb/pbm/defs" + "github.com/percona/percona-backup-mongodb/pbm/topo" +) + +func TestCalcNodesPriority(t *testing.T) { + t.Run("implicit priorities - rs", func(t *testing.T) { + testCases := []struct { + desc string + agents []topo.AgentStat + res [][]string + }{ + { + desc: "implicit priorities for PSS", + agents: []topo.AgentStat{ + newP("rs0", "rs01"), + newS("rs0", "rs02"), + newS("rs0", "rs03"), + }, + res: [][]string{ + {"rs02", "rs03"}, + {"rs01"}, + }, + }, + { + desc: "implicit priorities for PSH", + agents: []topo.AgentStat{ + newP("rs0", "rs01"), + newS("rs0", "rs02"), + newH("rs0", "rs03"), + }, + res: [][]string{ + {"rs03"}, + {"rs02"}, + {"rs01"}, + }, + }, + { + desc: "implicit priorities for PSA", + agents: []topo.AgentStat{ + newP("rs0", "rs01"), + newS("rs0", "rs02"), + newA("rs0", "rs03"), + }, + res: [][]string{ + {"rs02"}, + {"rs01"}, + }, + }, + { + desc: "5 members mix", + agents: []topo.AgentStat{ + newS("rs0", "rs01"), + newH("rs0", "rs02"), + newP("rs0", "rs03"), + newA("rs0", "rs04"), + newH("rs0", "rs05"), + }, + res: [][]string{ + {"rs02", "rs05"}, + {"rs01"}, + {"rs03"}, + }, + }, + } + for _, tC := range testCases { + t.Run(tC.desc, func(t *testing.T) { + np, err := CalcNodesPriority(context.Background(), nil, nil, tC.agents) + if err != nil { + t.Fatalf("unexpected error while calculating nodes priority: %v", err) + } + + prioByScore := np.RS(tC.agents[0].RS) + + if !reflect.DeepEqual(prioByScore, tC.res) { + t.Fatalf("wrong nodes priority calculation: want=%v, got=%v", tC.res, prioByScore) + } + }) + } + }) + + t.Run("implicit priorities - sharded cluster", func(t *testing.T) { + testCases := []struct { + desc string + agents []topo.AgentStat + resCfg [][]string + resRS0 [][]string + resRS1 [][]string + }{ + { + desc: "implicit priorities for PSS", + agents: []topo.AgentStat{ + newP("rs0", "rs01"), + newS("rs0", "rs02"), + newS("rs0", "rs03"), + newS("rs1", "rs11"), + newP("rs1", "rs12"), + newS("rs1", "rs13"), + newS("cfg", "cfg1"), + newS("cfg", "cfg2"), + newP("cfg", "cfg3"), + }, + resCfg: 
[][]string{ + {"cfg1", "cfg2"}, + {"cfg3"}, + }, + resRS0: [][]string{ + {"rs02", "rs03"}, + {"rs01"}, + }, + resRS1: [][]string{ + {"rs11", "rs13"}, + {"rs12"}, + }, + }, + { + desc: "implicit priorities for sharded mix", + agents: []topo.AgentStat{ + newS("cfg", "cfg1"), + newP("cfg", "cfg2"), + newS("cfg", "cfg3"), + newS("rs0", "rs01"), + newP("rs0", "rs02"), + newA("rs0", "rs03"), + newP("rs1", "rs11"), + newH("rs1", "rs12"), + newS("rs1", "rs13"), + }, + resCfg: [][]string{ + {"cfg1", "cfg3"}, + {"cfg2"}, + }, + resRS0: [][]string{ + {"rs01"}, + {"rs02"}, + }, + resRS1: [][]string{ + {"rs12"}, + {"rs13"}, + {"rs11"}, + }, + }, + } + for _, tC := range testCases { + t.Run(tC.desc, func(t *testing.T) { + np, err := CalcNodesPriority(context.Background(), nil, nil, tC.agents) + if err != nil { + t.Fatalf("unexpected error while calculating nodes priority: %v", err) + } + + prioByScoreCfg := np.RS("cfg") + prioByScoreRs0 := np.RS("rs0") + prioByScoreRs1 := np.RS("rs1") + + if !reflect.DeepEqual(prioByScoreCfg, tC.resCfg) { + t.Fatalf("wrong nodes priority calculation for config cluster: want=%v, got=%v", tC.resCfg, prioByScoreCfg) + } + if !reflect.DeepEqual(prioByScoreRs0, tC.resRS0) { + t.Fatalf("wrong nodes priority calculation for rs1 cluster: want=%v, got=%v", tC.resRS0, prioByScoreRs0) + } + if !reflect.DeepEqual(prioByScoreRs1, tC.resRS1) { + t.Fatalf("wrong nodes priority calculation for rs2 cluster: want=%v, got=%v", tC.resRS1, prioByScoreRs1) + } + }) + } + }) + + t.Run("explicit priorities - rs", func(t *testing.T) { + testCases := []struct { + desc string + agents []topo.AgentStat + expPrio map[string]float64 + res [][]string + }{ + { + desc: "all priorities are different", + agents: []topo.AgentStat{ + newP("rs0", "rs01"), + newS("rs0", "rs02"), + newS("rs0", "rs03"), + }, + expPrio: map[string]float64{ + "rs01": 2.0, + "rs02": 3.0, + "rs03": 1.0, + }, + res: [][]string{ + {"rs02"}, + {"rs01"}, + {"rs03"}, + }, + }, + { + desc: "5 members, 3 different priority groups", + agents: []topo.AgentStat{ + newP("rs0", "rs01"), + newS("rs0", "rs02"), + newS("rs0", "rs03"), + newS("rs0", "rs04"), + newS("rs0", "rs05"), + }, + expPrio: map[string]float64{ + "rs01": 2.0, + "rs02": 3.0, + "rs03": 1.0, + "rs04": 1.0, + "rs05": 3.0, + }, + res: [][]string{ + {"rs02", "rs05"}, + {"rs01"}, + {"rs03", "rs04"}, + }, + }, + { + desc: "default priorities", + agents: []topo.AgentStat{ + newP("rs0", "rs01"), + newS("rs0", "rs02"), + newS("rs0", "rs03"), + }, + expPrio: map[string]float64{ + "rs01": 0.5, + }, + res: [][]string{ + {"rs02", "rs03"}, + {"rs01"}, + }, + }, + { + desc: "all defaults", + agents: []topo.AgentStat{ + newP("rs0", "rs01"), + newS("rs0", "rs02"), + newS("rs0", "rs03"), + }, + expPrio: map[string]float64{}, + res: [][]string{ + {"rs01", "rs02", "rs03"}, + }, + }, + { + desc: "priorities are not defined -> implicit are applied", + agents: []topo.AgentStat{ + newP("rs0", "rs01"), + newS("rs0", "rs02"), + newS("rs0", "rs03"), + }, + expPrio: nil, + res: [][]string{ + {"rs02", "rs03"}, + {"rs01"}, + }, + }, + } + for _, tC := range testCases { + t.Run(tC.desc, func(t *testing.T) { + np, err := CalcNodesPriority(context.Background(), nil, tC.expPrio, tC.agents) + if err != nil { + t.Fatalf("unexpected error while calculating nodes priority: %v", err) + } + + prioByScore := np.RS(tC.agents[0].RS) + + if !reflect.DeepEqual(prioByScore, tC.res) { + t.Fatalf("wrong nodes priority calculation: want=%v, got=%v", tC.res, prioByScore) + } + }) + } + }) + + t.Run("explicit priorities - 
sharded cluster", func(t *testing.T) { + testCases := []struct { + desc string + agents []topo.AgentStat + expPrio map[string]float64 + res [][]string + resCfg [][]string + resRS0 [][]string + resRS1 [][]string + }{ + { + desc: "all priorities are different", + agents: []topo.AgentStat{ + newP("rs0", "rs01"), + newS("rs0", "rs02"), + newS("rs0", "rs03"), + newS("rs1", "rs11"), + newP("rs1", "rs12"), + newS("rs1", "rs13"), + newS("cfg", "cfg1"), + newS("cfg", "cfg2"), + newP("cfg", "cfg3"), + }, + expPrio: map[string]float64{ + "rs01": 2.0, + "rs02": 3.0, + "rs03": 1.0, + "rs11": 2.0, + "rs12": 3.0, + "rs13": 1.0, + "cfg2": 2.0, + }, + resCfg: [][]string{ + {"cfg2"}, + {"cfg1", "cfg3"}, + }, + resRS0: [][]string{ + {"rs02"}, + {"rs01"}, + {"rs03"}, + }, + resRS1: [][]string{ + {"rs12"}, + {"rs11"}, + {"rs13"}, + }, + }, + { + desc: "only primary is down prioritized", + agents: []topo.AgentStat{ + newP("rs0", "rs01"), + newS("rs0", "rs02"), + newS("rs0", "rs03"), + newS("rs1", "rs11"), + newP("rs1", "rs12"), + newS("rs1", "rs13"), + newS("cfg", "cfg1"), + newS("cfg", "cfg2"), + newP("cfg", "cfg3"), + }, + expPrio: map[string]float64{ + "rs01": 0.5, + "rs12": 0.5, + "cfg3": 0.5, + }, + resCfg: [][]string{ + {"cfg1", "cfg2"}, + {"cfg3"}, + }, + resRS0: [][]string{ + {"rs02", "rs03"}, + {"rs01"}, + }, + resRS1: [][]string{ + {"rs11", "rs13"}, + {"rs12"}, + }, + }, + } + for _, tC := range testCases { + t.Run(tC.desc, func(t *testing.T) { + np, err := CalcNodesPriority(context.Background(), nil, tC.expPrio, tC.agents) + if err != nil { + t.Fatalf("unexpected error while calculating nodes priority: %v", err) + } + + prioByScoreCfg := np.RS("cfg") + prioByScoreRs0 := np.RS("rs0") + prioByScoreRs1 := np.RS("rs1") + + if !reflect.DeepEqual(prioByScoreCfg, tC.resCfg) { + t.Fatalf("wrong nodes priority calculation for config cluster: want=%v, got=%v", tC.resCfg, prioByScoreCfg) + } + if !reflect.DeepEqual(prioByScoreRs0, tC.resRS0) { + t.Fatalf("wrong nodes priority calculation for rs1 cluster: want=%v, got=%v", tC.resRS0, prioByScoreRs0) + } + if !reflect.DeepEqual(prioByScoreRs1, tC.resRS1) { + t.Fatalf("wrong nodes priority calculation for rs2 cluster: want=%v, got=%v", tC.resRS1, prioByScoreRs1) + } + }) + } + }) + + t.Run("coeficients", func(t *testing.T) { + agents := []topo.AgentStat{ + newP("rs0", "rs01"), + newS("rs0", "rs02"), + newS("rs0", "rs03"), + } + res := [][]string{ + {"rs03"}, + {"rs02"}, + {"rs01"}, + } + c := map[string]float64{ + "rs03": 3.0, + } + + np, err := CalcNodesPriority(context.Background(), c, nil, agents) + if err != nil { + t.Fatalf("unexpected error while calculating nodes priority: %v", err) + } + + prioByScore := np.RS(agents[0].RS) + if !reflect.DeepEqual(prioByScore, res) { + t.Fatalf("wrong nodes priority calculation: want=%v, got=%v", res, prioByScore) + } + }) +} + +func newP(rs, node string) topo.AgentStat { + return newAgent(rs, node, defs.NodeStatePrimary, false) +} + +func newS(rs, node string) topo.AgentStat { + return newAgent(rs, node, defs.NodeStateSecondary, false) +} + +func newH(rs, node string) topo.AgentStat { + return newAgent(rs, node, defs.NodeStateSecondary, true) +} + +func newA(rs, node string) topo.AgentStat { + return newAgent(rs, node, defs.NodeStateArbiter, false) +} + +func newAgent(rs, node string, state defs.NodeState, isHidden bool) topo.AgentStat { + return topo.AgentStat{ + Node: node, + RS: rs, + State: state, + Hidden: isHidden, + Arbiter: state == defs.NodeStateArbiter, + PBMStatus: topo.SubsysStatus{ + OK: true, + }, + 
NodeStatus: topo.SubsysStatus{ + OK: state == defs.NodeStatePrimary || state == defs.NodeStateSecondary, + }, + StorageStatus: topo.SubsysStatus{ + OK: true, + }, + } +} From f2fc7d482ca2fe216f4c5cdaf30374caa51a344b Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 12 Jun 2024 12:25:05 +0200 Subject: [PATCH 028/203] Add Priority field as configuration item for PITR Priority is extracted as separate type. --- pbm/config/config.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pbm/config/config.go b/pbm/config/config.go index 96e906499..494ad0df7 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -125,6 +125,10 @@ func (c Config) BackupSlicerInterval() time.Duration { return time.Duration(c.Backup.OplogSpanMin * float64(time.Minute)) } +// Priority contains priority values for cluster members. +// It is used for specifying Backup and PITR configuration priorities. +type Priority map[string]float64 + // PITRConf is a Point-In-Time Recovery options // //nolint:lll @@ -132,6 +136,7 @@ type PITRConf struct { Enabled bool `bson:"enabled" json:"enabled" yaml:"enabled"` OplogSpanMin float64 `bson:"oplogSpanMin" json:"oplogSpanMin" yaml:"oplogSpanMin"` OplogOnly bool `bson:"oplogOnly,omitempty" json:"oplogOnly,omitempty" yaml:"oplogOnly,omitempty"` + Priority Priority `bson:"priority,omitempty" json:"priority,omitempty" yaml:"priority,omitempty"` Compression compress.CompressionType `bson:"compression,omitempty" json:"compression,omitempty" yaml:"compression,omitempty"` CompressionLevel *int `bson:"compressionLevel,omitempty" json:"compressionLevel,omitempty" yaml:"compressionLevel,omitempty"` } @@ -218,7 +223,7 @@ type RestoreConf struct { //nolint:lll type BackupConf struct { OplogSpanMin float64 `bson:"oplogSpanMin" json:"oplogSpanMin" yaml:"oplogSpanMin"` - Priority map[string]float64 `bson:"priority,omitempty" json:"priority,omitempty" yaml:"priority,omitempty"` + Priority Priority `bson:"priority,omitempty" json:"priority,omitempty" yaml:"priority,omitempty"` Timeouts *BackupTimeouts `bson:"timeouts,omitempty" json:"timeouts,omitempty" yaml:"timeouts,omitempty"` Compression compress.CompressionType `bson:"compression,omitempty" json:"compression,omitempty" yaml:"compression,omitempty"` CompressionLevel *int `bson:"compressionLevel,omitempty" json:"compressionLevel,omitempty" yaml:"compressionLevel,omitempty"` From b11bb6db373089ceaf59d03a1b1eb0172484c8a8 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 12 Jun 2024 12:26:46 +0200 Subject: [PATCH 029/203] Add pbmPITR collection pbmPITR collection is used for operational data during execution of PITR process: - nomination-ack data - error reporting - status reporting --- pbm/connect/connect.go | 5 +++++ pbm/defs/defs.go | 2 ++ pbm/snapshot/restore.go | 1 + 3 files changed, 8 insertions(+) diff --git a/pbm/connect/connect.go b/pbm/connect/connect.go index 728539858..8d0a58cf6 100644 --- a/pbm/connect/connect.go +++ b/pbm/connect/connect.go @@ -327,6 +327,10 @@ func (l *clientImpl) PITRChunksCollection() *mongo.Collection { return l.client.Database(defs.DB).Collection(defs.PITRChunksCollection) } +func (l *clientImpl) PITRCollection() *mongo.Collection { + return l.client.Database(defs.DB).Collection(defs.PITRCollection) +} + func (l *clientImpl) PBMOpLogCollection() *mongo.Collection { return l.client.Database(defs.DB).Collection(defs.PBMOpLogCollection) } @@ -376,6 +380,7 @@ type Client interface { RestoresCollection() *mongo.Collection CmdStreamCollection() *mongo.Collection PITRChunksCollection() 
*mongo.Collection + PITRCollection() *mongo.Collection PBMOpLogCollection() *mongo.Collection AgentsStatusCollection() *mongo.Collection } diff --git a/pbm/defs/defs.go b/pbm/defs/defs.go index 575d26098..224be5f15 100644 --- a/pbm/defs/defs.go +++ b/pbm/defs/defs.go @@ -24,6 +24,8 @@ const ( CmdStreamCollection = "pbmCmd" // PITRChunksCollection contains index metadata of PITR chunks PITRChunksCollection = "pbmPITRChunks" + // pbmPITR is a collection for PITR operational data + PITRCollection = "pbmPITR" // PBMOpLogCollection contains log of acquired locks (hence run ops) PBMOpLogCollection = "pbmOpLog" // AgentsStatusCollection is an agents registry with its status/health checks diff --git a/pbm/snapshot/restore.go b/pbm/snapshot/restore.go index 8a39f64a1..b4f56c589 100644 --- a/pbm/snapshot/restore.go +++ b/pbm/snapshot/restore.go @@ -28,6 +28,7 @@ var ExcludeFromRestore = []string{ defs.DB + "." + defs.LockCollection, defs.DB + "." + defs.LockOpCollection, defs.DB + "." + defs.PITRChunksCollection, + defs.DB + "." + defs.PITRCollection, defs.DB + "." + defs.AgentsStatusCollection, defs.DB + "." + defs.PBMOpLogCollection, "admin.system.version", From 5a578b1e73dd75394ecc0b21e8218a1d504a8f49 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 12 Jun 2024 17:40:53 +0200 Subject: [PATCH 030/203] Add PITR nomination db related logic --- pbm/oplog/nomination.go | 129 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 129 insertions(+) create mode 100644 pbm/oplog/nomination.go diff --git a/pbm/oplog/nomination.go b/pbm/oplog/nomination.go new file mode 100644 index 000000000..8219a078b --- /dev/null +++ b/pbm/oplog/nomination.go @@ -0,0 +1,129 @@ +package oplog + +import ( + "context" + "time" + + "github.com/percona/percona-backup-mongodb/pbm/connect" + "github.com/percona/percona-backup-mongodb/pbm/errors" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +// PITRMeta contains all operational data about PITR execution process. +type PITRMeta struct { + StartTS int64 `bson:"start_ts" json:"start_ts"` + // Hb primitive.Timestamp `bson:"hb" json:"hb"` + // Status defs.Status `bson:"status" json:"status"` + Nomination []PITRNomination `bson:"n" json:"n"` +} + +// PITRNomination is used to choose (nominate and elect) member(s) +// which will perform PITR process within a replica set(s). +type PITRNomination struct { + RS string `bson:"rs" json:"rs"` + Nodes []string `bson:"n" json:"n"` + Ack string `bson:"ack" json:"ack"` +} + +// Init add initial PITR document. +func InitMeta(ctx context.Context, conn connect.Client) error { + pitrMeta := PITRMeta{ + StartTS: time.Now().Unix(), + } + _, err := conn.PITRCollection().ReplaceOne( + ctx, + bson.D{}, + pitrMeta, + options.Replace().SetUpsert(true), + ) + + return errors.Wrap(err, "pitr meta replace") +} + +// SetPITRNomination adds nomination fragment for specified RS within PITRMeta. +func SetPITRNomination(ctx context.Context, conn connect.Client, rs string) error { + n := PITRNomination{ + RS: rs, + Nodes: []string{}, + } + _, err := conn.PITRCollection(). + UpdateOne( + ctx, + bson.D{}, + bson.D{{"$addToSet", bson.M{"n": n}}}, + options.Update().SetUpsert(true), + ) + + return errors.Wrap(err, "update pirt nomination") +} + +// GetPITRNominees fetches nomination fragment for specified RS +// from PITRMeta document. 
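+// For illustration, a pbmPITR meta document is expected to look roughly like
+// this (a sketch derived from the bson tags above; the values are invented):
+//
+//	{
+//	  "start_ts": 1718000000,
+//	  "n": [
+//	    {"rs": "rs0", "n": ["rs00", "rs01"], "ack": "rs00"}
+//	  ]
+//	}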
+func GetPITRNominees( + ctx context.Context, + conn connect.Client, + rs string, +) (*PITRNomination, error) { + res := conn.PITRCollection().FindOne(ctx, bson.D{}) + if err := res.Err(); err != nil { + if errors.Is(err, mongo.ErrNoDocuments) { + return nil, errors.ErrNotFound + } + return nil, errors.Wrap(err, "find pitr meta") + } + + meta := &PITRMeta{} + if err := res.Decode(meta); err != nil { + errors.Wrap(err, "decode") + } + + for _, n := range meta.Nomination { + if n.RS == rs { + return &n, nil + } + } + + return nil, errors.ErrNotFound +} + +// SetPITRNominees add nominee(s) for specific RS. +// It is used by cluster leader within nomination process. +func SetPITRNominees( + ctx context.Context, + conn connect.Client, + rs string, + nodes []string, +) error { + _, err := conn.PITRCollection().UpdateOne( + ctx, + bson.D{ + {"n.rs", rs}, + }, + bson.D{ + {"$addToSet", bson.M{"n.$.n": bson.M{"$each": nodes}}}, + }, + ) + + return errors.Wrap(err, "update pitr nominees") +} + +// SetPITRNomineeACK add ack for specific nomination. +// It is used by nominee, after the nomination is created by cluster leader. +func SetPITRNomineeACK( + ctx context.Context, + conn connect.Client, + rs, + node string, +) error { + _, err := conn.PITRCollection().UpdateOne( + ctx, + bson.D{{"n.rs", rs}}, + bson.D{ + {"$set", bson.M{"n.$.ack": node}}, + }, + ) + + return errors.Wrap(err, "update pitr nominee ack") +} From 20118251c357365bc8e46801735a0b6bb78c27a8 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 13 Jun 2024 11:56:57 +0200 Subject: [PATCH 031/203] add profile not found message --- cmd/pbm-agent/agent.go | 4 ++++ cmd/pbm-agent/profile.go | 13 +++++++++++++ cmd/pbm/profile.go | 18 +++++++++++++++++- sdk/impl.go | 10 +++++++++- 4 files changed, 43 insertions(+), 2 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 0b9b905e1..759634ae3 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -254,6 +254,10 @@ func (a *Agent) Resync(ctx context.Context, cmd *ctrl.ResyncCmd, opid ctrl.OPID, } else if cmd.Name != "" { profile, err := config.GetProfile(ctx, a.leadConn, cmd.Name) if err != nil { + if errors.Is(err, mongo.ErrNoDocuments) { + err = errors.Errorf("profile %q not found", cmd.Name) + } + l.Error("get config profile: %v", err) return } diff --git a/cmd/pbm-agent/profile.go b/cmd/pbm-agent/profile.go index b3d2a827a..48c6ee470 100644 --- a/cmd/pbm-agent/profile.go +++ b/cmd/pbm-agent/profile.go @@ -3,6 +3,8 @@ package main import ( "context" + "go.mongodb.org/mongo-driver/mongo" + "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/ctrl" "github.com/percona/percona-backup-mongodb/pbm/errors" @@ -180,6 +182,17 @@ func (a *Agent) handleRemoveConfigProfile( } }() + _, err = config.GetProfile(ctx, a.leadConn, cmd.Name) + if err != nil { + if errors.Is(err, mongo.ErrNoDocuments) { + l.Warning("profile %q is not found", cmd.Name) + return + } + + l.Error("get config profile: %v", err) + return + } + err = resync.ClearBackupList(ctx, a.leadConn, cmd.Name) if err != nil { l.Error("clear backup list: %v", err) diff --git a/cmd/pbm/profile.go b/cmd/pbm/profile.go index ce191c597..cc83819ab 100644 --- a/cmd/pbm/profile.go +++ b/cmd/pbm/profile.go @@ -74,7 +74,15 @@ func handleDescibeConfigProfiles( return nil, errors.New("argument `profile-name` should not be empty") } - return pbm.GetConfigProfile(ctx, opts.name) + profile, err := pbm.GetConfigProfile(ctx, opts.name) + if err != nil { + if errors.Is(err, 
config.ErrMissedConfigProfile) { + err = errors.Errorf("profile %q is not found", opts.name) + } + return nil, err + } + + return profile, nil } func handleAddConfigProfile( @@ -137,6 +145,14 @@ func handleRemoveConfigProfile( return nil, errors.New("argument `profile-name` should not be empty") } + _, err := pbm.GetConfigProfile(ctx, opts.name) + if err != nil { + if errors.Is(err, config.ErrMissedConfigProfile) { + err = errors.Errorf("profile %q is not found", opts.name) + } + return nil, err + } + cid, err := pbm.RemoveConfigProfile(ctx, opts.name) if err != nil { return nil, errors.Wrap(err, "sdk: remove config profile") diff --git a/sdk/impl.go b/sdk/impl.go index 0ddb3d578..3ada4e03f 100644 --- a/sdk/impl.go +++ b/sdk/impl.go @@ -83,7 +83,15 @@ func (c *clientImpl) ListConfigProfiles(ctx context.Context) ([]config.Config, e } func (c *clientImpl) GetConfigProfile(ctx context.Context, name string) (*config.Config, error) { - return config.GetProfile(ctx, c.conn, name) + profile, err := config.GetProfile(ctx, c.conn, name) + if err != nil { + if errors.Is(err, mongo.ErrNoDocuments) { + err = config.ErrMissedConfigProfile + } + return nil, err + } + + return profile, nil } func (c *clientImpl) AddConfigProfile(ctx context.Context, name string, cfg *Config) (CommandID, error) { From ec229a6b89cf5ad79c3bed9232aa23c10ee4ba51 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 13 Jun 2024 11:58:02 +0200 Subject: [PATCH 032/203] use show subcommand --- cmd/pbm/main.go | 14 +++++++------- cmd/pbm/profile.go | 6 +++--- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cmd/pbm/main.go b/cmd/pbm/main.go index 4b4d49262..1e822e3d8 100644 --- a/cmd/pbm/main.go +++ b/cmd/pbm/main.go @@ -106,13 +106,13 @@ func main() { Command("list", "List configuration profiles"). Default() - descConfigProfileOpts := descConfigProfileOptions{} - descConfigProfileCmd := configProfileCmd. - Command("describe", "Describe configuration profile") - descConfigProfileCmd. + showConfigProfileOpts := showConfigProfileOptions{} + showConfigProfileCmd := configProfileCmd. + Command("show", "Show configuration profile") + showConfigProfileCmd. Arg("profile-name", "Profile name"). Required(). - StringVar(&descConfigProfileOpts.name) + StringVar(&showConfigProfileOpts.name) addConfigProfileOpts := addConfigProfileOptions{} addConfigProfileCmd := configProfileCmd. 
@@ -475,8 +475,8 @@ func main() { out, err = runConfig(ctx, conn, pbm, &cfg) case listConfigProfileCmd.FullCommand(): out, err = handleListConfigProfiles(ctx, pbm) - case descConfigProfileCmd.FullCommand(): - out, err = handleDescibeConfigProfiles(ctx, pbm, descConfigProfileOpts) + case showConfigProfileCmd.FullCommand(): + out, err = handleShowConfigProfiles(ctx, pbm, showConfigProfileOpts) case addConfigProfileCmd.FullCommand(): out, err = handleAddConfigProfile(ctx, pbm, addConfigProfileOpts) case removeConfigProfileCmd.FullCommand(): diff --git a/cmd/pbm/profile.go b/cmd/pbm/profile.go index cc83819ab..74cbffcf3 100644 --- a/cmd/pbm/profile.go +++ b/cmd/pbm/profile.go @@ -13,7 +13,7 @@ import ( "github.com/percona/percona-backup-mongodb/sdk" ) -type descConfigProfileOptions struct { +type showConfigProfileOptions struct { name string } @@ -65,10 +65,10 @@ func handleListConfigProfiles(ctx context.Context, pbm sdk.Client) (fmt.Stringer return configProfileList{profiles}, nil } -func handleDescibeConfigProfiles( +func handleShowConfigProfiles( ctx context.Context, pbm sdk.Client, - opts descConfigProfileOptions, + opts showConfigProfileOptions, ) (fmt.Stringer, error) { if opts.name == "" { return nil, errors.New("argument `profile-name` should not be empty") From 23b07bbc8b50842c0b79e544d2bd5803dca3c39b Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 13 Jun 2024 11:58:19 +0200 Subject: [PATCH 033/203] return empty profile list --- cmd/pbm/profile.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/cmd/pbm/profile.go b/cmd/pbm/profile.go index 74cbffcf3..12efeb0d4 100644 --- a/cmd/pbm/profile.go +++ b/cmd/pbm/profile.go @@ -38,17 +38,17 @@ type syncConfigProfileOptions struct { } type configProfileList struct { - configs []config.Config + Profiles []config.Config `json:"profiles"` } func (l configProfileList) String() string { - if len(l.configs) == 0 { + if len(l.Profiles) == 0 { return "" } sb := strings.Builder{} - sb.WriteString(l.configs[0].String()) - for _, profile := range l.configs[1:] { + sb.WriteString(l.Profiles[0].String()) + for _, profile := range l.Profiles[1:] { sb.WriteString("---\n") sb.WriteString(profile.String()) } @@ -61,6 +61,10 @@ func handleListConfigProfiles(ctx context.Context, pbm sdk.Client) (fmt.Stringer if err != nil { return nil, err } + if profiles == nil { + // (for JSON) to have {"profiles":[]} instead of {"profiles":null} + profiles = []config.Config{} + } return configProfileList{profiles}, nil } From 442d1534f6bcaae796de67208d6eac14e0b21ba3 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 13 Jun 2024 12:00:40 +0200 Subject: [PATCH 034/203] disallow to store increments on different storages --- pbm/backup/physical.go | 5 ++ pbm/config/config.go | 6 +- pbm/storage/azure/azure.go | 24 +++++++ pbm/storage/fs/fs.go | 18 +++-- pbm/storage/s3/s3.go | 130 +++++++++++++++++++++++++------------ 5 files changed, 134 insertions(+), 49 deletions(-) diff --git a/pbm/backup/physical.go b/pbm/backup/physical.go index 23a3703f2..3b3375015 100644 --- a/pbm/backup/physical.go +++ b/pbm/backup/physical.go @@ -232,6 +232,11 @@ func (b *Backup) doPhysical( } } + if !b.config.Storage.Equal(&src.Store.Storage) { + return errors.New("cannot use the configured storage: " + + "source backup is stored on a different storage") + } + // realSrcID is actual thisBackupName of the replset var realSrcID string for _, rs := range src.Replsets { diff --git a/pbm/config/config.go b/pbm/config/config.go index 3aab07884..8e088dc10 100644 --- 
a/pbm/config/config.go +++ b/pbm/config/config.go @@ -232,11 +232,11 @@ func (s *Storage) Equal(other *Storage) bool { switch s.Type { case storage.S3: - return reflect.DeepEqual(s.S3, other.S3) + return s.S3.Equal(other.S3) case storage.Azure: - return reflect.DeepEqual(s.Azure, other.Azure) + return s.Azure.Equal(other.Azure) case storage.Filesystem: - return reflect.DeepEqual(s.Filesystem, other.Filesystem) + return s.Filesystem.Equal(other.Filesystem) } return false diff --git a/pbm/storage/azure/azure.go b/pbm/storage/azure/azure.go index 6f949cdac..df5f957f7 100644 --- a/pbm/storage/azure/azure.go +++ b/pbm/storage/azure/azure.go @@ -48,6 +48,30 @@ func (cfg *Config) Clone() *Config { return &rv } +func (cfg *Config) Equal(other *Config) bool { + if cfg == nil || other == nil { + return cfg == other + } + + if cfg.Account != other.Account { + return false + } + if cfg.Container != other.Container { + return false + } + if cfg.EndpointURL != other.EndpointURL { + return false + } + if cfg.Prefix != other.Prefix { + return false + } + if cfg.Credentials.Key != other.Credentials.Key { + return false + } + + return true +} + type Credentials struct { Key string `bson:"key" json:"key,omitempty" yaml:"key,omitempty"` } diff --git a/pbm/storage/fs/fs.go b/pbm/storage/fs/fs.go index dc0168143..4a42b5eaa 100644 --- a/pbm/storage/fs/fs.go +++ b/pbm/storage/fs/fs.go @@ -15,16 +15,24 @@ type Config struct { Path string `bson:"path" json:"path" yaml:"path"` } -func (c *Config) Clone() *Config { - if c == nil { +func (cfg *Config) Clone() *Config { + if cfg == nil { return nil } - return &Config{Path: c.Path} + return &Config{Path: cfg.Path} } -func (c *Config) Cast() error { - if c.Path == "" { +func (cfg *Config) Equal(other *Config) bool { + if cfg == nil || other == nil { + return cfg == other + } + + return cfg.Path == other.Path +} + +func (cfg *Config) Cast() error { + if cfg.Path == "" { return errors.New("path can't be empty") } diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go index 0d5fb9714..e738c0c63 100644 --- a/pbm/storage/s3/s3.go +++ b/pbm/storage/s3/s3.go @@ -10,6 +10,7 @@ import ( "net/url" "os" "path" + "reflect" "runtime" "strings" "time" @@ -71,28 +72,6 @@ type Config struct { Retryer *Retryer `bson:"retryer,omitempty" json:"retryer,omitempty" yaml:"retryer,omitempty"` } -func (c *Config) Clone() *Config { - if c == nil { - return nil - } - - rv := *c - if c.ForcePathStyle != nil { - a := *c.ForcePathStyle - rv.ForcePathStyle = &a - } - if c.ServerSideEncryption != nil { - a := *c.ServerSideEncryption - rv.ServerSideEncryption = &a - } - if c.Retryer != nil { - a := *c.Retryer - rv.Retryer = &a - } - - return &rv -} - type Retryer struct { // Num max Retries is the number of max retries that will be performed. 
// https://pkg.go.dev/github.com/aws/aws-sdk-go/aws/client#DefaultRetryer.NumMaxRetries @@ -151,38 +130,107 @@ type AWSsse struct { SseCustomerKey string `bson:"sseCustomerKey" json:"sseCustomerKey" yaml:"sseCustomerKey"` } -func (c *Config) Cast() error { - if c.Region == "" { - c.Region = defaultS3Region +func (cfg *Config) Clone() *Config { + if cfg == nil { + return nil + } + + rv := *cfg + if cfg.ForcePathStyle != nil { + a := *cfg.ForcePathStyle + rv.ForcePathStyle = &a + } + if cfg.ServerSideEncryption != nil { + a := *cfg.ServerSideEncryption + rv.ServerSideEncryption = &a + } + if cfg.Retryer != nil { + a := *cfg.Retryer + rv.Retryer = &a + } + + return &rv +} + +func (cfg *Config) Equal(other *Config) bool { + if cfg == nil || other == nil { + return cfg == other + } + + if cfg.Provider != other.Provider { + return false + } + if cfg.Region != other.Region { + return false + } + if cfg.EndpointURL != other.EndpointURL { + return false + } + if cfg.Bucket != other.Bucket { + return false + } + if cfg.Prefix != other.Prefix { + return false + } + if cfg.StorageClass != other.StorageClass { + return false + } + + lhs, rhs := true, true + if cfg.ForcePathStyle != nil { + lhs = *cfg.ForcePathStyle + } + if other.ForcePathStyle != nil { + rhs = *other.ForcePathStyle + } + if lhs != rhs { + return false + } + + // TODO: check only required fields + if !reflect.DeepEqual(cfg.Credentials, other.Credentials) { + return false + } + // TODO: check only required fields + if !reflect.DeepEqual(cfg.ServerSideEncryption, other.ServerSideEncryption) { + return false + } + + return true +} + +func (cfg *Config) Cast() error { + if cfg.Region == "" { + cfg.Region = defaultS3Region } - if c.ForcePathStyle == nil { - c.ForcePathStyle = aws.Bool(true) + if cfg.ForcePathStyle == nil { + cfg.ForcePathStyle = aws.Bool(true) } - if c.Provider == S3ProviderUndef { - c.Provider = S3ProviderAWS - if c.EndpointURL != "" { - eu, err := url.Parse(c.EndpointURL) + if cfg.Provider == S3ProviderUndef { + cfg.Provider = S3ProviderAWS + if cfg.EndpointURL != "" { + eu, err := url.Parse(cfg.EndpointURL) if err != nil { return errors.Wrap(err, "parse EndpointURL") } if eu.Host == GCSEndpointURL { - c.Provider = S3ProviderGCS + cfg.Provider = S3ProviderGCS } } } - if c.MaxUploadParts <= 0 { - c.MaxUploadParts = s3manager.MaxUploadParts + if cfg.MaxUploadParts <= 0 { + cfg.MaxUploadParts = s3manager.MaxUploadParts } - if c.StorageClass == "" { - c.StorageClass = s3.StorageClassStandard + if cfg.StorageClass == "" { + cfg.StorageClass = s3.StorageClassStandard } - if c.Retryer != nil { - if c.Retryer.MinRetryDelay == 0 { - c.Retryer.MinRetryDelay = client.DefaultRetryerMinRetryDelay + if cfg.Retryer != nil { + if cfg.Retryer.MinRetryDelay == 0 { + cfg.Retryer.MinRetryDelay = client.DefaultRetryerMinRetryDelay } - if c.Retryer.MaxRetryDelay == 0 { - c.Retryer.MaxRetryDelay = client.DefaultRetryerMaxRetryDelay + if cfg.Retryer.MaxRetryDelay == 0 { + cfg.Retryer.MaxRetryDelay = client.DefaultRetryerMaxRetryDelay } } From 69a5bcc783f5ebdb0ad94ecea76b2e16b2fc2b95 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 13 Jun 2024 12:01:44 +0200 Subject: [PATCH 035/203] misc --- cmd/pbm/backup.go | 11 +++++++++-- pbm/backup/storage.go | 12 ++++++------ 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/cmd/pbm/backup.go b/cmd/pbm/backup.go index fb0e75283..af94cdc26 100644 --- a/cmd/pbm/backup.go +++ b/cmd/pbm/backup.go @@ -461,7 +461,7 @@ func describeBackup(ctx context.Context, conn connect.Client, pbm 
sdk.Client, b // in given `bcps`. func bcpsMatchCluster( bcps []backup.BackupMeta, - ver, + ver string, fcv string, shards []topo.Shard, confsrv string, @@ -478,7 +478,14 @@ func bcpsMatchCluster( } } -func bcpMatchCluster(bcp *backup.BackupMeta, ver, fcv string, shards map[string]bool, mapRS, mapRevRS util.RSMapFunc) { +func bcpMatchCluster( + bcp *backup.BackupMeta, + ver string, + fcv string, + shards map[string]bool, + mapRS util.RSMapFunc, + mapRevRS util.RSMapFunc, +) { if bcp.Status != defs.StatusDone { return } diff --git a/pbm/backup/storage.go b/pbm/backup/storage.go index 5a72c7d6f..745bdac33 100644 --- a/pbm/backup/storage.go +++ b/pbm/backup/storage.go @@ -262,11 +262,11 @@ func deletePhysicalBackupFiles(meta *BackupMeta, stg storage.Storage) error { } err := stg.Delete(meta.Name + defs.MetadataFileSuffix) - if errors.Is(err, storage.ErrNotExist) { - return nil + if err != nil && !errors.Is(err, storage.ErrNotExist) { + return errors.Wrap(err, "delete metadata file from storage") } - return errors.Wrap(err, "delete metadata file from storage") + return nil } // deleteLogicalBackupFiles removes backup's artifacts from storage @@ -320,9 +320,9 @@ func deleteLegacyLogicalBackupFiles(meta *BackupMeta, stg storage.Storage) error } err := stg.Delete(meta.Name + defs.MetadataFileSuffix) - if errors.Is(err, storage.ErrNotExist) { - return nil + if err != nil && !errors.Is(err, storage.ErrNotExist) { + return errors.Wrap(err, "delete metadata file from storage") } - return errors.Wrap(err, "delete metadata file from storage") + return nil } From 01a9599791026325175a4080f7368cf0b05e7094 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 13 Jun 2024 13:12:00 +0200 Subject: [PATCH 036/203] use backup storage from physical restore --- pbm/restore/physical.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/pbm/restore/physical.go b/pbm/restore/physical.go index 88557afc4..7aad10c39 100644 --- a/pbm/restore/physical.go +++ b/pbm/restore/physical.go @@ -81,6 +81,7 @@ type PhysRestore struct { opid string nodeInfo *topo.NodeInfo stg storage.Storage + bcpStg storage.Storage bcp *backup.BackupMeta files []files restoreTS primitive.Timestamp @@ -1081,8 +1082,8 @@ func (r *PhysRestore) dumpMeta(meta *RestoreMeta, s defs.Status, msg string) err func (r *PhysRestore) copyFiles() (*s3.DownloadStat, error) { var stat *s3.DownloadStat - readFn := r.stg.SourceReader - if t, ok := r.stg.(*s3.S3); ok { + readFn := r.bcpStg.SourceReader + if t, ok := r.bcpStg.(*s3.S3); ok { d := t.NewDownload(r.confOpts.NumDownloadWorkers, r.confOpts.MaxDownloadBufferMb, r.confOpts.DownloadChunkMb) readFn = d.SourceReader @@ -1996,7 +1997,7 @@ func (r *PhysRestore) setBcpFiles(ctx context.Context) error { if version.HasFilelistFile(bcp.PBMVersion) { filelistPath := path.Join(bcp.Name, setName, backup.FilelistName) - rdr, err := r.stg.SourceReader(filelistPath) + rdr, err := r.bcpStg.SourceReader(filelistPath) if err != nil { return errors.Wrapf(err, "open filelist %q", filelistPath) } @@ -2054,7 +2055,7 @@ func (r *PhysRestore) setBcpFiles(ctx context.Context) error { if version.HasFilelistFile(bcp.PBMVersion) { filelistPath := path.Join(bcp.Name, setName, backup.FilelistName) - rdr, err := r.stg.SourceReader(filelistPath) + rdr, err := r.bcpStg.SourceReader(filelistPath) if err != nil { return errors.Wrapf(err, "open filelist %q", filelistPath) } @@ -2148,6 +2149,11 @@ func (r *PhysRestore) prepareBackup(ctx context.Context, backupName string) erro return errors.Wrap(err, "get backup 
metadata") } + r.bcpStg, err = util.StorageFromConfig(&r.bcp.Store.Storage, log.LogEventFromContext(ctx)) + if err != nil { + return errors.Wrap(err, "get backup storage") + } + if r.bcp == nil { return errors.New("snapshot name doesn't set") } From d7b292bf374a49862b6fc1f3554b4d947fe8a8a8 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 13 Jun 2024 13:20:20 +0200 Subject: [PATCH 037/203] remove unused --- cmd/pbm/backup.go | 2 +- cmd/pbm/main.go | 2 +- cmd/pbm/profile.go | 9 ++++----- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/cmd/pbm/backup.go b/cmd/pbm/backup.go index af94cdc26..9d8534d18 100644 --- a/cmd/pbm/backup.go +++ b/cmd/pbm/backup.go @@ -354,7 +354,7 @@ func byteCountIEC(b int64) string { return fmt.Sprintf("%.1f %ciB", float64(b)/float64(div), "KMGTPE"[exp]) } -func describeBackup(ctx context.Context, conn connect.Client, pbm sdk.Client, b *descBcp) (fmt.Stringer, error) { +func describeBackup(ctx context.Context, pbm sdk.Client, b *descBcp) (fmt.Stringer, error) { bcp, err := pbm.GetBackupByName(ctx, b.name, sdk.GetBackupByNameOptions{}) if err != nil { return nil, errors.Wrap(err, "get backup meta") diff --git a/cmd/pbm/main.go b/cmd/pbm/main.go index 1e822e3d8..7e89afda0 100644 --- a/cmd/pbm/main.go +++ b/cmd/pbm/main.go @@ -493,7 +493,7 @@ func main() { case restoreFinishCmd.FullCommand(): out, err = runFinishRestore(finishRestore) case descBcpCmd.FullCommand(): - out, err = describeBackup(ctx, conn, pbm, &descBcp) + out, err = describeBackup(ctx, pbm, &descBcp) case restoreCmd.FullCommand(): out, err = runRestore(ctx, conn, &restore, pbmOutF) case replayCmd.FullCommand(): diff --git a/cmd/pbm/profile.go b/cmd/pbm/profile.go index 12efeb0d4..d3a6caa92 100644 --- a/cmd/pbm/profile.go +++ b/cmd/pbm/profile.go @@ -18,11 +18,10 @@ type showConfigProfileOptions struct { } type addConfigProfileOptions struct { - name string - file *os.File - force bool - sync bool - wait bool + name string + file *os.File + sync bool + wait bool } type removeConfigProfileOptions struct { From db87adc5c97b305dbf73954947b23e909557467a Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 13 Jun 2024 14:05:28 +0200 Subject: [PATCH 038/203] bring back changes after merge --- cmd/pbm-agent/pitr.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index fab65ebe3..72b17f433 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -99,7 +99,7 @@ func (a *Agent) stopPitrOnOplogOnlyChange(currOO bool) { // canSlicingNow returns lock.ConcurrentOpError if there is a parallel operation. // Only physical backups (full, incremental, external) is allowed. 
-func canSlicingNow(ctx context.Context, conn connect.Client) error { +func canSlicingNow(ctx context.Context, conn connect.Client, stgCfg *config.Storage) error { locks, err := lock.GetLocks(ctx, conn, &lock.LockHeader{}) if err != nil { return errors.Wrap(err, "get locks data") @@ -117,7 +117,7 @@ func canSlicingNow(ctx context.Context, conn connect.Client) error { return errors.Wrap(err, "get backup metadata") } - if bcp.Type == defs.LogicalBackup { + if bcp.Type == defs.LogicalBackup && bcp.Store.Equal(stgCfg) { return lock.ConcurrentOpError{l.LockHeader} } } @@ -131,7 +131,9 @@ func (a *Agent) pitr(ctx context.Context) error { if !errors.Is(err, mongo.ErrNoDocuments) { return errors.Wrap(err, "get conf") } - cfg = &config.Config{} + cfg = &config.Config{ + Oplog: &config.GlobalSlicer{}, + } } a.stopPitrOnOplogOnlyChange(cfg.Oplog.OplogOnly) @@ -145,7 +147,7 @@ func (a *Agent) pitr(ctx context.Context) error { l := log.FromContext(ctx).NewEvent(string(ctrl.CmdPITR), "", "", ep.TS()) ctx = log.SetLogEventToContext(ctx, l) - if err := canSlicingNow(ctx, a.leadConn); err != nil { + if err := canSlicingNow(ctx, a.leadConn, &cfg.Storage); err != nil { e := lock.ConcurrentOpError{} if errors.As(err, &e) { l.Info("oplog slicer is paused for lock [%s, opid: %s]", e.Lock.Type, e.Lock.OPID) From c3f4d6a1c596fa637824de63ad0655121daa2823 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 13 Jun 2024 16:36:00 +0200 Subject: [PATCH 039/203] Add basic nomination-election logic for pitr job --- cmd/pbm-agent/pitr.go | 135 ++++++++++++++++++++++++++++++++++++---- pbm/oplog/nomination.go | 2 +- 2 files changed, 125 insertions(+), 12 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 3dad41628..35428dac5 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -14,6 +14,8 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/lock" "github.com/percona/percona-backup-mongodb/pbm/log" + "github.com/percona/percona-backup-mongodb/pbm/oplog" + "github.com/percona/percona-backup-mongodb/pbm/prio" "github.com/percona/percona-backup-mongodb/pbm/slicer" "github.com/percona/percona-backup-mongodb/pbm/topo" "github.com/percona/percona-backup-mongodb/pbm/util" @@ -59,6 +61,7 @@ func (a *Agent) sliceNow(opid ctrl.OPID) { } const pitrCheckPeriod = time.Second * 15 +const pitrRenominationFrame = 5 * time.Second // PITR starts PITR processing routine func (a *Agent) PITR(ctx context.Context) { @@ -134,16 +137,18 @@ func (a *Agent) pitr(ctx context.Context) error { cfg = &config.Config{} } - a.stopPitrOnOplogOnlyChange(cfg.PITR.OplogOnly) + slicerInterval := cfg.OplogSlicerInterval() + + ep := config.Epoch(cfg.Epoch) + l := log.FromContext(ctx).NewEvent(string(ctrl.CmdPITR), "", "", ep.TS()) + ctx = log.SetLogEventToContext(ctx, l) if !cfg.PITR.Enabled { a.removePitr() return nil } - ep := config.Epoch(cfg.Epoch) - l := log.FromContext(ctx).NewEvent(string(ctrl.CmdPITR), "", "", ep.TS()) - ctx = log.SetLogEventToContext(ctx, l) + a.stopPitrOnOplogOnlyChange(cfg.PITR.OplogOnly) if err := canSlicingNow(ctx, a.leadConn); err != nil { e := lock.ConcurrentOpError{} @@ -155,10 +160,9 @@ func (a *Agent) pitr(ctx context.Context) error { return err } - slicerInterval := cfg.OplogSlicerInterval() - if p := a.getPitr(); p != nil { // already do the job + //todo: remove this span changing detaction to leader currInterval := p.slicer.GetSpan() if currInterval != slicerInterval { p.slicer.SetSpan(slicerInterval) @@ -178,8 +182,8 @@ 
func (a *Agent) pitr(ctx context.Context) error { if err != nil { return errors.Wrap(err, "check if already run") } - if !moveOn { + l.Debug("pitr running on another RS member") return nil } @@ -188,20 +192,64 @@ func (a *Agent) pitr(ctx context.Context) error { // if node failing, then some other agent with healthy node will hopefully catch up // so this code won't be reached and will not pollute log with "pitr" errors while // the other node does successfully slice - ninf, err := topo.GetNodeInfoExt(ctx, a.nodeConn) + nodeInfo, err := topo.GetNodeInfoExt(ctx, a.nodeConn) if err != nil { + l.Error("get node info: %v", err) return errors.Wrap(err, "get node info") } - q, err := topo.NodeSuits(ctx, a.nodeConn, ninf) + + q, err := topo.NodeSuits(ctx, a.nodeConn, nodeInfo) if err != nil { return errors.Wrap(err, "node check") } - - // node is not suitable for doing backup + // node is not suitable for doing pitr if !q { return nil } + isClusterLeader := nodeInfo.IsClusterLeader() + + if isClusterLeader { + //todo: init meta on first usage + //oplog.InitMeta(ctx, a.leadConn) + + agents, err := topo.ListAgentStatuses(ctx, a.leadConn) + if err != nil { + l.Error("get agents list: %v", err) + return errors.Wrap(err, "list agents statuses") + } + + nodes, err := prio.CalcNodesPriority(ctx, nil, cfg.Backup.Priority, agents) + if err != nil { + l.Error("get nodes priority: %v", err) + return errors.Wrap(err, "get nodes priorities") + } + + shards, err := topo.ClusterMembers(ctx, a.leadConn.MongoClient()) + if err != nil { + l.Error("get cluster members: %v", err) + return errors.Wrap(err, "get cluster members") + } + + for _, sh := range shards { + go func(rs string) { + if err := a.nominateRSForPITR(ctx, rs, nodes.RS(rs)); err != nil { + l.Error("nodes nomination error for %s: %v", rs, err) + } + }(sh.RS) + } + } + + nominated, err := a.waitNominationForPITR(ctx, nodeInfo.SetName, nodeInfo.Me) + if err != nil { + l.Error("wait for pitr nomination: %v", err) + return errors.Wrap(err, "wait nomination for pitr") + } + if !nominated { + l.Debug("skip after pitr nomination, probably started by another node") + return nil + } + epts := ep.TS() lck := lock.NewOpLock(a.leadConn, lock.LockHeader{ Replset: a.brief.SetName, @@ -218,6 +266,7 @@ func (a *Agent) pitr(ctx context.Context) error { l.Debug("skip: lock not acquired") return nil } + err = oplog.SetPITRNomineeACK(ctx, a.leadConn, a.brief.SetName, a.brief.Me) stg, err := util.StorageFromConfig(cfg.Storage, l) if err != nil { @@ -286,6 +335,36 @@ func (a *Agent) pitr(ctx context.Context) error { return nil } +func (a *Agent) nominateRSForPITR(ctx context.Context, rs string, nodes [][]string) error { + l := log.LogEventFromContext(ctx) + l.Debug("pitr nomination list for %s: %v", rs, nodes) + err := oplog.SetPITRNomination(ctx, a.leadConn, rs) + if err != nil { + return errors.Wrap(err, "set pitr nomination meta") + } + + for _, n := range nodes { + nms, err := oplog.GetPITRNominees(ctx, a.leadConn, rs) + if err != nil && !errors.Is(err, errors.ErrNotFound) { + return errors.Wrap(err, "get pitr nominees") + } + if nms != nil && len(nms.Ack) > 0 { + l.Debug("pitr nomination: %s won by %s", rs, nms.Ack) + return nil + } + + err = oplog.SetPITRNominees(ctx, a.leadConn, rs, n) + if err != nil { + return errors.Wrap(err, "set pitr nominees") + } + l.Debug("pitr nomination %s, set candidates %v", rs, n) + + time.Sleep(pitrRenominationFrame) + } + + return nil +} + func (a *Agent) pitrLockCheck(ctx context.Context) (bool, error) { ts, err := 
topo.GetClusterTime(ctx, a.leadConn) if err != nil { @@ -308,3 +387,37 @@ func (a *Agent) pitrLockCheck(ctx context.Context) (bool, error) { // stale lock means we should move on and clean it up during the lock.Acquire return tl.Heartbeat.T+defs.StaleFrameSec < ts.T, nil } + +func (a *Agent) waitNominationForPITR(ctx context.Context, rs, node string) (bool, error) { + l := log.LogEventFromContext(ctx) + + tk := time.NewTicker(time.Millisecond * 500) + defer tk.Stop() + + l.Debug("waiting pitr nomination") + for { + select { + case <-tk.C: + + nm, err := oplog.GetPITRNominees(ctx, a.leadConn, rs) + if err != nil { + if errors.Is(err, errors.ErrNotFound) { + continue + } + return false, errors.Wrap(err, "check pitr nomination") + } + if len(nm.Ack) > 0 { + return false, nil + } + for _, n := range nm.Nodes { + if n == node { + return true, nil + } + } + } + //todo: we should handle cancelation here also, for e.g.: + // - pitr is disabled + // - configuration has been changed + // - cluster topology has been changed + } +} diff --git a/pbm/oplog/nomination.go b/pbm/oplog/nomination.go index 8219a078b..e53e756b4 100644 --- a/pbm/oplog/nomination.go +++ b/pbm/oplog/nomination.go @@ -56,7 +56,7 @@ func SetPITRNomination(ctx context.Context, conn connect.Client, rs string) erro options.Update().SetUpsert(true), ) - return errors.Wrap(err, "update pirt nomination") + return errors.Wrap(err, "update pitr nomination") } // GetPITRNominees fetches nomination fragment for specified RS From edbd8ba3eb087f123c1e06ed4dc23e15286d55b6 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 13 Jun 2024 17:03:01 +0200 Subject: [PATCH 040/203] Apply config.Priority type on PITR logic --- cmd/pbm-agent/pitr.go | 2 +- pbm/prio/priority.go | 3 ++- pbm/prio/priority_test.go | 17 +++++++++-------- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 35428dac5..9e452d858 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -219,7 +219,7 @@ func (a *Agent) pitr(ctx context.Context) error { return errors.Wrap(err, "list agents statuses") } - nodes, err := prio.CalcNodesPriority(ctx, nil, cfg.Backup.Priority, agents) + nodes, err := prio.CalcNodesPriority(ctx, nil, cfg.PITR.Priority, agents) if err != nil { l.Error("get nodes priority: %v", err) return errors.Wrap(err, "get nodes priorities") diff --git a/pbm/prio/priority.go b/pbm/prio/priority.go index 38f4eac69..c1193b6cd 100644 --- a/pbm/prio/priority.go +++ b/pbm/prio/priority.go @@ -4,6 +4,7 @@ import ( "context" "sort" + "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/topo" ) @@ -46,7 +47,7 @@ type agentScore func(topo.AgentStat) float64 func CalcNodesPriority( ctx context.Context, c map[string]float64, - cfgPrio map[string]float64, + cfgPrio config.Priority, agents []topo.AgentStat, ) (*NodesPriority, error) { // if config level priorities (cfgPrio) aren't set, apply defaults diff --git a/pbm/prio/priority_test.go b/pbm/prio/priority_test.go index fb126ab1d..9f2239108 100644 --- a/pbm/prio/priority_test.go +++ b/pbm/prio/priority_test.go @@ -5,6 +5,7 @@ import ( "reflect" "testing" + "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/topo" ) @@ -175,7 +176,7 @@ func TestCalcNodesPriority(t *testing.T) { testCases := []struct { desc string agents []topo.AgentStat - expPrio 
map[string]float64 + expPrio config.Priority res [][]string }{ { @@ -185,7 +186,7 @@ func TestCalcNodesPriority(t *testing.T) { newS("rs0", "rs02"), newS("rs0", "rs03"), }, - expPrio: map[string]float64{ + expPrio: config.Priority{ "rs01": 2.0, "rs02": 3.0, "rs03": 1.0, @@ -205,7 +206,7 @@ func TestCalcNodesPriority(t *testing.T) { newS("rs0", "rs04"), newS("rs0", "rs05"), }, - expPrio: map[string]float64{ + expPrio: config.Priority{ "rs01": 2.0, "rs02": 3.0, "rs03": 1.0, @@ -225,7 +226,7 @@ func TestCalcNodesPriority(t *testing.T) { newS("rs0", "rs02"), newS("rs0", "rs03"), }, - expPrio: map[string]float64{ + expPrio: config.Priority{ "rs01": 0.5, }, res: [][]string{ @@ -240,7 +241,7 @@ func TestCalcNodesPriority(t *testing.T) { newS("rs0", "rs02"), newS("rs0", "rs03"), }, - expPrio: map[string]float64{}, + expPrio: config.Priority{}, res: [][]string{ {"rs01", "rs02", "rs03"}, }, @@ -279,7 +280,7 @@ func TestCalcNodesPriority(t *testing.T) { testCases := []struct { desc string agents []topo.AgentStat - expPrio map[string]float64 + expPrio config.Priority res [][]string resCfg [][]string resRS0 [][]string @@ -298,7 +299,7 @@ func TestCalcNodesPriority(t *testing.T) { newS("cfg", "cfg2"), newP("cfg", "cfg3"), }, - expPrio: map[string]float64{ + expPrio: config.Priority{ "rs01": 2.0, "rs02": 3.0, "rs03": 1.0, @@ -335,7 +336,7 @@ func TestCalcNodesPriority(t *testing.T) { newS("cfg", "cfg2"), newP("cfg", "cfg3"), }, - expPrio: map[string]float64{ + expPrio: config.Priority{ "rs01": 0.5, "rs12": 0.5, "cfg3": 0.5, From af46ae9ae08e29b09dd331bec7c631425ec411d7 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 13 Jun 2024 16:55:48 +0200 Subject: [PATCH 041/203] fix external restore --- cmd/pbm-agent/restore.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/pbm-agent/restore.go b/cmd/pbm-agent/restore.go index 359ff189c..bd6d794a2 100644 --- a/cmd/pbm-agent/restore.go +++ b/cmd/pbm-agent/restore.go @@ -98,6 +98,7 @@ func (a *Agent) Restore(ctx context.Context, r *ctrl.RestoreCmd, opid ctrl.OPID, return } bcpType = bcp.Type + r.BackupName = bcp.Name } l.Info("recovery started") @@ -130,7 +131,6 @@ func (a *Agent) Restore(ctx context.Context, r *ctrl.RestoreCmd, opid ctrl.OPID, return } - r.BackupName = bcp.Name err = rstr.Snapshot(ctx, r, r.OplogTS, opid, l, a.closeCMD, a.HbPause) } if err != nil { From caf16c039ee19205ace1009f364e854b89308253 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Fri, 14 Jun 2024 10:59:02 +0200 Subject: [PATCH 042/203] use documented error cast --- pbm/storage/s3/download.go | 5 +++-- pbm/storage/s3/s3.go | 6 ++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/pbm/storage/s3/download.go b/pbm/storage/s3/download.go index 733c766e6..b3254aeed 100644 --- a/pbm/storage/s3/download.go +++ b/pbm/storage/s3/download.go @@ -448,10 +448,11 @@ func (pr *partReader) getChunk(buf *arena, s *s3.S3, start, end int64) (io.ReadC if err != nil { // if object size is undefined, we would read // until HTTP code 416 (Requested Range Not Satisfiable) - var er awserr.RequestFailure - if errors.As(err, &er) && er.StatusCode() == http.StatusRequestedRangeNotSatisfiable { + rerr, ok := err.(awserr.RequestFailure) + if ok && rerr.StatusCode() == http.StatusRequestedRangeNotSatisfiable { return nil, io.EOF } + pr.l.Warning("errGetObj Err: %v", err) return nil, getObjError{err} } diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go index e738c0c63..de1157e88 100644 --- a/pbm/storage/s3/s3.go +++ b/pbm/storage/s3/s3.go @@ -550,8 +550,7 @@ func (s 
*S3) FileStat(name string) (storage.FileInfo, error) {
 	h, err := s.s3s.HeadObject(headOpts)
 	if err != nil {
-		var aerr awserr.Error
-		if errors.As(err, &aerr) && aerr.Code() == "NotFound" {
+		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
 			return inf, storage.ErrNotExist
 		}
@@ -578,8 +577,7 @@ func (s *S3) Delete(name string) error {
 		Key:    aws.String(path.Join(s.opts.Prefix, name)),
 	})
 	if err != nil {
-		var aerr awserr.Error
-		if errors.As(err, &aerr) && aerr.Code() == s3.ErrCodeNoSuchKey {
+		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
 			return storage.ErrNotExist
 		}
 		return errors.Wrapf(err, "delete '%s/%s' file from S3", s.opts.Bucket, name)

From a38418447d9f3d0bb25302a28da5711ab7a0a9b9 Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Fri, 14 Jun 2024 10:59:18 +0200
Subject: [PATCH 043/203] fix error cast

---
 cmd/pbm/backup.go      | 2 +-
 cmd/pbm/backup_test.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/cmd/pbm/backup.go b/cmd/pbm/backup.go
index 9d8534d18..9c49f0756 100644
--- a/cmd/pbm/backup.go
+++ b/cmd/pbm/backup.go
@@ -103,7 +103,7 @@ func runBackup(
 	if err := checkConcurrentOp(ctx, conn); err != nil {
 		// PITR slicing can be run along with the backup start - agents will resolve it.
-		var e concurentOpError
+		var e *concurentOpError
 		if !errors.As(err, &e) {
 			return nil, err
 		}
diff --git a/cmd/pbm/backup_test.go b/cmd/pbm/backup_test.go
index dae85d8ce..7533a1d58 100644
--- a/cmd/pbm/backup_test.go
+++ b/cmd/pbm/backup_test.go
@@ -329,7 +329,7 @@ func checkBcpMatchClusterError(err, target error) string {
 		return fmt.Sprintf("unknown errIncompatible error: %T", err)
 	}
 	var err2 missedReplsetsError
-	if !errors.As(err, &err2) {
+	if !errors.As(target, &err2) {
 		return fmt.Sprintf("expect errMissedReplsets, got %T", err)
 	}

From 25812b0a77771752a1884765d96f60b364af0d20 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Mon, 17 Jun 2024 10:24:03 +0200
Subject: [PATCH 044/203] Remove backoff strategy for PITR

The priorities and the nomination process are used to determine the
cluster member for PITR.
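
With the penalty wait removed, the agent re-enters the check on a fixed
cadence; roughly (a sketch of the resulting loop, matching the diff below):

	for {
		if err := a.pitr(ctx); err != nil {
			// only log the error; member selection is handled by
			// the nomination process, not by a doubled wait
		}
		time.Sleep(pitrCheckPeriod)
	}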
--- cmd/pbm-agent/pitr.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 9e452d858..375e49a17 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -69,20 +69,15 @@ func (a *Agent) PITR(ctx context.Context) { l.Printf("starting PITR routine") for { - wait := pitrCheckPeriod - err := a.pitr(ctx) if err != nil { // we need epoch just to log pitr err with an extra context // so not much care if we get it or not ep, _ := config.GetEpoch(ctx, a.leadConn) l.Error(string(ctrl.CmdPITR), "", "", ep.TS(), "init: %v", err) - - // penalty to the failed node so healthy nodes would have priority on next try - wait *= 2 } - time.Sleep(wait) + time.Sleep(pitrCheckPeriod) } } From 193a67af3a6219cea09516624499392b7601f3ec Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 17 Jun 2024 15:02:23 +0200 Subject: [PATCH 045/203] fix typo --- pbm/config/config.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pbm/config/config.go b/pbm/config/config.go index 8e088dc10..ddef028cd 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -249,6 +249,7 @@ func (s *Storage) Cast() error { case storage.S3: return s.S3.Cast() case storage.Azure: // noop + return nil } return errors.Wrap(ErrUnkownStorageType, string(s.Type)) From 94f55170b17afe9a8a6447babb3ae6d2b7c7c197 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 17 Jun 2024 16:04:56 +0200 Subject: [PATCH 046/203] Add InitMeta for the first start (PITR enabled) --- cmd/pbm-agent/pitr.go | 5 ++--- pbm/oplog/nomination.go | 3 ++- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 375e49a17..37f7ac81a 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -61,7 +61,7 @@ func (a *Agent) sliceNow(opid ctrl.OPID) { } const pitrCheckPeriod = time.Second * 15 -const pitrRenominationFrame = 5 * time.Second +const pitrRenominationFrame = 30 * time.Second // PITR starts PITR processing routine func (a *Agent) PITR(ctx context.Context) { @@ -205,8 +205,7 @@ func (a *Agent) pitr(ctx context.Context) error { isClusterLeader := nodeInfo.IsClusterLeader() if isClusterLeader { - //todo: init meta on first usage - //oplog.InitMeta(ctx, a.leadConn) + oplog.InitMeta(ctx, a.leadConn) agents, err := topo.ListAgentStatuses(ctx, a.leadConn) if err != nil { diff --git a/pbm/oplog/nomination.go b/pbm/oplog/nomination.go index e53e756b4..8b702f58f 100644 --- a/pbm/oplog/nomination.go +++ b/pbm/oplog/nomination.go @@ -30,7 +30,8 @@ type PITRNomination struct { // Init add initial PITR document. func InitMeta(ctx context.Context, conn connect.Client) error { pitrMeta := PITRMeta{ - StartTS: time.Now().Unix(), + StartTS: time.Now().Unix(), + Nomination: []PITRNomination{}, } _, err := conn.PITRCollection().ReplaceOne( ctx, From 3b59794320540291dccd270c8c4f63d462ab21aa Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 17 Jun 2024 17:44:02 +0200 Subject: [PATCH 047/203] fix unset storage for oplog-replay --- pbm/restore/logical.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pbm/restore/logical.go b/pbm/restore/logical.go index 6ed35fff4..69903c967 100644 --- a/pbm/restore/logical.go +++ b/pbm/restore/logical.go @@ -418,6 +418,11 @@ func (r *Restore) ReplayOplog(ctx context.Context, cmd *ctrl.ReplayCmd, opid ctr return r.Done(ctx) // skip. 
no oplog for current rs } + r.stg, err = util.GetStorage(ctx, r.leadConn, log.LogEventFromContext(ctx)) + if err != nil { + return errors.Wrapf(err, "get storage") + } + opChunks, err := r.chunks(ctx, cmd.Start, cmd.End) if err != nil { return err From a0f87bd88f25a01e215f2a2168118cc27b46d46f Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 17 Jun 2024 21:35:18 +0200 Subject: [PATCH 048/203] Move IsOplogSlicing func from status to oplog ... package. The same function will be reused by cluster leader to check if slicing is still in progress. --- cmd/pbm/status.go | 27 +-------------------------- pbm/oplog/oplog.go | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 26 deletions(-) diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go index b873f2fb2..0f02d7f57 100644 --- a/cmd/pbm/status.go +++ b/cmd/pbm/status.go @@ -359,31 +359,6 @@ func (p pitrStat) String() string { return s } -// isOplogSlicing checks if PITR slicing is running. It looks for PITR locks -// and returns true if there is at least one not stale. -func isOplogSlicing(ctx context.Context, conn connect.Client) (bool, error) { - locks, err := lock.GetOpLocks(ctx, conn, &lock.LockHeader{Type: ctrl.CmdPITR}) - if err != nil { - return false, errors.Wrap(err, "get locks") - } - if len(locks) == 0 { - return false, nil - } - - ct, err := topo.GetClusterTime(ctx, conn) - if err != nil { - return false, errors.Wrap(err, "get cluster time") - } - - for i := range locks { - if locks[i].Heartbeat.T+defs.StaleFrameSec >= ct.T { - return true, nil - } - } - - return false, nil -} - func getPitrStatus(ctx context.Context, conn connect.Client) (fmt.Stringer, error) { var p pitrStat var err error @@ -392,7 +367,7 @@ func getPitrStatus(ctx context.Context, conn connect.Client) (fmt.Stringer, erro return p, errors.Wrap(err, "unable check PITR config status") } - p.Running, err = isOplogSlicing(ctx, conn) + p.Running, err = oplog.IsOplogSlicing(ctx, conn) if err != nil { return p, errors.Wrap(err, "unable check PITR running status") } diff --git a/pbm/oplog/oplog.go b/pbm/oplog/oplog.go index 717b603c9..d0a089d34 100644 --- a/pbm/oplog/oplog.go +++ b/pbm/oplog/oplog.go @@ -9,7 +9,12 @@ import ( "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/readconcern" + "github.com/percona/percona-backup-mongodb/pbm/connect" + "github.com/percona/percona-backup-mongodb/pbm/ctrl" + "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/lock" + "github.com/percona/percona-backup-mongodb/pbm/topo" ) var errNoTransaction = errors.New("no transaction found") @@ -68,3 +73,28 @@ func findLastOplogTS(ctx context.Context, m *mongo.Client) (primitive.Timestamp, return primitive.Timestamp{T: t, I: i}, nil } + +// IsOplogSlicing checks if PITR slicing is running. It looks for PITR locks +// and returns true if there is at least one not stale. 
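+// A minimal usage sketch (the status command in this patch, and later the
+// PITR cluster leader, call it with the lead connection):
+//
+//	if running, err := oplog.IsOplogSlicing(ctx, conn); err == nil && running {
+//	    // at least one PITR lock in the cluster is still fresh
+//	}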
+func IsOplogSlicing(ctx context.Context, conn connect.Client) (bool, error) { + locks, err := lock.GetOpLocks(ctx, conn, &lock.LockHeader{Type: ctrl.CmdPITR}) + if err != nil { + return false, errors.Wrap(err, "get locks") + } + if len(locks) == 0 { + return false, nil + } + + ct, err := topo.GetClusterTime(ctx, conn) + if err != nil { + return false, errors.Wrap(err, "get cluster time") + } + + for i := range locks { + if locks[i].Heartbeat.T+defs.StaleFrameSec >= ct.T { + return true, nil + } + } + + return false, nil +} From bf488632e8339c5a47b06f34d03cc3d007a0d792 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 18 Jun 2024 16:44:46 +0200 Subject: [PATCH 049/203] agentCheckup: warn if storage is not initialized --- cmd/pbm-agent/agent.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 759634ae3..d65a22965 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -472,7 +472,7 @@ func (a *Agent) storStatus(ctx context.Context, log log.LogEvent, forceCheckStor return topo.SubsysStatus{Err: errStr} } if !ok { - return topo.SubsysStatus{Err: "storage is not initialized"} + log.Warning("storage is not initialized") } return topo.SubsysStatus{OK: true} From 2c4fd5d072c22baffa88e46f3f23c0ec17e72a5c Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 18 Jun 2024 16:55:00 +0200 Subject: [PATCH 050/203] print error when adding profile --- cmd/pbm-agent/profile.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cmd/pbm-agent/profile.go b/cmd/pbm-agent/profile.go index 48c6ee470..b429d9ff4 100644 --- a/cmd/pbm-agent/profile.go +++ b/cmd/pbm-agent/profile.go @@ -95,13 +95,13 @@ func (a *Agent) handleAddConfigProfile( err = storage.HasReadAccess(ctx, stg) if err != nil { if !errors.Is(err, storage.ErrUninitialized) { - err = errors.Wrap(err, "check read access") + l.Error("check read access: %v", err) return } err = storage.Initialize(ctx, stg) if err != nil { - err = errors.Wrap(err, "init storage") + l.Error("init storage: %v", err) return } } @@ -113,9 +113,11 @@ func (a *Agent) handleAddConfigProfile( } err = config.AddProfile(ctx, a.leadConn, profile) if err != nil { - err = errors.Wrap(err, "add profile config") + l.Error("add profile config: %v", err) return } + + l.Info("profile saved") } func (a *Agent) handleRemoveConfigProfile( From ef2d50946e3d393b3cb075e2b096f4c0d5b2b6b1 Mon Sep 17 00:00:00 2001 From: Sandra Romanchenko <53295797+sandraromanchenko@users.noreply.github.com> Date: Wed, 19 Jun 2024 15:59:36 +0300 Subject: [PATCH 051/203] PBM_tests. 
Make tests ARM friendly (#953) --- e2e-tests/docker/mongodb.dockerfile | 2 +- e2e-tests/docker/pbm.dockerfile | 5 +++-- e2e-tests/docker/tests.dockerfile | 3 ++- e2e-tests/functions | 4 ++-- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/e2e-tests/docker/mongodb.dockerfile b/e2e-tests/docker/mongodb.dockerfile index 3d14eaa24..d68fe861c 100644 --- a/e2e-tests/docker/mongodb.dockerfile +++ b/e2e-tests/docker/mongodb.dockerfile @@ -1,6 +1,6 @@ ARG MONGODB_VERSION=4.4 ARG MONGODB_IMAGE=percona/percona-server-mongodb -FROM ${MONGODB_IMAGE}:${MONGODB_VERSION} +FROM ${MONGODB_IMAGE}:${MONGODB_VERSION}-multi USER root COPY e2e-tests/docker/keyFile /opt/keyFile RUN chown mongodb /opt/keyFile && chmod 400 /opt/keyFile && mkdir -p /home/mongodb/ && chown mongodb /home/mongodb diff --git a/e2e-tests/docker/pbm.dockerfile b/e2e-tests/docker/pbm.dockerfile index 3487ac111..142686a4a 100644 --- a/e2e-tests/docker/pbm.dockerfile +++ b/e2e-tests/docker/pbm.dockerfile @@ -1,7 +1,7 @@ ARG MONGODB_VERSION=4.4 ARG MONGODB_IMAGE=percona/percona-server-mongodb -FROM ${MONGODB_IMAGE}:${MONGODB_VERSION} as mongo_image +FROM ${MONGODB_IMAGE}:${MONGODB_VERSION}-multi as mongo_image FROM oraclelinux:8 as base-build WORKDIR /build @@ -11,7 +11,8 @@ RUN mkdir -p /data/db COPY --from=mongo_image /bin/mongod /bin/ RUN dnf install epel-release && dnf update && dnf install make gcc krb5-devel iproute-tc libfaketime -RUN curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.2.linux-amd64.tar.gz && \ +RUN arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && \ +curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.2.linux-${arch}.tar.gz && \ rm -rf /usr/local/go && tar -C /usr/local -xzf /tmp/golang.tar.gz && rm /tmp/golang.tar.gz ENV PATH=$PATH:/usr/local/go/bin diff --git a/e2e-tests/docker/tests.dockerfile b/e2e-tests/docker/tests.dockerfile index ca1b1c75f..620632030 100644 --- a/e2e-tests/docker/tests.dockerfile +++ b/e2e-tests/docker/tests.dockerfile @@ -2,7 +2,8 @@ FROM oraclelinux:8 AS base-build WORKDIR /build RUN dnf update && dnf install make gcc krb5-devel -RUN curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.2.linux-amd64.tar.gz && \ +RUN arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && \ +curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.2.linux-${arch}.tar.gz && \ rm -rf /usr/local/go && tar -C /usr/local -xzf /tmp/golang.tar.gz && rm /tmp/golang.tar.gz ENV PATH=$PATH:/usr/local/go/bin diff --git a/e2e-tests/functions b/e2e-tests/functions index 828655d1f..ab641ee7b 100644 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -158,7 +158,7 @@ start_cluster() { genMongoKey echo 'Build agents and tests' - docker-compose -f $COMPOSE_PATH build + docker-compose -f $COMPOSE_PATH build --no-cache --pull mongo="mongo" if [ "${mongo_version:0:1}" -ge 6 ]; then @@ -203,7 +203,7 @@ start_replset() { genMongoKey echo 'Build agents and tests' - docker-compose -f $compose build + docker-compose -f $compose build --no-cache --pull if [ ! -d "${test_dir}/docker/backups" ]; then mkdir "${test_dir}/docker/backups" From e1ea796f4e2d7a147168647400bf2975b84d75fd Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 20 Jun 2024 15:31:03 +0200 Subject: [PATCH 052/203] Add waitAllOpLockRelease for syncing up cluster leader ... 
before the nomination-election process
---
 cmd/pbm-agent/pitr.go   | 59 ++++++++++++++++++++++++++++++++++++-----
 pbm/oplog/nomination.go |  2 ++
 2 files changed, 55 insertions(+), 6 deletions(-)

diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go
index 37f7ac81a..33626af9b 100644
--- a/cmd/pbm-agent/pitr.go
+++ b/cmd/pbm-agent/pitr.go
@@ -60,8 +60,12 @@ func (a *Agent) sliceNow(opid ctrl.OPID) {
 	a.pitrjob.w <- opid
 }
 
-const pitrCheckPeriod = time.Second * 15
-const pitrRenominationFrame = 30 * time.Second
+const (
+	pitrCheckPeriod          = 15 * time.Second
+	pitrRenominationFrame    = 30 * time.Second
+	pitrOpLockPollingCycle   = 15 * time.Second
+	pitrOpLockPollingTimeOut = 2 * time.Minute
+)
 
 // PITR starts PITR processing routine
 func (a *Agent) PITR(ctx context.Context) {
@@ -205,6 +209,18 @@ func (a *Agent) pitr(ctx context.Context) error {
 
 	isClusterLeader := nodeInfo.IsClusterLeader()
 	if isClusterLeader {
+		l.Debug("checking locks in the whole cluster")
+		noLocks, err := a.waitAllOpLockRelease(ctx)
+		if err != nil {
+			l.Error("wait for all oplock release: %v", err)
+			return errors.Wrap(err, "wait all oplock release")
+		}
+		if !noLocks {
+			l.Debug("there are still working pitr members, nomination will not be continued")
+			return nil
+		}
+
+		l.Debug("init pitr meta on the first usage")
 		oplog.InitMeta(ctx, a.leadConn)
 
 		agents, err := topo.ListAgentStatuses(ctx, a.leadConn)
@@ -382,6 +398,40 @@ func (a *Agent) pitrLockCheck(ctx context.Context) (bool, error) {
 	return tl.Heartbeat.T+defs.StaleFrameSec < ts.T, nil
 }
 
+// waitAllOpLockRelease waits until there is no live OpLock and returns true
+// in that case. The wait is bounded by a timeout; when it expires, false is returned.
+func (a *Agent) waitAllOpLockRelease(ctx context.Context) (bool, error) {
+	l := log.LogEventFromContext(ctx)
+
+	tick := time.NewTicker(pitrOpLockPollingCycle)
+	defer tick.Stop()
+
+	tout := time.NewTimer(pitrOpLockPollingTimeOut)
+	defer tout.Stop()
+
+	for {
+		select {
+		case <-tick.C:
+			running, err := oplog.IsOplogSlicing(ctx, a.leadConn)
+			if err != nil {
+				return false, errors.Wrap(err, "is oplog slicing check")
+			}
+			if !running {
+				return true, nil
+			}
+			l.Debug("oplog slicing still running")
+		case <-tout.C:
+			l.Warning("timeout while waiting for release of all OpLocks")
+			return false, nil
+		}
+	}
+}
+
+// waitNominationForPITR is used by a potential nominee to determine whether it
+// has been nominated by the leader. It returns true if the member receives the nomination.
+// If the nomination document is not found, the nominee tries again on the next tick.
+// If an Ack is found in the fetched fragment, another member has already confirmed
+// the nomination; in that case the current member has lost the nomination and false is returned.
 func (a *Agent) waitNominationForPITR(ctx context.Context, rs, node string) (bool, error) {
 	l := log.LogEventFromContext(ctx)
 
@@ -409,9 +459,6 @@ func (a *Agent) waitNominationForPITR(ctx context.Context, rs, node string) (boo
 		}
 	}
 }
-	//todo: we should handle cancelation here also, for e.g.:
-	// - pitr is disabled
-	// - configuration has been changed
-	// - cluster topology has been changed
 }
+	//todo: add timeout: e.g. 2 minutes
 }

diff --git a/pbm/oplog/nomination.go b/pbm/oplog/nomination.go
index 8b702f58f..97ee5a64a 100644
--- a/pbm/oplog/nomination.go
+++ b/pbm/oplog/nomination.go
@@ -62,6 +62,8 @@ func SetPITRNomination(ctx context.Context, conn connect.Client, rs string) erro
 
 // GetPITRNominees fetches nomination fragment for specified RS
 // from PITRMeta document.
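+// A nomination fragment lists the nodes nominated for the replica set; its Ack
+// field holds the name of the node that confirmed the nomination.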
+// If document is not found, or document fragment for specific RS is not found, +// error ErrNotFound is returned. func GetPITRNominees( ctx context.Context, conn connect.Client, From 435e238f42edb5941684d28b1fa3ee880a493697 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 25 Jun 2024 12:51:23 +0200 Subject: [PATCH 053/203] handle duplicated backup meta --- pbm/errors/errors.go | 6 ++-- pbm/resync/rsync.go | 65 ++++++++++++++++++++++++++++++++++++++------ 2 files changed, 59 insertions(+), 12 deletions(-) diff --git a/pbm/errors/errors.go b/pbm/errors/errors.go index c9f239364..601026d3a 100644 --- a/pbm/errors/errors.go +++ b/pbm/errors/errors.go @@ -41,6 +41,6 @@ func Cause(err error) error { return gerrs.Cause(err) } -// func Join(errs ...error) error { -// return stderrors.Join(errs...) -// } +func Join(errs ...error) error { + return stderrors.Join(errs...) +} diff --git a/pbm/resync/rsync.go b/pbm/resync/rsync.go index 3b1f8cb75..3deb98bf3 100644 --- a/pbm/resync/rsync.go +++ b/pbm/resync/rsync.go @@ -3,9 +3,12 @@ package resync import ( "context" "encoding/json" + "runtime" "strings" + "sync" "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" "github.com/percona/percona-backup-mongodb/pbm/backup" "github.com/percona/percona-backup-mongodb/pbm/config" @@ -124,18 +127,62 @@ func SyncBackupList( Storage: *cfg, } - docs := make([]any, len(backupList)) - for i, m := range backupList { - l.Debug("bcp: %v", m.Name) - + for i := range backupList { // overwriting config allows PBM to download files from the current deployment - m.Store = backupStore - docs[i] = m + backupList[i].Store = backupStore } - _, err = conn.BcpCollection().InsertMany(ctx, docs) - if err != nil { - return errors.Wrap(err, "write backups meta into db") + return insertBackupList(ctx, conn, backupList) +} + +func insertBackupList( + ctx context.Context, + conn connect.Client, + backups []*backup.BackupMeta, +) error { + concurrencyNumber := runtime.NumCPU() + + inC := make(chan *backup.BackupMeta) + errC := make(chan error, concurrencyNumber) + + wg := &sync.WaitGroup{} + wg.Add(concurrencyNumber) + for range concurrencyNumber { + go func() { + defer wg.Done() + l := log.LogEventFromContext(ctx) + + for bcp := range inC { + l.Debug("bcp: %v", bcp.Name) + + _, err := conn.BcpCollection().InsertOne(ctx, bcp) + if err != nil { + if mongo.IsDuplicateKeyError(err) { + l.Warning("backup %q already exists", bcp.Name) + continue + } + errC <- errors.Wrapf(err, "backup %q", bcp.Name) + } + } + }() + } + + go func() { + for _, bcp := range backups { + inC <- bcp + } + + close(inC) + wg.Wait() + close(errC) + }() + + var errs []error + for err := range errC { + errs = append(errs, err) + } + if len(errs) != 0 { + return errors.Errorf("write backup meta:\n%v", errors.Join(errs...)) } return nil From 8b727dd7da54125058da2e5109919a6410c485ad Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 25 Jun 2024 13:47:43 +0200 Subject: [PATCH 054/203] show error from agent workload --- pbm/resync/rsync.go | 12 ++++++------ sdk/util.go | 8 +++++++- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/pbm/resync/rsync.go b/pbm/resync/rsync.go index 3deb98bf3..04681a4d8 100644 --- a/pbm/resync/rsync.go +++ b/pbm/resync/rsync.go @@ -52,19 +52,19 @@ func Resync(ctx context.Context, conn connect.Client, cfg *config.Storage) error } } - err = resyncPhysicalRestores(ctx, conn, stg) + err = SyncBackupList(ctx, conn, cfg, "") if err != nil { - l.Error("resync physical restore metadata") + l.Error("failed 
to sync backup metadata: %v", err)
 	}
 
-	err = resyncOplogRange(ctx, conn, stg)
+	err = resyncOplogRange(ctx, conn, stg)
 	if err != nil {
-		l.Error("resync oplog range")
+		l.Error("failed to sync oplog range: %v", err)
 	}
 
-	err = SyncBackupList(ctx, conn, cfg, "")
+	err = resyncPhysicalRestores(ctx, conn, stg)
 	if err != nil {
-		l.Error("resync backup metadata")
+		l.Error("failed to sync physical restore metadata: %v", err)
 	}
 
 	return nil
diff --git a/sdk/util.go b/sdk/util.go
index e323b24c0..b4b533cc6 100644
--- a/sdk/util.go
+++ b/sdk/util.go
@@ -56,9 +56,15 @@ func WaitForResync(ctx context.Context, c Client, cid CommandID) error {
 	for {
 		select {
 		case entry := <-outC:
-			if entry != nil && entry.Msg == "succeed" {
+			if entry == nil {
+				continue
+			}
+			if entry.Msg == "succeed" {
 				return nil
 			}
+			if entry.Severity == log.Error {
+				return errors.New(entry.Msg)
+			}
 		case err := <-errC:
 			return err
 		}

From 9516f482fef608076d670c919c81e4640f34c577 Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Tue, 25 Jun 2024 14:11:27 +0200
Subject: [PATCH 055/203] fix unexpectedly leaked error

---
 cmd/pbm-agent/profile.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cmd/pbm-agent/profile.go b/cmd/pbm-agent/profile.go
index b429d9ff4..f4a432f45 100644
--- a/cmd/pbm-agent/profile.go
+++ b/cmd/pbm-agent/profile.go
@@ -74,7 +74,7 @@ func (a *Agent) handleAddConfigProfile(
 	}
 	defer func() {
 		l.Debug("releasing lock")
-		err = lck.Release()
+		err := lck.Release()
 		if err != nil {
 			l.Error("unable to release lock %v: %v", lck, err)
 		}
@@ -178,7 +178,7 @@ func (a *Agent) handleRemoveConfigProfile(
 	}
 	defer func() {
 		l.Debug("releasing lock")
-		err = lck.Release()
+		err := lck.Release()
 		if err != nil {
 			l.Error("unable to release lock %v: %v", lck, err)
 		}

From af4fc528ed849498c8133ba81a3dcac96c588dfd Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Tue, 25 Jun 2024 14:47:21 +0200
Subject: [PATCH 056/203] rename to previous names

---
 cmd/pbm-agent/pitr.go              | 14 +++---
 cmd/pbm/backup.go                  |  2 +-
 e2e-tests/cmd/ensure-oplog/main.go |  6 +--
 pbm/backup/backup.go               |  6 +--
 pbm/backup/delete.go               |  4 +-
 pbm/backup/physical.go             |  2 +-
 pbm/backup/storage.go              |  4 +-
 pbm/backup/types.go                |  2 +-
 pbm/config/config.go               | 76 +++++++++++++++---------------
 pbm/ctrl/cmd.go                    |  6 +--
 pbm/ctrl/send.go                   |  2 +-
 pbm/restore/logical.go             |  6 +--
 pbm/restore/physical.go            |  4 +-
 pbm/resync/rsync.go                | 10 ++--
 pbm/slicer/slicer.go               |  4 +-
 pbm/util/storage.go                |  2 +-
 sdk/impl.go                        |  4 +-
 17 files changed, 77 insertions(+), 77 deletions(-)

diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go
index 72b17f433..7bc724e9f 100644
--- a/cmd/pbm-agent/pitr.go
+++ b/cmd/pbm-agent/pitr.go
@@ -99,7 +99,7 @@ func (a *Agent) stopPitrOnOplogOnlyChange(currOO bool) {
 
 // canSlicingNow returns lock.ConcurrentOpError if there is a parallel operation.
 // Only physical backups (full, incremental, external) are allowed.
-func canSlicingNow(ctx context.Context, conn connect.Client, stgCfg *config.Storage) error { +func canSlicingNow(ctx context.Context, conn connect.Client, stgCfg *config.StorageConf) error { locks, err := lock.GetLocks(ctx, conn, &lock.LockHeader{}) if err != nil { return errors.Wrap(err, "get locks data") @@ -132,13 +132,13 @@ func (a *Agent) pitr(ctx context.Context) error { return errors.Wrap(err, "get conf") } cfg = &config.Config{ - Oplog: &config.GlobalSlicer{}, + PITR: &config.PITRConf{}, } } - a.stopPitrOnOplogOnlyChange(cfg.Oplog.OplogOnly) + a.stopPitrOnOplogOnlyChange(cfg.PITR.OplogOnly) - if !cfg.Oplog.Enabled { + if !cfg.PITR.Enabled { a.removePitr() return nil } @@ -229,7 +229,7 @@ func (a *Agent) pitr(ctx context.Context) error { s := slicer.NewSlicer(a.brief.SetName, a.leadConn, a.nodeConn, stg, cfg, log.FromContext(ctx)) s.SetSpan(slicerInterval) - if cfg.Oplog.OplogOnly { + if cfg.PITR.OplogOnly { err = s.OplogOnlyCatchup(ctx) } else { err = s.Catchup(ctx) @@ -262,8 +262,8 @@ func (a *Agent) pitr(ctx context.Context) error { streamErr := s.Stream(ctx, stopC, w, - cfg.Oplog.Compression, - cfg.Oplog.CompressionLevel, + cfg.PITR.Compression, + cfg.PITR.CompressionLevel, cfg.Backup.Timeouts) if streamErr != nil { out := l.Error diff --git a/cmd/pbm/backup.go b/cmd/pbm/backup.go index 9c49f0756..ad71a3fe8 100644 --- a/cmd/pbm/backup.go +++ b/cmd/pbm/backup.go @@ -364,7 +364,7 @@ func describeBackup(ctx context.Context, pbm sdk.Client, b *descBcp) (fmt.String if b.coll || bcp.Size == 0 { // to read backed up collection names // or calculate size of files for legacy backups - stg, err = util.StorageFromConfig(&bcp.Store.Storage, log.LogEventFromContext(ctx)) + stg, err = util.StorageFromConfig(&bcp.Store.StorageConf, log.LogEventFromContext(ctx)) if err != nil { return nil, errors.Wrap(err, "get storage") } diff --git a/e2e-tests/cmd/ensure-oplog/main.go b/e2e-tests/cmd/ensure-oplog/main.go index e7897ef5e..d2d453d47 100644 --- a/e2e-tests/cmd/ensure-oplog/main.go +++ b/e2e-tests/cmd/ensure-oplog/main.go @@ -258,9 +258,9 @@ func ensureReplsetOplog(ctx context.Context, uri string, from, till primitive.Ti compression := defs.DefaultCompression compressionLevel := (*int)(nil) - if cfg.Oplog != nil { - compression = compress.CompressionType(cfg.Oplog.Compression) - compressionLevel = cfg.Oplog.CompressionLevel + if cfg.PITR != nil { + compression = compress.CompressionType(cfg.PITR.Compression) + compressionLevel = cfg.PITR.CompressionLevel } for _, t := range missedChunks { diff --git a/pbm/backup/backup.go b/pbm/backup/backup.go index e0da6f9dd..44e843642 100644 --- a/pbm/backup/backup.go +++ b/pbm/backup/backup.go @@ -116,9 +116,9 @@ func (b *Backup) Init( Namespaces: bcp.Namespaces, Compression: bcp.Compression, Store: Storage{ - Name: b.config.Name, - IsProfile: b.config.IsProfile, - Storage: b.config.Storage, + Name: b.config.Name, + IsProfile: b.config.IsProfile, + StorageConf: b.config.Storage, }, StartTS: time.Now().Unix(), Status: defs.StatusStarting, diff --git a/pbm/backup/delete.go b/pbm/backup/delete.go index f0e29c8fd..99c4bf4f2 100644 --- a/pbm/backup/delete.go +++ b/pbm/backup/delete.go @@ -74,7 +74,7 @@ func deleteBackupImpl(ctx context.Context, cc connect.Client, bcp *BackupMeta) e return err } - stg, err := util.StorageFromConfig(&bcp.Store.Storage, log.LogEventFromContext(ctx)) + stg, err := util.StorageFromConfig(&bcp.Store.StorageConf, log.LogEventFromContext(ctx)) if err != nil { return errors.Wrap(err, "get storage") } @@ -108,7 +108,7 @@ func 
deleteIncremetalChainImpl(ctx context.Context, cc connect.Client, bcp *Back all = append(all, bcps...) } - stg, err := util.StorageFromConfig(&bcp.Store.Storage, log.LogEventFromContext(ctx)) + stg, err := util.StorageFromConfig(&bcp.Store.StorageConf, log.LogEventFromContext(ctx)) if err != nil { return errors.Wrap(err, "get storage") } diff --git a/pbm/backup/physical.go b/pbm/backup/physical.go index 3b3375015..3ee1d81cb 100644 --- a/pbm/backup/physical.go +++ b/pbm/backup/physical.go @@ -232,7 +232,7 @@ func (b *Backup) doPhysical( } } - if !b.config.Storage.Equal(&src.Store.Storage) { + if !b.config.Storage.Equal(&src.Store.StorageConf) { return errors.New("cannot use the configured storage: " + "source backup is stored on a different storage") } diff --git a/pbm/backup/storage.go b/pbm/backup/storage.go index 745bdac33..0ed7df098 100644 --- a/pbm/backup/storage.go +++ b/pbm/backup/storage.go @@ -24,11 +24,11 @@ type StorageManager interface { } type storageManagerImpl struct { - cfg *config.Storage + cfg *config.StorageConf stg storage.Storage } -func NewStorageManager(ctx context.Context, cfg *config.Storage) (*storageManagerImpl, error) { +func NewStorageManager(ctx context.Context, cfg *config.StorageConf) (*storageManagerImpl, error) { stg, err := util.StorageFromConfig(cfg, log.LogEventFromContext(ctx)) if err != nil { return nil, errors.Wrap(err, "unable to get backup store") diff --git a/pbm/backup/types.go b/pbm/backup/types.go index c22bd55cb..52e387872 100644 --- a/pbm/backup/types.go +++ b/pbm/backup/types.go @@ -95,7 +95,7 @@ type Storage struct { // IsProfile is true when storage is non-main (external). IsProfile bool `bson:"profile,omitempty" json:"profile,omitempty"` - config.Storage `bson:",inline" json:",inline"` + config.StorageConf `bson:",inline" json:",inline"` } // BackupRsNomination is used to choose (nominate and elect) nodes for the backup diff --git a/pbm/config/config.go b/pbm/config/config.go index ddef028cd..5a48aad03 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -69,10 +69,10 @@ type Config struct { Name string `bson:"name,omitempty" json:"name,omitempty" yaml:"name,omitempty"` IsProfile bool `bson:"profile,omitempty" json:"profile,omitempty" yaml:"profile,omitempty"` - Storage Storage `bson:"storage" json:"storage" yaml:"storage"` - Oplog *GlobalSlicer `bson:"pitr,omitempty" json:"pitr,omitempty" yaml:"pitr,omitempty"` - Backup *Backup `bson:"backup,omitempty" json:"backup,omitempty" yaml:"backup,omitempty"` - Restore *Restore `bson:"restore,omitempty" json:"restore,omitempty" yaml:"restore,omitempty"` + Storage StorageConf `bson:"storage" json:"storage" yaml:"storage"` + PITR *PITRConf `bson:"pitr,omitempty" json:"pitr,omitempty" yaml:"pitr,omitempty"` + Backup *BackupConf `bson:"backup,omitempty" json:"backup,omitempty" yaml:"backup,omitempty"` + Restore *RestoreConf `bson:"restore,omitempty" json:"restore,omitempty" yaml:"restore,omitempty"` Epoch primitive.Timestamp `bson:"epoch" json:"-" yaml:"-"` } @@ -104,7 +104,7 @@ func (c *Config) Clone() *Config { Name: c.Name, IsProfile: c.IsProfile, Storage: *c.Storage.Clone(), - Oplog: c.Oplog.Clone(), + PITR: c.PITR.Clone(), Restore: c.Restore.Clone(), Backup: c.Backup.Clone(), Epoch: c.Epoch, @@ -154,35 +154,35 @@ func (c *Config) String() string { // OplogSlicerInterval returns interval for general oplog slicer routine. // If it is not configured, the function returns default (hardcoded) value 10 mins. 
func (c *Config) OplogSlicerInterval() time.Duration { - if c.Oplog == nil || c.Oplog.Interval == 0 { + if c.PITR == nil || c.PITR.OplogSpanMin == 0 { return defs.DefaultPITRInterval } - return time.Duration(c.Oplog.Interval * float64(time.Minute)) + return time.Duration(c.PITR.OplogSpanMin * float64(time.Minute)) } // BackupSlicerInterval returns interval for backup slicer routine. // If it is not confugured, the function returns general oplog slicer interval. func (c *Config) BackupSlicerInterval() time.Duration { - if c.Backup == nil || c.Backup.SlicingInterval == 0 { + if c.Backup == nil || c.Backup.OplogSpanMin == 0 { return c.OplogSlicerInterval() } - return time.Duration(c.Backup.SlicingInterval * float64(time.Minute)) + return time.Duration(c.Backup.OplogSpanMin * float64(time.Minute)) } -// GlobalSlicer is a Point-In-Time Recovery options +// PITRConf is a Point-In-Time Recovery options // //nolint:lll -type GlobalSlicer struct { +type PITRConf struct { Enabled bool `bson:"enabled" json:"enabled" yaml:"enabled"` - Interval float64 `bson:"oplogSpanMin,omitempty" json:"oplogSpanMin,omitempty" yaml:"oplogSpanMin,omitempty"` + OplogSpanMin float64 `bson:"oplogSpanMin,omitempty" json:"oplogSpanMin,omitempty" yaml:"oplogSpanMin,omitempty"` OplogOnly bool `bson:"oplogOnly,omitempty" json:"oplogOnly,omitempty" yaml:"oplogOnly,omitempty"` Compression compress.CompressionType `bson:"compression,omitempty" json:"compression,omitempty" yaml:"compression,omitempty"` CompressionLevel *int `bson:"compressionLevel,omitempty" json:"compressionLevel,omitempty" yaml:"compressionLevel,omitempty"` } -func (cfg *GlobalSlicer) Clone() *GlobalSlicer { +func (cfg *PITRConf) Clone() *PITRConf { if cfg == nil { return nil } @@ -196,20 +196,20 @@ func (cfg *GlobalSlicer) Clone() *GlobalSlicer { return &rv } -// Storage is a configuration of the backup storage -type Storage struct { +// StorageConf is a configuration of the backup storage +type StorageConf struct { Type storage.Type `bson:"type" json:"type" yaml:"type"` S3 *s3.Config `bson:"s3,omitempty" json:"s3,omitempty" yaml:"s3,omitempty"` Azure *azure.Config `bson:"azure,omitempty" json:"azure,omitempty" yaml:"azure,omitempty"` Filesystem *fs.Config `bson:"filesystem,omitempty" json:"filesystem,omitempty" yaml:"filesystem,omitempty"` } -func (s *Storage) Clone() *Storage { +func (s *StorageConf) Clone() *StorageConf { if s == nil { return nil } - rv := &Storage{ + rv := &StorageConf{ Type: s.Type, } @@ -225,7 +225,7 @@ func (s *Storage) Clone() *Storage { return rv } -func (s *Storage) Equal(other *Storage) bool { +func (s *StorageConf) Equal(other *StorageConf) bool { if s.Type != other.Type { return false } @@ -242,7 +242,7 @@ func (s *Storage) Equal(other *Storage) bool { return false } -func (s *Storage) Cast() error { +func (s *StorageConf) Cast() error { switch s.Type { case storage.Filesystem: return s.Filesystem.Cast() @@ -255,7 +255,7 @@ func (s *Storage) Cast() error { return errors.Wrap(ErrUnkownStorageType, string(s.Type)) } -func (s *Storage) Typ() string { +func (s *StorageConf) Typ() string { switch s.Type { case storage.S3: return "S3" @@ -270,7 +270,7 @@ func (s *Storage) Typ() string { } } -func (s *Storage) Path() string { +func (s *StorageConf) Path() string { path := "" switch s.Type { case storage.S3: @@ -298,10 +298,10 @@ func (s *Storage) Path() string { return path } -// Restore is config options for the restore +// RestoreConf is config options for the restore // //nolint:lll -type Restore struct { +type RestoreConf struct { // 
Logical restore // // num of documents to buffer @@ -322,7 +322,7 @@ type Restore struct { MongodLocationMap map[string]string `bson:"mongodLocationMap" json:"mongodLocationMap,omitempty" yaml:"mongodLocationMap,omitempty"` } -func (cfg *Restore) Clone() *Restore { +func (cfg *RestoreConf) Clone() *RestoreConf { if cfg == nil { return nil } @@ -339,15 +339,15 @@ func (cfg *Restore) Clone() *Restore { } //nolint:lll -type Backup struct { - SlicingInterval float64 `bson:"oplogSpanMin" json:"oplogSpanMin" yaml:"oplogSpanMin"` +type BackupConf struct { + OplogSpanMin float64 `bson:"oplogSpanMin" json:"oplogSpanMin" yaml:"oplogSpanMin"` Priority map[string]float64 `bson:"priority,omitempty" json:"priority,omitempty" yaml:"priority,omitempty"` Timeouts *BackupTimeouts `bson:"timeouts,omitempty" json:"timeouts,omitempty" yaml:"timeouts,omitempty"` Compression compress.CompressionType `bson:"compression,omitempty" json:"compression,omitempty" yaml:"compression,omitempty"` CompressionLevel *int `bson:"compressionLevel,omitempty" json:"compressionLevel,omitempty" yaml:"compressionLevel,omitempty"` } -func (cfg *Backup) Clone() *Backup { +func (cfg *BackupConf) Clone() *BackupConf { if cfg == nil { return nil } @@ -400,24 +400,24 @@ func GetConfig(ctx context.Context, m connect.Client) (*Config, error) { return nil, errors.Wrap(err, "decode") } - if cfg.Oplog == nil { - cfg.Oplog = &GlobalSlicer{} + if cfg.PITR == nil { + cfg.PITR = &PITRConf{} } if cfg.Backup == nil { - cfg.Backup = &Backup{} + cfg.Backup = &BackupConf{} } if cfg.Restore == nil { - cfg.Restore = &Restore{} + cfg.Restore = &RestoreConf{} } if cfg.Backup.Compression == "" { cfg.Backup.Compression = defs.DefaultCompression } - if cfg.Oplog.Compression == "" { - cfg.Oplog.Compression = cfg.Backup.Compression + if cfg.PITR.Compression == "" { + cfg.PITR.Compression = cfg.Backup.Compression } - if cfg.Oplog.CompressionLevel == nil { - cfg.Oplog.CompressionLevel = cfg.Backup.CompressionLevel + if cfg.PITR.CompressionLevel == nil { + cfg.PITR.CompressionLevel = cfg.Backup.CompressionLevel } return cfg, nil @@ -437,8 +437,8 @@ func SetConfig(ctx context.Context, m connect.Client, cfg *Config) error { s3.SDKLogLevel(cfg.Storage.S3.DebugLogLevels, os.Stderr) } - if cfg.Oplog != nil { - if c := string(cfg.Oplog.Compression); c != "" && !compress.IsValidCompressionType(c) { + if cfg.PITR != nil { + if c := string(cfg.PITR.Compression); c != "" && !compress.IsValidCompressionType(c) { return errors.Errorf("unsupported compression type: %q", c) } } @@ -579,7 +579,7 @@ func IsPITREnabled(ctx context.Context, m connect.Client) (bool, bool, error) { return false, false, errors.Wrap(err, "get config") } - return cfg.Oplog.Enabled, cfg.Oplog.OplogOnly, nil + return cfg.PITR.Enabled, cfg.PITR.OplogOnly, nil } type Epoch primitive.Timestamp diff --git a/pbm/ctrl/cmd.go b/pbm/ctrl/cmd.go index 2eb7542d6..3778681d6 100644 --- a/pbm/ctrl/cmd.go +++ b/pbm/ctrl/cmd.go @@ -115,9 +115,9 @@ func (c Cmd) String() string { } type ProfileCmd struct { - Name string `bson:"name"` - IsProfile bool `bson:"profile"` - Storage config.Storage `bson:"storage"` + Name string `bson:"name"` + IsProfile bool `bson:"profile"` + Storage config.StorageConf `bson:"storage"` } type ResyncCmd struct { diff --git a/pbm/ctrl/send.go b/pbm/ctrl/send.go index 4bccaa1d8..8195f7327 100644 --- a/pbm/ctrl/send.go +++ b/pbm/ctrl/send.go @@ -70,7 +70,7 @@ func SendAddConfigProfile( ctx context.Context, m connect.Client, name string, - storage config.Storage, + storage config.StorageConf, ) 
(OPID, error) { cmd := Cmd{ Cmd: CmdAddConfigProfile, diff --git a/pbm/restore/logical.go b/pbm/restore/logical.go index 69903c967..2382bcba0 100644 --- a/pbm/restore/logical.go +++ b/pbm/restore/logical.go @@ -163,7 +163,7 @@ func (r *Restore) Snapshot( return err } - r.stg, err = util.StorageFromConfig(&bcp.Store.Storage, r.log) + r.stg, err = util.StorageFromConfig(&bcp.Store.StorageConf, r.log) if err != nil { return errors.Wrap(err, "get backup storage") } @@ -281,7 +281,7 @@ func (r *Restore) PITR( "Try to set an earlier snapshot. Or leave the snapshot empty so PBM will choose one.") } - r.stg, err = util.StorageFromConfig(&bcp.Store.Storage, r.log) + r.stg, err = util.StorageFromConfig(&bcp.Store.StorageConf, r.log) if err != nil { return errors.Wrap(err, "get backup storage") } @@ -776,7 +776,7 @@ func (r *Restore) RunSnapshot( rdr, err = snapshot.DownloadDump( func(ns string) (io.ReadCloser, error) { - stg, err := util.StorageFromConfig(&bcp.Store.Storage, r.log) + stg, err := util.StorageFromConfig(&bcp.Store.StorageConf, r.log) if err != nil { return nil, errors.Wrap(err, "get storage") } diff --git a/pbm/restore/physical.go b/pbm/restore/physical.go index 7aad10c39..ffb46f008 100644 --- a/pbm/restore/physical.go +++ b/pbm/restore/physical.go @@ -86,7 +86,7 @@ type PhysRestore struct { files []files restoreTS primitive.Timestamp - confOpts *config.Restore + confOpts *config.RestoreConf mongod string // location of mongod used for internal restarts @@ -2149,7 +2149,7 @@ func (r *PhysRestore) prepareBackup(ctx context.Context, backupName string) erro return errors.Wrap(err, "get backup metadata") } - r.bcpStg, err = util.StorageFromConfig(&r.bcp.Store.Storage, log.LogEventFromContext(ctx)) + r.bcpStg, err = util.StorageFromConfig(&r.bcp.Store.StorageConf, log.LogEventFromContext(ctx)) if err != nil { return errors.Wrap(err, "get backup storage") } diff --git a/pbm/resync/rsync.go b/pbm/resync/rsync.go index 04681a4d8..78f223d91 100644 --- a/pbm/resync/rsync.go +++ b/pbm/resync/rsync.go @@ -26,7 +26,7 @@ import ( // // It checks for read and write permissions, drops all meta from the database // and populate it again by reading meta from the storage. 
-func Resync(ctx context.Context, conn connect.Client, cfg *config.Storage) error { +func Resync(ctx context.Context, conn connect.Client, cfg *config.StorageConf) error { l := log.LogEventFromContext(ctx) stg, err := util.StorageFromConfig(cfg, l) @@ -95,7 +95,7 @@ func ClearBackupList(ctx context.Context, conn connect.Client, profile string) e func SyncBackupList( ctx context.Context, conn connect.Client, - cfg *config.Storage, + cfg *config.StorageConf, profile string, ) error { l := log.LogEventFromContext(ctx) @@ -122,9 +122,9 @@ func SyncBackupList( } backupStore := backup.Storage{ - Name: profile, - IsProfile: profile != "", - Storage: *cfg, + Name: profile, + IsProfile: profile != "", + StorageConf: *cfg, } for i := range backupList { diff --git a/pbm/slicer/slicer.go b/pbm/slicer/slicer.go index 740510e22..fcb85e1c2 100644 --- a/pbm/slicer/slicer.go +++ b/pbm/slicer/slicer.go @@ -141,8 +141,8 @@ func (s *Slicer) Catchup(ctx context.Context) error { err = s.upload(ctx, lastChunk.EndTS, rs.FirstWriteTS, - cfg.Oplog.Compression, - cfg.Oplog.CompressionLevel) + cfg.PITR.Compression, + cfg.PITR.CompressionLevel) if err != nil { return err } diff --git a/pbm/util/storage.go b/pbm/util/storage.go index 1d3447a98..13e4669b6 100644 --- a/pbm/util/storage.go +++ b/pbm/util/storage.go @@ -17,7 +17,7 @@ import ( var ErrStorageUndefined = errors.New("storage undefined") // StorageFromConfig creates and returns a storage object based on a given config -func StorageFromConfig(cfg *config.Storage, l log.LogEvent) (storage.Storage, error) { +func StorageFromConfig(cfg *config.StorageConf, l log.LogEvent) (storage.Storage, error) { switch cfg.Type { case storage.S3: return s3.New(cfg.S3, l) diff --git a/sdk/impl.go b/sdk/impl.go index 3ada4e03f..1c3c0c802 100644 --- a/sdk/impl.go +++ b/sdk/impl.go @@ -166,7 +166,7 @@ func fillFilelistForBackup(ctx context.Context, bcp *BackupMetadata) error { eg.SetLimit(runtime.NumCPU()) if version.HasFilelistFile(bcp.PBMVersion) { - stg, err = util.StorageFromConfig(&bcp.Store.Storage, log.LogEventFromContext(ctx)) + stg, err = util.StorageFromConfig(&bcp.Store.StorageConf, log.LogEventFromContext(ctx)) if err != nil { return errors.Wrap(err, "get storage") } @@ -225,7 +225,7 @@ func fillFilelistForBackup(ctx context.Context, bcp *BackupMetadata) error { } func getStorageForRead(ctx context.Context, bcp *backup.BackupMeta) (storage.Storage, error) { - stg, err := util.StorageFromConfig(&bcp.Store.Storage, log.LogEventFromContext(ctx)) + stg, err := util.StorageFromConfig(&bcp.Store.StorageConf, log.LogEventFromContext(ctx)) if err != nil { return nil, errors.Wrap(err, "get storage") } From 70a80c4e34e9d85bf0d79d3116faf150c0b51a57 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 25 Jun 2024 18:54:27 +0200 Subject: [PATCH 057/203] Expand PITRMeta with cluster status and RS info --- pbm/oplog/nomination.go | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/pbm/oplog/nomination.go b/pbm/oplog/nomination.go index 97ee5a64a..585c53c1f 100644 --- a/pbm/oplog/nomination.go +++ b/pbm/oplog/nomination.go @@ -13,10 +13,10 @@ import ( // PITRMeta contains all operational data about PITR execution process. 
type PITRMeta struct {
-	StartTS int64 `bson:"start_ts" json:"start_ts"`
-	// Hb primitive.Timestamp `bson:"hb" json:"hb"`
-	// Status defs.Status `bson:"status" json:"status"`
+	StartTS int64 `bson:"start_ts" json:"start_ts"`
+	Status Status `bson:"status" json:"status"`
 	Nomination []PITRNomination `bson:"n" json:"n"`
+	Replsets []PITRReplset `bson:"replsets" json:"replsets"`
 }
 
 // PITRNomination is used to choose (nominate and elect) member(s)
@@ -27,11 +27,33 @@ type PITRNomination struct {
 	Ack string `bson:"ack" json:"ack"`
 }
 
+// PITRReplset holds status for each replica set.
+// Each replica set tries to reach the cluster status set by the Cluster Leader.
+type PITRReplset struct {
+	Name string `bson:"name" json:"name"`
+	Node string `bson:"node" json:"node"`
+	Status Status `bson:"status" json:"status"`
+	Error string `bson:"error,omitempty" json:"error,omitempty"`
+}
+
+// Status is a PITR status.
+// It is used within the pbmPITR collection to sync operations between
+// the cluster leader and agents.
+type Status string
+
+const (
+	StatusReady    Status = "ready"
+	StatusRunning  Status = "running"
+	StatusReconfig Status = "reconfig"
+	StatusError    Status = "error"
+)
+
 // Init adds initial PITR document.
 func InitMeta(ctx context.Context, conn connect.Client) error {
 	pitrMeta := PITRMeta{
 		StartTS: time.Now().Unix(),
 		Nomination: []PITRNomination{},
+		Replsets: []PITRReplset{},
 	}
 	_, err := conn.PITRCollection().ReplaceOne(
 		ctx,

From 7daa8ee6a6a8c6744a649f33da31a4aa41390bc0 Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Wed, 26 Jun 2024 09:41:42 +0200
Subject: [PATCH 058/203] log error once

---
 cmd/pbm-agent/profile.go | 24 ++++++++++++------------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/cmd/pbm-agent/profile.go b/cmd/pbm-agent/profile.go
index f4a432f45..bd23edf81 100644
--- a/cmd/pbm-agent/profile.go
+++ b/cmd/pbm-agent/profile.go
@@ -65,11 +65,11 @@ func (a *Agent) handleAddConfigProfile(
 
 	got, err := a.acquireLock(ctx, lck, l)
 	if err != nil {
-		l.Error("acquiring lock: %v", err)
+		err = errors.Wrap(err, "acquiring lock")
 		return
 	}
 	if !got {
-		l.Error("lock not acquired")
+		err = errors.New("lock not acquired")
 		return
 	}
 	defer func() {
@@ -82,7 +82,7 @@ func (a *Agent) handleAddConfigProfile(
 
 	err = cmd.Storage.Cast()
 	if err != nil {
-		l.Error("storage cast: %v", err)
+		err = errors.Wrap(err, "storage cast")
 		return
 	}
 
@@ -95,13 +95,13 @@ func (a *Agent) handleAddConfigProfile(
 	err = storage.HasReadAccess(ctx, stg)
 	if err != nil {
 		if !errors.Is(err, storage.ErrUninitialized) {
-			l.Error("check read access: %v", err)
+			err = errors.Wrap(err, "check read access")
 			return
 		}
 
 		err = storage.Initialize(ctx, stg)
 		if err != nil {
-			l.Error("init storage: %v", err)
+			err = errors.Wrap(err, "init storage")
 			return
 		}
 	}
@@ -113,7 +113,7 @@ func (a *Agent) handleAddConfigProfile(
 	}
 	err = config.AddProfile(ctx, a.leadConn, profile)
 	if err != nil {
-		l.Error("add profile config: %v", err)
+		err = errors.Wrap(err, "add profile config")
 		return
 	}
 
@@ -169,11 +169,11 @@ func (a *Agent) handleRemoveConfigProfile(
 
 	got, err := a.acquireLock(ctx, lck, l)
 	if err != nil {
-		l.Error("acquiring lock: %v", err)
+		err = errors.Wrap(err, "acquiring lock")
 		return
 	}
 	if !got {
-		l.Error("lock not acquired")
+		err = errors.New("lock not acquired")
 		return
 	}
 	defer func() {
@@ -187,23 +187,23 @@ func (a *Agent) handleRemoveConfigProfile(
 	_, err = config.GetProfile(ctx, a.leadConn, cmd.Name)
 	if err != nil {
 		if errors.Is(err, mongo.ErrNoDocuments) {
-			l.Warning("profile %q is not found", cmd.Name)
+			err = 
errors.Errorf("profile %q is not found", cmd.Name) return } - l.Error("get config profile: %v", err) + err = errors.Wrap(err, "get config profile") return } err = resync.ClearBackupList(ctx, a.leadConn, cmd.Name) if err != nil { - l.Error("clear backup list: %v", err) + err = errors.Wrap(err, "clear backup list") return } err = config.RemoveProfile(ctx, a.leadConn, cmd.Name) if err != nil { - l.Error("delete document", err) + err = errors.Wrap(err, "delete document") return } } From 06fab90680e2d98040d41edf4b5d105070aea41f Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 26 Jun 2024 09:59:11 +0200 Subject: [PATCH 059/203] fix: panic on arbiter node during backup --- pbm/topo/status.go | 4 ++++ pbm/topo/topo.go | 3 +++ 2 files changed, 7 insertions(+) diff --git a/pbm/topo/status.go b/pbm/topo/status.go index a6f89a195..c55c688fe 100644 --- a/pbm/topo/status.go +++ b/pbm/topo/status.go @@ -34,6 +34,10 @@ type NodeStatus struct { SyncingTo string `bson:"syncingTo,omitempty" json:"syncingTo,omitempty"` } +func (s *NodeStatus) IsArbiter() bool { + return s.State == 7 // StateStr == "ARBITER" +} + type StatusOpTimes struct { LastCommittedOpTime *OpTime `bson:"lastCommittedOpTime" json:"lastCommittedOpTime"` ReadConcernMajorityOpTime *OpTime `bson:"readConcernMajorityOpTime" json:"readConcernMajorityOpTime"` diff --git a/pbm/topo/topo.go b/pbm/topo/topo.go index ab4f2c493..4374ac674 100644 --- a/pbm/topo/topo.go +++ b/pbm/topo/topo.go @@ -158,6 +158,9 @@ func NodeSuits(ctx context.Context, m *mongo.Client, inf *NodeInfo) (bool, error if err != nil { return false, errors.Wrap(err, "get node status") } + if status.IsArbiter() { + return false, nil + } replLag, err := ReplicationLag(ctx, m, inf.Me) if err != nil { From e5c356b5dd6d308fe2b8019c08e872c580ead9b2 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 26 Jun 2024 10:23:03 +0200 Subject: [PATCH 060/203] fix: wrong storage is used for oplog chunks --- pbm/restore/logical.go | 38 +++++++++++++++++++------------------- pbm/restore/selective.go | 8 ++++---- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/pbm/restore/logical.go b/pbm/restore/logical.go index 2382bcba0..f46b89ea2 100644 --- a/pbm/restore/logical.go +++ b/pbm/restore/logical.go @@ -44,7 +44,8 @@ type Restore struct { brief topo.NodeBrief stopHB chan struct{} nodeInfo *topo.NodeInfo - stg storage.Storage + bcpStg storage.Storage + oplogStg storage.Storage // Shards to participate in restore. Num of shards in bcp could // be less than in the cluster and this is ok. Only these shards // would be expected to run restore (distributed transactions sync, @@ -163,7 +164,7 @@ func (r *Restore) Snapshot( return err } - r.stg, err = util.StorageFromConfig(&bcp.Store.StorageConf, r.log) + r.bcpStg, err = util.StorageFromConfig(&bcp.Store.StorageConf, r.log) if err != nil { return errors.Wrap(err, "get backup storage") } @@ -211,7 +212,7 @@ func (r *Restore) Snapshot( } oplogRanges := []oplogRange{ - {chunks: chunks, storage: r.stg}, + {chunks: chunks, storage: r.bcpStg}, } oplogOption := &applyOplogOption{end: &bcp.LastWriteTS, nss: nss} if r.nodeInfo.IsConfigSrv() && util.IsSelective(nss) { @@ -281,10 +282,14 @@ func (r *Restore) PITR( "Try to set an earlier snapshot. 
Or leave the snapshot empty so PBM will choose one.") } - r.stg, err = util.StorageFromConfig(&bcp.Store.StorageConf, r.log) + r.bcpStg, err = util.StorageFromConfig(&bcp.Store.StorageConf, r.log) if err != nil { return errors.Wrap(err, "get backup storage") } + r.oplogStg, err = util.GetStorage(ctx, r.leadConn, log.LogEventFromContext(ctx)) + if err != nil { + return errors.Wrap(err, "get oplog storage") + } nss := resolveNamespace(bcp.Namespaces, cmd.Namespaces, cmd.UsersAndRoles) usersAndRolesOpt := shouldRestoreUsersAndRoles(bcp.Namespaces, cmd.Namespaces, cmd.UsersAndRoles) @@ -349,14 +354,9 @@ func (r *Restore) PITR( return err } - oplogStorage, err := util.GetStorage(ctx, r.leadConn, l) - if err != nil { - return errors.Wrap(err, "get oplog storage") - } - oplogRanges := []oplogRange{ - {chunks: bcpChunks, storage: r.stg}, - {chunks: chunks, storage: oplogStorage}, + {chunks: bcpChunks, storage: r.bcpStg}, + {chunks: chunks, storage: r.oplogStg}, } oplogOption := applyOplogOption{end: &cmd.OplogTS, nss: nss} if r.nodeInfo.IsConfigSrv() && util.IsSelective(nss) { @@ -418,9 +418,9 @@ func (r *Restore) ReplayOplog(ctx context.Context, cmd *ctrl.ReplayCmd, opid ctr return r.Done(ctx) // skip. no oplog for current rs } - r.stg, err = util.GetStorage(ctx, r.leadConn, log.LogEventFromContext(ctx)) + r.oplogStg, err = util.GetStorage(ctx, r.leadConn, log.LogEventFromContext(ctx)) if err != nil { - return errors.Wrapf(err, "get storage") + return errors.Wrapf(err, "get oplog storage") } opChunks, err := r.chunks(ctx, cmd.Start, cmd.End) @@ -434,7 +434,7 @@ func (r *Restore) ReplayOplog(ctx context.Context, cmd *ctrl.ReplayCmd, opid ctr } oplogRanges := []oplogRange{ - {chunks: opChunks, storage: r.stg}, + {chunks: opChunks, storage: r.oplogStg}, } oplogOption := applyOplogOption{ start: &cmd.Start, @@ -553,7 +553,7 @@ func (r *Restore) checkTopologyForOplog(currShards []topo.Shard, oplogShards []s // is contiguous - there are no gaps), checks for respective files on storage and returns // chunks list if all checks passed func (r *Restore) chunks(ctx context.Context, from, to primitive.Timestamp) ([]oplog.OplogChunk, error) { - return chunks(ctx, r.leadConn, r.stg, from, to, r.nodeInfo.SetName, r.rsMap) + return chunks(ctx, r.leadConn, r.oplogStg, from, to, r.nodeInfo.SetName, r.rsMap) } // LookupBackupMeta fetches backup metadata. 
@@ -646,11 +646,11 @@ func (r *Restore) snapshotObjects(bcp *backup.BackupMeta) (string, []oplog.Oplog return "", nil, ErrNoDataForShard } - if _, err := r.stg.FileStat(rsMeta.DumpName); err != nil { + if _, err := r.bcpStg.FileStat(rsMeta.DumpName); err != nil { return "", nil, errors.Wrapf(err, "failed to ensure snapshot file %s", rsMeta.DumpName) } if version.IsLegacyBackupOplog(bcp.PBMVersion) { - if _, err := r.stg.FileStat(rsMeta.OplogName); err != nil { + if _, err := r.bcpStg.FileStat(rsMeta.OplogName); err != nil { return "", nil, errors.Errorf("failed to ensure oplog file %s: %v", rsMeta.OplogName, err) } @@ -664,7 +664,7 @@ func (r *Restore) snapshotObjects(bcp *backup.BackupMeta) (string, []oplog.Oplog return rsMeta.DumpName, chunks, nil } - files, err := r.stg.List(rsMeta.OplogName, "") + files, err := r.bcpStg.List(rsMeta.OplogName, "") if err != nil { return "", nil, errors.Wrap(err, "failed to list oplog files") } @@ -742,7 +742,7 @@ func (r *Restore) RunSnapshot( var err error if version.IsLegacyArchive(bcp.PBMVersion) { - sr, err := r.stg.SourceReader(dump) + sr, err := r.bcpStg.SourceReader(dump) if err != nil { return errors.Wrapf(err, "get object %s for the storage", dump) } diff --git a/pbm/restore/selective.go b/pbm/restore/selective.go index f38b1c86d..be9619307 100644 --- a/pbm/restore/selective.go +++ b/pbm/restore/selective.go @@ -34,7 +34,7 @@ func (r *Restore) configsvrRestore( mapRS util.RSMapFunc, ) error { mapS := util.MakeRSMapFunc(r.sMap) - available, err := fetchAvailability(bcp, r.stg) + available, err := fetchAvailability(bcp, r.bcpStg) if err != nil { return err } @@ -123,7 +123,7 @@ func (r *Restore) configsvrRestoreDatabases( mapRS, mapS util.RSMapFunc, ) error { filepath := path.Join(bcp.Name, mapRS(r.brief.SetName), "config.databases"+bcp.Compression.Suffix()) - rdr, err := r.stg.SourceReader(filepath) + rdr, err := r.bcpStg.SourceReader(filepath) if err != nil { return err } @@ -206,7 +206,7 @@ func (r *Restore) configsvrRestoreCollections( } filepath := path.Join(bcp.Name, mapRS(r.brief.SetName), "config.collections"+bcp.Compression.Suffix()) - rdr, err := r.stg.SourceReader(filepath) + rdr, err := r.bcpStg.SourceReader(filepath) if err != nil { return nil, err } @@ -270,7 +270,7 @@ func (r *Restore) configsvrRestoreChunks( mapS util.RSMapFunc, ) error { filepath := path.Join(bcp.Name, mapRS(r.brief.SetName), "config.chunks"+bcp.Compression.Suffix()) - rdr, err := r.stg.SourceReader(filepath) + rdr, err := r.bcpStg.SourceReader(filepath) if err != nil { return err } From c14e04e0097c6bdbfd56e6e7d9ab0e9eacdf36b7 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 26 Jun 2024 10:36:37 +0200 Subject: [PATCH 061/203] Add db logic for pbmPITR status related operations --- pbm/oplog/nomination.go | 70 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 69 insertions(+), 1 deletion(-) diff --git a/pbm/oplog/nomination.go b/pbm/oplog/nomination.go index 585c53c1f..6d4b5e2c4 100644 --- a/pbm/oplog/nomination.go +++ b/pbm/oplog/nomination.go @@ -65,6 +65,75 @@ func InitMeta(ctx context.Context, conn connect.Client) error { return errors.Wrap(err, "pitr meta replace") } +// GetMeta fetches PITR meta doc from pbmPITR collection. 
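+// It returns errors.ErrNotFound if the meta document does not exist yet.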
+func GetMeta(
+	ctx context.Context,
+	conn connect.Client,
+) (*PITRMeta, error) {
+	res := conn.PITRCollection().FindOne(ctx, bson.D{})
+	if err := res.Err(); err != nil {
+		if errors.Is(err, mongo.ErrNoDocuments) {
+			return nil, errors.ErrNotFound
+		}
+		return nil, errors.Wrap(err, "find pitr meta")
+	}
+
+	meta := &PITRMeta{}
+	if err := res.Decode(meta); err != nil {
+		return nil, errors.Wrap(err, "decode")
+	}
+	return meta, nil
+}
+
+// SetClusterStatus sets the cluster status field of the PITR meta doc.
+// It also resets the content of the replsets field.
+func SetClusterStatus(ctx context.Context, conn connect.Client, status Status) error {
+	_, err := conn.PITRCollection().
+		UpdateOne(
+			ctx,
+			bson.D{},
+			bson.D{{"$set", bson.M{
+				"status":   status,
+				"replsets": []PITRReplset{},
+			}}},
+			options.Update().SetUpsert(true),
+		)
+	return errors.Wrap(err, "update pitr doc to status")
+}
+
+// SetReadyRSStatus sets Ready status for specified replicaset.
+func SetReadyRSStatus(ctx context.Context, conn connect.Client, rs, node string) error {
+	repliset := PITRReplset{
+		Name:   rs,
+		Node:   node,
+		Status: StatusReady,
+	}
+	_, err := conn.PITRCollection().
+		UpdateOne(
+			ctx,
+			bson.D{},
+			bson.D{{"$addToSet", bson.M{"replsets": repliset}}},
+			options.Update().SetUpsert(true),
+		)
+	return errors.Wrap(err, "update pitr doc for RS ready status")
+}
+
+// GetReadyReplSets fetches all replica sets that have reported the Ready status.
+func GetReadyReplSets(ctx context.Context, conn connect.Client) ([]PITRReplset, error) {
+	meta, err := GetMeta(ctx, conn)
+	if err != nil {
+		return nil, errors.Wrap(err, "get meta")
+	}
+
+	readyReplsets := []PITRReplset{}
+	for _, rs := range meta.Replsets {
+		if rs.Status == StatusReady {
+			readyReplsets = append(readyReplsets, rs)
+		}
+	}
+	return readyReplsets, nil
+}
+
 // SetPITRNomination adds nomination fragment for specified RS within PITRMeta.
 func SetPITRNomination(ctx context.Context, conn connect.Client, rs string) error {
 	n := PITRNomination{
@@ -78,7 +147,6 @@ func SetPITRNomination(ctx context.Context, conn connect.Client, rs string) erro
 		bson.D{{"$addToSet", bson.M{"n": n}}},
 		options.Update().SetUpsert(true),
 	)
-
 	return errors.Wrap(err, "update pitr nomination")
 }

From 9eb6bd335e459ebd0b80927be64fb296c6120fdc Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Wed, 26 Jun 2024 11:49:08 +0200
Subject: [PATCH 062/203] Add get cluster status func

---
 pbm/oplog/nomination.go | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/pbm/oplog/nomination.go b/pbm/oplog/nomination.go
index 6d4b5e2c4..83090659b 100644
--- a/pbm/oplog/nomination.go
+++ b/pbm/oplog/nomination.go
@@ -46,6 +46,7 @@ const (
 	StatusRunning  Status = "running"
 	StatusReconfig Status = "reconfig"
 	StatusError    Status = "error"
+	StatusUnset    Status = ""
 )
 
 // Init adds initial PITR document.
@@ -101,6 +102,18 @@ func SetClusterStatus(ctx context.Context, conn connect.Client, status Status) e
 	return errors.Wrap(err, "update pitr doc to status")
 }
 
+func GetClusterStatus(ctx context.Context, conn connect.Client) (Status, error) {
+	meta, err := GetMeta(ctx, conn)
+	if err != nil {
+		if errors.Is(err, errors.ErrNotFound) {
+			return StatusUnset, err
+		}
+		return StatusUnset, errors.Wrap(err, "getting meta")
+	}
+
+	return meta.Status, nil
+}
+
 // SetReadyRSStatus sets Ready status for specified replicaset.
func SetReadyRSStatus(ctx context.Context, conn connect.Client, rs, node string) error {
 	repliset := PITRReplset{
@@ -124,7 +137,7 @@ func SetReadyRSStatus(ctx context.Context, conn connect.Client, rs, node string)
 }
 
 // GetPITRNominees fetches nomination fragment for specified RS
-// from PITRMeta document.
+// from pbmPITR document.
 // If document is not found, or document fragment for specific RS is not found,
 // error ErrNotFound is returned.
 func GetPITRNominees(
 	ctx context.Context,
 	conn connect.Client,
 	rs string,
 ) (*PITRNomination, error) {
-	res := conn.PITRCollection().FindOne(ctx, bson.D{})
-	if err := res.Err(); err != nil {
-		if errors.Is(err, mongo.ErrNoDocuments) {
-			return nil, errors.ErrNotFound
-		}
-		return nil, errors.Wrap(err, "find pitr meta")
-	}
-
-	meta := &PITRMeta{}
-	if err := res.Decode(meta); err != nil {
-		errors.Wrap(err, "decode")
+	meta, err := GetMeta(ctx, conn)
+	if err != nil {
+		return nil, errors.Wrap(err, "get meta")
 	}
 
 	for _, n := range meta.Nomination {

From 94b621cf0485184aa5fe6c179bbb4e9ca4cf97ef Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Wed, 26 Jun 2024 11:49:32 +0200
Subject: [PATCH 063/203] rename sdk method

---
 cmd/pbm/profile.go | 2 +-
 sdk/impl.go        | 2 +-
 sdk/sdk.go         | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/cmd/pbm/profile.go b/cmd/pbm/profile.go
index d3a6caa92..e7b1a7eee 100644
--- a/cmd/pbm/profile.go
+++ b/cmd/pbm/profile.go
@@ -56,7 +56,7 @@ func (l configProfileList) String() string {
 }
 
 func handleListConfigProfiles(ctx context.Context, pbm sdk.Client) (fmt.Stringer, error) {
-	profiles, err := pbm.ListConfigProfiles(ctx)
+	profiles, err := pbm.GetAllConfigProfiles(ctx)
 	if err != nil {
 		return nil, err
 	}
diff --git a/sdk/impl.go b/sdk/impl.go
index 1c3c0c802..7be8b9a20 100644
--- a/sdk/impl.go
+++ b/sdk/impl.go
@@ -78,7 +78,7 @@ func (c *clientImpl) GetConfig(ctx context.Context) (*Config, error) {
 	return config.GetConfig(ctx, c.conn)
 }
 
-func (c *clientImpl) ListConfigProfiles(ctx context.Context) ([]config.Config, error) {
+func (c *clientImpl) GetAllConfigProfiles(ctx context.Context) ([]config.Config, error) {
 	return config.ListProfiles(ctx, c.conn)
 }
 
diff --git a/sdk/sdk.go b/sdk/sdk.go
index e0fc3a069..cfb54cbe0 100644
--- a/sdk/sdk.go
+++ b/sdk/sdk.go
@@ -109,7 +109,7 @@ type Client interface {
 	CommandInfo(ctx context.Context, id CommandID) (*Command, error)
 
 	GetConfig(ctx context.Context) (*Config, error)
-	ListConfigProfiles(ctx context.Context) ([]config.Config, error)
+	GetAllConfigProfiles(ctx context.Context) ([]config.Config, error)
 	GetConfigProfile(ctx context.Context, name string) (*config.Config, error)
 	AddConfigProfile(ctx context.Context, name string, cfg *config.Config) (CommandID, error)
 	RemoveConfigProfile(ctx context.Context, name string) (CommandID, error)

From ee6310800dd3fe689fbf6b29331ac6a7bf12da87 Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Wed, 26 Jun 2024 11:53:59 +0200
Subject: [PATCH 064/203] simplify resync command

---
 cmd/pbm-agent/agent.go | 141 ++++++++++++++++++++++------------------
 1 file changed, 77 insertions(+), 64 deletions(-)

diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go
index d65a22965..4117f4c03 100644
--- a/cmd/pbm-agent/agent.go
+++ b/cmd/pbm-agent/agent.go
@@ -9,6 +9,7 @@ import (
 
 	"go.mongodb.org/mongo-driver/bson/primitive"
 	"go.mongodb.org/mongo-driver/mongo"
+	"golang.org/x/sync/errgroup"
 
 	"github.com/percona/percona-backup-mongodb/pbm/config"
 	"github.com/percona/percona-backup-mongodb/pbm/connect"
@@ -225,80 +226,92 @@ func 
(a *Agent) Resync(ctx context.Context, cmd *ctrl.ResyncCmd, opid ctrl.OPID, l.Info("started") if cmd.All { - profiles, err := config.ListProfiles(ctx, a.leadConn) - if err != nil { - l.Error("get config profiles: %v", err) - return - } - - if cmd.Clear { - l.Debug("clearing backup list for %d config profiles", len(profiles)) - for i := range profiles { - name := profiles[i].Name - err = resync.ClearBackupList(ctx, a.leadConn, name) - if err != nil { - l.Error("clear backup list for %q: %v", name, err) - } - } - } else { - l.Debug("syncing backup list for %d config profiles", len(profiles)) - for i := range profiles { - profile := &profiles[i] - err = resync.SyncBackupList(ctx, a.leadConn, &profile.Storage, profile.Name) - if err != nil { - l.Error("sync backup list for %q: %v", profile.Name, err) - return - } - } - } + err = handleSyncAllProfiles(ctx, a.leadConn, cmd.Clear) } else if cmd.Name != "" { - profile, err := config.GetProfile(ctx, a.leadConn, cmd.Name) - if err != nil { - if errors.Is(err, mongo.ErrNoDocuments) { - err = errors.Errorf("profile %q not found", cmd.Name) - } + err = handleSyncProfile(ctx, a.leadConn, cmd.Name, cmd.Clear) + } else { + err = handleSyncMainStorage(ctx, a.leadConn) + } + if err != nil { + l.Error(err.Error()) + return + } - l.Error("get config profile: %v", err) - return - } + l.Info("succeed") +} - if cmd.Clear { - l.Debug("clearing backup list for %q", profile.Name) - err = resync.ClearBackupList(ctx, a.leadConn, profile.Name) - if err != nil { - l.Error("clear backup list for %q: %v", profile.Name, err) - } - } else { - l.Debug("syncing backup list for %q", profile.Name) - err = resync.SyncBackupList(ctx, a.leadConn, &profile.Storage, profile.Name) - if err != nil { - l.Error("sync backup list for %q: %v", profile.Name, err) - return - } +func handleSyncAllProfiles(ctx context.Context, conn connect.Client, clear bool) error { + profiles, err := config.ListProfiles(ctx, conn) + if err != nil { + return errors.Wrap(err, "get config profiles") + } + + eg, ctx := errgroup.WithContext(ctx) + if clear { + for i := range profiles { + eg.Go(func() error { + return helpClearProfileBackups(ctx, conn, profiles[i].Name) + }) } - } else { // resync main storage only - l.Debug("resync from main storage") - cfg, err := config.GetConfig(ctx, a.leadConn) - if err != nil { - l.Error("get config: %v", err) - return + } else { + for i := range profiles { + eg.Go(func() error { + return helpSyncProfileBackups(ctx, conn, &profiles[i]) + }) } + } - err = resync.Resync(ctx, a.leadConn, &cfg.Storage) - if err != nil { - l.Error("resync: %v", err) - return - } + return eg.Wait() +} - epch, err := config.ResetEpoch(ctx, a.leadConn) - if err != nil { - l.Error("reset epoch: %v", err) - return +func handleSyncProfile(ctx context.Context, conn connect.Client, name string, clear bool) error { + profile, err := config.GetProfile(ctx, conn, name) + if err != nil { + if errors.Is(err, mongo.ErrNoDocuments) { + err = errors.Errorf("profile %q not found", name) } - l.Debug("epoch set to %v", epch) + + return errors.Wrap(err, "get config profile") } - l.Info("succeed") + if clear { + err = helpClearProfileBackups(ctx, conn, profile.Name) + } else { + err = helpSyncProfileBackups(ctx, conn, profile) + } + + return err +} + +func helpClearProfileBackups(ctx context.Context, conn connect.Client, profileName string) error { + err := resync.ClearBackupList(ctx, conn, profileName) + return errors.Wrapf(err, "clear backup list for %q", profileName) +} + +func helpSyncProfileBackups(ctx 
context.Context, conn connect.Client, profile *config.Config) error { + err := resync.SyncBackupList(ctx, conn, &profile.Storage, profile.Name) + return errors.Wrapf(err, "sync backup list for %q", profile.Name) +} + +func handleSyncMainStorage(ctx context.Context, conn connect.Client) error { + cfg, err := config.GetConfig(ctx, conn) + if err != nil { + return errors.Wrap(err, "get config") + } + + err = resync.Resync(ctx, conn, &cfg.Storage) + if err != nil { + return errors.Wrap(err, "resync") + } + + epch, err := config.ResetEpoch(ctx, conn) + if err != nil { + return errors.Wrap(err, "reset epoch") + } + log.LogEventFromContext(ctx). + Debug("epoch set to %v", epch) + + return nil } // acquireLock tries to acquire the lock. If there is a stale lock From 632f4b352043d623d607c76510cc40c7b0205eae Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 26 Jun 2024 11:54:53 +0200 Subject: [PATCH 065/203] move resync into separate file --- cmd/pbm-agent/agent.go | 141 ------------------------------------ cmd/pbm-agent/resync.go | 157 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 157 insertions(+), 141 deletions(-) create mode 100644 cmd/pbm-agent/resync.go diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 4117f4c03..67dcc5624 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -9,7 +9,6 @@ import ( "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo" - "golang.org/x/sync/errgroup" "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/connect" @@ -18,7 +17,6 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/lock" "github.com/percona/percona-backup-mongodb/pbm/log" - "github.com/percona/percona-backup-mongodb/pbm/resync" "github.com/percona/percona-backup-mongodb/pbm/storage" "github.com/percona/percona-backup-mongodb/pbm/topo" "github.com/percona/percona-backup-mongodb/pbm/util" @@ -175,145 +173,6 @@ func (a *Agent) Start(ctx context.Context) error { } } -// Resync uploads a backup list from the remote store -func (a *Agent) Resync(ctx context.Context, cmd *ctrl.ResyncCmd, opid ctrl.OPID, ep config.Epoch) { - if cmd == nil { - cmd = &ctrl.ResyncCmd{} - } - - logger := log.FromContext(ctx) - l := logger.NewEvent(string(ctrl.CmdResync), "", opid.String(), ep.TS()) - ctx = log.SetLogEventToContext(ctx, l) - - a.HbResume() - logger.ResumeMgo() - - nodeInfo, err := topo.GetNodeInfoExt(ctx, a.nodeConn) - if err != nil { - l.Error("get node info data: %v", err) - return - } - - if !nodeInfo.IsLeader() { - l.Info("not a member of the leader rs") - return - } - - lock := lock.NewLock(a.leadConn, lock.LockHeader{ - Type: ctrl.CmdResync, - Replset: nodeInfo.SetName, - Node: nodeInfo.Me, - OPID: opid.String(), - Epoch: util.Ref(ep.TS()), - }) - - got, err := a.acquireLock(ctx, lock, l) - if err != nil { - l.Error("acquiring lock: %v", err) - return - } - if !got { - l.Debug("lock not acquired") - return - } - - defer func() { - if err := lock.Release(); err != nil { - l.Error("release lock %v: %v", lock, err) - } - }() - - l.Info("started") - - if cmd.All { - err = handleSyncAllProfiles(ctx, a.leadConn, cmd.Clear) - } else if cmd.Name != "" { - err = handleSyncProfile(ctx, a.leadConn, cmd.Name, cmd.Clear) - } else { - err = handleSyncMainStorage(ctx, a.leadConn) - } - if err != nil { - l.Error(err.Error()) - return - } - - l.Info("succeed") -} - -func handleSyncAllProfiles(ctx context.Context, conn connect.Client, clear 
bool) error { - profiles, err := config.ListProfiles(ctx, conn) - if err != nil { - return errors.Wrap(err, "get config profiles") - } - - eg, ctx := errgroup.WithContext(ctx) - if clear { - for i := range profiles { - eg.Go(func() error { - return helpClearProfileBackups(ctx, conn, profiles[i].Name) - }) - } - } else { - for i := range profiles { - eg.Go(func() error { - return helpSyncProfileBackups(ctx, conn, &profiles[i]) - }) - } - } - - return eg.Wait() -} - -func handleSyncProfile(ctx context.Context, conn connect.Client, name string, clear bool) error { - profile, err := config.GetProfile(ctx, conn, name) - if err != nil { - if errors.Is(err, mongo.ErrNoDocuments) { - err = errors.Errorf("profile %q not found", name) - } - - return errors.Wrap(err, "get config profile") - } - - if clear { - err = helpClearProfileBackups(ctx, conn, profile.Name) - } else { - err = helpSyncProfileBackups(ctx, conn, profile) - } - - return err -} - -func helpClearProfileBackups(ctx context.Context, conn connect.Client, profileName string) error { - err := resync.ClearBackupList(ctx, conn, profileName) - return errors.Wrapf(err, "clear backup list for %q", profileName) -} - -func helpSyncProfileBackups(ctx context.Context, conn connect.Client, profile *config.Config) error { - err := resync.SyncBackupList(ctx, conn, &profile.Storage, profile.Name) - return errors.Wrapf(err, "sync backup list for %q", profile.Name) -} - -func handleSyncMainStorage(ctx context.Context, conn connect.Client) error { - cfg, err := config.GetConfig(ctx, conn) - if err != nil { - return errors.Wrap(err, "get config") - } - - err = resync.Resync(ctx, conn, &cfg.Storage) - if err != nil { - return errors.Wrap(err, "resync") - } - - epch, err := config.ResetEpoch(ctx, conn) - if err != nil { - return errors.Wrap(err, "reset epoch") - } - log.LogEventFromContext(ctx). - Debug("epoch set to %v", epch) - - return nil -} - // acquireLock tries to acquire the lock. If there is a stale lock // it tries to mark op that held the lock (backup, [pitr]restore) as failed. 
func (a *Agent) acquireLock(ctx context.Context, l *lock.Lock, lg log.LogEvent) (bool, error) { diff --git a/cmd/pbm-agent/resync.go b/cmd/pbm-agent/resync.go new file mode 100644 index 000000000..fa87c384a --- /dev/null +++ b/cmd/pbm-agent/resync.go @@ -0,0 +1,157 @@ +package main + +import ( + "context" + + "go.mongodb.org/mongo-driver/mongo" + "golang.org/x/sync/errgroup" + + "github.com/percona/percona-backup-mongodb/pbm/config" + "github.com/percona/percona-backup-mongodb/pbm/connect" + "github.com/percona/percona-backup-mongodb/pbm/ctrl" + "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/lock" + "github.com/percona/percona-backup-mongodb/pbm/log" + "github.com/percona/percona-backup-mongodb/pbm/resync" + "github.com/percona/percona-backup-mongodb/pbm/topo" + "github.com/percona/percona-backup-mongodb/pbm/util" +) + +// Resync uploads a backup list from the remote store +func (a *Agent) Resync(ctx context.Context, cmd *ctrl.ResyncCmd, opid ctrl.OPID, ep config.Epoch) { + if cmd == nil { + cmd = &ctrl.ResyncCmd{} + } + + logger := log.FromContext(ctx) + l := logger.NewEvent(string(ctrl.CmdResync), "", opid.String(), ep.TS()) + ctx = log.SetLogEventToContext(ctx, l) + + a.HbResume() + logger.ResumeMgo() + + nodeInfo, err := topo.GetNodeInfoExt(ctx, a.nodeConn) + if err != nil { + l.Error("get node info data: %v", err) + return + } + + if !nodeInfo.IsLeader() { + l.Info("not a member of the leader rs") + return + } + + lock := lock.NewLock(a.leadConn, lock.LockHeader{ + Type: ctrl.CmdResync, + Replset: nodeInfo.SetName, + Node: nodeInfo.Me, + OPID: opid.String(), + Epoch: util.Ref(ep.TS()), + }) + + got, err := a.acquireLock(ctx, lock, l) + if err != nil { + l.Error("acquiring lock: %v", err) + return + } + if !got { + l.Debug("lock not acquired") + return + } + + defer func() { + if err := lock.Release(); err != nil { + l.Error("release lock %v: %v", lock, err) + } + }() + + l.Info("started") + + if cmd.All { + err = handleSyncAllProfiles(ctx, a.leadConn, cmd.Clear) + } else if cmd.Name != "" { + err = handleSyncProfile(ctx, a.leadConn, cmd.Name, cmd.Clear) + } else { + err = handleSyncMainStorage(ctx, a.leadConn) + } + if err != nil { + l.Error(err.Error()) + return + } + + l.Info("succeed") +} + +func handleSyncAllProfiles(ctx context.Context, conn connect.Client, clear bool) error { + profiles, err := config.ListProfiles(ctx, conn) + if err != nil { + return errors.Wrap(err, "get config profiles") + } + + eg, ctx := errgroup.WithContext(ctx) + if clear { + for i := range profiles { + eg.Go(func() error { + return helpClearProfileBackups(ctx, conn, profiles[i].Name) + }) + } + } else { + for i := range profiles { + eg.Go(func() error { + return helpSyncProfileBackups(ctx, conn, &profiles[i]) + }) + } + } + + return eg.Wait() +} + +func handleSyncProfile(ctx context.Context, conn connect.Client, name string, clear bool) error { + profile, err := config.GetProfile(ctx, conn, name) + if err != nil { + if errors.Is(err, mongo.ErrNoDocuments) { + err = errors.Errorf("profile %q not found", name) + } + + return errors.Wrap(err, "get config profile") + } + + if clear { + err = helpClearProfileBackups(ctx, conn, profile.Name) + } else { + err = helpSyncProfileBackups(ctx, conn, profile) + } + + return err +} + +func helpClearProfileBackups(ctx context.Context, conn connect.Client, profileName string) error { + err := resync.ClearBackupList(ctx, conn, profileName) + return errors.Wrapf(err, "clear backup list for %q", profileName) +} + 
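+// helpSyncProfileBackups refreshes PBM's backup metadata from the profile's
+// remote storage. A minimal calling sketch, assuming a connected
+// connect.Client and an existing profile name (the name is illustrative):
+//
+//	profile, err := config.GetProfile(ctx, conn, "profile-name")
+//	if err != nil {
+//		return errors.Wrap(err, "get config profile")
+//	}
+//	return helpSyncProfileBackups(ctx, conn, profile)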
+func helpSyncProfileBackups(ctx context.Context, conn connect.Client, profile *config.Config) error { + err := resync.SyncBackupList(ctx, conn, &profile.Storage, profile.Name) + return errors.Wrapf(err, "sync backup list for %q", profile.Name) +} + +func handleSyncMainStorage(ctx context.Context, conn connect.Client) error { + cfg, err := config.GetConfig(ctx, conn) + if err != nil { + return errors.Wrap(err, "get config") + } + + err = resync.Resync(ctx, conn, &cfg.Storage) + if err != nil { + return errors.Wrap(err, "resync") + } + + epch, err := config.ResetEpoch(ctx, conn) + if err != nil { + return errors.Wrap(err, "reset epoch") + } + log.LogEventFromContext(ctx). + Debug("epoch set to %v", epch) + + return nil +} From 6bc5e184abc153c80feb46e15993d3e3f3d5caf9 Mon Sep 17 00:00:00 2001 From: Sandra Romanchenko <53295797+sandraromanchenko@users.noreply.github.com> Date: Wed, 26 Jun 2024 13:20:56 +0300 Subject: [PATCH 066/203] PBM_tests. Update go version and remove outdated parameter (#955) --- e2e-tests/docker/docker-compose-remapping.yaml | 1 - e2e-tests/docker/docker-compose-rs.yaml | 1 - e2e-tests/docker/docker-compose-single.yaml | 1 - e2e-tests/docker/docker-compose.yaml | 1 - e2e-tests/docker/pbm.dockerfile | 2 +- e2e-tests/docker/tests.dockerfile | 2 +- 6 files changed, 2 insertions(+), 6 deletions(-) diff --git a/e2e-tests/docker/docker-compose-remapping.yaml b/e2e-tests/docker/docker-compose-remapping.yaml index 9b8e72a26..77ebd36a8 100644 --- a/e2e-tests/docker/docker-compose-remapping.yaml +++ b/e2e-tests/docker/docker-compose-remapping.yaml @@ -1,4 +1,3 @@ -version: "3.4" services: tests: build: diff --git a/e2e-tests/docker/docker-compose-rs.yaml b/e2e-tests/docker/docker-compose-rs.yaml index 8790e34ed..cc880f71d 100644 --- a/e2e-tests/docker/docker-compose-rs.yaml +++ b/e2e-tests/docker/docker-compose-rs.yaml @@ -1,4 +1,3 @@ -version: "3.4" services: tests: build: diff --git a/e2e-tests/docker/docker-compose-single.yaml b/e2e-tests/docker/docker-compose-single.yaml index 86bf9e628..fbf414b25 100644 --- a/e2e-tests/docker/docker-compose-single.yaml +++ b/e2e-tests/docker/docker-compose-single.yaml @@ -1,4 +1,3 @@ -version: "3.4" services: tests: build: diff --git a/e2e-tests/docker/docker-compose.yaml b/e2e-tests/docker/docker-compose.yaml index 26b929e3e..f304e69c9 100644 --- a/e2e-tests/docker/docker-compose.yaml +++ b/e2e-tests/docker/docker-compose.yaml @@ -1,4 +1,3 @@ -version: "3.4" services: tests: build: diff --git a/e2e-tests/docker/pbm.dockerfile b/e2e-tests/docker/pbm.dockerfile index 142686a4a..4c733609a 100644 --- a/e2e-tests/docker/pbm.dockerfile +++ b/e2e-tests/docker/pbm.dockerfile @@ -12,7 +12,7 @@ COPY --from=mongo_image /bin/mongod /bin/ RUN dnf install epel-release && dnf update && dnf install make gcc krb5-devel iproute-tc libfaketime RUN arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && \ -curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.2.linux-${arch}.tar.gz && \ +curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.4.linux-${arch}.tar.gz && \ rm -rf /usr/local/go && tar -C /usr/local -xzf /tmp/golang.tar.gz && rm /tmp/golang.tar.gz ENV PATH=$PATH:/usr/local/go/bin diff --git a/e2e-tests/docker/tests.dockerfile b/e2e-tests/docker/tests.dockerfile index 620632030..1b16ca2be 100644 --- a/e2e-tests/docker/tests.dockerfile +++ b/e2e-tests/docker/tests.dockerfile @@ -3,7 +3,7 @@ WORKDIR /build RUN dnf update && dnf install make gcc krb5-devel RUN arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && \ -curl -sL -o 
/tmp/golang.tar.gz https://go.dev/dl/go1.22.2.linux-${arch}.tar.gz && \
+curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.4.linux-${arch}.tar.gz && \
 rm -rf /usr/local/go && tar -C /usr/local -xzf /tmp/golang.tar.gz && rm /tmp/golang.tar.gz
 ENV PATH=$PATH:/usr/local/go/bin

From eadf9201d7efad9495d12a08a6adb4979d65ece6 Mon Sep 17 00:00:00 2001
From: Sandra Romanchenko <53295797+sandraromanchenko@users.noreply.github.com>
Date: Wed, 26 Jun 2024 13:28:37 +0300
Subject: [PATCH 067/203] PBM tests. Avoid checking storage unless it's s3
 (#952)

---
 e2e-tests/pkg/tests/sharded/test_delete_backup.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/e2e-tests/pkg/tests/sharded/test_delete_backup.go b/e2e-tests/pkg/tests/sharded/test_delete_backup.go
index c0d1884e4..a6de214c5 100644
--- a/e2e-tests/pkg/tests/sharded/test_delete_backup.go
+++ b/e2e-tests/pkg/tests/sharded/test_delete_backup.go
@@ -184,6 +184,10 @@ func checkArtefacts(conf string, shouldStay map[string]struct{}) {

 	stg := cfg.Storage

+	if stg.Type == "azure" || stg.Type == "filesystem" {
+		return
+	}
+
 	endopintURL := awsurl
 	if stg.S3.EndpointURL != "" {
 		eu, err := url.Parse(stg.S3.EndpointURL)

From 1eb05adc5dbb9a7418be534238044edb0527c68b Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Wed, 26 Jun 2024 12:58:59 +0200
Subject: [PATCH 068/203] Add agent's logic for handling Ready status

---
 cmd/pbm-agent/pitr.go | 87 ++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 81 insertions(+), 6 deletions(-)

diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go
index 33626af9b..821019275 100644
--- a/cmd/pbm-agent/pitr.go
+++ b/cmd/pbm-agent/pitr.go
@@ -61,10 +61,12 @@ func (a *Agent) sliceNow(opid ctrl.OPID) {
 }

 const (
-	pitrCheckPeriod          = 15 * time.Second
-	pitrRenominationFrame    = 30 * time.Second
-	pitrOpLockPollingCycle   = 15 * time.Second
-	pitrOpLockPollingTimeOut = 2 * time.Minute
+	pitrCheckPeriod              = 15 * time.Second
+	pitrRenominationFrame        = 5 * time.Second
+	pitrOpLockPollingCycle       = 15 * time.Second
+	pitrOpLockPollingTimeOut     = 2 * time.Minute
+	pitrNominationPollingCycle   = 2 * time.Second
+	pitrNominationPollingTimeOut = 2 * time.Minute
 )

 // PITR starts PITR processing routine
@@ -429,14 +431,23 @@ func (a *Agent) waitAllOpLockRelease(ctx context.Context) (bool, error) {

 // waitNominationForPITR is used by a potential nominee to determine if it
 // is nominated by the leader. It returns true if the member receives the nomination.
+// First, the nominee needs to sync up on Ready status with the cluster leader.
+// After the cluster Ready status is reached, the nomination process starts.
 // If nomination document is not found, nominee tries again on another tick.
 // If Ack is found in fetched fragment, that means that another member confirmed
 // nomination, so in that case current member lost nomination and false is returned.
 func (a *Agent) waitNominationForPITR(ctx context.Context, rs, node string) (bool, error) {
 	l := log.LogEventFromContext(ctx)

-	tk := time.NewTicker(time.Millisecond * 500)
+	err := a.confirmReadyStatus(ctx)
+	if err != nil {
+		return false, errors.Wrap(err, "confirming ready status")
+	}
+
+	tk := time.NewTicker(pitrNominationPollingCycle)
 	defer tk.Stop()
+	tout := time.NewTimer(pitrNominationPollingTimeOut)
+	defer tout.Stop()

 	l.Debug("waiting pitr nomination")
 	for {
@@ -458,7 +469,71 @@ func (a *Agent) waitNominationForPITR(ctx context.Context, rs, node string) (boo
 			return true, nil
 			}
 		}
+		case <-tout.C:
+			return false, nil
 		}
 	}
-	//todo: add timeout: e.g. 2 minutes
 }
+
+func (a *Agent) confirmReadyStatus(ctx context.Context) error {
+	l := log.LogEventFromContext(ctx)
+
+	tk := time.NewTicker(pitrNominationPollingCycle)
+	defer tk.Stop()
+	tout := time.NewTimer(pitrNominationPollingTimeOut)
+	defer tout.Stop()
+
+	l.Debug("waiting for cluster ready status")
+	for {
+		select {
+		case <-tk.C:
+			status, err := oplog.GetClusterStatus(ctx, a.leadConn)
+			if err != nil {
+				if errors.Is(err, errors.ErrNotFound) {
+					continue
+				}
+				return errors.Wrap(err, "getting cluster status")
+			}
+			if status == oplog.StatusReady {
+				err = oplog.SetReadyRSStatus(ctx, a.leadConn, a.brief.SetName, a.brief.Me)
+				if err != nil {
+					return errors.Wrap(err, "setting ready status for RS")
+				}
+				return nil
+			}
+		case <-tout.C:
+			return errors.New("timeout while waiting for ready status")
+		}
+	}
+}
+
+func (a *Agent) reconcileReadyStatus(ctx context.Context, agents []topo.AgentStat) error {
+	l := log.LogEventFromContext(ctx)
+
+	tk := time.NewTicker(pitrNominationPollingCycle)
+	defer tk.Stop()
+
+	tout := time.NewTimer(pitrNominationPollingTimeOut)
+	defer tout.Stop()
+
+	l.Debug("reconciling ready status from all agents")
+	for {
+		select {
+		case <-tk.C:
+			nodes, err := oplog.GetReadyReplSets(ctx, a.leadConn)
+			if err != nil {
+				if errors.Is(err, errors.ErrNotFound) {
+					continue
+				}
+				return errors.Wrap(err, "getting all nodes with ready status")
+			}
+			l.Debug("agents in ready: %d; waiting for agents: %d", len(nodes), len(agents))
+			if len(nodes) >= len(agents) {
+				return nil
+			}
+		case <-tout.C:
+			return errors.New("timeout while reconciling ready status")
+		}
+	}
+}
+

From d5ef97507a33387cab0769d2a68e587e9f111125 Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Wed, 26 Jun 2024 14:44:47 +0200
Subject: [PATCH 069/203] move the Blackhole back

---
 cmd/pbm-speed-test/main.go                                | 3 ++-
 pbm/config/config.go                                      | 7 +++++++
 .../pbm-speed-test => pbm/storage/blackhole}/blackhole.go | 8 +++++---
 pbm/storage/storage.go                                    | 3 +++
 pbm/util/storage.go                                       | 3 +++
 5 files changed, 20 insertions(+), 4 deletions(-)
 rename {cmd/pbm-speed-test => pbm/storage/blackhole}/blackhole.go (89%)

diff --git a/cmd/pbm-speed-test/main.go b/cmd/pbm-speed-test/main.go
index 505947024..833b8a088 100644
--- a/cmd/pbm-speed-test/main.go
+++ b/cmd/pbm-speed-test/main.go
@@ -13,6 +13,7 @@ import (
 	"github.com/percona/percona-backup-mongodb/pbm/compress"
 	"github.com/percona/percona-backup-mongodb/pbm/connect"
 	"github.com/percona/percona-backup-mongodb/pbm/log"
+	"github.com/percona/percona-backup-mongodb/pbm/storage/blackhole"
 	"github.com/percona/percona-backup-mongodb/pbm/util"
 	"github.com/percona/percona-backup-mongodb/pbm/version"
 )
@@ -92,7 +93,7 @@ func testCompression(mURL string, compression compress.CompressionType, level *i
 		defer cn.Disconnect(ctx) //nolint:errcheck
 	}

-	stg := newBlackhole()
+	stg := blackhole.New()
 	done := make(chan struct{})
 	go printw(done)

diff --git a/pbm/config/config.go b/pbm/config/config.go
index 5a48aad03..2c12b26b3 100644
--- a/pbm/config/config.go
+++ b/pbm/config/config.go
@@ -220,6 +220,7 @@ func (s *StorageConf) Clone() *StorageConf {
 		rv.S3 = s.S3.Clone()
 	case storage.Azure:
 		rv.Azure = s.Azure.Clone()
+	case storage.Blackhole: // no config
 	}

 	return rv
@@ -237,6 +238,8 @@ func (s *StorageConf) Equal(other *StorageConf) bool {
 		return s.Azure.Equal(other.Azure)
 	case storage.Filesystem:
 		return s.Filesystem.Equal(other.Filesystem)
+	case storage.Blackhole:
+		return true
 	}

 	return false
@@ -250,6 +253,8 @@ func (s *StorageConf) Cast() error {
 		return s.S3.Cast()
 	case storage.Azure:
// noop return nil + case storage.Blackhole: // noop + return nil } return errors.Wrap(ErrUnkownStorageType, string(s.Type)) @@ -263,6 +268,8 @@ func (s *StorageConf) Typ() string { return "Azure" case storage.Filesystem: return "FS" + case storage.Blackhole: + return "blackhole" case storage.Undefined: fallthrough default: diff --git a/cmd/pbm-speed-test/blackhole.go b/pbm/storage/blackhole/blackhole.go similarity index 89% rename from cmd/pbm-speed-test/blackhole.go rename to pbm/storage/blackhole/blackhole.go index 4d5ccb541..238a51498 100644 --- a/cmd/pbm-speed-test/blackhole.go +++ b/pbm/storage/blackhole/blackhole.go @@ -1,4 +1,4 @@ -package main +package blackhole import ( "io" @@ -8,12 +8,14 @@ import ( type Blackhole struct{} -func newBlackhole() *Blackhole { +var _ storage.Storage = &Blackhole{} + +func New() *Blackhole { return &Blackhole{} } func (*Blackhole) Type() storage.Type { - return "blackhole" + return storage.Blackhole } func (*Blackhole) Save(_ string, data io.Reader, _ int64) error { diff --git a/pbm/storage/storage.go b/pbm/storage/storage.go index c94de5f7f..ded8ad6f2 100644 --- a/pbm/storage/storage.go +++ b/pbm/storage/storage.go @@ -27,6 +27,7 @@ const ( S3 Type = "s3" Azure Type = "azure" Filesystem Type = "filesystem" + Blackhole Type = "blackhole" ) type FileInfo struct { @@ -59,6 +60,8 @@ func ParseType(s string) Type { return Azure case string(Filesystem): return Filesystem + case string(Blackhole): + return Blackhole default: return Undefined } diff --git a/pbm/util/storage.go b/pbm/util/storage.go index 13e4669b6..e887ba22d 100644 --- a/pbm/util/storage.go +++ b/pbm/util/storage.go @@ -9,6 +9,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/log" "github.com/percona/percona-backup-mongodb/pbm/storage" "github.com/percona/percona-backup-mongodb/pbm/storage/azure" + "github.com/percona/percona-backup-mongodb/pbm/storage/blackhole" "github.com/percona/percona-backup-mongodb/pbm/storage/fs" "github.com/percona/percona-backup-mongodb/pbm/storage/s3" ) @@ -25,6 +26,8 @@ func StorageFromConfig(cfg *config.StorageConf, l log.LogEvent) (storage.Storage return azure.New(cfg.Azure, l) case storage.Filesystem: return fs.New(cfg.Filesystem) + case storage.Blackhole: + return blackhole.New(), nil case storage.Undefined: return nil, ErrStorageUndefined default: From c1c30b49e1176fb46a80328a3995312396f1fdd8 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 20 Jun 2024 16:59:13 +0200 Subject: [PATCH 070/203] add sdk.ClusterMembers() and sdk.AgentStatuses() --- cmd/pbm-agent/agent.go | 7 +- cmd/pbm/status.go | 181 +++++++++++++++++++++++------------------ pbm/topo/agent.go | 79 ++++++++++++++---- pbm/topo/cluster.go | 32 ++++++-- pbm/topo/node.go | 4 +- sdk/impl.go | 4 - sdk/sdk.go | 65 ++++++++++++++- sdk/util.go | 80 +++++------------- 8 files changed, 278 insertions(+), 174 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 6e4dfcf57..325c47274 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -340,7 +340,7 @@ func (a *Agent) HbStatus(ctx context.Context) { hb.Hidden = false hb.Passive = false - inf, err := topo.GetNodeInfoExt(ctx, a.nodeConn) + inf, err := topo.GetNodeInfo(ctx, a.nodeConn) if err != nil { l.Error("get NodeInfo: %v", err) hb.Err += fmt.Sprintf("get NodeInfo: %v", err) @@ -348,6 +348,11 @@ func (a *Agent) HbStatus(ctx context.Context) { hb.Hidden = inf.Hidden hb.Passive = inf.Passive hb.Arbiter = inf.ArbiterOnly + if inf.SecondaryDelayOld != 0 { + hb.DelaySecs = inf.SecondaryDelayOld + } 
else { + hb.DelaySecs = inf.SecondaryDelaySecs + } } err = topo.SetAgentStatus(ctx, a.leadConn, hb) diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go index b873f2fb2..c43a4b208 100644 --- a/cmd/pbm/status.go +++ b/cmd/pbm/status.go @@ -12,7 +12,6 @@ import ( "time" "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/mongo" "golang.org/x/sync/errgroup" "github.com/percona/percona-backup-mongodb/pbm/backup" @@ -117,8 +116,8 @@ func status( data: []*statusSect{ { "cluster", "Cluster", nil, - func(ctx context.Context, conn connect.Client) (fmt.Stringer, error) { - return clusterStatus(ctx, conn, curi) + func(ctx context.Context, _ connect.Client) (fmt.Stringer, error) { + return clusterStatus(ctx, pbm, makeGetRSMembers(curi)) }, }, {"pitr", "PITR incremental backup", nil, getPitrStatus}, @@ -199,12 +198,7 @@ func (n node) String() string { return fmt.Sprintf("%s [!Arbiter]: arbiter node is not supported", n.Host) } - role := n.Role - if role != RolePrimary && role != RoleSecondary { - role = RoleSecondary - } - - s := fmt.Sprintf("%s [%s]: pbm-agent %v", n.Host, role, n.Ver) + s := fmt.Sprintf("%s [%s]: pbm-agent %v", n.Host, n.Role, n.Ver) if n.OK { s += " OK" return s @@ -228,72 +222,96 @@ func (c cluster) String() string { return s } -func clusterStatus(ctx context.Context, conn connect.Client, uri string) (fmt.Stringer, error) { - clstr, err := topo.ClusterMembers(ctx, conn.MongoClient()) +func clusterStatus( + ctx context.Context, + pbm sdk.Client, + getRSMembers getRSMembersFunc, +) (fmt.Stringer, error) { + clusterMembers, err := sdk.ClusterMembers(ctx, pbm) + if err != nil { + return nil, errors.Wrap(err, "get agent statuses") + } + agentStatuses, err := sdk.AgentStatuses(ctx, pbm) if err != nil { return nil, errors.Wrap(err, "get cluster members") } - - clusterTime, err := topo.GetClusterTime(ctx, conn) + clusterTime, err := sdk.ClusterTime(ctx, pbm) if err != nil { return nil, errors.Wrap(err, "read cluster time") } + agentMap := make(map[topo.ReplsetName]map[string]*sdk.AgentStatus, len(clusterMembers)) + for i := range agentStatuses { + agent := &agentStatuses[i] + rs, ok := agentMap[agent.RS] + if !ok { + rs = make(map[string]*topo.AgentStat) + agentMap[agent.RS] = rs + } + + rs[agent.Node] = agent + agentMap[agent.RS] = rs + } + eg, ctx := errgroup.WithContext(ctx) m := sync.Mutex{} var ret cluster - for _, c := range clstr { - c := c - + for _, c := range clusterMembers { eg.Go(func() error { - client, err := directConnect(ctx, uri, c.Host) - if err != nil { - return errors.Wrapf(err, "connect to `%s` [%s]", c.RS, c.Host) - } - - rsConfig, err := topo.GetReplSetConfig(ctx, client) + rsMembers, err := getRSMembers(ctx, c.Host) if err != nil { - _ = client.Disconnect(ctx) return errors.Wrapf(err, "get replset status for `%s`", c.RS) } - info, err := topo.GetNodeInfo(ctx, client) - // don't need the connection anymore despite the result - _ = client.Disconnect(ctx) - if err != nil { - return errors.Wrap(err, "get node info") + + lrs := rs{ + Name: c.RS, + Nodes: make([]node, len(rsMembers)), } - lrs := rs{Name: c.RS} - for i, n := range rsConfig.Members { - lrs.Nodes = append(lrs.Nodes, node{Host: c.RS + "/" + n.Host}) + for i, member := range rsMembers { + node := &lrs.Nodes[i] + node.Host = member.Host - nd := &lrs.Nodes[i] - switch { - case n.Host == info.Primary: - nd.Role = RolePrimary - case n.ArbiterOnly: - nd.Role = RoleArbiter - case n.SecondaryDelayOld != 0 || n.SecondaryDelaySecs != 0: - nd.Role = RoleDelayed - case n.Hidden: - nd.Role = 
RoleHidden - } - - stat, err := topo.GetAgentStatus(ctx, conn, c.RS, n.Host) - if errors.Is(err, mongo.ErrNoDocuments) { - nd.Ver = "NOT FOUND" + rsAgents := agentMap[c.RS] + if rsAgents == nil { + node.Ver = "NOT FOUND" continue - } else if err != nil { - nd.Errs = append(nd.Errs, fmt.Sprintf("ERROR: get agent status: %v", err)) + } + agent := rsAgents[member.Host] + if agent == nil { + node.Ver = "NOT FOUND" continue } - if stat.Heartbeat.T+defs.StaleFrameSec < clusterTime.T { - nd.Errs = append(nd.Errs, fmt.Sprintf("ERROR: lost agent, last heartbeat: %v", stat.Heartbeat.T)) + + node.Ver = "v" + agent.AgentVer + + switch { + case agent.State == 1: // agent.StateStr == "PRIMARY" + node.Role = RolePrimary + case agent.State == 7: // agent.StateStr == "ARBITER" + node.Role = RoleArbiter + case agent.State == 2: // agent.StateStr == "SECONDARY" + if agent.DelaySecs != 0 { + node.Role = RoleDelayed + } else if agent.Hidden { + node.Role = RoleHidden + } else { + node.Role = RoleSecondary + } + default: + // unexpected state. show actual state + node.Role = RSRole(agent.StateStr) + } + + if agent.IsStale(clusterTime) { + node.Errs = []string{ + fmt.Sprintf("ERROR: lost agent, last heartbeat: %v", agent.Heartbeat.T), + } continue } - nd.Ver = "v" + stat.AgentVer - nd.OK, nd.Errs = stat.OK() + + node.OK, node.Errs = agent.OK() } m.Lock() @@ -307,38 +325,43 @@ func clusterStatus(ctx context.Context, conn connect.Client, uri string) (fmt.St return ret, err } -func directConnect(ctx context.Context, uri, hosts string) (*mongo.Client, error) { - var host string - chost := strings.Split(hosts, "/") - if len(chost) > 1 { - host = chost[1] - } else { - host = chost[0] - } +type getRSMembersFunc func(ctx context.Context, hosts string) ([]topo.RSMember, error) - curi, err := url.Parse("mongodb://" + strings.Replace(uri, "mongodb://", "", 1)) - if err != nil { - return nil, errors.Wrapf(err, "parse mongo-uri '%s'", uri) - } +func makeGetRSMembers(uri string) getRSMembersFunc { + return func(ctx context.Context, hosts string) ([]topo.RSMember, error) { + var host string + chost := strings.Split(hosts, "/") + if len(chost) > 1 { + host = chost[1] + } else { + host = chost[0] + } + + curi, err := url.Parse("mongodb://" + strings.Replace(uri, "mongodb://", "", 1)) + if err != nil { + return nil, errors.Wrapf(err, "parse mongo-uri '%s'", uri) + } - // Preserving the `replicaSet` parameter will cause an error - // while connecting to the ConfigServer (mismatched replicaset names) - query := curi.Query() - query.Del("replicaSet") - curi.RawQuery = query.Encode() - curi.Host = host + // Preserving the `replicaSet` parameter will cause an error + // while connecting to the ConfigServer (mismatched replicaset names) + query := curi.Query() + query.Del("replicaSet") + curi.RawQuery = query.Encode() + curi.Host = host - conn, err := connect.MongoConnect(ctx, curi.String(), connect.AppName("pbm-status")) - if err != nil { - return nil, errors.Wrap(err, "connect") - } + conn, err := connect.MongoConnect(ctx, curi.String(), connect.AppName("pbm-status")) + if err != nil { + return nil, errors.Wrap(err, "connect") + } + defer conn.Disconnect(context.Background()) - err = conn.Ping(ctx, nil) - if err != nil { - return nil, errors.Wrap(err, "ping") - } + rsConfig, err := topo.GetReplSetConfig(ctx, conn) + if err != nil { + return nil, errors.Wrapf(err, "get replset config") + } - return conn, nil + return rsConfig.Members, nil + } } type pitrStat struct { diff --git a/pbm/topo/agent.go b/pbm/topo/agent.go index 
670189511..063e50a7f 100644
--- a/pbm/topo/agent.go
+++ b/pbm/topo/agent.go
@@ -18,28 +18,75 @@ import (
 )

 type AgentStat struct {
-	Node          string              `bson:"n"`
-	RS            string              `bson:"rs"`
-	State         defs.NodeState      `bson:"s"`
-	StateStr      string              `bson:"str"`
-	Hidden        bool                `bson:"hdn"`
-	Passive       bool                `bson:"psv"`
-	Arbiter       bool                `bson:"arb"`
-	AgentVer      string              `bson:"v"`
-	MongoVer      string              `bson:"mv"`
-	PerconaVer    string              `bson:"pv,omitempty"`
-	PBMStatus     SubsysStatus        `bson:"pbms"`
-	NodeStatus    SubsysStatus        `bson:"nodes"`
-	StorageStatus SubsysStatus        `bson:"stors"`
-	Heartbeat     primitive.Timestamp `bson:"hb"`
-	Err           string              `bson:"e"`
+	// Node is like agent ID. Looks like `rs00:27017` (host:port of direct mongod).
+	Node string `bson:"n"`
+
+	// RS is the direct node replset name.
+	RS string `bson:"rs"`
+
+	// State is the mongod state code.
+	State defs.NodeState `bson:"s"`
+
+	// StateStr is the mongod state string (e.g. PRIMARY, SECONDARY, ARBITER)
+	StateStr string `bson:"str"`
+
+	// Hidden is set for hidden node.
+	Hidden bool `bson:"hdn"`
+
+	// Passive is set when node cannot be primary (priority 0).
+	//
+	// Hidden and delayed nodes are always passive.
+	// Arbiter cannot be primary because it is not a data-bearing member (non-writable)
+	// but it is not passive (has 1 election vote).
+	Passive bool `bson:"psv"`
+
+	// Arbiter is true for arbiter node.
+	Arbiter bool `bson:"arb"`
+
+	// DelaySecs is the node configured replication delay (lag).
+	DelaySecs int32 `bson:"delay"`
+
+	// AgentVer has the PBM Agent version (looks like `v2.3.4`)
+	AgentVer string `bson:"v"`
+
+	// MongoVer is the mongod version (looks like `v7.0.0`)
+	MongoVer string `bson:"mv"`
+
+	// PerconaVer is the PSMDB version (looks like `v7.0.0-1`).
+	//
+	// Empty for non-PSMDB (i.e. MongoDB CE).
+	PerconaVer string `bson:"pv,omitempty"`
+
+	// PBMStatus is the agent status.
+	PBMStatus SubsysStatus `bson:"pbms"`
+
+	// NodeStatus is the mongod/connection status.
+	NodeStatus SubsysStatus `bson:"nodes"`
+
+	// StorageStatus is the remote storage status.
+	StorageStatus SubsysStatus `bson:"stors"`
+
+	// Heartbeat is agent's last seen cluster time.
+	Heartbeat primitive.Timestamp `bson:"hb"`
+
+	// Err can be any error.
+	Err string `bson:"e"`
 }

+// SubsysStatus is generic status.
 type SubsysStatus struct {
-	OK  bool   `bson:"ok"`
+	// OK is false if there is an error. Otherwise true.
+	OK bool `bson:"ok"`
+
+	// Err is error string.
 	Err string `bson:"e"`
 }

+// IsStale returns true if the agent's heartbeat is stale for the given `t` cluster time.
+func (s *AgentStat) IsStale(t primitive.Timestamp) bool {
+	return s.Heartbeat.T+defs.StaleFrameSec < t.T
+}
+
 func (s *AgentStat) OK() (bool, []string) {
 	var errs []string
 	ok := true
diff --git a/pbm/topo/cluster.go b/pbm/topo/cluster.go
index 221839d42..4fc4960fc 100644
--- a/pbm/topo/cluster.go
+++ b/pbm/topo/cluster.go
@@ -13,12 +13,27 @@ import (
 	"github.com/percona/percona-backup-mongodb/pbm/errors"
 )

-// Shard represent config.shard https://docs.mongodb.com/manual/reference/config-database/#config.shards
-// _id may differ from the rs name, so extract rs name from the host (format like "rs2/localhost:27017")
-// see https://jira.percona.com/browse/PBM-595
+// Shard represents a config.shard document.
+//
+// https://docs.mongodb.com/manual/reference/config-database/#config.shards
 type Shard struct {
-	ID string `bson:"_id"`
-	RS string `bson:"-"`
+	// ID is the shard ID.
+	//
+	// Usually it is the same as the replset name. Except for configsvr - it is always `config`.
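+	// For example, a shard added from replset "rs1" gets the ID "rs1" by default.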
+ // Can be customized by name param in `addShard` command. + // + // https://www.mongodb.com/docs/manual/reference/command/addShard/ + ID string `bson:"_id"` + + // RS is the replset name. + RS string `bson:"-"` + + // Host is a node URI. + // + // Looks like `rs0/rs00:27018` where + // - `rs0` is a replset name + // - `rs00` is a hostname or IP + // - `27018` is a port Host string `bson:"host"` } @@ -93,9 +108,10 @@ func IsWriteMajorityRequested( return w >= s.WriteMajorityCount, nil } -// ClusterMembers returns list of replicasets current cluster consists of -// (shards + configserver). The list would consist of on rs if cluster is -// a non-sharded rs. +// ClusterMembers returns list of replsets in the cluster. +// +// For sharded cluster: configsvr (with `config` id) and all shards. +// For non-sharded cluster: the replset. func ClusterMembers(ctx context.Context, m *mongo.Client) ([]Shard, error) { // it would be a config server in sharded cluster inf, err := GetNodeInfo(ctx, m) diff --git a/pbm/topo/node.go b/pbm/topo/node.go index f52bb96c2..5821dcb22 100644 --- a/pbm/topo/node.go +++ b/pbm/topo/node.go @@ -72,8 +72,8 @@ type NodeInfo struct { Hidden bool `bson:"hidden,omitempty"` Passive bool `bson:"passive,omitempty"` ArbiterOnly bool `bson:"arbiterOnly"` - SecondaryDelayOld int `bson:"slaveDelay"` - SecondaryDelaySecs int `bson:"secondaryDelaySecs"` + SecondaryDelayOld int32 `bson:"slaveDelay"` + SecondaryDelaySecs int32 `bson:"secondaryDelaySecs"` ConfigSvr int `bson:"configsvr,omitempty"` Me string `bson:"me"` LastWrite MongoLastWrite `bson:"lastWrite"` diff --git a/sdk/impl.go b/sdk/impl.go index c515679ed..415f38738 100644 --- a/sdk/impl.go +++ b/sdk/impl.go @@ -43,10 +43,6 @@ func (c *clientImpl) Close(ctx context.Context) error { return c.conn.Disconnect(ctx) } -func (c *clientImpl) ClusterMembers(ctx context.Context) ([]topo.Shard, error) { - return topo.ClusterMembers(ctx, c.conn.MongoClient()) -} - func (c *clientImpl) CommandInfo(ctx context.Context, id CommandID) (*Command, error) { opid, err := ctrl.OPIDfromStr(string(id)) if err != nil { diff --git a/sdk/sdk.go b/sdk/sdk.go index 9819950e7..c12282218 100644 --- a/sdk/sdk.go +++ b/sdk/sdk.go @@ -13,9 +13,9 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/lock" + "github.com/percona/percona-backup-mongodb/pbm/log" "github.com/percona/percona-backup-mongodb/pbm/oplog" "github.com/percona/percona-backup-mongodb/pbm/restore" - "github.com/percona/percona-backup-mongodb/pbm/topo" ) var ( @@ -104,8 +104,6 @@ type Command = ctrl.Cmd type Client interface { Close(ctx context.Context) error - ClusterMembers(ctx context.Context) ([]topo.Shard, error) - CommandInfo(ctx context.Context, id CommandID) (*Command, error) GetConfig(ctx context.Context) (*Config, error) @@ -156,3 +154,64 @@ func WaitForDeleteOplogRange(ctx context.Context, client Client) error { func WaitForErrorLog(ctx context.Context, client Client, cmd *Command) (string, error) { return lastLogErr(ctx, client.(*clientImpl).conn, cmd.Cmd, cmd.TS) } + +func WaitForResync(ctx context.Context, c Client, cid CommandID) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + r := &log.LogRequest{ + LogKeys: log.LogKeys{ + Event: string(ctrl.CmdResync), + OPID: string(cid), + Severity: log.Info, + }, + } + + outC, errC := log.Follow(ctx, c.(*clientImpl).conn, r, false) + + for { + select { + case entry := <-outC: + if entry != nil && 
entry.Msg == "succeed" { + return nil + } + case err := <-errC: + return err + } + } +} + +func CanDeleteBackup(ctx context.Context, sc Client, bcp *BackupMetadata) error { + return backup.CanDeleteBackup(ctx, sc.(*clientImpl).conn, bcp) +} + +func CanDeleteIncrementalBackup( + ctx context.Context, + sc Client, + bcp *BackupMetadata, + increments [][]*BackupMetadata, +) error { + return backup.CanDeleteIncrementalChain(ctx, sc.(*clientImpl).conn, bcp, increments) +} + +func ListDeleteBackupBefore( + ctx context.Context, + sc Client, + ts primitive.Timestamp, + bcpType BackupType, +) ([]BackupMetadata, error) { + return backup.ListDeleteBackupBefore(ctx, sc.(*clientImpl).conn, ts, bcpType) +} + +func ListDeleteChunksBefore( + ctx context.Context, + sc Client, + ts primitive.Timestamp, +) ([]OplogChunk, error) { + r, err := backup.MakeCleanupInfo(ctx, sc.(*clientImpl).conn, ts) + return r.Chunks, err +} + +func ParseDeleteBackupType(s string) (BackupType, error) { + return backup.ParseDeleteBackupType(s) +} diff --git a/sdk/util.go b/sdk/util.go index e323b24c0..595077381 100644 --- a/sdk/util.go +++ b/sdk/util.go @@ -4,31 +4,29 @@ import ( "context" "go.mongodb.org/mongo-driver/bson/primitive" - "go.mongodb.org/mongo-driver/mongo" "github.com/percona/percona-backup-mongodb/pbm/backup" - "github.com/percona/percona-backup-mongodb/pbm/ctrl" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" - "github.com/percona/percona-backup-mongodb/pbm/log" "github.com/percona/percona-backup-mongodb/pbm/topo" ) +type ( + ReplsetInfo = topo.Shard + AgentStatus = topo.AgentStat +) + var ( ErrMissedClusterTime = errors.New("missed cluster time") ErrInvalidDeleteBackupType = backup.ErrInvalidDeleteBackupType ) -func ParseDeleteBackupType(s string) (BackupType, error) { - return backup.ParseDeleteBackupType(s) -} - func IsHeartbeatStale(clusterTime, other Timestamp) bool { return clusterTime.T >= other.T+defs.StaleFrameSec } -func GetClusterTime(ctx context.Context, m *mongo.Client) (Timestamp, error) { - info, err := topo.GetNodeInfo(ctx, m) +func ClusterTime(ctx context.Context, client Client) (Timestamp, error) { + info, err := topo.GetNodeInfo(ctx, client.(*clientImpl).conn.MongoClient()) if err != nil { return primitive.Timestamp{}, err } @@ -39,59 +37,19 @@ func GetClusterTime(ctx context.Context, m *mongo.Client) (Timestamp, error) { return info.ClusterTime.ClusterTime, nil } -func WaitForResync(ctx context.Context, c Client, cid CommandID) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - r := &log.LogRequest{ - LogKeys: log.LogKeys{ - Event: string(ctrl.CmdResync), - OPID: string(cid), - Severity: log.Info, - }, - } - - outC, errC := log.Follow(ctx, c.(*clientImpl).conn, r, false) - - for { - select { - case entry := <-outC: - if entry != nil && entry.Msg == "succeed" { - return nil - } - case err := <-errC: - return err - } +// ClusterMembers returns list of replsets in the cluster. +// +// For sharded cluster: the configsvr (with ID `config`) and all shards. +// For non-sharded cluster: the replset. 
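+//
+// A usage sketch (`pbmClient` here stands for any connected sdk.Client):
+//
+//	members, err := ClusterMembers(ctx, pbmClient)
+//	if err != nil {
+//		return err
+//	}
+//	for _, rs := range members {
+//		fmt.Printf("replset %s at %s\n", rs.RS, rs.Host)
+//	}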
+func ClusterMembers(ctx context.Context, client Client) ([]ReplsetInfo, error) { + shards, err := topo.ClusterMembers(ctx, client.(*clientImpl).conn.MongoClient()) + if err != nil { + return nil, errors.Wrap(err, "topo") } + return shards, nil } -func CanDeleteBackup(ctx context.Context, sc Client, bcp *BackupMetadata) error { - return backup.CanDeleteBackup(ctx, sc.(*clientImpl).conn, bcp) -} - -func CanDeleteIncrementalBackup( - ctx context.Context, - sc Client, - bcp *BackupMetadata, - increments [][]*BackupMetadata, -) error { - return backup.CanDeleteIncrementalChain(ctx, sc.(*clientImpl).conn, bcp, increments) -} - -func ListDeleteBackupBefore( - ctx context.Context, - sc Client, - ts primitive.Timestamp, - bcpType BackupType, -) ([]BackupMetadata, error) { - return backup.ListDeleteBackupBefore(ctx, sc.(*clientImpl).conn, ts, bcpType) -} - -func ListDeleteChunksBefore( - ctx context.Context, - sc Client, - ts primitive.Timestamp, -) ([]OplogChunk, error) { - r, err := backup.MakeCleanupInfo(ctx, sc.(*clientImpl).conn, ts) - return r.Chunks, err +// AgentStatuses returns list of all PBM Agents statuses. +func AgentStatuses(ctx context.Context, sc Client) ([]AgentStatus, error) { + return topo.ListAgents(ctx, sc.(*clientImpl).conn) } From e6fccd27e4d46ad2c657883f5def3164e9d2eef5 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 20 Jun 2024 16:59:24 +0200 Subject: [PATCH 071/203] misc --- .editorconfig | 2 +- .vscode/settings.json | 16 ++++++++++------ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/.editorconfig b/.editorconfig index da3b010a1..05847c656 100644 --- a/.editorconfig +++ b/.editorconfig @@ -7,7 +7,7 @@ indent_size = 4 trim_trailing_whitespace = true insert_final_newline = true -[*.js,json,yml,yaml] +[*.{json,yml,yaml}] indent_size = 2 [{Makefile,go.mod,go.sum,*.go}] diff --git a/.vscode/settings.json b/.vscode/settings.json index 4dfb78533..f9472ad38 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -2,31 +2,35 @@ "[go]": { "editor.defaultFormatter": "golang.go", "editor.insertSpaces": false, - "editor.tabSize": 4, + "editor.tabSize": 4 }, "[json][jsonc][yaml]": { "editor.insertSpaces": true, - "editor.tabSize": 2, + "editor.tabSize": 2 }, "[shellscript]": { "editor.insertSpaces": false, - "editor.tabSize": 4, + "editor.tabSize": 4 }, + "files.encoding": "utf8", "files.eol": "\n", "files.insertFinalNewline": true, "files.trimFinalNewlines": true, "files.trimTrailingWhitespace": true, "go.formatTool": "gofumpt", + "go.formatFlags": [ + "-extra" + ], "go.lintTool": "golangci-lint", "go.useLanguageServer": true, "gopls": { "analyses": { "composites": false, - "deepequalerrors": false + "fieldalignment": false }, "formatting.gofumpt": true, - "formatting.local": "github.com/percona" + "formatting.local": "github.com/percona", + "ui.semanticTokens": true }, - "groupImports.onSave": true, "shellformat.flag": "-bn -ci -s" } From 759367e5b23722de65a55efca11477d90ed0a249 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 24 Jun 2024 10:56:50 +0200 Subject: [PATCH 072/203] add sdk/cli.ClusterStatus() --- cmd/pbm/status.go | 196 +++++++++++----------------------------------- pbm/topo/agent.go | 11 ++- pbm/topo/topo.go | 4 +- sdk/cli/status.go | 185 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 236 insertions(+), 160 deletions(-) create mode 100644 sdk/cli/status.go diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go index c43a4b208..0e7c0a323 100644 --- a/cmd/pbm/status.go +++ b/cmd/pbm/status.go @@ -5,14 +5,11 @@ import ( 
"encoding/json" "fmt" stdlog "log" - "net/url" "sort" "strings" - "sync" "time" "go.mongodb.org/mongo-driver/bson/primitive" - "golang.org/x/sync/errgroup" "github.com/percona/percona-backup-mongodb/pbm/backup" "github.com/percona/percona-backup-mongodb/pbm/config" @@ -30,6 +27,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/util" "github.com/percona/percona-backup-mongodb/pbm/version" "github.com/percona/percona-backup-mongodb/sdk" + "github.com/percona/percona-backup-mongodb/sdk/cli" ) type statusOptions struct { @@ -117,7 +115,7 @@ func status( { "cluster", "Cluster", nil, func(ctx context.Context, _ connect.Client) (fmt.Stringer, error) { - return clusterStatus(ctx, pbm, makeGetRSMembers(curi)) + return clusterStatus(ctx, pbm, cli.RSConfGetter(curi)) }, }, {"pitr", "PITR incremental backup", nil, getPitrStatus}, @@ -175,34 +173,36 @@ type rs struct { Nodes []node `json:"nodes"` } -type RSRole string - -const ( - RolePrimary RSRole = "P" - RoleSecondary RSRole = "S" - RoleArbiter RSRole = "A" - RoleHidden RSRole = "H" - RoleDelayed RSRole = "D" -) - type node struct { - Host string `json:"host"` - Ver string `json:"agent"` - Role RSRole `json:"role"` - OK bool `json:"ok"` - Errs []string `json:"errors,omitempty"` + Host string `json:"host"` + Ver string `json:"agent"` + Role cli.RSRole `json:"role"` + OK bool `json:"ok"` + Errs []string `json:"errors,omitempty"` } func (n node) String() string { - if n.Role == RoleArbiter { + if n.Role == cli.RoleArbiter { return fmt.Sprintf("%s [!Arbiter]: arbiter node is not supported", n.Host) } - s := fmt.Sprintf("%s [%s]: pbm-agent %v", n.Host, n.Role, n.Ver) + role := n.Role + if role == "" { + role = " " + } + ver := n.Ver + if ver == "" { + ver = "NOT FOUND" + } + s := fmt.Sprintf("%s [%s]: pbm-agent %v", n.Host, role, ver) if n.OK { s += " OK" return s } + if len(n.Errs) == 0 { + return s + } + s += " FAILED status:" for _, e := range n.Errs { s += fmt.Sprintf("\n > ERROR with %s", e) @@ -225,143 +225,37 @@ func (c cluster) String() string { func clusterStatus( ctx context.Context, pbm sdk.Client, - getRSMembers getRSMembersFunc, + confGetter cli.RSConfGetter, ) (fmt.Stringer, error) { - clusterMembers, err := sdk.ClusterMembers(ctx, pbm) - if err != nil { - return nil, errors.Wrap(err, "get agent statuses") - } - agentStatuses, err := sdk.AgentStatuses(ctx, pbm) - if err != nil { - return nil, errors.Wrap(err, "get cluster members") - } - clusterTime, err := sdk.ClusterTime(ctx, pbm) + status, err := cli.ClusterStatus(ctx, pbm, confGetter) if err != nil { - return nil, errors.Wrap(err, "read cluster time") - } - - agentMap := make(map[topo.ReplsetName]map[string]*sdk.AgentStatus, len(clusterMembers)) - for i := range agentStatuses { - agent := &agentStatuses[i] - rs, ok := agentMap[agent.RS] - if !ok { - rs = make(map[string]*topo.AgentStat) - agentMap[agent.RS] = rs - } - - rs[agent.Node] = agent - agentMap[agent.RS] = rs - } - - eg, ctx := errgroup.WithContext(ctx) - m := sync.Mutex{} - - var ret cluster - for _, c := range clusterMembers { - eg.Go(func() error { - rsMembers, err := getRSMembers(ctx, c.Host) - if err != nil { - return errors.Wrapf(err, "get replset status for `%s`", c.RS) - } - - lrs := rs{ - Name: c.RS, - Nodes: make([]node, len(rsMembers)), - } - - for i, member := range rsMembers { - node := &lrs.Nodes[i] - node.Host = member.Host - - rsAgents := agentMap[c.RS] - if rsAgents == nil { - node.Ver = "NOT FOUND" - continue + return nil, errors.Wrap(err, "get cluster status") + } + + rv := make(cluster, 0, 
len(status)) + for rsName, rsMembers := range status { + nodes := make([]node, len(rsMembers)) + for i, agent := range rsMembers { + node := &nodes[i] + node.Host = agent.Host + node.Ver = agent.Ver + node.Role = agent.Role + node.OK = agent.OK + + if len(agent.Errs) != 0 { + node.Errs = make([]string, len(agent.Errs)) + for j, e := range agent.Errs { + node.Errs[j] = e.Error() } - agent := rsAgents[member.Host] - if agent == nil { - node.Ver = "NOT FOUND" - continue - } - - node.Ver = "v" + agent.AgentVer - - switch { - case agent.State == 1: // agent.StateStr == "PRIMARY" - node.Role = RolePrimary - case agent.State == 7: // agent.StateStr == "ARBITER" - node.Role = RoleArbiter - case agent.State == 2: // agent.StateStr == "SECONDARY" - if agent.DelaySecs != 0 { - node.Role = RoleDelayed - } else if agent.Hidden { - node.Role = RoleHidden - } else { - node.Role = RoleSecondary - } - default: - // unexpected state. show actual state - node.Role = RSRole(agent.StateStr) - } - - if agent.IsStale(clusterTime) { - node.Errs = []string{ - fmt.Sprintf("ERROR: lost agent, last heartbeat: %v", agent.Heartbeat.T), - } - continue - } - - node.OK, node.Errs = agent.OK() } - - m.Lock() - ret = append(ret, lrs) - m.Unlock() - return nil + } + rv = append(rv, rs{ + Name: rsName, + Nodes: nodes, }) } - err = eg.Wait() - return ret, err -} - -type getRSMembersFunc func(ctx context.Context, hosts string) ([]topo.RSMember, error) - -func makeGetRSMembers(uri string) getRSMembersFunc { - return func(ctx context.Context, hosts string) ([]topo.RSMember, error) { - var host string - chost := strings.Split(hosts, "/") - if len(chost) > 1 { - host = chost[1] - } else { - host = chost[0] - } - - curi, err := url.Parse("mongodb://" + strings.Replace(uri, "mongodb://", "", 1)) - if err != nil { - return nil, errors.Wrapf(err, "parse mongo-uri '%s'", uri) - } - - // Preserving the `replicaSet` parameter will cause an error - // while connecting to the ConfigServer (mismatched replicaset names) - query := curi.Query() - query.Del("replicaSet") - curi.RawQuery = query.Encode() - curi.Host = host - - conn, err := connect.MongoConnect(ctx, curi.String(), connect.AppName("pbm-status")) - if err != nil { - return nil, errors.Wrap(err, "connect") - } - defer conn.Disconnect(context.Background()) - - rsConfig, err := topo.GetReplSetConfig(ctx, conn) - if err != nil { - return nil, errors.Wrapf(err, "get replset config") - } - - return rsConfig.Members, nil - } + return rv, nil } type pitrStat struct { diff --git a/pbm/topo/agent.go b/pbm/topo/agent.go index 063e50a7f..3e616a8b1 100644 --- a/pbm/topo/agent.go +++ b/pbm/topo/agent.go @@ -2,7 +2,6 @@ package topo import ( "context" - "fmt" "strconv" "strings" @@ -87,20 +86,20 @@ func (s *AgentStat) IsStale(t primitive.Timestamp) bool { return s.Heartbeat.T+defs.StaleFrameSec < t.T } -func (s *AgentStat) OK() (bool, []string) { - var errs []string +func (s *AgentStat) OK() (bool, []error) { + var errs []error ok := true if !s.PBMStatus.OK { ok = false - errs = append(errs, fmt.Sprintf("PBM connection: %s", s.PBMStatus.Err)) + errs = append(errs, errors.Errorf("PBM connection: %s", s.PBMStatus.Err)) } if !s.NodeStatus.OK { ok = false - errs = append(errs, fmt.Sprintf("node connection: %s", s.NodeStatus.Err)) + errs = append(errs, errors.Errorf("node connection: %s", s.NodeStatus.Err)) } if !s.StorageStatus.OK { ok = false - errs = append(errs, fmt.Sprintf("storage: %s", s.StorageStatus.Err)) + errs = append(errs, errors.Errorf("storage: %s", s.StorageStatus.Err)) } return ok, 
errs diff --git a/pbm/topo/topo.go b/pbm/topo/topo.go index ab4f2c493..a4b02bed5 100644 --- a/pbm/topo/topo.go +++ b/pbm/topo/topo.go @@ -117,9 +117,7 @@ func collectTopoCheckErrors( errs = append(errs, errors.New(a.Err)) } if ok, estrs := a.OK(); !ok { - for _, e := range estrs { - errs = append(errs, errors.New(e)) - } + errs = append(errs, estrs...) } const maxReplicationLag uint32 = 35 diff --git a/sdk/cli/status.go b/sdk/cli/status.go new file mode 100644 index 000000000..f22352a4f --- /dev/null +++ b/sdk/cli/status.go @@ -0,0 +1,185 @@ +package cli + +import ( + "context" + "fmt" + "net/url" + "strings" + "sync" + + "go.mongodb.org/mongo-driver/bson/primitive" + "golang.org/x/sync/errgroup" + + "github.com/percona/percona-backup-mongodb/pbm/connect" + "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/topo" + "github.com/percona/percona-backup-mongodb/sdk" +) + +type LostAgentError struct { + heartbeat primitive.Timestamp +} + +func (e LostAgentError) Error() string { + return fmt.Sprintf("lost agent, last heartbeat: %v", e.heartbeat.T) +} + +type RSRole string + +const ( + RolePrimary RSRole = "P" + RoleSecondary RSRole = "S" + RoleArbiter RSRole = "A" + RoleHidden RSRole = "H" + RoleDelayed RSRole = "D" +) + +type Node struct { + Host string + Ver string + Role RSRole + OK bool + Errs []error +} + +func (n Node) IsLost() bool { + if len(n.Errs) == 0 { + return false + } + + lostErr := LostAgentError{} + for _, err := range n.Errs { + if errors.As(err, &lostErr) { + return true + } + } + + return false +} + +func ClusterStatus( + ctx context.Context, + pbm sdk.Client, + confGetter RSConfGetter, +) (map[string][]Node, error) { + clusterMembers, err := sdk.ClusterMembers(ctx, pbm) + if err != nil { + return nil, errors.Wrap(err, "get agent statuses") + } + agentStatuses, err := sdk.AgentStatuses(ctx, pbm) + if err != nil { + return nil, errors.Wrap(err, "get cluster members") + } + clusterTime, err := sdk.ClusterTime(ctx, pbm) + if err != nil { + return nil, errors.Wrap(err, "read cluster time") + } + + agentMap := make(map[topo.ReplsetName]map[string]*sdk.AgentStatus, len(clusterMembers)) + for i := range agentStatuses { + agent := &agentStatuses[i] + rs, ok := agentMap[agent.RS] + if !ok { + rs = make(map[string]*topo.AgentStat) + agentMap[agent.RS] = rs + } + + rs[agent.Node] = agent + agentMap[agent.RS] = rs + } + + eg, ctx := errgroup.WithContext(ctx) + m := sync.Mutex{} + + pbmCluster := make(map[string][]Node) + for _, c := range clusterMembers { + eg.Go(func() error { + rsConf, err := confGetter.Get(ctx, c.Host) + if err != nil { + return errors.Wrapf(err, "get replset status for `%s`", c.RS) + } + + nodes := make([]Node, len(rsConf.Members)) + for i, member := range rsConf.Members { + node := &nodes[i] + node.Host = member.Host + + rsAgents := agentMap[c.RS] + if rsAgents == nil { + continue + } + agent := rsAgents[member.Host] + if agent == nil { + continue + } + + node.Ver = "v" + agent.AgentVer + + switch { + case agent.State == 1: // agent.StateStr == "PRIMARY" + node.Role = RolePrimary + case agent.State == 7: // agent.StateStr == "ARBITER" + node.Role = RoleArbiter + case agent.State == 2: // agent.StateStr == "SECONDARY" + if agent.DelaySecs != 0 { + node.Role = RoleDelayed + } else if agent.Hidden { + node.Role = RoleHidden + } else { + node.Role = RoleSecondary + } + default: + // unexpected state. 
show actual state + node.Role = RSRole(agent.StateStr) + } + + if agent.IsStale(clusterTime) { + node.Errs = []error{LostAgentError{agent.Heartbeat}} + continue + } + + node.OK, node.Errs = agent.OK() + } + + m.Lock() + pbmCluster[c.RS] = nodes + m.Unlock() + return nil + }) + } + + err = eg.Wait() + return pbmCluster, err +} + +type RSConfGetter string + +func (g RSConfGetter) Get(ctx context.Context, host string) (*topo.RSConfig, error) { + rsName, host, ok := strings.Cut(host, "/") + if !ok { + host = rsName + } + + if !strings.HasPrefix(string(g), "mongodb://") { + g = "mongodb://" + g + } + curi, err := url.Parse(string(g)) + if err != nil { + return nil, errors.Wrapf(err, "parse mongo-uri '%s'", g) + } + + // Preserving the `replicaSet` parameter will cause an error + // while connecting to the ConfigServer (mismatched replicaset names) + query := curi.Query() + query.Del("replicaSet") + curi.RawQuery = query.Encode() + curi.Host = host + + conn, err := connect.MongoConnect(ctx, curi.String(), connect.AppName("pbm-sdk")) + if err != nil { + return nil, errors.Wrap(err, "connect") + } + defer conn.Disconnect(context.Background()) + + return topo.GetReplSetConfig(ctx, conn) +} From 285c8e85501d9dba90dced2c5dc0504c40610da2 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 24 Jun 2024 21:17:47 +0200 Subject: [PATCH 073/203] add (sdk.Client).OpLocks() --- pbm/ctrl/cmd.go | 2 +- pbm/lock/lock.go | 16 +++--------- sdk/impl.go | 65 +++++++++++++++++++++++++++++++++++------------- sdk/sdk.go | 30 ++++++++++++++++++---- 4 files changed, 77 insertions(+), 36 deletions(-) diff --git a/pbm/ctrl/cmd.go b/pbm/ctrl/cmd.go index d9dcf3295..aa7e90876 100644 --- a/pbm/ctrl/cmd.go +++ b/pbm/ctrl/cmd.go @@ -55,7 +55,7 @@ func (c Command) String() string { type OPID primitive.ObjectID -func OPIDfromStr(s string) (OPID, error) { +func ParseOPID(s string) (OPID, error) { o, err := primitive.ObjectIDFromHex(s) if err != nil { return OPID(primitive.NilObjectID), err diff --git a/pbm/lock/lock.go b/pbm/lock/lock.go index 9445f956e..6cdadafb9 100644 --- a/pbm/lock/lock.go +++ b/pbm/lock/lock.go @@ -234,22 +234,12 @@ func GetOpLocks(ctx context.Context, m connect.Client, lh *LockHeader) ([]LockDa } func getLocks(ctx context.Context, lh *LockHeader, cl *mongo.Collection) ([]LockData, error) { - var locks []LockData - cur, err := cl.Find(ctx, lh) if err != nil { return nil, errors.Wrap(err, "get locks") } - for cur.Next(ctx) { - var l LockData - err := cur.Decode(&l) - if err != nil { - return nil, errors.Wrap(err, "lock decode") - } - - locks = append(locks, l) - } - - return locks, cur.Err() + var locks []LockData + err = cur.All(ctx, &locks) + return locks, err } diff --git a/sdk/impl.go b/sdk/impl.go index 415f38738..dc8640787 100644 --- a/sdk/impl.go +++ b/sdk/impl.go @@ -44,7 +44,7 @@ func (c *clientImpl) Close(ctx context.Context) error { } func (c *clientImpl) CommandInfo(ctx context.Context, id CommandID) (*Command, error) { - opid, err := ctrl.OPIDfromStr(string(id)) + opid, err := ctrl.ParseOPID(string(id)) if err != nil { return nil, ErrInvalidCommandID } @@ -66,10 +66,6 @@ func (c *clientImpl) CommandInfo(ctx context.Context, id CommandID) (*Command, e return cmd, nil } -func (c *clientImpl) CurrentLocks(ctx context.Context) ([]Lock, error) { - return nil, ErrNotImplemented -} - func (c *clientImpl) GetConfig(ctx context.Context) (*Config, error) { return config.GetConfig(ctx, c.conn) } @@ -101,9 +97,30 @@ func (c *clientImpl) GetBackupByName( ) (*BackupMetadata, error) { bcp, err := 
backup.NewDBManager(c.conn).GetBackupByName(ctx, name)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "get backup meta")
 	}

+	return c.getBackupHelper(ctx, bcp, options)
+}
+
+func (c *clientImpl) GetBackupByOpID(
+	ctx context.Context,
+	opid string,
+	options GetBackupByNameOptions,
+) (*BackupMetadata, error) {
+	bcp, err := backup.NewDBManager(c.conn).GetBackupByOpID(ctx, opid)
+	if err != nil {
+		return nil, errors.Wrap(err, "get backup meta")
+	}
+
+	return c.getBackupHelper(ctx, bcp, options)
+}
+
+func (c *clientImpl) getBackupHelper(
+	ctx context.Context,
+	bcp *BackupMetadata,
+	options GetBackupByNameOptions,
+) (*BackupMetadata, error) {
 	if options.FetchIncrements && bcp.Type == IncrementalBackup {
 		if bcp.SrcBackup != "" {
 			return nil, ErrNotBaseIncrement
@@ -123,7 +140,7 @@ func (c *clientImpl) GetBackupByName(
 	}

 	if options.FetchFilelist {
-		err = fillFilelistForBackup(ctx, c.conn, bcp)
+		err := fillFilelistForBackup(ctx, c.conn, bcp)
 		if err != nil {
 			return nil, errors.Wrap(err, "fetch filelist")
 		}
@@ -234,6 +251,10 @@ func (c *clientImpl) GetRestoreByName(ctx context.Context, name string) (*Restor
 	return restore.GetRestoreMeta(ctx, c.conn, name)
 }

+func (c *clientImpl) GetRestoreByOpID(ctx context.Context, opid string) (*RestoreMetadata, error) {
+	return restore.GetRestoreMetaByOPID(ctx, c.conn, opid)
+}
+
 func (c *clientImpl) SyncFromStorage(ctx context.Context) (CommandID, error) {
 	opid, err := ctrl.SendResync(ctx, c.conn)
 	return CommandID(opid.String()), err
@@ -318,25 +339,35 @@ func (l lockImpl) Heartbeat() Timestamp {
 	return l.LockData.Heartbeat
 }

-func (c *clientImpl) CurrentOperations(ctx context.Context) ([]Lock, error) {
-	rv := []Lock{}
+var ErrStaleHearbeat = errors.New("stale heartbeat")

+func (c *clientImpl) OpLocks(ctx context.Context) ([]OpLock, error) {
 	locks, err := lock.GetLocks(ctx, c.conn, &lock.LockHeader{})
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "get locks")
 	}
-	for _, l := range locks {
-		rv = append(rv, lockImpl{l})
+	if len(locks) == 0 {
+		// no current op
+		return nil, nil
 	}

-	locks, err = lock.GetOpLocks(ctx, c.conn, &lock.LockHeader{})
+	clusterTime, err := ClusterTime(ctx, c)
 	if err != nil {
-		return nil, err
-	}
-	for _, l := range locks {
-		rv = append(rv, lockImpl{l})
+		return nil, errors.Wrap(err, "get cluster time")
 	}

+	rv := make([]OpLock, len(locks))
+	for i := range locks {
+		rv[i].OpID = CommandID(locks[i].OPID)
+		rv[i].Cmd = locks[i].Type
+		rv[i].Replset = locks[i].Replset
+		rv[i].Node = locks[i].Node
+		rv[i].Heartbeat = locks[i].Heartbeat
+
+		if rv[i].Heartbeat.T+defs.StaleFrameSec < clusterTime.T {
+			rv[i].err = ErrStaleHearbeat
+		}
+	}
 	return rv, nil
 }
diff --git a/sdk/sdk.go b/sdk/sdk.go
index c12282218..e9ebec228 100644
--- a/sdk/sdk.go
+++ b/sdk/sdk.go
@@ -64,11 +64,6 @@ type (
 	CleanupReport = backup.CleanupInfo
 )

-type Lock interface {
-	Type() string
-	CommandID() string
-}
-
 type LogicalBackupOptions struct {
 	CompressionType  CompressionType
 	CompressionLevel CompressionLevel
@@ -101,18 +96,43 @@ type DeleteBackupBeforeOptions struct {

 type Command = ctrl.Cmd

+// OpLock represents internal PBM lock.
+//
+// Some commands can have many locks (one lock per replset).
+type OpLock struct {
+	// OpID is the ID of the command that acquired the lock.
+	OpID CommandID `json:"opid,omitempty"`
+	// Cmd is the type of command.
+	Cmd ctrl.Command `json:"cmd,omitempty"`
+	// Replset is the name of a replset that acquired the lock.
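+	// For example: "rs0".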
+ Replset string `json:"rs,omitempty"` + // Node is `host:port` pair of an agent that acquired the lock. + Node string `json:"node,omitempty"` + // Heartbeat is the last cluster time seen by an agent that acquired the lock. + Heartbeat primitive.Timestamp `json:"hb"` + + err error +} + +func (l *OpLock) Err() error { + return l.err +} + type Client interface { Close(ctx context.Context) error CommandInfo(ctx context.Context, id CommandID) (*Command, error) + OpLocks(ctx context.Context) ([]OpLock, error) GetConfig(ctx context.Context) (*Config, error) GetAllBackups(ctx context.Context) ([]BackupMetadata, error) GetBackupByName(ctx context.Context, name string, options GetBackupByNameOptions) (*BackupMetadata, error) + GetBackupByOpID(ctx context.Context, opid string, options GetBackupByNameOptions) (*BackupMetadata, error) GetAllRestores(ctx context.Context, m connect.Client, options GetAllRestoresOptions) ([]RestoreMetadata, error) GetRestoreByName(ctx context.Context, name string) (*RestoreMetadata, error) + GetRestoreByOpID(ctx context.Context, opid string) (*RestoreMetadata, error) CancelBackup(ctx context.Context) (CommandID, error) From 4eb47851458c57eb54ec78401493b5b221f62f9d Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 24 Jun 2024 21:18:12 +0200 Subject: [PATCH 074/203] use sdk in pbm cli --- cmd/pbm/list.go | 50 ++++++++++++++++++++++++++++++----- cmd/pbm/main.go | 53 +++---------------------------------- cmd/pbm/status.go | 67 ++++++++++++++++++++++------------------------- 3 files changed, 80 insertions(+), 90 deletions(-) diff --git a/cmd/pbm/list.go b/cmd/pbm/list.go index 1d15bb7ed..90a0ab6fc 100644 --- a/cmd/pbm/list.go +++ b/cmd/pbm/list.go @@ -15,7 +15,6 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/ctrl" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" - "github.com/percona/percona-backup-mongodb/pbm/lock" "github.com/percona/percona-backup-mongodb/pbm/oplog" "github.com/percona/percona-backup-mongodb/pbm/topo" "github.com/percona/percona-backup-mongodb/pbm/util" @@ -103,18 +102,57 @@ func runList(ctx context.Context, conn connect.Client, pbm sdk.Client, l *listOp return nil, errors.Wrap(err, "cannot parse replset mapping") } + // show message and skip when resync is running + lk, err := findLock(ctx, pbm) + if err == nil && lk != nil && lk.Cmd == ctrl.CmdResync { + const msg = "Storage resync is running. Backups list will be available after sync finishes." + return outMsg{msg}, nil + } + if l.restore { return restoreList(ctx, conn, pbm, int64(l.size)) } - // show message and skip when resync is running - lk, err := findLock(ctx, conn, lock.GetLocks) - if err == nil && lk != nil && lk.Type == ctrl.CmdResync { - return outMsg{"Storage resync is running. Backups list will be available after sync finishes."}, nil - } return backupList(ctx, conn, l.size, l.full, l.unbacked, rsMap) } +func findLock(ctx context.Context, pbm sdk.Client) (*sdk.OpLock, error) { + locks, err := pbm.OpLocks(ctx) + if err != nil { + return nil, errors.Wrap(err, "get locks") + } + if len(locks) == 0 { + return nil, nil + } + + var lck *sdk.OpLock + for _, l := range locks { + if err := l.Err(); err != nil { + continue + } + + // Just check if all locks are for the same op + // + // It could happen that the healthy `lk` became stale by the time of this check + // or the op was finished and the new one was started. So the `l.Type != lk.Type` + // would be true but for the legit reason (no error). 
+ // But chances for that are quite low and on the next run of `pbm status` everything + // would be ok. So no reason to complicate code to avoid that. + if lck != nil && l.OpID != lck.OpID { + return nil, errors.Errorf("conflicting ops running: [%s/%s::%s-%s] [%s/%s::%s-%s]. "+ + "This conflict may naturally resolve after 10 seconds", + l.Replset, l.Node, l.Cmd, l.OpID, + lck.Replset, lck.Node, lck.Cmd, lck.OpID, + ) + } + + l := l + lck = &l + } + + return lck, nil +} + func restoreList(ctx context.Context, conn connect.Client, pbm sdk.Client, limit int64) (*restoreListOut, error) { opts := sdk.GetAllRestoresOptions{Limit: limit} rlist, err := pbm.GetAllRestores(ctx, conn, opts) diff --git a/cmd/pbm/main.go b/cmd/pbm/main.go index 312de2daf..aca23a915 100644 --- a/cmd/pbm/main.go +++ b/cmd/pbm/main.go @@ -14,7 +14,6 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/backup" "github.com/percona/percona-backup-mongodb/pbm/compress" "github.com/percona/percona-backup-mongodb/pbm/connect" - "github.com/percona/percona-backup-mongodb/pbm/ctrl" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/lock" @@ -679,59 +678,15 @@ func parseDateT(v string) (time.Time, error) { return time.Time{}, errInvalidFormat } -type findLockFn = func(ctx context.Context, conn connect.Client, lh *lock.LockHeader) ([]lock.LockData, error) - -func findLock(ctx context.Context, conn connect.Client, fn findLockFn) (*lock.LockData, error) { - locks, err := fn(ctx, conn, &lock.LockHeader{}) - if err != nil { - return nil, errors.Wrap(err, "get locks") - } - - ct, err := topo.GetClusterTime(ctx, conn) - if err != nil { - return nil, errors.Wrap(err, "get cluster time") - } - - var lck *lock.LockData - for _, l := range locks { - // We don't care about the PITR slicing here. It is a subject of other status sections - if l.Type == ctrl.CmdPITR || l.Heartbeat.T+defs.StaleFrameSec < ct.T { - continue - } - - // Just check if all locks are for the same op - // - // It could happen that the healthy `lk` became stale by the time of this check - // or the op was finished and the new one was started. So the `l.Type != lk.Type` - // would be true but for the legit reason (no error). - // But chances for that are quite low and on the next run of `pbm status` everything - // would be ok. So no reason to complicate code to avoid that. - if lck != nil && l.OPID != lck.OPID { - if err != nil { - return nil, errors.Errorf("conflicting ops running: [%s/%s::%s-%s] [%s/%s::%s-%s]. 
"+ - "This conflict may naturally resolve after 10 seconds", - l.Replset, l.Node, l.Type, l.OPID, - lck.Replset, lck.Node, lck.Type, lck.OPID, - ) - } - } - - l := l - lck = &l - } - - return lck, nil -} - type concurentOpError struct { op *lock.LockHeader } -func (e concurentOpError) Error() string { +func (e *concurentOpError) Error() string { return fmt.Sprintf("another operation in progress, %s/%s [%s/%s]", e.op.Type, e.op.OPID, e.op.Replset, e.op.Node) } -func (e concurentOpError) As(err any) bool { +func (e *concurentOpError) As(err any) bool { if err == nil { return false } @@ -745,7 +700,7 @@ func (e concurentOpError) As(err any) bool { return true } -func (e concurentOpError) MarshalJSON() ([]byte, error) { +func (e *concurentOpError) MarshalJSON() ([]byte, error) { s := make(map[string]interface{}) s["error"] = "another operation in progress" s["operation"] = e.op @@ -768,7 +723,7 @@ func checkConcurrentOp(ctx context.Context, conn connect.Client) error { // and leave it for agents to deal with. for _, l := range locks { if l.Heartbeat.T+defs.StaleFrameSec >= ts.T { - return concurentOpError{&l.LockHeader} + return &concurentOpError{&l.LockHeader} } } diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go index 0e7c0a323..cff54bb87 100644 --- a/cmd/pbm/status.go +++ b/cmd/pbm/status.go @@ -20,7 +20,6 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/lock" "github.com/percona/percona-backup-mongodb/pbm/log" "github.com/percona/percona-backup-mongodb/pbm/oplog" - "github.com/percona/percona-backup-mongodb/pbm/restore" "github.com/percona/percona-backup-mongodb/pbm/slicer" "github.com/percona/percona-backup-mongodb/pbm/storage" "github.com/percona/percona-backup-mongodb/pbm/topo" @@ -106,10 +105,6 @@ func status( return nil, errors.Wrap(err, "cannot parse replset mapping") } - storageStatFn := func(ctx context.Context, conn connect.Client) (fmt.Stringer, error) { - return getStorageStat(ctx, conn, pbm, rsMap) - } - out := statusOut{ data: []*statusSect{ { @@ -119,8 +114,18 @@ func status( }, }, {"pitr", "PITR incremental backup", nil, getPitrStatus}, - {"running", "Currently running", nil, getCurrOps}, - {"backups", "Backups", nil, storageStatFn}, + { + "running", "Currently running", nil, + func(ctx context.Context, _ connect.Client) (fmt.Stringer, error) { + return getCurrOps(ctx, pbm) + }, + }, + { + "backups", "Backups", nil, + func(ctx context.Context, conn connect.Client) (fmt.Stringer, error) { + return getStorageStat(ctx, conn, pbm, rsMap) + }, + }, }, pretty: pretty, } @@ -381,10 +386,10 @@ LOOP: type currOp struct { Type ctrl.Command `json:"type,omitempty"` + OPID string `json:"opID,omitempty"` Name string `json:"name,omitempty"` StartTS int64 `json:"startTS,omitempty"` Status string `json:"status,omitempty"` - OPID string `json:"opID,omitempty"` } func (c currOp) String() string { @@ -403,62 +408,54 @@ func (c currOp) String() string { } } -func getCurrOps(ctx context.Context, conn connect.Client) (fmt.Stringer, error) { - var r currOp - - // check for ops - lk, err := findLock(ctx, conn, lock.GetLocks) +func getCurrOps(ctx context.Context, pbm sdk.Client) (fmt.Stringer, error) { + locks, err := pbm.OpLocks(ctx) if err != nil { - return r, errors.Wrap(err, "get ops") + return nil, errors.Wrap(err, "get locks") } - - if lk == nil { - // check for delete ops - lk, err = findLock(ctx, conn, lock.GetOpLocks) - if err != nil { - return r, errors.Wrap(err, "get delete ops") - } - } - - if lk == nil { - return r, nil + if len(locks) == 0 { + return currOp{}, nil } - r = 
currOp{ - Type: lk.Type, - OPID: lk.OPID, + r := currOp{ + Type: locks[0].Cmd, + OPID: string(locks[0].OpID), } - // reaching here means no conflict operation, hence all locks are the same, - // hence any lock in `lk` contains info on the current op - switch r.Type { + switch locks[0].Cmd { case ctrl.CmdBackup: - bcp, err := backup.GetBackupByOPID(ctx, conn, r.OPID) + bcp, err := pbm.GetBackupByOpID(ctx, r.OPID, sdk.GetBackupByNameOptions{}) if err != nil { return r, errors.Wrap(err, "get backup info") } + r.Name = bcp.Name r.StartTS = bcp.StartTS - r.Status = string(bcp.Status) + switch bcp.Status { case defs.StatusRunning: r.Status = "snapshot backup" case defs.StatusDumpDone: r.Status = "oplog backup" + default: + r.Status = string(bcp.Status) } case ctrl.CmdRestore: - rst, err := restore.GetRestoreMetaByOPID(ctx, conn, r.OPID) + rst, err := pbm.GetRestoreByOpID(ctx, r.OPID) if err != nil { return r, errors.Wrap(err, "get restore info") } + r.Name = rst.Backup r.StartTS = rst.StartTS - r.Status = string(rst.Status) + switch rst.Status { case defs.StatusRunning: r.Status = "snapshot restore" case defs.StatusDumpDone: r.Status = "oplog restore" + default: + r.Status = string(rst.Status) } } From e3088d6447eb4f0536ef0451bd4600026188db96 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 26 Jun 2024 13:57:02 +0200 Subject: [PATCH 075/203] remove sdk.Client interface --- cmd/pbm/backup.go | 4 +-- cmd/pbm/config.go | 2 +- cmd/pbm/delete.go | 14 ++++---- cmd/pbm/list.go | 6 ++-- cmd/pbm/main.go | 4 +-- cmd/pbm/status.go | 8 ++--- pbm/backup/delete.go | 77 ++++++++++++++++++++++++++------------------ pbm/backup/query.go | 8 ++--- pbm/log/history.go | 4 +-- sdk/cli/status.go | 2 +- sdk/impl.go | 67 ++++++++++++++++++++------------------ sdk/sdk.go | 69 ++++++++++++--------------------------- sdk/util.go | 12 +++---- 13 files changed, 133 insertions(+), 144 deletions(-) diff --git a/cmd/pbm/backup.go b/cmd/pbm/backup.go index 73148ee7b..581b2a42a 100644 --- a/cmd/pbm/backup.go +++ b/cmd/pbm/backup.go @@ -81,7 +81,7 @@ type descBcp struct { func runBackup( ctx context.Context, conn connect.Client, - pbm sdk.Client, + pbm *sdk.Client, b *backupOpts, outf outFormat, ) (fmt.Stringer, error) { @@ -348,7 +348,7 @@ func byteCountIEC(b int64) string { return fmt.Sprintf("%.1f %ciB", float64(b)/float64(div), "KMGTPE"[exp]) } -func describeBackup(ctx context.Context, conn connect.Client, pbm sdk.Client, b *descBcp) (fmt.Stringer, error) { +func describeBackup(ctx context.Context, conn connect.Client, pbm *sdk.Client, b *descBcp) (fmt.Stringer, error) { opts := sdk.GetBackupByNameOptions{} bcp, err := pbm.GetBackupByName(ctx, b.name, opts) if err != nil { diff --git a/cmd/pbm/config.go b/cmd/pbm/config.go index 30e38c92a..c323c7057 100644 --- a/cmd/pbm/config.go +++ b/cmd/pbm/config.go @@ -49,7 +49,7 @@ func (c confVals) String() string { return s } -func runConfig(ctx context.Context, conn connect.Client, pbm sdk.Client, c *configOpts) (fmt.Stringer, error) { +func runConfig(ctx context.Context, conn connect.Client, pbm *sdk.Client, c *configOpts) (fmt.Stringer, error) { switch { case len(c.set) > 0: var o confVals diff --git a/cmd/pbm/delete.go b/cmd/pbm/delete.go index 652b8ef94..da6920bd0 100644 --- a/cmd/pbm/delete.go +++ b/cmd/pbm/delete.go @@ -31,7 +31,7 @@ type deleteBcpOpts struct { func deleteBackup( ctx context.Context, conn connect.Client, - pbm sdk.Client, + pbm *sdk.Client, d *deleteBcpOpts, ) (fmt.Stringer, error) { if d.name == "" && d.olderThan == "" { @@ -65,7 +65,7 @@ func 
deleteBackup( return waitForDelete(ctx, conn, pbm, cid) } -func deleteBackupByName(ctx context.Context, pbm sdk.Client, d *deleteBcpOpts) (sdk.CommandID, error) { +func deleteBackupByName(ctx context.Context, pbm *sdk.Client, d *deleteBcpOpts) (sdk.CommandID, error) { opts := sdk.GetBackupByNameOptions{FetchIncrements: true} bcp, err := pbm.GetBackupByName(ctx, d.name, opts) if err != nil { @@ -112,7 +112,7 @@ func deleteBackupByName(ctx context.Context, pbm sdk.Client, d *deleteBcpOpts) ( return cid, errors.Wrap(err, "schedule delete") } -func deleteManyBackup(ctx context.Context, pbm sdk.Client, d *deleteBcpOpts) (sdk.CommandID, error) { +func deleteManyBackup(ctx context.Context, pbm *sdk.Client, d *deleteBcpOpts) (sdk.CommandID, error) { var ts primitive.Timestamp ts, err := parseOlderThan(d.olderThan) if err != nil { @@ -159,7 +159,7 @@ type deletePitrOpts struct { func deletePITR( ctx context.Context, conn connect.Client, - pbm sdk.Client, + pbm *sdk.Client, d *deletePitrOpts, ) (fmt.Stringer, error) { if d.olderThan == "" && !d.all { @@ -232,7 +232,7 @@ type cleanupOptions struct { dryRun bool } -func doCleanup(ctx context.Context, conn connect.Client, pbm sdk.Client, d *cleanupOptions) (fmt.Stringer, error) { +func doCleanup(ctx context.Context, conn connect.Client, pbm *sdk.Client, d *cleanupOptions) (fmt.Stringer, error) { ts, err := parseOlderThan(d.olderThan) if err != nil { return nil, errors.Wrap(err, "parse --older-than") @@ -400,7 +400,7 @@ func askConfirmation(question string) error { return errUserCanceled } -func waitForDelete(ctx context.Context, conn connect.Client, pbm sdk.Client, cid sdk.CommandID) (fmt.Stringer, error) { +func waitForDelete(ctx context.Context, conn connect.Client, pbm *sdk.Client, cid sdk.CommandID) (fmt.Stringer, error) { progressCtx, stopProgress := context.WithCancel(ctx) defer stopProgress() @@ -422,7 +422,7 @@ func waitForDelete(ctx context.Context, conn connect.Client, pbm sdk.Client, cid return nil, errors.Wrap(err, "get command info") } - var waitFn func(ctx context.Context, client sdk.Client) error + var waitFn func(ctx context.Context, client *sdk.Client) error switch cmd.Cmd { case ctrl.CmdCleanup: waitFn = sdk.WaitForCleanup diff --git a/cmd/pbm/list.go b/cmd/pbm/list.go index 90a0ab6fc..037866403 100644 --- a/cmd/pbm/list.go +++ b/cmd/pbm/list.go @@ -96,7 +96,7 @@ func (r restoreListOut) MarshalJSON() ([]byte, error) { return json.Marshal(r.list) } -func runList(ctx context.Context, conn connect.Client, pbm sdk.Client, l *listOpts) (fmt.Stringer, error) { +func runList(ctx context.Context, conn connect.Client, pbm *sdk.Client, l *listOpts) (fmt.Stringer, error) { rsMap, err := parseRSNamesMapping(l.rsMap) if err != nil { return nil, errors.Wrap(err, "cannot parse replset mapping") @@ -116,7 +116,7 @@ func runList(ctx context.Context, conn connect.Client, pbm sdk.Client, l *listOp return backupList(ctx, conn, l.size, l.full, l.unbacked, rsMap) } -func findLock(ctx context.Context, pbm sdk.Client) (*sdk.OpLock, error) { +func findLock(ctx context.Context, pbm *sdk.Client) (*sdk.OpLock, error) { locks, err := pbm.OpLocks(ctx) if err != nil { return nil, errors.Wrap(err, "get locks") @@ -153,7 +153,7 @@ func findLock(ctx context.Context, pbm sdk.Client) (*sdk.OpLock, error) { return lck, nil } -func restoreList(ctx context.Context, conn connect.Client, pbm sdk.Client, limit int64) (*restoreListOut, error) { +func restoreList(ctx context.Context, conn connect.Client, pbm *sdk.Client, limit int64) (*restoreListOut, error) { opts := 
sdk.GetAllRestoresOptions{Limit: limit} rlist, err := pbm.GetAllRestores(ctx, conn, opts) if err != nil { diff --git a/cmd/pbm/main.go b/cmd/pbm/main.go index aca23a915..ef12e1ede 100644 --- a/cmd/pbm/main.go +++ b/cmd/pbm/main.go @@ -379,7 +379,7 @@ func main() { defer cancel() var conn connect.Client - var pbm sdk.Client + var pbm *sdk.Client // we don't need pbm connection if it is `pbm describe-restore -c ...` // or `pbm restore-finish ` if describeRestoreOpts.cfg == "" && finishRestore.cfg == "" { @@ -658,7 +658,7 @@ func (c outCaption) MarshalJSON() ([]byte, error) { return b.Bytes(), nil } -func cancelBcp(ctx context.Context, pbm sdk.Client) (fmt.Stringer, error) { +func cancelBcp(ctx context.Context, pbm *sdk.Client) (fmt.Stringer, error) { if _, err := pbm.CancelBackup(ctx); err != nil { return nil, errors.Wrap(err, "send backup canceling") } diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go index cff54bb87..b279bc390 100644 --- a/cmd/pbm/status.go +++ b/cmd/pbm/status.go @@ -95,7 +95,7 @@ func (o statusOut) set(ctx context.Context, conn connect.Client, sfilter map[str func status( ctx context.Context, conn connect.Client, - pbm sdk.Client, + pbm *sdk.Client, curi string, opts statusOptions, pretty bool, @@ -229,7 +229,7 @@ func (c cluster) String() string { func clusterStatus( ctx context.Context, - pbm sdk.Client, + pbm *sdk.Client, confGetter cli.RSConfGetter, ) (fmt.Stringer, error) { status, err := cli.ClusterStatus(ctx, pbm, confGetter) @@ -408,7 +408,7 @@ func (c currOp) String() string { } } -func getCurrOps(ctx context.Context, pbm sdk.Client) (fmt.Stringer, error) { +func getCurrOps(ctx context.Context, pbm *sdk.Client) (fmt.Stringer, error) { locks, err := pbm.OpLocks(ctx) if err != nil { return nil, errors.Wrap(err, "get locks") @@ -544,7 +544,7 @@ func (s storageStat) String() string { func getStorageStat( ctx context.Context, conn connect.Client, - pbm sdk.Client, + pbm *sdk.Client, rsMap map[string]string, ) (fmt.Stringer, error) { var s storageStat diff --git a/pbm/backup/delete.go b/pbm/backup/delete.go index 22adb151c..403ffff47 100644 --- a/pbm/backup/delete.go +++ b/pbm/backup/delete.go @@ -55,26 +55,26 @@ type CleanupInfo struct { // DeleteBackup deletes backup with the given name from the current storage // and pbm database -func DeleteBackup(ctx context.Context, cc connect.Client, name string) error { - bcp, err := NewDBManager(cc).GetBackupByName(ctx, name) +func DeleteBackup(ctx context.Context, conn connect.Client, name string) error { + bcp, err := NewDBManager(conn).GetBackupByName(ctx, name) if err != nil { return errors.Wrap(err, "get backup meta") } if bcp.Type == defs.IncrementalBackup { - return deleteIncremetalChainImpl(ctx, cc, bcp) + return deleteIncremetalChainImpl(ctx, conn, bcp) } - return deleteBackupImpl(ctx, cc, bcp) + return deleteBackupImpl(ctx, conn, bcp) } -func deleteBackupImpl(ctx context.Context, cc connect.Client, bcp *BackupMeta) error { - err := CanDeleteBackup(ctx, cc, bcp) +func deleteBackupImpl(ctx context.Context, conn connect.Client, bcp *BackupMeta) error { + err := CanDeleteBackup(ctx, conn, bcp) if err != nil { return err } - stg, err := util.GetStorage(ctx, cc, log.LogEventFromContext(ctx)) + stg, err := util.GetStorage(ctx, conn, log.LogEventFromContext(ctx)) if err != nil { return errors.Wrap(err, "get storage") } @@ -84,7 +84,7 @@ func deleteBackupImpl(ctx context.Context, cc connect.Client, bcp *BackupMeta) e return errors.Wrap(err, "delete files from storage") } - _, err = cc.BcpCollection().DeleteOne(ctx, 
bson.M{"name": bcp.Name}) + _, err = conn.BcpCollection().DeleteOne(ctx, bson.M{"name": bcp.Name}) if err != nil { return errors.Wrap(err, "delete metadata from db") } @@ -92,13 +92,13 @@ func deleteBackupImpl(ctx context.Context, cc connect.Client, bcp *BackupMeta) e return nil } -func deleteIncremetalChainImpl(ctx context.Context, cc connect.Client, bcp *BackupMeta) error { - increments, err := FetchAllIncrements(ctx, cc, bcp) +func deleteIncremetalChainImpl(ctx context.Context, conn connect.Client, bcp *BackupMeta) error { + increments, err := FetchAllIncrements(ctx, conn, bcp) if err != nil { return err } - err = CanDeleteIncrementalChain(ctx, cc, bcp, increments) + err = CanDeleteIncrementalChain(ctx, conn, bcp, increments) if err != nil { return err } @@ -108,7 +108,7 @@ func deleteIncremetalChainImpl(ctx context.Context, cc connect.Client, bcp *Back all = append(all, bcps...) } - stg, err := util.GetStorage(ctx, cc, log.LogEventFromContext(ctx)) + stg, err := util.GetStorage(ctx, conn, log.LogEventFromContext(ctx)) if err != nil { return errors.Wrap(err, "get storage") } @@ -121,7 +121,7 @@ func deleteIncremetalChainImpl(ctx context.Context, cc connect.Client, bcp *Back return errors.Wrap(err, "delete files from storage") } - _, err = cc.BcpCollection().DeleteOne(ctx, bson.M{"name": bcp.Name}) + _, err = conn.BcpCollection().DeleteOne(ctx, bson.M{"name": bcp.Name}) if err != nil { return errors.Wrap(err, "delete metadata from db") } @@ -131,7 +131,7 @@ func deleteIncremetalChainImpl(ctx context.Context, cc connect.Client, bcp *Back return nil } -func CanDeleteBackup(ctx context.Context, cc connect.Client, bcp *BackupMeta) error { +func CanDeleteBackup(ctx context.Context, conn connect.Client, bcp *BackupMeta) error { if bcp.Status.IsRunning() { return ErrBackupInProgress } @@ -142,7 +142,7 @@ func CanDeleteBackup(ctx context.Context, cc connect.Client, bcp *BackupMeta) er return ErrIncrementalBackup } - required, err := isRequiredForOplogSlicing(ctx, cc, bcp.LastWriteTS, primitive.Timestamp{}) + required, err := isRequiredForOplogSlicing(ctx, conn, bcp.LastWriteTS, primitive.Timestamp{}) if err != nil { return errors.Wrap(err, "check pitr requirements") } @@ -155,7 +155,7 @@ func CanDeleteBackup(ctx context.Context, cc connect.Client, bcp *BackupMeta) er func CanDeleteIncrementalChain( ctx context.Context, - cc connect.Client, + conn connect.Client, base *BackupMeta, increments [][]*BackupMeta, ) error { @@ -178,7 +178,7 @@ func CanDeleteIncrementalChain( } } - required, err := isRequiredForOplogSlicing(ctx, cc, lastWrite, base.LastWriteTS) + required, err := isRequiredForOplogSlicing(ctx, conn, lastWrite, base.LastWriteTS) if err != nil { return errors.Wrap(err, "check pitr requirements") } @@ -189,7 +189,11 @@ func CanDeleteIncrementalChain( return nil } -func FetchAllIncrements(ctx context.Context, cc connect.Client, base *BackupMeta) ([][]*BackupMeta, error) { +func FetchAllIncrements( + ctx context.Context, + conn connect.Client, + base *BackupMeta, +) ([][]*BackupMeta, error) { if base.SrcBackup != "" { return nil, ErrNotBaseIncrement } @@ -198,7 +202,7 @@ func FetchAllIncrements(ctx context.Context, cc connect.Client, base *BackupMeta lastInc := base for { - cur, err := cc.BcpCollection().Find(ctx, bson.D{{"src_backup", lastInc.Name}}) + cur, err := conn.BcpCollection().Find(ctx, bson.D{{"src_backup", lastInc.Name}}) if err != nil { return nil, errors.Wrap(err, "query") } @@ -230,13 +234,17 @@ func FetchAllIncrements(ctx context.Context, cc connect.Client, base *BackupMeta 
return chain, nil } -func isSourceForIncremental(ctx context.Context, cc connect.Client, bcpName string) (bool, error) { +func isSourceForIncremental( + ctx context.Context, + conn connect.Client, + bcpName string, +) (bool, error) { // check if there is an increment based on the backup f := bson.D{ {"src_backup", bcpName}, {"status", bson.M{"$nin": bson.A{defs.StatusCancelled, defs.StatusError}}}, } - res := cc.BcpCollection().FindOne(ctx, f) + res := conn.BcpCollection().FindOne(ctx, f) if err := res.Err(); err != nil { if errors.Is(err, mongo.ErrNoDocuments) { // the backup is the last increment in the chain @@ -264,11 +272,11 @@ func isValidBaseSnapshot(bcp *BackupMeta) bool { func isRequiredForOplogSlicing( ctx context.Context, - cc connect.Client, + conn connect.Client, lw primitive.Timestamp, baseLW primitive.Timestamp, ) (bool, error) { - enabled, oplogOnly, err := config.IsPITREnabled(ctx, cc) + enabled, oplogOnly, err := config.IsPITREnabled(ctx, conn) if err != nil { return false, err } @@ -276,7 +284,7 @@ func isRequiredForOplogSlicing( return false, nil } - nextRestoreTime, err := FindBaseSnapshotLWAfter(ctx, cc, lw) + nextRestoreTime, err := FindBaseSnapshotLWAfter(ctx, conn, lw) if err != nil { return false, errors.Wrap(err, "find next snapshot") } @@ -285,12 +293,12 @@ func isRequiredForOplogSlicing( return false, nil } - prevRestoreTime, err := FindBaseSnapshotLWBefore(ctx, cc, lw, baseLW) + prevRestoreTime, err := FindBaseSnapshotLWBefore(ctx, conn, lw, baseLW) if err != nil { return false, errors.Wrap(err, "find previous snapshot") } - timelines, err := oplog.PITRTimelinesBetween(ctx, cc, prevRestoreTime, lw) + timelines, err := oplog.PITRTimelinesBetween(ctx, conn, prevRestoreTime, lw) if err != nil { return false, errors.Wrap(err, "get oplog range from previous backup") } @@ -303,8 +311,13 @@ func isRequiredForOplogSlicing( } // DeleteBackupBefore deletes backups which are older than given time -func DeleteBackupBefore(ctx context.Context, cc connect.Client, t time.Time, bcpType defs.BackupType) error { - backups, err := ListDeleteBackupBefore(ctx, cc, primitive.Timestamp{T: uint32(t.Unix())}, bcpType) +func DeleteBackupBefore( + ctx context.Context, + conn connect.Client, + t time.Time, + bcpType defs.BackupType, +) error { + backups, err := ListDeleteBackupBefore(ctx, conn, primitive.Timestamp{T: uint32(t.Unix())}, bcpType) if err != nil { return err } @@ -312,7 +325,7 @@ func DeleteBackupBefore(ctx context.Context, cc connect.Client, t time.Time, bcp return nil } - stg, err := util.GetStorage(ctx, cc, log.LogEventFromContext(ctx)) + stg, err := util.GetStorage(ctx, conn, log.LogEventFromContext(ctx)) if err != nil { return errors.Wrap(err, "get storage") } @@ -325,7 +338,7 @@ func DeleteBackupBefore(ctx context.Context, cc connect.Client, t time.Time, bcp return errors.Wrapf(err, "delete files from storage for %q", bcp.Name) } - _, err = cc.BcpCollection().DeleteOne(ctx, bson.M{"name": bcp.Name}) + _, err = conn.BcpCollection().DeleteOne(ctx, bson.M{"name": bcp.Name}) if err != nil { return errors.Wrapf(err, "delete metadata from db for %q", bcp.Name) } @@ -336,11 +349,11 @@ func DeleteBackupBefore(ctx context.Context, cc connect.Client, t time.Time, bcp func ListDeleteBackupBefore( ctx context.Context, - cc connect.Client, + conn connect.Client, ts primitive.Timestamp, bcpType defs.BackupType, ) ([]BackupMeta, error) { - info, err := MakeCleanupInfo(ctx, cc, ts) + info, err := MakeCleanupInfo(ctx, conn, ts) if err != nil { return nil, err } diff --git 
a/pbm/backup/query.go b/pbm/backup/query.go index 99ecdfe3c..dff58f83a 100644 --- a/pbm/backup/query.go +++ b/pbm/backup/query.go @@ -273,19 +273,19 @@ func getRecentBackup( func FindBaseSnapshotLWAfter( ctx context.Context, - cc connect.Client, + conn connect.Client, lw primitive.Timestamp, ) (primitive.Timestamp, error) { - return findBaseSnapshotLWImpl(ctx, cc, bson.M{"$gt": lw}, 1) + return findBaseSnapshotLWImpl(ctx, conn, bson.M{"$gt": lw}, 1) } func FindBaseSnapshotLWBefore( ctx context.Context, - cc connect.Client, + conn connect.Client, lw primitive.Timestamp, exclude primitive.Timestamp, ) (primitive.Timestamp, error) { - return findBaseSnapshotLWImpl(ctx, cc, bson.M{"$lt": lw, "$ne": exclude}, -1) + return findBaseSnapshotLWImpl(ctx, conn, bson.M{"$lt": lw, "$ne": exclude}, -1) } func findBaseSnapshotLWImpl( diff --git a/pbm/log/history.go b/pbm/log/history.go index d0205745a..55ca41d52 100644 --- a/pbm/log/history.go +++ b/pbm/log/history.go @@ -183,7 +183,7 @@ func fetch( func Follow( ctx context.Context, - cc connect.Client, + conn connect.Client, r *LogRequest, exactSeverity bool, ) (<-chan *Entry, <-chan error) { @@ -196,7 +196,7 @@ func Follow( opt := options.Find().SetCursorType(options.TailableAwait) - cur, err := cc.LogCollection().Find(ctx, filter, opt) + cur, err := conn.LogCollection().Find(ctx, filter, opt) if err != nil { errC <- errors.Wrap(err, "query") return diff --git a/sdk/cli/status.go b/sdk/cli/status.go index f22352a4f..4f116600a 100644 --- a/sdk/cli/status.go +++ b/sdk/cli/status.go @@ -59,7 +59,7 @@ func (n Node) IsLost() bool { func ClusterStatus( ctx context.Context, - pbm sdk.Client, + pbm *sdk.Client, confGetter RSConfGetter, ) (map[string][]Node, error) { clusterMembers, err := sdk.ClusterMembers(ctx, pbm) diff --git a/sdk/impl.go b/sdk/impl.go index dc8640787..fc898863f 100644 --- a/sdk/impl.go +++ b/sdk/impl.go @@ -35,15 +35,15 @@ var ( ErrBaseForPITR = backup.ErrBaseForPITR ) -type clientImpl struct { +type Client struct { conn connect.Client } -func (c *clientImpl) Close(ctx context.Context) error { +func (c *Client) Close(ctx context.Context) error { return c.conn.Disconnect(ctx) } -func (c *clientImpl) CommandInfo(ctx context.Context, id CommandID) (*Command, error) { +func (c *Client) CommandInfo(ctx context.Context, id CommandID) (*Command, error) { opid, err := ctrl.ParseOPID(string(id)) if err != nil { return nil, ErrInvalidCommandID @@ -66,19 +66,19 @@ func (c *clientImpl) CommandInfo(ctx context.Context, id CommandID) (*Command, e return cmd, nil } -func (c *clientImpl) GetConfig(ctx context.Context) (*Config, error) { +func (c *Client) GetConfig(ctx context.Context) (*Config, error) { return config.GetConfig(ctx, c.conn) } -func (c *clientImpl) SetConfig(ctx context.Context, cfg Config) (CommandID, error) { +func (c *Client) SetConfig(ctx context.Context, cfg Config) (CommandID, error) { return NoOpID, config.SetConfig(ctx, c.conn, &cfg) } -func (c *clientImpl) GetAllBackups(ctx context.Context) ([]BackupMetadata, error) { +func (c *Client) GetAllBackups(ctx context.Context) ([]BackupMetadata, error) { return backup.BackupsList(ctx, c.conn, 0) } -func (c *clientImpl) GetAllRestores( +func (c *Client) GetAllRestores( ctx context.Context, m connect.Client, options GetAllRestoresOptions, @@ -90,7 +90,7 @@ func (c *clientImpl) GetAllRestores( return restore.RestoreList(ctx, c.conn, limit) } -func (c *clientImpl) GetBackupByName( +func (c *Client) GetBackupByName( ctx context.Context, name string, options GetBackupByNameOptions, @@ -103,7 
+103,7 @@ func (c *clientImpl) GetBackupByName( return c.getBackupHelper(ctx, bcp, options) } -func (c *clientImpl) GetBackupByOpID( +func (c *Client) GetBackupByOpID( ctx context.Context, opid string, options GetBackupByNameOptions, @@ -116,7 +116,7 @@ func (c *clientImpl) GetBackupByOpID( return c.getBackupHelper(ctx, bcp, options) } -func (c *clientImpl) getBackupHelper( +func (c *Client) getBackupHelper( ctx context.Context, bcp *BackupMetadata, options GetBackupByNameOptions, @@ -149,7 +149,7 @@ func (c *clientImpl) getBackupHelper( return bcp, nil } -func fillFilelistForBackup(ctx context.Context, cc connect.Client, bcp *BackupMetadata) error { +func fillFilelistForBackup(ctx context.Context, conn connect.Client, bcp *BackupMetadata) error { var err error var stg storage.Storage @@ -157,7 +157,7 @@ func fillFilelistForBackup(ctx context.Context, cc connect.Client, bcp *BackupMe eg.SetLimit(runtime.NumCPU()) if version.HasFilelistFile(bcp.PBMVersion) { - stg, err = util.GetStorage(ctx, cc, nil) + stg, err = util.GetStorage(ctx, conn, nil) if err != nil { return errors.Wrap(err, "get storage") } @@ -190,7 +190,7 @@ func fillFilelistForBackup(ctx context.Context, cc connect.Client, bcp *BackupMe if stg == nil { // in case if it is the first backup made with filelist file - stg, err = getStorageForRead(ctx, cc) + stg, err = getStorageForRead(ctx, conn) if err != nil { return errors.Wrap(err, "get storage") } @@ -215,8 +215,8 @@ func fillFilelistForBackup(ctx context.Context, cc connect.Client, bcp *BackupMe return eg.Wait() } -func getStorageForRead(ctx context.Context, cc connect.Client) (storage.Storage, error) { - stg, err := util.GetStorage(ctx, cc, nil) +func getStorageForRead(ctx context.Context, conn connect.Client) (storage.Storage, error) { + stg, err := util.GetStorage(ctx, conn, nil) if err != nil { return nil, errors.Wrap(err, "get storage") } @@ -247,20 +247,20 @@ func getFilelistForReplset(stg storage.Storage, bcpName, rsName string) (backup. 
return filelist, nil } -func (c *clientImpl) GetRestoreByName(ctx context.Context, name string) (*RestoreMetadata, error) { +func (c *Client) GetRestoreByName(ctx context.Context, name string) (*RestoreMetadata, error) { return restore.GetRestoreMeta(ctx, c.conn, name) } -func (c *clientImpl) GetRestoreByOpID(ctx context.Context, opid string) (*RestoreMetadata, error) { +func (c *Client) GetRestoreByOpID(ctx context.Context, opid string) (*RestoreMetadata, error) { return restore.GetRestoreMetaByOPID(ctx, c.conn, opid) } -func (c *clientImpl) SyncFromStorage(ctx context.Context) (CommandID, error) { +func (c *Client) SyncFromStorage(ctx context.Context) (CommandID, error) { opid, err := ctrl.SendResync(ctx, c.conn) return CommandID(opid.String()), err } -func (c *clientImpl) DeleteBackupByName(ctx context.Context, name string) (CommandID, error) { +func (c *Client) DeleteBackupByName(ctx context.Context, name string) (CommandID, error) { opts := GetBackupByNameOptions{FetchIncrements: true} bcp, err := c.GetBackupByName(ctx, name, opts) if err != nil { @@ -279,7 +279,7 @@ func (c *clientImpl) DeleteBackupByName(ctx context.Context, name string) (Comma return CommandID(opid.String()), err } -func (c *clientImpl) DeleteBackupBefore( +func (c *Client) DeleteBackupBefore( ctx context.Context, beforeTS Timestamp, options DeleteBackupBeforeOptions, @@ -288,38 +288,38 @@ func (c *clientImpl) DeleteBackupBefore( return CommandID(opid.String()), err } -func (c *clientImpl) DeleteOplogRange(ctx context.Context, until Timestamp) (CommandID, error) { +func (c *Client) DeleteOplogRange(ctx context.Context, until Timestamp) (CommandID, error) { opid, err := ctrl.SendDeleteOplogRangeBefore(ctx, c.conn, until) return CommandID(opid.String()), err } -func (c *clientImpl) CleanupReport(ctx context.Context, beforeTS Timestamp) (CleanupReport, error) { +func (c *Client) CleanupReport(ctx context.Context, beforeTS Timestamp) (CleanupReport, error) { return backup.MakeCleanupInfo(ctx, c.conn, beforeTS) } -func (c *clientImpl) RunCleanup(ctx context.Context, beforeTS Timestamp) (CommandID, error) { +func (c *Client) RunCleanup(ctx context.Context, beforeTS Timestamp) (CommandID, error) { opid, err := ctrl.SendCleanup(ctx, c.conn, beforeTS) return CommandID(opid.String()), err } -func (c *clientImpl) CancelBackup(ctx context.Context) (CommandID, error) { +func (c *Client) CancelBackup(ctx context.Context) (CommandID, error) { opid, err := ctrl.SendCancelBackup(ctx, c.conn) return CommandID(opid.String()), err } -func (c *clientImpl) RunLogicalBackup(ctx context.Context, options LogicalBackupOptions) (CommandID, error) { +func (c *Client) RunLogicalBackup(ctx context.Context, options LogicalBackupOptions) (CommandID, error) { return NoOpID, ErrNotImplemented } -func (c *clientImpl) RunPhysicalBackup(ctx context.Context, options PhysicalBackupOptions) (CommandID, error) { +func (c *Client) RunPhysicalBackup(ctx context.Context, options PhysicalBackupOptions) (CommandID, error) { return NoOpID, ErrNotImplemented } -func (c *clientImpl) RunIncrementalBackup(ctx context.Context, options IncrementalBackupOptions) (CommandID, error) { +func (c *Client) RunIncrementalBackup(ctx context.Context, options IncrementalBackupOptions) (CommandID, error) { return NoOpID, ErrNotImplemented } -func (c *clientImpl) Restore(ctx context.Context, backupName string, clusterTS Timestamp) (CommandID, error) { +func (c *Client) Restore(ctx context.Context, backupName string, clusterTS Timestamp) (CommandID, error) { return NoOpID, 
ErrNotImplemented } @@ -341,7 +341,7 @@ func (l lockImpl) Heartbeat() Timestamp { var ErrStaleHearbeat = errors.New("stale heartbeat") -func (c *clientImpl) OpLocks(ctx context.Context) ([]OpLock, error) { +func (c *Client) OpLocks(ctx context.Context) ([]OpLock, error) { locks, err := lock.GetLocks(ctx, c.conn, &lock.LockHeader{}) if err != nil { return nil, errors.Wrap(err, "get locks") @@ -403,7 +403,12 @@ func waitOp(ctx context.Context, conn connect.Client, lck *lock.LockHeader) erro } } -func lastLogErr(ctx context.Context, cc connect.Client, op ctrl.Command, after int64) (string, error) { +func lastLogErr( + ctx context.Context, + conn connect.Client, + op ctrl.Command, + after int64, +) (string, error) { r := &log.LogRequest{ LogKeys: log.LogKeys{ Severity: log.Error, @@ -412,7 +417,7 @@ func lastLogErr(ctx context.Context, cc connect.Client, op ctrl.Command, after i TimeMin: time.Unix(after, 0), } - outC, errC := log.Follow(ctx, cc, r, false) + outC, errC := log.Follow(ctx, conn, r, false) for { select { diff --git a/sdk/sdk.go b/sdk/sdk.go index e9ebec228..f1643a1a2 100644 --- a/sdk/sdk.go +++ b/sdk/sdk.go @@ -118,64 +118,35 @@ func (l *OpLock) Err() error { return l.err } -type Client interface { - Close(ctx context.Context) error - - CommandInfo(ctx context.Context, id CommandID) (*Command, error) - OpLocks(ctx context.Context) ([]OpLock, error) - - GetConfig(ctx context.Context) (*Config, error) - - GetAllBackups(ctx context.Context) ([]BackupMetadata, error) - GetBackupByName(ctx context.Context, name string, options GetBackupByNameOptions) (*BackupMetadata, error) - GetBackupByOpID(ctx context.Context, opid string, options GetBackupByNameOptions) (*BackupMetadata, error) - - GetAllRestores(ctx context.Context, m connect.Client, options GetAllRestoresOptions) ([]RestoreMetadata, error) - GetRestoreByName(ctx context.Context, name string) (*RestoreMetadata, error) - GetRestoreByOpID(ctx context.Context, opid string) (*RestoreMetadata, error) - - CancelBackup(ctx context.Context) (CommandID, error) - - DeleteBackupByName(ctx context.Context, name string) (CommandID, error) - DeleteBackupBefore(ctx context.Context, beforeTS Timestamp, options DeleteBackupBeforeOptions) (CommandID, error) - - DeleteOplogRange(ctx context.Context, until Timestamp) (CommandID, error) - - CleanupReport(ctx context.Context, beforeTS Timestamp) (CleanupReport, error) - RunCleanup(ctx context.Context, beforeTS Timestamp) (CommandID, error) - - SyncFromStorage(ctx context.Context) (CommandID, error) -} - -func NewClient(ctx context.Context, uri string) (Client, error) { +func NewClient(ctx context.Context, uri string) (*Client, error) { conn, err := connect.Connect(ctx, uri, "sdk") if err != nil { return nil, err } - return &clientImpl{conn: conn}, nil + return &Client{conn: conn}, nil } -func WaitForCleanup(ctx context.Context, client Client) error { +func WaitForCleanup(ctx context.Context, client *Client) error { lck := &lock.LockHeader{Type: ctrl.CmdCleanup} - return waitOp(ctx, client.(*clientImpl).conn, lck) + return waitOp(ctx, client.conn, lck) } -func WaitForDeleteBackup(ctx context.Context, client Client) error { +func WaitForDeleteBackup(ctx context.Context, client *Client) error { lck := &lock.LockHeader{Type: ctrl.CmdDeleteBackup} - return waitOp(ctx, client.(*clientImpl).conn, lck) + return waitOp(ctx, client.conn, lck) } -func WaitForDeleteOplogRange(ctx context.Context, client Client) error { +func WaitForDeleteOplogRange(ctx context.Context, client *Client) error { lck := 
&lock.LockHeader{Type: ctrl.CmdDeletePITR} - return waitOp(ctx, client.(*clientImpl).conn, lck) + return waitOp(ctx, client.conn, lck) } -func WaitForErrorLog(ctx context.Context, client Client, cmd *Command) (string, error) { - return lastLogErr(ctx, client.(*clientImpl).conn, cmd.Cmd, cmd.TS) +func WaitForErrorLog(ctx context.Context, client *Client, cmd *Command) (string, error) { + return lastLogErr(ctx, client.conn, cmd.Cmd, cmd.TS) } -func WaitForResync(ctx context.Context, c Client, cid CommandID) error { +func WaitForResync(ctx context.Context, c *Client, cid CommandID) error { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -187,7 +158,7 @@ func WaitForResync(ctx context.Context, c Client, cid CommandID) error { }, } - outC, errC := log.Follow(ctx, c.(*clientImpl).conn, r, false) + outC, errC := log.Follow(ctx, c.conn, r, false) for { select { @@ -201,34 +172,34 @@ func WaitForResync(ctx context.Context, c Client, cid CommandID) error { } } -func CanDeleteBackup(ctx context.Context, sc Client, bcp *BackupMetadata) error { - return backup.CanDeleteBackup(ctx, sc.(*clientImpl).conn, bcp) +func CanDeleteBackup(ctx context.Context, client *Client, bcp *BackupMetadata) error { + return backup.CanDeleteBackup(ctx, client.conn, bcp) } func CanDeleteIncrementalBackup( ctx context.Context, - sc Client, + client *Client, bcp *BackupMetadata, increments [][]*BackupMetadata, ) error { - return backup.CanDeleteIncrementalChain(ctx, sc.(*clientImpl).conn, bcp, increments) + return backup.CanDeleteIncrementalChain(ctx, client.conn, bcp, increments) } func ListDeleteBackupBefore( ctx context.Context, - sc Client, + client *Client, ts primitive.Timestamp, bcpType BackupType, ) ([]BackupMetadata, error) { - return backup.ListDeleteBackupBefore(ctx, sc.(*clientImpl).conn, ts, bcpType) + return backup.ListDeleteBackupBefore(ctx, client.conn, ts, bcpType) } func ListDeleteChunksBefore( ctx context.Context, - sc Client, + client *Client, ts primitive.Timestamp, ) ([]OplogChunk, error) { - r, err := backup.MakeCleanupInfo(ctx, sc.(*clientImpl).conn, ts) + r, err := backup.MakeCleanupInfo(ctx, client.conn, ts) return r.Chunks, err } diff --git a/sdk/util.go b/sdk/util.go index 595077381..cb3f1d61b 100644 --- a/sdk/util.go +++ b/sdk/util.go @@ -25,8 +25,8 @@ func IsHeartbeatStale(clusterTime, other Timestamp) bool { return clusterTime.T >= other.T+defs.StaleFrameSec } -func ClusterTime(ctx context.Context, client Client) (Timestamp, error) { - info, err := topo.GetNodeInfo(ctx, client.(*clientImpl).conn.MongoClient()) +func ClusterTime(ctx context.Context, client *Client) (Timestamp, error) { + info, err := topo.GetNodeInfo(ctx, client.conn.MongoClient()) if err != nil { return primitive.Timestamp{}, err } @@ -41,8 +41,8 @@ func ClusterTime(ctx context.Context, client Client) (Timestamp, error) { // // For sharded cluster: the configsvr (with ID `config`) and all shards. // For non-sharded cluster: the replset. -func ClusterMembers(ctx context.Context, client Client) ([]ReplsetInfo, error) { - shards, err := topo.ClusterMembers(ctx, client.(*clientImpl).conn.MongoClient()) +func ClusterMembers(ctx context.Context, client *Client) ([]ReplsetInfo, error) { + shards, err := topo.ClusterMembers(ctx, client.conn.MongoClient()) if err != nil { return nil, errors.Wrap(err, "topo") } @@ -50,6 +50,6 @@ func ClusterMembers(ctx context.Context, client Client) ([]ReplsetInfo, error) { } // AgentStatuses returns list of all PBM Agents statuses. 
-func AgentStatuses(ctx context.Context, sc Client) ([]AgentStatus, error) { - return topo.ListAgents(ctx, sc.(*clientImpl).conn) +func AgentStatuses(ctx context.Context, client *Client) ([]AgentStatus, error) { + return topo.ListAgents(ctx, client.conn) } From ce0dcb168429d115f112f85ed407fec1f6aa80a3 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 26 Jun 2024 16:50:17 +0200 Subject: [PATCH 076/203] expose some types --- sdk/cli/status.go | 2 +- sdk/sdk.go | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/sdk/cli/status.go b/sdk/cli/status.go index 4f116600a..e073f788b 100644 --- a/sdk/cli/status.go +++ b/sdk/cli/status.go @@ -42,7 +42,7 @@ type Node struct { Errs []error } -func (n Node) IsLost() bool { +func (n Node) IsAgentLost() bool { if len(n.Errs) == 0 { return false } diff --git a/sdk/sdk.go b/sdk/sdk.go index f1643a1a2..489f87168 100644 --- a/sdk/sdk.go +++ b/sdk/sdk.go @@ -25,8 +25,10 @@ var ( ) type ( - CommandID string - Timestamp = primitive.Timestamp + Command = ctrl.Cmd + CommandID string + CommandType = ctrl.Command + Timestamp = primitive.Timestamp ) var NoOpID = CommandID(ctrl.NilOPID.String()) @@ -94,8 +96,6 @@ type DeleteBackupBeforeOptions struct { Type BackupType } -type Command = ctrl.Cmd - // OpLock represents internal PBM lock. // // Some commands can have many locks (one lock per replset). @@ -103,7 +103,7 @@ type OpLock struct { // OpID is its command id. OpID CommandID `json:"opid,omitempty"` // Cmd is the type of command - Cmd ctrl.Command `json:"cmd,omitempty"` + Cmd CommandType `json:"cmd,omitempty"` // Replset is name of a replset that acquired the lock. Replset string `json:"rs,omitempty"` // Node is `host:port` pair of an agent that acquired the lock. From 0e0dfe11a8e41ceb4d1ebd5b011a3dac76378b53 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 27 Jun 2024 13:07:02 +0200 Subject: [PATCH 077/203] v2 --- cmd/pbm/backup.go | 2 +- cmd/pbm/config.go | 2 +- cmd/pbm/delete.go | 2 +- cmd/pbm/list.go | 2 +- cmd/pbm/main.go | 2 +- cmd/pbm/status.go | 4 ++-- {sdk => v2}/cli/status.go | 2 +- {sdk => v2}/impl.go | 0 {sdk => v2}/sdk.go | 12 ++++++++++++ {sdk => v2}/util.go | 0 10 files changed, 20 insertions(+), 8 deletions(-) rename {sdk => v2}/cli/status.go (98%) rename {sdk => v2}/impl.go (100%) rename {sdk => v2}/sdk.go (94%) rename {sdk => v2}/util.go (100%) diff --git a/cmd/pbm/backup.go b/cmd/pbm/backup.go index 581b2a42a..5704b4fac 100644 --- a/cmd/pbm/backup.go +++ b/cmd/pbm/backup.go @@ -24,7 +24,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/topo" "github.com/percona/percona-backup-mongodb/pbm/util" "github.com/percona/percona-backup-mongodb/pbm/version" - "github.com/percona/percona-backup-mongodb/sdk" + sdk "github.com/percona/percona-backup-mongodb/v2" ) type backupOpts struct { diff --git a/cmd/pbm/config.go b/cmd/pbm/config.go index c323c7057..10f66bca9 100644 --- a/cmd/pbm/config.go +++ b/cmd/pbm/config.go @@ -15,7 +15,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/errors" - "github.com/percona/percona-backup-mongodb/sdk" + sdk "github.com/percona/percona-backup-mongodb/v2" ) const resyncWaitDuration = 30 * time.Second diff --git a/cmd/pbm/delete.go b/cmd/pbm/delete.go index da6920bd0..459e60fc7 100644 --- a/cmd/pbm/delete.go +++ b/cmd/pbm/delete.go @@ -17,7 +17,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/defs" 
"github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/oplog" - "github.com/percona/percona-backup-mongodb/sdk" + sdk "github.com/percona/percona-backup-mongodb/v2" ) type deleteBcpOpts struct { diff --git a/cmd/pbm/list.go b/cmd/pbm/list.go index 037866403..8ba6a228c 100644 --- a/cmd/pbm/list.go +++ b/cmd/pbm/list.go @@ -19,7 +19,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/topo" "github.com/percona/percona-backup-mongodb/pbm/util" "github.com/percona/percona-backup-mongodb/pbm/version" - "github.com/percona/percona-backup-mongodb/sdk" + sdk "github.com/percona/percona-backup-mongodb/v2" ) type listOpts struct { diff --git a/cmd/pbm/main.go b/cmd/pbm/main.go index ef12e1ede..c4c560ff2 100644 --- a/cmd/pbm/main.go +++ b/cmd/pbm/main.go @@ -21,7 +21,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/oplog" "github.com/percona/percona-backup-mongodb/pbm/topo" "github.com/percona/percona-backup-mongodb/pbm/version" - "github.com/percona/percona-backup-mongodb/sdk" + sdk "github.com/percona/percona-backup-mongodb/v2" ) const ( diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go index b279bc390..78db4f021 100644 --- a/cmd/pbm/status.go +++ b/cmd/pbm/status.go @@ -25,8 +25,8 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/topo" "github.com/percona/percona-backup-mongodb/pbm/util" "github.com/percona/percona-backup-mongodb/pbm/version" - "github.com/percona/percona-backup-mongodb/sdk" - "github.com/percona/percona-backup-mongodb/sdk/cli" + sdk "github.com/percona/percona-backup-mongodb/v2" + "github.com/percona/percona-backup-mongodb/v2/cli" ) type statusOptions struct { diff --git a/sdk/cli/status.go b/v2/cli/status.go similarity index 98% rename from sdk/cli/status.go rename to v2/cli/status.go index e073f788b..3b44e79ce 100644 --- a/sdk/cli/status.go +++ b/v2/cli/status.go @@ -13,7 +13,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/topo" - "github.com/percona/percona-backup-mongodb/sdk" + sdk "github.com/percona/percona-backup-mongodb/v2" ) type LostAgentError struct { diff --git a/sdk/impl.go b/v2/impl.go similarity index 100% rename from sdk/impl.go rename to v2/impl.go diff --git a/sdk/sdk.go b/v2/sdk.go similarity index 94% rename from sdk/sdk.go rename to v2/sdk.go index 489f87168..018e9d2c8 100644 --- a/sdk/sdk.go +++ b/v2/sdk.go @@ -31,6 +31,18 @@ type ( Timestamp = primitive.Timestamp ) +const ( + CmdBackup = ctrl.CmdBackup + CmdRestore = ctrl.CmdRestore + CmdReplay = ctrl.CmdReplay + CmdCancelBackup = ctrl.CmdCancelBackup + CmdResync = ctrl.CmdResync + CmdPITR = ctrl.CmdPITR + CmdDeleteBackup = ctrl.CmdDeleteBackup + CmdDeletePITR = ctrl.CmdDeletePITR + CmdCleanup = ctrl.CmdCleanup +) + var NoOpID = CommandID(ctrl.NilOPID.String()) type BackupType = defs.BackupType diff --git a/sdk/util.go b/v2/util.go similarity index 100% rename from sdk/util.go rename to v2/util.go From 3187f431fe5bdc0e5b232678e54bcb5c194b2255 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 27 Jun 2024 13:12:37 +0200 Subject: [PATCH 078/203] Remove config epoch check and reload in Slicer This feature is in responsibility of config monitor go routine which will restart all slicers in case when PITR related config params change. 
---
 pbm/slicer/slicer.go | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/pbm/slicer/slicer.go b/pbm/slicer/slicer.go
index 108f427eb..fc5d631eb 100644
--- a/pbm/slicer/slicer.go
+++ b/pbm/slicer/slicer.go
@@ -402,21 +402,6 @@ func (s *Slicer) Stream(
 		}
 	}
 
-	// if this is the last slice, epoch probably already changed (e.g. due to config changes) and that's ok
-	if !lastSlice {
-		cep, err := config.GetEpoch(ctx, s.leadClient)
-		if err != nil {
-			return errors.Wrap(err, "get epoch")
-		}
-
-		if !s.cfg.Epoch.Equal(cep.TS()) {
-			return errors.Errorf(
-				"epoch mismatch. Got sleep in %v, woke up in %v. Too old for that stuff.",
-				s.cfg.Epoch, cep.TS(),
-			)
-		}
-	}
-
 	err = s.upload(ctx, s.lastTS, sliceTo, compression, level)
 	if err != nil {
 		return err

From 9571a75224190400423a6821333a6e4dc9d6bb4c Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Thu, 27 Jun 2024 13:29:14 +0200
Subject: [PATCH 079/203] Add logic for reporting error for PITR slicing

It also refactors (makes it universal) the function for getting replica
sets with a specified status.

---
 pbm/oplog/nomination.go | 30 ++++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)

diff --git a/pbm/oplog/nomination.go b/pbm/oplog/nomination.go
index 83090659b..a41df5728 100644
--- a/pbm/oplog/nomination.go
+++ b/pbm/oplog/nomination.go
@@ -131,20 +131,38 @@ func SetReadyRSStatus(ctx context.Context, conn connect.Client, rs, node string)
 	return errors.Wrap(err, "update pitr doc for RS ready status")
 }
 
-// GetReadyReplSets fetches all replicasets which reported Ready status
-func GetReadyReplSets(ctx context.Context, conn connect.Client) ([]PITRReplset, error) {
+// SetErrorRSStatus sets Error status for the specified replica set and includes the error description.
+func SetErrorRSStatus(ctx context.Context, conn connect.Client, rs, node, errText string) error {
+	repliset := PITRReplset{
+		Name:   rs,
+		Node:   node,
+		Status: StatusError,
+		Error:  errText,
+	}
+	_, err := conn.PITRCollection().
+		UpdateOne(
+			ctx,
+			bson.D{},
+			bson.D{{"$addToSet", bson.M{"replsets": repliset}}},
+			options.Update().SetUpsert(true),
+		)
+	return errors.Wrap(err, "update pitr doc for RS error status")
+}
+
+// GetReplSetsWithStatus fetches all replica sets that reported the specified status.
+func GetReplSetsWithStatus(ctx context.Context, conn connect.Client, status Status) ([]PITRReplset, error) {
 	meta, err := GetMeta(ctx, conn)
 	if err != nil {
 		return nil, errors.Wrap(err, "get meta")
 	}
 
-	readyReplsets := []PITRReplset{}
+	replSetsWithStatus := []PITRReplset{}
 	for _, rs := range meta.Replsets {
-		if rs.Status == StatusReady {
-			readyReplsets = append(readyReplsets, rs)
+		if rs.Status == status {
+			replSetsWithStatus = append(replSetsWithStatus, rs)
 		}
 	}
 
-	return readyReplsets, nil
+	return replSetsWithStatus, nil
 }
 
 // SetPITRNomination adds nomination fragment for specified RS within PITRMeta.
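As a usage note for the helpers added above, agents and monitors are expected to
compose them roughly like this (a minimal sketch; ctx and conn are assumed to be
an existing context.Context and connect.Client, l is a PBM log event as used
elsewhere in this series, and the replset/node values are placeholders):

	// an agent that fails during slicing reports the error for its replica set:
	if err := oplog.SetErrorRSStatus(ctx, conn, "rs0", "rs0:27017", "oplog slicing failed"); err != nil {
		return errors.Wrap(err, "report pitr error") // placeholder handling
	}

	// a monitor on the leader can then collect every replica set in that state:
	failed, err := oplog.GetReplSetsWithStatus(ctx, conn, oplog.StatusError)
	if err != nil {
		return errors.Wrap(err, "get error replsets")
	}
	for _, rs := range failed {
		l.Debug("pitr error on %s/%s: %s", rs.Name, rs.Node, rs.Error)
	}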
From 3e4249db5a4944ec952c7812e332b63d86e04443 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Thu, 27 Jun 2024 13:54:29 +0200
Subject: [PATCH 080/203] Add agent's helper to check PITR cluster status

---
 cmd/pbm-agent/pitr.go | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go
index 821019275..1c018196e 100644
--- a/cmd/pbm-agent/pitr.go
+++ b/cmd/pbm-agent/pitr.go
@@ -453,7 +453,6 @@ func (a *Agent) waitNominationForPITR(ctx context.Context, rs, node string) (boo
 	for {
 		select {
 		case <-tk.C:
-
 			nm, err := oplog.GetPITRNominees(ctx, a.leadConn, rs)
 			if err != nil {
 				if errors.Is(err, errors.ErrNotFound) {
@@ -520,7 +519,7 @@ func (a *Agent) reconcileReadyStatus(ctx context.Context, agents []topo.AgentSta
 	for {
 		select {
 		case <-tk.C:
-			nodes, err := oplog.GetReadyReplSets(ctx, a.leadConn)
+			nodes, err := oplog.GetReplSetsWithStatus(ctx, a.leadConn, oplog.StatusReady)
 			if err != nil {
 				if errors.Is(err, errors.ErrNotFound) {
 					continue
@@ -537,3 +536,17 @@ func (a *Agent) reconcileReadyStatus(ctx context.Context, agents []topo.AgentSta
 		}
 	}
 }
 
+// isPITRClusterStatus checks within the pbmPITR collection whether the cluster
+// status is set to the specified status.
+func (a *Agent) isPITRClusterStatus(ctx context.Context, status oplog.Status) bool {
+	l := log.LogEventFromContext(ctx)
+
+	meta, err := oplog.GetMeta(ctx, a.leadConn)
+	if err != nil {
+		if errors.Is(err, errors.ErrNotFound) {
+			return false
+		}
+		l.Error("getting meta for reconfig status check: %v", err)
+	}
+	return meta.Status == status
+}

From 3c00aa4c3f4740312da9f7fd09f698071678978f Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Thu, 27 Jun 2024 14:23:28 +0200
Subject: [PATCH 081/203] Add draft for PITR config monitor routine

---
 cmd/pbm-agent/pitr.go | 50 +++++++++++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go
index 1c018196e..a116863b9 100644
--- a/cmd/pbm-agent/pitr.go
+++ b/cmd/pbm-agent/pitr.go
@@ -550,3 +550,53 @@
+
+// pitrConfigMonitor watches for changes in the PITR section of the PBM configuration.
+// If relevant changes are detected (e.g. priorities, oplogOnly), it sets the
+// Reconfig cluster status, meaning that the slicing process needs to be restarted.
+func (a *Agent) pitrConfigMonitor(ctx context.Context, currentConf config.PITRConf) { + l := log.LogEventFromContext(ctx) + l.Debug("start pitr config monitor") + + tk := time.NewTicker(5 * time.Second) + defer tk.Stop() + + for { + select { + case <-tk.C: + cfg, err := config.GetConfig(ctx, a.leadConn) + if err != nil { + if !errors.Is(err, mongo.ErrNoDocuments) { + l.Error("error while monitoring for pitr conf change: %v", err) + } + continue + } + + if !cfg.PITR.Enabled { + //todo check this + continue + } + + oldP := currentConf.Priority + newP := cfg.PITR.Priority + //todo: add change chet for other config params + if newP == nil && oldP == nil { + continue + } + if maps.Equal(newP, oldP) { + continue + } + + l.Info("pitr config has changed, re-config will be done") + err = oplog.SetClusterStatus(ctx, a.leadConn, oplog.StatusReconfig) + if err != nil { + l.Error("error while setting cluster status reconfig: %v", err) + } + return + + case <-ctx.Done(): + return + } + } +} + From a245ebad17b5b0bcd011d2132bc8e7b239fd82a0 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 27 Jun 2024 14:37:56 +0200 Subject: [PATCH 082/203] prioritize main storage --- pbm/resync/rsync.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/pbm/resync/rsync.go b/pbm/resync/rsync.go index 78f223d91..ecf06f228 100644 --- a/pbm/resync/rsync.go +++ b/pbm/resync/rsync.go @@ -9,6 +9,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" "github.com/percona/percona-backup-mongodb/pbm/backup" "github.com/percona/percona-backup-mongodb/pbm/config" @@ -152,15 +153,22 @@ func insertBackupList( defer wg.Done() l := log.LogEventFromContext(ctx) + var err error for bcp := range inC { l.Debug("bcp: %v", bcp.Name) - _, err := conn.BcpCollection().InsertOne(ctx, bcp) - if err != nil { + if bcp.Store.IsProfile { + _, err = conn.BcpCollection().InsertOne(ctx, bcp) if mongo.IsDuplicateKeyError(err) { - l.Warning("backup %q already exists", bcp.Name) continue } + } else { + _, err = conn.BcpCollection().ReplaceOne(ctx, + bson.D{{"name", bcp.Name}}, + bcp, + options.Replace().SetUpsert(true)) + } + if err != nil { errC <- errors.Wrapf(err, "backup %q", bcp.Name) } } From 01b120e6ccfeacfc54104f628fc3cba2ec02c16f Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 27 Jun 2024 14:38:47 +0200 Subject: [PATCH 083/203] do profile work on cluster leader --- cmd/pbm-agent/profile.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/pbm-agent/profile.go b/cmd/pbm-agent/profile.go index bd23edf81..787a67592 100644 --- a/cmd/pbm-agent/profile.go +++ b/cmd/pbm-agent/profile.go @@ -45,13 +45,13 @@ func (a *Agent) handleAddConfigProfile( } }() - nodeInfo, err := topo.GetNodeInfo(ctx, a.nodeConn) + nodeInfo, err := topo.GetNodeInfoExt(ctx, a.nodeConn) if err != nil { err = errors.Wrap(err, "get node info") return } - if !nodeInfo.IsLeader() { - l.Debug("not leader. skip") + if !nodeInfo.IsClusterLeader() { + l.Debug("not the leader. skip") return } @@ -149,13 +149,13 @@ func (a *Agent) handleRemoveConfigProfile( } }() - nodeInfo, err := topo.GetNodeInfo(ctx, a.nodeConn) + nodeInfo, err := topo.GetNodeInfoExt(ctx, a.nodeConn) if err != nil { err = errors.Wrap(err, "get node info") return } - if !nodeInfo.IsLeader() { - l.Debug("not leader. skip") + if !nodeInfo.IsClusterLeader() { + l.Debug("not the leader. 
skip") return } From aad5b3e41bfbde7438b33671d5727acfbb98e8d0 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 27 Jun 2024 14:41:11 +0200 Subject: [PATCH 084/203] remove profile before adding it again --- cmd/pbm/profile.go | 34 ++++++++++++++++++++++------------ pbm/config/profile.go | 5 +++++ 2 files changed, 27 insertions(+), 12 deletions(-) diff --git a/cmd/pbm/profile.go b/cmd/pbm/profile.go index e7b1a7eee..4f9ceaeb5 100644 --- a/cmd/pbm/profile.go +++ b/cmd/pbm/profile.go @@ -110,18 +110,31 @@ func handleAddConfigProfile( return nil, errors.Wrap(err, "parse config") } - cid, err := pbm.AddConfigProfile(ctx, opts.name, cfg) + _, err = pbm.GetConfigProfile(ctx, opts.name) if err != nil { - return nil, errors.Wrap(err, "add config profile") - } - - if opts.wait { - err = sdk.WaitForAddProfile(ctx, pbm, cid) + if !errors.Is(err, config.ErrMissedConfigProfile) { + return nil, errors.Wrap(err, "find saved profile") + } + } else { + cid, err := pbm.RemoveConfigProfile(ctx, opts.name) + if err != nil { + return nil, errors.Wrap(err, "clear profile list") + } + err = sdk.WaitForRemoveProfile(ctx, pbm, cid) if err != nil { return nil, errors.Wrap(err, "wait") } } + cid, err := pbm.AddConfigProfile(ctx, opts.name, cfg) + if err != nil { + return nil, errors.Wrap(err, "add config profile") + } + err = sdk.WaitForAddProfile(ctx, pbm, cid) + if err != nil { + return nil, errors.Wrap(err, "wait") + } + if opts.sync { cid, err := pbm.SyncFromExternalStorage(ctx, opts.name) if err != nil { @@ -160,12 +173,9 @@ func handleRemoveConfigProfile( if err != nil { return nil, errors.Wrap(err, "sdk: remove config profile") } - - if opts.wait { - err = sdk.WaitForRemoveProfile(ctx, pbm, cid) - if err != nil { - return nil, errors.Wrap(err, "wait") - } + err = sdk.WaitForRemoveProfile(ctx, pbm, cid) + if err != nil { + return nil, errors.Wrap(err, "wait") } return &outMsg{"OK"}, nil diff --git a/pbm/config/profile.go b/pbm/config/profile.go index 8f6e5a6e4..1b81af85e 100644 --- a/pbm/config/profile.go +++ b/pbm/config/profile.go @@ -5,6 +5,7 @@ import ( "os" "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" "github.com/percona/percona-backup-mongodb/pbm/connect" @@ -36,6 +37,10 @@ func GetProfile(ctx context.Context, m connect.Client, name string) (*Config, er {"name", name}, }) if err := res.Err(); err != nil { + if errors.Is(err, mongo.ErrNoDocuments) { + return nil, ErrMissedConfigProfile + } + return nil, errors.Wrap(err, "query") } From 6ef34c2f59eb206bd8a1a6a8b8aada258f433d5f Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 27 Jun 2024 16:44:12 +0200 Subject: [PATCH 085/203] Add error monitor PITR logic --- cmd/pbm-agent/pitr.go | 38 +++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index a116863b9..e76334850 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -67,6 +67,7 @@ const ( pitrOpLockPollingTimeOut = 2 * time.Minute pitrNominationPollingCycle = 2 * time.Second pitrNominationPollingTimeOut = 2 * time.Minute + pitrWatchMonitorPollingCycle = 5 * time.Second ) // PITR starts PITR processing routine @@ -558,7 +559,7 @@ func (a *Agent) pitrConfigMonitor(ctx context.Context, currentConf config.PITRCo l := log.LogEventFromContext(ctx) l.Debug("start pitr config monitor") - tk := time.NewTicker(5 * time.Second) + tk := time.NewTicker(pitrWatchMonitorPollingCycle) defer tk.Stop() for { @@ -600,3 +601,38 @@ func (a 
*Agent) pitrConfigMonitor(ctx context.Context, currentConf config.PITRCo
 		}
 	}
 }
+
+// pitrErrorMonitor watches errors reported by agents on replica set(s)
+// that are running PITR.
+// If any error is reported in the pbmPITR collection, the cluster
+// status is set to Error.
+func (a *Agent) pitrErrorMonitor(ctx context.Context) {
+	l := log.LogEventFromContext(ctx)
+	l.Debug("start pitr error monitor")
+
+	tk := time.NewTicker(pitrWatchMonitorPollingCycle)
+	defer tk.Stop()
+
+	for {
+		select {
+		case <-tk.C:
+			replsets, err := oplog.GetReplSetsWithStatus(ctx, a.leadConn, oplog.StatusError)
+			if err != nil {
+				l.Error("get error replsets: %v", err)
+			}
+
+			if len(replsets) == 0 {
+				continue
+			}
+
+			l.Debug("error while executing pitr, pitr procedure will be restarted")
+			err = oplog.SetClusterStatus(ctx, a.leadConn, oplog.StatusError)
+			if err != nil {
+				l.Error("error while setting cluster status Error: %v", err)
+			}
+			return
+
+		case <-ctx.Done():
+			return
+		}
+	}
+}

From 51533508ac64456880481728f7430d387bdad454 Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Thu, 27 Jun 2024 17:37:07 +0200
Subject: [PATCH 086/203] fix stale condition

---
 v2/impl.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/v2/impl.go b/v2/impl.go
index fc898863f..3dd45a072 100644
--- a/v2/impl.go
+++ b/v2/impl.go
@@ -364,7 +364,7 @@ func (c *Client) OpLocks(ctx context.Context) ([]OpLock, error) {
 		rv[i].Node = locks[i].Node
 		rv[i].Heartbeat = locks[i].Heartbeat
 
-		if rv[i].Heartbeat.T+defs.StaleFrameSec > (clusterTime.T) {
+		if rv[i].Heartbeat.T+defs.StaleFrameSec < clusterTime.T {
 			rv[i].err = ErrStaleHearbeat
 		}
 	}

From d871b95b4615e28bace7538db2fa5fc25473ea3b Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Thu, 27 Jun 2024 17:52:49 +0200
Subject: [PATCH 087/203] v2 go.mod

---
 go.mod                                        |   3 +
 v2/go.mod                                     |  44 ++
 v2/go.sum                                     | 150 ++++++
 .../percona-backup-mongodb/v2/cli/status.go   | 185 ++++++++
 .../percona/percona-backup-mongodb/v2/impl.go | 430 ++++++++++++++++++
 .../percona/percona-backup-mongodb/v2/sdk.go  | 220 +++++++++
 .../percona/percona-backup-mongodb/v2/util.go |  55 +++
 vendor/modules.txt                            |   5 +
 8 files changed, 1092 insertions(+)
 create mode 100644 v2/go.mod
 create mode 100644 v2/go.sum
 create mode 100644 vendor/github.com/percona/percona-backup-mongodb/v2/cli/status.go
 create mode 100644 vendor/github.com/percona/percona-backup-mongodb/v2/impl.go
 create mode 100644 vendor/github.com/percona/percona-backup-mongodb/v2/sdk.go
 create mode 100644 vendor/github.com/percona/percona-backup-mongodb/v2/util.go

diff --git a/go.mod b/go.mod
index 5e21daa69..60fcf814e 100644
--- a/go.mod
+++ b/go.mod
@@ -14,6 +14,7 @@ require (
 	github.com/klauspost/pgzip v1.2.6
 	github.com/minio/minio-go v6.0.14+incompatible
 	github.com/mongodb/mongo-tools v0.0.0-20231117185435-bf0bef9e9f19
+	github.com/percona/percona-backup-mongodb/v2 v2.0.0-00010101000000-000000000000
 	github.com/pierrec/lz4 v2.6.1+incompatible
 	github.com/pkg/errors v0.9.1
 	go.mongodb.org/mongo-driver v1.13.0
@@ -65,3 +66,5 @@ require (
 	golang.org/x/time v0.5.0 // indirect
 	gotest.tools/v3 v3.5.1 // indirect
 )
+
+replace github.com/percona/percona-backup-mongodb/v2 => ./v2
diff --git a/v2/go.mod b/v2/go.mod
new file mode 100644
index 000000000..bd876ef91
--- /dev/null
+++ b/v2/go.mod
@@ -0,0 +1,44 @@
+module github.com/percona/percona-backup-mongodb/v2
+
+go 1.22
+
+require (
+	github.com/percona/percona-backup-mongodb v1.0.0
+	go.mongodb.org/mongo-driver v1.13.0
+	golang.org/x/sync v0.6.0
+)
+
+require (
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 // 
indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 // indirect + github.com/aws/aws-sdk-go v1.50.31 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/jessevdk/go-flags v1.5.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/klauspost/compress v1.17.7 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/minio/minio-go v6.0.14+incompatible // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mongodb/mongo-tools v0.0.0-20231117185435-bf0bef9e9f19 // indirect + github.com/montanaflynn/stats v0.6.6 // indirect + github.com/pierrec/lz4 v2.6.1+incompatible // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.2 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect + github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect + golang.org/x/crypto v0.21.0 // indirect + golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect + golang.org/x/mod v0.16.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect + golang.org/x/text v0.14.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) + +replace github.com/percona/percona-backup-mongodb => .. diff --git a/v2/go.sum b/v2/go.sum new file mode 100644 index 000000000..e6e094f3b --- /dev/null +++ b/v2/go.sum @@ -0,0 +1,150 @@ +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/aws/aws-sdk-go v1.50.31 h1:gx2NRLLEDUmQFC4YUsfMUKkGCwpXVO8ijUecq/nOQGA= +github.com/aws/aws-sdk-go v1.50.31/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o= +github.com/minio/minio-go v6.0.14+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8= +github.com/mitchellh/go-homedir v1.1.0 
h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mongodb/mongo-tools v0.0.0-20231117185435-bf0bef9e9f19 h1:B0nhjnm3za73rABZa3HdMhn9WuOXPPHweBBqhZnWinI= +github.com/mongodb/mongo-tools v0.0.0-20231117185435-bf0bef9e9f19/go.mod h1:2Rl3k3e333g2AJN74N9hx9N4IIhB0IcTU3m92oNsOyE= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.6.6 h1:Duep6KMIDpY4Yo11iFsvyqJDyfzLF9+sndUKT+v64GQ= +github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= +github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= +github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.mongodb.org/mongo-driver v1.13.0 h1:67DgFFjYOCMWdtTEmKFpV3ffWlFnh+CYZ8ZS/tXWUfY= +go.mongodb.org/mongo-driver v1.13.0/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= 
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= +golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= 
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/percona/percona-backup-mongodb/v2/cli/status.go b/vendor/github.com/percona/percona-backup-mongodb/v2/cli/status.go
new file mode 100644
index 000000000..3b44e79ce
--- /dev/null
+++ b/vendor/github.com/percona/percona-backup-mongodb/v2/cli/status.go
@@ -0,0 +1,185 @@
+package cli
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"strings"
+	"sync"
+
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"golang.org/x/sync/errgroup"
+
+	"github.com/percona/percona-backup-mongodb/pbm/connect"
+	"github.com/percona/percona-backup-mongodb/pbm/errors"
+	"github.com/percona/percona-backup-mongodb/pbm/topo"
+	sdk "github.com/percona/percona-backup-mongodb/v2"
+)
+
+type LostAgentError struct {
+	heartbeat primitive.Timestamp
+}
+
+func (e LostAgentError) Error() string {
+	return fmt.Sprintf("lost agent, last heartbeat: %v", e.heartbeat.T)
+}
+
+type RSRole string
+
+const (
+	RolePrimary   RSRole = "P"
+	RoleSecondary RSRole = "S"
+	RoleArbiter   RSRole = "A"
+	RoleHidden    RSRole = "H"
+	RoleDelayed   RSRole = "D"
+)
+
+type Node struct {
+	Host string
+	Ver  string
+	Role RSRole
+	OK   bool
+	Errs []error
+}
+
+func (n Node) IsAgentLost() bool {
+	if len(n.Errs) == 0 {
+		return false
+	}
+
+	lostErr := LostAgentError{}
+	for _, err := range n.Errs {
+		if errors.As(err, &lostErr) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func ClusterStatus(
+	ctx context.Context,
+	pbm *sdk.Client,
+	confGetter RSConfGetter,
+) (map[string][]Node, error) {
+	clusterMembers, err := sdk.ClusterMembers(ctx, pbm)
+	if err != nil {
+		return nil, errors.Wrap(err, "get cluster members")
+	}
+	agentStatuses, err := sdk.AgentStatuses(ctx, pbm)
+	if err != nil {
+		return nil, errors.Wrap(err, "get agent statuses")
+	}
+	clusterTime, err := sdk.ClusterTime(ctx, pbm)
+	if err 
!= nil { + return nil, errors.Wrap(err, "read cluster time") + } + + agentMap := make(map[topo.ReplsetName]map[string]*sdk.AgentStatus, len(clusterMembers)) + for i := range agentStatuses { + agent := &agentStatuses[i] + rs, ok := agentMap[agent.RS] + if !ok { + rs = make(map[string]*topo.AgentStat) + agentMap[agent.RS] = rs + } + + rs[agent.Node] = agent + agentMap[agent.RS] = rs + } + + eg, ctx := errgroup.WithContext(ctx) + m := sync.Mutex{} + + pbmCluster := make(map[string][]Node) + for _, c := range clusterMembers { + eg.Go(func() error { + rsConf, err := confGetter.Get(ctx, c.Host) + if err != nil { + return errors.Wrapf(err, "get replset status for `%s`", c.RS) + } + + nodes := make([]Node, len(rsConf.Members)) + for i, member := range rsConf.Members { + node := &nodes[i] + node.Host = member.Host + + rsAgents := agentMap[c.RS] + if rsAgents == nil { + continue + } + agent := rsAgents[member.Host] + if agent == nil { + continue + } + + node.Ver = "v" + agent.AgentVer + + switch { + case agent.State == 1: // agent.StateStr == "PRIMARY" + node.Role = RolePrimary + case agent.State == 7: // agent.StateStr == "ARBITER" + node.Role = RoleArbiter + case agent.State == 2: // agent.StateStr == "SECONDARY" + if agent.DelaySecs != 0 { + node.Role = RoleDelayed + } else if agent.Hidden { + node.Role = RoleHidden + } else { + node.Role = RoleSecondary + } + default: + // unexpected state. show actual state + node.Role = RSRole(agent.StateStr) + } + + if agent.IsStale(clusterTime) { + node.Errs = []error{LostAgentError{agent.Heartbeat}} + continue + } + + node.OK, node.Errs = agent.OK() + } + + m.Lock() + pbmCluster[c.RS] = nodes + m.Unlock() + return nil + }) + } + + err = eg.Wait() + return pbmCluster, err +} + +type RSConfGetter string + +func (g RSConfGetter) Get(ctx context.Context, host string) (*topo.RSConfig, error) { + rsName, host, ok := strings.Cut(host, "/") + if !ok { + host = rsName + } + + if !strings.HasPrefix(string(g), "mongodb://") { + g = "mongodb://" + g + } + curi, err := url.Parse(string(g)) + if err != nil { + return nil, errors.Wrapf(err, "parse mongo-uri '%s'", g) + } + + // Preserving the `replicaSet` parameter will cause an error + // while connecting to the ConfigServer (mismatched replicaset names) + query := curi.Query() + query.Del("replicaSet") + curi.RawQuery = query.Encode() + curi.Host = host + + conn, err := connect.MongoConnect(ctx, curi.String(), connect.AppName("pbm-sdk")) + if err != nil { + return nil, errors.Wrap(err, "connect") + } + defer conn.Disconnect(context.Background()) + + return topo.GetReplSetConfig(ctx, conn) +} diff --git a/vendor/github.com/percona/percona-backup-mongodb/v2/impl.go b/vendor/github.com/percona/percona-backup-mongodb/v2/impl.go new file mode 100644 index 000000000..3dd45a072 --- /dev/null +++ b/vendor/github.com/percona/percona-backup-mongodb/v2/impl.go @@ -0,0 +1,430 @@ +package sdk + +import ( + "context" + "path" + "runtime" + "time" + + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "golang.org/x/sync/errgroup" + + "github.com/percona/percona-backup-mongodb/pbm/backup" + "github.com/percona/percona-backup-mongodb/pbm/config" + "github.com/percona/percona-backup-mongodb/pbm/connect" + "github.com/percona/percona-backup-mongodb/pbm/ctrl" + "github.com/percona/percona-backup-mongodb/pbm/defs" + "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/lock" + "github.com/percona/percona-backup-mongodb/pbm/log" + 
"github.com/percona/percona-backup-mongodb/pbm/restore" + "github.com/percona/percona-backup-mongodb/pbm/storage" + "github.com/percona/percona-backup-mongodb/pbm/topo" + "github.com/percona/percona-backup-mongodb/pbm/util" + "github.com/percona/percona-backup-mongodb/pbm/version" +) + +var ErrNotImplemented = errors.New("not implemented") + +var ( + ErrBackupInProgress = backup.ErrBackupInProgress + ErrIncrementalBackup = backup.ErrIncrementalBackup + ErrNonIncrementalBackup = backup.ErrNonIncrementalBackup + ErrNotBaseIncrement = backup.ErrNotBaseIncrement + ErrBaseForPITR = backup.ErrBaseForPITR +) + +type Client struct { + conn connect.Client +} + +func (c *Client) Close(ctx context.Context) error { + return c.conn.Disconnect(ctx) +} + +func (c *Client) CommandInfo(ctx context.Context, id CommandID) (*Command, error) { + opid, err := ctrl.ParseOPID(string(id)) + if err != nil { + return nil, ErrInvalidCommandID + } + + res := c.conn.CmdStreamCollection().FindOne(ctx, bson.D{{"_id", opid.Obj()}}) + if err := res.Err(); err != nil { + if errors.Is(err, mongo.ErrNoDocuments) { + return nil, ErrNotFound + } + return nil, errors.Wrap(err, "query") + } + + cmd := &Command{} + if err = res.Decode(&cmd); err != nil { + return nil, errors.Wrap(err, "decode") + } + + cmd.OPID = opid + return cmd, nil +} + +func (c *Client) GetConfig(ctx context.Context) (*Config, error) { + return config.GetConfig(ctx, c.conn) +} + +func (c *Client) SetConfig(ctx context.Context, cfg Config) (CommandID, error) { + return NoOpID, config.SetConfig(ctx, c.conn, &cfg) +} + +func (c *Client) GetAllBackups(ctx context.Context) ([]BackupMetadata, error) { + return backup.BackupsList(ctx, c.conn, 0) +} + +func (c *Client) GetAllRestores( + ctx context.Context, + m connect.Client, + options GetAllRestoresOptions, +) ([]RestoreMetadata, error) { + limit := options.Limit + if limit < 0 { + limit = 0 + } + return restore.RestoreList(ctx, c.conn, limit) +} + +func (c *Client) GetBackupByName( + ctx context.Context, + name string, + options GetBackupByNameOptions, +) (*BackupMetadata, error) { + bcp, err := backup.NewDBManager(c.conn).GetBackupByName(ctx, name) + if err != nil { + return nil, errors.Wrap(err, "get backup meta") + } + + return c.getBackupHelper(ctx, bcp, options) +} + +func (c *Client) GetBackupByOpID( + ctx context.Context, + opid string, + options GetBackupByNameOptions, +) (*BackupMetadata, error) { + bcp, err := backup.NewDBManager(c.conn).GetBackupByOpID(ctx, opid) + if err != nil { + return nil, errors.Wrap(err, "get backup meta") + } + + return c.getBackupHelper(ctx, bcp, options) +} + +func (c *Client) getBackupHelper( + ctx context.Context, + bcp *BackupMetadata, + options GetBackupByNameOptions, +) (*BackupMetadata, error) { + if options.FetchIncrements && bcp.Type == IncrementalBackup { + if bcp.SrcBackup != "" { + return nil, ErrNotBaseIncrement + } + + increments, err := backup.FetchAllIncrements(ctx, c.conn, bcp) + if err != nil { + return nil, errors.New("get increments") + } + if increments == nil { + // use non-nil empty slice to mark fetch. 
+ // nil means it never tried to fetch before + increments = make([][]*backup.BackupMeta, 0) + } + + bcp.Increments = increments + } + + if options.FetchFilelist { + err := fillFilelistForBackup(ctx, c.conn, bcp) + if err != nil { + return nil, errors.Wrap(err, "fetch filelist") + } + } + + return bcp, nil +} + +func fillFilelistForBackup(ctx context.Context, conn connect.Client, bcp *BackupMetadata) error { + var err error + var stg storage.Storage + + eg, _ := errgroup.WithContext(ctx) + eg.SetLimit(runtime.NumCPU()) + + if version.HasFilelistFile(bcp.PBMVersion) { + stg, err = util.GetStorage(ctx, conn, nil) + if err != nil { + return errors.Wrap(err, "get storage") + } + + for i := range bcp.Replsets { + rs := &bcp.Replsets[i] + + eg.Go(func() error { + filelist, err := getFilelistForReplset(stg, bcp.Name, rs.Name) + if err != nil { + return errors.Wrapf(err, "get filelist for %q [rs: %s] backup", bcp.Name, rs.Name) + } + + rs.Files = filelist + return nil + }) + } + } + + for i := range bcp.Increments { + for j := range bcp.Increments[i] { + bcp := bcp.Increments[i][j] + + if bcp.Status != defs.StatusDone { + continue + } + if !version.HasFilelistFile(bcp.PBMVersion) { + continue + } + + if stg == nil { + // in case if it is the first backup made with filelist file + stg, err = getStorageForRead(ctx, conn) + if err != nil { + return errors.Wrap(err, "get storage") + } + } + + for i := range bcp.Replsets { + rs := &bcp.Replsets[i] + + eg.Go(func() error { + filelist, err := getFilelistForReplset(stg, bcp.Name, rs.Name) + if err != nil { + return errors.Wrapf(err, "fetch files for %q [rs: %s] backup", bcp.Name, rs.Name) + } + + rs.Files = filelist + return nil + }) + } + } + } + + return eg.Wait() +} + +func getStorageForRead(ctx context.Context, conn connect.Client) (storage.Storage, error) { + stg, err := util.GetStorage(ctx, conn, nil) + if err != nil { + return nil, errors.Wrap(err, "get storage") + } + ok, err := storage.HasReadAccess(ctx, stg) + if err != nil { + return nil, errors.Wrap(err, "check storage access") + } + if !ok { + return nil, errors.New("no read permission for configured storage") + } + + return stg, nil +} + +func getFilelistForReplset(stg storage.Storage, bcpName, rsName string) (backup.Filelist, error) { + pfFilepath := path.Join(bcpName, rsName, backup.FilelistName) + rdr, err := stg.SourceReader(pfFilepath) + if err != nil { + return nil, errors.Wrapf(err, "open %q", pfFilepath) + } + defer rdr.Close() + + filelist, err := backup.ReadFilelist(rdr) + if err != nil { + return nil, errors.Wrapf(err, "parse filelist %q", pfFilepath) + } + + return filelist, nil +} + +func (c *Client) GetRestoreByName(ctx context.Context, name string) (*RestoreMetadata, error) { + return restore.GetRestoreMeta(ctx, c.conn, name) +} + +func (c *Client) GetRestoreByOpID(ctx context.Context, opid string) (*RestoreMetadata, error) { + return restore.GetRestoreMetaByOPID(ctx, c.conn, opid) +} + +func (c *Client) SyncFromStorage(ctx context.Context) (CommandID, error) { + opid, err := ctrl.SendResync(ctx, c.conn) + return CommandID(opid.String()), err +} + +func (c *Client) DeleteBackupByName(ctx context.Context, name string) (CommandID, error) { + opts := GetBackupByNameOptions{FetchIncrements: true} + bcp, err := c.GetBackupByName(ctx, name, opts) + if err != nil { + return NoOpID, errors.Wrap(err, "get backup meta") + } + if bcp.Type == defs.IncrementalBackup { + err = CanDeleteIncrementalBackup(ctx, c, bcp, bcp.Increments) + } else { + err = CanDeleteBackup(ctx, c, bcp) + } + if 
err != nil { + return NoOpID, err + } + + opid, err := ctrl.SendDeleteBackupByName(ctx, c.conn, name) + return CommandID(opid.String()), err +} + +func (c *Client) DeleteBackupBefore( + ctx context.Context, + beforeTS Timestamp, + options DeleteBackupBeforeOptions, +) (CommandID, error) { + opid, err := ctrl.SendDeleteBackupBefore(ctx, c.conn, beforeTS, options.Type) + return CommandID(opid.String()), err +} + +func (c *Client) DeleteOplogRange(ctx context.Context, until Timestamp) (CommandID, error) { + opid, err := ctrl.SendDeleteOplogRangeBefore(ctx, c.conn, until) + return CommandID(opid.String()), err +} + +func (c *Client) CleanupReport(ctx context.Context, beforeTS Timestamp) (CleanupReport, error) { + return backup.MakeCleanupInfo(ctx, c.conn, beforeTS) +} + +func (c *Client) RunCleanup(ctx context.Context, beforeTS Timestamp) (CommandID, error) { + opid, err := ctrl.SendCleanup(ctx, c.conn, beforeTS) + return CommandID(opid.String()), err +} + +func (c *Client) CancelBackup(ctx context.Context) (CommandID, error) { + opid, err := ctrl.SendCancelBackup(ctx, c.conn) + return CommandID(opid.String()), err +} + +func (c *Client) RunLogicalBackup(ctx context.Context, options LogicalBackupOptions) (CommandID, error) { + return NoOpID, ErrNotImplemented +} + +func (c *Client) RunPhysicalBackup(ctx context.Context, options PhysicalBackupOptions) (CommandID, error) { + return NoOpID, ErrNotImplemented +} + +func (c *Client) RunIncrementalBackup(ctx context.Context, options IncrementalBackupOptions) (CommandID, error) { + return NoOpID, ErrNotImplemented +} + +func (c *Client) Restore(ctx context.Context, backupName string, clusterTS Timestamp) (CommandID, error) { + return NoOpID, ErrNotImplemented +} + +type lockImpl struct { + lock.LockData +} + +func (l lockImpl) Type() string { + return string(l.LockData.Type) +} + +func (l lockImpl) CommandID() string { + return l.OPID +} + +func (l lockImpl) Heartbeat() Timestamp { + return l.LockData.Heartbeat +} + +var ErrStaleHearbeat = errors.New("stale heartbeat") + +func (c *Client) OpLocks(ctx context.Context) ([]OpLock, error) { + locks, err := lock.GetLocks(ctx, c.conn, &lock.LockHeader{}) + if err != nil { + return nil, errors.Wrap(err, "get locks") + } + if len(locks) == 0 { + // no current op + return nil, nil + } + + clusterTime, err := ClusterTime(ctx, c) + if err != nil { + return nil, errors.Wrap(err, "get cluster time") + } + + rv := make([]OpLock, len(locks)) + for i := range locks { + rv[i].OpID = CommandID(locks[i].OPID) + rv[i].Cmd = locks[i].Type + rv[i].Replset = locks[i].Replset + rv[i].Node = locks[i].Node + rv[i].Heartbeat = locks[i].Heartbeat + + if rv[i].Heartbeat.T+defs.StaleFrameSec < clusterTime.T { + rv[i].err = ErrStaleHearbeat + } + } + return rv, nil +} + +// waitOp waits until operations which acquires a given lock are finished +func waitOp(ctx context.Context, conn connect.Client, lck *lock.LockHeader) error { + tick := time.NewTicker(time.Second) + defer tick.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-tick.C: + lock, err := lock.GetLockData(ctx, conn, lck) + if err != nil { + if errors.Is(err, mongo.ErrNoDocuments) { + // No lock, so operation has finished + return nil + } + + return errors.Wrap(err, "get lock data") + } + + clusterTime, err := topo.GetClusterTime(ctx, conn) + if err != nil { + return errors.Wrap(err, "read cluster time") + } + + if clusterTime.T-lock.Heartbeat.T >= defs.StaleFrameSec { + return errors.Errorf("operation stale, last beat ts: %d", lock.Heartbeat.T) 
+ } + } + } +} + +func lastLogErr( + ctx context.Context, + conn connect.Client, + op ctrl.Command, + after int64, +) (string, error) { + r := &log.LogRequest{ + LogKeys: log.LogKeys{ + Severity: log.Error, + Event: string(op), + }, + TimeMin: time.Unix(after, 0), + } + + outC, errC := log.Follow(ctx, conn, r, false) + + for { + select { + case entry := <-outC: + return entry.Msg, nil + case err := <-errC: + return "", err + } + } +} diff --git a/vendor/github.com/percona/percona-backup-mongodb/v2/sdk.go b/vendor/github.com/percona/percona-backup-mongodb/v2/sdk.go new file mode 100644 index 000000000..018e9d2c8 --- /dev/null +++ b/vendor/github.com/percona/percona-backup-mongodb/v2/sdk.go @@ -0,0 +1,220 @@ +package sdk + +import ( + "context" + + "go.mongodb.org/mongo-driver/bson/primitive" + + "github.com/percona/percona-backup-mongodb/pbm/backup" + "github.com/percona/percona-backup-mongodb/pbm/compress" + "github.com/percona/percona-backup-mongodb/pbm/config" + "github.com/percona/percona-backup-mongodb/pbm/connect" + "github.com/percona/percona-backup-mongodb/pbm/ctrl" + "github.com/percona/percona-backup-mongodb/pbm/defs" + "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/lock" + "github.com/percona/percona-backup-mongodb/pbm/log" + "github.com/percona/percona-backup-mongodb/pbm/oplog" + "github.com/percona/percona-backup-mongodb/pbm/restore" +) + +var ( + ErrUnsupported = errors.New("unsupported") + ErrInvalidCommandID = errors.New("invalid command id") + ErrNotFound = errors.New("not found") +) + +type ( + Command = ctrl.Cmd + CommandID string + CommandType = ctrl.Command + Timestamp = primitive.Timestamp +) + +const ( + CmdBackup = ctrl.CmdBackup + CmdRestore = ctrl.CmdRestore + CmdReplay = ctrl.CmdReplay + CmdCancelBackup = ctrl.CmdCancelBackup + CmdResync = ctrl.CmdResync + CmdPITR = ctrl.CmdPITR + CmdDeleteBackup = ctrl.CmdDeleteBackup + CmdDeletePITR = ctrl.CmdDeletePITR + CmdCleanup = ctrl.CmdCleanup +) + +var NoOpID = CommandID(ctrl.NilOPID.String()) + +type BackupType = defs.BackupType + +const ( + LogicalBackup = defs.LogicalBackup + PhysicalBackup = defs.PhysicalBackup + IncrementalBackup = defs.IncrementalBackup + ExternalBackup = defs.ExternalBackup + SelectiveBackup = backup.SelectiveBackup +) + +type ( + CompressionType = compress.CompressionType + CompressionLevel *int +) + +const ( + CompressionTypeNone = compress.CompressionTypeNone + CompressionTypeGZIP = compress.CompressionTypeGZIP + CompressionTypePGZIP = compress.CompressionTypePGZIP + CompressionTypeSNAPPY = compress.CompressionTypeSNAPPY + CompressionTypeLZ4 = compress.CompressionTypeLZ4 + CompressionTypeS2 = compress.CompressionTypeS2 + CompressionTypeZstandard = compress.CompressionTypeZstandard +) + +type ( + Config = config.Config + BackupMetadata = backup.BackupMeta + RestoreMetadata = restore.RestoreMeta + OplogChunk = oplog.OplogChunk + CleanupReport = backup.CleanupInfo +) + +type LogicalBackupOptions struct { + CompressionType CompressionType + CompressionLevel CompressionLevel + Namespaces []string +} + +type PhysicalBackupOptions struct { + CompressionType CompressionType + CompressionLevel CompressionLevel +} + +type IncrementalBackupOptions struct { + NewBase bool + CompressionType CompressionType + CompressionLevel CompressionLevel +} + +type GetBackupByNameOptions struct { + FetchIncrements bool + FetchFilelist bool +} + +type GetAllRestoresOptions struct { + Limit int64 +} + +type DeleteBackupBeforeOptions struct { + Type BackupType +} + 
+// OpLock represents internal PBM lock. +// +// Some commands can have many locks (one lock per replset). +type OpLock struct { + // OpID is its command id. + OpID CommandID `json:"opid,omitempty"` + // Cmd is the type of command + Cmd CommandType `json:"cmd,omitempty"` + // Replset is name of a replset that acquired the lock. + Replset string `json:"rs,omitempty"` + // Node is `host:port` pair of an agent that acquired the lock. + Node string `json:"node,omitempty"` + // Heartbeat is the last cluster time seen by an agent that acquired the lock. + Heartbeat primitive.Timestamp `json:"hb"` + + err error +} + +func (l *OpLock) Err() error { + return l.err +} + +func NewClient(ctx context.Context, uri string) (*Client, error) { + conn, err := connect.Connect(ctx, uri, "sdk") + if err != nil { + return nil, err + } + + return &Client{conn: conn}, nil +} + +func WaitForCleanup(ctx context.Context, client *Client) error { + lck := &lock.LockHeader{Type: ctrl.CmdCleanup} + return waitOp(ctx, client.conn, lck) +} + +func WaitForDeleteBackup(ctx context.Context, client *Client) error { + lck := &lock.LockHeader{Type: ctrl.CmdDeleteBackup} + return waitOp(ctx, client.conn, lck) +} + +func WaitForDeleteOplogRange(ctx context.Context, client *Client) error { + lck := &lock.LockHeader{Type: ctrl.CmdDeletePITR} + return waitOp(ctx, client.conn, lck) +} + +func WaitForErrorLog(ctx context.Context, client *Client, cmd *Command) (string, error) { + return lastLogErr(ctx, client.conn, cmd.Cmd, cmd.TS) +} + +func WaitForResync(ctx context.Context, c *Client, cid CommandID) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + r := &log.LogRequest{ + LogKeys: log.LogKeys{ + Event: string(ctrl.CmdResync), + OPID: string(cid), + Severity: log.Info, + }, + } + + outC, errC := log.Follow(ctx, c.conn, r, false) + + for { + select { + case entry := <-outC: + if entry != nil && entry.Msg == "succeed" { + return nil + } + case err := <-errC: + return err + } + } +} + +func CanDeleteBackup(ctx context.Context, client *Client, bcp *BackupMetadata) error { + return backup.CanDeleteBackup(ctx, client.conn, bcp) +} + +func CanDeleteIncrementalBackup( + ctx context.Context, + client *Client, + bcp *BackupMetadata, + increments [][]*BackupMetadata, +) error { + return backup.CanDeleteIncrementalChain(ctx, client.conn, bcp, increments) +} + +func ListDeleteBackupBefore( + ctx context.Context, + client *Client, + ts primitive.Timestamp, + bcpType BackupType, +) ([]BackupMetadata, error) { + return backup.ListDeleteBackupBefore(ctx, client.conn, ts, bcpType) +} + +func ListDeleteChunksBefore( + ctx context.Context, + client *Client, + ts primitive.Timestamp, +) ([]OplogChunk, error) { + r, err := backup.MakeCleanupInfo(ctx, client.conn, ts) + return r.Chunks, err +} + +func ParseDeleteBackupType(s string) (BackupType, error) { + return backup.ParseDeleteBackupType(s) +} diff --git a/vendor/github.com/percona/percona-backup-mongodb/v2/util.go b/vendor/github.com/percona/percona-backup-mongodb/v2/util.go new file mode 100644 index 000000000..cb3f1d61b --- /dev/null +++ b/vendor/github.com/percona/percona-backup-mongodb/v2/util.go @@ -0,0 +1,55 @@ +package sdk + +import ( + "context" + + "go.mongodb.org/mongo-driver/bson/primitive" + + "github.com/percona/percona-backup-mongodb/pbm/backup" + "github.com/percona/percona-backup-mongodb/pbm/defs" + "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/topo" +) + +type ( + ReplsetInfo = topo.Shard + AgentStatus = 
topo.AgentStat +) + +var ( + ErrMissedClusterTime = errors.New("missed cluster time") + ErrInvalidDeleteBackupType = backup.ErrInvalidDeleteBackupType +) + +func IsHeartbeatStale(clusterTime, other Timestamp) bool { + return clusterTime.T >= other.T+defs.StaleFrameSec +} + +func ClusterTime(ctx context.Context, client *Client) (Timestamp, error) { + info, err := topo.GetNodeInfo(ctx, client.conn.MongoClient()) + if err != nil { + return primitive.Timestamp{}, err + } + if info.ClusterTime == nil { + return primitive.Timestamp{}, ErrMissedClusterTime + } + + return info.ClusterTime.ClusterTime, nil +} + +// ClusterMembers returns list of replsets in the cluster. +// +// For sharded cluster: the configsvr (with ID `config`) and all shards. +// For non-sharded cluster: the replset. +func ClusterMembers(ctx context.Context, client *Client) ([]ReplsetInfo, error) { + shards, err := topo.ClusterMembers(ctx, client.conn.MongoClient()) + if err != nil { + return nil, errors.Wrap(err, "topo") + } + return shards, nil +} + +// AgentStatuses returns list of all PBM Agents statuses. +func AgentStatuses(ctx context.Context, client *Client) ([]AgentStatus, error) { + return topo.ListAgents(ctx, client.conn) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8b532001e..06b605d67 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -240,6 +240,10 @@ github.com/opencontainers/go-digest ## explicit; go 1.18 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 +# github.com/percona/percona-backup-mongodb/v2 v2.0.0-00010101000000-000000000000 => ./v2 +## explicit; go 1.22 +github.com/percona/percona-backup-mongodb/v2 +github.com/percona/percona-backup-mongodb/v2/cli # github.com/pierrec/lz4 v2.6.1+incompatible ## explicit github.com/pierrec/lz4 @@ -384,3 +388,4 @@ golang.org/x/text/unicode/norm gopkg.in/yaml.v2 # gotest.tools/v3 v3.5.1 ## explicit; go 1.17 +# github.com/percona/percona-backup-mongodb/v2 => ./v2 From a8530e0d876e2cee6728b83e00df4c337118e351 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 27 Jun 2024 18:13:55 +0200 Subject: [PATCH 088/203] Revert "v2 go.mod" This reverts commit d871b95b4615e28bace7538db2fa5fc25473ea3b. 
--- go.mod | 3 - v2/go.mod | 44 -- v2/go.sum | 150 ------ .../percona-backup-mongodb/v2/cli/status.go | 185 -------- .../percona/percona-backup-mongodb/v2/impl.go | 430 ------------------ .../percona/percona-backup-mongodb/v2/sdk.go | 220 --------- .../percona/percona-backup-mongodb/v2/util.go | 55 --- vendor/modules.txt | 5 - 8 files changed, 1092 deletions(-) delete mode 100644 v2/go.mod delete mode 100644 v2/go.sum delete mode 100644 vendor/github.com/percona/percona-backup-mongodb/v2/cli/status.go delete mode 100644 vendor/github.com/percona/percona-backup-mongodb/v2/impl.go delete mode 100644 vendor/github.com/percona/percona-backup-mongodb/v2/sdk.go delete mode 100644 vendor/github.com/percona/percona-backup-mongodb/v2/util.go diff --git a/go.mod b/go.mod index 60fcf814e..5e21daa69 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,6 @@ require ( github.com/klauspost/pgzip v1.2.6 github.com/minio/minio-go v6.0.14+incompatible github.com/mongodb/mongo-tools v0.0.0-20231117185435-bf0bef9e9f19 - github.com/percona/percona-backup-mongodb/v2 v2.0.0-00010101000000-000000000000 github.com/pierrec/lz4 v2.6.1+incompatible github.com/pkg/errors v0.9.1 go.mongodb.org/mongo-driver v1.13.0 @@ -66,5 +65,3 @@ require ( golang.org/x/time v0.5.0 // indirect gotest.tools/v3 v3.5.1 // indirect ) - -replace github.com/percona/percona-backup-mongodb/v2 => ./v2 diff --git a/v2/go.mod b/v2/go.mod deleted file mode 100644 index bd876ef91..000000000 --- a/v2/go.mod +++ /dev/null @@ -1,44 +0,0 @@ -module github.com/percona/percona-backup-mongodb/v2 - -go 1.22 - -require ( - github.com/percona/percona-backup-mongodb v1.0.0 - go.mongodb.org/mongo-driver v1.13.0 - golang.org/x/sync v0.6.0 -) - -require ( - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 // indirect - github.com/aws/aws-sdk-go v1.50.31 // indirect - github.com/go-ini/ini v1.67.0 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/jessevdk/go-flags v1.5.0 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/klauspost/compress v1.17.7 // indirect - github.com/klauspost/pgzip v1.2.6 // indirect - github.com/kr/pretty v0.3.1 // indirect - github.com/minio/minio-go v6.0.14+incompatible // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mongodb/mongo-tools v0.0.0-20231117185435-bf0bef9e9f19 // indirect - github.com/montanaflynn/stats v0.6.6 // indirect - github.com/pierrec/lz4 v2.6.1+incompatible // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/xdg-go/pbkdf2 v1.0.0 // indirect - github.com/xdg-go/scram v1.1.2 // indirect - github.com/xdg-go/stringprep v1.0.4 // indirect - github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect - golang.org/x/mod v0.16.0 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect -) - -replace github.com/percona/percona-backup-mongodb => .. 
diff --git a/v2/go.sum b/v2/go.sum deleted file mode 100644 index e6e094f3b..000000000 --- a/v2/go.sum +++ /dev/null @@ -1,150 +0,0 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= -github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= -github.com/aws/aws-sdk-go v1.50.31 h1:gx2NRLLEDUmQFC4YUsfMUKkGCwpXVO8ijUecq/nOQGA= -github.com/aws/aws-sdk-go v1.50.31/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= -github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= -github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= -github.com/gopherjs/gopherjs v1.17.2/go.mod 
h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= -github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= -github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= -github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o= -github.com/minio/minio-go v6.0.14+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mongodb/mongo-tools v0.0.0-20231117185435-bf0bef9e9f19 h1:B0nhjnm3za73rABZa3HdMhn9WuOXPPHweBBqhZnWinI= -github.com/mongodb/mongo-tools v0.0.0-20231117185435-bf0bef9e9f19/go.mod h1:2Rl3k3e333g2AJN74N9hx9N4IIhB0IcTU3m92oNsOyE= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/montanaflynn/stats v0.6.6 h1:Duep6KMIDpY4Yo11iFsvyqJDyfzLF9+sndUKT+v64GQ= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= -github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= -github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
-github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= -github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= -github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.mongodb.org/mongo-driver v1.13.0 h1:67DgFFjYOCMWdtTEmKFpV3ffWlFnh+CYZ8ZS/tXWUfY= -go.mongodb.org/mongo-driver v1.13.0/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/percona/percona-backup-mongodb/v2/cli/status.go b/vendor/github.com/percona/percona-backup-mongodb/v2/cli/status.go deleted file mode 100644 index 3b44e79ce..000000000 --- a/vendor/github.com/percona/percona-backup-mongodb/v2/cli/status.go +++ /dev/null @@ -1,185 +0,0 @@ -package cli - -import ( - "context" - "fmt" - "net/url" - "strings" - "sync" - - "go.mongodb.org/mongo-driver/bson/primitive" - "golang.org/x/sync/errgroup" - - "github.com/percona/percona-backup-mongodb/pbm/connect" - "github.com/percona/percona-backup-mongodb/pbm/errors" - "github.com/percona/percona-backup-mongodb/pbm/topo" - sdk "github.com/percona/percona-backup-mongodb/v2" -) - -type LostAgentError struct { - heartbeat primitive.Timestamp -} - -func (e LostAgentError) Error() string { - return fmt.Sprintf("lost agent, last heartbeat: %v", e.heartbeat.T) -} - -type RSRole string - -const ( - RolePrimary RSRole = "P" - RoleSecondary RSRole = "S" - RoleArbiter RSRole = "A" - RoleHidden RSRole = "H" - RoleDelayed RSRole = "D" -) - -type Node struct { - Host string - Ver string - Role RSRole - OK bool - Errs []error -} - -func (n Node) IsAgentLost() bool { - if len(n.Errs) == 0 { - return false - } - - lostErr := LostAgentError{} - for _, err := range n.Errs { - if errors.As(err, &lostErr) { - return true - } - } - - return false -} - -func ClusterStatus( - ctx context.Context, - pbm *sdk.Client, - confGetter RSConfGetter, -) (map[string][]Node, error) { - clusterMembers, err := sdk.ClusterMembers(ctx, pbm) - if err != nil { - return nil, errors.Wrap(err, "get agent statuses") - } - agentStatuses, err := sdk.AgentStatuses(ctx, pbm) - if err != nil { - return nil, errors.Wrap(err, "get cluster members") - } - clusterTime, err := sdk.ClusterTime(ctx, pbm) - if err != nil { - return nil, errors.Wrap(err, "read cluster time") - } - - agentMap := make(map[topo.ReplsetName]map[string]*sdk.AgentStatus, len(clusterMembers)) - for i := range agentStatuses { - agent := &agentStatuses[i] - rs, ok := agentMap[agent.RS] - if !ok { - rs = make(map[string]*topo.AgentStat) - agentMap[agent.RS] = rs - } - - rs[agent.Node] = agent - agentMap[agent.RS] = rs - } - - eg, ctx := errgroup.WithContext(ctx) - m := sync.Mutex{} - - pbmCluster := make(map[string][]Node) - for _, c := range clusterMembers { - eg.Go(func() error { - rsConf, err := confGetter.Get(ctx, c.Host) - if err != nil { - return errors.Wrapf(err, "get replset status for `%s`", c.RS) - } - - nodes := make([]Node, len(rsConf.Members)) - for i, member := range rsConf.Members { - node := &nodes[i] - node.Host = member.Host - - rsAgents := agentMap[c.RS] - if rsAgents == nil { - continue - } - agent := rsAgents[member.Host] - if agent == nil { - continue - } - - node.Ver = "v" + agent.AgentVer - - switch { - case agent.State == 1: // agent.StateStr == "PRIMARY" - node.Role = RolePrimary - case agent.State == 7: // agent.StateStr == "ARBITER" - node.Role = RoleArbiter - case agent.State == 2: // agent.StateStr == "SECONDARY" - if agent.DelaySecs != 0 { - node.Role = RoleDelayed - } else if agent.Hidden { - node.Role = RoleHidden - } else { - node.Role = RoleSecondary - } - default: - // unexpected state. 
show actual state - node.Role = RSRole(agent.StateStr) - } - - if agent.IsStale(clusterTime) { - node.Errs = []error{LostAgentError{agent.Heartbeat}} - continue - } - - node.OK, node.Errs = agent.OK() - } - - m.Lock() - pbmCluster[c.RS] = nodes - m.Unlock() - return nil - }) - } - - err = eg.Wait() - return pbmCluster, err -} - -type RSConfGetter string - -func (g RSConfGetter) Get(ctx context.Context, host string) (*topo.RSConfig, error) { - rsName, host, ok := strings.Cut(host, "/") - if !ok { - host = rsName - } - - if !strings.HasPrefix(string(g), "mongodb://") { - g = "mongodb://" + g - } - curi, err := url.Parse(string(g)) - if err != nil { - return nil, errors.Wrapf(err, "parse mongo-uri '%s'", g) - } - - // Preserving the `replicaSet` parameter will cause an error - // while connecting to the ConfigServer (mismatched replicaset names) - query := curi.Query() - query.Del("replicaSet") - curi.RawQuery = query.Encode() - curi.Host = host - - conn, err := connect.MongoConnect(ctx, curi.String(), connect.AppName("pbm-sdk")) - if err != nil { - return nil, errors.Wrap(err, "connect") - } - defer conn.Disconnect(context.Background()) - - return topo.GetReplSetConfig(ctx, conn) -} diff --git a/vendor/github.com/percona/percona-backup-mongodb/v2/impl.go b/vendor/github.com/percona/percona-backup-mongodb/v2/impl.go deleted file mode 100644 index 3dd45a072..000000000 --- a/vendor/github.com/percona/percona-backup-mongodb/v2/impl.go +++ /dev/null @@ -1,430 +0,0 @@ -package sdk - -import ( - "context" - "path" - "runtime" - "time" - - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/mongo" - "golang.org/x/sync/errgroup" - - "github.com/percona/percona-backup-mongodb/pbm/backup" - "github.com/percona/percona-backup-mongodb/pbm/config" - "github.com/percona/percona-backup-mongodb/pbm/connect" - "github.com/percona/percona-backup-mongodb/pbm/ctrl" - "github.com/percona/percona-backup-mongodb/pbm/defs" - "github.com/percona/percona-backup-mongodb/pbm/errors" - "github.com/percona/percona-backup-mongodb/pbm/lock" - "github.com/percona/percona-backup-mongodb/pbm/log" - "github.com/percona/percona-backup-mongodb/pbm/restore" - "github.com/percona/percona-backup-mongodb/pbm/storage" - "github.com/percona/percona-backup-mongodb/pbm/topo" - "github.com/percona/percona-backup-mongodb/pbm/util" - "github.com/percona/percona-backup-mongodb/pbm/version" -) - -var ErrNotImplemented = errors.New("not implemented") - -var ( - ErrBackupInProgress = backup.ErrBackupInProgress - ErrIncrementalBackup = backup.ErrIncrementalBackup - ErrNonIncrementalBackup = backup.ErrNonIncrementalBackup - ErrNotBaseIncrement = backup.ErrNotBaseIncrement - ErrBaseForPITR = backup.ErrBaseForPITR -) - -type Client struct { - conn connect.Client -} - -func (c *Client) Close(ctx context.Context) error { - return c.conn.Disconnect(ctx) -} - -func (c *Client) CommandInfo(ctx context.Context, id CommandID) (*Command, error) { - opid, err := ctrl.ParseOPID(string(id)) - if err != nil { - return nil, ErrInvalidCommandID - } - - res := c.conn.CmdStreamCollection().FindOne(ctx, bson.D{{"_id", opid.Obj()}}) - if err := res.Err(); err != nil { - if errors.Is(err, mongo.ErrNoDocuments) { - return nil, ErrNotFound - } - return nil, errors.Wrap(err, "query") - } - - cmd := &Command{} - if err = res.Decode(&cmd); err != nil { - return nil, errors.Wrap(err, "decode") - } - - cmd.OPID = opid - return cmd, nil -} - -func (c *Client) GetConfig(ctx context.Context) (*Config, error) { - return config.GetConfig(ctx, c.conn) -} - -func (c 
*Client) SetConfig(ctx context.Context, cfg Config) (CommandID, error) { - return NoOpID, config.SetConfig(ctx, c.conn, &cfg) -} - -func (c *Client) GetAllBackups(ctx context.Context) ([]BackupMetadata, error) { - return backup.BackupsList(ctx, c.conn, 0) -} - -func (c *Client) GetAllRestores( - ctx context.Context, - m connect.Client, - options GetAllRestoresOptions, -) ([]RestoreMetadata, error) { - limit := options.Limit - if limit < 0 { - limit = 0 - } - return restore.RestoreList(ctx, c.conn, limit) -} - -func (c *Client) GetBackupByName( - ctx context.Context, - name string, - options GetBackupByNameOptions, -) (*BackupMetadata, error) { - bcp, err := backup.NewDBManager(c.conn).GetBackupByName(ctx, name) - if err != nil { - return nil, errors.Wrap(err, "get backup meta") - } - - return c.getBackupHelper(ctx, bcp, options) -} - -func (c *Client) GetBackupByOpID( - ctx context.Context, - opid string, - options GetBackupByNameOptions, -) (*BackupMetadata, error) { - bcp, err := backup.NewDBManager(c.conn).GetBackupByOpID(ctx, opid) - if err != nil { - return nil, errors.Wrap(err, "get backup meta") - } - - return c.getBackupHelper(ctx, bcp, options) -} - -func (c *Client) getBackupHelper( - ctx context.Context, - bcp *BackupMetadata, - options GetBackupByNameOptions, -) (*BackupMetadata, error) { - if options.FetchIncrements && bcp.Type == IncrementalBackup { - if bcp.SrcBackup != "" { - return nil, ErrNotBaseIncrement - } - - increments, err := backup.FetchAllIncrements(ctx, c.conn, bcp) - if err != nil { - return nil, errors.New("get increments") - } - if increments == nil { - // use non-nil empty slice to mark fetch. - // nil means it never tried to fetch before - increments = make([][]*backup.BackupMeta, 0) - } - - bcp.Increments = increments - } - - if options.FetchFilelist { - err := fillFilelistForBackup(ctx, c.conn, bcp) - if err != nil { - return nil, errors.Wrap(err, "fetch filelist") - } - } - - return bcp, nil -} - -func fillFilelistForBackup(ctx context.Context, conn connect.Client, bcp *BackupMetadata) error { - var err error - var stg storage.Storage - - eg, _ := errgroup.WithContext(ctx) - eg.SetLimit(runtime.NumCPU()) - - if version.HasFilelistFile(bcp.PBMVersion) { - stg, err = util.GetStorage(ctx, conn, nil) - if err != nil { - return errors.Wrap(err, "get storage") - } - - for i := range bcp.Replsets { - rs := &bcp.Replsets[i] - - eg.Go(func() error { - filelist, err := getFilelistForReplset(stg, bcp.Name, rs.Name) - if err != nil { - return errors.Wrapf(err, "get filelist for %q [rs: %s] backup", bcp.Name, rs.Name) - } - - rs.Files = filelist - return nil - }) - } - } - - for i := range bcp.Increments { - for j := range bcp.Increments[i] { - bcp := bcp.Increments[i][j] - - if bcp.Status != defs.StatusDone { - continue - } - if !version.HasFilelistFile(bcp.PBMVersion) { - continue - } - - if stg == nil { - // in case if it is the first backup made with filelist file - stg, err = getStorageForRead(ctx, conn) - if err != nil { - return errors.Wrap(err, "get storage") - } - } - - for i := range bcp.Replsets { - rs := &bcp.Replsets[i] - - eg.Go(func() error { - filelist, err := getFilelistForReplset(stg, bcp.Name, rs.Name) - if err != nil { - return errors.Wrapf(err, "fetch files for %q [rs: %s] backup", bcp.Name, rs.Name) - } - - rs.Files = filelist - return nil - }) - } - } - } - - return eg.Wait() -} - -func getStorageForRead(ctx context.Context, conn connect.Client) (storage.Storage, error) { - stg, err := util.GetStorage(ctx, conn, nil) - if err != nil { - return 
nil, errors.Wrap(err, "get storage") - } - ok, err := storage.HasReadAccess(ctx, stg) - if err != nil { - return nil, errors.Wrap(err, "check storage access") - } - if !ok { - return nil, errors.New("no read permission for configured storage") - } - - return stg, nil -} - -func getFilelistForReplset(stg storage.Storage, bcpName, rsName string) (backup.Filelist, error) { - pfFilepath := path.Join(bcpName, rsName, backup.FilelistName) - rdr, err := stg.SourceReader(pfFilepath) - if err != nil { - return nil, errors.Wrapf(err, "open %q", pfFilepath) - } - defer rdr.Close() - - filelist, err := backup.ReadFilelist(rdr) - if err != nil { - return nil, errors.Wrapf(err, "parse filelist %q", pfFilepath) - } - - return filelist, nil -} - -func (c *Client) GetRestoreByName(ctx context.Context, name string) (*RestoreMetadata, error) { - return restore.GetRestoreMeta(ctx, c.conn, name) -} - -func (c *Client) GetRestoreByOpID(ctx context.Context, opid string) (*RestoreMetadata, error) { - return restore.GetRestoreMetaByOPID(ctx, c.conn, opid) -} - -func (c *Client) SyncFromStorage(ctx context.Context) (CommandID, error) { - opid, err := ctrl.SendResync(ctx, c.conn) - return CommandID(opid.String()), err -} - -func (c *Client) DeleteBackupByName(ctx context.Context, name string) (CommandID, error) { - opts := GetBackupByNameOptions{FetchIncrements: true} - bcp, err := c.GetBackupByName(ctx, name, opts) - if err != nil { - return NoOpID, errors.Wrap(err, "get backup meta") - } - if bcp.Type == defs.IncrementalBackup { - err = CanDeleteIncrementalBackup(ctx, c, bcp, bcp.Increments) - } else { - err = CanDeleteBackup(ctx, c, bcp) - } - if err != nil { - return NoOpID, err - } - - opid, err := ctrl.SendDeleteBackupByName(ctx, c.conn, name) - return CommandID(opid.String()), err -} - -func (c *Client) DeleteBackupBefore( - ctx context.Context, - beforeTS Timestamp, - options DeleteBackupBeforeOptions, -) (CommandID, error) { - opid, err := ctrl.SendDeleteBackupBefore(ctx, c.conn, beforeTS, options.Type) - return CommandID(opid.String()), err -} - -func (c *Client) DeleteOplogRange(ctx context.Context, until Timestamp) (CommandID, error) { - opid, err := ctrl.SendDeleteOplogRangeBefore(ctx, c.conn, until) - return CommandID(opid.String()), err -} - -func (c *Client) CleanupReport(ctx context.Context, beforeTS Timestamp) (CleanupReport, error) { - return backup.MakeCleanupInfo(ctx, c.conn, beforeTS) -} - -func (c *Client) RunCleanup(ctx context.Context, beforeTS Timestamp) (CommandID, error) { - opid, err := ctrl.SendCleanup(ctx, c.conn, beforeTS) - return CommandID(opid.String()), err -} - -func (c *Client) CancelBackup(ctx context.Context) (CommandID, error) { - opid, err := ctrl.SendCancelBackup(ctx, c.conn) - return CommandID(opid.String()), err -} - -func (c *Client) RunLogicalBackup(ctx context.Context, options LogicalBackupOptions) (CommandID, error) { - return NoOpID, ErrNotImplemented -} - -func (c *Client) RunPhysicalBackup(ctx context.Context, options PhysicalBackupOptions) (CommandID, error) { - return NoOpID, ErrNotImplemented -} - -func (c *Client) RunIncrementalBackup(ctx context.Context, options IncrementalBackupOptions) (CommandID, error) { - return NoOpID, ErrNotImplemented -} - -func (c *Client) Restore(ctx context.Context, backupName string, clusterTS Timestamp) (CommandID, error) { - return NoOpID, ErrNotImplemented -} - -type lockImpl struct { - lock.LockData -} - -func (l lockImpl) Type() string { - return string(l.LockData.Type) -} - -func (l lockImpl) CommandID() string { - return 
l.OPID -} - -func (l lockImpl) Heartbeat() Timestamp { - return l.LockData.Heartbeat -} - -var ErrStaleHearbeat = errors.New("stale heartbeat") - -func (c *Client) OpLocks(ctx context.Context) ([]OpLock, error) { - locks, err := lock.GetLocks(ctx, c.conn, &lock.LockHeader{}) - if err != nil { - return nil, errors.Wrap(err, "get locks") - } - if len(locks) == 0 { - // no current op - return nil, nil - } - - clusterTime, err := ClusterTime(ctx, c) - if err != nil { - return nil, errors.Wrap(err, "get cluster time") - } - - rv := make([]OpLock, len(locks)) - for i := range locks { - rv[i].OpID = CommandID(locks[i].OPID) - rv[i].Cmd = locks[i].Type - rv[i].Replset = locks[i].Replset - rv[i].Node = locks[i].Node - rv[i].Heartbeat = locks[i].Heartbeat - - if rv[i].Heartbeat.T+defs.StaleFrameSec < clusterTime.T { - rv[i].err = ErrStaleHearbeat - } - } - return rv, nil -} - -// waitOp waits until operations which acquires a given lock are finished -func waitOp(ctx context.Context, conn connect.Client, lck *lock.LockHeader) error { - tick := time.NewTicker(time.Second) - defer tick.Stop() - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-tick.C: - lock, err := lock.GetLockData(ctx, conn, lck) - if err != nil { - if errors.Is(err, mongo.ErrNoDocuments) { - // No lock, so operation has finished - return nil - } - - return errors.Wrap(err, "get lock data") - } - - clusterTime, err := topo.GetClusterTime(ctx, conn) - if err != nil { - return errors.Wrap(err, "read cluster time") - } - - if clusterTime.T-lock.Heartbeat.T >= defs.StaleFrameSec { - return errors.Errorf("operation stale, last beat ts: %d", lock.Heartbeat.T) - } - } - } -} - -func lastLogErr( - ctx context.Context, - conn connect.Client, - op ctrl.Command, - after int64, -) (string, error) { - r := &log.LogRequest{ - LogKeys: log.LogKeys{ - Severity: log.Error, - Event: string(op), - }, - TimeMin: time.Unix(after, 0), - } - - outC, errC := log.Follow(ctx, conn, r, false) - - for { - select { - case entry := <-outC: - return entry.Msg, nil - case err := <-errC: - return "", err - } - } -} diff --git a/vendor/github.com/percona/percona-backup-mongodb/v2/sdk.go b/vendor/github.com/percona/percona-backup-mongodb/v2/sdk.go deleted file mode 100644 index 018e9d2c8..000000000 --- a/vendor/github.com/percona/percona-backup-mongodb/v2/sdk.go +++ /dev/null @@ -1,220 +0,0 @@ -package sdk - -import ( - "context" - - "go.mongodb.org/mongo-driver/bson/primitive" - - "github.com/percona/percona-backup-mongodb/pbm/backup" - "github.com/percona/percona-backup-mongodb/pbm/compress" - "github.com/percona/percona-backup-mongodb/pbm/config" - "github.com/percona/percona-backup-mongodb/pbm/connect" - "github.com/percona/percona-backup-mongodb/pbm/ctrl" - "github.com/percona/percona-backup-mongodb/pbm/defs" - "github.com/percona/percona-backup-mongodb/pbm/errors" - "github.com/percona/percona-backup-mongodb/pbm/lock" - "github.com/percona/percona-backup-mongodb/pbm/log" - "github.com/percona/percona-backup-mongodb/pbm/oplog" - "github.com/percona/percona-backup-mongodb/pbm/restore" -) - -var ( - ErrUnsupported = errors.New("unsupported") - ErrInvalidCommandID = errors.New("invalid command id") - ErrNotFound = errors.New("not found") -) - -type ( - Command = ctrl.Cmd - CommandID string - CommandType = ctrl.Command - Timestamp = primitive.Timestamp -) - -const ( - CmdBackup = ctrl.CmdBackup - CmdRestore = ctrl.CmdRestore - CmdReplay = ctrl.CmdReplay - CmdCancelBackup = ctrl.CmdCancelBackup - CmdResync = ctrl.CmdResync - CmdPITR = ctrl.CmdPITR - 
CmdDeleteBackup = ctrl.CmdDeleteBackup - CmdDeletePITR = ctrl.CmdDeletePITR - CmdCleanup = ctrl.CmdCleanup -) - -var NoOpID = CommandID(ctrl.NilOPID.String()) - -type BackupType = defs.BackupType - -const ( - LogicalBackup = defs.LogicalBackup - PhysicalBackup = defs.PhysicalBackup - IncrementalBackup = defs.IncrementalBackup - ExternalBackup = defs.ExternalBackup - SelectiveBackup = backup.SelectiveBackup -) - -type ( - CompressionType = compress.CompressionType - CompressionLevel *int -) - -const ( - CompressionTypeNone = compress.CompressionTypeNone - CompressionTypeGZIP = compress.CompressionTypeGZIP - CompressionTypePGZIP = compress.CompressionTypePGZIP - CompressionTypeSNAPPY = compress.CompressionTypeSNAPPY - CompressionTypeLZ4 = compress.CompressionTypeLZ4 - CompressionTypeS2 = compress.CompressionTypeS2 - CompressionTypeZstandard = compress.CompressionTypeZstandard -) - -type ( - Config = config.Config - BackupMetadata = backup.BackupMeta - RestoreMetadata = restore.RestoreMeta - OplogChunk = oplog.OplogChunk - CleanupReport = backup.CleanupInfo -) - -type LogicalBackupOptions struct { - CompressionType CompressionType - CompressionLevel CompressionLevel - Namespaces []string -} - -type PhysicalBackupOptions struct { - CompressionType CompressionType - CompressionLevel CompressionLevel -} - -type IncrementalBackupOptions struct { - NewBase bool - CompressionType CompressionType - CompressionLevel CompressionLevel -} - -type GetBackupByNameOptions struct { - FetchIncrements bool - FetchFilelist bool -} - -type GetAllRestoresOptions struct { - Limit int64 -} - -type DeleteBackupBeforeOptions struct { - Type BackupType -} - -// OpLock represents internal PBM lock. -// -// Some commands can have many locks (one lock per replset). -type OpLock struct { - // OpID is its command id. - OpID CommandID `json:"opid,omitempty"` - // Cmd is the type of command - Cmd CommandType `json:"cmd,omitempty"` - // Replset is name of a replset that acquired the lock. - Replset string `json:"rs,omitempty"` - // Node is `host:port` pair of an agent that acquired the lock. - Node string `json:"node,omitempty"` - // Heartbeat is the last cluster time seen by an agent that acquired the lock. 
- Heartbeat primitive.Timestamp `json:"hb"` - - err error -} - -func (l *OpLock) Err() error { - return l.err -} - -func NewClient(ctx context.Context, uri string) (*Client, error) { - conn, err := connect.Connect(ctx, uri, "sdk") - if err != nil { - return nil, err - } - - return &Client{conn: conn}, nil -} - -func WaitForCleanup(ctx context.Context, client *Client) error { - lck := &lock.LockHeader{Type: ctrl.CmdCleanup} - return waitOp(ctx, client.conn, lck) -} - -func WaitForDeleteBackup(ctx context.Context, client *Client) error { - lck := &lock.LockHeader{Type: ctrl.CmdDeleteBackup} - return waitOp(ctx, client.conn, lck) -} - -func WaitForDeleteOplogRange(ctx context.Context, client *Client) error { - lck := &lock.LockHeader{Type: ctrl.CmdDeletePITR} - return waitOp(ctx, client.conn, lck) -} - -func WaitForErrorLog(ctx context.Context, client *Client, cmd *Command) (string, error) { - return lastLogErr(ctx, client.conn, cmd.Cmd, cmd.TS) -} - -func WaitForResync(ctx context.Context, c *Client, cid CommandID) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - r := &log.LogRequest{ - LogKeys: log.LogKeys{ - Event: string(ctrl.CmdResync), - OPID: string(cid), - Severity: log.Info, - }, - } - - outC, errC := log.Follow(ctx, c.conn, r, false) - - for { - select { - case entry := <-outC: - if entry != nil && entry.Msg == "succeed" { - return nil - } - case err := <-errC: - return err - } - } -} - -func CanDeleteBackup(ctx context.Context, client *Client, bcp *BackupMetadata) error { - return backup.CanDeleteBackup(ctx, client.conn, bcp) -} - -func CanDeleteIncrementalBackup( - ctx context.Context, - client *Client, - bcp *BackupMetadata, - increments [][]*BackupMetadata, -) error { - return backup.CanDeleteIncrementalChain(ctx, client.conn, bcp, increments) -} - -func ListDeleteBackupBefore( - ctx context.Context, - client *Client, - ts primitive.Timestamp, - bcpType BackupType, -) ([]BackupMetadata, error) { - return backup.ListDeleteBackupBefore(ctx, client.conn, ts, bcpType) -} - -func ListDeleteChunksBefore( - ctx context.Context, - client *Client, - ts primitive.Timestamp, -) ([]OplogChunk, error) { - r, err := backup.MakeCleanupInfo(ctx, client.conn, ts) - return r.Chunks, err -} - -func ParseDeleteBackupType(s string) (BackupType, error) { - return backup.ParseDeleteBackupType(s) -} diff --git a/vendor/github.com/percona/percona-backup-mongodb/v2/util.go b/vendor/github.com/percona/percona-backup-mongodb/v2/util.go deleted file mode 100644 index cb3f1d61b..000000000 --- a/vendor/github.com/percona/percona-backup-mongodb/v2/util.go +++ /dev/null @@ -1,55 +0,0 @@ -package sdk - -import ( - "context" - - "go.mongodb.org/mongo-driver/bson/primitive" - - "github.com/percona/percona-backup-mongodb/pbm/backup" - "github.com/percona/percona-backup-mongodb/pbm/defs" - "github.com/percona/percona-backup-mongodb/pbm/errors" - "github.com/percona/percona-backup-mongodb/pbm/topo" -) - -type ( - ReplsetInfo = topo.Shard - AgentStatus = topo.AgentStat -) - -var ( - ErrMissedClusterTime = errors.New("missed cluster time") - ErrInvalidDeleteBackupType = backup.ErrInvalidDeleteBackupType -) - -func IsHeartbeatStale(clusterTime, other Timestamp) bool { - return clusterTime.T >= other.T+defs.StaleFrameSec -} - -func ClusterTime(ctx context.Context, client *Client) (Timestamp, error) { - info, err := topo.GetNodeInfo(ctx, client.conn.MongoClient()) - if err != nil { - return primitive.Timestamp{}, err - } - if info.ClusterTime == nil { - return primitive.Timestamp{}, 
ErrMissedClusterTime - } - - return info.ClusterTime.ClusterTime, nil -} - -// ClusterMembers returns list of replsets in the cluster. -// -// For sharded cluster: the configsvr (with ID `config`) and all shards. -// For non-sharded cluster: the replset. -func ClusterMembers(ctx context.Context, client *Client) ([]ReplsetInfo, error) { - shards, err := topo.ClusterMembers(ctx, client.conn.MongoClient()) - if err != nil { - return nil, errors.Wrap(err, "topo") - } - return shards, nil -} - -// AgentStatuses returns list of all PBM Agents statuses. -func AgentStatuses(ctx context.Context, client *Client) ([]AgentStatus, error) { - return topo.ListAgents(ctx, client.conn) -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 06b605d67..8b532001e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -240,10 +240,6 @@ github.com/opencontainers/go-digest ## explicit; go 1.18 github.com/opencontainers/image-spec/specs-go github.com/opencontainers/image-spec/specs-go/v1 -# github.com/percona/percona-backup-mongodb/v2 v2.0.0-00010101000000-000000000000 => ./v2 -## explicit; go 1.22 -github.com/percona/percona-backup-mongodb/v2 -github.com/percona/percona-backup-mongodb/v2/cli # github.com/pierrec/lz4 v2.6.1+incompatible ## explicit github.com/pierrec/lz4 @@ -388,4 +384,3 @@ golang.org/x/text/unicode/norm gopkg.in/yaml.v2 # gotest.tools/v3 v3.5.1 ## explicit; go 1.17 -# github.com/percona/percona-backup-mongodb/v2 => ./v2 From 56734ad785cfb16f92929b61211af507fed9a0db Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 27 Jun 2024 18:16:39 +0200 Subject: [PATCH 089/203] revert "sdk => v2" --- cmd/pbm/backup.go | 2 +- cmd/pbm/config.go | 2 +- cmd/pbm/delete.go | 2 +- cmd/pbm/list.go | 2 +- cmd/pbm/main.go | 2 +- cmd/pbm/status.go | 4 ++-- {v2 => sdk}/cli/status.go | 2 +- {v2 => sdk}/impl.go | 0 {v2 => sdk}/sdk.go | 0 {v2 => sdk}/util.go | 0 10 files changed, 8 insertions(+), 8 deletions(-) rename {v2 => sdk}/cli/status.go (98%) rename {v2 => sdk}/impl.go (100%) rename {v2 => sdk}/sdk.go (100%) rename {v2 => sdk}/util.go (100%) diff --git a/cmd/pbm/backup.go b/cmd/pbm/backup.go index 5704b4fac..581b2a42a 100644 --- a/cmd/pbm/backup.go +++ b/cmd/pbm/backup.go @@ -24,7 +24,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/topo" "github.com/percona/percona-backup-mongodb/pbm/util" "github.com/percona/percona-backup-mongodb/pbm/version" - sdk "github.com/percona/percona-backup-mongodb/v2" + "github.com/percona/percona-backup-mongodb/sdk" ) type backupOpts struct { diff --git a/cmd/pbm/config.go b/cmd/pbm/config.go index 10f66bca9..c323c7057 100644 --- a/cmd/pbm/config.go +++ b/cmd/pbm/config.go @@ -15,7 +15,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/errors" - sdk "github.com/percona/percona-backup-mongodb/v2" + "github.com/percona/percona-backup-mongodb/sdk" ) const resyncWaitDuration = 30 * time.Second diff --git a/cmd/pbm/delete.go b/cmd/pbm/delete.go index 459e60fc7..da6920bd0 100644 --- a/cmd/pbm/delete.go +++ b/cmd/pbm/delete.go @@ -17,7 +17,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/oplog" - sdk "github.com/percona/percona-backup-mongodb/v2" + "github.com/percona/percona-backup-mongodb/sdk" ) type deleteBcpOpts struct { diff --git a/cmd/pbm/list.go b/cmd/pbm/list.go index 8ba6a228c..037866403 100644 --- a/cmd/pbm/list.go 
+++ b/cmd/pbm/list.go @@ -19,7 +19,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/topo" "github.com/percona/percona-backup-mongodb/pbm/util" "github.com/percona/percona-backup-mongodb/pbm/version" - sdk "github.com/percona/percona-backup-mongodb/v2" + "github.com/percona/percona-backup-mongodb/sdk" ) type listOpts struct { diff --git a/cmd/pbm/main.go b/cmd/pbm/main.go index c4c560ff2..ef12e1ede 100644 --- a/cmd/pbm/main.go +++ b/cmd/pbm/main.go @@ -21,7 +21,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/oplog" "github.com/percona/percona-backup-mongodb/pbm/topo" "github.com/percona/percona-backup-mongodb/pbm/version" - sdk "github.com/percona/percona-backup-mongodb/v2" + "github.com/percona/percona-backup-mongodb/sdk" ) const ( diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go index 78db4f021..b279bc390 100644 --- a/cmd/pbm/status.go +++ b/cmd/pbm/status.go @@ -25,8 +25,8 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/topo" "github.com/percona/percona-backup-mongodb/pbm/util" "github.com/percona/percona-backup-mongodb/pbm/version" - sdk "github.com/percona/percona-backup-mongodb/v2" - "github.com/percona/percona-backup-mongodb/v2/cli" + "github.com/percona/percona-backup-mongodb/sdk" + "github.com/percona/percona-backup-mongodb/sdk/cli" ) type statusOptions struct { diff --git a/v2/cli/status.go b/sdk/cli/status.go similarity index 98% rename from v2/cli/status.go rename to sdk/cli/status.go index 3b44e79ce..e073f788b 100644 --- a/v2/cli/status.go +++ b/sdk/cli/status.go @@ -13,7 +13,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/topo" - sdk "github.com/percona/percona-backup-mongodb/v2" + "github.com/percona/percona-backup-mongodb/sdk" ) type LostAgentError struct { diff --git a/v2/impl.go b/sdk/impl.go similarity index 100% rename from v2/impl.go rename to sdk/impl.go diff --git a/v2/sdk.go b/sdk/sdk.go similarity index 100% rename from v2/sdk.go rename to sdk/sdk.go diff --git a/v2/util.go b/sdk/util.go similarity index 100% rename from v2/util.go rename to sdk/util.go From c4fcd7c6228b0687917af096b0cde00fc258d419 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 27 Jun 2024 20:13:20 +0200 Subject: [PATCH 090/203] fix defs.NodeState enum --- pbm/defs/defs.go | 2 ++ pbm/topo/status.go | 14 +++++++++----- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/pbm/defs/defs.go b/pbm/defs/defs.go index 575d26098..02d48358b 100644 --- a/pbm/defs/defs.go +++ b/pbm/defs/defs.go @@ -57,11 +57,13 @@ const ( type NodeState int +// https://github.com/mongodb/mongo/blob/v8.0/src/mongo/db/repl/member_state.h#L52-L109 const ( NodeStateStartup NodeState = iota NodeStatePrimary NodeStateSecondary NodeStateRecovering + _NodeStateRSFatal // mongo::repl::MemberState::MS::OBSOLETE_RS_FATAL NodeStateStartup2 NodeStateUnknown NodeStateArbiter diff --git a/pbm/topo/status.go b/pbm/topo/status.go index a6f89a195..6ebac8ead 100644 --- a/pbm/topo/status.go +++ b/pbm/topo/status.go @@ -13,11 +13,15 @@ import ( ) type NodeStatus struct { - ID int `bson:"_id" json:"_id"` - Name string `bson:"name" json:"name"` - Health defs.NodeHealth `bson:"health" json:"health"` - State defs.NodeState `bson:"state" json:"state"` - StateStr string `bson:"stateStr" json:"stateStr"` + ID int `bson:"_id" json:"_id"` + Name string `bson:"name" json:"name"` + Health defs.NodeHealth `bson:"health" json:"health"` + + // 
https://github.com/mongodb/mongo/blob/v8.0/src/mongo/db/repl/member_state.h#L52-L109 + State defs.NodeState `bson:"state" json:"state"` + // https://github.com/mongodb/mongo/blob/v8.0/src/mongo/db/repl/member_state.h#L170-L193 + StateStr string `bson:"stateStr" json:"stateStr"` + Uptime int64 `bson:"uptime" json:"uptime"` Optime *OpTime `bson:"optime" json:"optime"` OptimeDate time.Time `bson:"optimeDate" json:"optimeDate"` From de05cbe71c1c950ab0c22ee822a1b4ffe953ca87 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 27 Jun 2024 20:13:58 +0200 Subject: [PATCH 091/203] fix arbiter node status in sharded cluster --- cmd/pbm-agent/agent.go | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 325c47274..6aa89f3d1 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -77,7 +77,7 @@ func newAgent(ctx context.Context, leadConn connect.Client, uri string, dumpConn } func (a *Agent) CanStart(ctx context.Context) error { - info, err := topo.GetNodeInfoExt(ctx, a.nodeConn) + info, err := topo.GetNodeInfo(ctx, a.nodeConn) if err != nil { return errors.Wrap(err, "get node info") } @@ -325,18 +325,6 @@ func (a *Agent) HbStatus(ctx context.Context) { } hb.Err = "" - - hb.State = defs.NodeStateUnknown - hb.StateStr = "unknown" - n, err := topo.GetNodeStatus(ctx, a.nodeConn, a.brief.Me) - if err != nil { - l.Error("get replSetGetStatus: %v", err) - hb.Err += fmt.Sprintf("get replSetGetStatus: %v", err) - } else { - hb.State = n.State - hb.StateStr = n.StateStr - } - hb.Hidden = false hb.Passive = false @@ -355,6 +343,22 @@ func (a *Agent) HbStatus(ctx context.Context) { } } + if inf.ArbiterOnly { + hb.State = defs.NodeStateArbiter + hb.StateStr = "ARBITER" + } else { + n, err := topo.GetNodeStatus(ctx, a.nodeConn, a.brief.Me) + if err != nil { + l.Error("get replSetGetStatus: %v", err) + hb.Err += fmt.Sprintf("get replSetGetStatus: %v", err) + hb.State = defs.NodeStateUnknown + hb.StateStr = "UNKNOWN" + } else { + hb.State = n.State + hb.StateStr = n.StateStr + } + } + err = topo.SetAgentStatus(ctx, a.leadConn, hb) if err != nil { l.Error("set status: %v", err) From 7af6987b5bdaebe6e22ff4574bd556ad8fbffebb Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 27 Jun 2024 20:20:36 +0200 Subject: [PATCH 092/203] lint edits --- cmd/pbm/delete.go | 6 +++--- cmd/pbm/list.go | 2 +- e2e-tests/pkg/pbm/pbm_ctl.go | 12 ++++++------ .../tests/sharded/test_backup_cancellation.go | 3 ++- sdk/cli/status.go | 2 +- sdk/impl.go | 16 ---------------- 6 files changed, 13 insertions(+), 28 deletions(-) diff --git a/cmd/pbm/delete.go b/cmd/pbm/delete.go index da6920bd0..f7048a1c4 100644 --- a/cmd/pbm/delete.go +++ b/cmd/pbm/delete.go @@ -59,7 +59,7 @@ func deleteBackup( } if d.dryRun { - return nil, nil + return nil, nil //nolint:nilnil } return waitForDelete(ctx, conn, pbm, cid) @@ -198,7 +198,7 @@ func deletePITR( printDeleteInfoTo(os.Stdout, nil, chunks) if d.dryRun { - return nil, nil + return nil, nil //nolint:nilnil } if !d.yes { q := "Are you sure you want to delete chunks?" 
@@ -254,7 +254,7 @@ func doCleanup(ctx context.Context, conn connect.Client, pbm *sdk.Client, d *cle printDeleteInfoTo(os.Stdout, info.Backups, info.Chunks) if d.dryRun { - return nil, nil + return nil, nil //nolint:nilnil } if !d.yes { if err := askConfirmation("Are you sure you want to delete?"); err != nil { diff --git a/cmd/pbm/list.go b/cmd/pbm/list.go index 037866403..137173748 100644 --- a/cmd/pbm/list.go +++ b/cmd/pbm/list.go @@ -122,7 +122,7 @@ func findLock(ctx context.Context, pbm *sdk.Client) (*sdk.OpLock, error) { return nil, errors.Wrap(err, "get locks") } if len(locks) == 0 { - return nil, nil + return nil, nil //nolint:nilnil } var lck *sdk.OpLock diff --git a/e2e-tests/pkg/pbm/pbm_ctl.go b/e2e-tests/pkg/pbm/pbm_ctl.go index 6798721e1..94e3e0407 100644 --- a/e2e-tests/pkg/pbm/pbm_ctl.go +++ b/e2e-tests/pkg/pbm/pbm_ctl.go @@ -150,12 +150,12 @@ func skipCtl(str string) []byte { func (c *Ctl) CheckRestore(bcpName string, waitFor time.Duration) error { type rlist struct { - Start int - Status defs.Status - Type string - Name string - Snapshot string - Error string + Start int `json:"Start"` + Status defs.Status `json:"Status"` + Type string `json:"Type"` + Name string `json:"Name"` + Snapshot string `json:"Snapshot"` + Error string `json:"Error"` } tmr := time.NewTimer(waitFor) tkr := time.NewTicker(500 * time.Millisecond) diff --git a/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go b/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go index 16539dde4..cfce7c72d 100644 --- a/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go +++ b/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go @@ -48,7 +48,8 @@ func (c *Cluster) BackupCancellation(storage string) { } } -func checkNoBackupFiles(backupName, conf string) { +// checkNoBackupFiles +func _(backupName, conf string) { log.Println("check no artifacts left for backup", backupName) buf, err := os.ReadFile(conf) if err != nil { diff --git a/sdk/cli/status.go b/sdk/cli/status.go index e073f788b..04ce4b62b 100644 --- a/sdk/cli/status.go +++ b/sdk/cli/status.go @@ -179,7 +179,7 @@ func (g RSConfGetter) Get(ctx context.Context, host string) (*topo.RSConfig, err if err != nil { return nil, errors.Wrap(err, "connect") } - defer conn.Disconnect(context.Background()) + defer func() { _ = conn.Disconnect(context.Background()) }() return topo.GetReplSetConfig(ctx, conn) } diff --git a/sdk/impl.go b/sdk/impl.go index 3dd45a072..a2ec9ef88 100644 --- a/sdk/impl.go +++ b/sdk/impl.go @@ -323,22 +323,6 @@ func (c *Client) Restore(ctx context.Context, backupName string, clusterTS Times return NoOpID, ErrNotImplemented } -type lockImpl struct { - lock.LockData -} - -func (l lockImpl) Type() string { - return string(l.LockData.Type) -} - -func (l lockImpl) CommandID() string { - return l.OPID -} - -func (l lockImpl) Heartbeat() Timestamp { - return l.LockData.Heartbeat -} - var ErrStaleHearbeat = errors.New("stale heartbeat") func (c *Client) OpLocks(ctx context.Context) ([]OpLock, error) { From f47794606ee306bb8c2018bade32516a6a982a88 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 27 Jun 2024 20:25:43 +0200 Subject: [PATCH 093/203] Fix old PITR main logic Remove prioritization based on healthy nodes. Add error handling for failing ACK. 
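For review context, the resulting ACK handling after this patch reads as follows. The
nomination ACK is best-effort: a failed write is only logged, and the elected node still
proceeds with oplog slicing. This is a consolidated view of the hunk below, not new code:

	err = oplog.SetPITRNomineeACK(ctx, a.leadConn, a.brief.SetName, a.brief.Me)
	if err != nil {
		// A failed ACK is not fatal; the agent keeps going and the error
		// is surfaced through the agent log only.
		l.Error("set nominee ack: %v", err)
	}

The sleep penalty that previously deprioritized a node after a failed slicing attempt is
dropped entirely, so the lock release in the deferred block is no longer followed by an
artificial wait.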
--- cmd/pbm-agent/pitr.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index e76334850..3158842e3 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -280,6 +280,9 @@ func (a *Agent) pitr(ctx context.Context) error { return nil } err = oplog.SetPITRNomineeACK(ctx, a.leadConn, a.brief.SetName, a.brief.Me) + if err != nil { + l.Error("set nominee ack: %v", err) + } stg, err := util.StorageFromConfig(cfg.Storage, l) if err != nil { @@ -336,13 +339,6 @@ func (a *Agent) pitr(ctx context.Context) error { if err := lck.Release(); err != nil { l.Error("release lock: %v", err) } - - // Penalty to the failed node so healthy nodes would have priority on next try. - // But lock has to be released first. Otherwise, healthy nodes would wait for the lock release - // and the penalty won't have any sense. - if streamErr != nil { - time.Sleep(pitrCheckPeriod * 2) - } }() return nil From 1dd75275fac4c4f5b012063d6b535496991d4391 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Fri, 28 Jun 2024 09:47:58 +0200 Subject: [PATCH 094/203] fix: panic on arbiter node during backup and oplog slicing --- pbm/topo/status.go | 4 ++++ pbm/topo/topo.go | 3 +++ 2 files changed, 7 insertions(+) diff --git a/pbm/topo/status.go b/pbm/topo/status.go index 6ebac8ead..4cad00a51 100644 --- a/pbm/topo/status.go +++ b/pbm/topo/status.go @@ -38,6 +38,10 @@ type NodeStatus struct { SyncingTo string `bson:"syncingTo,omitempty" json:"syncingTo,omitempty"` } +func (s *NodeStatus) IsArbiter() bool { + return s.State == defs.NodeStateArbiter +} + type StatusOpTimes struct { LastCommittedOpTime *OpTime `bson:"lastCommittedOpTime" json:"lastCommittedOpTime"` ReadConcernMajorityOpTime *OpTime `bson:"readConcernMajorityOpTime" json:"readConcernMajorityOpTime"` diff --git a/pbm/topo/topo.go b/pbm/topo/topo.go index a4b02bed5..2da4ae5b4 100644 --- a/pbm/topo/topo.go +++ b/pbm/topo/topo.go @@ -156,6 +156,9 @@ func NodeSuits(ctx context.Context, m *mongo.Client, inf *NodeInfo) (bool, error if err != nil { return false, errors.Wrap(err, "get node status") } + if status.IsArbiter() { + return false, nil + } replLag, err := ReplicationLag(ctx, m, inf.Me) if err != nil { From c5ca1349988a985d5c9809387ee5b9936022649f Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Fri, 28 Jun 2024 10:36:24 +0200 Subject: [PATCH 095/203] do not run slicer on arbiter and delayed nodes --- cmd/pbm-agent/agent.go | 11 +++++++++++ cmd/pbm-agent/main.go | 11 +++++++++-- pbm/topo/node.go | 4 ++++ 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 6aa89f3d1..1b086df6b 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -76,6 +76,11 @@ func newAgent(ctx context.Context, leadConn connect.Client, uri string, dumpConn return a, nil } +var ( + ErrArbiterNode = errors.New("arbiter") + ErrDelayedNode = errors.New("delayed") +) + func (a *Agent) CanStart(ctx context.Context) error { info, err := topo.GetNodeInfo(ctx, a.nodeConn) if err != nil { @@ -85,6 +90,12 @@ func (a *Agent) CanStart(ctx context.Context) error { if info.Msg == "isdbgrid" { return errors.New("mongos is not supported") } + if info.ArbiterOnly { + return ErrArbiterNode + } + if info.IsDelayed() { + return ErrDelayedNode + } ver, err := version.GetMongoVersion(ctx, a.leadConn.MongoClient()) if err != nil { diff --git a/cmd/pbm-agent/main.go b/cmd/pbm-agent/main.go index ec5485948..65758ffb0 100644 --- a/cmd/pbm-agent/main.go +++ 
b/cmd/pbm-agent/main.go @@ -101,8 +101,13 @@ func runAgent(mongoURI string, dumpConns int) error { ctx = log.SetLoggerToContext(ctx, logger) + canRunSlicer := true if err := agent.CanStart(ctx); err != nil { - return errors.Wrap(err, "pre-start check") + if errors.Is(err, ErrArbiterNode) || errors.Is(err, ErrDelayedNode) { + canRunSlicer = false + } else { + return errors.Wrap(err, "pre-start check") + } } err = setupNewDB(ctx, agent.leadConn) @@ -110,7 +115,9 @@ func runAgent(mongoURI string, dumpConns int) error { return errors.Wrap(err, "setup pbm collections") } - go agent.PITR(ctx) + if canRunSlicer { + go agent.PITR(ctx) + } go agent.HbStatus(ctx) return errors.Wrap(agent.Start(ctx), "listen the commands stream") diff --git a/pbm/topo/node.go b/pbm/topo/node.go index 5821dcb22..04c0b472b 100644 --- a/pbm/topo/node.go +++ b/pbm/topo/node.go @@ -83,6 +83,10 @@ type NodeInfo struct { Opts MongodOpts `bson:"-"` } +func (i *NodeInfo) IsDelayed() bool { + return i.SecondaryDelayOld != 0 || i.SecondaryDelaySecs != 0 +} + // IsSharded returns true is replset is part sharded cluster func (i *NodeInfo) IsMongos() bool { return i.Msg == "isdbgrid" From 77367928c112d96770e3eba12fc3cef303cc576b Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Fri, 28 Jun 2024 10:36:46 +0200 Subject: [PATCH 096/203] do not run backup and restore on arbiter node --- cmd/pbm-agent/backup.go | 4 ++++ cmd/pbm-agent/oplog.go | 4 ++++ cmd/pbm-agent/restore.go | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/cmd/pbm-agent/backup.go b/cmd/pbm-agent/backup.go index b93c714ed..994dbda92 100644 --- a/cmd/pbm-agent/backup.go +++ b/cmd/pbm-agent/backup.go @@ -66,6 +66,10 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, "to make it compatible with PBM's backup method using the oplog") return } + if nodeInfo.ArbiterOnly { + l.Debug("arbiter node. skip") + return + } isClusterLeader := nodeInfo.IsClusterLeader() canRunBackup, err := topo.NodeSuitsExt(ctx, a.nodeConn, nodeInfo, cmd.Type) diff --git a/cmd/pbm-agent/oplog.go b/cmd/pbm-agent/oplog.go index cd2286d14..cb1bb1d03 100644 --- a/cmd/pbm-agent/oplog.go +++ b/cmd/pbm-agent/oplog.go @@ -40,6 +40,10 @@ func (a *Agent) OplogReplay(ctx context.Context, r *ctrl.ReplayCmd, opID ctrl.OP l.Info("node in not suitable for restore") return } + if nodeInfo.ArbiterOnly { + l.Debug("arbiter node. skip") + return + } epoch := ep.TS() lck := lock.NewLock(a.leadConn, lock.LockHeader{ diff --git a/cmd/pbm-agent/restore.go b/cmd/pbm-agent/restore.go index 3da81bd01..9a1427e07 100644 --- a/cmd/pbm-agent/restore.go +++ b/cmd/pbm-agent/restore.go @@ -36,6 +36,10 @@ func (a *Agent) Restore(ctx context.Context, r *ctrl.RestoreCmd, opid ctrl.OPID, l.Error("get node info: %v", err) return } + if nodeInfo.ArbiterOnly { + l.Debug("arbiter node. 
skip") + return + } var lck *lock.Lock if nodeInfo.IsPrimary { From 3fdcdd8fe0bfe4d4908986b0197d75ca7acae385 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Fri, 28 Jun 2024 10:48:55 +0200 Subject: [PATCH 097/203] print s3.uploadPartSize and s3.maxUploadParts --- pbm/storage/s3/s3.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go index 51fec7ca7..693cf6942 100644 --- a/pbm/storage/s3/s3.go +++ b/pbm/storage/s3/s3.go @@ -321,6 +321,11 @@ func (s *S3) Save(name string, data io.Reader, sizeb int64) error { } } + if s.log != nil { + s.log.Info("s3.uploadPartSize is set to %d (~%dMb)", partSize, partSize>>20) + s.log.Info("s3.maxUploadParts is set to %d", s.opts.MaxUploadParts) + } + _, err = s3manager.NewUploader(awsSession, func(u *s3manager.Uploader) { u.MaxUploadParts = s.opts.MaxUploadParts u.PartSize = partSize // 10MB part size From 2b3b374e622d90c7201d308283cca5d828f1fa5e Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Fri, 28 Jun 2024 12:35:33 +0200 Subject: [PATCH 098/203] fix deadlock --- cmd/pbm-agent/agent.go | 3 ++- cmd/pbm-agent/backup.go | 8 ++++---- cmd/pbm-agent/pitr.go | 12 ++++++------ 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 67dcc5624..23ae93bef 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -28,7 +28,8 @@ type Agent struct { nodeConn *mongo.Client bcp *currentBackup pitrjob *currentPitr - mx sync.Mutex + slicerMx sync.Mutex + bcpMx sync.Mutex brief topo.NodeBrief diff --git a/cmd/pbm-agent/backup.go b/cmd/pbm-agent/backup.go index ff1af186c..5cf5f0cd4 100644 --- a/cmd/pbm-agent/backup.go +++ b/cmd/pbm-agent/backup.go @@ -22,16 +22,16 @@ type currentBackup struct { } func (a *Agent) setBcp(b *currentBackup) { - a.mx.Lock() - defer a.mx.Unlock() + a.bcpMx.Lock() + defer a.bcpMx.Unlock() a.bcp = b } // CancelBackup cancels current backup func (a *Agent) CancelBackup() { - a.mx.Lock() - defer a.mx.Unlock() + a.bcpMx.Lock() + defer a.bcpMx.Unlock() if a.bcp == nil { return diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 7bc724e9f..93d4c9e4b 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -26,8 +26,8 @@ type currentPitr struct { } func (a *Agent) setPitr(p *currentPitr) { - a.mx.Lock() - defer a.mx.Unlock() + a.slicerMx.Lock() + defer a.slicerMx.Unlock() if a.pitrjob != nil { a.pitrjob.cancel() @@ -41,15 +41,15 @@ func (a *Agent) removePitr() { } func (a *Agent) getPitr() *currentPitr { - a.mx.Lock() - defer a.mx.Unlock() + a.slicerMx.Lock() + defer a.slicerMx.Unlock() return a.pitrjob } func (a *Agent) sliceNow(opid ctrl.OPID) { - a.mx.Lock() - defer a.mx.Unlock() + a.slicerMx.Lock() + defer a.slicerMx.Unlock() if a.pitrjob == nil { return From 01b4492130e103c7bfa518da1e222deec112c85a Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Fri, 28 Jun 2024 13:21:51 +0200 Subject: [PATCH 099/203] reset original trailing window on upload failure --- pbm/oplog/backup.go | 5 +++++ pbm/slicer/slicer.go | 2 ++ 2 files changed, 7 insertions(+) diff --git a/pbm/oplog/backup.go b/pbm/oplog/backup.go index a8559229e..5e9d9b0d8 100644 --- a/pbm/oplog/backup.go +++ b/pbm/oplog/backup.go @@ -41,6 +41,11 @@ func NewOplogBackup(m *mongo.Client) *OplogBackup { return &OplogBackup{cl: m} } +// GetTailingSpan returns oplog tailing window +func (ot *OplogBackup) GetTailingSpan() (primitive.Timestamp, primitive.Timestamp) { + return ot.start, ot.end +} + // SetTailingSpan sets oplog tailing window func (ot 
*OplogBackup) SetTailingSpan(start, end primitive.Timestamp) { ot.start = start diff --git a/pbm/slicer/slicer.go b/pbm/slicer/slicer.go index fcb85e1c2..36f9932d0 100644 --- a/pbm/slicer/slicer.go +++ b/pbm/slicer/slicer.go @@ -454,6 +454,7 @@ func (s *Slicer) upload( compression compress.CompressionType, level *int, ) error { + originalStart, originalEnd := s.oplog.GetTailingSpan() s.oplog.SetTailingSpan(from, to) fname := oplog.FormatChunkFilepath(s.rs, from, to, compression) // if use parent ctx, upload will be canceled on the "done" signal @@ -463,6 +464,7 @@ func (s *Slicer) upload( // wrong during the data read we may end up with an already created file. Although // the failed range won't be saved in db as the available for restore. It would get // in there after the storage resync. see: https://jira.percona.com/browse/PBM-602 + s.oplog.SetTailingSpan(originalStart, originalEnd) s.l.Debug("remove %s due to upload errors", fname) derr := s.storage.Delete(fname) if derr != nil { From 053a2122b85b5951f581386d4348e68306f4b132 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Fri, 28 Jun 2024 15:13:33 +0200 Subject: [PATCH 100/203] try to set node role even if no agent is running --- pbm/topo/node.go | 27 +++++++++++++++++++++++++++ sdk/cli/status.go | 14 ++++++++------ 2 files changed, 35 insertions(+), 6 deletions(-) diff --git a/pbm/topo/node.go b/pbm/topo/node.go index 04c0b472b..6689853e4 100644 --- a/pbm/topo/node.go +++ b/pbm/topo/node.go @@ -21,6 +21,16 @@ const ( RoleConfigSrv ReplsetRole = "configsrv" ) +type NodeRole string + +const ( + RolePrimary NodeRole = "P" + RoleSecondary NodeRole = "S" + RoleArbiter NodeRole = "A" + RoleHidden NodeRole = "H" + RoleDelayed NodeRole = "D" +) + type OpTime struct { TS primitive.Timestamp `bson:"ts" json:"ts"` Term int64 `bson:"t" json:"t"` @@ -278,6 +288,23 @@ type RSMember struct { Votes int `bson:"votes" json:"votes"` } +func (m *RSMember) IsDelayed() bool { + return m.SecondaryDelayOld != 0 || m.SecondaryDelaySecs != 0 +} + +func (m *RSMember) Role() NodeRole { + switch { + case m.ArbiterOnly: + return RoleArbiter + case m.IsDelayed(): + return RoleDelayed + case m.Hidden: + return RoleHidden + } + + return "" +} + func GetReplSetConfig(ctx context.Context, m *mongo.Client) (*RSConfig, error) { res := m.Database("admin").RunCommand(ctx, bson.D{{"replSetGetConfig", 1}}) if err := res.Err(); err != nil { diff --git a/sdk/cli/status.go b/sdk/cli/status.go index 04ce4b62b..e0a38196a 100644 --- a/sdk/cli/status.go +++ b/sdk/cli/status.go @@ -24,14 +24,14 @@ func (e LostAgentError) Error() string { return fmt.Sprintf("lost agent, last heartbeat: %v", e.heartbeat.T) } -type RSRole string +type RSRole = topo.NodeRole const ( - RolePrimary RSRole = "P" - RoleSecondary RSRole = "S" - RoleArbiter RSRole = "A" - RoleHidden RSRole = "H" - RoleDelayed RSRole = "D" + RolePrimary = topo.RolePrimary + RoleSecondary = topo.RoleSecondary + RoleArbiter = topo.RoleArbiter + RoleHidden = topo.RoleHidden + RoleDelayed = topo.RoleDelayed ) type Node struct { @@ -106,10 +106,12 @@ func ClusterStatus( rsAgents := agentMap[c.RS] if rsAgents == nil { + node.Role = member.Role() continue } agent := rsAgents[member.Host] if agent == nil { + node.Role = member.Role() continue } From 6ac99d69f7bc288059105ee01b55aa941e7d31d0 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Fri, 28 Jun 2024 15:26:23 +0200 Subject: [PATCH 101/203] Revert "reset original trailing window on upload failure" This reverts commit 01b4492130e103c7bfa518da1e222deec112c85a. 
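A note on PATCH 100 above: with the Role() fallback, the status output can derive a
member's role from the replica-set config alone when no pbm-agent heartbeat exists for
it. A minimal, hypothetical usage sketch (the member values are invented; only arbiter,
delayed, and hidden roles can be inferred this way):

	package main

	import (
		"fmt"

		"github.com/percona/percona-backup-mongodb/pbm/topo"
	)

	func main() {
		// Hypothetical member taken from a replset config; no agent reports for it.
		m := topo.RSMember{Host: "rs03:27017", ArbiterOnly: true}
		fmt.Println(m.Role()) // prints "A" (arbiter)

		// Primary vs. secondary cannot be told apart from the config alone,
		// so Role() returns "" for regular members and the role stays unset.
	}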
--- pbm/oplog/backup.go | 5 ----- pbm/slicer/slicer.go | 2 -- 2 files changed, 7 deletions(-) diff --git a/pbm/oplog/backup.go b/pbm/oplog/backup.go index 5e9d9b0d8..a8559229e 100644 --- a/pbm/oplog/backup.go +++ b/pbm/oplog/backup.go @@ -41,11 +41,6 @@ func NewOplogBackup(m *mongo.Client) *OplogBackup { return &OplogBackup{cl: m} } -// GetTailingSpan returns oplog tailing window -func (ot *OplogBackup) GetTailingSpan() (primitive.Timestamp, primitive.Timestamp) { - return ot.start, ot.end -} - // SetTailingSpan sets oplog tailing window func (ot *OplogBackup) SetTailingSpan(start, end primitive.Timestamp) { ot.start = start diff --git a/pbm/slicer/slicer.go b/pbm/slicer/slicer.go index 36f9932d0..fcb85e1c2 100644 --- a/pbm/slicer/slicer.go +++ b/pbm/slicer/slicer.go @@ -454,7 +454,6 @@ func (s *Slicer) upload( compression compress.CompressionType, level *int, ) error { - originalStart, originalEnd := s.oplog.GetTailingSpan() s.oplog.SetTailingSpan(from, to) fname := oplog.FormatChunkFilepath(s.rs, from, to, compression) // if use parent ctx, upload will be canceled on the "done" signal @@ -464,7 +463,6 @@ func (s *Slicer) upload( // wrong during the data read we may end up with an already created file. Although // the failed range won't be saved in db as the available for restore. It would get // in there after the storage resync. see: https://jira.percona.com/browse/PBM-602 - s.oplog.SetTailingSpan(originalStart, originalEnd) s.l.Debug("remove %s due to upload errors", fname) derr := s.storage.Delete(fname) if derr != nil { From 0db29f89cc4192057d57484fada0ea23aa6355ca Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Sat, 29 Jun 2024 00:00:57 +0200 Subject: [PATCH 102/203] Extract agent's leadNomination method --- cmd/pbm-agent/pitr.go | 117 ++++++++++++++++++++++++++---------------- 1 file changed, 74 insertions(+), 43 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 3158842e3..db7aab22e 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -209,49 +209,7 @@ func (a *Agent) pitr(ctx context.Context) error { return nil } - isClusterLeader := nodeInfo.IsClusterLeader() - - if isClusterLeader { - l.Debug("checking locks in the whole cluster") - noLocks, err := a.waitAllOpLockRelease(ctx) - if err != nil { - l.Error("wait for all oplock release: %v", err) - return errors.Wrap(err, "wait all oplock release") - } - if !noLocks { - l.Debug("there are still working pitr members, members nomination will not be continued") - return nil - } - - l.Debug("init pitr meta on the first usage") - oplog.InitMeta(ctx, a.leadConn) - - agents, err := topo.ListAgentStatuses(ctx, a.leadConn) - if err != nil { - l.Error("get agents list: %v", err) - return errors.Wrap(err, "list agents statuses") - } - - nodes, err := prio.CalcNodesPriority(ctx, nil, cfg.PITR.Priority, agents) - if err != nil { - l.Error("get nodes priority: %v", err) - return errors.Wrap(err, "get nodes priorities") - } - - shards, err := topo.ClusterMembers(ctx, a.leadConn.MongoClient()) - if err != nil { - l.Error("get cluster members: %v", err) - return errors.Wrap(err, "get cluster members") - } - - for _, sh := range shards { - go func(rs string) { - if err := a.nominateRSForPITR(ctx, rs, nodes.RS(rs)); err != nil { - l.Error("nodes nomination error for %s: %v", rs, err) - } - }(sh.RS) - } - } + go a.leadNomination(ctx, nodeInfo, cfg) nominated, err := a.waitNominationForPITR(ctx, nodeInfo.SetName, nodeInfo.Me) if err != nil { @@ -344,6 +302,79 @@ func (a *Agent) pitr(ctx context.Context) error { 
return nil } +// leadNomination does priority calculation and nomination part of PITR process. +// It requires to be run in separate go routine on cluster leader. +func (a *Agent) leadNomination( + ctx context.Context, + nodeInfo *topo.NodeInfo, + cfg *config.Config) { + l := log.LogEventFromContext(ctx) + + if !nodeInfo.IsClusterLeader() { + return + } + + l.Debug("checking locks in the whole cluster") + noLocks, err := a.waitAllOpLockRelease(ctx) + if err != nil { + l.Error("wait for all oplock release: %v", err) + return + } + if !noLocks { + l.Debug("there are still working pitr members, members nomination will not be continued") + return + } + + l.Debug("init pitr meta on the first usage") + oplog.InitMeta(ctx, a.leadConn) + + agents, err := topo.ListAgentStatuses(ctx, a.leadConn) + if err != nil { + l.Error("get agents list: %v", err) + return + } + + nodes, err := prio.CalcNodesPriority(ctx, nil, cfg.PITR.Priority, agents) + if err != nil { + l.Error("get nodes priority: %v", err) + return + } + + shards, err := topo.ClusterMembers(ctx, a.leadConn.MongoClient()) + if err != nil { + l.Error("get cluster members: %v", err) + return + } + + l.Debug("cluster is ready for nomination") + err = oplog.SetClusterStatus(ctx, a.leadConn, oplog.StatusReady) + if err != nil { + l.Error("set cluster status ready: %v", err) + return + } + + err = a.reconcileReadyStatus(ctx, agents) + if err != nil { + l.Error("reconciling ready status: %v", err) + return + } + + l.Debug("cluster leader sets running status") + err = oplog.SetClusterStatus(ctx, a.leadConn, oplog.StatusRunning) + if err != nil { + l.Error("set running status: %v", err) + return + } + + for _, sh := range shards { + go func(rs string) { + if err := a.nominateRSForPITR(ctx, rs, nodes.RS(rs)); err != nil { + l.Error("nodes nomination error for %s: %v", rs, err) + } + }(sh.RS) + } +} + func (a *Agent) nominateRSForPITR(ctx context.Context, rs string, nodes [][]string) error { l := log.LogEventFromContext(ctx) l.Debug("pitr nomination list for %s: %v", rs, nodes) From ff733e131caf4d1119f01c95055b0c6864dd55ca Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Sun, 30 Jun 2024 22:43:55 +0200 Subject: [PATCH 103/203] Add agent's start/stop monitor logic The aim is to controll lifecycle of monitor jobs (go routines) during the period when PITR is enabled. --- cmd/pbm-agent/agent.go | 5 +++ cmd/pbm-agent/pitr.go | 75 +++++++++++++++++++++++++++++++++++++----- 2 files changed, 71 insertions(+), 9 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 6e4dfcf57..92cd69227 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -43,6 +43,11 @@ type Agent struct { // prevOO is previous pitr.oplogOnly value prevOO *bool + + // pitr monitor (watcher) jobs are started + monStarted bool + // signal for stoppint pitr monitor jobs + monStopSig chan struct{} } func newAgent(ctx context.Context, leadConn connect.Client, uri string, dumpConns int) (*Agent, error) { diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index db7aab22e..78f810927 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -49,6 +49,32 @@ func (a *Agent) getPitr() *currentPitr { return a.pitrjob } +// startMon starts monitor (watcher) jobs only on cluster leader. 
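Commit 103 coordinates several background watchers through one shared channel. Before the startMon body that follows, a minimal, self-contained sketch of the underlying pattern: closing a channel broadcasts a stop signal to every monitor goroutine at once. All names below are illustrative, not part of the patch.

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    // monitor polls on a ticker until the shared stop channel is closed.
    func monitor(name string, stop <-chan struct{}, wg *sync.WaitGroup) {
    	defer wg.Done()
    	tk := time.NewTicker(10 * time.Millisecond)
    	defer tk.Stop()
    	for {
    		select {
    		case <-tk.C:
    			// ... poll some shared state here ...
    		case <-stop:
    			fmt.Println(name, "stopped")
    			return
    		}
    	}
    }

    func main() {
    	stop := make(chan struct{})
    	var wg sync.WaitGroup
    	wg.Add(2)
    	go monitor("config", stop, &wg)
    	go monitor("error", stop, &wg)
    	time.Sleep(50 * time.Millisecond)
    	close(stop) // one close stops all monitors at once
    	wg.Wait()
    }
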
+func (a *Agent) startMon(ctx context.Context, nodeInfo *topo.NodeInfo, cfg *config.Config) { + + if !nodeInfo.IsClusterLeader() { + return + } + if a.monStarted { + return + } + a.monStopSig = make(chan struct{}) + + go a.pitrConfigMonitor(ctx, cfg) + go a.pitrErrorMonitor(ctx) + + a.monStarted = true +} + +// stopMon stops monitor (watcher) jobs +func (a *Agent) stopMon() { + if !a.monStarted { + return + } + close(a.monStopSig) + a.monStarted = false +} + func (a *Agent) sliceNow(opid ctrl.OPID) { a.mx.Lock() defer a.mx.Unlock() @@ -147,6 +173,7 @@ func (a *Agent) pitr(ctx context.Context) error { if !cfg.PITR.Enabled { a.removePitr() + a.stopMon() return nil } @@ -209,6 +236,10 @@ func (a *Agent) pitr(ctx context.Context) error { return nil } + // start monitor jobs on cluster leader + a.startMon(ctx, nodeInfo, cfg) + + // start nomination process on cluster leader go a.leadNomination(ctx, nodeInfo, cfg) nominated, err := a.waitNominationForPITR(ctx, nodeInfo.SetName, nodeInfo.Me) @@ -373,6 +404,7 @@ func (a *Agent) leadNomination( } }(sh.RS) } + } func (a *Agent) nominateRSForPITR(ctx context.Context, rs string, nodes [][]string) error { @@ -581,14 +613,21 @@ func (a *Agent) isPITRClusterStatus(ctx context.Context, status oplog.Status) bo // pitrConfigMonitor watches changes in PITR section within PBM configuration. // If relevant changes are detected (e.g. priorities, oplogOnly), it sets -// Reconfig cluster status, meaning that slicing process needs to be restarted. -func (a *Agent) pitrConfigMonitor(ctx context.Context, currentConf config.PITRConf) { +// Reconfig cluster status, which means that slicing process needs to be restarted. +func (a *Agent) pitrConfigMonitor(ctx context.Context, firstConf *config.Config) { l := log.LogEventFromContext(ctx) l.Debug("start pitr config monitor") + defer l.Debug("stop pitr config monitor") tk := time.NewTicker(pitrWatchMonitorPollingCycle) defer tk.Stop() + updateCurrConf := func(c *config.Config) (config.PITRConf, primitive.Timestamp) { + return c.PITR, c.Epoch + } + + currConf, currEpoh := updateCurrConf(firstConf) + for { select { case <-tk.C: @@ -600,18 +639,27 @@ func (a *Agent) pitrConfigMonitor(ctx context.Context, currentConf config.PITRCo continue } + if currEpoh == cfg.Epoch { + continue + } + if !cfg.PITR.Enabled { - //todo check this + // If pitr is disabled, there is no need to check its properties. + // Enable/disbale change is handled out of the monitor logic (in pitr main loop). + currConf, currEpoh = updateCurrConf(cfg) continue } - oldP := currentConf.Priority - newP := cfg.PITR.Priority //todo: add change chet for other config params + + oldP := currConf.Priority + newP := cfg.PITR.Priority if newP == nil && oldP == nil { + currConf, currEpoh = updateCurrConf(cfg) continue } if maps.Equal(newP, oldP) { + currConf, currEpoh = updateCurrConf(cfg) continue } @@ -620,21 +668,25 @@ func (a *Agent) pitrConfigMonitor(ctx context.Context, currentConf config.PITRCo if err != nil { l.Error("error while setting cluster status reconfig: %v", err) } - return + currConf, currEpoh = updateCurrConf(cfg) case <-ctx.Done(): return + + case <-a.monStopSig: + return } } } // pitrErrorMonitor watches reported errors by agents on replica set(s) // which are running PITR. -// In case of any reported error within pbmPITR collection, cluster status -// Error is set. +// In case of any reported error within pbmPITR collection (replicaset subdoc), +// cluster status Error is set. 
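The config monitor above gates its work on the config Epoch and compares priority maps with maps.Equal. Ahead of the pitrErrorMonitor body that follows, a small runnable sketch of that change-detection idea, using stand-in types rather than the PBM config API:

    package main

    import (
    	"fmt"
    	"maps"
    )

    // pitrConf is a simplified stand-in for the PITR config section.
    type pitrConf struct {
    	Epoch    int
    	Priority map[string]float64
    }

    // changed reports whether a re-config is needed: same epoch means
    // nothing to re-read; otherwise compare the relevant fields.
    func changed(old, cur pitrConf) bool {
    	if old.Epoch == cur.Epoch {
    		return false
    	}
    	return !maps.Equal(old.Priority, cur.Priority)
    }

    func main() {
    	old := pitrConf{Epoch: 1, Priority: map[string]float64{"rs01": 2}}
    	cur := pitrConf{Epoch: 2, Priority: map[string]float64{"rs01": 3}}
    	fmt.Println(changed(old, cur)) // true -> trigger StatusReconfig
    }
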
func (a *Agent) pitrErrorMonitor(ctx context.Context) { l := log.LogEventFromContext(ctx) l.Debug("start pitr error monitor") + defer l.Debug("stop pitr error monitor") tk := time.NewTicker(pitrWatchMonitorPollingCycle) defer tk.Stop() @@ -644,6 +696,9 @@ func (a *Agent) pitrErrorMonitor(ctx context.Context) { case <-tk.C: replsets, err := oplog.GetReplSetsWithStatus(ctx, a.leadConn, oplog.StatusError) if err != nil { + if errors.Is(err, errors.ErrNotFound) { + continue + } l.Error("get error replsets", err) } @@ -656,10 +711,12 @@ func (a *Agent) pitrErrorMonitor(ctx context.Context) { if err != nil { l.Error("error while setting cluster status Error: %v", err) } - return case <-ctx.Done(): return + + case <-a.monStopSig: + return } } } From be5a2c471c3ba26b222eef0fbbd8ba2ed523d67d Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Sun, 30 Jun 2024 23:10:23 +0200 Subject: [PATCH 104/203] Add errors reporting on RS level for pbmPITR --- cmd/pbm-agent/pitr.go | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 78f810927..27c44c1f3 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -273,9 +273,17 @@ func (a *Agent) pitr(ctx context.Context) error { l.Error("set nominee ack: %v", err) } + defer func() { + if err != nil { + l.Debug("setting RS error status: %v", err) + oplog.SetErrorRSStatus(ctx, a.leadConn, nodeInfo.SetName, nodeInfo.Me, err.Error()) + } + }() + stg, err := util.StorageFromConfig(cfg.Storage, l) if err != nil { - return errors.Wrap(err, "unable to get storage configuration") + err = errors.Wrap(err, "unable to get storage configuration") + return err } s := slicer.NewSlicer(a.brief.SetName, a.leadConn, a.nodeConn, stg, cfg, log.FromContext(ctx)) @@ -290,7 +298,8 @@ func (a *Agent) pitr(ctx context.Context) error { if err := lck.Release(); err != nil { l.Error("release lock: %v", err) } - return errors.Wrap(err, "catchup") + err = errors.Wrap(err, "catchup") + return err } go func() { @@ -322,7 +331,9 @@ func (a *Agent) pitr(ctx context.Context) error { if errors.Is(streamErr, slicer.OpMovedError{}) { out = l.Info } - out("streaming oplog: %v", streamErr) + retErr := errors.Wrap(streamErr, "streaming oplog: %v") + oplog.SetErrorRSStatus(ctx, a.leadConn, nodeInfo.SetName, nodeInfo.Me, retErr.Error()) + out(retErr.Error()) } if err := lck.Release(); err != nil { From fa3843ff49771d4d625de0157c3c171fa80a4e97 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 1 Jul 2024 09:56:18 +0200 Subject: [PATCH 105/203] Stopping the slicer in case of reconfig/error ... 
cluster status --- cmd/pbm-agent/pitr.go | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 27c44c1f3..9fd573c63 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -2,8 +2,10 @@ package main import ( "context" + "maps" "time" + "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo" "github.com/percona/percona-backup-mongodb/pbm/backup" @@ -275,7 +277,7 @@ func (a *Agent) pitr(ctx context.Context) error { defer func() { if err != nil { - l.Debug("setting RS error status: %v", err) + l.Debug("setting RS error status for err: %v", err) oplog.SetErrorRSStatus(ctx, a.leadConn, nodeInfo.SetName, nodeInfo.Me, err.Error()) } }() @@ -320,6 +322,30 @@ func (a *Agent) pitr(ctx context.Context) error { a.removePitr() }() + go func() { + tk := time.NewTicker(5 * time.Second) + defer tk.Stop() + + for { + select { + case <-tk.C: + if reconf := a.isPITRClusterStatus(ctx, oplog.StatusReconfig); reconf { + l.Debug("stop slicing because of reconfig") + stopSlicing() + return + } + if pitrErr := a.isPITRClusterStatus(ctx, oplog.StatusError); pitrErr { + l.Debug("stop slicing because of error") + stopSlicing() + return + } + + case <-stopSlicingCtx.Done(): + return + } + } + }() + streamErr := s.Stream(ctx, stopC, w, From 6704ec93644458af00f63c5c3c8872713e75e9a1 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 1 Jul 2024 10:58:02 +0200 Subject: [PATCH 106/203] Fix cluster status after reconcile goes in timeout --- cmd/pbm-agent/pitr.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 9fd573c63..0691cddba 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -219,7 +219,6 @@ func (a *Agent) pitr(ctx context.Context) error { } // should be after the lock pre-check - // // if node failing, then some other agent with healthy node will hopefully catch up // so this code won't be reached and will not pollute log with "pitr" errors while // the other node does successfully slice @@ -603,6 +602,8 @@ func (a *Agent) confirmReadyStatus(ctx context.Context) error { } } +// reconcileReadyStatus waits all members to confirm Ready status. +// In case of timeout Ready status will be removed. 
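Commit 106 makes the reconcile step clean up the Ready status when it times out. Before the reconcileReadyStatus body below, a compact model of this poll-until-acknowledged-or-timeout shape; the helper names are invented for the sketch:

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // waitAll polls acked() until it reports want confirmations,
    // or fails after the given timeout.
    func waitAll(acked func() int, want int, timeout time.Duration) error {
    	tk := time.NewTicker(5 * time.Millisecond)
    	defer tk.Stop()
    	tout := time.NewTimer(timeout)
    	defer tout.Stop()

    	for {
    		select {
    		case <-tk.C:
    			if acked() >= want {
    				return nil
    			}
    		case <-tout.C:
    			// the caller is expected to reset the shared status here
    			return errors.New("timeout while reconciling ready status")
    		}
    	}
    }

    func main() {
    	n := 0
    	err := waitAll(func() int { n++; return n }, 3, time.Second)
    	fmt.Println(err) // <nil>
    }
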
 func (a *Agent) reconcileReadyStatus(ctx context.Context, agents []topo.AgentStat) error {
 	l := log.LogEventFromContext(ctx)
 
@@ -628,6 +629,10 @@ func (a *Agent) reconcileReadyStatus(ctx context.Context, agents []topo.AgentSta
 				return nil
 			}
 		case <-tout.C:
+			// clean up cluster Ready status to not have an issue in next run
+			if err := oplog.SetClusterStatus(ctx, a.leadConn, oplog.StatusUnset); err != nil {
+				l.Error("error while cleaning cluster status: %v", err)
+			}
 			return errors.New("timeout while reconciling ready status")
 		}
 	}

From c263b0b6d7fad4beda1fe89a058ed8c29d2bd2a8 Mon Sep 17 00:00:00 2001
From: t-yrka <78016060+t-yrka@users.noreply.github.com>
Date: Mon, 1 Jul 2024 12:01:37 +0200
Subject: [PATCH 107/203] PBM-1343: Release the lock when failed to read the
 storage config (#957)

---
 cmd/pbm-agent/pitr.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go
index 93d4c9e4b..2e23a8843 100644
--- a/cmd/pbm-agent/pitr.go
+++ b/cmd/pbm-agent/pitr.go
@@ -223,6 +223,9 @@ func (a *Agent) pitr(ctx context.Context) error {
 
 	stg, err := util.StorageFromConfig(&cfg.Storage, l)
 	if err != nil {
+		if err := lck.Release(); err != nil {
+			l.Error("release lock: %v", err)
+		}
 		return errors.Wrap(err, "unable to get storage configuration")
 	}

From 311e4317f83513d3a8609bf61a69bd1d4cd8527a Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Mon, 1 Jul 2024 18:14:55 +0200
Subject: [PATCH 108/203] Fix reviewdog suggestions

---
 cmd/pbm-agent/pitr.go   | 24 +++++++++++++++---------
 pbm/oplog/nomination.go |  9 +++++----
 2 files changed, 20 insertions(+), 13 deletions(-)

diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go
index 5629a8141..88c8fedcd 100644
--- a/cmd/pbm-agent/pitr.go
+++ b/cmd/pbm-agent/pitr.go
@@ -53,7 +53,6 @@ func (a *Agent) getPitr() *currentPitr {
 
 // startMon starts monitor (watcher) jobs only on cluster leader.
func (a *Agent) startMon(ctx context.Context, nodeInfo *topo.NodeInfo, cfg *config.Config) { - if !nodeInfo.IsClusterLeader() { return } @@ -194,8 +193,8 @@ func (a *Agent) pitr(ctx context.Context) error { } if p := a.getPitr(); p != nil { + // todo: remove this span changing detaction to leader // already do the job - //todo: remove this span changing detaction to leader currInterval := p.slicer.GetSpan() if currInterval != slicerInterval { p.slicer.SetSpan(slicerInterval) @@ -279,7 +278,9 @@ func (a *Agent) pitr(ctx context.Context) error { defer func() { if err != nil { l.Debug("setting RS error status for err: %v", err) - oplog.SetErrorRSStatus(ctx, a.leadConn, nodeInfo.SetName, nodeInfo.Me, err.Error()) + if err := oplog.SetErrorRSStatus(ctx, a.leadConn, nodeInfo.SetName, nodeInfo.Me, err.Error()); err != nil { + l.Error("error while setting error status: %v", err) + } } }() @@ -362,7 +363,9 @@ func (a *Agent) pitr(ctx context.Context) error { out = l.Info } retErr := errors.Wrap(streamErr, "streaming oplog: %v") - oplog.SetErrorRSStatus(ctx, a.leadConn, nodeInfo.SetName, nodeInfo.Me, retErr.Error()) + if err := oplog.SetErrorRSStatus(ctx, a.leadConn, nodeInfo.SetName, nodeInfo.Me, retErr.Error()); err != nil { + l.Error("setting RS status to status error, err = %v", err) + } out(retErr.Error()) } @@ -379,7 +382,8 @@ func (a *Agent) pitr(ctx context.Context) error { func (a *Agent) leadNomination( ctx context.Context, nodeInfo *topo.NodeInfo, - cfg *config.Config) { + cfg *config.Config, +) { l := log.LogEventFromContext(ctx) if !nodeInfo.IsClusterLeader() { @@ -398,7 +402,10 @@ func (a *Agent) leadNomination( } l.Debug("init pitr meta on the first usage") - oplog.InitMeta(ctx, a.leadConn) + err = oplog.InitMeta(ctx, a.leadConn) + if err != nil { + l.Error("init meta: %v", err) + } agents, err := topo.ListAgentStatuses(ctx, a.leadConn) if err != nil { @@ -445,7 +452,6 @@ func (a *Agent) leadNomination( } }(sh.RS) } - } func (a *Agent) nominateRSForPITR(ctx context.Context, rs string, nodes [][]string) error { @@ -692,12 +698,12 @@ func (a *Agent) pitrConfigMonitor(ctx context.Context, firstConf *config.Config) if !cfg.PITR.Enabled { // If pitr is disabled, there is no need to check its properties. - // Enable/disbale change is handled out of the monitor logic (in pitr main loop). + // Enable/disable change is handled out of the monitor logic (in pitr main loop). currConf, currEpoh = updateCurrConf(cfg) continue } - //todo: add change chet for other config params + // todo: add change detection for other config params oldP := currConf.Priority newP := cfg.PITR.Priority diff --git a/pbm/oplog/nomination.go b/pbm/oplog/nomination.go index a41df5728..b5b99e795 100644 --- a/pbm/oplog/nomination.go +++ b/pbm/oplog/nomination.go @@ -4,11 +4,12 @@ import ( "context" "time" - "github.com/percona/percona-backup-mongodb/pbm/connect" - "github.com/percona/percona-backup-mongodb/pbm/errors" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" + + "github.com/percona/percona-backup-mongodb/pbm/connect" + "github.com/percona/percona-backup-mongodb/pbm/errors" ) // PITRMeta contains all operational data about PITR execution process. 
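The reviewdog fixes above wrap the SetErrorRSStatus calls in their own error checks inside a deferred closure. A generic sketch of that "publish the function's final error on the way out" pattern, with a named return so the deferred closure can observe it (report is a placeholder, not a PBM function):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // report is a stand-in for publishing an error status somewhere shared.
    func report(msg string) error {
    	fmt.Println("status:", msg)
    	return nil
    }

    func run() (err error) { // named return: the deferred closure sees the final value
    	defer func() {
    		if err != nil {
    			if rerr := report(err.Error()); rerr != nil {
    				fmt.Println("could not report:", rerr)
    			}
    		}
    	}()

    	return errors.New("catchup failed")
    }

    func main() {
    	_ = run()
    }
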
@@ -81,7 +82,7 @@ func GetMeta( meta := &PITRMeta{} if err := res.Decode(meta); err != nil { - errors.Wrap(err, "decode") + return nil, errors.Wrap(err, "decode") } return meta, nil } @@ -192,7 +193,7 @@ func GetPITRNominees( ) (*PITRNomination, error) { meta, err := GetMeta(ctx, conn) if err != nil { - errors.Wrap(err, "get meta") + return nil, errors.Wrap(err, "get meta") } for _, n := range meta.Nomination { From e2e7f4f0701ec219369fdcdb6d536d7c8731d51a Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 1 Jul 2024 20:24:53 +0200 Subject: [PATCH 109/203] Fix bug when winning pitr nominee is not reporting .. in logs --- cmd/pbm-agent/pitr.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 88c8fedcd..66ec006aa 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -463,6 +463,14 @@ func (a *Agent) nominateRSForPITR(ctx context.Context, rs string, nodes [][]stri } for _, n := range nodes { + err = oplog.SetPITRNominees(ctx, a.leadConn, rs, n) + if err != nil { + return errors.Wrap(err, "set pitr nominees") + } + l.Debug("pitr nomination %s, set candidates %v", rs, n) + + time.Sleep(pitrRenominationFrame) + nms, err := oplog.GetPITRNominees(ctx, a.leadConn, rs) if err != nil && !errors.Is(err, errors.ErrNotFound) { return errors.Wrap(err, "get pitr nominees") @@ -471,14 +479,6 @@ func (a *Agent) nominateRSForPITR(ctx context.Context, rs string, nodes [][]stri l.Debug("pitr nomination: %s won by %s", rs, nms.Ack) return nil } - - err = oplog.SetPITRNominees(ctx, a.leadConn, rs, n) - if err != nil { - return errors.Wrap(err, "set pitr nominees") - } - l.Debug("pitr nomination %s, set candidates %v", rs, n) - - time.Sleep(pitrRenominationFrame) } return nil From 53aa80cb4594634e74ec30de41e8e039a9de2064 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 1 Jul 2024 23:00:50 +0200 Subject: [PATCH 110/203] Add complete logic for config monitor --- cmd/pbm-agent/pitr.go | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 66ec006aa..c87715a6d 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -678,6 +678,22 @@ func (a *Agent) pitrConfigMonitor(ctx context.Context, firstConf *config.Config) updateCurrConf := func(c *config.Config) (*config.PITRConf, primitive.Timestamp) { return c.PITR, c.Epoch } + equal := func(c1 *config.PITRConf, c2 *config.PITRConf) bool { + if c1 == nil || c2 == nil { + return c1 == c2 + } + if c1.OplogOnly != c2.OplogOnly { + return false + } + if c1.OplogSpanMin != c2.OplogSpanMin { + return false + } + if !maps.Equal(c1.Priority, c2.Priority) { + return false + } + + return true + } currConf, currEpoh := updateCurrConf(firstConf) @@ -695,27 +711,19 @@ func (a *Agent) pitrConfigMonitor(ctx context.Context, firstConf *config.Config) if currEpoh == cfg.Epoch { continue } - if !cfg.PITR.Enabled { // If pitr is disabled, there is no need to check its properties. // Enable/disable change is handled out of the monitor logic (in pitr main loop). 
currConf, currEpoh = updateCurrConf(cfg) continue } - - // todo: add change detection for other config params - - oldP := currConf.Priority - newP := cfg.PITR.Priority - if newP == nil && oldP == nil { - currConf, currEpoh = updateCurrConf(cfg) - continue - } - if maps.Equal(newP, oldP) { - currConf, currEpoh = updateCurrConf(cfg) + if equal(cfg.PITR, currConf) { continue } + // there are differences between privious and new config in following + // fields: OplogOnly, OplogSpanMin, Priority + l.Info("pitr config has changed, re-config will be done") err = oplog.SetClusterStatus(ctx, a.leadConn, oplog.StatusReconfig) if err != nil { From 8163f5ea6a324f241cc9435c1a4167125987c072 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 1 Jul 2024 23:01:54 +0200 Subject: [PATCH 111/203] Upgrade Clone method for PITRConf --- pbm/config/config.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/pbm/config/config.go b/pbm/config/config.go index d8c19fec9..f1bee8bb2 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "maps" "os" "reflect" "strconv" @@ -193,6 +194,7 @@ func (cfg *PITRConf) Clone() *PITRConf { } rv := *cfg + rv.Priority = maps.Clone(cfg.Priority) if cfg.CompressionLevel != nil { a := *cfg.CompressionLevel rv.CompressionLevel = &a @@ -365,12 +367,8 @@ func (cfg *BackupConf) Clone() *BackupConf { } rv := *cfg - if len(cfg.Priority) != 0 { - rv.Priority = make(map[string]float64, len(cfg.Priority)) - for k, v := range cfg.Priority { - rv.Priority[k] = v - } - } + + rv.Priority = maps.Clone(cfg.Priority) if cfg.Timeouts != nil { if cfg.Timeouts.Starting != nil { rv.Timeouts = &BackupTimeouts{ From fcb7e1aa14dee6ea9b5422e5c677f085960531d0 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 1 Jul 2024 23:18:30 +0200 Subject: [PATCH 112/203] Remove old OplogOnly change logic --- cmd/pbm-agent/agent.go | 3 --- cmd/pbm-agent/pitr.go | 16 ---------------- 2 files changed, 19 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 136ab112c..00ba94c61 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -40,9 +40,6 @@ type Agent struct { closeCMD chan struct{} pauseHB int32 - // prevOO is previous pitr.oplogOnly value - prevOO *bool - // pitr monitor (watcher) jobs are started monStarted bool // signal for stoppint pitr monitor jobs diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index c87715a6d..338bcff33 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -115,20 +115,6 @@ func (a *Agent) PITR(ctx context.Context) { } } -func (a *Agent) stopPitrOnOplogOnlyChange(currOO bool) { - if a.prevOO == nil { - a.prevOO = &currOO - return - } - - if *a.prevOO == currOO { - return - } - - a.prevOO = &currOO - a.removePitr() -} - // canSlicingNow returns lock.ConcurrentOpError if there is a parallel operation. // Only physical backups (full, incremental, external) is allowed. 
func canSlicingNow(ctx context.Context, conn connect.Client, stgCfg *config.StorageConf) error { @@ -180,8 +166,6 @@ func (a *Agent) pitr(ctx context.Context) error { return nil } - a.stopPitrOnOplogOnlyChange(cfg.PITR.OplogOnly) - if err := canSlicingNow(ctx, a.leadConn, &cfg.Storage); err != nil { e := lock.ConcurrentOpError{} if errors.As(err, &e) { From c9760fc0d030dc00d7b749e81cbc9008ebf53d23 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 2 Jul 2024 13:15:30 +0200 Subject: [PATCH 113/203] Remove OplogSpan diff from config monitor --- cmd/pbm-agent/pitr.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 338bcff33..ac9ed6a2c 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -662,16 +662,17 @@ func (a *Agent) pitrConfigMonitor(ctx context.Context, firstConf *config.Config) updateCurrConf := func(c *config.Config) (*config.PITRConf, primitive.Timestamp) { return c.PITR, c.Epoch } - equal := func(c1 *config.PITRConf, c2 *config.PITRConf) bool { + equal := func(c1, c2 *config.PITRConf) bool { if c1 == nil || c2 == nil { return c1 == c2 } if c1.OplogOnly != c2.OplogOnly { return false } - if c1.OplogSpanMin != c2.OplogSpanMin { - return false - } + // OplogSpanMin is compared and updated in the main pitr loop for now + // if c1.OplogSpanMin != c2.OplogSpanMin { + // return false + // } if !maps.Equal(c1.Priority, c2.Priority) { return false } From 3e7bad60b5fe91f30d7fce0d335c44a0da8ef9e4 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 3 Jul 2024 11:11:07 +0200 Subject: [PATCH 114/203] Add topo change detection for cluster leader ... purpose --- cmd/pbm-agent/agent.go | 1 + cmd/pbm-agent/pitr.go | 49 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 00ba94c61..68b5f1aa9 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -40,6 +40,7 @@ type Agent struct { closeCMD chan struct{} pauseHB int32 + monMx sync.Mutex // pitr monitor (watcher) jobs are started monStarted bool // signal for stoppint pitr monitor jobs diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index ac9ed6a2c..508156519 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -53,6 +53,9 @@ func (a *Agent) getPitr() *currentPitr { // startMon starts monitor (watcher) jobs only on cluster leader. 
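Commit 114 introduces a watcher that detects when the node stops being cluster leader. Before the guarded startMon body that follows, a bare-bones model of such a leadership watchdog loop; isLeader and resign are placeholder hooks, not the PBM API:

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    // watchLeadership polls isLeader and calls resign once leadership is lost.
    func watchLeadership(ctx context.Context, isLeader func() bool, resign func()) {
    	tk := time.NewTicker(20 * time.Millisecond)
    	defer tk.Stop()

    	for {
    		select {
    		case <-tk.C:
    			if isLeader() {
    				continue
    			}
    			resign() // e.g. set StatusReconfig and stop local jobs
    			return
    		case <-ctx.Done():
    			return
    		}
    	}
    }

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    	defer cancel()

    	calls := 0
    	watchLeadership(ctx,
    		func() bool { calls++; return calls < 3 },
    		func() { fmt.Println("leadership lost, re-configuring") })
    }
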
func (a *Agent) startMon(ctx context.Context, nodeInfo *topo.NodeInfo, cfg *config.Config) { + a.monMx.Lock() + defer a.monMx.Unlock() + if !nodeInfo.IsClusterLeader() { return } @@ -63,12 +66,16 @@ func (a *Agent) startMon(ctx context.Context, nodeInfo *topo.NodeInfo, cfg *conf go a.pitrConfigMonitor(ctx, cfg) go a.pitrErrorMonitor(ctx) + go a.pitrTopoMonitor(ctx) a.monStarted = true } // stopMon stops monitor (watcher) jobs func (a *Agent) stopMon() { + a.monMx.Lock() + defer a.monMx.Unlock() + if !a.monStarted { return } @@ -95,6 +102,7 @@ const ( pitrNominationPollingCycle = 2 * time.Second pitrNominationPollingTimeOut = 2 * time.Minute pitrWatchMonitorPollingCycle = 5 * time.Second + pitrTopoMonitorPollingCycle = 2 * time.Minute ) // PITR starts PITR processing routine @@ -725,6 +733,47 @@ func (a *Agent) pitrConfigMonitor(ctx context.Context, firstConf *config.Config) } } +func (a *Agent) pitrTopoMonitor(ctx context.Context) { + l := log.LogEventFromContext(ctx) + l.Debug("start pitr topo monitor") + defer l.Debug("stop pitr topo monitor") + + tk := time.NewTicker(pitrTopoMonitorPollingCycle) + defer tk.Stop() + + for { + select { + case <-tk.C: + nodeInfo, err := topo.GetNodeInfo(ctx, a.nodeConn) + if err != nil { + l.Error("topo monitor node info error", err) + continue + } + + if nodeInfo.IsClusterLeader() { + continue + } + + l.Info("topo/cluster leader has changed, re-configuring pitr members") + err = oplog.SetClusterStatus(ctx, a.leadConn, oplog.StatusReconfig) + if err != nil { + l.Error("topo monitor reconfig status set", err) + continue + } + a.removePitr() + a.stopMon() + + return + + case <-ctx.Done(): + return + + case <-a.monStopSig: + return + } + } +} + // pitrErrorMonitor watches reported errors by agents on replica set(s) // which are running PITR. 
// In case of any reported error within pbmPITR collection (replicaset subdoc), From abd811d1bdaac13bdf057f2d64618ed12b5a7ba0 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 3 Jul 2024 12:20:45 +0200 Subject: [PATCH 115/203] Move polling consts on top of pitr file --- cmd/pbm-agent/pitr.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 508156519..db5467528 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -23,6 +23,17 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/util" ) +const ( + pitrCheckPeriod = 15 * time.Second + pitrRenominationFrame = 5 * time.Second + pitrOpLockPollingCycle = 15 * time.Second + pitrOpLockPollingTimeOut = 2 * time.Minute + pitrNominationPollingCycle = 2 * time.Second + pitrNominationPollingTimeOut = 2 * time.Minute + pitrWatchMonitorPollingCycle = 15 * time.Second + pitrTopoMonitorPollingCycle = 2 * time.Minute +) + type currentPitr struct { slicer *slicer.Slicer w chan ctrl.OPID // to wake up a slicer on demand (not to wait for the tick) @@ -94,17 +105,6 @@ func (a *Agent) sliceNow(opid ctrl.OPID) { a.pitrjob.w <- opid } -const ( - pitrCheckPeriod = 15 * time.Second - pitrRenominationFrame = 5 * time.Second - pitrOpLockPollingCycle = 15 * time.Second - pitrOpLockPollingTimeOut = 2 * time.Minute - pitrNominationPollingCycle = 2 * time.Second - pitrNominationPollingTimeOut = 2 * time.Minute - pitrWatchMonitorPollingCycle = 5 * time.Second - pitrTopoMonitorPollingCycle = 2 * time.Minute -) - // PITR starts PITR processing routine func (a *Agent) PITR(ctx context.Context) { l := log.FromContext(ctx) From 094ce4ba97f141392ef1b7c25bd296a2b3674fe5 Mon Sep 17 00:00:00 2001 From: Oleksandr Havryliak <88387200+olexandr-havryliak@users.noreply.github.com> Date: Thu, 4 Jul 2024 13:50:58 +0300 Subject: [PATCH 116/203] PSMDB 4.4 EOL (#961) --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f3bbd890b..8696a691c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,7 +32,7 @@ jobs: strategy: fail-fast: false matrix: - psmdb: ["4.4", "5.0", "6.0", "7.0"] + psmdb: ["5.0", "6.0", "7.0"] test: [logical, physical, incremental, external] env: PBM_BRANCH: ${{ github.event.inputs.pbm_branch || 'main' }} From 68cc18d806925960aa2e5b4042c6d4bc7add75ba Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 4 Jul 2024 13:19:50 +0200 Subject: [PATCH 117/203] Add PITR slicing activity check --- cmd/pbm-agent/pitr.go | 79 ++++++++++++++++++++++++++++++++++++----- pbm/oplog/nomination.go | 21 +++++++++++ pbm/oplog/oplog.go | 28 +++++++++++++++ 3 files changed, 120 insertions(+), 8 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index db5467528..e9ab5fd78 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -24,14 +24,15 @@ import ( ) const ( - pitrCheckPeriod = 15 * time.Second - pitrRenominationFrame = 5 * time.Second - pitrOpLockPollingCycle = 15 * time.Second - pitrOpLockPollingTimeOut = 2 * time.Minute - pitrNominationPollingCycle = 2 * time.Second - pitrNominationPollingTimeOut = 2 * time.Minute - pitrWatchMonitorPollingCycle = 15 * time.Second - pitrTopoMonitorPollingCycle = 2 * time.Minute + pitrCheckPeriod = 15 * time.Second + pitrRenominationFrame = 5 * time.Second + pitrOpLockPollingCycle = 15 * time.Second + pitrOpLockPollingTimeOut = 2 * time.Minute + pitrNominationPollingCycle = 2 * time.Second + 
pitrNominationPollingTimeOut = 2 * time.Minute + pitrWatchMonitorPollingCycle = 15 * time.Second + pitrTopoMonitorPollingCycle = 2 * time.Minute + pitrActivityMonitorPollingCycle = 2 * time.Minute ) type currentPitr struct { @@ -78,6 +79,7 @@ func (a *Agent) startMon(ctx context.Context, nodeInfo *topo.NodeInfo, cfg *conf go a.pitrConfigMonitor(ctx, cfg) go a.pitrErrorMonitor(ctx) go a.pitrTopoMonitor(ctx) + go a.pitrActivityMonitor(ctx) a.monStarted = true } @@ -774,6 +776,67 @@ func (a *Agent) pitrTopoMonitor(ctx context.Context) { } } +func (a *Agent) pitrActivityMonitor(ctx context.Context) { + l := log.LogEventFromContext(ctx) + l.Debug("start pitr agent activity monitor") + defer l.Debug("stop pitr agent activity monitor") + + tk := time.NewTicker(pitrActivityMonitorPollingCycle) + defer tk.Stop() + + for { + select { + case <-tk.C: + status, err := oplog.GetClusterStatus(ctx, a.leadConn) + if err != nil { + if errors.Is(err, errors.ErrNotFound) { + continue + } + l.Error("agent activity get cluster status", err) + continue + } + if status != oplog.StatusRunning { + continue + } + + ackedAgents, err := oplog.GetAgentsWithACK(ctx, a.leadConn) + if err != nil { + l.Error("activity get acked agents", err) + continue + } + + activeLocks, err := oplog.FetchSlicersWithActiveLocks(ctx, a.leadConn) + if err != nil { + l.Error("fetching active pitr locks", err) + continue + } + + if len(ackedAgents) == len(activeLocks) { + continue + } + + l.Debug("expected agents: %v; working agents: %v", ackedAgents, activeLocks) + + l.Info("not all ack agents are working, re-configuring pitr members") + err = oplog.SetClusterStatus(ctx, a.leadConn, oplog.StatusReconfig) + if err != nil { + l.Error("activity monitor reconfig status set", err) + continue + } + a.removePitr() + a.stopMon() + + return + + case <-ctx.Done(): + return + + case <-a.monStopSig: + return + } + } +} + // pitrErrorMonitor watches reported errors by agents on replica set(s) // which are running PITR. // In case of any reported error within pbmPITR collection (replicaset subdoc), diff --git a/pbm/oplog/nomination.go b/pbm/oplog/nomination.go index b5b99e795..d7ced267a 100644 --- a/pbm/oplog/nomination.go +++ b/pbm/oplog/nomination.go @@ -2,6 +2,7 @@ package oplog import ( "context" + "fmt" "time" "go.mongodb.org/mongo-driver/bson" @@ -244,3 +245,23 @@ func SetPITRNomineeACK( return errors.Wrap(err, "update pitr nominee ack") } + +// GetAgentsWithACK returns the list of all acknowledged agents. +func GetAgentsWithACK(ctx context.Context, conn connect.Client) ([]string, error) { + agents := []string{} + meta, err := GetMeta(ctx, conn) + if err != nil { + if errors.Is(err, errors.ErrNotFound) { + return agents, err + } + return agents, errors.Wrap(err, "getting meta") + } + + for _, n := range meta.Nomination { + if len(n.Ack) > 0 { + agents = append(agents, fmt.Sprintf("%s/%s", n.RS, n.Ack)) + } + } + + return agents, nil +} diff --git a/pbm/oplog/oplog.go b/pbm/oplog/oplog.go index d0a089d34..14e6264d8 100644 --- a/pbm/oplog/oplog.go +++ b/pbm/oplog/oplog.go @@ -2,6 +2,7 @@ package oplog import ( "context" + "fmt" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" @@ -98,3 +99,30 @@ func IsOplogSlicing(ctx context.Context, conn connect.Client) (bool, error) { return false, nil } + +// FetchSlicersWithActiveLocks fetches the list of slicers (agents) +// that are holding active OpLock. 
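Before the FetchSlicersWithActiveLocks body that follows, the liveness rule it relies on, isolated into a runnable example: a lock counts as active while its heartbeat is within a stale-frame window of the current cluster time. Types and the window constant here are simplified stand-ins:

    package main

    import "fmt"

    const staleFrameSec = 30 // assumed window, mirroring defs.StaleFrameSec

    type lockInfo struct {
    	Node       string
    	HeartbeatT uint32 // seconds part of the heartbeat timestamp
    }

    // activeNodes keeps only locks whose heartbeat is fresh enough.
    func activeNodes(locks []lockInfo, clusterTimeT uint32) []string {
    	var out []string
    	for _, l := range locks {
    		if l.HeartbeatT+staleFrameSec >= clusterTimeT {
    			out = append(out, l.Node)
    		}
    	}
    	return out
    }

    func main() {
    	locks := []lockInfo{{"rs1/n1", 100}, {"rs1/n2", 10}}
    	fmt.Println(activeNodes(locks, 120)) // [rs1/n1]
    }
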
+func FetchSlicersWithActiveLocks(ctx context.Context, conn connect.Client) ([]string, error) { + res := []string{} + + locks, err := lock.GetOpLocks(ctx, conn, &lock.LockHeader{Type: ctrl.CmdPITR}) + if err != nil { + return res, errors.Wrap(err, "get locks") + } + if len(locks) == 0 { + return res, nil + } + + ct, err := topo.GetClusterTime(ctx, conn) + if err != nil { + return res, errors.Wrap(err, "get cluster time") + } + + for _, lock := range locks { + if lock.Heartbeat.T+defs.StaleFrameSec >= ct.T { + res = append(res, fmt.Sprintf("%s/%s", lock.Replset, lock.Node)) + } + } + + return res, nil +} From 5e8e4ae4b7640834f14f97685c5887c59d0a889d Mon Sep 17 00:00:00 2001 From: Sandra Romanchenko <53295797+sandraromanchenko@users.noreply.github.com> Date: Tue, 9 Jul 2024 17:50:57 +0300 Subject: [PATCH 118/203] PBM-1190. Remove unnecessary option to align URI with doc examples (#962) --- packaging/conf/pbm-agent.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/conf/pbm-agent.env b/packaging/conf/pbm-agent.env index d4fbb490b..5716883f1 100644 --- a/packaging/conf/pbm-agent.env +++ b/packaging/conf/pbm-agent.env @@ -1 +1 @@ -PBM_MONGODB_URI="mongodb://backupUser:backupPassword@localhost:27017/?authSource=admin&replicaSet=rs1" +PBM_MONGODB_URI="mongodb://backupUser:backupPassword@localhost:27017/?authSource=admin" From ae58fb510bc6cdb2648f154e729a205e78aae9cd Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 16 Jul 2024 11:48:58 +0200 Subject: [PATCH 119/203] Migrate e2e-tests to Docker Compose V2 (#963) --- e2e-tests/README.md | 4 ++-- e2e-tests/functions | 50 +++++++++++++++++++-------------------- e2e-tests/run-new-cluster | 2 +- 3 files changed, 28 insertions(+), 28 deletions(-) diff --git a/e2e-tests/README.md b/e2e-tests/README.md index 8a9060bcb..47f5b781a 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -28,9 +28,9 @@ $ MONGODB_VERSION=4.4 ./start-cluster You need to set up PBM though: ``` -$ docker-compose -f ./docker/docker-compose.yaml exec agent-rs101 pbm config --file=/etc/pbm/minio.yaml +$ docker compose -f ./docker/docker-compose.yaml exec agent-rs101 pbm config --file=/etc/pbm/minio.yaml ``` Run pbm commands: ``` -$ docker-compose -f ./docker/docker-compose.yaml exec agent-rs101 pbm list +$ docker compose -f ./docker/docker-compose.yaml exec agent-rs101 pbm list ``` diff --git a/e2e-tests/functions b/e2e-tests/functions index ab641ee7b..9dc66dcf1 100644 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -29,22 +29,22 @@ run() { ;; esac - docker-compose -f $compose up -d docker-host + docker compose -f $compose up -d docker-host desc 'Run tests' - docker-compose -f $compose run tests + docker compose -f $compose run tests EXIT_CODE=$? 
if [ "$EXIT_CODE" != 0 ]; then - docker-compose -f $compose logs --no-color --tail=100 + docker compose -f $compose logs --no-color --tail=100 desc 'PBM logs' - docker-compose -f $compose exec -T agent-rs101 pbm logs -t 0 -s D -x || true + docker compose -f $compose exec -T agent-rs101 pbm logs -t 0 -s D -x || true if [ $compose == $COMPOSE_REMAPPING_PATH ]; then - docker-compose -f $compose exec -T agent-rs201 pbm logs -t 0 -s D -x || true + docker compose -f $compose exec -T agent-rs201 pbm logs -t 0 -s D -x || true fi desc 'PBM status' - docker-compose -f $compose exec -T agent-rs101 pbm status || true + docker compose -f $compose exec -T agent-rs101 pbm status || true if [ $compose == $COMPOSE_REMAPPING_PATH ]; then - docker-compose -f $compose exec -T agent-rs201 pbm status || true + docker compose -f $compose exec -T agent-rs201 pbm status || true fi fi @@ -59,7 +59,7 @@ run() { pbm_run() { local cmd="$@" - docker-compose -f $COMPOSE_PATH exec -T agent-rs101 pbm $cmd + docker compose -f $COMPOSE_PATH exec -T agent-rs101 pbm $cmd } mongo_run() { @@ -71,7 +71,7 @@ mongo_run() { mongo="mongosh" fi - docker-compose -f $COMPOSE_PATH exec -T "${rs}01" $mongo "mongodb://${BACKUP_USER}:${MONGO_PASS}@localhost/?replicaSet=${rs}" --quiet --eval="${cmd}" + docker compose -f $COMPOSE_PATH exec -T "${rs}01" $mongo "mongodb://${BACKUP_USER}:${MONGO_PASS}@localhost/?replicaSet=${rs}" --quiet --eval="${cmd}" } wait_backup() { @@ -158,7 +158,7 @@ start_cluster() { genMongoKey echo 'Build agents and tests' - docker-compose -f $COMPOSE_PATH build --no-cache --pull + docker compose -f $COMPOSE_PATH build --no-cache --pull mongo="mongo" if [ "${mongo_version:0:1}" -ge 6 ]; then @@ -171,18 +171,18 @@ start_cluster() { fi export MONGODB_VERSION=${mongo_version:-"4.4"} export MONGODB_IMAGE=${MONGODB_IMAGE:-"percona/percona-server-mongodb"} - docker-compose -f $COMPOSE_PATH up --quiet-pull --no-color -d \ + docker compose -f $COMPOSE_PATH up --quiet-pull --no-color -d \ cfg01 cfg02 cfg03 rs101 rs102 rs103 rs201 rs202 rs203 mongos minio createbucket sleep 25 - docker-compose -f $COMPOSE_PATH ps + docker compose -f $COMPOSE_PATH ps export COMPOSE_INTERACTIVE_NO_CLI=1 - docker-compose -f $COMPOSE_PATH exec -T cfg01 /opt/start.sh - docker-compose -f $COMPOSE_PATH exec -T rs101 /opt/start.sh - docker-compose -f $COMPOSE_PATH exec -T rs201 /opt/start.sh + docker compose -f $COMPOSE_PATH exec -T cfg01 /opt/start.sh + docker compose -f $COMPOSE_PATH exec -T rs101 /opt/start.sh + docker compose -f $COMPOSE_PATH exec -T rs201 /opt/start.sh sleep 15 - docker-compose -f $COMPOSE_PATH exec -T mongos $mongo mongodb://${BACKUP_USER}:${MONGO_PASS}@localhost /opt/mongos_init.js + docker compose -f $COMPOSE_PATH exec -T mongos $mongo mongodb://${BACKUP_USER}:${MONGO_PASS}@localhost /opt/mongos_init.js - docker-compose -f $COMPOSE_PATH up --quiet-pull --no-color -d \ + docker compose -f $COMPOSE_PATH up --quiet-pull --no-color -d \ agent-cfg01 agent-cfg02 agent-cfg03 agent-rs101 agent-rs102 agent-rs103 agent-rs201 agent-rs202 agent-rs203 } @@ -203,7 +203,7 @@ start_replset() { genMongoKey echo 'Build agents and tests' - docker-compose -f $compose build --no-cache --pull + docker compose -f $compose build --no-cache --pull if [ ! 
-d "${test_dir}/docker/backups" ]; then mkdir "${test_dir}/docker/backups" @@ -212,19 +212,19 @@ start_replset() { export MONGODB_VERSION=${mongo_version:-"4.4"} export MONGODB_IMAGE=${MONGODB_IMAGE:-"percona/percona-server-mongodb"} - docker-compose -f $compose up --quiet-pull --no-color -d \ + docker compose -f $compose up --quiet-pull --no-color -d \ $nodes sleep 25 - docker-compose -f $compose ps + docker compose -f $compose ps export COMPOSE_INTERACTIVE_NO_CLI=1 - docker-compose -f $compose exec -T rs101 /opt/start.sh + docker compose -f $compose exec -T rs101 /opt/start.sh if [ $compose == $COMPOSE_REMAPPING_PATH ]; then - docker-compose -f $compose exec -T rs201 /opt/start.sh + docker compose -f $compose exec -T rs201 /opt/start.sh fi sleep 15 - docker-compose -f $compose up -d $agents + docker compose -f $compose up -d $agents } genMongoKey() { @@ -239,6 +239,6 @@ genMongoKey() { destroy_cluster() { local compose=$1 - docker-compose -f $compose ps - docker-compose -f $compose down -v -t 2 + docker compose -f $compose ps + docker compose -f $compose down -v -t 2 } diff --git a/e2e-tests/run-new-cluster b/e2e-tests/run-new-cluster index 579f69f73..e5eada9d7 100755 --- a/e2e-tests/run-new-cluster +++ b/e2e-tests/run-new-cluster @@ -47,7 +47,7 @@ users_og=$(mongo_run 'db.getSiblingDB("admin").system.users.find({}, {_id: 1}).s date desc 'Destroy cluster' -docker-compose -f ${test_dir}/docker/docker-compose.yaml down +docker compose -f ${test_dir}/docker/docker-compose.yaml down desc 'Removing PSMDB volumes' docker volume rm $(docker volume ls --filter name=data -q) From 8e954bac25ad002d8b80b72dbcdc091d7413d4ce Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 16 Jul 2024 19:54:14 +0200 Subject: [PATCH 120/203] Remove unused Context --- cmd/pbm-agent/backup.go | 2 +- cmd/pbm-agent/pitr.go | 2 +- pbm/prio/priority.go | 2 -- pbm/prio/priority_test.go | 11 +++++------ 4 files changed, 7 insertions(+), 10 deletions(-) diff --git a/cmd/pbm-agent/backup.go b/cmd/pbm-agent/backup.go index 5cf5f0cd4..b44df9b14 100644 --- a/cmd/pbm-agent/backup.go +++ b/cmd/pbm-agent/backup.go @@ -163,7 +163,7 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, validCandidates = append(validCandidates, s) } - nodes, err := prio.CalcNodesPriority(ctx, c, cfg.Backup.Priority, validCandidates) + nodes, err := prio.CalcNodesPriority(c, cfg.Backup.Priority, validCandidates) if err != nil { l.Error("get nodes priority: %v", err) return diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index e9ab5fd78..d0eb5b615 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -407,7 +407,7 @@ func (a *Agent) leadNomination( return } - nodes, err := prio.CalcNodesPriority(ctx, nil, cfg.PITR.Priority, agents) + nodes, err := prio.CalcNodesPriority(nil, cfg.PITR.Priority, agents) if err != nil { l.Error("get nodes priority: %v", err) return diff --git a/pbm/prio/priority.go b/pbm/prio/priority.go index c1193b6cd..30b491d2b 100644 --- a/pbm/prio/priority.go +++ b/pbm/prio/priority.go @@ -1,7 +1,6 @@ package prio import ( - "context" "sort" "github.com/percona/percona-backup-mongodb/pbm/config" @@ -45,7 +44,6 @@ type agentScore func(topo.AgentStat) float64 // Custom coefficients might be passed. These will be ignored though // if the config is set. 
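Before the CalcNodesPriority definition that follows, the implicit scoring rules in miniature: primaries score half, hidden members double, and explicit coefficients override both. This sketch assumes a base score of 1.0 and uses simplified types:

    package main

    import "fmt"

    const defaultScore = 1.0 // assumed base score

    type agent struct {
    	Node    string
    	Primary bool
    	Hidden  bool
    }

    // score mirrors the implicit rules: primaries are least preferred,
    // hidden members most preferred, everything else in between.
    func score(a agent, coeff map[string]float64) float64 {
    	if c, ok := coeff[a.Node]; ok {
    		return defaultScore * c
    	}
    	if a.Primary {
    		return defaultScore / 2
    	}
    	if a.Hidden {
    		return defaultScore * 2
    	}
    	return defaultScore
    }

    func main() {
    	fmt.Println(score(agent{Node: "n1", Primary: true}, nil)) // 0.5
    	fmt.Println(score(agent{Node: "n2", Hidden: true}, nil))  // 2
    	fmt.Println(score(agent{Node: "n3"}, nil))                // 1
    }
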
func CalcNodesPriority( - ctx context.Context, c map[string]float64, cfgPrio config.Priority, agents []topo.AgentStat, diff --git a/pbm/prio/priority_test.go b/pbm/prio/priority_test.go index 9f2239108..66af8ebd3 100644 --- a/pbm/prio/priority_test.go +++ b/pbm/prio/priority_test.go @@ -1,7 +1,6 @@ package prio import ( - "context" "reflect" "testing" @@ -72,7 +71,7 @@ func TestCalcNodesPriority(t *testing.T) { } for _, tC := range testCases { t.Run(tC.desc, func(t *testing.T) { - np, err := CalcNodesPriority(context.Background(), nil, nil, tC.agents) + np, err := CalcNodesPriority(nil, nil, tC.agents) if err != nil { t.Fatalf("unexpected error while calculating nodes priority: %v", err) } @@ -150,7 +149,7 @@ func TestCalcNodesPriority(t *testing.T) { } for _, tC := range testCases { t.Run(tC.desc, func(t *testing.T) { - np, err := CalcNodesPriority(context.Background(), nil, nil, tC.agents) + np, err := CalcNodesPriority(nil, nil, tC.agents) if err != nil { t.Fatalf("unexpected error while calculating nodes priority: %v", err) } @@ -262,7 +261,7 @@ func TestCalcNodesPriority(t *testing.T) { } for _, tC := range testCases { t.Run(tC.desc, func(t *testing.T) { - np, err := CalcNodesPriority(context.Background(), nil, tC.expPrio, tC.agents) + np, err := CalcNodesPriority(nil, tC.expPrio, tC.agents) if err != nil { t.Fatalf("unexpected error while calculating nodes priority: %v", err) } @@ -357,7 +356,7 @@ func TestCalcNodesPriority(t *testing.T) { } for _, tC := range testCases { t.Run(tC.desc, func(t *testing.T) { - np, err := CalcNodesPriority(context.Background(), nil, tC.expPrio, tC.agents) + np, err := CalcNodesPriority(nil, tC.expPrio, tC.agents) if err != nil { t.Fatalf("unexpected error while calculating nodes priority: %v", err) } @@ -394,7 +393,7 @@ func TestCalcNodesPriority(t *testing.T) { "rs03": 3.0, } - np, err := CalcNodesPriority(context.Background(), c, nil, agents) + np, err := CalcNodesPriority(c, nil, agents) if err != nil { t.Fatalf("unexpected error while calculating nodes priority: %v", err) } From 0037176f52062d5b3faf689c7efdbd7ad1f98376 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 19 Jul 2024 15:35:20 +0200 Subject: [PATCH 121/203] Use mongStopSig as the flag Instead of using monStarted field for the same purpose. 
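A self-contained illustration of the change this commit makes: the stop channel itself doubles as the started/stopped flag, so a separate boolean cannot drift out of sync. Illustrative code only, not the patch itself:

    package main

    import (
    	"fmt"
    	"sync"
    )

    type mon struct {
    	mu   sync.Mutex
    	stop chan struct{} // nil means "not started"
    }

    func (m *mon) start() bool {
    	m.mu.Lock()
    	defer m.mu.Unlock()
    	if m.stop != nil {
    		return false // already running
    	}
    	m.stop = make(chan struct{})
    	return true
    }

    func (m *mon) halt() {
    	m.mu.Lock()
    	defer m.mu.Unlock()
    	if m.stop == nil {
    		return
    	}
    	close(m.stop)
    	m.stop = nil
    }

    func main() {
    	var m mon
    	fmt.Println(m.start()) // true
    	fmt.Println(m.start()) // false: guarded by the nil check
    	m.halt()
    	fmt.Println(m.start()) // true again
    }
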
--- cmd/pbm-agent/agent.go | 4 +--- cmd/pbm-agent/pitr.go | 8 +++----- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 68b5f1aa9..5a21207ee 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -41,9 +41,7 @@ type Agent struct { pauseHB int32 monMx sync.Mutex - // pitr monitor (watcher) jobs are started - monStarted bool - // signal for stoppint pitr monitor jobs + // signal for stopping pitr monitor jobs and flag that jobs are started/stopped monStopSig chan struct{} } diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index d0eb5b615..0a673e240 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -71,7 +71,7 @@ func (a *Agent) startMon(ctx context.Context, nodeInfo *topo.NodeInfo, cfg *conf if !nodeInfo.IsClusterLeader() { return } - if a.monStarted { + if a.monStopSig != nil { return } a.monStopSig = make(chan struct{}) @@ -80,8 +80,6 @@ func (a *Agent) startMon(ctx context.Context, nodeInfo *topo.NodeInfo, cfg *conf go a.pitrErrorMonitor(ctx) go a.pitrTopoMonitor(ctx) go a.pitrActivityMonitor(ctx) - - a.monStarted = true } // stopMon stops monitor (watcher) jobs @@ -89,11 +87,11 @@ func (a *Agent) stopMon() { a.monMx.Lock() defer a.monMx.Unlock() - if !a.monStarted { + if a.monStopSig == nil { return } close(a.monStopSig) - a.monStarted = false + a.monStopSig = nil } func (a *Agent) sliceNow(opid ctrl.OPID) { From 2ef6314269e049a6a47b504ab88fa9d58a50aac9 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Sat, 20 Jul 2024 20:35:50 +0200 Subject: [PATCH 122/203] Pull out cluster leader check from startMon & leadNomination --- cmd/pbm-agent/pitr.go | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 0a673e240..70e7e5993 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -64,13 +64,10 @@ func (a *Agent) getPitr() *currentPitr { } // startMon starts monitor (watcher) jobs only on cluster leader. -func (a *Agent) startMon(ctx context.Context, nodeInfo *topo.NodeInfo, cfg *config.Config) { +func (a *Agent) startMon(ctx context.Context, cfg *config.Config) { a.monMx.Lock() defer a.monMx.Unlock() - if !nodeInfo.IsClusterLeader() { - return - } if a.monStopSig != nil { return } @@ -230,11 +227,13 @@ func (a *Agent) pitr(ctx context.Context) error { return nil } - // start monitor jobs on cluster leader - a.startMon(ctx, nodeInfo, cfg) + if nodeInfo.IsClusterLeader() { + // start monitor jobs on cluster leader + a.startMon(ctx, cfg) - // start nomination process on cluster leader - go a.leadNomination(ctx, nodeInfo, cfg) + // start nomination process on cluster leader + go a.leadNomination(ctx, cfg.PITR.Priority) + } nominated, err := a.waitNominationForPITR(ctx, nodeInfo.SetName, nodeInfo.Me) if err != nil { @@ -373,15 +372,10 @@ func (a *Agent) pitr(ctx context.Context) error { // It requires to be run in separate go routine on cluster leader. 
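Before the leadNomination signature change that follows, a sketch of the fan-out at the end of that function: one nomination goroutine per replica set, each logging its own failure instead of aborting the others. Names and the failure case are invented:

    package main

    import (
    	"fmt"
    	"sync"
    )

    // nominate pretends to run the nomination round for one replica set.
    func nominate(rs string) error {
    	if rs == "rs2" {
    		return fmt.Errorf("no reachable candidates in %s", rs)
    	}
    	return nil
    }

    func main() {
    	var wg sync.WaitGroup
    	for _, rs := range []string{"rs1", "rs2", "rs3"} {
    		wg.Add(1)
    		go func(rs string) {
    			defer wg.Done()
    			if err := nominate(rs); err != nil {
    				fmt.Println("nomination error for", rs+":", err) // log, don't abort others
    			}
    		}(rs)
    	}
    	wg.Wait()
    }
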
func (a *Agent) leadNomination( ctx context.Context, - nodeInfo *topo.NodeInfo, - cfg *config.Config, + cfgPrio config.Priority, ) { l := log.LogEventFromContext(ctx) - if !nodeInfo.IsClusterLeader() { - return - } - l.Debug("checking locks in the whole cluster") noLocks, err := a.waitAllOpLockRelease(ctx) if err != nil { @@ -405,7 +399,7 @@ func (a *Agent) leadNomination( return } - nodes, err := prio.CalcNodesPriority(nil, cfg.PITR.Priority, agents) + nodes, err := prio.CalcNodesPriority(nil, cfgPrio, agents) if err != nil { l.Error("get nodes priority: %v", err) return From b6ad2df6ba0d24377c8ba6406870003926aeb8af Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Sun, 21 Jul 2024 17:33:16 +0200 Subject: [PATCH 123/203] Refactor pitrConfigMon --- cmd/pbm-agent/pitr.go | 51 +++++++++++++++++++------------------------ 1 file changed, 23 insertions(+), 28 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 70e7e5993..c3fef69d1 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -5,7 +5,6 @@ import ( "maps" "time" - "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo" "github.com/percona/percona-backup-mongodb/pbm/backup" @@ -661,28 +660,7 @@ func (a *Agent) pitrConfigMonitor(ctx context.Context, firstConf *config.Config) tk := time.NewTicker(pitrWatchMonitorPollingCycle) defer tk.Stop() - updateCurrConf := func(c *config.Config) (*config.PITRConf, primitive.Timestamp) { - return c.PITR, c.Epoch - } - equal := func(c1, c2 *config.PITRConf) bool { - if c1 == nil || c2 == nil { - return c1 == c2 - } - if c1.OplogOnly != c2.OplogOnly { - return false - } - // OplogSpanMin is compared and updated in the main pitr loop for now - // if c1.OplogSpanMin != c2.OplogSpanMin { - // return false - // } - if !maps.Equal(c1.Priority, c2.Priority) { - return false - } - - return true - } - - currConf, currEpoh := updateCurrConf(firstConf) + currConf, currEpoh := firstConf.PITR, firstConf.Epoch for { select { @@ -701,22 +679,21 @@ func (a *Agent) pitrConfigMonitor(ctx context.Context, firstConf *config.Config) if !cfg.PITR.Enabled { // If pitr is disabled, there is no need to check its properties. // Enable/disable change is handled out of the monitor logic (in pitr main loop). 
- currConf, currEpoh = updateCurrConf(cfg) + currConf, currEpoh = cfg.PITR, cfg.Epoch continue } - if equal(cfg.PITR, currConf) { + if isPITRConfigChanged(cfg.PITR, currConf) { continue } // there are differences between privious and new config in following - // fields: OplogOnly, OplogSpanMin, Priority - + // fields: Priority, OplogOnly, (OplogSpanMin) l.Info("pitr config has changed, re-config will be done") err = oplog.SetClusterStatus(ctx, a.leadConn, oplog.StatusReconfig) if err != nil { l.Error("error while setting cluster status reconfig: %v", err) } - currConf, currEpoh = updateCurrConf(cfg) + currConf, currEpoh = cfg.PITR, cfg.Epoch case <-ctx.Done(): return @@ -727,6 +704,24 @@ func (a *Agent) pitrConfigMonitor(ctx context.Context, firstConf *config.Config) } } +func isPITRConfigChanged(c1, c2 *config.PITRConf) bool { + if c1 == nil || c2 == nil { + return c1 == c2 + } + if c1.OplogOnly != c2.OplogOnly { + return false + } + // OplogSpanMin is compared and updated in the main pitr loop for now + // if c1.OplogSpanMin != c2.OplogSpanMin { + // return false + // } + if !maps.Equal(c1.Priority, c2.Priority) { + return false + } + + return true +} + func (a *Agent) pitrTopoMonitor(ctx context.Context) { l := log.LogEventFromContext(ctx) l.Debug("start pitr topo monitor") From d77a4fd10d00cf30cf3d96455f92dcddb935598a Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Sun, 21 Jul 2024 19:08:38 +0200 Subject: [PATCH 124/203] Fix logging of errors within pitr loop --- cmd/pbm-agent/pitr.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index c3fef69d1..bc74688b0 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -177,7 +177,7 @@ func (a *Agent) pitr(ctx context.Context) error { return nil } - return err + return errors.Wrap(err, "can slicing now") } if p := a.getPitr(); p != nil { @@ -213,7 +213,6 @@ func (a *Agent) pitr(ctx context.Context) error { // the other node does successfully slice nodeInfo, err := topo.GetNodeInfoExt(ctx, a.nodeConn) if err != nil { - l.Error("get node info: %v", err) return errors.Wrap(err, "get node info") } @@ -236,7 +235,6 @@ func (a *Agent) pitr(ctx context.Context) error { nominated, err := a.waitNominationForPITR(ctx, nodeInfo.SetName, nodeInfo.Me) if err != nil { - l.Error("wait for pitr nomination: %v", err) return errors.Wrap(err, "wait nomination for pitr") } if !nominated { @@ -279,8 +277,7 @@ func (a *Agent) pitr(ctx context.Context) error { if err := lck.Release(); err != nil { l.Error("release lock: %v", err) } - err = errors.Wrap(err, "unable to get storage configuration") - return err + return errors.Wrap(err, "unable to get storage configuration") } s := slicer.NewSlicer(a.brief.SetName, a.leadConn, a.nodeConn, stg, cfg, log.FromContext(ctx)) @@ -295,8 +292,7 @@ func (a *Agent) pitr(ctx context.Context) error { if err := lck.Release(); err != nil { l.Error("release lock: %v", err) } - err = errors.Wrap(err, "catchup") - return err + return errors.Wrap(err, "catchup") } go func() { From d7bc06331b7eabaf9eb697c9f30364148575ec3c Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Sun, 21 Jul 2024 19:25:50 +0200 Subject: [PATCH 125/203] Fix stopping slicer logic for reconfig and errors ... 
cluster status --- cmd/pbm-agent/pitr.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index bc74688b0..e127dc729 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -320,12 +320,13 @@ func (a *Agent) pitr(ctx context.Context) error { for { select { case <-tk.C: - if reconf := a.isPITRClusterStatus(ctx, oplog.StatusReconfig); reconf { + cStatus := a.getPITRClusterStatus(ctx) + if cStatus == oplog.StatusReconfig { l.Debug("stop slicing because of reconfig") stopSlicing() return } - if pitrErr := a.isPITRClusterStatus(ctx, oplog.StatusError); pitrErr { + if cStatus == oplog.StatusError { l.Debug("stop slicing because of error") stopSlicing() return @@ -630,19 +631,19 @@ func (a *Agent) reconcileReadyStatus(ctx context.Context, agents []topo.AgentSta } } -// isPITRClusterStatus checks within pbmPITR collection if cluster status -// is set to specified status. -func (a *Agent) isPITRClusterStatus(ctx context.Context, status oplog.Status) bool { +// getPITRClusterStatus gets cluster status from pbmPITR collection. +// In case of error, it returns StatusUnset and log the error. +func (a *Agent) getPITRClusterStatus(ctx context.Context) oplog.Status { l := log.LogEventFromContext(ctx) meta, err := oplog.GetMeta(ctx, a.leadConn) if err != nil { - if errors.Is(err, errors.ErrNotFound) { - return false + if !errors.Is(err, errors.ErrNotFound) { + l.Error("getting metta for reconfig status check: %v", err) } - l.Error("getting metta for reconfig status check: %v", err) + return oplog.StatusUnset } - return meta.Status == status + return meta.Status } // pitrConfigMonitor watches changes in PITR section within PBM configuration. From 29096bf576a46bb4c27bc716ff771c12ec473a18 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 22 Jul 2024 16:27:14 +0200 Subject: [PATCH 126/203] Refactor priority calculation logic --- cmd/pbm-agent/backup.go | 7 ++-- cmd/pbm-agent/pitr.go | 6 +--- pbm/prio/priority.go | 75 +++++++++++++++++++++++---------------- pbm/prio/priority_test.go | 25 +++---------- 4 files changed, 52 insertions(+), 61 deletions(-) diff --git a/cmd/pbm-agent/backup.go b/cmd/pbm-agent/backup.go index b44df9b14..264460e2e 100644 --- a/cmd/pbm-agent/backup.go +++ b/cmd/pbm-agent/backup.go @@ -163,11 +163,8 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, validCandidates = append(validCandidates, s) } - nodes, err := prio.CalcNodesPriority(c, cfg.Backup.Priority, validCandidates) - if err != nil { - l.Error("get nodes priority: %v", err) - return - } + nodes := prio.CalcNodesPriority(c, cfg.Backup.Priority, validCandidates) + shards, err := topo.ClusterMembers(ctx, a.leadConn.MongoClient()) if err != nil { l.Error("get cluster members: %v", err) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index e127dc729..58bb302d0 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -395,11 +395,7 @@ func (a *Agent) leadNomination( return } - nodes, err := prio.CalcNodesPriority(nil, cfgPrio, agents) - if err != nil { - l.Error("get nodes priority: %v", err) - return - } + nodes := prio.CalcNodesPriority(nil, cfgPrio, agents) shards, err := topo.ClusterMembers(ctx, a.leadConn.MongoClient()) if err != nil { diff --git a/pbm/prio/priority.go b/pbm/prio/priority.go index 30b491d2b..403d07a60 100644 --- a/pbm/prio/priority.go +++ b/pbm/prio/priority.go @@ -36,8 +36,6 @@ func (n *NodesPriority) RS(rs string) [][]string { return n.m[rs].list() } -type 
agentScore func(topo.AgentStat) float64 - // CalcNodesPriority calculates and returns list nodes grouped by // backup/pitr preferences in descended order. // First are nodes with the highest priority. @@ -47,34 +45,7 @@ func CalcNodesPriority( c map[string]float64, cfgPrio config.Priority, agents []topo.AgentStat, -) (*NodesPriority, error) { - // if config level priorities (cfgPrio) aren't set, apply defaults - f := func(a topo.AgentStat) float64 { - if coeff, ok := c[a.Node]; ok && c != nil { - return defaultScore * coeff - } else if a.State == defs.NodeStatePrimary { - return defaultScore / 2 - } else if a.Hidden { - return defaultScore * 2 - } - return defaultScore - } - - if cfgPrio != nil || len(cfgPrio) > 0 { - f = func(a topo.AgentStat) float64 { - sc, ok := cfgPrio[a.Node] - if !ok || sc < 0 { - return defaultScore - } - - return sc - } - } - - return calcNodesPriority(agents, f), nil -} - -func calcNodesPriority(agents []topo.AgentStat, f agentScore) *NodesPriority { +) *NodesPriority { scores := NewNodesPriority() for _, a := range agents { @@ -82,7 +53,7 @@ func calcNodesPriority(agents []topo.AgentStat, f agentScore) *NodesPriority { continue } - scores.Add(a.RS, a.Node, f(a)) + scores.Add(a.RS, a.Node, CalcPriorityForAgent(a, cfgPrio, c)) } return scores @@ -111,3 +82,45 @@ func (s nodeScores) list() [][]string { return ret } + +// CalcPriorityForAgent calculates priority for the specified agent. +func CalcPriorityForAgent( + agent topo.AgentStat, + cfgPrio config.Priority, + coeffRules map[string]float64, +) float64 { + if cfgPrio != nil || len(cfgPrio) > 0 { + // apply config level priorities + return explicitPrioCalc(agent, cfgPrio) + } + + // if config level priorities (cfgPrio) aren't set, + // apply priorities based on topology rules + return implicitPrioCalc(agent, coeffRules) +} + +// implicitPrioCalc provides priority calculation based on topology rules. +// Instead of using explicitly specified priority numbers, topology rules are +// allied for primary, secondary and hidden member. +func implicitPrioCalc(a topo.AgentStat, rule map[string]float64) float64 { + if coeff, ok := rule[a.Node]; ok && rule != nil { + return defaultScore * coeff + } else if a.State == defs.NodeStatePrimary { + return defaultScore / 2 + } else if a.Hidden { + return defaultScore * 2 + } + return defaultScore +} + +// explicitPrioCalc uses priority numbers from configuration to calculate +// priority for the specified agent. +// In case when priority is not specified, default one is used instead. 
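+// For example, with rule = map[string]float64{"node01:27017": 2.5} (a
+// hypothetical host:port key), the agent on node01:27017 scores 2.5, while
+// every other agent, and any agent mapped to a negative value, falls back
+// to defaultScore.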
+func explicitPrioCalc(a topo.AgentStat, rule map[string]float64) float64 { + sc, ok := rule[a.Node] + if !ok || sc < 0 { + return defaultScore + } + + return sc +} diff --git a/pbm/prio/priority_test.go b/pbm/prio/priority_test.go index 66af8ebd3..c08e42dec 100644 --- a/pbm/prio/priority_test.go +++ b/pbm/prio/priority_test.go @@ -71,10 +71,7 @@ func TestCalcNodesPriority(t *testing.T) { } for _, tC := range testCases { t.Run(tC.desc, func(t *testing.T) { - np, err := CalcNodesPriority(nil, nil, tC.agents) - if err != nil { - t.Fatalf("unexpected error while calculating nodes priority: %v", err) - } + np := CalcNodesPriority(nil, nil, tC.agents) prioByScore := np.RS(tC.agents[0].RS) @@ -149,10 +146,7 @@ func TestCalcNodesPriority(t *testing.T) { } for _, tC := range testCases { t.Run(tC.desc, func(t *testing.T) { - np, err := CalcNodesPriority(nil, nil, tC.agents) - if err != nil { - t.Fatalf("unexpected error while calculating nodes priority: %v", err) - } + np := CalcNodesPriority(nil, nil, tC.agents) prioByScoreCfg := np.RS("cfg") prioByScoreRs0 := np.RS("rs0") @@ -261,10 +255,7 @@ func TestCalcNodesPriority(t *testing.T) { } for _, tC := range testCases { t.Run(tC.desc, func(t *testing.T) { - np, err := CalcNodesPriority(nil, tC.expPrio, tC.agents) - if err != nil { - t.Fatalf("unexpected error while calculating nodes priority: %v", err) - } + np := CalcNodesPriority(nil, tC.expPrio, tC.agents) prioByScore := np.RS(tC.agents[0].RS) @@ -356,10 +347,7 @@ func TestCalcNodesPriority(t *testing.T) { } for _, tC := range testCases { t.Run(tC.desc, func(t *testing.T) { - np, err := CalcNodesPriority(nil, tC.expPrio, tC.agents) - if err != nil { - t.Fatalf("unexpected error while calculating nodes priority: %v", err) - } + np := CalcNodesPriority(nil, tC.expPrio, tC.agents) prioByScoreCfg := np.RS("cfg") prioByScoreRs0 := np.RS("rs0") @@ -393,10 +381,7 @@ func TestCalcNodesPriority(t *testing.T) { "rs03": 3.0, } - np, err := CalcNodesPriority(c, nil, agents) - if err != nil { - t.Fatalf("unexpected error while calculating nodes priority: %v", err) - } + np := CalcNodesPriority(c, nil, agents) prioByScore := np.RS(agents[0].RS) if !reflect.DeepEqual(prioByScore, res) { From 38bed35224c31e291e51badff8a711ee87a89a5e Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 22 Jul 2024 19:14:09 +0200 Subject: [PATCH 127/203] Add PITR and Bcp priority statuses within status cmd --- cmd/pbm/status.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go index e1053eff0..5b65f3111 100644 --- a/cmd/pbm/status.go +++ b/cmd/pbm/status.go @@ -24,6 +24,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/lock" "github.com/percona/percona-backup-mongodb/pbm/log" "github.com/percona/percona-backup-mongodb/pbm/oplog" + "github.com/percona/percona-backup-mongodb/pbm/prio" "github.com/percona/percona-backup-mongodb/pbm/restore" "github.com/percona/percona-backup-mongodb/pbm/slicer" "github.com/percona/percona-backup-mongodb/pbm/storage" @@ -187,11 +188,13 @@ const ( ) type node struct { - Host string `json:"host"` - Ver string `json:"agent"` - Role RSRole `json:"role"` - OK bool `json:"ok"` - Errs []string `json:"errors,omitempty"` + Host string `json:"host"` + Ver string `json:"agent"` + Role RSRole `json:"role"` + PrioPITR float64 `json:"prio_pitr"` + PrioBcp float64 `json:"prio_backup"` + OK bool `json:"ok"` + Errs []string `json:"errors,omitempty"` } func (n node) String() string { @@ -204,7 +207,7 @@ func (n node) String() 
string {
 		role = RoleSecondary
 	}
 
-	s := fmt.Sprintf("%s [%s]: pbm-agent %v", n.Host, role, n.Ver)
+	s := fmt.Sprintf("%s [%s], Bkp Prio: [%.1f], PITR Prio: [%.1f]: pbm-agent [%s]", n.Host, role, n.PrioBcp, n.PrioPITR, n.Ver)
 	if n.OK {
 		s += " OK"
 		return s
@@ -239,6 +242,11 @@ func clusterStatus(ctx context.Context, conn connect.Client, uri string) (fmt.St
 		return nil, errors.Wrap(err, "read cluster time")
 	}
 
+	cfg, err := config.GetConfig(ctx, conn)
+	if err != nil {
+		return nil, errors.Wrap(err, "fetch config")
+	}
+
 	eg, ctx := errgroup.WithContext(ctx)
 	m := sync.Mutex{}
 
@@ -294,6 +302,8 @@
 			}
 			nd.Ver = "v" + stat.AgentVer
 			nd.OK, nd.Errs = stat.OK()
+			nd.PrioBcp = prio.CalcPriorityForAgent(stat, cfg.Backup.Priority, nil)
+			nd.PrioPITR = prio.CalcPriorityForAgent(stat, cfg.PITR.Priority, nil)
 		}
 
 		m.Lock()

From fb4e868809bc6cdeb36b632a5b36ad94d6be5684 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Tue, 23 Jul 2024 11:08:45 +0200
Subject: [PATCH 128/203] Expand status with pitr running members info

---
 cmd/pbm/status.go | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go
index 5b65f3111..aa71bd1a3 100644
--- a/cmd/pbm/status.go
+++ b/cmd/pbm/status.go
@@ -352,9 +352,10 @@ func directConnect(ctx context.Context, uri, hosts string) (*mongo.Client, error
 }
 
 type pitrStat struct {
-	InConf  bool   `json:"conf"`
-	Running bool   `json:"run"`
-	Err     string `json:"error,omitempty"`
+	InConf       bool     `json:"conf"`
+	Running      bool     `json:"run"`
+	RunningNodes []string `json:"nodes"`
+	Err          string   `json:"error,omitempty"`
 }
 
 func (p pitrStat) String() string {
@@ -363,6 +364,13 @@ func (p pitrStat) String() string {
 		status = "ON"
 	}
 	s := fmt.Sprintf("Status [%s]", status)
+	runningNodes := ""
+	for _, n := range p.RunningNodes {
+		runningNodes += fmt.Sprintf("%s; ", n)
+	}
+	if len(runningNodes) != 0 {
+		s += fmt.Sprintf("\nRunning members: %s", runningNodes)
+	}
 	if p.Err != "" {
 		s += fmt.Sprintf("\n! ERROR while running PITR backup: %s", p.Err)
 	}
@@ -382,6 +390,13 @@ func getPitrStatus(ctx context.Context, conn connect.Client) (fmt.Stringer, erro
 		return p, errors.Wrap(err, "unable check PITR running status")
 	}
 
+	if p.InConf && p.Running {
+		p.RunningNodes, err = oplog.GetAgentsWithACK(ctx, conn)
+		if err != nil && err != errors.ErrNotFound {
+			return p, errors.Wrap(err, "unable to fetch PITR running nodes")
+		}
+	}
+
 	p.Err, err = getPitrErr(ctx, conn)
 
 	return p, errors.Wrap(err, "check for errors")

From f4cfc8da8bf47f2f2bc50cd11384abda2a6fbd21 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Tue, 23 Jul 2024 16:34:24 +0200
Subject: [PATCH 129/203] Hide priority in status cmd under the flag

Flag name is: --priority or -p
---
 cmd/pbm/main.go               |  4 ++++
 cmd/pbm/status.go             | 22 +++++++++++++++-------
 e2e-tests/docker/conf/p2.yaml | 12 ++++++++++++
 3 files changed, 31 insertions(+), 7 deletions(-)
 create mode 100644 e2e-tests/docker/conf/p2.yaml

diff --git a/cmd/pbm/main.go b/cmd/pbm/main.go
index 7e89afda0..532c45c78 100644
--- a/cmd/pbm/main.go
+++ b/cmd/pbm/main.go
@@ -401,6 +401,10 @@ func main() {
 	statusCmd.Flag("sections", "Sections of status to display <cluster>/<pitr>/<running>/<backups>.").
 		Short('s').
 		EnumsVar(&statusOpts.sections, "cluster", "pitr", "running", "backups")
+	statusCmd.Flag("priority", "Show backup and PITR priorities").
+		Short('p').
+		Default("false").
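+		// When set, the cluster section of the status output also shows the
+		// computed backup and PITR priority for every pbm-agent node.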
+ BoolVar(&statusOpts.priority) describeRestoreCmd := pbmCmd.Command("describe-restore", "Describe restore") describeRestoreOpts := descrRestoreOpts{} diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go index aa71bd1a3..3411dc331 100644 --- a/cmd/pbm/status.go +++ b/cmd/pbm/status.go @@ -37,6 +37,7 @@ import ( type statusOptions struct { rsMap string sections []string + priority bool } type statusOut struct { @@ -119,7 +120,7 @@ func status( { "cluster", "Cluster", nil, func(ctx context.Context, conn connect.Client) (fmt.Stringer, error) { - return clusterStatus(ctx, conn, curi) + return clusterStatus(ctx, conn, curi, opts.priority) }, }, {"pitr", "PITR incremental backup", nil, getPitrStatus}, @@ -191,8 +192,8 @@ type node struct { Host string `json:"host"` Ver string `json:"agent"` Role RSRole `json:"role"` - PrioPITR float64 `json:"prio_pitr"` - PrioBcp float64 `json:"prio_backup"` + PrioPITR string `json:"prio_pitr"` + PrioBcp string `json:"prio_backup"` OK bool `json:"ok"` Errs []string `json:"errors,omitempty"` } @@ -207,7 +208,12 @@ func (n node) String() string { role = RoleSecondary } - s := fmt.Sprintf("%s [%s], Bkp Prio: [%.1f], PITR Prio: [%.1f]: pbm-agent [%s]", n.Host, role, n.PrioBcp, n.PrioPITR, n.Ver) + var s string + if len(n.PrioBcp) == 0 || len(n.PrioPITR) == 0 { + s = fmt.Sprintf("%s [%s]: pbm-agent [%s]", n.Host, role, n.Ver) + } else { + s = fmt.Sprintf("%s [%s], Bkp Prio: [%s], PITR Prio: [%s]: pbm-agent [%s]", n.Host, role, n.PrioBcp, n.PrioPITR, n.Ver) + } if n.OK { s += " OK" return s @@ -231,7 +237,7 @@ func (c cluster) String() string { return s } -func clusterStatus(ctx context.Context, conn connect.Client, uri string) (fmt.Stringer, error) { +func clusterStatus(ctx context.Context, conn connect.Client, uri string, prioOpt bool) (fmt.Stringer, error) { clstr, err := topo.ClusterMembers(ctx, conn.MongoClient()) if err != nil { return nil, errors.Wrap(err, "get cluster members") @@ -302,8 +308,10 @@ func clusterStatus(ctx context.Context, conn connect.Client, uri string) (fmt.St } nd.Ver = "v" + stat.AgentVer nd.OK, nd.Errs = stat.OK() - nd.PrioBcp = prio.CalcPriorityForAgent(stat, cfg.Backup.Priority, nil) - nd.PrioPITR = prio.CalcPriorityForAgent(stat, cfg.PITR.Priority, nil) + if prioOpt { + nd.PrioBcp = fmt.Sprintf("%.1f", prio.CalcPriorityForAgent(stat, cfg.Backup.Priority, nil)) + nd.PrioPITR = fmt.Sprintf("%.1f", prio.CalcPriorityForAgent(stat, cfg.PITR.Priority, nil)) + } } m.Lock() diff --git a/e2e-tests/docker/conf/p2.yaml b/e2e-tests/docker/conf/p2.yaml new file mode 100644 index 000000000..b8946f518 --- /dev/null +++ b/e2e-tests/docker/conf/p2.yaml @@ -0,0 +1,12 @@ +storage: + type: s3 + s3: + endpointUrl: http://minio:9000 + bucket: bcp + prefix: pbme2etest + credentials: + access-key-id: "minio1234" + secret-access-key: "minio1234" +pitr: + enabled: true + From 8f2dc9a8d3472a33f32574ec61faed8ccbf7881a Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 23 Jul 2024 17:25:08 +0200 Subject: [PATCH 130/203] Fix reviewdog issues --- cmd/pbm/status.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go index 3411dc331..b732030dc 100644 --- a/cmd/pbm/status.go +++ b/cmd/pbm/status.go @@ -212,7 +212,8 @@ func (n node) String() string { if len(n.PrioBcp) == 0 || len(n.PrioPITR) == 0 { s = fmt.Sprintf("%s [%s]: pbm-agent [%s]", n.Host, role, n.Ver) } else { - s = fmt.Sprintf("%s [%s], Bkp Prio: [%s], PITR Prio: [%s]: pbm-agent [%s]", n.Host, role, n.PrioBcp, n.PrioPITR, n.Ver) + s = fmt.Sprintf("%s 
[%s], Bkp Prio: [%s], PITR Prio: [%s]: pbm-agent [%s]", + n.Host, role, n.PrioBcp, n.PrioPITR, n.Ver) } if n.OK { s += " OK" @@ -400,7 +401,7 @@ func getPitrStatus(ctx context.Context, conn connect.Client) (fmt.Stringer, erro if p.InConf && p.Running { p.RunningNodes, err = oplog.GetAgentsWithACK(ctx, conn) - if err != nil && err != errors.ErrNotFound { + if err != nil && !errors.Is(err, errors.ErrNotFound) { return p, errors.Wrap(err, "unable to fetch PITR running nodes") } } From e7ef8ee5932a5629bc96444eae2d48c0582f50b9 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 24 Jul 2024 09:31:03 +0200 Subject: [PATCH 131/203] Fix typo --- pbm/prio/priority.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbm/prio/priority.go b/pbm/prio/priority.go index 403d07a60..231cbd694 100644 --- a/pbm/prio/priority.go +++ b/pbm/prio/priority.go @@ -101,7 +101,7 @@ func CalcPriorityForAgent( // implicitPrioCalc provides priority calculation based on topology rules. // Instead of using explicitly specified priority numbers, topology rules are -// allied for primary, secondary and hidden member. +// applied for primary, secondary and hidden member. func implicitPrioCalc(a topo.AgentStat, rule map[string]float64) float64 { if coeff, ok := rule[a.Node]; ok && rule != nil { return defaultScore * coeff From e2a631f0ae806fb324a17c291979fefc26d8f4f3 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 24 Jul 2024 10:53:50 +0200 Subject: [PATCH 132/203] update mongo-tools (v100.10.0) --- go.mod | 26 +- go.sum | 74 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 3854 ++++++++++-- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../protocol/query/queryutil/queryutil.go | 4 +- .../aws/aws-sdk-go/service/s3/api.go | 309 +- .../service/s3/s3manager/upload_input.go | 2 +- .../aws/aws-sdk-go/service/ssooidc/api.go | 162 +- .../aws/aws-sdk-go/service/ssooidc/errors.go | 8 + .../github.com/klauspost/compress/README.md | 7 + .../compress/internal/snapref/encode_other.go | 2 +- .../klauspost/compress/s2/writer.go | 2 +- .../klauspost/compress/zstd/blockdec.go | 3 + .../klauspost/compress/zstd/blockenc.go | 20 + .../klauspost/compress/zstd/decoder.go | 2 +- .../klauspost/compress/zstd/enc_best.go | 12 + .../klauspost/compress/zstd/enc_better.go | 13 +- .../github.com/mongodb/mongo-tools/LICENSE.md | 14 +- .../mongo-tools/common/archive/spec.md | 131 +- .../mongo-tools/common/bsonutil/bsonutil.go | 18 + .../mongo-tools/common/bsonutil/indexes.go | 2 +- .../mongo-tools/common/db/buffered_bulk.go | 13 +- .../mongodb/mongo-tools/common/db/db.go | 34 + .../mongodb/mongo-tools/common/db/oplog.go | 25 +- .../mongodb/mongo-tools/common/db/optime.go | 1 + .../mongodb/mongo-tools/common/json/date.go | 3 +- .../mongodb/mongo-tools/common/json/decode.go | 2 +- .../mongodb/mongo-tools/common/json/encode.go | 5 +- .../mongodb/mongo-tools/common/json/fold.go | 2 +- .../mongodb/mongo-tools/common/json/indent.go | 2 +- .../mongo-tools/common/json/scanner.go | 2 +- .../mongodb/mongo-tools/common/json/stream.go | 2 +- .../mongodb/mongo-tools/common/json/tags.go | 2 +- .../mongo-tools/common/options/options.go | 23 +- .../mongodb/mongo-tools/common/txn/meta.go | 6 + .../mongodb/mongo-tools/common/util/bool.go | 3 +- .../mongo-tools/mongodump/metadata_dump.go | 3 +- .../mongodb/mongo-tools/mongodump/options.go | 2 +- .../mongodb/mongo-tools/mongodump/prepare.go | 2 +- .../mongo-tools/mongorestore/metadata.go | 15 + .../mongo-tools/mongorestore/options.go | 2 +- .../mongo-tools/mongorestore/restore.go | 20 +- 
.../github.com/montanaflynn/stats/.gitignore | 4 +- .../github.com/montanaflynn/stats/.travis.yml | 29 - .../montanaflynn/stats/CHANGELOG.md | 678 +- .../montanaflynn/stats/DOCUMENTATION.md | 40 +- vendor/github.com/montanaflynn/stats/LICENSE | 2 +- .../github.com/montanaflynn/stats/README.md | 29 +- .../github.com/montanaflynn/stats/describe.go | 81 + .../montanaflynn/stats/distances.go | 21 +- .../stats/geometric_distribution.go | 42 + .../bson/bsoncodec/array_codec.go | 7 +- .../bson/bsoncodec/byte_slice_codec.go | 25 +- .../bson/bsoncodec/default_value_decoders.go | 14 +- .../bson/bsoncodec/default_value_encoders.go | 12 +- .../bson/bsoncodec/empty_interface_codec.go | 24 +- .../mongo-driver/bson/bsoncodec/map_codec.go | 38 +- .../bson/bsoncodec/pointer_codec.go | 16 +- .../mongo-driver/bson/bsoncodec/registry.go | 9 +- .../bson/bsoncodec/slice_codec.go | 27 +- .../bson/bsoncodec/string_codec.go | 16 +- .../bson/bsoncodec/struct_codec.go | 62 +- .../mongo-driver/bson/bsoncodec/time_codec.go | 24 +- .../mongo-driver/bson/bsoncodec/uint_codec.go | 32 +- .../mongo-driver/bson/bsonrw/copier.go | 5 +- .../bson/bsonrw/extjson_parser.go | 2 +- .../bson/bsonrw/extjson_reader.go | 5 +- .../bson/bsonrw/extjson_wrappers.go | 4 +- .../mongo-driver/bson/bsonrw/json_scanner.go | 26 +- .../mongo-driver/bson/bsonrw/value_reader.go | 12 +- .../go.mongodb.org/mongo-driver/bson/doc.go | 89 +- .../mongo-driver/bson/primitive/decimal.go | 3 - .../mongo-driver/bson/primitive/objectid.go | 4 +- .../mongo-driver/bson/raw_value.go | 8 +- .../mongo-driver/bson/registry.go | 18 +- .../mongo-driver/event/monitoring.go | 6 +- .../mongo-driver/internal/csfle/csfle.go | 3 +- .../mongo-driver/internal/csot/csot.go | 6 +- .../mongo-driver/internal/logger/io_sink.go | 7 +- .../mongo-driver/internal/logger/logger.go | 2 +- .../mongo-driver/mongo/bulk_write.go | 17 +- .../mongo-driver/mongo/change_stream.go | 18 +- .../mongo-driver/mongo/client.go | 6 +- .../mongo-driver/mongo/client_encryption.go | 2 +- .../mongo-driver/mongo/collection.go | 48 +- .../mongo-driver/mongo/cursor.go | 12 +- .../mongo-driver/mongo/database.go | 23 +- .../mongo/description/server_selector.go | 12 +- .../mongo-driver/mongo/errors.go | 4 +- .../mongo-driver/mongo/index_view.go | 10 + .../mongo-driver/mongo/mongo.go | 10 +- .../mongo/options/clientoptions.go | 27 +- .../mongo/options/collectionoptions.go | 3 + .../mongo-driver/mongo/options/dboptions.go | 3 + .../mongo/options/mongooptions.go | 2 +- .../mongo/options/searchindexoptions.go | 7 + .../mongo-driver/mongo/search_index_view.go | 43 +- .../mongo/writeconcern/writeconcern.go | 4 +- .../mongo-driver/version/version.go | 2 +- .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 40 +- .../mongo-driver/x/bsonx/bsoncore/doc.go | 23 +- .../mongo-driver/x/mongo/driver/DESIGN.md | 27 - .../x/mongo/driver/auth/creds/doc.go | 14 + .../mongo-driver/x/mongo/driver/auth/doc.go | 21 +- .../mongo-driver/x/mongo/driver/auth/sasl.go | 4 +- .../mongo-driver/x/mongo/driver/auth/scram.go | 3 +- .../x/mongo/driver/batch_cursor.go | 4 +- .../x/mongo/driver/compression.go | 21 +- .../x/mongo/driver/connstring/connstring.go | 1274 ++-- .../mongo-driver/x/mongo/driver/crypt.go | 3 +- .../mongo-driver/x/mongo/driver/dns/dns.go | 11 +- .../mongo-driver/x/mongo/driver/driver.go | 7 + .../mongo-driver/x/mongo/driver/errors.go | 19 +- .../mongocrypt/mongocrypt_not_enabled.go | 7 + .../x/mongo/driver/mongocrypt/options/doc.go | 14 + .../x/mongo/driver/ocsp/config.go | 4 +- .../mongo-driver/x/mongo/driver/ocsp/ocsp.go | 
13 +- .../mongo-driver/x/mongo/driver/operation.go | 71 +- .../x/mongo/driver/operation/aggregate.go | 14 + .../x/mongo/driver/operation/command.go | 13 - .../driver/operation/create_search_indexes.go | 42 +- .../x/mongo/driver/operation/doc.go | 14 + .../driver/operation/drop_search_index.go | 37 +- .../x/mongo/driver/operation/find.go | 14 + .../x/mongo/driver/operation/hello.go | 10 +- .../driver/operation/update_search_index.go | 39 +- .../x/mongo/driver/session/doc.go | 14 + .../x/mongo/driver/topology/connection.go | 40 +- .../x/mongo/driver/topology/errors.go | 7 +- .../x/mongo/driver/topology/pool.go | 294 +- .../topology/pool_generation_counter.go | 14 +- .../x/mongo/driver/topology/rtt_monitor.go | 19 +- .../x/mongo/driver/topology/server.go | 53 +- .../x/mongo/driver/topology/topology.go | 76 +- .../x/mongo/driver/wiremessage/wiremessage.go | 40 +- vendor/golang.org/x/crypto/ocsp/ocsp.go | 2 +- vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 2 +- vendor/golang.org/x/crypto/scrypt/scrypt.go | 2 +- vendor/golang.org/x/exp/slices/cmp.go | 44 + vendor/golang.org/x/exp/slices/slices.go | 417 +- vendor/golang.org/x/exp/slices/sort.go | 125 +- .../slices/{zsortfunc.go => zsortanyfunc.go} | 154 +- .../golang.org/x/exp/slices/zsortordered.go | 34 +- .../golang.org/x/net/http/httpguts/httplex.go | 13 +- vendor/golang.org/x/net/http2/frame.go | 13 +- vendor/golang.org/x/net/http2/server.go | 11 +- vendor/golang.org/x/net/http2/transport.go | 15 +- vendor/golang.org/x/sys/cpu/cpu.go | 1 + vendor/golang.org/x/sys/cpu/cpu_arm64.go | 10 + vendor/golang.org/x/sys/cpu/cpu_arm64.s | 8 + vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 1 + .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 5 + vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 665 +- vendor/golang.org/x/sys/unix/bpxsvc_zos.go | 657 ++ vendor/golang.org/x/sys/unix/bpxsvc_zos.s | 192 + vendor/golang.org/x/sys/unix/epoll_zos.go | 220 - vendor/golang.org/x/sys/unix/fstatfs_zos.go | 163 - vendor/golang.org/x/sys/unix/mkerrors.sh | 2 + vendor/golang.org/x/sys/unix/mmap_nomremap.go | 2 +- vendor/golang.org/x/sys/unix/mremap.go | 5 + vendor/golang.org/x/sys/unix/pagesize_unix.go | 2 +- .../x/sys/unix/readdirent_getdirentries.go | 2 +- vendor/golang.org/x/sys/unix/sockcmsg_zos.go | 58 + .../golang.org/x/sys/unix/symaddr_zos_s390x.s | 75 + .../golang.org/x/sys/unix/syscall_darwin.go | 12 + vendor/golang.org/x/sys/unix/syscall_unix.go | 9 + .../x/sys/unix/syscall_zos_s390x.go | 1509 ++++- vendor/golang.org/x/sys/unix/sysvshm_unix.go | 2 +- .../x/sys/unix/sysvshm_unix_other.go | 2 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 29 +- .../x/sys/unix/zerrors_linux_386.go | 1 + .../x/sys/unix/zerrors_linux_amd64.go | 1 + .../x/sys/unix/zerrors_linux_arm64.go | 1 + .../x/sys/unix/zerrors_zos_s390x.go | 233 +- .../x/sys/unix/zsymaddr_zos_s390x.s | 364 ++ .../x/sys/unix/zsyscall_darwin_amd64.go | 33 + .../x/sys/unix/zsyscall_darwin_amd64.s | 10 + .../x/sys/unix/zsyscall_darwin_arm64.go | 33 + .../x/sys/unix/zsyscall_darwin_arm64.s | 10 + .../x/sys/unix/zsyscall_zos_s390x.go | 3113 ++++++++-- .../x/sys/unix/zsysnum_linux_386.go | 5 + .../x/sys/unix/zsysnum_linux_amd64.go | 5 + .../x/sys/unix/zsysnum_linux_arm.go | 5 + .../x/sys/unix/zsysnum_linux_arm64.go | 5 + .../x/sys/unix/zsysnum_linux_loong64.go | 5 + .../x/sys/unix/zsysnum_linux_mips.go | 5 + .../x/sys/unix/zsysnum_linux_mips64.go | 5 + .../x/sys/unix/zsysnum_linux_mips64le.go | 5 + .../x/sys/unix/zsysnum_linux_mipsle.go | 5 + .../x/sys/unix/zsysnum_linux_ppc.go | 5 + 
.../x/sys/unix/zsysnum_linux_ppc64.go | 5 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 5 + .../x/sys/unix/zsysnum_linux_riscv64.go | 5 + .../x/sys/unix/zsysnum_linux_s390x.go | 5 + .../x/sys/unix/zsysnum_linux_sparc64.go | 5 + .../x/sys/unix/zsysnum_zos_s390x.go | 5507 +++++++++-------- vendor/golang.org/x/sys/unix/ztypes_linux.go | 59 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 8 - .../x/sys/unix/ztypes_linux_amd64.go | 9 - .../golang.org/x/sys/unix/ztypes_linux_arm.go | 9 - .../x/sys/unix/ztypes_linux_arm64.go | 9 - .../x/sys/unix/ztypes_linux_loong64.go | 9 - .../x/sys/unix/ztypes_linux_mips.go | 9 - .../x/sys/unix/ztypes_linux_mips64.go | 9 - .../x/sys/unix/ztypes_linux_mips64le.go | 9 - .../x/sys/unix/ztypes_linux_mipsle.go | 9 - .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 9 - .../x/sys/unix/ztypes_linux_ppc64.go | 9 - .../x/sys/unix/ztypes_linux_ppc64le.go | 9 - .../x/sys/unix/ztypes_linux_riscv64.go | 9 - .../x/sys/unix/ztypes_linux_s390x.go | 9 - .../x/sys/unix/ztypes_linux_sparc64.go | 9 - .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 146 +- vendor/golang.org/x/sys/windows/aliases.go | 2 +- vendor/golang.org/x/sys/windows/empty.s | 8 - .../x/sys/windows/security_windows.go | 25 +- .../x/sys/windows/syscall_windows.go | 82 + .../golang.org/x/sys/windows/types_windows.go | 24 + .../x/sys/windows/zsyscall_windows.go | 144 +- vendor/modules.txt | 34 +- 220 files changed, 16348 insertions(+), 6995 deletions(-) delete mode 100644 vendor/github.com/montanaflynn/stats/.travis.yml create mode 100644 vendor/github.com/montanaflynn/stats/describe.go create mode 100644 vendor/github.com/montanaflynn/stats/geometric_distribution.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md create mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go create mode 100644 vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go create mode 100644 vendor/golang.org/x/exp/slices/cmp.go rename vendor/golang.org/x/exp/slices/{zsortfunc.go => zsortanyfunc.go} (64%) create mode 100644 vendor/golang.org/x/sys/unix/bpxsvc_zos.go create mode 100644 vendor/golang.org/x/sys/unix/bpxsvc_zos.s delete mode 100644 vendor/golang.org/x/sys/unix/epoll_zos.go delete mode 100644 vendor/golang.org/x/sys/unix/fstatfs_zos.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_zos.go create mode 100644 vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s create mode 100644 vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s delete mode 100644 vendor/golang.org/x/sys/windows/empty.s diff --git a/go.mod b/go.mod index 5e21daa69..6fbdd764f 100644 --- a/go.mod +++ b/go.mod @@ -6,19 +6,19 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 github.com/alecthomas/kingpin v2.2.6+incompatible - github.com/aws/aws-sdk-go v1.50.31 + github.com/aws/aws-sdk-go v1.55.1 github.com/docker/docker v26.1.2+incompatible github.com/golang/snappy v0.0.4 github.com/google/uuid v1.6.0 - github.com/klauspost/compress v1.17.7 + github.com/klauspost/compress v1.17.8 github.com/klauspost/pgzip v1.2.6 github.com/minio/minio-go v6.0.14+incompatible - github.com/mongodb/mongo-tools v0.0.0-20231117185435-bf0bef9e9f19 + github.com/mongodb/mongo-tools v0.0.0-20240723193119-837c2bc263f4 github.com/pierrec/lz4 v2.6.1+incompatible 
github.com/pkg/errors v0.9.1 - go.mongodb.org/mongo-driver v1.13.0 - golang.org/x/mod v0.16.0 - golang.org/x/sync v0.6.0 + go.mongodb.org/mongo-driver v1.16.0 + golang.org/x/mod v0.19.0 + golang.org/x/sync v0.7.0 gopkg.in/yaml.v2 v2.4.0 ) @@ -42,7 +42,7 @@ require ( github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.5.0 // indirect - github.com/montanaflynn/stats v0.6.6 // indirect + github.com/montanaflynn/stats v0.7.1 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect @@ -56,12 +56,12 @@ require ( go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/sdk v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/crypto v0.21.0 // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect - golang.org/x/net v0.23.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/exp v0.0.0-20240529005216-23cca8864a10 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sys v0.22.0 // indirect + golang.org/x/term v0.22.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect gotest.tools/v3 v3.5.1 // indirect ) diff --git a/go.sum b/go.sum index 9cf27b0f1..617625bee 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafo github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/aws/aws-sdk-go v1.50.31 h1:gx2NRLLEDUmQFC4YUsfMUKkGCwpXVO8ijUecq/nOQGA= -github.com/aws/aws-sdk-go v1.50.31/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.55.1 h1:ZTNPmbRMxaK5RlTJrBullX9r/rF1MPf3yAJOLlwDiT8= +github.com/aws/aws-sdk-go v1.55.1/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -57,10 +57,8 @@ github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1 github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -80,9 +78,8 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= -github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -100,11 +97,10 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= -github.com/mongodb/mongo-tools v0.0.0-20231117185435-bf0bef9e9f19 h1:B0nhjnm3za73rABZa3HdMhn9WuOXPPHweBBqhZnWinI= -github.com/mongodb/mongo-tools v0.0.0-20231117185435-bf0bef9e9f19/go.mod h1:2Rl3k3e333g2AJN74N9hx9N4IIhB0IcTU3m92oNsOyE= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/montanaflynn/stats v0.6.6 h1:Duep6KMIDpY4Yo11iFsvyqJDyfzLF9+sndUKT+v64GQ= -github.com/montanaflynn/stats v0.6.6/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/mongodb/mongo-tools v0.0.0-20240723193119-837c2bc263f4 h1:23sRjM+3p+4yFL9tOg9qfNJHtBMl5PN5XA2iLWrYR+Y= +github.com/mongodb/mongo-tools v0.0.0-20240723193119-837c2bc263f4/go.mod h1:mq5q2Rrbw6+VEtDc+p5haujgWoQv3foL2YS5YISr2UA= +github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= +github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= @@ -126,30 +122,29 @@ github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/f github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= 
+github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= +github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= +github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= +github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.mongodb.org/mongo-driver v1.13.0 h1:67DgFFjYOCMWdtTEmKFpV3ffWlFnh+CYZ8ZS/tXWUfY= -go.mongodb.org/mongo-driver v1.13.0/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= +go.mongodb.org/mongo-driver v1.16.0 h1:tpRsfBJMROVHKpdGyc1BBEzzjDUWjItxbVSZ8Ls4BQ4= +go.mongodb.org/mongo-driver v1.16.0/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= @@ -171,31 +166,29 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod 
h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/exp v0.0.0-20240529005216-23cca8864a10 h1:vpzMC/iZhYFAjJzHU0Cfuq+w1vLLsF2vLkDrPjzKYck= +golang.org/x/exp v0.0.0-20240529005216-23cca8864a10/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= -golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -203,24 +196,21 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 25055d6b8..d517a35a4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -74,7 +74,9 @@ const ( ) // AWS ISOE (Europe) partition's regions. -const () +const ( + EuIsoeWest1RegionID = "eu-isoe-west-1" // EU ISOE West. +) // AWS ISOF partition's regions. 
const () @@ -244,13 +246,6 @@ var awsPartition = partition{ }, }, Services: services{ - "a4b": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, "access-analyzer": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -298,6 +293,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -331,6 +332,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -807,30 +817,60 @@ var awsPartition = partition{ }, "airflow": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -840,6 +880,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -849,6 +898,9 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -865,6 +917,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -923,6 +978,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -981,6 +1039,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -1036,18 +1097,33 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, 
endpointKey{ Region: "eu-central-1", }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -1900,6 +1976,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -1924,6 +2003,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -3776,6 +3858,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -4405,6 +4496,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -4522,91 +4616,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, "batch": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -4771,9 +4780,15 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, endpointKey{ Region: "bedrock-ap-northeast-1", }: endpoint{ @@ -4782,6 +4797,14 @@ var awsPartition = partition{ Region: "ap-northeast-1", }, }, + endpointKey{ + Region: "bedrock-ap-south-1", + }: endpoint{ + Hostname: 
"bedrock.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: "bedrock-ap-southeast-1", }: endpoint{ @@ -4790,6 +4813,22 @@ var awsPartition = partition{ Region: "ap-southeast-1", }, }, + endpointKey{ + Region: "bedrock-ap-southeast-2", + }: endpoint{ + Hostname: "bedrock.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "bedrock-ca-central-1", + }: endpoint{ + Hostname: "bedrock.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-eu-central-1", }: endpoint{ @@ -4798,6 +4837,38 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "bedrock-eu-west-1", + }: endpoint{ + Hostname: "bedrock.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "bedrock-eu-west-2", + }: endpoint{ + Hostname: "bedrock.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "bedrock-eu-west-3", + }: endpoint{ + Hostname: "bedrock.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "bedrock-fips-ca-central-1", + }: endpoint{ + Hostname: "bedrock-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-fips-us-east-1", }: endpoint{ @@ -4822,6 +4893,14 @@ var awsPartition = partition{ Region: "ap-northeast-1", }, }, + endpointKey{ + Region: "bedrock-runtime-ap-south-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: "bedrock-runtime-ap-southeast-1", }: endpoint{ @@ -4830,6 +4909,22 @@ var awsPartition = partition{ Region: "ap-southeast-1", }, }, + endpointKey{ + Region: "bedrock-runtime-ap-southeast-2", + }: endpoint{ + Hostname: "bedrock-runtime.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ca-central-1", + }: endpoint{ + Hostname: "bedrock-runtime.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-runtime-eu-central-1", }: endpoint{ @@ -4838,6 +4933,38 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "bedrock-runtime-eu-west-1", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-west-2", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-west-3", + }: endpoint{ + Hostname: "bedrock-runtime.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-ca-central-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, endpointKey{ Region: "bedrock-runtime-fips-us-east-1", }: endpoint{ @@ -4854,6 +4981,14 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "bedrock-runtime-sa-east-1", + }: 
endpoint{ + Hostname: "bedrock-runtime.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, endpointKey{ Region: "bedrock-runtime-us-east-1", }: endpoint{ @@ -4870,6 +5005,14 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "bedrock-sa-east-1", + }: endpoint{ + Hostname: "bedrock.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, endpointKey{ Region: "bedrock-us-east-1", }: endpoint{ @@ -4886,9 +5029,24 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -4913,6 +5071,9 @@ var awsPartition = partition{ }, "braket": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, @@ -4943,6 +5104,12 @@ var awsPartition = partition{ }, "cases": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -5157,69 +5324,262 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + 
Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.api.aws", + }, }, }, "cloudcontrolapi": service{ @@ -5227,78 +5587,216 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: 
"af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-south-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ap-southeast-4", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ca-west-1.api.aws", + }, endpointKey{ Region: "ca-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: 
endpoint{ + Hostname: "cloudcontrolapi.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-south-2", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -5356,51 +5854,123 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ Hostname: 
"cloudcontrolapi-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-2.api.aws", + }, }, }, "clouddirectory": service{ @@ -6828,6 +7398,9 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -6840,6 +7413,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -6849,18 +7425,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -6909,6 +7497,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6958,6 +7549,9 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -6970,6 +7564,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -6979,18 +7576,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7039,6 +7648,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7218,12 +7830,27 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -7862,6 +8489,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"controltower-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9061,9 +9706,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9088,6 +9745,24 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "directconnect-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "directconnect-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -12393,12 +13068,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -13494,6 +14175,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fms-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -13620,6 +14310,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "fms-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", }: endpoint{ @@ -14005,6 +14704,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "fsx-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -14038,6 +14746,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-prod-ca-central-1", }: endpoint{ @@ -14047,6 +14764,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-prod-ca-west-1", + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-prod-us-east-1", }: endpoint{ @@ -14146,6 +14872,24 @@ var awsPartition = partition{ 
}, Deprecated: boxedTrue, }, + endpointKey{ + Region: "prod-ca-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "prod-us-east-1", }: endpoint{ @@ -14519,6 +15263,18 @@ var awsPartition = partition{ }, }, }, + "globalaccelerator": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "globalaccelerator-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "glue": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -14989,6 +15745,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -15167,13 +15926,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "honeycode": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, "iam": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -15278,6 +16030,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -15293,6 +16048,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -15305,6 +16063,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -16321,16 +17082,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "iotroborunner": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, "iotsecuredtunneling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -16998,6 +17749,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kafka-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17031,6 +17791,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "kafka-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -17186,12 +17955,27 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "kendra-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: 
boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -17611,6 +18395,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18286,6 +19073,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18759,6 +19549,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19058,6 +19851,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19933,12 +20729,30 @@ var awsPartition = partition{ }, "media-pipelines-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -20142,6 +20956,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -20197,6 +21014,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -20251,6 +21071,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -20490,6 +21313,9 @@ var awsPartition = partition{ }, "meetings-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -20508,6 +21334,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21366,6 +22207,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21681,6 +22525,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -22010,6 +22857,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "oidc.ap-south-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -22034,6 +22889,14 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "oidc.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -22042,6 +22905,14 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "oidc.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -22074,6 +22945,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "oidc.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -22677,91 +23556,490 @@ var awsPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.af-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-northeast-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-northeast-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-northeast-3", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-3.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-south-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-south-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-3", - 
}: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-3.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-4", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-4.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ca-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ca-central-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.ca-central-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.ca-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ca-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ca-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.ca-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.ca-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-central-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-central-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-north-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-north-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-south-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-south-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-west-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-2.api.aws", + Protocols: []string{"https"}, + }, 
endpointKey{ Region: "eu-west-3", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-3.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "pi-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "pi-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "pi-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "pi-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "pi-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "pi-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "il-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.il-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "me-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.me-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "me-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.me-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "sa-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.sa-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-east-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-east-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: 
"us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-west-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-west-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-2.api.aws", + Protocols: []string{"https"}, + }, }, }, "pinpoint": service{ @@ -23159,6 +24437,14 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "portal.sso.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -23183,6 +24469,14 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -23191,6 +24485,14 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "portal.sso.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -23223,6 +24525,14 @@ var awsPartition = partition{ Region: "eu-south-1", }, }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "portal.sso.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -23726,6 +25036,9 @@ var awsPartition = partition{ }, "quicksight": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -23741,15 +25054,27 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "api", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -24797,9 +26122,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: 
"eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -24854,6 +26185,12 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -25214,6 +26551,12 @@ var awsPartition = partition{ }, "resource-explorer-2": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -25226,6 +26569,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -25235,15 +26581,30 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -25253,6 +26614,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -25487,6 +26854,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -25718,9 +27088,91 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "rum": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: 
endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -25744,40 +27196,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "rum": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, "runtime-v2-lex": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -26542,6 +27960,44 @@ var awsPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "s3-control.af-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.af-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "s3-control.ap-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{ @@ -26618,6 +28074,25 @@ var awsPartition = partition{ Region: "ap-south-1", }, }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "s3-control.ap-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{ @@ -26656,6 +28131,44 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "s3-control.ap-southeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "s3-control.ap-southeast-4.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: 
endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-4.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -26705,6 +28218,55 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "s3-control.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.ca-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -26724,6 +28286,25 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "s3-control.eu-central-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-central-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -26743,6 +28324,44 @@ var awsPartition = partition{ Region: "eu-north-1", }, }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "s3-control.eu-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "s3-control.eu-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-south-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, endpointKey{ Region: "eu-west-1", }: endpoint{ @@ -26800,6 +28419,63 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "s3-control.il-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: 
"il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.il-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "s3-control.me-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.me-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "s3-control.me-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.me-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, endpointKey{ Region: "sa-east-1", }: endpoint{ @@ -28112,21 +29788,85 @@ var awsPartition = partition{ }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-2", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-1", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, }, }, "servicecatalog": service{ @@ -28574,6 +30314,36 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"servicediscovery-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -29427,6 +31197,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -29806,6 +31579,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -30772,6 +32548,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -30781,6 +32560,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -30796,6 +32578,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -31035,6 +32820,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -31655,42 +33458,157 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, - endpointKey{ - Region: "fips-us-east-1", - }: endpoint{ - Hostname: "synthetics-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "synthetics-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-1", - }: endpoint{ - Hostname: "synthetics-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-west-2", - }: endpoint{ - Hostname: "synthetics-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, - Deprecated: boxedTrue, - }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", + }, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "il-central-1", }: endpoint{}, @@ -31706,167 +33624,126 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, - endpointKey{ - Region: "us-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-east-1.amazonaws.com", - }, endpointKey{ Region: "us-east-2", }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-east-2.amazonaws.com", - }, endpointKey{ Region: "us-west-1", }: endpoint{}, - endpointKey{ - Region: "us-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "synthetics-fips.us-west-1.amazonaws.com", - }, endpointKey{ Region: "us-west-2", }: endpoint{}, + }, + }, + "tax": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, + Region: "aws-global", }: endpoint{ - Hostname: "synthetics-fips.us-west-2.amazonaws.com", 
+ Hostname: "tax.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, }, }, - "tagging": service{ + "textract": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, endpointKey{ - Region: "ap-south-2", - }: endpoint{}, + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-southeast-1.api.aws", + }, endpointKey{ - Region: "ap-southeast-3", + Region: "ap-southeast-2", }: endpoint{}, endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "textract": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: 
dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -31915,39 +33792,87 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-2.api.aws", + }, }, }, "thinclient": service{ @@ -32354,6 +34279,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "transfer-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -32387,6 +34321,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "transfer-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -32555,6 +34498,21 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -32613,6 +34571,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -32637,6 +34610,63 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -32649,15 +34679,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + }, }, }, "voice-chime": service{ @@ -32817,6 +34871,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -32838,12 +34898,21 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -33846,6 +35915,23 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: 
"ca-west-1", + }: endpoint{ + Hostname: "wafv2.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -34090,6 +36176,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "wafv2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", }: endpoint{ @@ -34931,6 +37026,21 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "acm-pca": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "airflow": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35154,16 +37264,6 @@ var awscnPartition = partition{ }: endpoint{}, }, }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{}, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{}, - }, - }, "batch": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35217,9 +37317,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "cloudformation": service{ @@ -35691,6 +37803,19 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "entitlement.marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "entitlement-marketplace.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "es": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36128,7 +38253,7 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-northwest-1", }: endpoint{ - Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + Hostname: "mediaconvert.cn-northwest-1.amazonaws.com.cn", CredentialScope: credentialScope{ Region: "cn-northwest-1", }, @@ -36200,6 +38325,16 @@ var awscnPartition = partition{ }, }, }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "oam": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36255,10 +38390,28 @@ var awscnPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.cn-north-1.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + }, endpointKey{ Region: "cn-northwest-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: 
"cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.cn-northwest-1.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + }, }, }, "pipes": service{ @@ -36375,6 +38528,9 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, "resource-groups": service{ @@ -37845,13 +40001,37 @@ var awsusgovPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, }: endpoint{ + Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, }, endpointKey{ Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", }: endpoint{ + Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, }, }, }, @@ -37875,16 +40055,6 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, - "backupstorage": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{}, - }, - }, "batch": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -37935,6 +40105,38 @@ var awsusgovPartition = partition{ }, "bedrock": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "bedrock-fips-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-runtime.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -38019,21 +40221,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, 
+ }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.api.aws", + }, }, }, "clouddirectory": service{ @@ -38550,9 +40776,39 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, "data-ats.iot": service{ @@ -38733,20 +40989,40 @@ var awsusgovPartition = partition{ "directconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ - Hostname: "directconnect.us-gov-east-1.amazonaws.com", + Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ - Hostname: "directconnect.us-gov-west-1.amazonaws.com", + Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com", }, }, }, @@ -39432,6 +41708,15 @@ var awsusgovPartition = partition{ }, "email": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "email-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -39441,6 +41726,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -39454,22 +41748,82 @@ var awsusgovPartition = partition{ }, "emr-containers": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "emr-containers.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "emr-containers.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "emr-containers.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers.us-gov-west-1.amazonaws.com", + }, }, }, "emr-serverless": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "emr-serverless.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "emr-serverless.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless.us-gov-west-1.amazonaws.com", + }, }, }, "es": service{ @@ -40655,6 +43009,62 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "kms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -40870,6 +43280,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "license-manager-user-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "logs": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -41076,6 +43496,13 @@ var awsusgovPartition = partition{ }, }, }, + "models-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "models.lex": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -41409,12 +43836,76 @@ var awsusgovPartition = partition{ }, "pi": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-gov-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-gov-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, }, }, "pinpoint": service{ @@ -41965,6 +44456,13 @@ var awsusgovPartition = partition{ }, }, }, + "runtime-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "runtime.lex": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -42372,6 +44870,46 @@ var awsusgovPartition = partition{ }, }, }, + "securitylake": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "securitylake.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "securitylake.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, "serverlessrepo": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -42623,6 +45161,78 @@ var awsusgovPartition = partition{ }, }, }, + "signer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "signer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "signer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-verification-us-gov-east-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-gov-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-gov-west-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "verification-us-gov-east-1", + }: endpoint{ + Hostname: "verification.signer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "verification-us-gov-west-1", + }: endpoint{ + Hostname: "verification.signer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "simspaceweaver": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -43303,21 +45913,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-west-1.api.aws", + }, }, }, "transcribe": service{ @@ -43448,6 +46082,46 @@ var awsusgovPartition = partition{ }, }, }, + "verifiedpermissions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "waf-regional": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -43780,6 +46454,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "athena": service{ @@ -44207,6 +46884,55 @@ var awsisoPartition 
= partition{ }: endpoint{}, }, }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-prod-us-iso-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-iso-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov", + }, + }, + }, "glacier": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44389,42 +47115,12 @@ var awsisoPartition = partition{ }, "ram": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-iso-east-1", - }: endpoint{ - Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-us-iso-west-1", - }: endpoint{ - Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", - }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", - }, }, }, "rbin": service{ @@ -44469,37 +47165,10 @@ var awsisoPartition = partition{ }, "rds": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds-fips.us-iso-east-1", - }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds-fips.us-iso-west-1", - }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "rds.us-iso-east-1", }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, @@ -44508,16 +47177,7 @@ var awsisoPartition = partition{ endpointKey{ Region: "rds.us-iso-west-1", }: endpoint{ - CredentialScope: credentialScope{ - Region: "us-iso-west-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, @@ -44530,12 +47190,12 @@ var awsisoPartition = partition{ Region: "us-iso-east-1", 
Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", }, endpointKey{ Region: "us-iso-east-1-fips", }: endpoint{ - Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "rds.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, @@ -44548,12 +47208,12 @@ var awsisoPartition = partition{ Region: "us-iso-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", }, endpointKey{ Region: "us-iso-west-1-fips", }: endpoint{ - Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "rds.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, @@ -44564,40 +47224,20 @@ var awsisoPartition = partition{ "redshift": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "fips-us-iso-east-1", + Region: "us-iso-east-1", }: endpoint{ - Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "redshift.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, - Deprecated: boxedTrue, }, endpointKey{ - Region: "fips-us-iso-west-1", + Region: "us-iso-west-1", }: endpoint{ - Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "redshift.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", }, }, }, @@ -44706,6 +47346,131 @@ var awsisoPartition = partition{ }, }, }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Hostname: "s3-control.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{ + Hostname: "s3-control.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: 
"us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44721,6 +47486,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "sns": service{ @@ -44837,6 +47605,13 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "transcribe": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -45232,6 +48007,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "glacier": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -45319,6 +48101,20 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "metering.marketplace": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -45356,24 +48152,9 @@ var awsisobPartition = partition{ }, "ram": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "fips-us-isob-east-1", - }: endpoint{ - Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", - }, }, }, "rbin": service{ @@ -45400,28 +48181,10 @@ var awsisobPartition = partition{ }, "rds": service{ Endpoints: serviceEndpoints{ - endpointKey{ - Region: "rds-fips.us-isob-east-1", - }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", - CredentialScope: credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, endpointKey{ Region: "rds.us-isob-east-1", }: endpoint{ - CredentialScope: 
credentialScope{ - Region: "us-isob-east-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "rds.us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, @@ -45434,12 +48197,12 @@ var awsisobPartition = partition{ Region: "us-isob-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", }, endpointKey{ Region: "us-isob-east-1-fips", }: endpoint{ - Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "rds.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, @@ -45450,22 +48213,12 @@ var awsisobPartition = partition{ "redshift": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "fips-us-isob-east-1", + Region: "us-isob-east-1", }: endpoint{ - Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "redshift.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", }, }, }, @@ -45538,6 +48291,82 @@ var awsisobPartition = partition{ }, }, }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ + Hostname: "s3-control.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, "secretsmanager": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -45591,6 +48420,37 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: 
"storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, "streams.dynamodb": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -45687,7 +48547,11 @@ var awsisoePartition = partition{ SignatureVersions: []string{"v4"}, }, }, - Regions: regions{}, + Regions: regions{ + "eu-isoe-west-1": region{ + Description: "EU ISOE West", + }, + }, Services: services{}, } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 6fe12613d..ae3853dba 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.50.31" +const SDKVersion = "1.55.1" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 058334053..2ca0b19db 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -122,8 +122,8 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri } func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { - // If it's empty, generate an empty value - if !value.IsNil() && value.Len() == 0 { + // If it's empty, and not ec2, generate an empty value + if !value.IsNil() && value.Len() == 0 && !q.isEC2 { v.Set(prefix, "") return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 01ec8099e..f1fa8dcf0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -228,8 +228,8 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // don't use exceptions, they return an error). // // Note that if CompleteMultipartUpload fails, applications should be prepared -// to retry the failed requests. For more information, see Amazon S3 Error Best -// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). +// to retry any failed requests (including 500 error responses). For more information, +// see Amazon S3 Error Best Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). // // You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload // requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload @@ -391,7 +391,10 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // in the Amazon S3 User Guide. // // Both the Region that you want to copy the object from and the Region that -// you want to copy the object to must be enabled for your account. +// you want to copy the object to must be enabled for your account. 
For more +// information about how to enable a Region for your account, see Enable or +// disable a Region for standalone accounts (https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone) +// in the Amazon Web Services Account Management Guide. // // Amazon S3 transfer acceleration does not support cross-Region copies. If // you request a cross-Region copy using a transfer acceleration endpoint, you @@ -421,7 +424,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // IAM policy based on the source and destination bucket types in a CopyObject // operation. If the source object is in a general purpose bucket, you must // have s3:GetObject permission to read the source object that is being copied. -// If the destination bucket is a general purpose bucket, you must have s3:PubObject +// If the destination bucket is a general purpose bucket, you must have s3:PutObject // permission to write the object copy to the destination bucket. // // - Directory bucket permissions - You must have permissions in a bucket @@ -446,7 +449,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // When the request is an HTTP 1.1 request, the response is chunk encoded. When // the request is not an HTTP 1.1 request, the response would not contain the // Content-Length. You always need to read the entire response body to check -// if the copy succeeds. to keep the connection alive while we copy the data. +// if the copy succeeds. // // - If the copy is successful, you receive a response with information about // the copied object. @@ -458,7 +461,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // during the copy operation, the error response is embedded in the 200 OK // response. For example, in a cross-region copy, you may encounter throttling // and receive a 200 OK response. For more information, see Resolve the Error -// 200 response when copying objects to Amazon S3 (repost.aws/knowledge-center/s3-resolve-200-internalerror). +// 200 response when copying objects to Amazon S3 (https://repost.aws/knowledge-center/s3-resolve-200-internalerror). // The 200 OK status code means the copy was accepted, but it doesn't mean // the copy is complete. Another example is when you disconnect from Amazon // S3 before the copy is complete, Amazon S3 might cancel the copy and you @@ -477,7 +480,9 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // The copy request charge is based on the storage class and Region that you // specify for the destination object. The request can also result in a data // retrieval charge for the source if the source storage class bills for data -// retrieval. For pricing information, see Amazon S3 pricing (http://aws.amazon.com/s3/pricing/). +// retrieval. If the copy source is in a different region, the data transfer +// is billed to the copy source account. For pricing information, see Amazon +// S3 pricing (http://aws.amazon.com/s3/pricing/). // // # HTTP Host header syntax // @@ -612,12 +617,20 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // and s3:PutBucketVersioning permissions are required. S3 Object Ownership // // - If your CreateBucket request includes the x-amz-object-ownership header, -// then the s3:PutBucketOwnershipControls permission is required. 
If your -// CreateBucket request sets BucketOwnerEnforced for Amazon S3 Object Ownership -// and specifies a bucket ACL that provides access to an external Amazon -// Web Services account, your request fails with a 400 error and returns -// the InvalidBucketAcLWithObjectOwnership error code. For more information, -// see Setting Object Ownership on an existing bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html) +// then the s3:PutBucketOwnershipControls permission is required. To set +// an ACL on a bucket as part of a CreateBucket request, you must explicitly +// set S3 Object Ownership for the bucket to a different value than the default, +// BucketOwnerEnforced. Additionally, if your desired bucket ACL grants public +// access, you must first create the bucket (without the bucket ACL) and +// then explicitly disable Block Public Access on the bucket before using +// PutBucketAcl to set the ACL. If you try to create a bucket with a public +// ACL, the request will fail. For the majority of modern use cases in S3, +// we recommend that you keep all Block Public Access settings enabled and +// keep ACLs disabled. If you would like to share data with users outside +// of your account, you can use bucket policies as needed. For more information, +// see Controlling ownership of objects and disabling ACLs for your bucket +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// and Blocking public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) // in the Amazon S3 User Guide. S3 Block Public Access - If your specific // use case requires granting public access to your S3 resources, you can // disable Block Public Access. Specifically, you can create a new bucket @@ -2373,14 +2386,23 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // Removes an object from a bucket. The behavior depends on the bucket's versioning // state: // -// - If versioning is enabled, the operation removes the null version (if -// there is one) of an object and inserts a delete marker, which becomes -// the latest version of the object. If there isn't a null version, Amazon -// S3 does not remove any objects but will still respond that the command -// was successful. +// - If bucket versioning is not enabled, the operation permanently deletes +// the object. // -// - If versioning is suspended or not enabled, the operation permanently -// deletes the object. +// - If bucket versioning is enabled, the operation inserts a delete marker, +// which becomes the current version of the object. To permanently delete +// an object in a versioned bucket, you must include the object’s versionId +// in the request. For more information about versioning-enabled buckets, +// see Deleting object versions from a versioning-enabled bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html). +// +// - If bucket versioning is suspended, the operation removes the object +// that has a null versionId, if there is one, and inserts a delete marker +// that becomes the current version of the object. If there isn't an object +// with a null versionId, and all versions of the object have a versionId, +// Amazon S3 does not remove the object and only inserts a delete marker. +// To permanently delete an object that has a versionId, you must include +// the object’s versionId in the request. 
For more information about versioning-suspended +// buckets, see Deleting objects from versioning-suspended buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html). // // - Directory buckets - S3 Versioning isn't enabled and supported for directory // buckets. For this API operation, only the null value of the version ID @@ -2423,7 +2445,7 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // in your policies when your DeleteObjects request includes specific headers. // s3:DeleteObject - To delete an object from a bucket, you must always have // the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific -// version of an object from a versiong-enabled bucket, you must have the +// version of an object from a versioning-enabled bucket, you must have the // s3:DeleteObjectVersion permission. // // - Directory bucket permissions - To grant access to this API operation @@ -2657,7 +2679,7 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque // in your policies when your DeleteObjects request includes specific headers. // s3:DeleteObject - To delete an object from a bucket, you must always specify // the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific -// version of an object from a versiong-enabled bucket, you must specify +// version of an object from a versioning-enabled bucket, you must specify // the s3:DeleteObjectVersion permission. // // - Directory bucket permissions - To grant access to this API operation @@ -3651,12 +3673,15 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // This operation is not supported by directory buckets. // // Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, or a combination of both. +// an object key name prefix, one or more object tags, object size, or any combination +// of these. Accordingly, this section describes the latest API. The previous +// version of the API supported filtering based only on an object key name prefix, +// which is supported for backward compatibility. For the related API description, +// see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). // Accordingly, this section describes the latest API. The response describes // the new filter element that you can use to specify a filter to select a subset // of objects to which the rule applies. If you are using a previous version -// of the lifecycle configuration, it still works. For the earlier action, see -// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). +// of the lifecycle configuration, it still works. For the earlier action, // // Returns the lifecycle configuration information set on the bucket. For information // about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). @@ -6018,7 +6043,7 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou // If the bucket does not exist or you do not have permission to access it, // the HEAD request returns a generic 400 Bad Request, 403 Forbidden or 404 // Not Found code. A message body is not included, so you cannot determine the -// exception beyond these error codes. +// exception beyond these HTTP response codes. 
// // Directory buckets - You must make requests for this API operation to the // Zonal endpoint. These endpoints support virtual-hosted-style requests in @@ -8931,10 +8956,10 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // about lifecycle configuration, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html). // // Bucket lifecycle configuration now supports specifying a lifecycle rule using -// an object key name prefix, one or more object tags, or a combination of both. -// Accordingly, this section describes the latest API. The previous version -// of the API supported filtering based only on an object key name prefix, which -// is supported for backward compatibility. For the related API description, +// an object key name prefix, one or more object tags, object size, or any combination +// of these. Accordingly, this section describes the latest API. The previous +// version of the API supported filtering based only on an object key name prefix, +// which is supported for backward compatibility. For the related API description, // see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html). // // # Rules @@ -8945,8 +8970,8 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // adjustable. Each rule consists of the following: // // - A filter identifying a subset of objects to which the rule applies. -// The filter can be based on a key name prefix, object tags, or a combination -// of both. +// The filter can be based on a key name prefix, object tags, object size, +// or any combination of these. // // - A status indicating whether the rule is in effect. // @@ -11175,8 +11200,6 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // This action performs the following types of requests: // -// - select - Perform a select query on an archived object -// // - restore an archive - Restore an archived object // // For more information about the S3 structure in the request body, see the @@ -11190,44 +11213,6 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) // in the Amazon S3 User Guide // -// Define the SQL expression for the SELECT type of restoration for your query -// in the request body's SelectParameters structure. You can use expressions -// like the following examples. -// -// - The following expression returns all records from the specified object. -// SELECT * FROM Object -// -// - Assuming that you are not using any headers for data stored in the object, -// you can specify columns with positional headers. SELECT s._1, s._2 FROM -// Object s WHERE s._3 > 100 -// -// - If you have headers and you set the fileHeaderInfo in the CSV structure -// in the request body to USE, you can specify headers in the query. (If -// you set the fileHeaderInfo field to IGNORE, the first row is skipped for -// the query.) You cannot mix ordinal positions with header column names. -// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s -// -// When making a select request, you can also do the following: -// -// - To expedite your queries, specify the Expedited tier. For more information -// about tiers, see "Restoring Archives," later in this topic. 
-// -// - Specify details about the data serialization format of both the input -// object that is being queried and the serialization of the CSV-encoded -// query results. -// -// The following are additional important facts about the select feature: -// -// - The output results are new Amazon S3 objects. Unlike archive retrievals, -// they are stored until explicitly deleted-manually or through a lifecycle -// configuration. -// -// - You can issue more than one select request on the same Amazon S3 object. -// Amazon S3 doesn't duplicate requests, so avoid issuing duplicate requests. -// -// - Amazon S3 accepts a select request even if the object has already been -// restored. A select request doesn’t return error response 409. -// // # Permissions // // To use this operation, you must have permissions to perform the s3:RestoreObject @@ -11331,8 +11316,8 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // response. // // - Special errors: Code: RestoreAlreadyInProgress Cause: Object restore -// is already in progress. (This error does not apply to SELECT type requests.) -// HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client +// is already in progress. HTTP Status Code: 409 Conflict SOAP Fault Code +// Prefix: Client // // - Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals // are currently not available. Try again later. (Returned if there is insufficient @@ -12014,17 +11999,17 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // bucket in an UploadPartCopy operation. If the source object is in a general // purpose bucket, you must have the s3:GetObject permission to read the // source object that is being copied. If the destination bucket is a general -// purpose bucket, you must have the s3:PubObject permission to write the +// purpose bucket, you must have the s3:PutObject permission to write the // object copy to the destination bucket. For information about permissions -// required to use the multipart upload API, see Multipart Upload and Permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// required to use the multipart upload API, see Multipart upload API and +// permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) // in the Amazon S3 User Guide. // // - Directory bucket permissions - You must have permissions in a bucket // policy or an IAM identity-based policy based on the source and destination // bucket types in an UploadPartCopy operation. If the source object that // you want to copy is in a directory bucket, you must have the s3express:CreateSession -// permission in the Action element of a policy to read the object . By default, +// permission in the Action element of a policy to read the object. By default, // the session is in the ReadWrite mode. If you want to restrict the access, // you can explicitly set the s3express:SessionMode condition key to ReadOnly // on the copy source bucket. If the copy destination is a directory bucket, @@ -12270,7 +12255,7 @@ type AbortMultipartUploadInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). 
For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -13730,7 +13715,7 @@ type CompleteMultipartUploadInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -14507,7 +14492,7 @@ type CopyObjectInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -15828,7 +15813,7 @@ type CreateBucketInput struct { // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name // . Virtual-hosted-style requests aren't supported. Directory bucket names // must be unique in the chosen Availability Zone. Bucket names must also follow - // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). // For information about bucket naming restrictions, see Directory bucket naming // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide @@ -16061,7 +16046,7 @@ type CreateMultipartUploadInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -16955,7 +16940,7 @@ func (s CreateSessionInput) updateArnableField(v string) (interface{}, error) { type CreateSessionOutput struct { _ struct{} `type:"structure"` - // The established temporary security credentials for the created session.. + // The established temporary security credentials for the created session. 
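As a hedged sketch of how these temporary credentials come back to a caller (assuming the default session setup; the directory bucket name is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	out, err := svc.CreateSession(&s3.CreateSessionInput{
		// Placeholder directory bucket name.
		Bucket: aws.String("amzn-s3-demo-bucket--usw2-az1--x-s3"),
	})
	if err != nil {
		log.Fatal(err)
	}
	creds := out.Credentials // the temporary security credentials described above
	fmt.Println("access key:", aws.StringValue(creds.AccessKeyId))
	fmt.Println("expires:", aws.TimeValue(creds.Expiration))
}
```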
// // Credentials is a required field Credentials *SessionCredentials `locationName:"Credentials" type:"structure" required:"true"` @@ -17488,7 +17473,7 @@ type DeleteBucketInput struct { // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name // . Virtual-hosted-style requests aren't supported. Directory bucket names // must be unique in the chosen Availability Zone. Bucket names must also follow - // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). // For information about bucket naming restrictions, see Directory bucket naming // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide @@ -18230,7 +18215,7 @@ type DeleteBucketPolicyInput struct { // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name // . Virtual-hosted-style requests aren't supported. Directory bucket names // must be unique in the chosen Availability Zone. Bucket names must also follow - // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). // For information about bucket naming restrictions, see Directory bucket naming // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide @@ -18822,7 +18807,7 @@ type DeleteObjectInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -19248,7 +19233,7 @@ type DeleteObjectsInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -20561,8 +20546,15 @@ func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplicati return s } -// Specifies the Amazon S3 object key name to filter on and whether to filter -// on the suffix or prefix of the key name. +// Specifies the Amazon S3 object key name to filter on. An object key name +// is the name assigned to an object in your Amazon S3 bucket. You specify whether +// to filter on the suffix or prefix of the object key name. 
A prefix is a specific +// string of characters at the beginning of an object key name, which you can +// use to organize objects. For example, you can start the key names of related +// objects with a prefix, such as 2023- or engineering/. Then, you can use FilterRule +// to find objects in a bucket with key names that have the same prefix. A suffix +// is similar to a prefix, but it is at the end of the object key name instead +// of at the beginning. type FilterRule struct { _ struct{} `type:"structure"` @@ -22464,7 +22456,7 @@ type GetBucketPolicyInput struct { // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name // . Virtual-hosted-style requests aren't supported. Directory bucket names // must be unique in the chosen Availability Zone. Bucket names must also follow - // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). // For information about bucket naming restrictions, see Directory bucket naming // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide @@ -23607,7 +23599,7 @@ type GetObjectAttributesInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -24071,7 +24063,7 @@ type GetObjectInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -24648,7 +24640,7 @@ type GetObjectLegalHoldOutput struct { _ struct{} `type:"structure" payload:"LegalHold"` // The current legal hold status for the specified object. - LegalHold *ObjectLockLegalHold `type:"structure"` + LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure"` } // String returns the string representation. @@ -25407,7 +25399,7 @@ type GetObjectRetentionOutput struct { _ struct{} `type:"structure" payload:"Retention"` // The container element for an object's retention settings. - Retention *ObjectLockRetention `type:"structure"` + Retention *ObjectLockRetention `locationName:"Retention" type:"structure"` } // String returns the string representation. @@ -26148,7 +26140,7 @@ type HeadBucketInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. 
Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -26281,7 +26273,7 @@ type HeadBucketOutput struct { // The name of the location where the bucket will be created. // // For directory buckets, the AZ ID of the Availability Zone where the bucket - // is created. An example AZ ID value is usw2-az2. + // is created. An example AZ ID value is usw2-az1. // // This functionality is only supported by directory buckets. BucketLocationName *string `location:"header" locationName:"x-amz-bucket-location-name" type:"string"` @@ -26348,7 +26340,7 @@ type HeadObjectInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -26475,6 +26467,24 @@ type HeadObjectInput struct { // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Sets the Cache-Control header of the response. + ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` + + // Sets the Content-Disposition header of the response. + ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` + + // Sets the Content-Type header of the response. + ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` + + // Sets the Expires header of the response. + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"` + // Specifies the algorithm to use when encrypting the object (for example, AES256). // // This functionality is not supported for directory buckets. @@ -26620,6 +26630,42 @@ func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { return s } +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *HeadObjectInput) SetResponseCacheControl(v string) *HeadObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. 
+func (s *HeadObjectInput) SetResponseContentDisposition(v string) *HeadObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *HeadObjectInput) SetResponseContentEncoding(v string) *HeadObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *HeadObjectInput) SetResponseContentLanguage(v string) *HeadObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *HeadObjectInput) SetResponseContentType(v string) *HeadObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *HeadObjectInput) SetResponseExpires(v time.Time) *HeadObjectInput { + s.ResponseExpires = &v + return s +} + // SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { s.SSECustomerAlgorithm = &v @@ -27163,9 +27209,9 @@ type IndexDocument struct { _ struct{} `type:"structure"` // A suffix that is appended to a request that is for a directory on the website - // endpoint (for example,if the suffix is index.html and you make a request - // to samplebucket/images/ the data that is returned will be for the object - // with the key name images/index.html) The suffix must not be empty and must + // endpoint. (For example, if the suffix is index.html and you make a request + // to samplebucket/images/, the data that is returned will be for the object + // with the key name images/index.html.) The suffix must not be empty and must // not include a slash character. // // Replacement must be made for object keys containing special characters (such @@ -28557,7 +28603,9 @@ func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { } // The Filter is used to identify objects that a Lifecycle Rule applies to. -// A Filter must have exactly one of Prefix, Tag, or And specified. +// A Filter can have exactly one of Prefix, Tag, ObjectSizeGreaterThan, ObjectSizeLessThan, +// or And specified. If the Filter element is left empty, the Lifecycle Rule +// applies to all objects in the bucket. type LifecycleRuleFilter struct { _ struct{} `type:"structure"` @@ -29470,7 +29518,7 @@ type ListMultipartUploadsInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -29765,7 +29813,11 @@ type ListMultipartUploadsOutput struct { // This functionality is not supported for directory buckets. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // Upload ID after which listing began. + // Together with key-marker, specifies the multipart upload after which listing + // should begin. 
If key-marker is not specified, the upload-id-marker parameter + // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. // // This functionality is not supported for directory buckets. UploadIdMarker *string `type:"string"` @@ -30252,7 +30304,7 @@ type ListObjectsInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -30470,7 +30522,9 @@ type ListObjectsOutput struct { // the MaxKeys value. Delimiter *string `type:"string"` - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object keys in the response. If + // using url, non-ASCII characters used in an object's key name will be URL + // encoded. For example, the object test_file(3).png will appear as test_file%283%29.png. EncodingType *string `type:"string" enum:"EncodingType"` // A flag that indicates whether Amazon S3 returned all of the results that @@ -30600,7 +30654,7 @@ type ListObjectsV2Input struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -30645,7 +30699,9 @@ type ListObjectsV2Input struct { // the Amazon S3 User Guide. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object keys in the response. If + // using url, non-ASCII characters used in an object's key name will be URL + // encoded. For example, the object test_file(3).png will appear as test_file%283%29.png. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` // The account ID of the expected bucket owner. If the account ID that you provide @@ -31030,7 +31086,7 @@ type ListPartsInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). 
For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -31324,9 +31380,8 @@ type ListPartsOutput struct { // all the parts. Owner *Owner `type:"structure"` - // When a list is truncated, this element specifies the last part in the list, - // as well as the value to use for the part-number-marker request parameter - // in a subsequent request. + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. PartNumberMarker *int64 `type:"integer"` // Container for elements related to a particular part. A response can contain @@ -31612,8 +31667,8 @@ type LocationInfo struct { // The name of the location where the bucket will be created. // - // For directory buckets, the AZ ID of the Availability Zone where the bucket - // will be created. An example AZ ID value is usw2-az2. + // For directory buckets, the name of the location is the AZ ID of the Availability + // Zone where the bucket will be created. An example AZ ID value is usw2-az1. Name *string `type:"string"` // The type of location where the bucket will be created. @@ -32178,9 +32233,9 @@ func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { type NoncurrentVersionExpiration struct { _ struct{} `type:"structure"` - // Specifies how many newer noncurrent versions must exist before Amazon S3 - // can perform the associated action on a given version. If there are this many - // more recent noncurrent versions, Amazon S3 will take the associated action. + // Specifies how many noncurrent versions Amazon S3 will retain. You can specify + // up to 100 noncurrent versions to retain. Amazon S3 will permanently delete + // any additional noncurrent versions beyond the specified number to retain. // For more information about noncurrent versions, see Lifecycle configuration // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) // in the Amazon S3 User Guide. @@ -32234,11 +32289,11 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` - // Specifies how many newer noncurrent versions must exist before Amazon S3 - // can perform the associated action on a given version. If there are this many - // more recent noncurrent versions, Amazon S3 will take the associated action. - // For more information about noncurrent versions, see Lifecycle configuration - // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // Specifies how many noncurrent versions Amazon S3 will retain in the same + // storage class before transitioning objects. You can specify up to 100 noncurrent + // versions to retain. Amazon S3 will transition any additional noncurrent versions + // beyond the specified number to retain. For more information about noncurrent + // versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) // in the Amazon S3 User Guide. NewerNoncurrentVersions *int64 `type:"integer"` @@ -35951,7 +36006,7 @@ type PutBucketPolicyInput struct { // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name // . Virtual-hosted-style requests aren't supported. Directory bucket names // must be unique in the chosen Availability Zone. 
Bucket names must also follow - // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). // For information about bucket naming restrictions, see Directory bucket naming // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide @@ -37310,7 +37365,7 @@ type PutObjectInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -41519,7 +41574,7 @@ type ServerSideEncryptionByDefault struct { // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services // KMS key ID to use for the default encryption. This parameter is allowed if - // and only if SSEAlgorithm is set to aws:kms. + // and only if SSEAlgorithm is set to aws:kms or aws:kms:dsse. // // You can specify the key ID, key alias, or the Amazon Resource Name (ARN) // of the KMS key. @@ -42696,7 +42751,7 @@ type UploadPartCopyInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // @@ -43264,7 +43319,7 @@ type UploadPartInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go index 8f9e068f7..f9c6e786d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go @@ -60,7 +60,7 @@ type UploadInput struct { // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. // Path-style requests are not supported. 
Directory bucket names must be unique // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 - // (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3). For information about + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) // in the Amazon S3 User Guide. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go index 04f6c811b..827bd5194 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go @@ -179,8 +179,8 @@ func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req // // Creates and returns access and refresh tokens for clients and applications // that are authenticated using IAM entities. The access token can be used to -// fetch short-term credentials for the assigned AWS accounts or to access application -// APIs using bearer authentication. +// fetch short-term credentials for the assigned Amazon Web Services accounts +// or to access application APIs using bearer authentication. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -331,6 +331,13 @@ func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *reques // Indicates that an error from the service occurred while trying to process // a request. // +// - InvalidRedirectUriException +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) { req, out := c.RegisterClientRequest(input) @@ -619,6 +626,15 @@ type CreateTokenInput struct { // type is currently unsupported for the CreateToken API. Code *string `locationName:"code" type:"string"` + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + // + // CodeVerifier is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"` + // Used only when calling this API for the Device Code grant type. This short-term // code is used to identify this authorization request. This comes from the // result of the StartDeviceAuthorization API. @@ -718,6 +734,12 @@ func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput { return s } +// SetCodeVerifier sets the CodeVerifier field's value. +func (s *CreateTokenInput) SetCodeVerifier(v string) *CreateTokenInput { + s.CodeVerifier = &v + return s +} + // SetDeviceCode sets the DeviceCode field's value. 
func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput { s.DeviceCode = &v @@ -751,7 +773,8 @@ func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput { type CreateTokenOutput struct { _ struct{} `type:"structure"` - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateTokenOutput's @@ -863,6 +886,15 @@ type CreateTokenWithIAMInput struct { // persisted in the Authorization Code GrantOptions for the application. Code *string `locationName:"code" type:"string"` + // Used only when calling this API for the Authorization Code grant type. This + // value is generated by the client and presented to validate the original code + // challenge value the client passed at authorization time. + // + // CodeVerifier is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"` + // Supports the following OAuth grant types: Authorization Code, Refresh Token, // JWT Bearer, and Token Exchange. Specify one of the following values, depending // on the grant type that you want: @@ -982,6 +1014,12 @@ func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput { return s } +// SetCodeVerifier sets the CodeVerifier field's value. +func (s *CreateTokenWithIAMInput) SetCodeVerifier(v string) *CreateTokenWithIAMInput { + s.CodeVerifier = &v + return s +} + // SetGrantType sets the GrantType field's value. func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput { s.GrantType = &v @@ -1027,7 +1065,8 @@ func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWith type CreateTokenWithIAMOutput struct { _ struct{} `type:"structure"` - // A bearer token to access AWS accounts and applications assigned to a user. + // A bearer token to access Amazon Web Services accounts and applications assigned + // to a user. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's @@ -1495,6 +1534,78 @@ func (s *InvalidGrantException) RequestID() string { return s.RespMetadata.RequestID } +// Indicates that one or more redirect URI in the request is not supported for +// this operation. +type InvalidRedirectUriException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_redirect_uri. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRedirectUriException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRedirectUriException) GoString() string { + return s.String() +} + +func newErrorInvalidRedirectUriException(v protocol.ResponseMetadata) error { + return &InvalidRedirectUriException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRedirectUriException) Code() string { + return "InvalidRedirectUriException" +} + +// Message returns the exception's message. +func (s *InvalidRedirectUriException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRedirectUriException) OrigErr() error { + return nil +} + +func (s *InvalidRedirectUriException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRedirectUriException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRedirectUriException) RequestID() string { + return s.RespMetadata.RequestID +} + // Indicates that something is wrong with the input to the request. For example, // a required parameter might be missing or out of range. type InvalidRequestException struct { @@ -1731,6 +1842,25 @@ type RegisterClientInput struct { // ClientType is a required field ClientType *string `locationName:"clientType" type:"string" required:"true"` + // This IAM Identity Center application ARN is used to define administrator-managed + // configuration for public client access to resources. At authorization, the + // scopes, grants, and redirect URI available to this client will be restricted + // by this application resource. + EntitledApplicationArn *string `locationName:"entitledApplicationArn" type:"string"` + + // The list of OAuth 2.0 grant types that are defined by the client. This list + // is used to restrict the token granting flows available to the client. + GrantTypes []*string `locationName:"grantTypes" type:"list"` + + // The IAM Identity Center Issuer URL associated with an instance of IAM Identity + // Center. This value is needed for user access to resources through the client. + IssuerUrl *string `locationName:"issuerUrl" type:"string"` + + // The list of redirect URI that are defined by the client. At completion of + // authorization, this list is used to restrict what locations the user agent + // can be redirected back to. + RedirectUris []*string `locationName:"redirectUris" type:"list"` + // The list of scopes that are defined by the client. Upon authorization, this // list is used to restrict permissions when granting an access token. Scopes []*string `locationName:"scopes" type:"list"` @@ -1782,6 +1912,30 @@ func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput { return s } +// SetEntitledApplicationArn sets the EntitledApplicationArn field's value. +func (s *RegisterClientInput) SetEntitledApplicationArn(v string) *RegisterClientInput { + s.EntitledApplicationArn = &v + return s +} + +// SetGrantTypes sets the GrantTypes field's value. +func (s *RegisterClientInput) SetGrantTypes(v []*string) *RegisterClientInput { + s.GrantTypes = v + return s +} + +// SetIssuerUrl sets the IssuerUrl field's value. 
+func (s *RegisterClientInput) SetIssuerUrl(v string) *RegisterClientInput { + s.IssuerUrl = &v + return s +} + +// SetRedirectUris sets the RedirectUris field's value. +func (s *RegisterClientInput) SetRedirectUris(v []*string) *RegisterClientInput { + s.RedirectUris = v + return s +} + // SetScopes sets the Scopes field's value. func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput { s.Scopes = v diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go index e6242e492..cadf4584d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go @@ -57,6 +57,13 @@ const ( // makes a CreateToken request with an invalid grant type. ErrCodeInvalidGrantException = "InvalidGrantException" + // ErrCodeInvalidRedirectUriException for service response error code + // "InvalidRedirectUriException". + // + // Indicates that one or more redirect URI in the request is not supported for + // this operation. + ErrCodeInvalidRedirectUriException = "InvalidRedirectUriException" + // ErrCodeInvalidRequestException for service response error code // "InvalidRequestException". // @@ -106,6 +113,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidClientException": newErrorInvalidClientException, "InvalidClientMetadataException": newErrorInvalidClientMetadataException, "InvalidGrantException": newErrorInvalidGrantException, + "InvalidRedirectUriException": newErrorInvalidRedirectUriException, "InvalidRequestException": newErrorInvalidRequestException, "InvalidRequestRegionException": newErrorInvalidRequestRegionException, "InvalidScopeException": newErrorInvalidScopeException, diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 1f72cdde1..05c7359e4 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -55,6 +55,10 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+	<summary>See changes to v1.16.x</summary>
+
 * July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
	* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829

@@ -93,6 +97,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
 * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
 * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
 * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
+</details>
See changes to v1.15.x @@ -560,6 +565,8 @@ the stateless compress described below. For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). +To disable all assembly add `-tags=noasm`. This works across all packages. + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 2aa6a95a0..2754bac6f 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -51,7 +51,7 @@ func emitCopy(dst []byte, offset, length int) int { i := 0 // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because + // length emitted down below is a little lower (at 60 = 64 - 4), because // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go index 1253ea675..637c93147 100644 --- a/vendor/github.com/klauspost/compress/s2/writer.go +++ b/vendor/github.com/klauspost/compress/s2/writer.go @@ -937,7 +937,7 @@ func WriterUncompressed() WriterOption { // WriterBlockSize allows to override the default block size. // Blocks will be this size or smaller. -// Minimum size is 4KB and and maximum size is 4MB. +// Minimum size is 4KB and maximum size is 4MB. // // Bigger blocks may give bigger throughput on systems with many cores, // and will increase compression slightly, but it will limit the possible diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 9f17ce601..03744fbc7 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -554,6 +554,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if debugDecoder { printf("Compression modes: 0b%b", compMode) } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } for i := uint(0); i < 3; i++ { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 2cfe925ad..32a7f401d 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -427,6 +427,16 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { return nil } +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + // fuzzFseEncoder can be used to fuzz the FSE encoder. 
func fuzzFseEncoder(data []byte) int { if len(data) > maxSequences || len(data) < 2 { @@ -479,6 +489,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if len(b.sequences) == 0 { return b.encodeLits(b.literals, rawAllLits) } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + // We want some difference to at least account for the headers. saved := b.size - len(b.literals) - (b.size >> 6) if saved < 16 { diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f04aaa21e..bbca17234 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -82,7 +82,7 @@ var ( // can run multiple concurrent stateless decodes. It is even possible to // use stateless decodes while a stream is being decoded. // -// The Reset function can be used to initiate a new stream, which is will considerably +// The Reset function can be used to initiate a new stream, which will considerably // reduce the allocations normally caused by NewReader. func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { initPredefined() diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 87f42879a..4613724e9 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -135,8 +135,20 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { break } + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 20d25b0e0..a4f5bf91f 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -102,9 +102,20 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { e.cur = e.maxMatchOff break } - + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] diff --git a/vendor/github.com/mongodb/mongo-tools/LICENSE.md b/vendor/github.com/mongodb/mongo-tools/LICENSE.md index 01b6a37e4..d46ac5b05 100644 --- a/vendor/github.com/mongodb/mongo-tools/LICENSE.md +++ b/vendor/github.com/mongodb/mongo-tools/LICENSE.md @@ -1,13 +1,11 @@ Copyright 2014 MongoDB, Inc. 
-Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in +compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. +Unless required by applicable law or agreed to in writing, software distributed under the License is +distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing permissions and limitations under the +License. diff --git a/vendor/github.com/mongodb/mongo-tools/common/archive/spec.md b/vendor/github.com/mongodb/mongo-tools/common/archive/spec.md index 12e09a879..a195e0917 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/archive/spec.md +++ b/vendor/github.com/mongodb/mongo-tools/common/archive/spec.md @@ -1,21 +1,23 @@ # The Mongodump archive format specification -The mongodump archive format contains metadata about collections and data dumped from those collections. Data from multiple collections can be interleaved so multiple threads can dump data from different collections into the archive concurrently. +The mongodump archive format contains metadata about collections and data dumped from those +collections. Data from multiple collections can be interleaved so multiple threads can dump data +from different collections into the archive concurrently. Here is the definition in BNF-like syntax: ```ebnf -archive = magic-number , - header , - *collection-metadata , - terminator-bytes , +archive = magic-number , + header , + *collection-metadata , + terminator-bytes , *(namespace-segment | namespace-eof) ; magic-number = 0x6de29981 ; (* little-endian representation of 0x8199e26d *) -header = document ; +header = document ; -collection-metadata = document ; +collection-metadata = document ; terminator-bytes = 0xffffffff ; @@ -32,61 +34,72 @@ eof-header = document ; ## Explanatory notes -`document` is a BSON document as defined by the [BSON Spec](https://bsonspec.org). The fields of each document are defined here: +`document` is a BSON document as defined by the [BSON Spec](https://bsonspec.org). The fields of +each document are defined here: - `header`: - ``` - { - int32 concurrent_collections, - string version, - string server_version, - string tool_version - } - ``` - - `concurrent_collections` - the number of collections dumped concurrently by mongodump as set by the `--numParallelCollections` options. Mongorestore will choose the larger of `concurrent_collections` and `--numParallelCollections` to set the number of collections to restore in parallel. - - `version` - the archive format version. Currently there is only one version, `"0.1"`. - - `server_version` - the MongoDB version of the source database. - - `tool_version` - the version of mongodump that created the archive. 
+
+  ```
+  {
+    int32 concurrent_collections,
+    string version,
+    string server_version,
+    string tool_version
+  }
+  ```
+
+  - `concurrent_collections` - the number of collections dumped concurrently by mongodump as set by
+    the `--numParallelCollections` option. Mongorestore will choose the larger of
+    `concurrent_collections` and `--numParallelCollections` to set the number of collections to
+    restore in parallel.
+  - `version` - the archive format version. Currently there is only one version, `"0.1"`.
+  - `server_version` - the MongoDB version of the source database.
+  - `tool_version` - the version of mongodump that created the archive.
 - `collection-metadata`:
-  ```
-  {
-    string db,
-    string collection,
-    string metadata,
-    int32 size,
-    string type
-  }
-  ```
-  - `db` - databse name.
-  - `collection` - collection name.
-  - `metadata` - the collection metadata (including options, index definitions, and collection type) encoded in canonical [Extended JSON v2](https://docs.mongodb.com/manual/reference/mongodb-extended-json/). This is the same data written to `metadata.json` files.
-  - `size` - the total uncompressed size of the collection in bytes.
-  - `type` - set to `"timeseries"` for timeseries collections, `"view"` for views, and `""` otherwise.
-- `namespace-data`: One or more BSON documents from the collection. The collection's documents can be split across multiple segments.
+  ```
+  {
+    string db,
+    string collection,
+    string metadata,
+    int32 size,
+    string type
+  }
+  ```
+  - `db` - database name.
+  - `collection` - collection name.
+  - `metadata` - the collection metadata (including options, index definitions, and collection type)
+    encoded in canonical
+    [Extended JSON v2](https://docs.mongodb.com/manual/reference/mongodb-extended-json/). This is
+    the same data written to `metadata.json` files.
+  - `size` - the total uncompressed size of the collection in bytes.
+  - `type` - set to `"timeseries"` for timeseries collections, `"view"` for views, and `""`
+    otherwise.
+- `namespace-data`: One or more BSON documents from the collection. The collection's documents can
+  be split across multiple segments.
 - `namespace-header`:
-  ```
-  {
-    string db,
-    string collection,
-    bool EOF,
-    int64 CRC
-  }
-  ```
-  - `db` - databse name.
-  - `collection` - collection name.
-  - `EOF` - always `false`.
-  - `CRC` - always `0`.
+  ```
+  {
+    string db,
+    string collection,
+    bool EOF,
+    int64 CRC
+  }
+  ```
+  - `db` - database name.
+  - `collection` - collection name.
+  - `EOF` - always `false`.
+  - `CRC` - always `0`.
 - `eof-header`:
-  ```
-  {
-    string db,
-    string collection,
-    bool EOF,
-    int64 CRC
-  }
-  ```
-  - `db` - databse name.
-  - `collection` - collection name.
-  - `EOF` - always `true`.
-  - `CRC` - the CRC-64-ECMA of all documents in the namespace (across all `namespace-segment`s).
+  ```
+  {
+    string db,
+    string collection,
+    bool EOF,
+    int64 CRC
+  }
+  ```
+  - `db` - database name.
+  - `collection` - collection name.
+  - `EOF` - always `true`.
+  - `CRC` - the CRC-64-ECMA of all documents in the namespace (across all `namespace-segment`s).
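
To make the framing above concrete, here is a minimal Go sketch that reads the archive prelude (magic number, `header`, and the `collection-metadata` documents up to `terminator-bytes`). This is hypothetical illustration code, not part of mongodump: `readArchivePrelude` and `readDocument` are invented names, and only the byte layout described in the spec is assumed.

```go
package archiveexample

import (
	"encoding/binary"
	"fmt"
	"io"

	"go.mongodb.org/mongo-driver/bson"
)

const (
	archiveMagic    = 0x8199e26d // stored on disk little-endian, i.e. bytes 6d e2 99 81
	terminatorBytes = 0xffffffff
)

// readDocument reads one length-prefixed BSON document from r.
// It returns nil (and no error) when it encounters terminator-bytes.
func readDocument(r io.Reader) (bson.Raw, error) {
	var prefix [4]byte
	if _, err := io.ReadFull(r, prefix[:]); err != nil {
		return nil, err
	}
	size := binary.LittleEndian.Uint32(prefix[:])
	if size == terminatorBytes {
		return nil, nil
	}
	if size < 5 { // a BSON document is at least 5 bytes (length + trailing 0x00)
		return nil, fmt.Errorf("invalid document length %d", size)
	}
	doc := make([]byte, size)
	copy(doc, prefix[:]) // the length prefix is part of the document
	_, err := io.ReadFull(r, doc[4:])
	return bson.Raw(doc), err
}

// readArchivePrelude validates the magic number, then returns the header
// document and every collection-metadata document that precedes the
// terminator-bytes; namespace segments follow in the stream.
func readArchivePrelude(r io.Reader) (header bson.Raw, metadata []bson.Raw, err error) {
	var magic [4]byte
	if _, err = io.ReadFull(r, magic[:]); err != nil {
		return nil, nil, err
	}
	if binary.LittleEndian.Uint32(magic[:]) != archiveMagic {
		return nil, nil, fmt.Errorf("not a mongodump archive")
	}
	if header, err = readDocument(r); err != nil {
		return nil, nil, err
	}
	for {
		doc, err := readDocument(r)
		if err != nil {
			return nil, nil, err
		}
		if doc == nil { // hit terminator-bytes
			return header, metadata, nil
		}
		metadata = append(metadata, doc)
	}
}
```

The `CRC` carried by each `eof-header` could then be verified with the standard library's `hash/crc64` package (`crc64.MakeTable(crc64.ECMA)`), accumulated over the raw bytes of every document in the namespace.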
diff --git a/vendor/github.com/mongodb/mongo-tools/common/bsonutil/bsonutil.go b/vendor/github.com/mongodb/mongo-tools/common/bsonutil/bsonutil.go index 1b47d06be..ace571c8a 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/bsonutil/bsonutil.go +++ b/vendor/github.com/mongodb/mongo-tools/common/bsonutil/bsonutil.go @@ -13,11 +13,13 @@ import ( "encoding/hex" "errors" "fmt" + "reflect" "strconv" "time" "github.com/mongodb/mongo-tools/common/json" "github.com/mongodb/mongo-tools/common/util" + errors2 "github.com/pkg/errors" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" ) @@ -470,3 +472,19 @@ func MtoD(m bson.M) bson.D { } return doc } + +// MarshalExtJSONReversible is a wrapper around bson.MarshalExtJSON function, +// but would return an error if it cannot be reversed by bson.UnmarshalExtJSON. +// +// It is preferred to be used in mongodump to avoid generating un-reversible ext JSON. +func MarshalExtJSONReversible(val interface{}, canonical bool, escapeHTML bool) ([]byte, error) { + jsonBytes, err := bson.MarshalExtJSON(val, canonical, escapeHTML) + if err != nil { + return nil, err + } + reversedVal := reflect.New(reflect.TypeOf(val)).Elem().Interface() + if unmarshalErr := bson.UnmarshalExtJSON(jsonBytes, canonical, &reversedVal); unmarshalErr != nil { + return nil, errors2.Wrap(unmarshalErr, "marshal is not reversible") + } + return jsonBytes, nil +} diff --git a/vendor/github.com/mongodb/mongo-tools/common/bsonutil/indexes.go b/vendor/github.com/mongodb/mongo-tools/common/bsonutil/indexes.go index 5690f2d3d..8f00257e1 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/bsonutil/indexes.go +++ b/vendor/github.com/mongodb/mongo-tools/common/bsonutil/indexes.go @@ -185,7 +185,7 @@ func CreateExtJSONString(doc interface{}) string { // want to throw an error when formatting informational messages. // An error would be inconsequential. JSONString := "" - JSONBytes, err := bson.MarshalExtJSON(doc, false, false) + JSONBytes, err := MarshalExtJSONReversible(doc, false, false) if err == nil { JSONString = string(JSONBytes) } diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/buffered_bulk.go b/vendor/github.com/mongodb/mongo-tools/common/db/buffered_bulk.go index 3abc199f2..3d87d2fb6 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/db/buffered_bulk.go +++ b/vendor/github.com/mongodb/mongo-tools/common/db/buffered_bulk.go @@ -74,7 +74,7 @@ func (bb *BufferedBulkInserter) SetUpsert(upsert bool) *BufferedBulkInserter { } // throw away the old bulk and init a new one -func (bb *BufferedBulkInserter) resetBulk() { +func (bb *BufferedBulkInserter) ResetBulk() { bb.writeModels = bb.writeModels[:0] bb.docCount = 0 bb.byteCount = 0 @@ -144,10 +144,19 @@ func (bb *BufferedBulkInserter) addModel(model mongo.WriteModel) (*mongo.BulkWri // Flush writes all buffered documents in one bulk write and then resets the buffer. func (bb *BufferedBulkInserter) Flush() (*mongo.BulkWriteResult, error) { + defer bb.ResetBulk() + return bb.flush() +} + +// TryFlush writes all buffered documents in one bulk write without resetting the buffer. 
+func (bb *BufferedBulkInserter) TryFlush() (*mongo.BulkWriteResult, error) { + return bb.flush() +} + +func (bb *BufferedBulkInserter) flush() (*mongo.BulkWriteResult, error) { if bb.docCount == 0 { return nil, nil } - defer bb.resetBulk() return bb.collection.BulkWrite(context.Background(), bb.writeModels, bb.bulkWriteOpts) } diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/db.go b/vendor/github.com/mongodb/mongo-tools/common/db/db.go index 1d7a4f1a5..fba9b94e0 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/db/db.go +++ b/vendor/github.com/mongodb/mongo-tools/common/db/db.go @@ -72,6 +72,9 @@ const ( ErrDuplicateKeyCode = 11000 ErrFailedDocumentValidation = 121 ErrUnacknowledgedWrite = "unacknowledged write" + + // ErrCannotInsertTimeseriesBucketsWithMixedSchema can be handled by turning TimeseriesBucketsWithMixedSchema off. + ErrCannotInsertTimeseriesBucketsWithMixedSchema = 408 ) var ignorableWriteErrorCodes = map[int]bool{ErrDuplicateKeyCode: true, ErrFailedDocumentValidation: true} @@ -452,6 +455,7 @@ func configureClient(opts options.ToolOptions) (*mongo.Client, error) { return nil, fmt.Errorf("CRL files are not supported on this platform") } + // #nosec G402 -- we intentionally allow old TLS versions for backwards compatibility tlsConfig := &tls.Config{} if opts.SSLAllowInvalidCert || opts.SSLAllowInvalidHost || opts.TLSInsecure { tlsConfig.InsecureSkipVerify = true @@ -547,6 +551,27 @@ func CanIgnoreError(err error) bool { return false } +// Returns a boolean based on whether the given error indicates that this timeseries collection needs to be updated to set `timeseriesBucketsMayHaveMixedSchemaData` to `true`. +func TimeseriesBucketNeedsMixedSchema(err error) bool { + if err == nil { + return false + } + + switch mongoErr := err.(type) { + case mongo.WriteError: + return mongoErr.Code == ErrCannotInsertTimeseriesBucketsWithMixedSchema + + case mongo.BulkWriteException: + for _, writeErr := range mongoErr.WriteErrors { + if writeErr.Code == ErrCannotInsertTimeseriesBucketsWithMixedSchema { + return true + } + } + return false + } + return false +} + // IsMMAPV1 returns whether the storage engine is MMAPV1. Also returns false // if the storage engine type cannot be determined for some reason. func IsMMAPV1(database *mongo.Database, collectionName string) (bool, error) { @@ -569,3 +594,12 @@ func IsMMAPV1(database *mongo.Database, collectionName string) (bool, error) { _, ok := collStats[numExtents] return ok, nil } + +// GetTimeseriesCollNameFromBucket returns a timeseries collection name from its bucket collection name. +func GetTimeseriesCollNameFromBucket(bucketCollName string) (string, error) { + collName := strings.TrimPrefix(bucketCollName, "system.buckets.") + if collName == bucketCollName || collName == "" { + return "", errors.New("invalid timeseries bucket name: " + bucketCollName) + } + return collName, nil +} diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/oplog.go b/vendor/github.com/mongodb/mongo-tools/common/db/oplog.go index bd16f52a1..8f26fc0a7 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/db/oplog.go +++ b/vendor/github.com/mongodb/mongo-tools/common/db/oplog.go @@ -20,18 +20,19 @@ type ApplyOpsResponse struct { // Oplog represents a MongoDB oplog document. 
type Oplog struct { - Timestamp primitive.Timestamp `bson:"ts"` - Term *int64 `bson:"t"` - Hash *int64 `bson:"h,omitempty"` - Version int `bson:"v"` - Operation string `bson:"op"` - Namespace string `bson:"ns"` - Object bson.D `bson:"o"` - Query bson.D `bson:"o2,omitempty"` - UI *primitive.Binary `bson:"ui,omitempty"` - LSID bson.Raw `bson:"lsid,omitempty"` - TxnNumber *int64 `bson:"txnNumber,omitempty"` - PrevOpTime bson.Raw `bson:"prevOpTime,omitempty"` + Timestamp primitive.Timestamp `bson:"ts"` + Term *int64 `bson:"t"` + Hash *int64 `bson:"h,omitempty"` + Version int `bson:"v"` + Operation string `bson:"op"` + Namespace string `bson:"ns"` + Object bson.D `bson:"o"` + Query bson.D `bson:"o2,omitempty"` + UI *primitive.Binary `bson:"ui,omitempty"` + LSID bson.Raw `bson:"lsid,omitempty"` + TxnNumber *int64 `bson:"txnNumber,omitempty"` + PrevOpTime bson.Raw `bson:"prevOpTime,omitempty"` + MultiOpType *int `bson:"multiOpType,omitempty"` } // OplogTailTime represents two ways of describing the "end" of the oplog at a diff --git a/vendor/github.com/mongodb/mongo-tools/common/db/optime.go b/vendor/github.com/mongodb/mongo-tools/common/db/optime.go index 6973894f9..668ded2ae 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/db/optime.go +++ b/vendor/github.com/mongodb/mongo-tools/common/db/optime.go @@ -2,6 +2,7 @@ package db import ( "fmt" + "github.com/mongodb/mongo-tools/common/util" "go.mongodb.org/mongo-driver/bson/primitive" ) diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/date.go b/vendor/github.com/mongodb/mongo-tools/common/json/date.go index 925952b46..0acb8e1d6 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/json/date.go +++ b/vendor/github.com/mongodb/mongo-tools/common/json/date.go @@ -8,8 +8,9 @@ package json import ( "fmt" - "github.com/mongodb/mongo-tools/common/util" "reflect" + + "github.com/mongodb/mongo-tools/common/util" ) // Transition functions for recognizing Date. diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/decode.go b/vendor/github.com/mongodb/mongo-tools/common/json/decode.go index 229dfee80..c87efe836 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/json/decode.go +++ b/vendor/github.com/mongodb/mongo-tools/common/json/decode.go @@ -5,7 +5,7 @@ // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 // // Based on github.com/golang/go by The Go Authors -// See THIRD-PARTY-NOTICES for original license terms. +// See cyclonedx.sbom.json for original license terms. // Represents JSON data structure using native Go types: booleans, floats, // strings, arrays, and maps. diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/encode.go b/vendor/github.com/mongodb/mongo-tools/common/json/encode.go index 68e378517..4229269fb 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/json/encode.go +++ b/vendor/github.com/mongodb/mongo-tools/common/json/encode.go @@ -5,7 +5,7 @@ // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 // // Based on github.com/golang/go by The Go Authors -// See THIRD-PARTY-NOTICES for original license terms. +// See cyclonedx.sbom.json for original license terms. // Package json implements encoding and decoding of JSON objects as defined in // RFC 4627. The mapping between JSON objects and Go values is described @@ -241,6 +241,9 @@ func (e *MarshalerError) Error() string { var hex = "0123456789abcdef" +// Any write on the encodeState cannot fail because we're writing to a +// `bytes.Buffer`, which never fails. 
+// // An encodeState encodes JSON into a bytes.Buffer. type encodeState struct { bytes.Buffer // accumulated output diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/fold.go b/vendor/github.com/mongodb/mongo-tools/common/json/fold.go index b573c8ba2..cb8161061 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/json/fold.go +++ b/vendor/github.com/mongodb/mongo-tools/common/json/fold.go @@ -5,7 +5,7 @@ // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 // // Based on github.com/golang/go by The Go Authors -// See THIRD-PARTY-NOTICES for original license terms. +// See cyclonedx.sbom.json for original license terms. package json diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/indent.go b/vendor/github.com/mongodb/mongo-tools/common/json/indent.go index 70ea7ed8e..96ed18fed 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/json/indent.go +++ b/vendor/github.com/mongodb/mongo-tools/common/json/indent.go @@ -5,7 +5,7 @@ // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 // // Based on github.com/golang/go by The Go Authors -// See THIRD-PARTY-NOTICES for original license terms. +// See cyclonedx.sbom.json for original license terms. package json diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/scanner.go b/vendor/github.com/mongodb/mongo-tools/common/json/scanner.go index 139b714cd..40482f0f7 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/json/scanner.go +++ b/vendor/github.com/mongodb/mongo-tools/common/json/scanner.go @@ -5,7 +5,7 @@ // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 // // Based on github.com/golang/go by The Go Authors -// See THIRD-PARTY-NOTICES for original license terms. +// See cyclonedx.sbom.json for original license terms. package json diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/stream.go b/vendor/github.com/mongodb/mongo-tools/common/json/stream.go index dda4351ee..84d841d4a 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/json/stream.go +++ b/vendor/github.com/mongodb/mongo-tools/common/json/stream.go @@ -5,7 +5,7 @@ // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 // // Based on github.com/golang/go by The Go Authors -// See THIRD-PARTY-NOTICES for original license terms. +// See cyclonedx.sbom.json for original license terms. package json diff --git a/vendor/github.com/mongodb/mongo-tools/common/json/tags.go b/vendor/github.com/mongodb/mongo-tools/common/json/tags.go index 763085f9f..d3ba73cff 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/json/tags.go +++ b/vendor/github.com/mongodb/mongo-tools/common/json/tags.go @@ -5,7 +5,7 @@ // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 // // Based on github.com/golang/go by The Go Authors -// See THIRD-PARTY-NOTICES for original license terms. +// See cyclonedx.sbom.json for original license terms. package json diff --git a/vendor/github.com/mongodb/mongo-tools/common/options/options.go b/vendor/github.com/mongodb/mongo-tools/common/options/options.go index 5f495a28c..4cbf413a8 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/options/options.go +++ b/vendor/github.com/mongodb/mongo-tools/common/options/options.go @@ -33,10 +33,7 @@ import ( // XXX Force these true as the Go driver supports them always. Once the // conditionals that depend on them are removed, these can be removed. 
-var ( - BuiltWithSSL = true - BuiltWithGSSAPI = true -) +var BuiltWithGSSAPI = true const IncompatibleArgsErrorFormat = "illegal argument combination: cannot specify %s and --uri" @@ -142,7 +139,7 @@ type URI struct { knownURIParameters []string extraOptionsRegistry []ExtraOptions - ConnString connstring.ConnString + ConnString *connstring.ConnString } // Struct holding connection-related options @@ -367,7 +364,7 @@ type URISetter interface { // SetOptionsFromURI provides a way for tools to fetch any options that were // set in the URI and set them on the ExtraOptions that they pass to the options // package. - SetOptionsFromURI(connstring.ConnString) error + SetOptionsFromURI(*connstring.ConnString) error } func (auth *Auth) RequiresExternalDB() bool { @@ -430,7 +427,7 @@ func (uri *URI) ParsedConnString() *connstring.ConnString { if uri.ConnectionString == "" { return nil } - return &uri.ConnString + return uri.ConnString } func (opts *ToolOptions) EnabledToolOptions() EnabledOptions { @@ -632,7 +629,7 @@ func (opts *ToolOptions) ParseConfigFile(args []string) error { func (opts *ToolOptions) setURIFromPositionalArg(args []string) ([]string, error) { newArgs := []string{} var foundURI bool - var parsedURI connstring.ConnString + var parsedURI *connstring.ConnString for _, arg := range args { if arg == "" { @@ -744,7 +741,7 @@ func (opts *ToolOptions) handleUnknownOption(option string, arg flags.SplitArgum // Some options (e.g. host and port) are more complicated. To check if a CLI option is set, // we check that it is not equal to its default value. To check that a URI option is set, // some options have an "OptionSet" field. -func (opts *ToolOptions) setOptionsFromURI(cs connstring.ConnString) error { +func (opts *ToolOptions) setOptionsFromURI(cs *connstring.ConnString) error { opts.URI.ConnString = cs if opts.enabledOptions.Connection { @@ -974,14 +971,6 @@ func (opts *ToolOptions) setOptionsFromURI(cs connstring.ConnString) error { return fmt.Errorf("loadBalanced cannot be set to true if the direct connection option is specified") } - if (cs.SSL || opts.UseSSL) && !BuiltWithSSL { - if strings.HasPrefix(cs.Original, "mongodb+srv") { - return fmt.Errorf("SSL enabled by default when using SRV but tool not built with SSL: " + - "SSL must be explicitly disabled with ssl=false in the connection string") - } - return fmt.Errorf("cannot use ssl: tool not built with SSL support") - } - if cs.RetryWritesSet { opts.RetryWrites = &cs.RetryWrites } diff --git a/vendor/github.com/mongodb/mongo-tools/common/txn/meta.go b/vendor/github.com/mongodb/mongo-tools/common/txn/meta.go index be2b2a7b4..c2993bfcf 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/txn/meta.go +++ b/vendor/github.com/mongodb/mongo-tools/common/txn/meta.go @@ -50,6 +50,12 @@ type Meta struct { // future if we change the db.Oplog.Object to bson.Raw, so the API is designed // with failure as a possibility. func NewMeta(op db.Oplog) (Meta, error) { + // If a vectored insert is within a retryable session, it will contain `lsid`, `txnNumber`, and `prevOpTime` fields + // and also a `multiOpType: 1` field which distinguishes vectored inserts from transactions. 
+ if op.MultiOpType != nil && *op.MultiOpType == 1 { + return Meta{}, nil + } + if op.LSID == nil || op.TxnNumber == nil || op.Operation != "c" { return Meta{}, nil } diff --git a/vendor/github.com/mongodb/mongo-tools/common/util/bool.go b/vendor/github.com/mongodb/mongo-tools/common/util/bool.go index bb37782e1..9c1407af4 100644 --- a/vendor/github.com/mongodb/mongo-tools/common/util/bool.go +++ b/vendor/github.com/mongodb/mongo-tools/common/util/bool.go @@ -7,8 +7,9 @@ package util import ( - "go.mongodb.org/mongo-driver/bson/primitive" "reflect" + + "go.mongodb.org/mongo-driver/bson/primitive" ) // IsTruthy returns true for values the server will interpret as "true". diff --git a/vendor/github.com/mongodb/mongo-tools/mongodump/metadata_dump.go b/vendor/github.com/mongodb/mongo-tools/mongodump/metadata_dump.go index a2990118d..2e6cd6332 100644 --- a/vendor/github.com/mongodb/mongo-tools/mongodump/metadata_dump.go +++ b/vendor/github.com/mongodb/mongo-tools/mongodump/metadata_dump.go @@ -11,6 +11,7 @@ import ( "fmt" "io" + "github.com/mongodb/mongo-tools/common/bsonutil" "github.com/mongodb/mongo-tools/common/db" "github.com/mongodb/mongo-tools/common/intents" "github.com/mongodb/mongo-tools/common/log" @@ -101,7 +102,7 @@ func (dump *MongoDump) dumpMetadata(intent *intents.Intent, buffer resettableOut } // Finally, we send the results to the writer as JSON bytes - jsonBytes, err := bson.MarshalExtJSON(meta, true, false) + jsonBytes, err := bsonutil.MarshalExtJSONReversible(meta, true, false) if err != nil { return fmt.Errorf("error marshalling metadata json for collection `%v`: %v", intent.Namespace(), err) } diff --git a/vendor/github.com/mongodb/mongo-tools/mongodump/options.go b/vendor/github.com/mongodb/mongo-tools/mongodump/options.go index 4f59a428b..d38a38e8e 100644 --- a/vendor/github.com/mongodb/mongo-tools/mongodump/options.go +++ b/vendor/github.com/mongodb/mongo-tools/mongodump/options.go @@ -57,7 +57,7 @@ func (inputOptions *InputOptions) GetQuery() ([]byte, error) { type OutputOptions struct { Out string `long:"out" value-name:"" short:"o" description:"output directory, or '-' for stdout (default: 'dump')"` Gzip bool `long:"gzip" description:"compress archive or collection output with Gzip"` - Oplog bool `long:"oplog" description:"use oplog for taking a point-in-time snapshot"` + Oplog bool `long:"oplog" description:"for taking a point-in-time snapshot on a replica set that is not part of a sharded cluster."` Archive string `long:"archive" value-name:"" optional:"true" optional-value:"-" description:"dump as an archive to the specified path. 
If flag is specified without a value, archive is written to stdout"` DumpDBUsersAndRoles bool `long:"dumpDbUsersAndRoles" description:"dump user and role definitions for the specified database"` ExcludedCollections []string `long:"excludeCollection" value-name:"" description:"collection to exclude from the dump (may be specified multiple times to exclude additional collections)"` diff --git a/vendor/github.com/mongodb/mongo-tools/mongodump/prepare.go b/vendor/github.com/mongodb/mongo-tools/mongodump/prepare.go index fb45207e5..b9448b86f 100644 --- a/vendor/github.com/mongodb/mongo-tools/mongodump/prepare.go +++ b/vendor/github.com/mongodb/mongo-tools/mongodump/prepare.go @@ -24,7 +24,6 @@ import ( "github.com/mongodb/mongo-tools/common/intents" "github.com/mongodb/mongo-tools/common/log" "github.com/mongodb/mongo-tools/common/util" - "golang.org/x/exp/slices" ) @@ -223,6 +222,7 @@ func (dump *MongoDump) outputPath(dbName, colName string) string { escapedColName := util.EscapeCollectionName(colName) if len(escapedColName) > 238 { colNameTruncated := escapedColName[:208] + // #nosec G401 -- we do not use this digest algorithm in a security-sensitive way. colNameHashBytes := sha1.Sum([]byte(colName)) colNameHashBase64 := base64.RawURLEncoding.EncodeToString(colNameHashBytes[:]) diff --git a/vendor/github.com/mongodb/mongo-tools/mongorestore/metadata.go b/vendor/github.com/mongodb/mongo-tools/mongorestore/metadata.go index dac7b6d5c..50403dc3b 100644 --- a/vendor/github.com/mongodb/mongo-tools/mongorestore/metadata.go +++ b/vendor/github.com/mongodb/mongo-tools/mongorestore/metadata.go @@ -7,6 +7,7 @@ package mongorestore import ( + "context" "encoding/hex" "fmt" "strings" @@ -618,3 +619,17 @@ func (restore *MongoRestore) DropCollection(intent *intents.Intent) error { } return nil } + +// EnableMixedSchemaInTimeseriesBucket runs collMod to turn on timeseriesBucketsMayHaveMixedSchemaData +// for a timeseries collection. +func (restore *MongoRestore) EnableMixedSchemaInTimeseriesBucket(dbName, colName string) error { + session, err := restore.SessionProvider.GetSession() + if err != nil { + return fmt.Errorf("error establishing connection: %v", err) + } + + return session.Database(dbName).RunCommand(context.Background(), bson.D{ + {"collMod", colName}, + {"timeseriesBucketsMayHaveMixedSchemaData", true}, + }).Err() +} diff --git a/vendor/github.com/mongodb/mongo-tools/mongorestore/options.go b/vendor/github.com/mongodb/mongo-tools/mongorestore/options.go index 6a73ca9bb..3f6c036e6 100644 --- a/vendor/github.com/mongodb/mongo-tools/mongorestore/options.go +++ b/vendor/github.com/mongodb/mongo-tools/mongorestore/options.go @@ -51,7 +51,7 @@ const ( // InputOptions defines the set of options to use in configuring the restore process. type InputOptions struct { Objcheck bool `long:"objcheck" description:"validate all objects before inserting"` - OplogReplay bool `long:"oplogReplay" description:"replay oplog for point-in-time restore"` + OplogReplay bool `long:"oplogReplay" description:"for recovering a point-in-time snapshot on a replica set that is not part of a sharded cluster."` OplogLimit string `long:"oplogLimit" value-name:"[:ordinal]" description:"only include oplog entries before the provided Timestamp"` OplogFile string `long:"oplogFile" value-name:"" description:"oplog file to use for replay of oplog"` Archive string `long:"archive" value-name:"" optional:"true" optional-value:"-" description:"restore dump from the specified archive file. 
If flag is specified without a value, archive is read from stdin"` diff --git a/vendor/github.com/mongodb/mongo-tools/mongorestore/restore.go b/vendor/github.com/mongodb/mongo-tools/mongorestore/restore.go index 15b29a408..8ee79e26a 100644 --- a/vendor/github.com/mongodb/mongo-tools/mongorestore/restore.go +++ b/vendor/github.com/mongodb/mongo-tools/mongorestore/restore.go @@ -20,6 +20,7 @@ import ( "github.com/mongodb/mongo-tools/common/options" "github.com/mongodb/mongo-tools/common/progress" "github.com/mongodb/mongo-tools/common/util" + "github.com/pkg/errors" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" @@ -530,7 +531,24 @@ func (restore *MongoRestore) RestoreCollectionToDB(dbName, colName string, watchProgressor.Set(file.Pos()) } // flush the remaining docs - result.combineWith(NewResultFromBulkResult(bulk.Flush())) + bwResult, bwErr := bulk.TryFlush() + defer bulk.ResetBulk() + + if db.TimeseriesBucketNeedsMixedSchema(bwErr) { + // Modify the timeseries collection and retry flushing the bulk writer. + logicalColName, nameErr := db.GetTimeseriesCollNameFromBucket(colName) + if nameErr != nil { + resultChan <- result.withErr(nameErr) + return + } + + if collModErr := restore.EnableMixedSchemaInTimeseriesBucket(dbName, logicalColName); collModErr != nil { + resultChan <- result.withErr(errors.Wrap(collModErr, "failed to enable mixed schema in a timeseries bucket")) + return + } + bwResult, bwErr = bulk.TryFlush() + } + result.combineWith(NewResultFromBulkResult(bwResult, bwErr)) resultChan <- result.withErr(db.FilterError(restore.OutputOptions.StopOnError, result.Err)) return }() diff --git a/vendor/github.com/montanaflynn/stats/.gitignore b/vendor/github.com/montanaflynn/stats/.gitignore index e0a38e1c1..75a2a3a3b 100644 --- a/vendor/github.com/montanaflynn/stats/.gitignore +++ b/vendor/github.com/montanaflynn/stats/.gitignore @@ -1,5 +1,7 @@ coverage.out +coverage.txt release-notes.txt .directory .chglog -.vscode \ No newline at end of file +.vscode +.DS_Store \ No newline at end of file diff --git a/vendor/github.com/montanaflynn/stats/.travis.yml b/vendor/github.com/montanaflynn/stats/.travis.yml deleted file mode 100644 index 28118fbfe..000000000 --- a/vendor/github.com/montanaflynn/stats/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -language: go -go: - - "1.7" - - "1.8" - - "1.9" - - "1.10" - - "1.11" - - "1.12" - - "1.13" - - "1.14" - - "1.15" - - "1.16" - - stable - - master -arch: - - amd64 - - arm64 -before_install: - - go get github.com/mattn/goveralls -script: - - go test -v -covermode=count -coverprofile=coverage.out -after_success: - - $GOPATH/bin/goveralls -coverprofile=coverage.out -service=travis-ci -notifications: - email: - recipients: - - montana@montanaflynn.me - on_success: change - on_failure: always diff --git a/vendor/github.com/montanaflynn/stats/CHANGELOG.md b/vendor/github.com/montanaflynn/stats/CHANGELOG.md index ad842a540..73c3b782b 100644 --- a/vendor/github.com/montanaflynn/stats/CHANGELOG.md +++ b/vendor/github.com/montanaflynn/stats/CHANGELOG.md @@ -1,598 +1,534 @@ + +## [Unreleased] - -## [v0.6.6](https://github.com/montanaflynn/stats/compare/v0.6.5...v0.6.6) (2021-04-26) + +## [v0.7.1] - 2023-05-11 ### Add +- Add describe functions ([#77](https://github.com/montanaflynn/stats/issues/77)) -* Add support for string and io.Reader in LoadRawData (pr [#68](https://github.com/montanaflynn/stats/issues/68)) -* Add latest versions of Go to test against +### Update +- Update .gitignore +- Update README.md, LICENSE and DOCUMENTATION.md 
files +- Update github action go workflow to run on push -### Use -* Use math.Sqrt in StandardDeviation (PR [#64](https://github.com/montanaflynn/stats/issues/64)) + +## [v0.7.0] - 2023-01-08 +### Add +- Add geometric distribution functions ([#75](https://github.com/montanaflynn/stats/issues/75)) +- Add GitHub action go workflow +### Remove +- Remove travis CI config - -## [v0.6.5](https://github.com/montanaflynn/stats/compare/v0.6.4...v0.6.5) (2021-02-21) +### Update +- Update changelog with v0.7.0 changes +- Update changelog with v0.7.0 changes +- Update github action go workflow +- Update geometric distribution tests -### Add -* Add Float64Data.Quartiles documentation + +## [v0.6.6] - 2021-04-26 +### Add +- Add support for string and io.Reader in LoadRawData (pr [#68](https://github.com/montanaflynn/stats/issues/68)) +- Add latest versions of Go to test against ### Update +- Update changelog with v0.6.6 changes -* Update changelog with v0.6.5 changes - +### Use +- Use math.Sqrt in StandardDeviation (PR [#64](https://github.com/montanaflynn/stats/issues/64)) - -## [v0.6.4](https://github.com/montanaflynn/stats/compare/v0.6.3...v0.6.4) (2021-02-21) + +## [v0.6.5] - 2021-02-21 ### Add - -* Add Quartiles method to Float64Data type (issue [#60](https://github.com/montanaflynn/stats/issues/60)) +- Add Float64Data.Quartiles documentation +- Add Quartiles method to Float64Data type (issue [#60](https://github.com/montanaflynn/stats/issues/60)) ### Fix - -* Fix make release changelog command and add changelog history -* Fix failing tests due to precision errors on arm64 ([#58](https://github.com/montanaflynn/stats/issues/58)) +- Fix make release changelog command and add changelog history ### Update +- Update changelog with v0.6.5 changes +- Update changelog with v0.6.4 changes +- Update README.md links to CHANGELOG.md and DOCUMENTATION.md +- Update README.md and Makefile with new release commands -* Update changelog with v0.6.4 changes -* Update README.md links to CHANGELOG.md and DOCUMENTATION.md -* Update README.md and Makefile with new release commands -* Update changelog with v0.6.4 changes -* Update examples directory to include a README.md used for synopsis -* Update go.mod to include go version where modules are enabled by default -* Update changelog with v0.6.3 changes + +## [v0.6.4] - 2021-01-13 +### Fix +- Fix failing tests due to precision errors on arm64 ([#58](https://github.com/montanaflynn/stats/issues/58)) - -## [v0.6.3](https://github.com/montanaflynn/stats/compare/v0.6.2...v0.6.3) (2020-02-18) +### Update +- Update changelog with v0.6.4 changes +- Update examples directory to include a README.md used for synopsis +- Update go.mod to include go version where modules are enabled by default +- Update changelog with v0.6.3 changes -### Add -* Add creating and committing changelog to Makefile release directive -* Add release-notes.txt and .chglog directory to .gitignore + +## [v0.6.3] - 2020-02-18 +### Add +- Add creating and committing changelog to Makefile release directive +- Add release-notes.txt and .chglog directory to .gitignore ### Update - -* Update exported tests to use import for better example documentation -* Update documentation using godoc2md -* Update changelog with v0.6.2 release +- Update exported tests to use import for better example documentation +- Update documentation using godoc2md +- Update changelog with v0.6.2 release -## [v0.6.2](https://github.com/montanaflynn/stats/compare/v0.6.1...v0.6.2) (2020-02-18) - +## [v0.6.2] - 2020-02-18 ### Fix - -* Fix 
linting errcheck warnings in go benchmarks +- Fix linting errcheck warnings in go benchmarks ### Update - -* Update Makefile release directive to use correct release name +- Update Makefile release directive to use correct release name -## [v0.6.1](https://github.com/montanaflynn/stats/compare/v0.6.0...v0.6.1) (2020-02-18) - +## [v0.6.1] - 2020-02-18 ### Add - -* Add StableSample function signature to readme +- Add StableSample function signature to readme ### Fix - -* Fix linting warnings for normal distribution functions formatting and tests +- Fix linting warnings for normal distribution functions formatting and tests ### Update - -* Update documentation links and rename DOC.md to DOCUMENTATION.md -* Update README with link to pkg.go.dev reference and release section -* Update Makefile with new changelog, docs, and release directives -* Update DOC.md links to GitHub source code -* Update doc.go comment and add DOC.md package reference file -* Update changelog using git-chglog +- Update documentation links and rename DOC.md to DOCUMENTATION.md +- Update README with link to pkg.go.dev reference and release section +- Update Makefile with new changelog, docs, and release directives +- Update DOC.md links to GitHub source code +- Update doc.go comment and add DOC.md package reference file +- Update changelog using git-chglog -## [v0.6.0](https://github.com/montanaflynn/stats/compare/v0.5.0...v0.6.0) (2020-02-17) - +## [v0.6.0] - 2020-02-17 ### Add - -* Add Normal Distribution Functions ([#56](https://github.com/montanaflynn/stats/issues/56)) -* Add previous versions of Go to travis CI config -* Add check for distinct values in Mode function ([#51](https://github.com/montanaflynn/stats/issues/51)) -* Add StableSample function ([#48](https://github.com/montanaflynn/stats/issues/48)) -* Add doc.go file to show description and usage on godoc.org -* Add comments to new error and legacy error variables -* Add ExampleRound function to tests -* Add go.mod file for module support -* Add Sigmoid, SoftMax and Entropy methods and tests -* Add Entropy documentation, example and benchmarks -* Add Entropy function ([#44](https://github.com/montanaflynn/stats/issues/44)) +- Add Normal Distribution Functions ([#56](https://github.com/montanaflynn/stats/issues/56)) +- Add previous versions of Go to travis CI config +- Add check for distinct values in Mode function ([#51](https://github.com/montanaflynn/stats/issues/51)) +- Add StableSample function ([#48](https://github.com/montanaflynn/stats/issues/48)) +- Add doc.go file to show description and usage on godoc.org +- Add comments to new error and legacy error variables +- Add ExampleRound function to tests +- Add go.mod file for module support +- Add Sigmoid, SoftMax and Entropy methods and tests +- Add Entropy documentation, example and benchmarks +- Add Entropy function ([#44](https://github.com/montanaflynn/stats/issues/44)) ### Fix - -* Fix percentile when only one element ([#47](https://github.com/montanaflynn/stats/issues/47)) -* Fix AutoCorrelation name in comments and remove unneeded Sprintf +- Fix percentile when only one element ([#47](https://github.com/montanaflynn/stats/issues/47)) +- Fix AutoCorrelation name in comments and remove unneeded Sprintf ### Improve - -* Improve documentation section with command comments +- Improve documentation section with command comments ### Remove - -* Remove very old versions of Go in travis CI config -* Remove boolean comparison to get rid of gometalinter warning +- Remove very old versions of Go in travis CI 
config +- Remove boolean comparison to get rid of gometalinter warning ### Update - -* Update license dates -* Update Distance functions signatures to use Float64Data -* Update Sigmoid examples -* Update error names with backward compatibility +- Update license dates +- Update Distance functions signatures to use Float64Data +- Update Sigmoid examples +- Update error names with backward compatibility ### Use - -* Use relative link to examples/main.go -* Use a single var block for exported errors +- Use relative link to examples/main.go +- Use a single var block for exported errors -## [v0.5.0](https://github.com/montanaflynn/stats/compare/v0.4.0...v0.5.0) (2019-01-16) - +## [v0.5.0] - 2019-01-16 ### Add - -* Add Sigmoid and Softmax functions +- Add Sigmoid and Softmax functions ### Fix - -* Fix syntax highlighting and add CumulativeSum func +- Fix syntax highlighting and add CumulativeSum func -## [v0.4.0](https://github.com/montanaflynn/stats/compare/0.3.0...v0.4.0) (2019-01-14) - +## [v0.4.0] - 2019-01-14 ### Add - -* Add goreport badge and documentation section to README.md -* Add Examples to test files -* Add AutoCorrelation and nist tests -* Add String method to statsErr type -* Add Y coordinate error for ExponentialRegression -* Add syntax highlighting ([#43](https://github.com/montanaflynn/stats/issues/43)) -* Add CumulativeSum ([#40](https://github.com/montanaflynn/stats/issues/40)) -* Add more tests and rename distance files -* Add coverage and benchmarks to azure pipeline -* Add go tests to azure pipeline +- Add goreport badge and documentation section to README.md +- Add Examples to test files +- Add AutoCorrelation and nist tests +- Add String method to statsErr type +- Add Y coordinate error for ExponentialRegression +- Add syntax highlighting ([#43](https://github.com/montanaflynn/stats/issues/43)) +- Add CumulativeSum ([#40](https://github.com/montanaflynn/stats/issues/40)) +- Add more tests and rename distance files +- Add coverage and benchmarks to azure pipeline +- Add go tests to azure pipeline ### Change - -* Change travis tip alias to master -* Change codecov to coveralls for code coverage +- Change travis tip alias to master +- Change codecov to coveralls for code coverage ### Fix - -* Fix a few lint warnings -* Fix example error +- Fix a few lint warnings +- Fix example error ### Improve - -* Improve test coverage of distance functions +- Improve test coverage of distance functions ### Only - -* Only run travis on stable and tip versions -* Only check code coverage on tip +- Only run travis on stable and tip versions +- Only check code coverage on tip ### Remove - -* Remove azure CI pipeline -* Remove unnecessary type conversions +- Remove azure CI pipeline +- Remove unnecessary type conversions ### Return - -* Return EmptyInputErr instead of EmptyInput +- Return EmptyInputErr instead of EmptyInput ### Set - -* Set up CI with Azure Pipelines +- Set up CI with Azure Pipelines -## [0.3.0](https://github.com/montanaflynn/stats/compare/0.2.0...0.3.0) (2017-12-02) - +## [0.3.0] - 2017-12-02 ### Add - -* Add Chebyshev, Manhattan, Euclidean and Minkowski distance functions ([#35](https://github.com/montanaflynn/stats/issues/35)) -* Add function for computing chebyshev distance. 
([#34](https://github.com/montanaflynn/stats/issues/34)) -* Add support for time.Duration -* Add LoadRawData to docs and examples -* Add unit test for edge case that wasn't covered -* Add unit tests for edge cases that weren't covered -* Add pearson alias delegating to correlation -* Add CovariancePopulation to Float64Data -* Add pearson product-moment correlation coefficient -* Add population covariance -* Add random slice benchmarks -* Add all applicable functions as methods to Float64Data type -* Add MIT license badge -* Add link to examples/methods.go -* Add Protips for usage and documentation sections -* Add tests for rounding up -* Add webdoc target and remove linting from test target -* Add example usage and consolidate contributing information +- Add Chebyshev, Manhattan, Euclidean and Minkowski distance functions ([#35](https://github.com/montanaflynn/stats/issues/35)) +- Add function for computing chebyshev distance. ([#34](https://github.com/montanaflynn/stats/issues/34)) +- Add support for time.Duration +- Add LoadRawData to docs and examples +- Add unit test for edge case that wasn't covered +- Add unit tests for edge cases that weren't covered +- Add pearson alias delegating to correlation +- Add CovariancePopulation to Float64Data +- Add pearson product-moment correlation coefficient +- Add population covariance +- Add random slice benchmarks +- Add all applicable functions as methods to Float64Data type +- Add MIT license badge +- Add link to examples/methods.go +- Add Protips for usage and documentation sections +- Add tests for rounding up +- Add webdoc target and remove linting from test target +- Add example usage and consolidate contributing information ### Added - -* Added MedianAbsoluteDeviation +- Added MedianAbsoluteDeviation ### Annotation - -* Annotation spelling error +- Annotation spelling error ### Auto - -* auto commit -* auto commit +- auto commit +- auto commit ### Calculate - -* Calculate correlation with sdev and covp +- Calculate correlation with sdev and covp ### Clean - -* Clean up README.md and add info for offline docs +- Clean up README.md and add info for offline docs ### Consolidated - -* Consolidated all error values. +- Consolidated all error values. ### Fix - -* Fix Percentile logic -* Fix InterQuartileRange method test -* Fix zero percent bug and add test -* Fix usage example output typos +- Fix Percentile logic +- Fix InterQuartileRange method test +- Fix zero percent bug and add test +- Fix usage example output typos ### Improve - -* Improve bounds checking in Percentile -* Improve error log messaging +- Improve bounds checking in Percentile +- Improve error log messaging ### Imput - -* Imput -> Input +- Imput -> Input ### Include - -* Include alternative way to set Float64Data in example +- Include alternative way to set Float64Data in example ### Make - -* Make various changes to README.md +- Make various changes to README.md ### Merge - -* Merge branch 'master' of github.com:montanaflynn/stats -* Merge master +- Merge branch 'master' of github.com:montanaflynn/stats +- Merge master ### Mode - -* Mode calculation fix and tests +- Mode calculation fix and tests ### Realized - -* Realized the obvious efficiency gains of ignoring the unique numbers at the beginning of the slice. Benchmark joy ensued. +- Realized the obvious efficiency gains of ignoring the unique numbers at the beginning of the slice. Benchmark joy ensued. 
### Refactor - -* Refactor testing of Round() -* Refactor setting Coordinate y field using Exp in place of Pow -* Refactor Makefile and add docs target +- Refactor testing of Round() +- Refactor setting Coordinate y field using Exp in place of Pow +- Refactor Makefile and add docs target ### Remove - -* Remove deep links to types and functions +- Remove deep links to types and functions ### Rename - -* Rename file from types to data +- Rename file from types to data ### Retrieve - -* Retrieve InterQuartileRange for the Float64Data. +- Retrieve InterQuartileRange for the Float64Data. ### Split - -* Split up stats.go into separate files +- Split up stats.go into separate files ### Support - -* Support more types on LoadRawData() ([#36](https://github.com/montanaflynn/stats/issues/36)) +- Support more types on LoadRawData() ([#36](https://github.com/montanaflynn/stats/issues/36)) ### Switch - -* Switch default and check targets +- Switch default and check targets ### Update - -* Update Readme -* Update example methods and some text -* Update README and include Float64Data type method examples +- Update Readme +- Update example methods and some text +- Update README and include Float64Data type method examples ### Pull Requests - -* Merge pull request [#32](https://github.com/montanaflynn/stats/issues/32) from a-robinson/percentile -* Merge pull request [#30](https://github.com/montanaflynn/stats/issues/30) from montanaflynn/fix-test -* Merge pull request [#29](https://github.com/montanaflynn/stats/issues/29) from edupsousa/master -* Merge pull request [#27](https://github.com/montanaflynn/stats/issues/27) from andrey-yantsen/fix-percentile-out-of-bounds -* Merge pull request [#25](https://github.com/montanaflynn/stats/issues/25) from kazhuravlev/patch-1 -* Merge pull request [#22](https://github.com/montanaflynn/stats/issues/22) from JanBerktold/time-duration -* Merge pull request [#24](https://github.com/montanaflynn/stats/issues/24) from alouche/master -* Merge pull request [#21](https://github.com/montanaflynn/stats/issues/21) from brydavis/master -* Merge pull request [#19](https://github.com/montanaflynn/stats/issues/19) from ginodeis/mode-bug -* Merge pull request [#17](https://github.com/montanaflynn/stats/issues/17) from Kunde21/master -* Merge pull request [#3](https://github.com/montanaflynn/stats/issues/3) from montanaflynn/master -* Merge pull request [#2](https://github.com/montanaflynn/stats/issues/2) from montanaflynn/master -* Merge pull request [#13](https://github.com/montanaflynn/stats/issues/13) from toashd/pearson -* Merge pull request [#12](https://github.com/montanaflynn/stats/issues/12) from alixaxel/MAD -* Merge pull request [#1](https://github.com/montanaflynn/stats/issues/1) from montanaflynn/master -* Merge pull request [#11](https://github.com/montanaflynn/stats/issues/11) from Kunde21/modeMemReduce -* Merge pull request [#10](https://github.com/montanaflynn/stats/issues/10) from Kunde21/ModeRewrite +- Merge pull request [#32](https://github.com/montanaflynn/stats/issues/32) from a-robinson/percentile +- Merge pull request [#30](https://github.com/montanaflynn/stats/issues/30) from montanaflynn/fix-test +- Merge pull request [#29](https://github.com/montanaflynn/stats/issues/29) from edupsousa/master +- Merge pull request [#27](https://github.com/montanaflynn/stats/issues/27) from andrey-yantsen/fix-percentile-out-of-bounds +- Merge pull request [#25](https://github.com/montanaflynn/stats/issues/25) from kazhuravlev/patch-1 +- Merge pull request 
[#22](https://github.com/montanaflynn/stats/issues/22) from JanBerktold/time-duration +- Merge pull request [#24](https://github.com/montanaflynn/stats/issues/24) from alouche/master +- Merge pull request [#21](https://github.com/montanaflynn/stats/issues/21) from brydavis/master +- Merge pull request [#19](https://github.com/montanaflynn/stats/issues/19) from ginodeis/mode-bug +- Merge pull request [#17](https://github.com/montanaflynn/stats/issues/17) from Kunde21/master +- Merge pull request [#3](https://github.com/montanaflynn/stats/issues/3) from montanaflynn/master +- Merge pull request [#2](https://github.com/montanaflynn/stats/issues/2) from montanaflynn/master +- Merge pull request [#13](https://github.com/montanaflynn/stats/issues/13) from toashd/pearson +- Merge pull request [#12](https://github.com/montanaflynn/stats/issues/12) from alixaxel/MAD +- Merge pull request [#1](https://github.com/montanaflynn/stats/issues/1) from montanaflynn/master +- Merge pull request [#11](https://github.com/montanaflynn/stats/issues/11) from Kunde21/modeMemReduce +- Merge pull request [#10](https://github.com/montanaflynn/stats/issues/10) from Kunde21/ModeRewrite -## [0.2.0](https://github.com/montanaflynn/stats/compare/0.1.0...0.2.0) (2015-10-14) - +## [0.2.0] - 2015-10-14 ### Add - -* Add Makefile with gometalinter, testing, benchmarking and coverage report targets -* Add comments describing functions and structs -* Add Correlation func -* Add Covariance func -* Add tests for new function shortcuts -* Add StandardDeviation function as a shortcut to StandardDeviationPopulation -* Add Float64Data and Series types +- Add Makefile with gometalinter, testing, benchmarking and coverage report targets +- Add comments describing functions and structs +- Add Correlation func +- Add Covariance func +- Add tests for new function shortcuts +- Add StandardDeviation function as a shortcut to StandardDeviationPopulation +- Add Float64Data and Series types ### Change - -* Change Sample to return a standard []float64 type +- Change Sample to return a standard []float64 type ### Fix - -* Fix broken link to Makefile -* Fix broken link and simplify code coverage reporting command -* Fix go vet warning about printf type placeholder -* Fix failing codecov test coverage reporting -* Fix link to CHANGELOG.md +- Fix broken link to Makefile +- Fix broken link and simplify code coverage reporting command +- Fix go vet warning about printf type placeholder +- Fix failing codecov test coverage reporting +- Fix link to CHANGELOG.md ### Fixed - -* Fixed typographical error, changed accomdate to accommodate in README. +- Fixed typographical error, changed accomdate to accommodate in README. 
### Include - -* Include Variance and StandardDeviation shortcuts +- Include Variance and StandardDeviation shortcuts ### Pass - -* Pass gometalinter +- Pass gometalinter ### Refactor - -* Refactor Variance function to be the same as population variance +- Refactor Variance function to be the same as population variance ### Release - -* Release version 0.2.0 +- Release version 0.2.0 ### Remove - -* Remove unneeded do packages and update cover URL -* Remove sudo from pip install +- Remove unneeded do packages and update cover URL +- Remove sudo from pip install ### Reorder - -* Reorder functions and sections +- Reorder functions and sections ### Revert - -* Revert to legacy containers to preserve go1.1 testing +- Revert to legacy containers to preserve go1.1 testing ### Switch - -* Switch from legacy to container-based CI infrastructure +- Switch from legacy to container-based CI infrastructure ### Update - -* Update contributing instructions and mention Makefile +- Update contributing instructions and mention Makefile ### Pull Requests - -* Merge pull request [#5](https://github.com/montanaflynn/stats/issues/5) from orthographic-pedant/spell_check/accommodate +- Merge pull request [#5](https://github.com/montanaflynn/stats/issues/5) from orthographic-pedant/spell_check/accommodate -## [0.1.0](https://github.com/montanaflynn/stats/compare/0.0.9...0.1.0) (2015-08-19) - +## [0.1.0] - 2015-08-19 ### Add - -* Add CONTRIBUTING.md +- Add CONTRIBUTING.md ### Rename - -* Rename functions while preserving backwards compatibility +- Rename functions while preserving backwards compatibility -## 0.0.9 (2015-08-18) - +## 0.0.9 - 2015-08-18 ### Add - -* Add HarmonicMean func -* Add GeometricMean func -* Add .gitignore to avoid commiting test coverage report -* Add Outliers stuct and QuantileOutliers func -* Add Interquartile Range, Midhinge and Trimean examples -* Add Trimean -* Add Midhinge -* Add Inter Quartile Range -* Add a unit test to check for an empty slice error -* Add Quantiles struct and Quantile func -* Add more tests and fix a typo -* Add Golang 1.5 to build tests -* Add a standard MIT license file -* Add basic benchmarking -* Add regression models -* Add codecov token -* Add codecov -* Add check for slices with a single item -* Add coverage tests -* Add back previous Go versions to Travis CI -* Add Travis CI -* Add GoDoc badge -* Add Percentile and Float64ToInt functions -* Add another rounding test for whole numbers -* Add build status badge -* Add code coverage badge -* Add test for NaN, achieving 100% code coverage -* Add round function -* Add standard deviation function -* Add sum function +- Add HarmonicMean func +- Add GeometricMean func +- Add .gitignore to avoid commiting test coverage report +- Add Outliers stuct and QuantileOutliers func +- Add Interquartile Range, Midhinge and Trimean examples +- Add Trimean +- Add Midhinge +- Add Inter Quartile Range +- Add a unit test to check for an empty slice error +- Add Quantiles struct and Quantile func +- Add more tests and fix a typo +- Add Golang 1.5 to build tests +- Add a standard MIT license file +- Add basic benchmarking +- Add regression models +- Add codecov token +- Add codecov +- Add check for slices with a single item +- Add coverage tests +- Add back previous Go versions to Travis CI +- Add Travis CI +- Add GoDoc badge +- Add Percentile and Float64ToInt functions +- Add another rounding test for whole numbers +- Add build status badge +- Add code coverage badge +- Add test for NaN, achieving 100% code coverage +- Add round 
function +- Add standard deviation function +- Add sum function ### Add - -* add tests for sample -* add sample +- add tests for sample +- add sample ### Added - -* Added sample and population variance and deviation functions -* Added README +- Added sample and population variance and deviation functions +- Added README ### Adjust - -* Adjust API ordering +- Adjust API ordering ### Avoid - -* Avoid unintended consequence of using sort +- Avoid unintended consequence of using sort ### Better - -* Better performing min/max -* Better description +- Better performing min/max +- Better description ### Change - -* Change package path to potentially fix a bug in earlier versions of Go +- Change package path to potentially fix a bug in earlier versions of Go ### Clean - -* Clean up README and add some more information -* Clean up test error +- Clean up README and add some more information +- Clean up test error ### Consistent - -* Consistent empty slice error messages -* Consistent var naming -* Consistent func declaration +- Consistent empty slice error messages +- Consistent var naming +- Consistent func declaration ### Convert - -* Convert ints to floats +- Convert ints to floats ### Duplicate - -* Duplicate packages for all versions +- Duplicate packages for all versions ### Export - -* Export Coordinate struct fields +- Export Coordinate struct fields ### First - -* First commit +- First commit ### Fix - -* Fix copy pasta mistake testing the wrong function -* Fix error message -* Fix usage output and edit API doc section -* Fix testing edgecase where map was in wrong order -* Fix usage example -* Fix usage examples +- Fix copy pasta mistake testing the wrong function +- Fix error message +- Fix usage output and edit API doc section +- Fix testing edgecase where map was in wrong order +- Fix usage example +- Fix usage examples ### Include - -* Include the Nearest Rank method of calculating percentiles +- Include the Nearest Rank method of calculating percentiles ### More - -* More commenting +- More commenting ### Move - -* Move GoDoc link to top +- Move GoDoc link to top ### Redirect - -* Redirect kills newer versions of Go +- Redirect kills newer versions of Go ### Refactor - -* Refactor code and error checking +- Refactor code and error checking ### Remove - -* Remove unnecassary typecasting in sum func -* Remove cover since it doesn't work for later versions of go -* Remove golint and gocoveralls +- Remove unnecassary typecasting in sum func +- Remove cover since it doesn't work for later versions of go +- Remove golint and gocoveralls ### Rename - -* Rename StandardDev to StdDev -* Rename StandardDev to StdDev +- Rename StandardDev to StdDev +- Rename StandardDev to StdDev ### Return - -* Return errors for all functions +- Return errors for all functions ### Run - -* Run go fmt to clean up formatting +- Run go fmt to clean up formatting ### Simplify - -* Simplify min/max function +- Simplify min/max function ### Start - -* Start with minimal tests +- Start with minimal tests ### Switch - -* Switch wercker to travis and update todos +- Switch wercker to travis and update todos ### Table - -* table testing style +- table testing style ### Update - -* Update README and move the example main.go into it's own file -* Update TODO list -* Update README -* Update usage examples and todos +- Update README and move the example main.go into it's own file +- Update TODO list +- Update README +- Update usage examples and todos ### Use - -* Use codecov the recommended way -* Use correct string 
formatting types +- Use codecov the recommended way +- Use correct string formatting types ### Pull Requests - -* Merge pull request [#4](https://github.com/montanaflynn/stats/issues/4) from saromanov/sample - +- Merge pull request [#4](https://github.com/montanaflynn/stats/issues/4) from saromanov/sample + + +[Unreleased]: https://github.com/montanaflynn/stats/compare/v0.7.1...HEAD +[v0.7.1]: https://github.com/montanaflynn/stats/compare/v0.7.0...v0.7.1 +[v0.7.0]: https://github.com/montanaflynn/stats/compare/v0.6.6...v0.7.0 +[v0.6.6]: https://github.com/montanaflynn/stats/compare/v0.6.5...v0.6.6 +[v0.6.5]: https://github.com/montanaflynn/stats/compare/v0.6.4...v0.6.5 +[v0.6.4]: https://github.com/montanaflynn/stats/compare/v0.6.3...v0.6.4 +[v0.6.3]: https://github.com/montanaflynn/stats/compare/v0.6.2...v0.6.3 +[v0.6.2]: https://github.com/montanaflynn/stats/compare/v0.6.1...v0.6.2 +[v0.6.1]: https://github.com/montanaflynn/stats/compare/v0.6.0...v0.6.1 +[v0.6.0]: https://github.com/montanaflynn/stats/compare/v0.5.0...v0.6.0 +[v0.5.0]: https://github.com/montanaflynn/stats/compare/v0.4.0...v0.5.0 +[v0.4.0]: https://github.com/montanaflynn/stats/compare/0.3.0...v0.4.0 +[0.3.0]: https://github.com/montanaflynn/stats/compare/0.2.0...0.3.0 +[0.2.0]: https://github.com/montanaflynn/stats/compare/0.1.0...0.2.0 +[0.1.0]: https://github.com/montanaflynn/stats/compare/0.0.9...0.1.0 diff --git a/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md b/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md index b56788946..978df2ffc 100644 --- a/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md +++ b/vendor/github.com/montanaflynn/stats/DOCUMENTATION.md @@ -44,6 +44,7 @@ MIT License Copyright (c) 2014-2020 Montana Flynn (Package files -[correlation.go](/src/github.com/montanaflynn/stats/correlation.go) [cumulative_sum.go](/src/github.com/montanaflynn/stats/cumulative_sum.go) [data.go](/src/github.com/montanaflynn/stats/data.go) [deviation.go](/src/github.com/montanaflynn/stats/deviation.go) [distances.go](/src/github.com/montanaflynn/stats/distances.go) [doc.go](/src/github.com/montanaflynn/stats/doc.go) [entropy.go](/src/github.com/montanaflynn/stats/entropy.go) [errors.go](/src/github.com/montanaflynn/stats/errors.go) [legacy.go](/src/github.com/montanaflynn/stats/legacy.go) [load.go](/src/github.com/montanaflynn/stats/load.go) [max.go](/src/github.com/montanaflynn/stats/max.go) [mean.go](/src/github.com/montanaflynn/stats/mean.go) [median.go](/src/github.com/montanaflynn/stats/median.go) [min.go](/src/github.com/montanaflynn/stats/min.go) [mode.go](/src/github.com/montanaflynn/stats/mode.go) [norm.go](/src/github.com/montanaflynn/stats/norm.go) [outlier.go](/src/github.com/montanaflynn/stats/outlier.go) [percentile.go](/src/github.com/montanaflynn/stats/percentile.go) [quartile.go](/src/github.com/montanaflynn/stats/quartile.go) [ranksum.go](/src/github.com/montanaflynn/stats/ranksum.go) [regression.go](/src/github.com/montanaflynn/stats/regression.go) [round.go](/src/github.com/montanaflynn/stats/round.go) [sample.go](/src/github.com/montanaflynn/stats/sample.go) [sigmoid.go](/src/github.com/montanaflynn/stats/sigmoid.go) [softmax.go](/src/github.com/montanaflynn/stats/softmax.go) [sum.go](/src/github.com/montanaflynn/stats/sum.go) [util.go](/src/github.com/montanaflynn/stats/util.go) [variance.go](/src/github.com/montanaflynn/stats/variance.go) +[correlation.go](/src/github.com/montanaflynn/stats/correlation.go) [cumulative_sum.go](/src/github.com/montanaflynn/stats/cumulative_sum.go) 
[data.go](/src/github.com/montanaflynn/stats/data.go) [deviation.go](/src/github.com/montanaflynn/stats/deviation.go) [distances.go](/src/github.com/montanaflynn/stats/distances.go) [doc.go](/src/github.com/montanaflynn/stats/doc.go) [entropy.go](/src/github.com/montanaflynn/stats/entropy.go) [errors.go](/src/github.com/montanaflynn/stats/errors.go) [geometric_distribution.go](/src/github.com/montanaflynn/stats/geometric_distribution.go) [legacy.go](/src/github.com/montanaflynn/stats/legacy.go) [load.go](/src/github.com/montanaflynn/stats/load.go) [max.go](/src/github.com/montanaflynn/stats/max.go) [mean.go](/src/github.com/montanaflynn/stats/mean.go) [median.go](/src/github.com/montanaflynn/stats/median.go) [min.go](/src/github.com/montanaflynn/stats/min.go) [mode.go](/src/github.com/montanaflynn/stats/mode.go) [norm.go](/src/github.com/montanaflynn/stats/norm.go) [outlier.go](/src/github.com/montanaflynn/stats/outlier.go) [percentile.go](/src/github.com/montanaflynn/stats/percentile.go) [quartile.go](/src/github.com/montanaflynn/stats/quartile.go) [ranksum.go](/src/github.com/montanaflynn/stats/ranksum.go) [regression.go](/src/github.com/montanaflynn/stats/regression.go) [round.go](/src/github.com/montanaflynn/stats/round.go) [sample.go](/src/github.com/montanaflynn/stats/sample.go) [sigmoid.go](/src/github.com/montanaflynn/stats/sigmoid.go) [softmax.go](/src/github.com/montanaflynn/stats/softmax.go) [sum.go](/src/github.com/montanaflynn/stats/sum.go) [util.go](/src/github.com/montanaflynn/stats/util.go) [variance.go](/src/github.com/montanaflynn/stats/variance.go) @@ -277,6 +283,15 @@ EuclideanDistance computes the Euclidean distance between two data sets +## func [ExpGeom](/geometric_distribution.go?s=652:700#L27) +``` go +func ExpGeom(p float64) (exp float64, err error) +``` +ExpGeom generates the expectation or average number of trials +for a geometric random variable with parameter p + + + ## func [GeometricMean](/mean.go?s=319:373#L18) ``` go func GeometricMean(input Float64Data) (float64, error) ``` @@ -602,6 +617,16 @@ PopulationVariance finds the amount of variance within a population +## func [ProbGeom](/geometric_distribution.go?s=258:322#L10) +``` go +func ProbGeom(a int, b int, p float64) (prob float64, err error) +``` +ProbGeom generates the probability for a geometric random variable +with parameter p to achieve success in the interval of [a, b] trials +See https://en.wikipedia.org/wiki/Geometric_distribution for more information + + + ## func [Round](/round.go?s=88:154#L6) ``` go func Round(input float64, places int) (rounded float64, err error) ``` @@ -671,7 +696,7 @@ StandardDeviationPopulation finds the amount of variation from the population -## func [StandardDeviationSample](/deviation.go?s=1254:1327#L46) +## func [StandardDeviationSample](/deviation.go?s=1250:1323#L46) ``` go func StandardDeviationSample(input Float64Data) (sdev float64, err error) ``` @@ -711,6 +736,15 @@ Trimean finds the average of the median and the midhinge +## func [VarGeom](/geometric_distribution.go?s=885:933#L37) +``` go +func VarGeom(p float64) (exp float64, err error) +``` +VarGeom generates the variance for a +geometric random variable with parameter p + + + ## func [VarP](/legacy.go?s=59:113#L4) ``` go func VarP(input Float64Data) (sdev float64, err error) ``` @@ -787,7 +821,7 @@ Float64Data is a named type for []float64 with helper methods -### func [LoadRawData](/load.go?s=119:168#L9) +### func [LoadRawData](/load.go?s=145:194#L12) ``` go func LoadRawData(raw interface{}) (f 
Float64Data) ``` diff --git a/vendor/github.com/montanaflynn/stats/LICENSE b/vendor/github.com/montanaflynn/stats/LICENSE index 159096129..3162cb1a5 100644 --- a/vendor/github.com/montanaflynn/stats/LICENSE +++ b/vendor/github.com/montanaflynn/stats/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2014-2020 Montana Flynn (https://montanaflynn.com) +Copyright (c) 2014-2023 Montana Flynn (https://montanaflynn.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/montanaflynn/stats/README.md b/vendor/github.com/montanaflynn/stats/README.md index 4495c8ddf..9c1889073 100644 --- a/vendor/github.com/montanaflynn/stats/README.md +++ b/vendor/github.com/montanaflynn/stats/README.md @@ -1,6 +1,6 @@ # Stats - Golang Statistics Package -[![][travis-svg]][travis-url] [![][coveralls-svg]][coveralls-url] [![][goreport-svg]][goreport-url] [![][godoc-svg]][godoc-url] [![][pkggodev-svg]][pkggodev-url] [![][license-svg]][license-url] +[![][action-svg]][action-url] [![][codecov-svg]][codecov-url] [![][goreport-svg]][goreport-url] [![][godoc-svg]][godoc-url] [![][pkggodev-svg]][pkggodev-url] [![][license-svg]][license-url] A well tested and comprehensive Golang statistics library / package / module with no dependencies. @@ -76,6 +76,8 @@ func Correlation(data1, data2 Float64Data) (float64, error) {} func Covariance(data1, data2 Float64Data) (float64, error) {} func CovariancePopulation(data1, data2 Float64Data) (float64, error) {} func CumulativeSum(input Float64Data) ([]float64, error) {} +func Describe(input Float64Data, allowNaN bool, percentiles *[]float64) (*Description, error) {} +func DescribePercentileFunc(input Float64Data, allowNaN bool, percentiles *[]float64, percentileFunc func(Float64Data, float64) (float64, error)) (*Description, error) {} func Entropy(input Float64Data) (float64, error) {} func EuclideanDistance(dataPointX, dataPointY Float64Data) (distance float64, err error) {} func GeometricMean(input Float64Data) (float64, error) {} @@ -129,6 +131,9 @@ func Trimean(input Float64Data) (float64, error) {} func VarP(input Float64Data) (sdev float64, err error) {} func VarS(input Float64Data) (sdev float64, err error) {} func Variance(input Float64Data) (sdev float64, err error) {} +func ProbGeom(a int, b int, p float64) (prob float64, err error) {} +func ExpGeom(p float64) (exp float64, err error) {} +func VarGeom(p float64) (exp float64, err error) {} type Coordinate struct { X, Y float64 @@ -177,13 +182,15 @@ To make things as seamless as possible please also consider the following steps: ## Releasing -To release a new version we should update the [CHANGELOG.md](/CHANGELOG.md) and [DOCUMENTATION.md](/DOCUMENTATION.md). +This is not required by contributors and mostly here as a reminder to myself as the maintainer of this repo. To release a new version we should update the [CHANGELOG.md](/CHANGELOG.md) and [DOCUMENTATION.md](/DOCUMENTATION.md). 
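Editor's note: the geometric-distribution helpers added to the API list above (ProbGeom, ExpGeom, VarGeom) are easiest to understand from a small usage sketch against the vendored montanaflynn/stats package. This example is illustrative only and is not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	p := 0.25 // success probability of each independent trial

	// Probability that the first success falls in the given trial interval
	// (see the ProbGeom doc comment for the exact bounds convention).
	prob, _ := stats.ProbGeom(1, 4, p)

	exp, _ := stats.ExpGeom(p) // expected trials until first success: 1/p = 4
	vr, _ := stats.VarGeom(p)  // variance of the trial count: (1-p)/p^2 = 12

	fmt.Println(prob, exp, vr)
}
```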
-First install the tools used to generate the markdown files: +First install the tools used to generate the markdown files and release: ``` -go get github.com/davecheney/godoc2md -go get github.com/golangci/golangci-lint/cmd/golangci-lint +go install github.com/davecheney/godoc2md@latest +go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest +brew tap git-chglog/git-chglog +brew install gnu-sed hub git-chglog ``` Then you can run these `make` directives: @@ -199,9 +206,11 @@ Then we can create a [CHANGELOG.md](/CHANGELOG.md), a new git tag and a github release: make release TAG=v0.x.x ``` +To authenticate `hub` for the release you will need to create a personal access token and use it as the password when it's requested. + ## MIT License -Copyright (c) 2014-2021 Montana Flynn (https://montanaflynn.com) +Copyright (c) 2014-2023 Montana Flynn (https://montanaflynn.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: @@ -209,11 +218,11 @@ The above copyright notice and this permission notice shall be included in all c THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
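Editor's note: the Describe and DescribePercentileFunc entries added to the README's API list above are implemented in the new describe.go file vendored below. A minimal usage sketch, based on the signatures in that file (illustrative, not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/montanaflynn/stats"
)

func main() {
	data := stats.Float64Data{1, 2, 3}
	percentiles := []float64{25, 50, 75}

	// allowNaN=false: an empty dataset returns ErrEmptyInput instead of NaNs.
	desc, err := stats.Describe(data, false, &percentiles)
	if err != nil {
		panic(err)
	}

	// String takes the number of decimal places to use for the float fields.
	fmt.Println(desc.String(2))
}
```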
-[travis-url]: https://travis-ci.org/montanaflynn/stats -[travis-svg]: https://img.shields.io/travis/montanaflynn/stats.svg +[action-url]: https://github.com/montanaflynn/stats/actions +[action-svg]: https://img.shields.io/github/actions/workflow/status/montanaflynn/stats/go.yml -[coveralls-url]: https://coveralls.io/r/montanaflynn/stats?branch=master -[coveralls-svg]: https://img.shields.io/coveralls/montanaflynn/stats.svg +[codecov-url]: https://app.codecov.io/gh/montanaflynn/stats +[codecov-svg]: https://img.shields.io/codecov/c/github/montanaflynn/stats?token=wnw8dActnH [goreport-url]: https://goreportcard.com/report/github.com/montanaflynn/stats [goreport-svg]: https://goreportcard.com/badge/github.com/montanaflynn/stats diff --git a/vendor/github.com/montanaflynn/stats/describe.go b/vendor/github.com/montanaflynn/stats/describe.go new file mode 100644 index 000000000..86b72425c --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/describe.go @@ -0,0 +1,81 @@ +package stats + +import "fmt" + +// Holds information about the dataset provided to Describe +type Description struct { + Count int + Mean float64 + Std float64 + Max float64 + Min float64 + DescriptionPercentiles []descriptionPercentile + AllowedNaN bool +} + +// Specifies percentiles to be computed +type descriptionPercentile struct { + Percentile float64 + Value float64 +} + +// Describe generates descriptive statistics about a provided dataset, similar to python's pandas.describe() +func Describe(input Float64Data, allowNaN bool, percentiles *[]float64) (*Description, error) { + return DescribePercentileFunc(input, allowNaN, percentiles, Percentile) +} + +// DescribePercentileFunc generates descriptive statistics about a provided dataset, similar to python's pandas.describe() +// Takes in a function to use for percentile calculation +func DescribePercentileFunc(input Float64Data, allowNaN bool, percentiles *[]float64, percentileFunc func(Float64Data, float64) (float64, error)) (*Description, error) { + var description Description + description.AllowedNaN = allowNaN + description.Count = input.Len() + + if description.Count == 0 && !allowNaN { + return &description, ErrEmptyInput + } + + // Disregard error, since it cannot be thrown if Count is > 0 and allowNaN is false, else NaN is accepted + description.Std, _ = StandardDeviation(input) + description.Max, _ = Max(input) + description.Min, _ = Min(input) + description.Mean, _ = Mean(input) + + if percentiles != nil { + for _, percentile := range *percentiles { + if value, err := percentileFunc(input, percentile); err == nil || allowNaN { + description.DescriptionPercentiles = append(description.DescriptionPercentiles, descriptionPercentile{Percentile: percentile, Value: value}) + } + } + } + + return &description, nil +} + +/* +Represents the Description instance in a string format with specified number of decimals + + count 3 + mean 2.00 + std 0.82 + max 3.00 + min 1.00 + 25.00% NaN + 50.00% 1.50 + 75.00% 2.50 + NaN OK true +*/ +func (d *Description) String(decimals int) string { + var str string + + str += fmt.Sprintf("count\t%d\n", d.Count) + str += fmt.Sprintf("mean\t%.*f\n", decimals, d.Mean) + str += fmt.Sprintf("std\t%.*f\n", decimals, d.Std) + str += fmt.Sprintf("max\t%.*f\n", decimals, d.Max) + str += fmt.Sprintf("min\t%.*f\n", decimals, d.Min) + for _, percentile := range d.DescriptionPercentiles { + str += fmt.Sprintf("%.2f%%\t%.*f\n", percentile.Percentile, decimals, percentile.Value) + } + str += fmt.Sprintf("NaN OK\t%t", d.AllowedNaN) + return str +} diff --git 
a/vendor/github.com/montanaflynn/stats/distances.go b/vendor/github.com/montanaflynn/stats/distances.go index c2b7d8f8e..8a6330e38 100644 --- a/vendor/github.com/montanaflynn/stats/distances.go +++ b/vendor/github.com/montanaflynn/stats/distances.go @@ -62,16 +62,19 @@ func ManhattanDistance(dataPointX, dataPointY Float64Data) (distance float64, er // MinkowskiDistance computes the Minkowski distance between two data sets // // Arguments: -// dataPointX: First set of data points -// dataPointY: Second set of data points. Length of both data -// sets must be equal. -// lambda: aka p or city blocks; With lambda = 1 -// returned distance is manhattan distance and -// lambda = 2; it is euclidean distance. Lambda -// reaching to infinite - distance would be chebysev -// distance. +// +// dataPointX: First set of data points +// dataPointY: Second set of data points. Length of both data +// sets must be equal. +// lambda: aka p or city blocks; with lambda = 1 the +// returned distance is the Manhattan distance, with +// lambda = 2 it is the Euclidean distance, and as lambda +// approaches infinity it becomes the Chebyshev +// distance. +// // Return: -// Distance or error +// +// Distance or error func MinkowskiDistance(dataPointX, dataPointY Float64Data, lambda float64) (distance float64, err error) { err = validateData(dataPointX, dataPointY) if err != nil { diff --git a/vendor/github.com/montanaflynn/stats/geometric_distribution.go b/vendor/github.com/montanaflynn/stats/geometric_distribution.go new file mode 100644 index 000000000..db785dda2 --- /dev/null +++ b/vendor/github.com/montanaflynn/stats/geometric_distribution.go @@ -0,0 +1,42 @@ +package stats + +import ( + "math" +) + +// ProbGeom generates the probability for a geometric random variable +// with parameter p to achieve success in the interval of [a, b] trials +// See https://en.wikipedia.org/wiki/Geometric_distribution for more information +func ProbGeom(a int, b int, p float64) (prob float64, err error) { + if (a > b) || (a < 1) { + return math.NaN(), ErrBounds + } + + prob = 0 + q := 1 - p // probability of failure + + for k := a + 1; k <= b; k++ { + prob = prob + p*math.Pow(q, float64(k-1)) + } + + return prob, nil +} + +// ExpGeom generates the expectation or average number of trials +// for a geometric random variable with parameter p +func ExpGeom(p float64) (exp float64, err error) { + if (p > 1) || (p < 0) { + return math.NaN(), ErrNegative + } + + return 1 / p, nil +} + +// VarGeom generates the variance for a +// geometric random variable with parameter p +func VarGeom(p float64) (exp float64, err error) { + if (p > 1) || (p < 0) { + return math.NaN(), ErrNegative + } + return (1 - p) / math.Pow(p, 2), nil +} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go index 6ca8d9ad6..652aa48b8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go @@ -15,16 +15,15 @@ import ( // ArrayCodec is the Codec used for bsoncore.Array values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ArrayCodec registered. +// Deprecated: ArrayCodec will not be directly accessible in Go Driver 2.0. type ArrayCodec struct{} var defaultArrayCodec = NewArrayCodec() // NewArrayCodec returns an ArrayCodec. 
// -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ArrayCodec registered. +// Deprecated: NewArrayCodec will not be available in Go Driver 2.0. See +// [ArrayCodec] for more details. func NewArrayCodec() *ArrayCodec { return &ArrayCodec{} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go index dde3e7681..0134b5a94 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go @@ -17,13 +17,28 @@ import ( // ByteSliceCodec is the Codec used for []byte values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ByteSliceCodec registered. +// Deprecated: ByteSliceCodec will not be directly configurable in Go Driver +// 2.0. To configure the byte slice encode and decode behavior, use the +// configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the byte slice +// encode and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to encode nil byte slices as empty +// BSON binary values, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilByteSliceAsEmpty: true, +// }) +// +// See the deprecation notice for each field in ByteSliceCodec for the +// corresponding settings. type ByteSliceCodec struct { // EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values // instead of BSON null. // - // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty instead. + // Deprecated: Use bson.Encoder.NilByteSliceAsEmpty or options.BSONOptions.NilByteSliceAsEmpty + // instead. EncodeNilAsEmpty bool } @@ -38,8 +53,8 @@ var ( // NewByteSliceCodec returns a ByteSliceCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// ByteSliceCodec registered. +// Deprecated: NewByteSliceCodec will not be available in Go Driver 2.0. See +// [ByteSliceCodec] for more details. func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec { byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...) codec := ByteSliceCodec{} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go index 2ce119731..fc4a7b1db 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go @@ -41,7 +41,7 @@ func newDefaultStructCodec() *StructCodec { if err != nil { // This function is called from the codec registration path, so errors can't be propagated. If there's an error // constructing the StructCodec, we panic to avoid losing it. 
- panic(fmt.Errorf("error creating default StructCodec: %v", err)) + panic(fmt.Errorf("error creating default StructCodec: %w", err)) } return codec } @@ -178,7 +178,7 @@ func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueRe for { key, elemVr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } else if err != nil { return err @@ -330,7 +330,7 @@ func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReade case reflect.Int64: return reflect.ValueOf(i64), nil case reflect.Int: - if int64(int(i64)) != i64 { // Can we fit this inside of an int + if i64 > math.MaxInt { // Can we fit this inside of an int return emptyValue, fmt.Errorf("%d overflows int", i64) } @@ -434,7 +434,7 @@ func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.Valu return fmt.Errorf("%d overflows uint64", i64) } case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint + if i64 < 0 || uint64(i64) > uint64(math.MaxUint) { // Can we fit this inside of an uint return fmt.Errorf("%d overflows uint", i64) } default: @@ -1379,7 +1379,7 @@ func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.Value keyType := val.Type().Key() for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -1675,7 +1675,7 @@ func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueR idx := 0 for { vr, err := ar.ReadValue() - if err == bsonrw.ErrEOA { + if errors.Is(err, bsonrw.ErrEOA) { break } if err != nil { @@ -1787,7 +1787,7 @@ func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr b elems := make([]reflect.Value, 0) for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go index 4ab14a668..4751ae995 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go @@ -343,7 +343,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum } currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -352,7 +352,7 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -418,7 +418,7 @@ func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.Val for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -427,7 +427,7 @@ func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.Val return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -487,7 +487,7 @@ func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw 
bsonrw.Val for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := dve.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -496,7 +496,7 @@ func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.Val return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go index 94f7dcf1e..098368f07 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go @@ -17,13 +17,27 @@ import ( // EmptyInterfaceCodec is the Codec used for interface{} values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// EmptyInterfaceCodec registered. +// Deprecated: EmptyInterfaceCodec will not be directly configurable in Go +// Driver 2.0. To configure the empty interface encode and decode behavior, use +// the configuration methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the empty interface +// encode and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to unmarshal BSON binary field +// values as a Go byte slice, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// BinaryAsSlice: true, +// }) +// +// See the deprecation notice for each field in EmptyInterfaceCodec for the +// corresponding settings. type EmptyInterfaceCodec struct { // DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the // "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary. // - // Deprecated: Use bson.Decoder.BinaryAsSlice instead. + // Deprecated: Use bson.Decoder.BinaryAsSlice or options.BSONOptions.BinaryAsSlice instead. DecodeBinaryAsSlice bool } @@ -38,8 +52,8 @@ var ( // NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// EmptyInterfaceCodec registered. +// Deprecated: NewEmptyInterfaceCodec will not be available in Go Driver 2.0. See +// [EmptyInterfaceCodec] for more details. func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec { interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go index 325c1738a..d7e00ffa8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go @@ -8,6 +8,7 @@ package bsoncodec import ( "encoding" + "errors" "fmt" "reflect" "strconv" @@ -21,25 +22,40 @@ var defaultMapCodec = NewMapCodec() // MapCodec is the Codec used for map values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// MapCodec registered. +// Deprecated: MapCodec will not be directly configurable in Go Driver 2.0. 
To +// configure the map encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the map encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal nil Go maps as empty BSON +// documents, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilMapAsEmpty: true, +// }) +// +// See the deprecation notice for each field in MapCodec for the corresponding +// settings. type MapCodec struct { // DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination // value passed to Decode before unmarshaling BSON documents into them. // - // Deprecated: Use bson.Decoder.ZeroMaps instead. + // Deprecated: Use bson.Decoder.ZeroMaps or options.BSONOptions.ZeroMaps instead. DecodeZerosMap bool // EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of // BSON null. // - // Deprecated: Use bson.Encoder.NilMapAsEmpty instead. + // Deprecated: Use bson.Encoder.NilMapAsEmpty or options.BSONOptions.NilMapAsEmpty instead. EncodeNilAsEmpty bool // EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name // strings using fmt.Sprintf() instead of the default string conversion logic. // - // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt instead. + // Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt or + // options.BSONOptions.StringifyMapKeysWithFmt instead. EncodeKeysWithStringer bool } @@ -61,8 +77,8 @@ type KeyUnmarshaler interface { // NewMapCodec returns a MapCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// MapCodec registered. +// Deprecated: NewMapCodec will not be available in Go Driver 2.0. See +// [MapCodec] for more details. func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { mapOpt := bsonoptions.MergeMapCodecOptions(opts...) 
@@ -128,7 +144,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v } currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.MapIndex(key)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -137,7 +153,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err @@ -200,7 +216,7 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref for { key, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -313,7 +329,7 @@ func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, if mc.EncodeKeysWithStringer { parsed, err := strconv.ParseFloat(key, 64) if err != nil { - return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyType.Kind(), err) + return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %w", keyType.Kind(), err) } keyVal = reflect.ValueOf(parsed) break diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go index e5923230b..ddfa4a33e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go @@ -18,8 +18,16 @@ var _ ValueDecoder = &PointerCodec{} // PointerCodec is the Codec used for pointers. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// PointerCodec registered. +// Deprecated: PointerCodec will not be directly accessible in Go Driver 2.0. To +// override the default pointer encode and decode behavior, create a new registry +// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new +// encoder and decoder for pointers. +// +// For example, +// +// reg := bson.NewRegistry() +// reg.RegisterKindEncoder(reflect.Ptr, myPointerEncoder) +// reg.RegisterKindDecoder(reflect.Ptr, myPointerDecoder) type PointerCodec struct { ecache typeEncoderCache dcache typeDecoderCache @@ -27,8 +35,8 @@ type PointerCodec struct { // NewPointerCodec returns a PointerCodec that has been initialized. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// PointerCodec registered. +// Deprecated: NewPointerCodec will not be available in Go Driver 2.0. See +// [PointerCodec] for more details. func NewPointerCodec() *PointerCodec { return &PointerCodec{} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index f309ee2b3..196c491bb 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -388,6 +388,9 @@ func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { // If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for // concurrent use by multiple goroutines after all codecs and encoders are registered. 
func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { + if valueType == nil { + return nil, ErrNoEncoder{Type: valueType} + } enc, found := r.lookupTypeEncoder(valueType) if found { if enc == nil { @@ -400,15 +403,10 @@ func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { if found { return r.typeEncoders.LoadOrStore(valueType, enc), nil } - if valueType == nil { - r.storeTypeEncoder(valueType, nil) - return nil, ErrNoEncoder{Type: valueType} - } if v, ok := r.kindEncoders.Load(valueType.Kind()); ok { return r.storeTypeEncoder(valueType, v), nil } - r.storeTypeEncoder(valueType, nil) return nil, ErrNoEncoder{Type: valueType} } @@ -474,7 +472,6 @@ func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { if v, ok := r.kindDecoders.Load(valueType.Kind()); ok { return r.storeTypeDecoder(valueType, v), nil } - r.storeTypeDecoder(valueType, nil) return nil, ErrNoDecoder{Type: valueType} } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go index a43daf005..14c9fd256 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go @@ -7,6 +7,7 @@ package bsoncodec import ( + "errors" "fmt" "reflect" @@ -20,8 +21,22 @@ var defaultSliceCodec = NewSliceCodec() // SliceCodec is the Codec used for slice values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// SliceCodec registered. +// Deprecated: SliceCodec will not be directly configurable in Go Driver 2.0. To +// configure the slice encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the slice encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal nil Go slices as empty +// BSON arrays, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// NilSliceAsEmpty: true, +// }) +// +// See the deprecation notice for each field in SliceCodec for the corresponding +// settings. type SliceCodec struct { // EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of // BSON null. @@ -32,8 +47,8 @@ type SliceCodec struct { // NewSliceCodec returns a MapCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// SliceCodec registered. +// Deprecated: NewSliceCodec will not be available in Go Driver 2.0. See +// [SliceCodec] for more details. func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec { sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...) 
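Editor's note: the registry.go hunk above moves the nil-type guard to the top of LookupEncoder and drops the calls that stored nil encoders after failed lookups. Assuming the intent is simply to fail fast without touching the type-encoder cache (an inference from the diff, not documented behavior), the observable result looks like this sketch:

```go
package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	reg := bson.NewRegistry()

	// A nil reflect.Type now returns ErrNoEncoder immediately instead of
	// first being recorded in the lookup cache.
	enc, err := reg.LookupEncoder(nil)
	fmt.Println(enc, err)
}
```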
@@ -93,7 +108,7 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re for idx := 0; idx < val.Len(); idx++ { currEncoder, currVal, lookupErr := defaultValueEncoders.lookupElementEncoder(ec, encoder, val.Index(idx)) - if lookupErr != nil && lookupErr != errInvalidValue { + if lookupErr != nil && !errors.Is(lookupErr, errInvalidValue) { return lookupErr } @@ -102,7 +117,7 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re return err } - if lookupErr == errInvalidValue { + if errors.Is(lookupErr, errInvalidValue) { err = vw.WriteNull() if err != nil { return err diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go index ff931b725..a8f885a85 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go @@ -17,8 +17,16 @@ import ( // StringCodec is the Codec used for string values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StringCodec registered. +// Deprecated: StringCodec will not be directly accessible in Go Driver 2.0. To +// override the default string encode and decode behavior, create a new registry +// with [go.mongodb.org/mongo-driver/bson.NewRegistry] and register a new +// encoder and decoder for strings. +// +// For example, +// +// reg := bson.NewRegistry() +// reg.RegisterKindEncoder(reflect.String, myStringEncoder) +// reg.RegisterKindDecoder(reflect.String, myStringDecoder) type StringCodec struct { // DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. // If false, a string made from the raw object ID bytes will be used. Defaults to true. @@ -38,8 +46,8 @@ var ( // NewStringCodec returns a StringCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StringCodec registered. +// Deprecated: NewStringCodec will not be available in Go Driver 2.0. See +// [StringCodec] for more details. func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { stringOpt := bsonoptions.MergeStringCodecOptions(opts...) return &StringCodec{*stringOpt.DecodeObjectIDAsHex} diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go index 4cde0a4d6..f8d9690c1 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go @@ -60,8 +60,22 @@ type Zeroer interface { // StructCodec is the Codec used for struct values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StructCodec registered. +// Deprecated: StructCodec will not be directly configurable in Go Driver 2.0. +// To configure the struct encode and decode behavior, use the configuration +// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the struct encode +// and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. 
+// +// For example, to configure a mongo.Client to omit zero-value structs when +// using the "omitempty" struct tag, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// OmitZeroStruct: true, +// }) +// +// See the deprecation notice for each field in StructCodec for the corresponding +// settings. type StructCodec struct { cache sync.Map // map[reflect.Type]*structDescription parser StructTagParser @@ -69,7 +83,7 @@ type StructCodec struct { // DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the // destination value passed to Decode before unmarshaling BSON documents into them. // - // Deprecated: Use bson.Decoder.ZeroStructs instead. + // Deprecated: Use bson.Decoder.ZeroStructs or options.BSONOptions.ZeroStructs instead. DecodeZeroStruct bool // DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the @@ -82,7 +96,7 @@ type StructCodec struct { // MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag // option is set. // - // Deprecated: Use bson.Encoder.OmitZeroStruct instead. + // Deprecated: Use bson.Encoder.OmitZeroStruct or options.BSONOptions.OmitZeroStruct instead. EncodeOmitDefaultStruct bool // AllowUnexportedFields allows encoding and decoding values from un-exported struct fields. @@ -95,7 +109,8 @@ type StructCodec struct { // a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The // default value is true. // - // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates instead. + // Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates or + // options.BSONOptions.ErrorOnInlineDuplicates instead. OverwriteDuplicatedInlinedFields bool } @@ -104,8 +119,8 @@ var _ ValueDecoder = &StructCodec{} // NewStructCodec returns a StructCodec that uses p for struct tag parsing. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// StructCodec registered. +// Deprecated: NewStructCodec will not be available in Go Driver 2.0. See +// [StructCodec] for more details. func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) { if p == nil { return nil, errors.New("a StructTagParser must be provided to NewStructCodec") @@ -164,11 +179,11 @@ func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv) - if err != nil && err != errInvalidValue { + if err != nil && !errors.Is(err, errInvalidValue) { return err } - if err == errInvalidValue { + if errors.Is(err, errInvalidValue) { if desc.omitEmpty { continue } @@ -189,17 +204,17 @@ func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val encoder := desc.encoder - var zero bool + var empty bool if cz, ok := encoder.(CodecZeroer); ok { - zero = cz.IsTypeZero(rv.Interface()) + empty = cz.IsTypeZero(rv.Interface()) } else if rv.Kind() == reflect.Interface { - // isZero will not treat an interface rv as an interface, so we need to check for the - // zero interface separately. - zero = rv.IsNil() + // isEmpty will not treat an interface rv as an interface, so we need to check for the + // nil interface separately. 
+ empty = rv.IsNil() } else { - zero = isZero(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) + empty = isEmpty(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct) } - if desc.omitEmpty && zero { + if desc.omitEmpty && empty { continue } @@ -239,8 +254,8 @@ func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val } func newDecodeError(key string, original error) error { - de, ok := original.(*DecodeError) - if !ok { + var de *DecodeError + if !errors.As(original, &de) { return &DecodeError{ keys: []string{key}, wrapped: original, } @@ -308,7 +323,7 @@ func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val for { name, vr, err := dr.ReadElement() - if err == bsonrw.ErrEOD { + if errors.Is(err, bsonrw.ErrEOD) { break } if err != nil { @@ -391,12 +406,15 @@ func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val return nil } -func isZero(v reflect.Value, omitZeroStruct bool) bool { +func isEmpty(v reflect.Value, omitZeroStruct bool) bool { kind := v.Kind() if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) { return v.Interface().(Zeroer).IsZero() } - if kind == reflect.Struct { + switch kind { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Struct: if !omitZeroStruct { return false } @@ -410,7 +428,7 @@ func isZero(v reflect.Value, omitZeroStruct bool) bool { if ff.PkgPath != "" && !ff.Anonymous { continue // Private field } - if !isZero(v.Field(i), omitZeroStruct) { + if !isEmpty(v.Field(i), omitZeroStruct) { return false } } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go index 7b005a995..22fb762c4 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go @@ -23,12 +23,26 @@ const ( // TimeCodec is the Codec used for time.Time values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// TimeCodec registered. +// Deprecated: TimeCodec will not be directly configurable in Go Driver 2.0. +// To configure the time.Time encode and decode behavior, use the configuration +// methods on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the time.Time encode +// and decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to decode time.Time values in the +// local time zone, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// UseLocalTimeZone: true, +// }) +// +// See the deprecation notice for each field in TimeCodec for the corresponding +// settings. type TimeCodec struct { // UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false. // - // Deprecated: Use bson.Decoder.UseLocalTimeZone instead. + // Deprecated: Use bson.Decoder.UseLocalTimeZone or options.BSONOptions.UseLocalTimeZone + // instead. UseLocalTimeZone bool } @@ -42,8 +56,8 @@ var ( // NewTimeCodec returns a TimeCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// TimeCodec registered. +// Deprecated: NewTimeCodec will not be available in Go Driver 2.0. See +// [TimeCodec] for more details. 
func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { timeOpt := bsonoptions.MergeTimeCodecOptions(opts...) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go index 7eb106905..39b07135b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go @@ -18,13 +18,27 @@ import ( // UIntCodec is the Codec used for uint values. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// UIntCodec registered. +// Deprecated: UIntCodec will not be directly configurable in Go Driver 2.0. To +// configure the uint encode and decode behavior, use the configuration methods +// on a [go.mongodb.org/mongo-driver/bson.Encoder] or +// [go.mongodb.org/mongo-driver/bson.Decoder]. To configure the uint encode and +// decode behavior for a mongo.Client, use +// [go.mongodb.org/mongo-driver/mongo/options.ClientOptions.SetBSONOptions]. +// +// For example, to configure a mongo.Client to marshal Go uint values as the +// minimum BSON int size that can represent the value, use: +// +// opt := options.Client().SetBSONOptions(&options.BSONOptions{ +// IntMinSize: true, +// }) +// +// See the deprecation notice for each field in UIntCodec for the corresponding +// settings. type UIntCodec struct { // EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the // minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value. // - // Deprecated: Use bson.Encoder.IntMinSize instead. + // Deprecated: Use bson.Encoder.IntMinSize or options.BSONOptions.IntMinSize instead. EncodeToMinSize bool } @@ -38,8 +52,8 @@ var ( // NewUIntCodec returns a UIntCodec with options opts. // -// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the -// UIntCodec registered. +// Deprecated: NewUIntCodec will not be available in Go Driver 2.0. See +// [UIntCodec] for more details. func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { uintOpt := bsonoptions.MergeUIntCodecOptions(opts...) 
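Editor's note: the uint_codec.go hunk just below replaces the round-trip test int64(uint(i64)) != i64 with an explicit negative check plus a comparison against math.MaxUint, spelling out the platform-dependent bound instead of relying on conversion truncation. A standalone sketch of the new check (the helper name is invented for illustration):

```go
package main

import (
	"fmt"
	"math"
)

// fitsUint mirrors the decoder's new two-step check: reject negatives first,
// then compare against the platform-sized math.MaxUint.
func fitsUint(i64 int64) bool {
	if i64 < 0 {
		return false
	}
	return uint64(i64) <= math.MaxUint
}

func main() {
	fmt.Println(fitsUint(-1))            // false: negative values overflow uint
	fmt.Println(fitsUint(math.MaxInt64)) // true on 64-bit, false on 32-bit platforms
}
```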
@@ -150,11 +164,15 @@ func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t refl return reflect.ValueOf(uint64(i64)), nil case reflect.Uint: - if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint + if i64 < 0 { + return emptyValue, fmt.Errorf("%d overflows uint", i64) + } + v := uint64(i64) + if v > math.MaxUint { // Can we fit this inside of an uint return emptyValue, fmt.Errorf("%d overflows uint", i64) } - return reflect.ValueOf(uint(i64)), nil + return reflect.ValueOf(uint(v)), nil default: return emptyValue, ValueDecoderError{ Name: "UintDecodeValue", diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go index 4d279b7fe..1e25570b8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go @@ -7,6 +7,7 @@ package bsonrw import ( + "errors" "fmt" "io" @@ -442,7 +443,7 @@ func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { for { vr, err := ar.ReadValue() - if err == ErrEOA { + if errors.Is(err, ErrEOA) { break } if err != nil { @@ -466,7 +467,7 @@ func (c Copier) copyArray(dst ValueWriter, src ValueReader) error { func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error { for { key, vr, err := dr.ReadElement() - if err == ErrEOD { + if errors.Is(err, ErrEOD) { break } if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go index 54c76bf74..bb52a0ec3 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go @@ -313,7 +313,7 @@ func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) { // convert hex to bytes bytes, err := hex.DecodeString(uuidNoHyphens) if err != nil { - return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %v", err) + return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %w", err) } ejp.advanceState() diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go index 2aca37a91..59ddfc448 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go @@ -7,6 +7,7 @@ package bsonrw import ( + "errors" "fmt" "io" "sync" @@ -613,7 +614,7 @@ func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) { name, t, err := ejvr.p.readKey() if err != nil { - if err == ErrEOD { + if errors.Is(err, ErrEOD) { if ejvr.stack[ejvr.frame].mode == mCodeWithScope { _, err := ejvr.p.peekType() if err != nil { @@ -640,7 +641,7 @@ func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) { t, err := ejvr.p.peekType() if err != nil { - if err == ErrEOA { + if errors.Is(err, ErrEOA) { ejvr.pop() } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go index 969570424..af6ae7b76 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go @@ -95,9 +95,9 @@ func (ejv *extJSONValue) parseBinary() (b []byte, subType byte, err error) { return nil, 0, fmt.Errorf("$binary subType value should be string, but instead is %s", 
val.t) } - i, err := strconv.ParseInt(val.v.(string), 16, 64) + i, err := strconv.ParseUint(val.v.(string), 16, 8) if err != nil { - return nil, 0, fmt.Errorf("invalid $binary subType string: %s", val.v.(string)) + return nil, 0, fmt.Errorf("invalid $binary subType string: %q: %w", val.v.(string), err) } subType = byte(i) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go index cd4843a3a..43f3e4f38 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go @@ -58,7 +58,7 @@ func (js *jsonScanner) nextToken() (*jsonToken, error) { c, err = js.readNextByte() } - if err == io.EOF { + if errors.Is(err, io.EOF) { return &jsonToken{t: jttEOF}, nil } else if err != nil { return nil, err @@ -198,7 +198,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { for { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -209,7 +209,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { case '\\': c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -248,7 +248,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { if utf16.IsSurrogate(rn) { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -264,7 +264,7 @@ func (js *jsonScanner) scanString() (*jsonToken, error) { c, err = js.readNextByte() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil, errors.New("end of input in JSON string") } return nil, err @@ -325,17 +325,17 @@ func (js *jsonScanner) scanLiteral(first byte) (*jsonToken, error) { c5, err := js.readNextByte() - if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || err == io.EOF) { + if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || errors.Is(err, io.EOF)) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttBool, v: true, p: p}, nil - } else if bytes.Equal([]byte("null"), lit) && (isValueTerminator(c5) || err == io.EOF) { + } else if bytes.Equal([]byte("null"), lit) && (isValueTerminator(c5) || errors.Is(err, io.EOF)) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttNull, v: nil, p: p}, nil } else if bytes.Equal([]byte("fals"), lit) { if c5 == 'e' { c5, err = js.readNextByte() - if isValueTerminator(c5) || err == io.EOF { + if isValueTerminator(c5) || errors.Is(err, io.EOF) { js.pos = int(math.Max(0, float64(js.pos-1))) return &jsonToken{t: jttBool, v: false, p: p}, nil } @@ -384,7 +384,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { for { c, err = js.readNextByte() - if err != nil && err != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return nil, err } @@ -413,7 +413,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else { s = nssInvalid @@ -430,7 +430,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } 
else if isDigit(c) { s = nssSawIntegerDigits @@ -455,7 +455,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else if isDigit(c) { s = nssSawFractionDigits @@ -490,7 +490,7 @@ func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) { case '}', ']', ',': s = nssDone default: - if isWhiteSpace(c) || err == io.EOF { + if isWhiteSpace(c) || errors.Is(err, io.EOF) { s = nssDone } else if isDigit(c) { s = nssSawExponentDigits diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go index a242bb57c..0e07d5055 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go @@ -842,7 +842,7 @@ func (vr *valueReader) peekLength() (int32, error) { } idx := vr.offset - return (int32(vr.d[idx]) | int32(vr.d[idx+1])<<8 | int32(vr.d[idx+2])<<16 | int32(vr.d[idx+3])<<24), nil + return int32(binary.LittleEndian.Uint32(vr.d[idx:])), nil } func (vr *valueReader) readLength() (int32, error) { return vr.readi32() } @@ -854,7 +854,7 @@ func (vr *valueReader) readi32() (int32, error) { idx := vr.offset vr.offset += 4 - return (int32(vr.d[idx]) | int32(vr.d[idx+1])<<8 | int32(vr.d[idx+2])<<16 | int32(vr.d[idx+3])<<24), nil + return int32(binary.LittleEndian.Uint32(vr.d[idx:])), nil } func (vr *valueReader) readu32() (uint32, error) { @@ -864,7 +864,7 @@ func (vr *valueReader) readu32() (uint32, error) { idx := vr.offset vr.offset += 4 - return (uint32(vr.d[idx]) | uint32(vr.d[idx+1])<<8 | uint32(vr.d[idx+2])<<16 | uint32(vr.d[idx+3])<<24), nil + return binary.LittleEndian.Uint32(vr.d[idx:]), nil } func (vr *valueReader) readi64() (int64, error) { @@ -874,8 +874,7 @@ func (vr *valueReader) readi64() (int64, error) { idx := vr.offset vr.offset += 8 - return int64(vr.d[idx]) | int64(vr.d[idx+1])<<8 | int64(vr.d[idx+2])<<16 | int64(vr.d[idx+3])<<24 | - int64(vr.d[idx+4])<<32 | int64(vr.d[idx+5])<<40 | int64(vr.d[idx+6])<<48 | int64(vr.d[idx+7])<<56, nil + return int64(binary.LittleEndian.Uint64(vr.d[idx:])), nil } func (vr *valueReader) readu64() (uint64, error) { @@ -885,6 +884,5 @@ func (vr *valueReader) readu64() (uint64, error) { idx := vr.offset vr.offset += 8 - return uint64(vr.d[idx]) | uint64(vr.d[idx+1])<<8 | uint64(vr.d[idx+2])<<16 | uint64(vr.d[idx+3])<<24 | - uint64(vr.d[idx+4])<<32 | uint64(vr.d[idx+5])<<40 | uint64(vr.d[idx+6])<<48 | uint64(vr.d[idx+7])<<56, nil + return binary.LittleEndian.Uint64(vr.d[idx:]), nil } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go index 048b5eb99..af6098475 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go @@ -6,9 +6,9 @@ // Package bson is a library for reading, writing, and manipulating BSON. BSON is a binary serialization format used to // store documents and make remote procedure calls in MongoDB. The BSON specification is located at https://bsonspec.org. -// The BSON library handles marshalling and unmarshalling of values through a configurable codec system. For a description -// of the codec system and examples of registering custom codecs, see the bsoncodec package. For additional information and -// usage examples, check out the [Work with BSON] page in the Go Driver docs site. 
+// The BSON library handles marshaling and unmarshaling of values through a configurable codec system. For a description +// of the codec system and examples of registering custom codecs, see the bsoncodec package. For additional information +// and usage examples, check out the [Work with BSON] page in the Go Driver docs site. // // # Raw BSON // @@ -38,7 +38,7 @@ // bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} // bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} // -// When decoding BSON to a D or M, the following type mappings apply when unmarshalling: +// When decoding BSON to a D or M, the following type mappings apply when unmarshaling: // // 1. BSON int32 unmarshals to an int32. // 2. BSON int64 unmarshals to an int64. @@ -62,83 +62,78 @@ // 20. BSON DBPointer unmarshals to a primitive.DBPointer. // 21. BSON symbol unmarshals to a primitive.Symbol. // -// The above mappings also apply when marshalling a D or M to BSON. Some other useful marshalling mappings are: +// The above mappings also apply when marshaling a D or M to BSON. Some other useful marshaling mappings are: // // 1. time.Time marshals to a BSON datetime. // 2. int8, int16, and int32 marshal to a BSON int32. // 3. int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64 // otherwise. -// 4. int64 marshals to BSON int64. +// 4. int64 marshals to BSON int64 (unless [Encoder.IntMinSize] is set). // 5. uint8 and uint16 marshal to a BSON int32. -// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, -// inclusive, and BSON int64 otherwise. -// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or +// 6. uint, uint32, and uint64 marshal to a BSON int64 (unless [Encoder.IntMinSize] is set). +// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshaling a BSON null or // undefined value into a string will yield the empty string.). // // # Structs // -// Structs can be marshalled/unmarshalled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended +// Structs can be marshaled/unmarshaled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended // JSON, the following rules apply: // -// 1. Only exported fields in structs will be marshalled or unmarshalled. +// 1. Only exported fields in structs will be marshaled or unmarshaled. // -// 2. When marshalling a struct, each field will be lowercased to generate the key for the corresponding BSON element. +// 2. When marshaling a struct, each field will be lowercased to generate the key for the corresponding BSON element. // For example, a struct field named "Foo" will generate key "foo". This can be overridden via a struct tag (e.g. // `bson:"fooField"` to generate key "fooField" instead). // -// 3. An embedded struct field is marshalled as a subdocument. The key will be the lowercased name of the field's type. +// 3. An embedded struct field is marshaled as a subdocument. The key will be the lowercased name of the field's type. // -// 4. A pointer field is marshalled as the underlying type if the pointer is non-nil. If the pointer is nil, it is -// marshalled as a BSON null value. +// 4. A pointer field is marshaled as the underlying type if the pointer is non-nil. If the pointer is nil, it is +// marshaled as a BSON null value. // -// 5. 
When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents -// unmarshalled into an interface{} field will be unmarshalled as a D. +// 5. When unmarshaling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents +// unmarshaled into an interface{} field will be unmarshaled as a D. // // The encoding of each struct field can be customized by the "bson" struct tag. // // This tag behavior is configurable, and different struct tag behavior can be configured by initializing a new -// bsoncodec.StructCodec with the desired tag parser and registering that StructCodec onto the Registry. By default, JSON tags -// are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below: +// bsoncodec.StructCodec with the desired tag parser and registering that StructCodec onto the Registry. By default, JSON +// tags are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below: // // Example: // // structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser) // // The bson tag gives the name of the field, possibly followed by a comma-separated list of options. -// The name may be empty in order to specify options without overriding the default field name. The following options can be used -// to configure behavior: -// -// 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to -// the zero value. Fields with language primitive types such as integers, booleans, and strings are considered empty if -// their value is equal to the zero value for the type (i.e. 0 for integers, false for booleans, and "" for strings). -// Slices, maps, and arrays are considered empty if they are of length zero. Interfaces and pointers are considered -// empty if their value is nil. By default, structs are only considered empty if the struct type implements the -// bsoncodec.Zeroer interface and the IsZero method returns true. Struct fields whose types do not implement Zeroer are -// never considered empty and will be marshalled as embedded documents. +// The name may be empty in order to specify options without overriding the default field name. The following options can +// be used to configure behavior: +// +// 1. omitempty: If the omitempty struct tag is specified on a field, the field will be omitted from the marshaling if +// the field has an empty value, defined as false, 0, a nil pointer, a nil interface value, and any empty array, +// slice, map, or string. // NOTE: It is recommended that this tag be used for all slice and map fields. // // 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of -// the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For other -// types, this tag is ignored. +// the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For +// other types, this tag is ignored. // -// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled -// into that field will be truncated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int, -// it will be unmarshalled as 3. If this tag is not specified, the decoder will throw an error if the value cannot be -// decoded without losing precision. 
For float64 or non-numeric types, this tag is ignored. +// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles +// unmarshaled into that field will be truncated at the decimal point. For example, if 3.14 is unmarshaled into a +// field of type int, it will be unmarshaled as 3. If this tag is not specified, the decoder will throw an error if +// the value cannot be decoded without losing precision. For float64 or non-numeric types, this tag is ignored. // // 4. inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when -// marshalling and "un-flattened" when unmarshalling. This means that all of the fields in that struct/map will be -// pulled up one level and will become top-level fields rather than being fields in a nested document. For example, if a -// map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will be -// {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If there are -// duplicated fields in the resulting document when an inlined struct is marshalled, the inlined field will be overwritten. -// If there are duplicated fields in the resulting document when an inlined map is marshalled, an error will be returned. -// This tag can be used with fields that are pointers to structs. If an inlined pointer field is nil, it will not be -// marshalled. For fields that are not maps or structs, this tag is ignored. -// -// # Marshalling and Unmarshalling -// -// Manually marshalling and unmarshalling can be done with the Marshal and Unmarshal family of functions. +// marshaling and "un-flattened" when unmarshaling. This means that all of the fields in that struct/map will be +// pulled up one level and will become top-level fields rather than being fields in a nested document. For example, +// if a map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will +// be {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If +// there are duplicated fields in the resulting document when an inlined struct is marshaled, the inlined field will +// be overwritten. If there are duplicated fields in the resulting document when an inlined map is marshaled, an +// error will be returned. This tag can be used with fields that are pointers to structs. If an inlined pointer field +// is nil, it will not be marshaled. For fields that are not maps or structs, this tag is ignored. +// +// # Marshaling and Unmarshaling +// +// Manually marshaling and unmarshaling can be done with the Marshal and Unmarshal family of functions. // // [Work with BSON]: https://www.mongodb.com/docs/drivers/go/current/fundamentals/bson/ package bson diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go index 24ab58fc4..08c39514b 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go @@ -164,9 +164,6 @@ func (d Decimal128) BigInt() (*big.Int, int, error) { // Would be handled by the logic below, but that's trivial and common. 
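// The reworked bson package documentation above describes the struct tag
// options in prose. A minimal, self-contained sketch of how those tags
// combine in practice (the Order type and its values are illustrative only;
// the tag semantics are the documented driver behavior):
//
//	package main
//
//	import (
//		"fmt"
//
//		"go.mongodb.org/mongo-driver/bson"
//	)
//
//	type Order struct {
//		ID    string            `bson:"_id"`
//		Note  string            `bson:"note,omitempty"` // omitted when ""
//		Count int64             `bson:"count,minsize"`  // int32 when it fits
//		Extra map[string]string `bson:",inline"`        // flattened to top level
//	}
//
//	func main() {
//		raw, err := bson.Marshal(Order{ID: "o1", Count: 7, Extra: map[string]string{"foo": "bar"}})
//		if err != nil {
//			panic(err)
//		}
//		// "note" is absent, "count" is an int32, and "foo" is a top-level field.
//		fmt.Println(bson.Raw(raw).String())
//	}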
if high == 0 && low == 0 && exp == 0 { - if posSign { - return new(big.Int), 0, nil - } return new(big.Int), 0, nil } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go index 9bbaffac2..c130e3ff1 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go @@ -183,7 +183,7 @@ func processUniqueBytes() [5]byte { var b [5]byte _, err := io.ReadFull(rand.Reader, b[:]) if err != nil { - panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err)) + panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err)) } return b @@ -193,7 +193,7 @@ func readRandomUint32() uint32 { var b [4]byte _, err := io.ReadFull(rand.Reader, b[:]) if err != nil { - panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err)) + panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %w", err)) } return (uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) diff --git a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go index 4d1bfb316..a8088e1e3 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/raw_value.go @@ -88,8 +88,12 @@ func (rv RawValue) UnmarshalWithRegistry(r *bsoncodec.Registry, val interface{}) return dec.DecodeValue(bsoncodec.DecodeContext{Registry: r}, vr, rval) } -// UnmarshalWithContext performs the same unmarshalling as Unmarshal but uses the provided DecodeContext -// instead of the one attached or the default registry. +// UnmarshalWithContext performs the same unmarshalling as Unmarshal but uses +// the provided DecodeContext instead of the one attached or the default +// registry. +// +// Deprecated: Use [RawValue.UnmarshalWithRegistry] with a custom registry to customize +// unmarshal behavior instead. func (rv RawValue) UnmarshalWithContext(dc *bsoncodec.DecodeContext, val interface{}) error { if dc == nil { return ErrNilContext diff --git a/vendor/go.mongodb.org/mongo-driver/bson/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/registry.go index b5b0f3568..d6afb2850 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/registry.go @@ -10,15 +10,27 @@ import ( "go.mongodb.org/mongo-driver/bson/bsoncodec" ) -// DefaultRegistry is the default bsoncodec.Registry. It contains the default codecs and the -// primitive codecs. +// DefaultRegistry is the default bsoncodec.Registry. It contains the default +// codecs and the primitive codecs. +// +// Deprecated: Use [NewRegistry] to construct a new default registry. To use a +// custom registry when marshaling or unmarshaling, use the "SetRegistry" method +// on an [Encoder] or [Decoder] instead: +// +// dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data)) +// if err != nil { +// panic(err) +// } +// dec.SetRegistry(reg) +// +// See [Encoder] and [Decoder] for more examples. var DefaultRegistry = NewRegistry() // NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and // decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the // PrimitiveCodecs type in this package. // -// Deprecated: Use NewRegistry instead. +// Deprecated: Use [NewRegistry] instead. 
func NewRegistryBuilder() *bsoncodec.RegistryBuilder { rb := bsoncodec.NewRegistryBuilder() bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb) diff --git a/vendor/go.mongodb.org/mongo-driver/event/monitoring.go b/vendor/go.mongodb.org/mongo-driver/event/monitoring.go index 53d1caf2e..ddc7abacf 100644 --- a/vendor/go.mongodb.org/mongo-driver/event/monitoring.go +++ b/vendor/go.mongodb.org/mongo-driver/event/monitoring.go @@ -117,11 +117,13 @@ type PoolEvent struct { Address string `json:"address"` ConnectionID uint64 `json:"connectionId"` PoolOptions *MonitorPoolOptions `json:"options"` + Duration time.Duration `json:"duration"` Reason string `json:"reason"` // ServiceID is only set if the Type is PoolCleared and the server is deployed behind a load balancer. This field // can be used to distinguish between individual servers in a load balanced deployment. - ServiceID *primitive.ObjectID `json:"serviceId"` - Error error `json:"error"` + ServiceID *primitive.ObjectID `json:"serviceId"` + Interruption bool `json:"interruptInUseConnections"` + Error error `json:"error"` } // PoolMonitor is a function that allows the user to gain access to events occurring in the pool diff --git a/vendor/go.mongodb.org/mongo-driver/internal/csfle/csfle.go b/vendor/go.mongodb.org/mongo-driver/internal/csfle/csfle.go index 71e71b468..20a6d43a0 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/csfle/csfle.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/csfle/csfle.go @@ -7,6 +7,7 @@ package csfle import ( + "errors" "fmt" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -23,7 +24,7 @@ func GetEncryptedStateCollectionName(efBSON bsoncore.Document, dataCollectionNam fieldName := stateCollection + "Collection" val, err := efBSON.LookupErr(fieldName) if err != nil { - if err != bsoncore.ErrElementNotFound { + if !errors.Is(err, bsoncore.ErrElementNotFound) { return "", err } // Return default name. diff --git a/vendor/go.mongodb.org/mongo-driver/internal/csot/csot.go b/vendor/go.mongodb.org/mongo-driver/internal/csot/csot.go index 678252c51..43801a5d4 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/csot/csot.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/csot/csot.go @@ -21,11 +21,13 @@ type timeoutKey struct{} // TODO default behavior. func MakeTimeoutContext(ctx context.Context, to time.Duration) (context.Context, context.CancelFunc) { // Only use the passed in Duration as a timeout on the Context if it - // is non-zero. + // is non-zero and if the Context doesn't already have a timeout. cancelFunc := func() {} - if to != 0 { + if _, deadlineSet := ctx.Deadline(); to != 0 && !deadlineSet { ctx, cancelFunc = context.WithTimeout(ctx, to) } + + // Add timeoutKey either way to indicate CSOT is enabled. return context.WithValue(ctx, timeoutKey{}, true), cancelFunc } diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go index c5ff1474b..0a6c1bdca 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/io_sink.go @@ -9,6 +9,7 @@ package logger import ( "encoding/json" "io" + "math" "sync" "time" ) @@ -36,7 +37,11 @@ func NewIOSink(out io.Writer) *IOSink { // Info will write a JSON-encoded message to the io.Writer. 
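// The key/value map handed to the JSON encoder below is pre-sized for the
// supplied pairs plus the timestamp and message entries; the math.MaxInt
// guard only skips that +2 headroom when adding it would overflow the
// capacity hint.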
func (sink *IOSink) Info(_ int, msg string, keysAndValues ...interface{}) { - kvMap := make(map[string]interface{}, len(keysAndValues)/2+2) + mapSize := len(keysAndValues) / 2 + if math.MaxInt-mapSize >= 2 { + mapSize += 2 + } + kvMap := make(map[string]interface{}, mapSize) kvMap[KeyTimestamp] = time.Now().UnixNano() kvMap[KeyMessage] = msg diff --git a/vendor/go.mongodb.org/mongo-driver/internal/logger/logger.go b/vendor/go.mongodb.org/mongo-driver/internal/logger/logger.go index 03d42814f..2250286e4 100644 --- a/vendor/go.mongodb.org/mongo-driver/internal/logger/logger.go +++ b/vendor/go.mongodb.org/mongo-driver/internal/logger/logger.go @@ -183,7 +183,7 @@ func selectLogSink(sink LogSink) (LogSink, *os.File, error) { if path != "" { logFile, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666) if err != nil { - return nil, nil, fmt.Errorf("unable to open log file: %v", err) + return nil, nil, fmt.Errorf("unable to open log file: %w", err) } return NewIOSink(logFile), logFile, nil diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go index 42d286ea7..3fdb67b9a 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/bulk_write.go @@ -8,6 +8,7 @@ package mongo import ( "context" + "errors" "go.mongodb.org/mongo-driver/bson/bsoncodec" "go.mongodb.org/mongo-driver/bson/primitive" @@ -71,7 +72,7 @@ func (bw *bulkWrite) execute(ctx context.Context) error { bwErr.WriteErrors = append(bwErr.WriteErrors, batchErr.WriteErrors...) - commandErrorOccurred := err != nil && err != driver.ErrUnacknowledgedWrite + commandErrorOccurred := err != nil && !errors.Is(err, driver.ErrUnacknowledgedWrite) writeErrorOccurred := len(batchErr.WriteErrors) > 0 || batchErr.WriteConcernError != nil if !continueOnError && (commandErrorOccurred || writeErrorOccurred) { if err != nil { @@ -108,8 +109,8 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr case *InsertOneModel: res, err := bw.runInsert(ctx, batch) if err != nil { - writeErr, ok := err.(driver.WriteCommandError) - if !ok { + var writeErr driver.WriteCommandError + if !errors.As(err, &writeErr) { return BulkWriteResult{}, batchErr, err } writeErrors = writeErr.WriteErrors @@ -120,8 +121,8 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr case *DeleteOneModel, *DeleteManyModel: res, err := bw.runDelete(ctx, batch) if err != nil { - writeErr, ok := err.(driver.WriteCommandError) - if !ok { + var writeErr driver.WriteCommandError + if !errors.As(err, &writeErr) { return BulkWriteResult{}, batchErr, err } writeErrors = writeErr.WriteErrors @@ -132,8 +133,8 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr case *ReplaceOneModel, *UpdateOneModel, *UpdateManyModel: res, err := bw.runUpdate(ctx, batch) if err != nil { - writeErr, ok := err.(driver.WriteCommandError) - if !ok { + var writeErr driver.WriteCommandError + if !errors.As(err, &writeErr) { return BulkWriteResult{}, batchErr, err } writeErrors = writeErr.WriteErrors @@ -170,7 +171,7 @@ func (bw *bulkWrite) runInsert(ctx context.Context, batch bulkWriteBatch) (opera if err != nil { return operation.InsertResult{}, err } - doc, _, err = ensureID(doc, primitive.NewObjectID(), bw.collection.bsonOpts, bw.collection.registry) + doc, _, err = ensureID(doc, primitive.NilObjectID, bw.collection.bsonOpts, bw.collection.registry) if err != nil { return operation.InsertResult{}, 
err } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go index 773cbb0e5..8d0a2031d 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/change_stream.go @@ -277,10 +277,10 @@ func (cs *ChangeStream) executeOperation(ctx context.Context, resuming bool) err cs.aggregate.Pipeline(plArr) } - // If no deadline is set on the passed-in context, cs.client.timeout is set, and context is not already - // a Timeout context, honor cs.client.timeout in new Timeout context for change stream operation execution - // and potential retry. - if _, deadlineSet := ctx.Deadline(); !deadlineSet && cs.client.timeout != nil && !csot.IsTimeoutContext(ctx) { + // If cs.client.timeout is set and context is not already a Timeout context, + // honor cs.client.timeout in new Timeout context for change stream + // operation execution and potential retry. + if cs.client.timeout != nil && !csot.IsTimeoutContext(ctx) { newCtx, cancelFunc := csot.MakeTimeoutContext(ctx, *cs.client.timeout) // Redefine ctx to be the new timeout-derived context. ctx = newCtx @@ -531,6 +531,12 @@ func (cs *ChangeStream) ID() int64 { return cs.cursor.ID() } +// RemainingBatchLength returns the number of documents left in the current batch. If this returns zero, the subsequent +// call to Next or TryNext will do a network request to fetch the next batch. +func (cs *ChangeStream) RemainingBatchLength() int { + return len(cs.batch) +} + // SetBatchSize sets the number of documents to fetch from the database with // each iteration of the ChangeStream's "Next" or "TryNext" method. This setting // only affects subsequent document batches fetched from the database. @@ -689,8 +695,8 @@ func (cs *ChangeStream) loopNext(ctx context.Context, nonBlocking bool) { } func (cs *ChangeStream) isResumableError() bool { - commandErr, ok := cs.err.(CommandError) - if !ok || commandErr.HasErrorLabel(networkErrorLabel) { + var commandErr CommandError + if !errors.As(cs.err, &commandErr) || commandErr.HasErrorLabel(networkErrorLabel) { // All non-server errors or network errors are resumable. 
return true } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client.go b/vendor/go.mongodb.org/mongo-driver/mongo/client.go index 592927483..4266412aa 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client.go @@ -209,10 +209,6 @@ func NewClient(opts ...*options.ClientOptions) (*Client, error) { clientOpt.SetMaxPoolSize(defaultMaxPoolSize) } - if err != nil { - return nil, err - } - cfg, err := topology.NewConfig(clientOpt, client.clock) if err != nil { return nil, err @@ -555,7 +551,7 @@ func (c *Client) newMongoCrypt(opts *options.AutoEncryptionOptions) (*mongocrypt kmsProviders, err := marshal(opts.KmsProviders, c.bsonOpts, c.registry) if err != nil { - return nil, fmt.Errorf("error creating KMS providers document: %v", err) + return nil, fmt.Errorf("error creating KMS providers document: %w", err) } // Set the crypt_shared library override path from the "cryptSharedLibPath" extra option if one diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go index 01c2ec319..b51f57b47 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/client_encryption.go @@ -46,7 +46,7 @@ func NewClientEncryption(keyVaultClient *Client, opts ...*options.ClientEncrypti kmsProviders, err := marshal(ceo.KmsProviders, nil, nil) if err != nil { - return nil, fmt.Errorf("error creating KMS providers map: %v", err) + return nil, fmt.Errorf("error creating KMS providers map: %w", err) } mc, err := mongocrypt.NewMongoCrypt(mcopts.MongoCrypt(). diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go index fcbfcc77a..4cf6fd1a1 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/collection.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/collection.go @@ -256,7 +256,7 @@ func (coll *Collection) insert(ctx context.Context, documents []interface{}, if err != nil { return nil, err } - bsoncoreDoc, id, err := ensureID(bsoncoreDoc, primitive.NewObjectID(), coll.bsonOpts, coll.registry) + bsoncoreDoc, id, err := ensureID(bsoncoreDoc, primitive.NilObjectID, coll.bsonOpts, coll.registry) if err != nil { return nil, err } @@ -313,8 +313,8 @@ func (coll *Collection) insert(ctx context.Context, documents []interface{}, op = op.Retry(retry) err = op.Execute(ctx) - wce, ok := err.(driver.WriteCommandError) - if !ok { + var wce driver.WriteCommandError + if !errors.As(err, &wce) { return result, err } @@ -388,8 +388,8 @@ func (coll *Collection) InsertMany(ctx context.Context, documents []interface{}, } imResult := &InsertManyResult{InsertedIDs: result} - writeException, ok := err.(WriteException) - if !ok { + var writeException WriteException + if !errors.As(err, &writeException) { return imResult, err } @@ -863,6 +863,15 @@ func aggregate(a aggregateParams) (cur *Cursor, err error) { Timeout(a.client.timeout). MaxTime(ao.MaxTime) + // Omit "maxTimeMS" from operations that return a user-managed cursor to + // prevent confusing "cursor not found" errors. To maintain existing + // behavior for users who set "timeoutMS" with no context deadline, only + // omit "maxTimeMS" when a context deadline is set. + // + // See DRIVERS-2722 for more detail. 
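	// Illustration (the pipeline and timeout values are assumptions, not
	// part of this change): with
	//
	//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	//	defer cancel()
	//	cur, err := coll.Aggregate(ctx, pipeline)
	//
	// the context deadline governs the cursor's lifetime, so "maxTimeMS" is
	// omitted from the initial command; with a plain context.Background()
	// the pre-existing behavior of deriving "maxTimeMS" from "timeoutMS" is
	// kept.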
+ _, deadlineSet := a.ctx.Deadline() + op.OmitCSOTMaxTimeMS(deadlineSet) + if ao.AllowDiskUse != nil { op.AllowDiskUse(*ao.AllowDiskUse) } @@ -1196,6 +1205,23 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, ctx = context.Background() } + // Omit "maxTimeMS" from operations that return a user-managed cursor to + // prevent confusing "cursor not found" errors. To maintain existing + // behavior for users who set "timeoutMS" with no context deadline, only + // omit "maxTimeMS" when a context deadline is set. + // + // See DRIVERS-2722 for more detail. + _, deadlineSet := ctx.Deadline() + return coll.find(ctx, filter, deadlineSet, opts...) +} + +func (coll *Collection) find( + ctx context.Context, + filter interface{}, + omitCSOTMaxTimeMS bool, + opts ...*options.FindOptions, +) (cur *Cursor, err error) { + f, err := marshal(filter, coll.bsonOpts, coll.registry) if err != nil { return nil, err @@ -1230,7 +1256,8 @@ func (coll *Collection) Find(ctx context.Context, filter interface{}, CommandMonitor(coll.client.monitor).ServerSelector(selector). ClusterClock(coll.client.clock).Database(coll.db.name).Collection(coll.name). Deployment(coll.client.deployment).Crypt(coll.client.cryptFLE).ServerAPI(coll.client.serverAPI). - Timeout(coll.client.timeout).MaxTime(fo.MaxTime).Logger(coll.client.logger) + Timeout(coll.client.timeout).MaxTime(fo.MaxTime).Logger(coll.client.logger). + OmitCSOTMaxTimeMS(omitCSOTMaxTimeMS) cursorOpts := coll.client.createBaseCursorOptions() @@ -1408,7 +1435,7 @@ func (coll *Collection) FindOne(ctx context.Context, filter interface{}, // by the server. findOpts = append(findOpts, options.Find().SetLimit(-1)) - cursor, err := coll.Find(ctx, filter, findOpts...) + cursor, err := coll.find(ctx, filter, false, findOpts...) return &SingleResult{ ctx: ctx, cur: cursor, @@ -1775,8 +1802,11 @@ func (coll *Collection) Indexes() IndexView { // SearchIndexes returns a SearchIndexView instance that can be used to perform operations on the search indexes for the collection. func (coll *Collection) SearchIndexes() SearchIndexView { + c, _ := coll.Clone() // Clone() always return a nil error. + c.readConcern = nil + c.writeConcern = nil return SearchIndexView{ - coll: coll, + coll: c, } } @@ -1806,7 +1836,7 @@ func (coll *Collection) Drop(ctx context.Context) error { func (coll *Collection) dropEncryptedCollection(ctx context.Context, ef interface{}) error { efBSON, err := marshal(ef, coll.bsonOpts, coll.registry) if err != nil { - return fmt.Errorf("error transforming document: %v", err) + return fmt.Errorf("error transforming document: %w", err) } // Drop the two encryption-related, associated collections: `escCollection` and `ecocCollection`. diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go index d2228ed9c..c77d1109f 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/cursor.go @@ -160,13 +160,13 @@ func (c *Cursor) next(ctx context.Context, nonBlocking bool) bool { ctx = context.Background() } doc, err := c.batch.Next() - switch err { - case nil: + switch { + case err == nil: // Consume the next document in the current batch. 
c.batchLength-- c.Current = bson.Raw(doc) return true - case io.EOF: // Need to do a getMore + case errors.Is(err, io.EOF): // Need to do a getMore default: c.err = err return false @@ -204,12 +204,12 @@ func (c *Cursor) next(ctx context.Context, nonBlocking bool) bool { c.batch = c.bc.Batch() c.batchLength = c.batch.DocumentCount() doc, err = c.batch.Next() - switch err { - case nil: + switch { + case err == nil: c.batchLength-- c.Current = bson.Raw(doc) return true - case io.EOF: // Empty batch so we continue + case errors.Is(err, io.EOF): // Empty batch so we continue default: c.err = err return false diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/database.go b/vendor/go.mongodb.org/mongo-driver/mongo/database.go index f5d5ad379..57c0186ec 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/database.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/database.go @@ -185,18 +185,21 @@ func (db *Database) processRunCommand(ctx context.Context, cmd interface{}, op = operation.NewCommand(runCmdDoc) } - // TODO(GODRIVER-2649): ReadConcern(db.readConcern) will not actually pass the database's - // read concern. Remove this note once readConcern is correctly passed to the operation - // level. return op.Session(sess).CommandMonitor(db.client.monitor). ServerSelector(readSelect).ClusterClock(db.client.clock). - Database(db.name).Deployment(db.client.deployment).ReadConcern(db.readConcern). + Database(db.name).Deployment(db.client.deployment). Crypt(db.client.cryptFLE).ReadPreference(ro.ReadPreference).ServerAPI(db.client.serverAPI). Timeout(db.client.timeout).Logger(db.client.logger), sess, nil } -// RunCommand executes the given command against the database. This function does not obey the Database's read -// preference. To specify a read preference, the RunCmdOptions.ReadPreference option must be used. +// RunCommand executes the given command against the database. +// +// This function does not obey the Database's readPreference. To specify a read +// preference, the RunCmdOptions.ReadPreference option must be used. +// +// This function does not obey the Database's readConcern or writeConcern. A +// user must supply these values manually in the user-provided runCommand +// parameter. // // The runCommand parameter must be a document for the command to be executed. It cannot be nil. // This must be an order-preserving type such as bson.D. Map types such as bson.M are not valid. @@ -563,7 +566,7 @@ func (db *Database) getEncryptedFieldsFromServer(ctx context.Context, collection } collSpec := collSpecs[0] rawValue, err := collSpec.Options.LookupErr("encryptedFields") - if err == bsoncore.ErrElementNotFound { + if errors.Is(err, bsoncore.ErrElementNotFound) { return nil, nil } else if err != nil { return nil, err @@ -577,7 +580,7 @@ func (db *Database) getEncryptedFieldsFromServer(ctx context.Context, collection return encryptedFields, nil } -// getEncryptedFieldsFromServer tries to get an "encryptedFields" document associated with collectionName by checking the client EncryptedFieldsMap. +// getEncryptedFieldsFromMap tries to get an "encryptedFields" document associated with collectionName by checking the client EncryptedFieldsMap. // Returns nil and no error if an EncryptedFieldsMap is not configured, or does not contain an entry for collectionName. 
func (db *Database) getEncryptedFieldsFromMap(collectionName string) interface{} { // Check the EncryptedFieldsMap @@ -599,7 +602,7 @@ func (db *Database) getEncryptedFieldsFromMap(collectionName string) interface{} func (db *Database) createCollectionWithEncryptedFields(ctx context.Context, name string, ef interface{}, opts ...*options.CreateCollectionOptions) error { efBSON, err := marshal(ef, db.bsonOpts, db.registry) if err != nil { - return fmt.Errorf("error transforming document: %v", err) + return fmt.Errorf("error transforming document: %w", err) } // Check the wire version to ensure server is 7.0.0 or newer. @@ -659,7 +662,7 @@ func (db *Database) createCollectionWithEncryptedFields(ctx context.Context, nam // Create an index on the __safeContent__ field in the collection @collectionName. if _, err := db.Collection(name).Indexes().CreateOne(ctx, IndexModel{Keys: bson.D{{"__safeContent__", 1}}}); err != nil { - return fmt.Errorf("error creating safeContent index: %v", err) + return fmt.Errorf("error creating safeContent index: %w", err) } return nil diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go index aee1f050c..176f0fb53 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/description/server_selector.go @@ -182,7 +182,17 @@ func (writeServerSelector) SelectServer(t Topology, candidates []Server) ([]Serv case Single, LoadBalanced: return candidates, nil default: - result := []Server{} + // Determine the capacity of the results slice. + selected := 0 + for _, candidate := range candidates { + switch candidate.Kind { + case Mongos, RSPrimary, Standalone: + selected++ + } + } + + // Append candidates to the results slice. + result := make([]Server, 0, selected) for _, candidate := range candidates { switch candidate.Kind { case Mongos, RSPrimary, Standalone: diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go index 72c3bcc24..d92c9ca9b 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/errors.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/errors.go @@ -52,7 +52,7 @@ func replaceErrors(err error) error { return nil } - if err == topology.ErrTopologyClosed { + if errors.Is(err, topology.ErrTopologyClosed) { return ErrClientDisconnected } if de, ok := err.(driver.Error); ok { @@ -630,7 +630,7 @@ const ( // WriteConcernError will be returned over WriteErrors if both are present. func processWriteError(err error) (returnResult, error) { switch { - case err == driver.ErrUnacknowledgedWrite: + case errors.Is(err, driver.ErrUnacknowledgedWrite): return rrAll, ErrUnacknowledgedWrite case err != nil: switch tt := err.(type) { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go index 41a93a214..8d3555d0b 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go @@ -86,6 +86,9 @@ func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOption description.LatencySelector(iv.coll.client.localThreshold), }) selector = makeReadPrefSelector(sess, selector, iv.coll.client.localThreshold) + + // TODO(GODRIVER-3038): This operation should pass CSE to the ListIndexes + // Crypt setter to be applied to the operation. op := operation.NewListIndexes(). 
Session(sess).CommandMonitor(iv.coll.client.monitor). ServerSelector(selector).ClusterClock(iv.coll.client.clock). @@ -251,6 +254,10 @@ func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts .. option := options.MergeCreateIndexesOptions(opts...) + // TODO(GODRIVER-3038): This operation should pass CSE to the CreateIndexes + // Crypt setter to be applied to the operation. + // + // This was added in GODRIVER-2413 for the 2.0 major release. op := operation.NewCreateIndexes(indexes). Session(sess).WriteConcern(wc).ClusterClock(iv.coll.client.clock). Database(iv.coll.db.name).Collection(iv.coll.name).CommandMonitor(iv.coll.client.monitor). @@ -387,6 +394,9 @@ func (iv IndexView) drop(ctx context.Context, name string, opts ...*options.Drop selector := makePinnedSelector(sess, iv.coll.writeSelector) dio := options.MergeDropIndexesOptions(opts...) + + // TODO(GODRIVER-3038): This operation should pass CSE to the DropIndexes + // Crypt setter to be applied to the operation. op := operation.NewDropIndexes(name). Session(sess).WriteConcern(wc).CommandMonitor(iv.coll.client.monitor). ServerSelector(selector).ClusterClock(iv.coll.client.clock). diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go index 393c5b771..ec8e817c7 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/mongo.go @@ -177,8 +177,11 @@ func marshal( } // ensureID inserts the given ObjectID as an element named "_id" at the -// beginning of the given BSON document if there is not an "_id" already. If -// there is already an element named "_id", the document is not modified. It +// beginning of the given BSON document if there is not an "_id" already. +// If the given ObjectID is primitive.NilObjectID, a new object ID will be +// generated with time.Now(). +// +// If there is already an element named "_id", the document is not modified. It // returns the resulting document and the decoded Go value of the "_id" element. func ensureID( doc bsoncore.Document, @@ -219,6 +222,9 @@ func ensureID( const extraSpace = 17 doc = make(bsoncore.Document, 0, len(olddoc)+extraSpace) _, doc = bsoncore.ReserveLength(doc) + if oid.IsZero() { + oid = primitive.NewObjectID() + } doc = bsoncore.AppendObjectIDElement(doc, "_id", oid) // Remove and re-write the BSON document length header. diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go index 42664be03..17b373130 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/clientoptions.go @@ -15,6 +15,7 @@ import ( "errors" "fmt" "io/ioutil" + "math" "net" "net/http" "strings" @@ -237,7 +238,6 @@ type ClientOptions struct { ZstdLevel *int err error - uri string cs *connstring.ConnString // AuthenticateToAnything skips server type checks when deciding if authentication is possible. @@ -338,7 +338,10 @@ func (c *ClientOptions) validate() error { // GetURI returns the original URI used to configure the ClientOptions instance. If ApplyURI was not called during // construction, this returns "". func (c *ClientOptions) GetURI() string { - return c.uri + if c.cs == nil { + return "" + } + return c.cs.Original } // ApplyURI parses the given URI and sets options accordingly. 
The URI can contain host names, IPv4/IPv6 literals, or @@ -360,13 +363,12 @@ func (c *ClientOptions) ApplyURI(uri string) *ClientOptions { return c } - c.uri = uri cs, err := connstring.ParseAndValidate(uri) if err != nil { c.err = err return c } - c.cs = &cs + c.cs = cs if cs.AppName != "" { c.AppName = &cs.AppName @@ -1134,9 +1136,6 @@ func MergeClientOptions(opts ...*ClientOptions) *ClientOptions { if opt.err != nil { c.err = opt.err } - if opt.uri != "" { - c.uri = opt.uri - } if opt.cs != nil { c.cs = opt.cs } @@ -1179,7 +1178,19 @@ func addClientCertFromSeparateFiles(cfg *tls.Config, keyFile, certFile, keyPassw return "", err } - data := make([]byte, 0, len(keyData)+len(certData)+1) + keySize := len(keyData) + if keySize > 64*1024*1024 { + return "", errors.New("X.509 key must be less than 64 MiB") + } + certSize := len(certData) + if certSize > 64*1024*1024 { + return "", errors.New("X.509 certificate must be less than 64 MiB") + } + dataSize := keySize + certSize + 1 + if dataSize > math.MaxInt { + return "", errors.New("size overflow") + } + data := make([]byte, 0, dataSize) data = append(data, keyData...) data = append(data, '\n') data = append(data, certData...) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go index 04fda6d77..7904dbd67 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go @@ -95,6 +95,9 @@ func MergeCollectionOptions(opts ...*CollectionOptions) *CollectionOptions { if opt.Registry != nil { c.Registry = opt.Registry } + if opt.BSONOptions != nil { + c.BSONOptions = opt.BSONOptions + } } return c diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go index 8a380d216..38ee13550 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go @@ -95,6 +95,9 @@ func MergeDatabaseOptions(opts ...*DatabaseOptions) *DatabaseOptions { if opt.Registry != nil { d.Registry = opt.Registry } + if opt.BSONOptions != nil { + d.BSONOptions = opt.BSONOptions + } } return d diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go index fd17ce44e..36088c2fc 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/mongooptions.go @@ -104,7 +104,7 @@ const ( // UpdateLookup includes a delta describing the changes to the document and a copy of the entire document that // was changed. UpdateLookup FullDocument = "updateLookup" - // WhenAvailable includes a post-image of the the modified document for replace and update change events + // WhenAvailable includes a post-image of the modified document for replace and update change events // if the post-image for this event is available. 
WhenAvailable FullDocument = "whenAvailable" ) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go index 9774d615b..8cb8a08b7 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/searchindexoptions.go @@ -9,6 +9,7 @@ package options // SearchIndexesOptions represents options that can be used to configure a SearchIndexView. type SearchIndexesOptions struct { Name *string + Type *string } // SearchIndexes creates a new SearchIndexesOptions instance. @@ -22,6 +23,12 @@ func (sio *SearchIndexesOptions) SetName(name string) *SearchIndexesOptions { return sio } +// SetType sets the value for the Type field. +func (sio *SearchIndexesOptions) SetType(typ string) *SearchIndexesOptions { + sio.Type = &typ + return sio +} + // CreateSearchIndexesOptions represents options that can be used to configure a SearchIndexView.CreateOne or // SearchIndexView.CreateMany operation. type CreateSearchIndexesOptions struct { diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go index 6a7871531..73fe8534e 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/search_index_view.go @@ -13,7 +13,6 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo/options" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/operation" @@ -109,6 +108,9 @@ func (siv SearchIndexView) CreateMany( if model.Options != nil && model.Options.Name != nil { indexes = bsoncore.AppendStringElement(indexes, "name", *model.Options.Name) } + if model.Options != nil && model.Options.Type != nil { + indexes = bsoncore.AppendStringElement(indexes, "type", *model.Options.Type) + } indexes = bsoncore.AppendDocumentElement(indexes, "definition", definition) indexes, err = bsoncore.AppendDocumentEnd(indexes, iidx) @@ -134,20 +136,13 @@ func (siv SearchIndexView) CreateMany( return nil, err } - wc := siv.coll.writeConcern - if sess.TransactionRunning() { - wc = nil - } - if !writeconcern.AckWrite(wc) { - sess = nil - } - selector := makePinnedSelector(sess, siv.coll.writeSelector) op := operation.NewCreateSearchIndexes(indexes). - Session(sess).WriteConcern(wc).ClusterClock(siv.coll.client.clock). - Database(siv.coll.db.name).Collection(siv.coll.name).CommandMonitor(siv.coll.client.monitor). - Deployment(siv.coll.client.deployment).ServerSelector(selector).ServerAPI(siv.coll.client.serverAPI). + Session(sess).CommandMonitor(siv.coll.client.monitor). + ServerSelector(selector).ClusterClock(siv.coll.client.clock). + Collection(siv.coll.name).Database(siv.coll.db.name). + Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI). Timeout(siv.coll.client.timeout) err = op.Execute(ctx) @@ -196,20 +191,12 @@ func (siv SearchIndexView) DropOne( return err } - wc := siv.coll.writeConcern - if sess.TransactionRunning() { - wc = nil - } - if !writeconcern.AckWrite(wc) { - sess = nil - } - selector := makePinnedSelector(sess, siv.coll.writeSelector) op := operation.NewDropSearchIndex(name). - Session(sess).WriteConcern(wc).CommandMonitor(siv.coll.client.monitor). + Session(sess).CommandMonitor(siv.coll.client.monitor). 
ServerSelector(selector).ClusterClock(siv.coll.client.clock). - Database(siv.coll.db.name).Collection(siv.coll.name). + Collection(siv.coll.name).Database(siv.coll.db.name). Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI). Timeout(siv.coll.client.timeout) @@ -258,20 +245,12 @@ func (siv SearchIndexView) UpdateOne( return err } - wc := siv.coll.writeConcern - if sess.TransactionRunning() { - wc = nil - } - if !writeconcern.AckWrite(wc) { - sess = nil - } - selector := makePinnedSelector(sess, siv.coll.writeSelector) op := operation.NewUpdateSearchIndex(name, indexDefinition). - Session(sess).WriteConcern(wc).CommandMonitor(siv.coll.client.monitor). + Session(sess).CommandMonitor(siv.coll.client.monitor). ServerSelector(selector).ClusterClock(siv.coll.client.clock). - Database(siv.coll.db.name).Collection(siv.coll.name). + Collection(siv.coll.name).Database(siv.coll.db.name). Deployment(siv.coll.client.deployment).ServerAPI(siv.coll.client.serverAPI). Timeout(siv.coll.client.timeout) diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go b/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go index 8e288d10b..7a73d8d72 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/writeconcern/writeconcern.go @@ -42,7 +42,7 @@ var ErrNegativeW = errors.New("write concern `w` field cannot be a negative numb // Deprecated: ErrNegativeWTimeout will be removed in Go Driver 2.0. var ErrNegativeWTimeout = errors.New("write concern `wtimeout` field cannot be negative") -// A WriteConcern defines a MongoDB read concern, which describes the level of acknowledgment +// A WriteConcern defines a MongoDB write concern, which describes the level of acknowledgment // requested from MongoDB for write operations to a standalone mongod, to replica sets, or to // sharded clusters. // @@ -51,7 +51,7 @@ var ErrNegativeWTimeout = errors.New("write concern `wtimeout` field cannot be n type WriteConcern struct { // W requests acknowledgment that the write operation has propagated to a // specified number of mongod instances or to mongod instances with - // specified tags. It sets the the "w" option in a MongoDB write concern. + // specified tags. It sets the "w" option in a MongoDB write concern. // // W values must be a string or an int. // diff --git a/vendor/go.mongodb.org/mongo-driver/version/version.go b/vendor/go.mongodb.org/mongo-driver/version/version.go index 4bd1f9b5e..659d48d7a 100644 --- a/vendor/go.mongodb.org/mongo-driver/version/version.go +++ b/vendor/go.mongodb.org/mongo-driver/version/version.go @@ -8,4 +8,4 @@ package version // import "go.mongodb.org/mongo-driver/version" // Driver is the current version of the driver. -var Driver = "v1.13.0" +var Driver = "1.16.0" diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go index 88133293e..03925d7ad 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go @@ -8,6 +8,7 @@ package bsoncore // import "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" import ( "bytes" + "encoding/binary" "fmt" "math" "strconv" @@ -706,17 +707,16 @@ func ReserveLength(dst []byte) (int32, []byte) { // UpdateLength updates the length at index with length and returns the []byte. 
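// The manual shift chains removed here and in value_reader.go above are
// byte-for-byte equivalent to encoding/binary's little-endian helpers; a
// minimal standalone check (the values are arbitrary):
//
//	buf := make([]byte, 4)
//	binary.LittleEndian.PutUint32(buf, uint32(int32(258)))
//	// old form: int32(buf[0]) | int32(buf[1])<<8 | int32(buf[2])<<16 | int32(buf[3])<<24
//	fmt.Println(int32(binary.LittleEndian.Uint32(buf))) // 258
//	fmt.Println(buf)                                    // [2 1 0 0]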
func UpdateLength(dst []byte, index, length int32) []byte { - dst[index] = byte(length) - dst[index+1] = byte(length >> 8) - dst[index+2] = byte(length >> 16) - dst[index+3] = byte(length >> 24) + binary.LittleEndian.PutUint32(dst[index:], uint32(length)) return dst } func appendLength(dst []byte, l int32) []byte { return appendi32(dst, l) } func appendi32(dst []byte, i32 int32) []byte { - return append(dst, byte(i32), byte(i32>>8), byte(i32>>16), byte(i32>>24)) + b := []byte{0, 0, 0, 0} + binary.LittleEndian.PutUint32(b, uint32(i32)) + return append(dst, b...) } // ReadLength reads an int32 length from src and returns the length and the remaining bytes. If @@ -734,27 +734,26 @@ func readi32(src []byte) (int32, []byte, bool) { if len(src) < 4 { return 0, src, false } - return (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24), src[4:], true + return int32(binary.LittleEndian.Uint32(src)), src[4:], true } func appendi64(dst []byte, i64 int64) []byte { - return append(dst, - byte(i64), byte(i64>>8), byte(i64>>16), byte(i64>>24), - byte(i64>>32), byte(i64>>40), byte(i64>>48), byte(i64>>56), - ) + b := []byte{0, 0, 0, 0, 0, 0, 0, 0} + binary.LittleEndian.PutUint64(b, uint64(i64)) + return append(dst, b...) } func readi64(src []byte) (int64, []byte, bool) { if len(src) < 8 { return 0, src, false } - i64 := (int64(src[0]) | int64(src[1])<<8 | int64(src[2])<<16 | int64(src[3])<<24 | - int64(src[4])<<32 | int64(src[5])<<40 | int64(src[6])<<48 | int64(src[7])<<56) - return i64, src[8:], true + return int64(binary.LittleEndian.Uint64(src)), src[8:], true } func appendu32(dst []byte, u32 uint32) []byte { - return append(dst, byte(u32), byte(u32>>8), byte(u32>>16), byte(u32>>24)) + b := []byte{0, 0, 0, 0} + binary.LittleEndian.PutUint32(b, u32) + return append(dst, b...) } func readu32(src []byte) (uint32, []byte, bool) { @@ -762,23 +761,20 @@ func readu32(src []byte) (uint32, []byte, bool) { return 0, src, false } - return (uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24), src[4:], true + return binary.LittleEndian.Uint32(src), src[4:], true } func appendu64(dst []byte, u64 uint64) []byte { - return append(dst, - byte(u64), byte(u64>>8), byte(u64>>16), byte(u64>>24), - byte(u64>>32), byte(u64>>40), byte(u64>>48), byte(u64>>56), - ) + b := []byte{0, 0, 0, 0, 0, 0, 0, 0} + binary.LittleEndian.PutUint64(b, u64) + return append(dst, b...) } func readu64(src []byte) (uint64, []byte, bool) { if len(src) < 8 { return 0, src, false } - u64 := (uint64(src[0]) | uint64(src[1])<<8 | uint64(src[2])<<16 | uint64(src[3])<<24 | - uint64(src[4])<<32 | uint64(src[5])<<40 | uint64(src[6])<<48 | uint64(src[7])<<56) - return u64, src[8:], true + return binary.LittleEndian.Uint64(src), src[8:], true } // keep in sync with readcstringbytes diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go index 6837b53fc..f68e1da1a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/doc.go @@ -4,10 +4,18 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package bsoncore contains functions that can be used to encode and decode BSON -// elements and values to or from a slice of bytes. These functions are aimed at -// allowing low level manipulation of BSON and can be used to build a higher -// level BSON library. 
+// Package bsoncore is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +// +// Package bsoncore contains functions that can be used to encode and decode +// BSON elements and values to or from a slice of bytes. These functions are +// aimed at allowing low level manipulation of BSON and can be used to build a +// higher level BSON library. // // The Read* functions within this package return the values of the element and // a boolean indicating if the values are valid. A boolean was used instead of @@ -15,15 +23,12 @@ // enough bytes. This library attempts to do no validation, it will only return // false if there are not enough bytes for an item to be read. For example, the // ReadDocument function checks the length, if that length is larger than the -// number of bytes available, it will return false, if there are enough bytes, it -// will return those bytes and true. It is the consumers responsibility to +// number of bytes available, it will return false, if there are enough bytes, +// it will return those bytes and true. It is the consumers responsibility to // validate those bytes. // // The Append* functions within this package will append the type value to the // given dst slice. If the slice has enough capacity, it will not grow the // slice. The Append*Element functions within this package operate in the same // way, but additionally append the BSON type and the key before the value. -// -// Warning: Package bsoncore is unstable and there is no backward compatibility -// guarantee. It is experimental and subject to change. package bsoncore diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md deleted file mode 100644 index 3c3e6c56c..000000000 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/DESIGN.md +++ /dev/null @@ -1,27 +0,0 @@ -# Driver Library Design - -This document outlines the design for this package. - -## Deployment, Server, and Connection - -Acquiring a `Connection` from a `Server` selected from a `Deployment` enables sending and receiving -wire messages. A `Deployment` represents an set of MongoDB servers and a `Server` represents a -member of that set. These three types form the operation execution stack. - -### Compression - -Compression is handled by Connection type while uncompression is handled automatically by the -Operation type. This is done because the compressor to use for compressing a wire message is -chosen by the connection during handshake, while uncompression can be performed without this -information. This does make the design of compression non-symmetric, but it makes the design simpler -to implement and more consistent. - -## Operation - -The `Operation` type handles executing a series of commands using a `Deployment`. For most uses -`Operation` will only execute a single command, but the main use case for a series of commands is -batch split write commands, such as insert. The type itself is heavily documented, so reading the -code and comments together should provide an understanding of how the type works. - -This type is not meant to be used directly by callers. Instead a wrapping type should be defined -using the IDL. 
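The rewritten bsoncore package comment above describes the Append*/Read* calling convention; a minimal sketch of round-tripping one element under it (the document contents are arbitrary):

	package main

	import (
		"fmt"

		"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
	)

	func main() {
		// Append* grows dst in place; the *Element variants also write the
		// type byte and the key before the value.
		idx, doc := bsoncore.AppendDocumentStart(nil)
		doc = bsoncore.AppendStringElement(doc, "hello", "world")
		doc, _ = bsoncore.AppendDocumentEnd(doc, idx)

		// Read* reports a bool instead of an error: false only ever means
		// "not enough bytes"; no further validation is performed.
		if length, _, ok := bsoncore.ReadLength(doc); ok {
			fmt.Println("document length:", length)
		}
		if val, err := bsoncore.Document(doc).LookupErr("hello"); err == nil {
			fmt.Println(val.StringValue())
		}
	}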
diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go new file mode 100644 index 000000000..99c4c3470 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/creds/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package creds is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package creds diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go index 9db65cf19..5f9f1f574 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/doc.go @@ -4,20 +4,11 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package auth is not for public use. +// Package auth is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. // -// The API for packages in the 'private' directory have no stability -// guarantee. -// -// The packages within the 'private' directory would normally be put into an -// 'internal' directory to prohibit their use outside the 'mongo' directory. -// However, some MongoDB tools require very low-level access to the building -// blocks of a driver, so we have placed them under 'private' to allow these -// packages to be imported by projects that need them. -// -// These package APIs may be modified in backwards-incompatible ways at any -// time. -// -// You are strongly discouraged from directly using any packages -// under 'private'. +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! 
package auth diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/sasl.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/sasl.go index a7ae3368f..2a84b53a6 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/sasl.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/sasl.go @@ -102,7 +102,7 @@ func (sc *saslConversation) Finish(ctx context.Context, cfg *Config, firstRespon var saslResp saslResponse err := bson.Unmarshal(firstResponse, &saslResp) if err != nil { - fullErr := fmt.Errorf("unmarshal error: %v", err) + fullErr := fmt.Errorf("unmarshal error: %w", err) return newError(fullErr, sc.mechanism) } @@ -146,7 +146,7 @@ func (sc *saslConversation) Finish(ctx context.Context, cfg *Config, firstRespon err = bson.Unmarshal(rdr, &saslResp) if err != nil { - fullErr := fmt.Errorf("unmarshal error: %v", err) + fullErr := fmt.Errorf("unmarshal error: %w", err) return newError(fullErr, sc.mechanism) } } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/scram.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/scram.go index f4f069699..c1238cd6a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/scram.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/auth/scram.go @@ -14,7 +14,6 @@ package auth import ( "context" - "fmt" "github.com/xdg-go/scram" "github.com/xdg-go/stringprep" @@ -53,7 +52,7 @@ func newScramSHA1Authenticator(cred *Cred) (Authenticator, error) { func newScramSHA256Authenticator(cred *Cred) (Authenticator, error) { passprep, err := stringprep.SASLprep.Prepare(cred.Password) if err != nil { - return nil, newAuthError(fmt.Sprintf("error SASLprepping password '%s'", cred.Password), err) + return nil, newAuthError("error SASLprepping password", err) } client, err := scram.SHA256.NewClientUnprepped(cred.Username, passprep, "") if err != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go index fefcfdb47..23b4a6539 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/batch_cursor.go @@ -79,7 +79,7 @@ type CursorResponse struct { func NewCursorResponse(info ResponseInfo) (CursorResponse, error) { response := info.ServerResponse cur, err := response.LookupErr("cursor") - if err == bsoncore.ErrElementNotFound { + if errors.Is(err, bsoncore.ErrElementNotFound) { return CursorResponse{}, ErrNoCursor } if err != nil { @@ -142,7 +142,7 @@ func NewCursorResponse(info ResponseInfo) (CursorResponse, error) { return CursorResponse{}, fmt.Errorf("expected Connection used to establish a cursor to implement PinnedConnection, but got %T", info.Connection) } if err := refConn.PinToCursor(); err != nil { - return CursorResponse{}, fmt.Errorf("error incrementing connection reference count when creating a cursor: %v", err) + return CursorResponse{}, fmt.Errorf("error incrementing connection reference count when creating a cursor: %w", err) } curresp.Connection = refConn } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go index d79b024b7..d9a6c68fe 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/compression.go @@ -30,7 +30,11 @@ type CompressionOpts struct { // destination writer. 
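The sasl.go and batch_cursor.go hunks above change %v to %w and a == sentinel comparison to errors.Is. That combination is what keeps sentinel checks working after context is added around an error; a small self-contained sketch, with errElementNotFound standing in for bsoncore.ErrElementNotFound:

    package main

    import (
        "errors"
        "fmt"
    )

    var errElementNotFound = errors.New("element not found")

    func lookup() error {
        // %w keeps the sentinel reachable in the error chain; %v would
        // flatten it into plain text.
        return fmt.Errorf("lookup cursor: %w", errElementNotFound)
    }

    func main() {
        err := lookup()
        fmt.Println(errors.Is(err, errElementNotFound)) // true: Is walks the chain
        fmt.Println(err == errElementNotFound)          // false: equality sees only the wrapper
    }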
It panics on any errors and should only be used at // package initialization time. func mustZstdNewWriter(lvl zstd.EncoderLevel) *zstd.Encoder { - enc, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(lvl)) + enc, err := zstd.NewWriter( + nil, + zstd.WithWindowSize(8<<20), // Set window size to 8MB. + zstd.WithEncoderLevel(lvl), + ) if err != nil { panic(err) } @@ -105,6 +109,13 @@ func (e *zlibEncoder) Encode(dst, src []byte) ([]byte, error) { return dst, nil } +var zstdBufPool = sync.Pool{ + New: func() interface{} { + s := make([]byte, 0) + return &s + }, +} + // CompressPayload takes a byte slice and compresses it according to the options passed func CompressPayload(in []byte, opts CompressionOpts) ([]byte, error) { switch opts.Compressor { @@ -123,7 +134,13 @@ func CompressPayload(in []byte, opts CompressionOpts) ([]byte, error) { if err != nil { return nil, err } - return encoder.EncodeAll(in, nil), nil + ptr := zstdBufPool.Get().(*[]byte) + b := encoder.EncodeAll(in, *ptr) + dst := make([]byte, len(b)) + copy(dst, b) + *ptr = b[:0] + zstdBufPool.Put(ptr) + return dst, nil default: return nil, fmt.Errorf("unknown compressor ID %v", opts.Compressor) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go index cd4313647..686458e29 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/connstring/connstring.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package connstring is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package connstring // import "go.mongodb.org/mongo-driver/x/mongo/driver/connstring" import ( @@ -73,29 +80,28 @@ var random = randutil.NewLockedRand() // ParseAndValidate parses the provided URI into a ConnString object. // It check that all values are valid. -func ParseAndValidate(s string) (ConnString, error) { - p := parser{dnsResolver: dns.DefaultResolver} - err := p.parse(s) +func ParseAndValidate(s string) (*ConnString, error) { + connStr, err := Parse(s) if err != nil { - return p.ConnString, fmt.Errorf("error parsing uri: %w", err) + return nil, err } - err = p.ConnString.Validate() + err = connStr.Validate() if err != nil { - return p.ConnString, fmt.Errorf("error validating uri: %w", err) + return nil, fmt.Errorf("error validating uri: %w", err) } - return p.ConnString, nil + return connStr, nil } // Parse parses the provided URI into a ConnString object // but does not check that all values are valid. Use `ConnString.Validate()` // to run the validation checks separately. -func Parse(s string) (ConnString, error) { +func Parse(s string) (*ConnString, error) { p := parser{dnsResolver: dns.DefaultResolver} - err := p.parse(s) + connStr, err := p.parse(s) if err != nil { - err = fmt.Errorf("error parsing uri: %w", err) + return nil, fmt.Errorf("error parsing uri: %w", err) } - return p.ConnString, err + return connStr, err } // ConnString represents a connection string to mongodb. 
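The CompressPayload change above grows a pooled scratch slice inside zstd's EncodeAll and then copies the result out before putting the buffer back. The copy is the load-bearing step: it keeps pooled memory from escaping to callers while still letting the grown capacity be reused. Roughly the same pattern in isolation (process is a simplified stand-in, not driver code):

    package main

    import (
        "fmt"
        "sync"
    )

    var bufPool = sync.Pool{
        New: func() interface{} {
            s := make([]byte, 0)
            return &s // pool a pointer so Put does not allocate
        },
    }

    func process(in []byte) []byte {
        ptr := bufPool.Get().(*[]byte)
        b := append(*ptr, in...) // stand-in for encoder.EncodeAll(in, *ptr)
        dst := make([]byte, len(b))
        copy(dst, b) // hand the caller a private copy
        *ptr = b[:0] // keep the grown capacity for the next user
        bufPool.Put(ptr)
        return dst
    }

    func main() {
        fmt.Println(string(process([]byte("payload"))))
    }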
@@ -134,6 +140,7 @@ type ConnString struct { MaxConnectingSet bool Password string PasswordSet bool + RawHosts []string ReadConcernLevel string ReadPreference string ReadPreferenceTagSets []map[string]string @@ -202,242 +209,51 @@ func (u *ConnString) HasAuthParameters() bool { // Validate checks that the Auth and SSL parameters are valid values. func (u *ConnString) Validate() error { - p := parser{ - dnsResolver: dns.DefaultResolver, - ConnString: *u, - } - return p.validate() -} - -// ConnectMode informs the driver on how to connect -// to the server. -type ConnectMode uint8 - -var _ fmt.Stringer = ConnectMode(0) - -// ConnectMode constants. -const ( - AutoConnect ConnectMode = iota - SingleConnect -) - -// String implements the fmt.Stringer interface. -func (c ConnectMode) String() string { - switch c { - case AutoConnect: - return "automatic" - case SingleConnect: - return "direct" - default: - return "unknown" - } -} - -// Scheme constants -const ( - SchemeMongoDB = "mongodb" - SchemeMongoDBSRV = "mongodb+srv" -) - -type parser struct { - ConnString - - dnsResolver *dns.Resolver - tlsssl *bool // used to determine if tls and ssl options are both specified and set differently. -} - -func (p *parser) parse(original string) error { - p.Original = original - uri := original - var err error - if strings.HasPrefix(uri, SchemeMongoDBSRV+"://") { - p.Scheme = SchemeMongoDBSRV - // remove the scheme - uri = uri[len(SchemeMongoDBSRV)+3:] - } else if strings.HasPrefix(uri, SchemeMongoDB+"://") { - p.Scheme = SchemeMongoDB - // remove the scheme - uri = uri[len(SchemeMongoDB)+3:] - } else { - return errors.New(`scheme must be "mongodb" or "mongodb+srv"`) - } - - if idx := strings.Index(uri, "@"); idx != -1 { - userInfo := uri[:idx] - uri = uri[idx+1:] - - username := userInfo - var password string - - if idx := strings.Index(userInfo, ":"); idx != -1 { - username = userInfo[:idx] - password = userInfo[idx+1:] - p.PasswordSet = true - } - - // Validate and process the username. - if strings.Contains(username, "/") { - return fmt.Errorf("unescaped slash in username") - } - p.Username, err = url.PathUnescape(username) - if err != nil { - return fmt.Errorf("invalid username: %w", err) - } - p.UsernameSet = true - - // Validate and process the password. - if strings.Contains(password, ":") { - return fmt.Errorf("unescaped colon in password") - } - if strings.Contains(password, "/") { - return fmt.Errorf("unescaped slash in password") - } - p.Password, err = url.PathUnescape(password) - if err != nil { - return fmt.Errorf("invalid password: %w", err) - } - } - - // fetch the hosts field - hosts := uri - if idx := strings.IndexAny(uri, "/?@"); idx != -1 { - if uri[idx] == '@' { - return fmt.Errorf("unescaped @ sign in user info") - } - if uri[idx] == '?' 
{ - return fmt.Errorf("must have a / before the query ?") - } - hosts = uri[:idx] - } - parsedHosts := strings.Split(hosts, ",") - uri = uri[len(hosts):] - extractedDatabase, err := extractDatabaseFromURI(uri) - if err != nil { + if err = u.validateAuth(); err != nil { return err } - uri = extractedDatabase.uri - p.Database = extractedDatabase.db - - // grab connection arguments from URI - connectionArgsFromQueryString, err := extractQueryArgsFromURI(uri) - if err != nil { - return err - } - - // grab connection arguments from TXT record and enable SSL if "mongodb+srv://" - var connectionArgsFromTXT []string - if p.Scheme == SchemeMongoDBSRV { - connectionArgsFromTXT, err = p.dnsResolver.GetConnectionArgsFromTXT(hosts) - if err != nil { - return err - } - - // SSL is enabled by default for SRV, but can be manually disabled with "ssl=false". - p.SSL = true - p.SSLSet = true - } - - // add connection arguments from URI and TXT records to connstring - connectionArgPairs := make([]string, 0, len(connectionArgsFromTXT)+len(connectionArgsFromQueryString)) - connectionArgPairs = append(connectionArgPairs, connectionArgsFromTXT...) - connectionArgPairs = append(connectionArgPairs, connectionArgsFromQueryString...) - - for _, pair := range connectionArgPairs { - err := p.addOption(pair) - if err != nil { - return err - } - } - - // do SRV lookup if "mongodb+srv://" - if p.Scheme == SchemeMongoDBSRV { - parsedHosts, err = p.dnsResolver.ParseHosts(hosts, p.SRVServiceName, true) - if err != nil { - return err - } - - // If p.SRVMaxHosts is non-zero and is less than the number of hosts, randomly - // select SRVMaxHosts hosts from parsedHosts. - if p.SRVMaxHosts > 0 && p.SRVMaxHosts < len(parsedHosts) { - random.Shuffle(len(parsedHosts), func(i, j int) { - parsedHosts[i], parsedHosts[j] = parsedHosts[j], parsedHosts[i] - }) - parsedHosts = parsedHosts[:p.SRVMaxHosts] - } - } - - for _, host := range parsedHosts { - err = p.addHost(host) - if err != nil { - return fmt.Errorf("invalid host %q: %w", host, err) - } - } - if len(p.Hosts) == 0 { - return fmt.Errorf("must have at least 1 host") - } - - err = p.setDefaultAuthParams(extractedDatabase.db) - if err != nil { - return err - } - - // If WTimeout was set from manual options passed in, set WTImeoutSet to true. - if p.WTimeoutSetFromOption { - p.WTimeoutSet = true - } - - return nil -} - -func (p *parser) validate() error { - var err error - - err = p.validateAuth() - if err != nil { - return err - } - - if err = p.validateSSL(); err != nil { + if err = u.validateSSL(); err != nil { return err } // Check for invalid write concern (i.e. w=0 and j=true) - if p.WNumberSet && p.WNumber == 0 && p.JSet && p.J { + if u.WNumberSet && u.WNumber == 0 && u.JSet && u.J { return writeconcern.ErrInconsistent } // Check for invalid use of direct connections. - if (p.ConnectSet && p.Connect == SingleConnect) || (p.DirectConnectionSet && p.DirectConnection) { - if len(p.Hosts) > 1 { + if (u.ConnectSet && u.Connect == SingleConnect) || + (u.DirectConnectionSet && u.DirectConnection) { + if len(u.Hosts) > 1 { return errors.New("a direct connection cannot be made if multiple hosts are specified") } - if p.Scheme == SchemeMongoDBSRV { + if u.Scheme == SchemeMongoDBSRV { return errors.New("a direct connection cannot be made if an SRV URI is used") } - if p.LoadBalancedSet && p.LoadBalanced { + if u.LoadBalancedSet && u.LoadBalanced { return ErrLoadBalancedWithDirectConnection } } // Validation for load-balanced mode. 
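With Parse and Validate split as above and both operating on *ConnString, the validation rules (such as the direct-connection checks in this hunk) can be exercised independently of parsing. A usage sketch against the signatures introduced here; note the package is internal to the driver, so this is illustrative only:

    package main

    import (
        "fmt"

        "go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
    )

    func main() {
        cs, err := connstring.Parse("mongodb://a:27017,b:27017/?directConnection=true")
        if err != nil {
            fmt.Println("parse:", err)
            return
        }
        // Expected to fail: a direct connection cannot be made if multiple
        // hosts are specified.
        if err := cs.Validate(); err != nil {
            fmt.Println("validate:", err)
        }
    }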
- if p.LoadBalancedSet && p.LoadBalanced { - if len(p.Hosts) > 1 { + if u.LoadBalancedSet && u.LoadBalanced { + if len(u.Hosts) > 1 { return ErrLoadBalancedWithMultipleHosts } - if p.ReplicaSet != "" { + if u.ReplicaSet != "" { return ErrLoadBalancedWithReplicaSet } } // Check for invalid use of SRVMaxHosts. - if p.SRVMaxHosts > 0 { - if p.ReplicaSet != "" { + if u.SRVMaxHosts > 0 { + if u.ReplicaSet != "" { return ErrSRVMaxHostsWithReplicaSet } - if p.LoadBalanced { + if u.LoadBalanced { return ErrSRVMaxHostsWithLoadBalanced } } @@ -445,34 +261,34 @@ func (p *parser) validate() error { return nil } -func (p *parser) setDefaultAuthParams(dbName string) error { +func (u *ConnString) setDefaultAuthParams(dbName string) error { // We do this check here rather than in validateAuth because this function is called as part of parsing and sets // the value of AuthSource if authentication is enabled. - if p.AuthSourceSet && p.AuthSource == "" { + if u.AuthSourceSet && u.AuthSource == "" { return errors.New("authSource must be non-empty when supplied in a URI") } - switch strings.ToLower(p.AuthMechanism) { + switch strings.ToLower(u.AuthMechanism) { case "plain": - if p.AuthSource == "" { - p.AuthSource = dbName - if p.AuthSource == "" { - p.AuthSource = "$external" + if u.AuthSource == "" { + u.AuthSource = dbName + if u.AuthSource == "" { + u.AuthSource = "$external" } } case "gssapi": - if p.AuthMechanismProperties == nil { - p.AuthMechanismProperties = map[string]string{ + if u.AuthMechanismProperties == nil { + u.AuthMechanismProperties = map[string]string{ "SERVICE_NAME": "mongodb", } - } else if v, ok := p.AuthMechanismProperties["SERVICE_NAME"]; !ok || v == "" { - p.AuthMechanismProperties["SERVICE_NAME"] = "mongodb" + } else if v, ok := u.AuthMechanismProperties["SERVICE_NAME"]; !ok || v == "" { + u.AuthMechanismProperties["SERVICE_NAME"] = "mongodb" } fallthrough case "mongodb-aws", "mongodb-x509": - if p.AuthSource == "" { - p.AuthSource = "$external" - } else if p.AuthSource != "$external" { + if u.AuthSource == "" { + u.AuthSource = "$external" + } else if u.AuthSource != "$external" { return fmt.Errorf("auth source must be $external") } case "mongodb-cr": @@ -480,18 +296,18 @@ func (p *parser) setDefaultAuthParams(dbName string) error { case "scram-sha-1": fallthrough case "scram-sha-256": - if p.AuthSource == "" { - p.AuthSource = dbName - if p.AuthSource == "" { - p.AuthSource = "admin" + if u.AuthSource == "" { + u.AuthSource = dbName + if u.AuthSource == "" { + u.AuthSource = "admin" } } case "": // Only set auth source if there is a request for authentication via non-empty credentials. - if p.AuthSource == "" && (p.AuthMechanismProperties != nil || p.Username != "" || p.PasswordSet) { - p.AuthSource = dbName - if p.AuthSource == "" { - p.AuthSource = "admin" + if u.AuthSource == "" && (u.AuthMechanismProperties != nil || u.Username != "" || u.PasswordSet) { + u.AuthSource = dbName + if u.AuthSource == "" { + u.AuthSource = "admin" } } default: @@ -500,83 +316,473 @@ func (p *parser) setDefaultAuthParams(dbName string) error { return nil } -func (p *parser) validateAuth() error { - switch strings.ToLower(p.AuthMechanism) { +func (u *ConnString) addOptions(connectionArgPairs []string) error { + var tlsssl *bool // used to determine if tls and ssl options are both specified and set differently. 
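The tlsssl pointer declared above is a tri-state flag: nil means neither alias has been seen yet, and once tls or ssl pins it, any later conflicting value can be rejected. The same idea in isolation, with illustrative names:

    package main

    import (
        "errors"
        "fmt"
    )

    // flag distinguishes "never set" (nil) from an explicit true or false,
    // which a plain bool cannot express.
    type flag struct{ v *bool }

    func (f *flag) set(v bool) error {
        if f.v == nil {
            f.v = &v // first value seen wins
            return nil
        }
        if *f.v != v {
            return errors.New("tls and ssl options, when both specified, must be equivalent")
        }
        return nil
    }

    func main() {
        var f flag
        fmt.Println(f.set(true))  // <nil>
        fmt.Println(f.set(false)) // conflict error
    }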
+ for _, pair := range connectionArgPairs { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 || kv[0] == "" { + return fmt.Errorf("invalid option") + } + + key, err := url.QueryUnescape(kv[0]) + if err != nil { + return fmt.Errorf("invalid option key %q: %w", kv[0], err) + } + + value, err := url.QueryUnescape(kv[1]) + if err != nil { + return fmt.Errorf("invalid option value %q: %w", kv[1], err) + } + + lowerKey := strings.ToLower(key) + switch lowerKey { + case "appname": + u.AppName = value + case "authmechanism": + u.AuthMechanism = value + case "authmechanismproperties": + u.AuthMechanismProperties = make(map[string]string) + pairs := strings.Split(value, ",") + for _, pair := range pairs { + kv := strings.SplitN(pair, ":", 2) + if len(kv) != 2 || kv[0] == "" { + return fmt.Errorf("invalid authMechanism property") + } + u.AuthMechanismProperties[kv[0]] = kv[1] + } + u.AuthMechanismPropertiesSet = true + case "authsource": + u.AuthSource = value + u.AuthSourceSet = true + case "compressors": + compressors := strings.Split(value, ",") + if len(compressors) < 1 { + return fmt.Errorf("must have at least 1 compressor") + } + u.Compressors = compressors + case "connect": + switch strings.ToLower(value) { + case "automatic": + case "direct": + u.Connect = SingleConnect + default: + return fmt.Errorf("invalid 'connect' value: %q", value) + } + if u.DirectConnectionSet { + expectedValue := u.Connect == SingleConnect // directConnection should be true if connect=direct + if u.DirectConnection != expectedValue { + return fmt.Errorf("options connect=%q and directConnection=%v conflict", value, u.DirectConnection) + } + } + + u.ConnectSet = true + case "directconnection": + switch strings.ToLower(value) { + case "true": + u.DirectConnection = true + case "false": + default: + return fmt.Errorf("invalid 'directConnection' value: %q", value) + } + + if u.ConnectSet { + expectedValue := AutoConnect + if u.DirectConnection { + expectedValue = SingleConnect + } + + if u.Connect != expectedValue { + return fmt.Errorf("options connect=%q and directConnection=%q conflict", u.Connect, value) + } + } + u.DirectConnectionSet = true + case "connecttimeoutms": + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.ConnectTimeout = time.Duration(n) * time.Millisecond + u.ConnectTimeoutSet = true + case "heartbeatintervalms", "heartbeatfrequencyms": + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.HeartbeatInterval = time.Duration(n) * time.Millisecond + u.HeartbeatIntervalSet = true + case "journal": + switch value { + case "true": + u.J = true + case "false": + u.J = false + default: + return fmt.Errorf("invalid value for %q: %q", key, value) + } + + u.JSet = true + case "loadbalanced": + switch value { + case "true": + u.LoadBalanced = true + case "false": + u.LoadBalanced = false + default: + return fmt.Errorf("invalid value for %q: %q", key, value) + } + + u.LoadBalancedSet = true + case "localthresholdms": + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.LocalThreshold = time.Duration(n) * time.Millisecond + u.LocalThresholdSet = true + case "maxidletimems": + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.MaxConnIdleTime = time.Duration(n) * time.Millisecond + u.MaxConnIdleTimeSet = true + case 
"maxpoolsize": + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.MaxPoolSize = uint64(n) + u.MaxPoolSizeSet = true + case "minpoolsize": + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.MinPoolSize = uint64(n) + u.MinPoolSizeSet = true + case "maxconnecting": + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.MaxConnecting = uint64(n) + u.MaxConnectingSet = true + case "readconcernlevel": + u.ReadConcernLevel = value + case "readpreference": + u.ReadPreference = value + case "readpreferencetags": + if value == "" { + // If "readPreferenceTags=" is supplied, append an empty map to tag sets to + // represent a wild-card. + u.ReadPreferenceTagSets = append(u.ReadPreferenceTagSets, map[string]string{}) + break + } + + tags := make(map[string]string) + items := strings.Split(value, ",") + for _, item := range items { + parts := strings.Split(item, ":") + if len(parts) != 2 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + tags[parts[0]] = parts[1] + } + u.ReadPreferenceTagSets = append(u.ReadPreferenceTagSets, tags) + case "maxstaleness", "maxstalenessseconds": + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.MaxStaleness = time.Duration(n) * time.Second + u.MaxStalenessSet = true + case "replicaset": + u.ReplicaSet = value + case "retrywrites": + switch value { + case "true": + u.RetryWrites = true + case "false": + u.RetryWrites = false + default: + return fmt.Errorf("invalid value for %q: %q", key, value) + } + + u.RetryWritesSet = true + case "retryreads": + switch value { + case "true": + u.RetryReads = true + case "false": + u.RetryReads = false + default: + return fmt.Errorf("invalid value for %q: %q", key, value) + } + + u.RetryReadsSet = true + case "servermonitoringmode": + if !IsValidServerMonitoringMode(value) { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + + u.ServerMonitoringMode = value + case "serverselectiontimeoutms": + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.ServerSelectionTimeout = time.Duration(n) * time.Millisecond + u.ServerSelectionTimeoutSet = true + case "sockettimeoutms": + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.SocketTimeout = time.Duration(n) * time.Millisecond + u.SocketTimeoutSet = true + case "srvmaxhosts": + // srvMaxHosts can only be set on URIs with the "mongodb+srv" scheme + if u.Scheme != SchemeMongoDBSRV { + return fmt.Errorf("cannot specify srvMaxHosts on non-SRV URI") + } + + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.SRVMaxHosts = n + case "srvservicename": + // srvServiceName can only be set on URIs with the "mongodb+srv" scheme + if u.Scheme != SchemeMongoDBSRV { + return fmt.Errorf("cannot specify srvServiceName on non-SRV URI") + } + + // srvServiceName must be between 1 and 62 characters according to + // our specification. Empty service names are not valid, and the service + // name (including prepended underscore) should not exceed the 63 character + // limit for DNS query subdomains. 
+ if len(value) < 1 || len(value) > 62 { + return fmt.Errorf("srvServiceName value must be between 1 and 62 characters") + } + u.SRVServiceName = value + case "ssl", "tls": + switch value { + case "true": + u.SSL = true + case "false": + u.SSL = false + default: + return fmt.Errorf("invalid value for %q: %q", key, value) + } + if tlsssl == nil { + tlsssl = new(bool) + *tlsssl = u.SSL + } else if *tlsssl != u.SSL { + return errors.New("tls and ssl options, when both specified, must be equivalent") + } + + u.SSLSet = true + case "sslclientcertificatekeyfile", "tlscertificatekeyfile": + u.SSL = true + u.SSLSet = true + u.SSLClientCertificateKeyFile = value + u.SSLClientCertificateKeyFileSet = true + case "sslclientcertificatekeypassword", "tlscertificatekeyfilepassword": + u.SSLClientCertificateKeyPassword = func() string { return value } + u.SSLClientCertificateKeyPasswordSet = true + case "tlscertificatefile": + u.SSL = true + u.SSLSet = true + u.SSLCertificateFile = value + u.SSLCertificateFileSet = true + case "tlsprivatekeyfile": + u.SSL = true + u.SSLSet = true + u.SSLPrivateKeyFile = value + u.SSLPrivateKeyFileSet = true + case "sslinsecure", "tlsinsecure": + switch value { + case "true": + u.SSLInsecure = true + case "false": + u.SSLInsecure = false + default: + return fmt.Errorf("invalid value for %q: %q", key, value) + } + + u.SSLInsecureSet = true + case "sslcertificateauthorityfile", "tlscafile": + u.SSL = true + u.SSLSet = true + u.SSLCaFile = value + u.SSLCaFileSet = true + case "timeoutms": + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.Timeout = time.Duration(n) * time.Millisecond + u.TimeoutSet = true + case "tlsdisableocspendpointcheck": + u.SSL = true + u.SSLSet = true + + switch value { + case "true": + u.SSLDisableOCSPEndpointCheck = true + case "false": + u.SSLDisableOCSPEndpointCheck = false + default: + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.SSLDisableOCSPEndpointCheckSet = true + case "w": + if w, err := strconv.Atoi(value); err == nil { + if w < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + + u.WNumber = w + u.WNumberSet = true + u.WString = "" + break + } + + u.WString = value + u.WNumberSet = false + + case "wtimeoutms": + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.WTimeout = time.Duration(n) * time.Millisecond + u.WTimeoutSet = true + case "wtimeout": + // Defer to wtimeoutms, but not to a manually-set option. 
+ if u.WTimeoutSet { + break + } + n, err := strconv.Atoi(value) + if err != nil || n < 0 { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + u.WTimeout = time.Duration(n) * time.Millisecond + case "zlibcompressionlevel": + level, err := strconv.Atoi(value) + if err != nil || (level < -1 || level > 9) { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + + if level == -1 { + level = wiremessage.DefaultZlibLevel + } + u.ZlibLevel = level + u.ZlibLevelSet = true + case "zstdcompressionlevel": + const maxZstdLevel = 22 // https://github.com/facebook/zstd/blob/a880ca239b447968493dd2fed3850e766d6305cc/contrib/linux-kernel/lib/zstd/compress.c#L3291 + level, err := strconv.Atoi(value) + if err != nil || (level < -1 || level > maxZstdLevel) { + return fmt.Errorf("invalid value for %q: %q", key, value) + } + + if level == -1 { + level = wiremessage.DefaultZstdLevel + } + u.ZstdLevel = level + u.ZstdLevelSet = true + default: + if u.UnknownOptions == nil { + u.UnknownOptions = make(map[string][]string) + } + u.UnknownOptions[lowerKey] = append(u.UnknownOptions[lowerKey], value) + } + + if u.Options == nil { + u.Options = make(map[string][]string) + } + u.Options[lowerKey] = append(u.Options[lowerKey], value) + } + return nil +} + +func (u *ConnString) validateAuth() error { + switch strings.ToLower(u.AuthMechanism) { case "mongodb-cr": - if p.Username == "" { + if u.Username == "" { return fmt.Errorf("username required for MONGO-CR") } - if p.Password == "" { + if u.Password == "" { return fmt.Errorf("password required for MONGO-CR") } - if p.AuthMechanismProperties != nil { + if u.AuthMechanismProperties != nil { return fmt.Errorf("MONGO-CR cannot have mechanism properties") } case "mongodb-x509": - if p.Password != "" { + if u.Password != "" { return fmt.Errorf("password cannot be specified for MONGO-X509") } - if p.AuthMechanismProperties != nil { + if u.AuthMechanismProperties != nil { return fmt.Errorf("MONGO-X509 cannot have mechanism properties") } case "mongodb-aws": - if p.Username != "" && p.Password == "" { + if u.Username != "" && u.Password == "" { return fmt.Errorf("username without password is invalid for MONGODB-AWS") } - if p.Username == "" && p.Password != "" { + if u.Username == "" && u.Password != "" { return fmt.Errorf("password without username is invalid for MONGODB-AWS") } var token bool - for k := range p.AuthMechanismProperties { + for k := range u.AuthMechanismProperties { if k != "AWS_SESSION_TOKEN" { return fmt.Errorf("invalid auth property for MONGODB-AWS") } token = true } - if token && p.Username == "" && p.Password == "" { + if token && u.Username == "" && u.Password == "" { return fmt.Errorf("token without username and password is invalid for MONGODB-AWS") } case "gssapi": - if p.Username == "" { + if u.Username == "" { return fmt.Errorf("username required for GSSAPI") } - for k := range p.AuthMechanismProperties { + for k := range u.AuthMechanismProperties { if k != "SERVICE_NAME" && k != "CANONICALIZE_HOST_NAME" && k != "SERVICE_REALM" && k != "SERVICE_HOST" { return fmt.Errorf("invalid auth property for GSSAPI") } } case "plain": - if p.Username == "" { + if u.Username == "" { return fmt.Errorf("username required for PLAIN") } - if p.Password == "" { + if u.Password == "" { return fmt.Errorf("password required for PLAIN") } - if p.AuthMechanismProperties != nil { + if u.AuthMechanismProperties != nil { return fmt.Errorf("PLAIN cannot have mechanism properties") } case "scram-sha-1": - if p.Username == "" { + if u.Username == "" { 
return fmt.Errorf("username required for SCRAM-SHA-1") } - if p.Password == "" { + if u.Password == "" { return fmt.Errorf("password required for SCRAM-SHA-1") } - if p.AuthMechanismProperties != nil { + if u.AuthMechanismProperties != nil { return fmt.Errorf("SCRAM-SHA-1 cannot have mechanism properties") } case "scram-sha-256": - if p.Username == "" { + if u.Username == "" { return fmt.Errorf("username required for SCRAM-SHA-256") } - if p.Password == "" { + if u.Password == "" { return fmt.Errorf("password required for SCRAM-SHA-256") } - if p.AuthMechanismProperties != nil { + if u.AuthMechanismProperties != nil { return fmt.Errorf("SCRAM-SHA-256 cannot have mechanism properties") } case "": - if p.UsernameSet && p.Username == "" { + if u.UsernameSet && u.Username == "" { return fmt.Errorf("username required if URI contains user info") } default: @@ -585,457 +791,261 @@ func (p *parser) validateAuth() error { return nil } -func (p *parser) validateSSL() error { - if !p.SSL { +func (u *ConnString) validateSSL() error { + if !u.SSL { return nil } - if p.SSLClientCertificateKeyFileSet { - if p.SSLCertificateFileSet || p.SSLPrivateKeyFileSet { + if u.SSLClientCertificateKeyFileSet { + if u.SSLCertificateFileSet || u.SSLPrivateKeyFileSet { return errors.New("the sslClientCertificateKeyFile/tlsCertificateKeyFile URI option cannot be provided " + "along with tlsCertificateFile or tlsPrivateKeyFile") } return nil } - if p.SSLCertificateFileSet && !p.SSLPrivateKeyFileSet { + if u.SSLCertificateFileSet && !u.SSLPrivateKeyFileSet { return errors.New("the tlsPrivateKeyFile URI option must be provided if the tlsCertificateFile option is specified") } - if p.SSLPrivateKeyFileSet && !p.SSLCertificateFileSet { + if u.SSLPrivateKeyFileSet && !u.SSLCertificateFileSet { return errors.New("the tlsCertificateFile URI option must be provided if the tlsPrivateKeyFile option is specified") } - if p.SSLInsecureSet && p.SSLDisableOCSPEndpointCheckSet { + if u.SSLInsecureSet && u.SSLDisableOCSPEndpointCheckSet { return errors.New("the sslInsecure/tlsInsecure URI option cannot be provided along with " + "tlsDisableOCSPEndpointCheck ") } return nil } -func (p *parser) addHost(host string) error { +func sanitizeHost(host string) (string, error) { if host == "" { - return nil + return host, nil } - host, err := url.QueryUnescape(host) + unescaped, err := url.QueryUnescape(host) if err != nil { - return fmt.Errorf("invalid host %q: %w", host, err) + return "", fmt.Errorf("invalid host %q: %w", host, err) } - _, port, err := net.SplitHostPort(host) + _, port, err := net.SplitHostPort(unescaped) // this is unfortunate that SplitHostPort actually requires // a port to exist. if err != nil { if addrError, ok := err.(*net.AddrError); !ok || addrError.Err != "missing port in address" { - return err + return "", err } } if port != "" { d, err := strconv.Atoi(port) if err != nil { - return fmt.Errorf("port must be an integer: %w", err) + return "", fmt.Errorf("port must be an integer: %w", err) } if d <= 0 || d >= 65536 { - return fmt.Errorf("port must be in the range [1, 65535]") + return "", fmt.Errorf("port must be in the range [1, 65535]") } } - p.Hosts = append(p.Hosts, host) - return nil + return unescaped, nil } -// IsValidServerMonitoringMode will return true if the given string matches a -// valid server monitoring mode. 
-func IsValidServerMonitoringMode(mode string) bool { - return mode == ServerMonitoringModeAuto || - mode == ServerMonitoringModeStream || - mode == ServerMonitoringModePoll -} +// ConnectMode informs the driver on how to connect +// to the server. +type ConnectMode uint8 -func (p *parser) addOption(pair string) error { - kv := strings.SplitN(pair, "=", 2) - if len(kv) != 2 || kv[0] == "" { - return fmt.Errorf("invalid option") - } +var _ fmt.Stringer = ConnectMode(0) - key, err := url.QueryUnescape(kv[0]) - if err != nil { - return fmt.Errorf("invalid option key %q: %w", kv[0], err) - } +// ConnectMode constants. +const ( + AutoConnect ConnectMode = iota + SingleConnect +) - value, err := url.QueryUnescape(kv[1]) - if err != nil { - return fmt.Errorf("invalid option value %q: %w", kv[1], err) +// String implements the fmt.Stringer interface. +func (c ConnectMode) String() string { + switch c { + case AutoConnect: + return "automatic" + case SingleConnect: + return "direct" + default: + return "unknown" } +} - lowerKey := strings.ToLower(key) - switch lowerKey { - case "appname": - p.AppName = value - case "authmechanism": - p.AuthMechanism = value - case "authmechanismproperties": - p.AuthMechanismProperties = make(map[string]string) - pairs := strings.Split(value, ",") - for _, pair := range pairs { - kv := strings.SplitN(pair, ":", 2) - if len(kv) != 2 || kv[0] == "" { - return fmt.Errorf("invalid authMechanism property") - } - p.AuthMechanismProperties[kv[0]] = kv[1] - } - p.AuthMechanismPropertiesSet = true - case "authsource": - p.AuthSource = value - p.AuthSourceSet = true - case "compressors": - compressors := strings.Split(value, ",") - if len(compressors) < 1 { - return fmt.Errorf("must have at least 1 compressor") - } - p.Compressors = compressors - case "connect": - switch strings.ToLower(value) { - case "automatic": - case "direct": - p.Connect = SingleConnect - default: - return fmt.Errorf("invalid 'connect' value: %q", value) - } - if p.DirectConnectionSet { - expectedValue := p.Connect == SingleConnect // directConnection should be true if connect=direct - if p.DirectConnection != expectedValue { - return fmt.Errorf("options connect=%q and directConnection=%v conflict", value, p.DirectConnection) - } - } +// Scheme constants +const ( + SchemeMongoDB = "mongodb" + SchemeMongoDBSRV = "mongodb+srv" +) - p.ConnectSet = true - case "directconnection": - switch strings.ToLower(value) { - case "true": - p.DirectConnection = true - case "false": - default: - return fmt.Errorf("invalid 'directConnection' value: %q", value) - } +type parser struct { + dnsResolver *dns.Resolver +} - if p.ConnectSet { - expectedValue := AutoConnect - if p.DirectConnection { - expectedValue = SingleConnect - } +func (p *parser) parse(original string) (*ConnString, error) { + connStr := &ConnString{} + connStr.Original = original + uri := original - if p.Connect != expectedValue { - return fmt.Errorf("options connect=%q and directConnection=%q conflict", p.Connect, value) - } - } - p.DirectConnectionSet = true - case "connecttimeoutms": - n, err := strconv.Atoi(value) - if err != nil || n < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) - } - p.ConnectTimeout = time.Duration(n) * time.Millisecond - p.ConnectTimeoutSet = true - case "heartbeatintervalms", "heartbeatfrequencyms": - n, err := strconv.Atoi(value) - if err != nil || n < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) - } - p.HeartbeatInterval = time.Duration(n) * time.Millisecond - p.HeartbeatIntervalSet = 
true - case "journal": - switch value { - case "true": - p.J = true - case "false": - p.J = false - default: - return fmt.Errorf("invalid value for %q: %q", key, value) - } + var err error + if strings.HasPrefix(uri, SchemeMongoDBSRV+"://") { + connStr.Scheme = SchemeMongoDBSRV + // remove the scheme + uri = uri[len(SchemeMongoDBSRV)+3:] + } else if strings.HasPrefix(uri, SchemeMongoDB+"://") { + connStr.Scheme = SchemeMongoDB + // remove the scheme + uri = uri[len(SchemeMongoDB)+3:] + } else { + return nil, errors.New(`scheme must be "mongodb" or "mongodb+srv"`) + } - p.JSet = true - case "loadbalanced": - switch value { - case "true": - p.LoadBalanced = true - case "false": - p.LoadBalanced = false - default: - return fmt.Errorf("invalid value for %q: %q", key, value) - } + if idx := strings.Index(uri, "@"); idx != -1 { + userInfo := uri[:idx] + uri = uri[idx+1:] - p.LoadBalancedSet = true - case "localthresholdms": - n, err := strconv.Atoi(value) - if err != nil || n < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) - } - p.LocalThreshold = time.Duration(n) * time.Millisecond - p.LocalThresholdSet = true - case "maxidletimems": - n, err := strconv.Atoi(value) - if err != nil || n < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) - } - p.MaxConnIdleTime = time.Duration(n) * time.Millisecond - p.MaxConnIdleTimeSet = true - case "maxpoolsize": - n, err := strconv.Atoi(value) - if err != nil || n < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) - } - p.MaxPoolSize = uint64(n) - p.MaxPoolSizeSet = true - case "minpoolsize": - n, err := strconv.Atoi(value) - if err != nil || n < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) - } - p.MinPoolSize = uint64(n) - p.MinPoolSizeSet = true - case "maxconnecting": - n, err := strconv.Atoi(value) - if err != nil || n < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) - } - p.MaxConnecting = uint64(n) - p.MaxConnectingSet = true - case "readconcernlevel": - p.ReadConcernLevel = value - case "readpreference": - p.ReadPreference = value - case "readpreferencetags": - if value == "" { - // If "readPreferenceTags=" is supplied, append an empty map to tag sets to - // represent a wild-card. 
- p.ReadPreferenceTagSets = append(p.ReadPreferenceTagSets, map[string]string{}) - break - } + username := userInfo + var password string - tags := make(map[string]string) - items := strings.Split(value, ",") - for _, item := range items { - parts := strings.Split(item, ":") - if len(parts) != 2 { - return fmt.Errorf("invalid value for %q: %q", key, value) - } - tags[parts[0]] = parts[1] - } - p.ReadPreferenceTagSets = append(p.ReadPreferenceTagSets, tags) - case "maxstaleness", "maxstalenessseconds": - n, err := strconv.Atoi(value) - if err != nil || n < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) - } - p.MaxStaleness = time.Duration(n) * time.Second - p.MaxStalenessSet = true - case "replicaset": - p.ReplicaSet = value - case "retrywrites": - switch value { - case "true": - p.RetryWrites = true - case "false": - p.RetryWrites = false - default: - return fmt.Errorf("invalid value for %q: %q", key, value) + if idx := strings.Index(userInfo, ":"); idx != -1 { + username = userInfo[:idx] + password = userInfo[idx+1:] + connStr.PasswordSet = true } - p.RetryWritesSet = true - case "retryreads": - switch value { - case "true": - p.RetryReads = true - case "false": - p.RetryReads = false - default: - return fmt.Errorf("invalid value for %q: %q", key, value) + // Validate and process the username. + if strings.Contains(username, "/") { + return nil, fmt.Errorf("unescaped slash in username") } - - p.RetryReadsSet = true - case "servermonitoringmode": - if !IsValidServerMonitoringMode(value) { - return fmt.Errorf("invalid value for %q: %q", key, value) + connStr.Username, err = url.PathUnescape(username) + if err != nil { + return nil, fmt.Errorf("invalid username: %w", err) } + connStr.UsernameSet = true - p.ServerMonitoringMode = value - case "serverselectiontimeoutms": - n, err := strconv.Atoi(value) - if err != nil || n < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) + // Validate and process the password. + if strings.Contains(password, ":") { + return nil, fmt.Errorf("unescaped colon in password") } - p.ServerSelectionTimeout = time.Duration(n) * time.Millisecond - p.ServerSelectionTimeoutSet = true - case "sockettimeoutms": - n, err := strconv.Atoi(value) - if err != nil || n < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) + if strings.Contains(password, "/") { + return nil, fmt.Errorf("unescaped slash in password") } - p.SocketTimeout = time.Duration(n) * time.Millisecond - p.SocketTimeoutSet = true - case "srvmaxhosts": - // srvMaxHosts can only be set on URIs with the "mongodb+srv" scheme - if p.Scheme != SchemeMongoDBSRV { - return fmt.Errorf("cannot specify srvMaxHosts on non-SRV URI") + connStr.Password, err = url.PathUnescape(password) + if err != nil { + return nil, fmt.Errorf("invalid password: %w", err) } + } - n, err := strconv.Atoi(value) - if err != nil || n < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) + // fetch the hosts field + hosts := uri + if idx := strings.IndexAny(uri, "/?@"); idx != -1 { + if uri[idx] == '@' { + return nil, fmt.Errorf("unescaped @ sign in user info") } - p.SRVMaxHosts = n - case "srvservicename": - // srvServiceName can only be set on URIs with the "mongodb+srv" scheme - if p.Scheme != SchemeMongoDBSRV { - return fmt.Errorf("cannot specify srvServiceName on non-SRV URI") + if uri[idx] == '?' { + return nil, fmt.Errorf("must have a / before the query ?") } + hosts = uri[:idx] + } - // srvServiceName must be between 1 and 62 characters according to - // our specification. 
Empty service names are not valid, and the service - // name (including prepended underscore) should not exceed the 63 character - // limit for DNS query subdomains. - if len(value) < 1 || len(value) > 62 { - return fmt.Errorf("srvServiceName value must be between 1 and 62 characters") - } - p.SRVServiceName = value - case "ssl", "tls": - switch value { - case "true": - p.SSL = true - case "false": - p.SSL = false - default: - return fmt.Errorf("invalid value for %q: %q", key, value) + for _, host := range strings.Split(hosts, ",") { + host, err = sanitizeHost(host) + if err != nil { + return nil, fmt.Errorf("invalid host %q: %w", host, err) } - if p.tlsssl != nil && *p.tlsssl != p.SSL { - return errors.New("tls and ssl options, when both specified, must be equivalent") + if host != "" { + connStr.RawHosts = append(connStr.RawHosts, host) } + } + connStr.Hosts = connStr.RawHosts + uri = uri[len(hosts):] + extractedDatabase, err := extractDatabaseFromURI(uri) + if err != nil { + return nil, err + } - p.tlsssl = new(bool) - *p.tlsssl = p.SSL - - p.SSLSet = true - case "sslclientcertificatekeyfile", "tlscertificatekeyfile": - p.SSL = true - p.SSLSet = true - p.SSLClientCertificateKeyFile = value - p.SSLClientCertificateKeyFileSet = true - case "sslclientcertificatekeypassword", "tlscertificatekeyfilepassword": - p.SSLClientCertificateKeyPassword = func() string { return value } - p.SSLClientCertificateKeyPasswordSet = true - case "tlscertificatefile": - p.SSL = true - p.SSLSet = true - p.SSLCertificateFile = value - p.SSLCertificateFileSet = true - case "tlsprivatekeyfile": - p.SSL = true - p.SSLSet = true - p.SSLPrivateKeyFile = value - p.SSLPrivateKeyFileSet = true - case "sslinsecure", "tlsinsecure": - switch value { - case "true": - p.SSLInsecure = true - case "false": - p.SSLInsecure = false - default: - return fmt.Errorf("invalid value for %q: %q", key, value) - } + uri = extractedDatabase.uri + connStr.Database = extractedDatabase.db - p.SSLInsecureSet = true - case "sslcertificateauthorityfile", "tlscafile": - p.SSL = true - p.SSLSet = true - p.SSLCaFile = value - p.SSLCaFileSet = true - case "timeoutms": - n, err := strconv.Atoi(value) - if err != nil || n < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) - } - p.Timeout = time.Duration(n) * time.Millisecond - p.TimeoutSet = true - case "tlsdisableocspendpointcheck": - p.SSL = true - p.SSLSet = true - - switch value { - case "true": - p.SSLDisableOCSPEndpointCheck = true - case "false": - p.SSLDisableOCSPEndpointCheck = false - default: - return fmt.Errorf("invalid value for %q: %q", key, value) - } - p.SSLDisableOCSPEndpointCheckSet = true - case "w": - if w, err := strconv.Atoi(value); err == nil { - if w < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) - } + // grab connection arguments from URI + connectionArgsFromQueryString, err := extractQueryArgsFromURI(uri) + if err != nil { + return nil, err + } - p.WNumber = w - p.WNumberSet = true - p.WString = "" - break + // grab connection arguments from TXT record and enable SSL if "mongodb+srv://" + var connectionArgsFromTXT []string + if connStr.Scheme == SchemeMongoDBSRV && p.dnsResolver != nil { + connectionArgsFromTXT, err = p.dnsResolver.GetConnectionArgsFromTXT(hosts) + if err != nil { + return nil, err } - p.WString = value - p.WNumberSet = false + // SSL is enabled by default for SRV, but can be manually disabled with "ssl=false". 
+ connStr.SSL = true + connStr.SSLSet = true + } + + // add connection arguments from URI and TXT records to connstring + connectionArgPairs := make([]string, 0, len(connectionArgsFromTXT)+len(connectionArgsFromQueryString)) + connectionArgPairs = append(connectionArgPairs, connectionArgsFromTXT...) + connectionArgPairs = append(connectionArgPairs, connectionArgsFromQueryString...) - case "wtimeoutms": - n, err := strconv.Atoi(value) - if err != nil || n < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) - } - p.WTimeout = time.Duration(n) * time.Millisecond - p.WTimeoutSet = true - case "wtimeout": - // Defer to wtimeoutms, but not to a manually-set option. - if p.WTimeoutSet { - break - } - n, err := strconv.Atoi(value) - if err != nil || n < 0 { - return fmt.Errorf("invalid value for %q: %q", key, value) - } - p.WTimeout = time.Duration(n) * time.Millisecond - case "zlibcompressionlevel": - level, err := strconv.Atoi(value) - if err != nil || (level < -1 || level > 9) { - return fmt.Errorf("invalid value for %q: %q", key, value) - } + err = connStr.addOptions(connectionArgPairs) + if err != nil { + return nil, err + } - if level == -1 { - level = wiremessage.DefaultZlibLevel - } - p.ZlibLevel = level - p.ZlibLevelSet = true - case "zstdcompressionlevel": - const maxZstdLevel = 22 // https://github.com/facebook/zstd/blob/a880ca239b447968493dd2fed3850e766d6305cc/contrib/linux-kernel/lib/zstd/compress.c#L3291 - level, err := strconv.Atoi(value) - if err != nil || (level < -1 || level > maxZstdLevel) { - return fmt.Errorf("invalid value for %q: %q", key, value) + // do SRV lookup if "mongodb+srv://" + if connStr.Scheme == SchemeMongoDBSRV && p.dnsResolver != nil { + parsedHosts, err := p.dnsResolver.ParseHosts(hosts, connStr.SRVServiceName, true) + if err != nil { + return connStr, err } - if level == -1 { - level = wiremessage.DefaultZstdLevel + // If p.SRVMaxHosts is non-zero and is less than the number of hosts, randomly + // select SRVMaxHosts hosts from parsedHosts. + if connStr.SRVMaxHosts > 0 && connStr.SRVMaxHosts < len(parsedHosts) { + random.Shuffle(len(parsedHosts), func(i, j int) { + parsedHosts[i], parsedHosts[j] = parsedHosts[j], parsedHosts[i] + }) + parsedHosts = parsedHosts[:connStr.SRVMaxHosts] } - p.ZstdLevel = level - p.ZstdLevelSet = true - default: - if p.UnknownOptions == nil { - p.UnknownOptions = make(map[string][]string) + + var hosts []string + for _, host := range parsedHosts { + host, err = sanitizeHost(host) + if err != nil { + return connStr, fmt.Errorf("invalid host %q: %w", host, err) + } + if host != "" { + hosts = append(hosts, host) + } } - p.UnknownOptions[lowerKey] = append(p.UnknownOptions[lowerKey], value) + connStr.Hosts = hosts + } + if len(connStr.Hosts) == 0 { + return nil, fmt.Errorf("must have at least 1 host") } - if p.Options == nil { - p.Options = make(map[string][]string) + err = connStr.setDefaultAuthParams(extractedDatabase.db) + if err != nil { + return nil, err } - p.Options[lowerKey] = append(p.Options[lowerKey], value) - return nil + // If WTimeout was set from manual options passed in, set WTImeoutSet to true. + if connStr.WTimeoutSetFromOption { + connStr.WTimeoutSet = true + } + + return connStr, nil +} + +// IsValidServerMonitoringMode will return true if the given string matches a +// valid server monitoring mode. 
+func IsValidServerMonitoringMode(mode string) bool { + return mode == ServerMonitoringModeAuto || + mode == ServerMonitoringModeStream || + mode == ServerMonitoringModePoll } func extractQueryArgsFromURI(uri string) ([]string, error) { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go index 4c254c03c..576c007d6 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/crypt.go @@ -9,6 +9,7 @@ package driver import ( "context" "crypto/tls" + "errors" "fmt" "io" "strings" @@ -399,7 +400,7 @@ func (c *crypt) decryptKey(kmsCtx *mongocrypt.KmsContext) error { res := make([]byte, bytesNeeded) bytesRead, err := conn.Read(res) - if err != nil && err != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return err } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go index 848554d3a..9334d493e 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/dns/dns.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package dns is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package dns import ( @@ -104,8 +111,8 @@ func (r *Resolver) fetchSeedlistFromSRV(host string, srvName string, stopOnErr b } func validateSRVResult(recordFromSRV, inputHostName string) error { - separatedInputDomain := strings.Split(inputHostName, ".") - separatedRecord := strings.Split(recordFromSRV, ".") + separatedInputDomain := strings.Split(strings.ToLower(inputHostName), ".") + separatedRecord := strings.Split(strings.ToLower(recordFromSRV), ".") if len(separatedRecord) < 2 { return errors.New("DNS name must contain at least 2 labels") } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go index 5fd3ddcb4..900729bf8 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/driver.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package driver is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! 
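The validateSRVResult change above lowercases both names before splitting, because DNS names are case-insensitive and SRV targets can come back cased differently from the seed host. A simplified suffix comparison in the same spirit; sameDomainSuffix is illustrative, and the driver's real check also constrains label counts:

    package main

    import (
        "fmt"
        "strings"
    )

    func sameDomainSuffix(record, input string) bool {
        rec := strings.Split(strings.ToLower(record), ".")
        in := strings.Split(strings.ToLower(input), ".")
        if len(rec) < len(in) {
            return false
        }
        // Compare labels from the right, after normalizing case.
        for i := 1; i <= len(in); i++ {
            if rec[len(rec)-i] != in[len(in)-i] {
                return false
            }
        }
        return true
    }

    func main() {
        fmt.Println(sameDomainSuffix("Shard1.Example.COM", "example.com")) // true
    }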
package driver // import "go.mongodb.org/mongo-driver/x/mongo/driver" import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go index 3b8b9823b..177c2d450 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/errors.go @@ -14,6 +14,7 @@ import ( "strings" "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/internal/csot" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" ) @@ -377,7 +378,7 @@ func (e Error) NamespaceNotFound() bool { // ExtractErrorFromServerResponse extracts an error from a server response bsoncore.Document // if there is one. Also used in testing for SDAM. -func ExtractErrorFromServerResponse(doc bsoncore.Document) error { +func ExtractErrorFromServerResponse(ctx context.Context, doc bsoncore.Document) error { var errmsg, codeName string var code int32 var labels []string @@ -514,7 +515,7 @@ func ExtractErrorFromServerResponse(doc bsoncore.Document) error { errmsg = "command failed" } - return Error{ + err := Error{ Code: code, Message: errmsg, Name: codeName, @@ -522,6 +523,20 @@ func ExtractErrorFromServerResponse(doc bsoncore.Document) error { TopologyVersion: tv, Raw: doc, } + + // If CSOT is enabled and we get a MaxTimeMSExpired error, assume that + // the error was caused by setting "maxTimeMS" on the command based on + // the context deadline or on "timeoutMS". In that case, make the error + // wrap context.DeadlineExceeded so that users can always check + // + // errors.Is(err, context.DeadlineExceeded) + // + // for either client-side or server-side timeouts. + if csot.IsTimeoutContext(ctx) && err.Code == 50 { + err.Wrapped = context.DeadlineExceeded + } + + return err } if len(wcError.WriteErrors) > 0 || wcError.WriteConcernError != nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go index 24f9f9b0e..80f500085 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/mongocrypt_not_enabled.go @@ -7,6 +7,13 @@ //go:build !cse // +build !cse +// Package mongocrypt is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package mongocrypt import ( diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go new file mode 100644 index 000000000..e0cc77052 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/mongocrypt/options/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package options is intended for internal use only. 
It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package options diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/config.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/config.go index eac2aab7f..5b720cd59 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/config.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/config.go @@ -57,11 +57,11 @@ func newConfig(certChain []*x509.Certificate, opts *VerifyOptions) (config, erro var err error cfg.ocspRequestBytes, err = ocsp.CreateRequest(cfg.serverCert, cfg.issuer, nil) if err != nil { - return cfg, fmt.Errorf("error creating OCSP request: %v", err) + return cfg, fmt.Errorf("error creating OCSP request: %w", err) } cfg.ocspRequest, err = ocsp.ParseRequest(cfg.ocspRequestBytes) if err != nil { - return cfg, fmt.Errorf("error parsing OCSP request bytes: %v", err) + return cfg, fmt.Errorf("error parsing OCSP request bytes: %w", err) } return cfg, nil diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go index 849530fde..2bff94a65 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/ocsp/ocsp.go @@ -4,6 +4,13 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package ocsp is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! package ocsp import ( @@ -161,10 +168,10 @@ func processStaple(cfg config, staple []byte) (*ResponseDetails, error) { // If the stapled response could not be parsed correctly, error. This can happen if the response is malformed, // the response does not cover the certificate presented by the server, or if the response contains an error // status. - return nil, fmt.Errorf("error parsing stapled response: %v", err) + return nil, fmt.Errorf("error parsing stapled response: %w", err) } if err = verifyResponse(cfg, parsedResponse); err != nil { - return nil, fmt.Errorf("error validating stapled response: %v", err) + return nil, fmt.Errorf("error validating stapled response: %w", err) } return extractResponseDetails(parsedResponse), nil @@ -192,7 +199,7 @@ func isMustStapleCertificate(cert *x509.Certificate) (bool, error) { // Use []*big.Int to ensure that all values in the sequence can be successfully unmarshalled. 
var featureValues []*big.Int if _, err := asn1.Unmarshal(featureExtension.Value, &featureValues); err != nil { - return false, fmt.Errorf("error unmarshalling TLS feature extension values: %v", err) + return false, fmt.Errorf("error unmarshalling TLS feature extension values: %w", err) } for _, value := range featureValues { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go index 905c9cfc5..db5367bed 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation.go @@ -310,6 +310,11 @@ type Operation struct { // OP_MSG as well as for logging server selection data. Name string + // OmitCSOTMaxTimeMS omits the automatically-calculated "maxTimeMS" from the + // command when CSOT is enabled. It does not effect "maxTimeMS" set by + // [Operation.MaxTime]. + OmitCSOTMaxTimeMS bool + // omitReadPreference is a boolean that indicates whether to omit the // read preference from the command. This omition includes the case // where a default read preference is used when the operation @@ -458,7 +463,7 @@ func (op Operation) getServerAndConnection( if err := pinnedConn.PinToTransaction(); err != nil { // Close the original connection to avoid a leak. _ = conn.Close() - return nil, nil, fmt.Errorf("error incrementing connection reference count when starting a transaction: %v", err) + return nil, nil, fmt.Errorf("error incrementing connection reference count when starting a transaction: %w", err) } op.Client.PinnedConnection = pinnedConn } @@ -499,9 +504,9 @@ func (op Operation) Execute(ctx context.Context) error { return err } - // If no deadline is set on the passed-in context, op.Timeout is set, and context is not already - // a Timeout context, honor op.Timeout in new Timeout context for operation execution. - if _, deadlineSet := ctx.Deadline(); !deadlineSet && op.Timeout != nil && !csot.IsTimeoutContext(ctx) { + // If op.Timeout is set, and context is not already a Timeout context, honor + // op.Timeout in new Timeout context for operation execution. + if op.Timeout != nil && !csot.IsTimeoutContext(ctx) { newCtx, cancelFunc := csot.MakeTimeoutContext(ctx, *op.Timeout) // Redefine ctx to be the new timeout-derived context. ctx = newCtx @@ -617,6 +622,13 @@ func (op Operation) Execute(ctx context.Context) error { } }() for { + // If we're starting a retry and the error from the previous try was + // a context canceled or deadline exceeded error, stop retrying and + // return that error. + if errors.Is(prevErr, context.Canceled) || errors.Is(prevErr, context.DeadlineExceeded) { + return prevErr + } + requestID := wiremessage.NextRequestID() // If the server or connection are nil, try to select a new server and get a new connection. @@ -683,8 +695,7 @@ func (op Operation) Execute(ctx context.Context) error { first = false } - // Calculate maxTimeMS value to potentially be appended to the wire message. 
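The new guard at the top of the Execute retry loop above returns early when the previous attempt failed with a context error, since no retry can succeed once the caller's context is done. A self-contained sketch of that pattern (retry and fn are illustrative names, not driver API):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retry keeps calling fn until it succeeds, but gives up as soon as the
// previous failure was a context cancellation or deadline expiry, because
// further attempts cannot succeed once the caller's context is done.
func retry(ctx context.Context, attempts int, fn func(context.Context) error) error {
	var prevErr error
	for i := 0; i < attempts; i++ {
		if errors.Is(prevErr, context.Canceled) || errors.Is(prevErr, context.DeadlineExceeded) {
			return prevErr
		}
		if prevErr = fn(ctx); prevErr == nil {
			return nil
		}
	}
	return prevErr
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	err := retry(ctx, 5, func(ctx context.Context) error {
		<-ctx.Done() // simulate an attempt that outlives the deadline
		return ctx.Err()
	})
	fmt.Println(err) // context deadline exceeded
}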
- maxTimeMS, err := op.calculateMaxTimeMS(ctx, srvr.RTTMonitor().P90(), srvr.RTTMonitor().Stats()) + maxTimeMS, err := op.calculateMaxTimeMS(ctx, srvr.RTTMonitor()) if err != nil { return err } @@ -777,7 +788,7 @@ func (op Operation) Execute(ctx context.Context) error { } else if deadline, ok := ctx.Deadline(); ok { if csot.IsTimeoutContext(ctx) && time.Now().Add(srvr.RTTMonitor().P90()).After(deadline) { err = fmt.Errorf( - "remaining time %v until context deadline is less than 90th percentile RTT: %w\n%v", + "remaining time %v until context deadline is less than 90th percentile network round-trip time: %w\n%v", time.Until(deadline), ErrDeadlineWouldBeExceeded, srvr.RTTMonitor().Stats()) @@ -1089,7 +1100,7 @@ func (op Operation) readWireMessage(ctx context.Context, conn Connection) (resul } // decode - res, err := op.decodeResult(opcode, rem) + res, err := op.decodeResult(ctx, opcode, rem) // Update cluster/operation time and recovery tokens before handling the error to ensure we're properly updating // everything. op.updateClusterTimes(res) @@ -1492,7 +1503,7 @@ func (op Operation) addWriteConcern(dst []byte, desc description.SelectedServer) } t, data, err := wc.MarshalBSONValue() - if err == writeconcern.ErrEmptyWriteConcern { + if errors.Is(err, writeconcern.ErrEmptyWriteConcern) { return dst, nil } if err != nil { @@ -1562,10 +1573,21 @@ func (op Operation) addClusterTime(dst []byte, desc description.SelectedServer) // if the ctx is a Timeout context. If the context is not a Timeout context, it uses the // operation's MaxTimeMS if set. If no MaxTimeMS is set on the operation, and context is // not a Timeout context, calculateMaxTimeMS returns 0. -func (op Operation) calculateMaxTimeMS(ctx context.Context, rtt90 time.Duration, rttStats string) (uint64, error) { - if csot.IsTimeoutContext(ctx) { +func (op Operation) calculateMaxTimeMS(ctx context.Context, mon RTTMonitor) (uint64, error) { + // If CSOT is enabled and we're not omitting the CSOT-calculated maxTimeMS + // value, then calculate maxTimeMS. + // + // This allows commands that do not currently send CSOT-calculated maxTimeMS + // (e.g. Find and Aggregate) to still use a manually-provided maxTimeMS + // value. + // + // TODO(GODRIVER-2944): Remove or refactor this logic when we add the + // "timeoutMode" option, which will allow users to opt-in to the + // CSOT-calculated maxTimeMS values if that's the behavior they want. + if csot.IsTimeoutContext(ctx) && !op.OmitCSOTMaxTimeMS { if deadline, ok := ctx.Deadline(); ok { remainingTimeout := time.Until(deadline) + rtt90 := mon.P90() maxTime := remainingTimeout - rtt90 // Always round up to the next millisecond value so we never truncate the calculated @@ -1573,11 +1595,21 @@ func (op Operation) calculateMaxTimeMS(ctx context.Context, rtt90 time.Duration, maxTimeMS := int64((maxTime + (time.Millisecond - 1)) / time.Millisecond) if maxTimeMS <= 0 { return 0, fmt.Errorf( - "remaining time %v until context deadline is less than or equal to 90th percentile RTT: %w\n%v", + "negative maxTimeMS: remaining time %v until context deadline is less than 90th percentile network round-trip time (%v): %w", remainingTimeout, - ErrDeadlineWouldBeExceeded, - rttStats) + mon.Stats(), + ErrDeadlineWouldBeExceeded) } + + // The server will return a "BadValue" error if maxTimeMS is greater + // than the maximum positive int32 value (about 24.9 days). 
If the + // user specified a timeout value greater than that, omit maxTimeMS + // and let the client-side timeout handle cancelling the op if the + // timeout is ever reached. + if maxTimeMS > math.MaxInt32 { + return 0, nil + } + return uint64(maxTimeMS), nil } } else if op.MaxTime != nil { @@ -1748,7 +1780,7 @@ func (op Operation) createReadPref(desc description.SelectedServer, isOpQuery bo doc = bsoncore.AppendBooleanElement(doc, "enabled", *hedgeEnabled) doc, err = bsoncore.AppendDocumentEnd(doc, hedgeIdx) if err != nil { - return nil, fmt.Errorf("error creating hedge document: %v", err) + return nil, fmt.Errorf("error creating hedge document: %w", err) } } @@ -1827,7 +1859,7 @@ func (Operation) decodeOpReply(wm []byte) opReply { return reply } -func (op Operation) decodeResult(opcode wiremessage.OpCode, wm []byte) (bsoncore.Document, error) { +func (op Operation) decodeResult(ctx context.Context, opcode wiremessage.OpCode, wm []byte) (bsoncore.Document, error) { switch opcode { case wiremessage.OpReply: reply := op.decodeOpReply(wm) @@ -1845,7 +1877,7 @@ func (op Operation) decodeResult(opcode wiremessage.OpCode, wm []byte) (bsoncore return nil, NewCommandResponseError("malformed OP_REPLY: invalid document", err) } - return rdr, ExtractErrorFromServerResponse(rdr) + return rdr, ExtractErrorFromServerResponse(ctx, rdr) case wiremessage.OpMsg: _, wm, ok := wiremessage.ReadMsgFlags(wm) if !ok { @@ -1867,7 +1899,6 @@ func (op Operation) decodeResult(opcode wiremessage.OpCode, wm []byte) (bsoncore return nil, errors.New("malformed wire message: insufficient bytes to read single document") } case wiremessage.DocumentSequence: - // TODO(GODRIVER-617): Implement document sequence returns. _, _, wm, ok = wiremessage.ReadMsgSectionDocumentSequence(wm) if !ok { return nil, errors.New("malformed wire message: insufficient bytes to read document sequence") @@ -1882,7 +1913,7 @@ func (op Operation) decodeResult(opcode wiremessage.OpCode, wm []byte) (bsoncore return nil, NewCommandResponseError("malformed OP_MSG: invalid document", err) } - return res, ExtractErrorFromServerResponse(res) + return res, ExtractErrorFromServerResponse(ctx, res) default: return nil, fmt.Errorf("cannot decode result from %s", opcode) } @@ -1963,7 +1994,7 @@ func (op Operation) publishStartedEvent(ctx context.Context, info startedInforma } } -// canPublishSucceededEvent returns true if a CommandSucceededEvent can be +// canPublishFinishedEvent returns true if a CommandSucceededEvent can be // published for the given command. This is true if the command is not an // unacknowledged write and the command monitor is monitoring succeeded events. 
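The calculateMaxTimeMS rework above derives the command's maxTimeMS from the context deadline: the remaining budget minus the 90th-percentile round-trip time, rounded up to a whole millisecond, with values over math.MaxInt32 omitted so the server never rejects them as a BadValue. A simplified standalone sketch of that arithmetic (maxTimeMS and the fixed rtt90 input are illustrative, not the driver's implementation):

package main

import (
	"context"
	"fmt"
	"math"
	"time"
)

// maxTimeMS computes a server-side time budget from the context deadline,
// leaving room for one network round trip. rtt90 is an assumed input here,
// not a live RTT monitor.
func maxTimeMS(ctx context.Context, rtt90 time.Duration) (uint64, error) {
	deadline, ok := ctx.Deadline()
	if !ok {
		return 0, nil // no deadline: nothing to send
	}
	remaining := time.Until(deadline)
	maxTime := remaining - rtt90

	// Round up so a small positive budget never truncates to zero.
	ms := int64((maxTime + (time.Millisecond - 1)) / time.Millisecond)
	if ms <= 0 {
		return 0, fmt.Errorf("remaining time %v is within the observed round-trip time", remaining)
	}
	if ms > math.MaxInt32 {
		return 0, nil // too large for the server; rely on the client-side timeout
	}
	return uint64(ms), nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()
	fmt.Println(maxTimeMS(ctx, 20*time.Millisecond))
}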
func (op Operation) canPublishFinishedEvent(info finishedInformation) bool { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go index ca0e79652..44467df8f 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/aggregate.go @@ -50,6 +50,7 @@ type Aggregate struct { hasOutputStage bool customOptions map[string]bsoncore.Value timeout *time.Duration + omitCSOTMaxTimeMS bool result driver.CursorResponse } @@ -113,6 +114,7 @@ func (a *Aggregate) Execute(ctx context.Context) error { MaxTime: a.maxTime, Timeout: a.timeout, Name: driverutil.AggregateOp, + OmitCSOTMaxTimeMS: a.omitCSOTMaxTimeMS, }.Execute(ctx) } @@ -419,3 +421,15 @@ func (a *Aggregate) Timeout(timeout *time.Duration) *Aggregate { a.timeout = timeout return a } + +// OmitCSOTMaxTimeMS omits the automatically-calculated "maxTimeMS" from the +// command when CSOT is enabled. It does not effect "maxTimeMS" set by +// [Aggregate.MaxTime]. +func (a *Aggregate) OmitCSOTMaxTimeMS(omit bool) *Aggregate { + if a == nil { + a = new(Aggregate) + } + + a.omitCSOTMaxTimeMS = omit + return a +} diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go index 5aad3f72e..35283794a 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go @@ -14,7 +14,6 @@ import ( "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" @@ -24,7 +23,6 @@ import ( // Command is used to run a generic operation. type Command struct { command bsoncore.Document - readConcern *readconcern.ReadConcern database string deployment driver.Deployment selector description.ServerSelector @@ -79,7 +77,6 @@ func (c *Command) Execute(ctx context.Context) error { return errors.New("the Command operation must have a Deployment set before Execute can be called") } - // TODO(GODRIVER-2649): Actually pass readConcern to underlying driver.Operation. return driver.Operation{ CommandFn: func(dst []byte, desc description.SelectedServer) ([]byte, error) { return append(dst, c.command[4:len(c.command)-1]...), nil @@ -163,16 +160,6 @@ func (c *Command) Deployment(deployment driver.Deployment) *Command { return c } -// ReadConcern specifies the read concern for this operation. -func (c *Command) ReadConcern(readConcern *readconcern.ReadConcern) *Command { - if c == nil { - c = new(Command) - } - - c.readConcern = readConcern - return c -} - // ReadPreference set the read preference used with this operation. 
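The OmitCSOTMaxTimeMS setters added in this patch follow the operation package's nil-receiver builder convention: each setter allocates the receiver if needed and returns it, so option chains work even when started from a nil pointer. A toy illustration of the pattern (find and Limit are invented names, not driver types):

package main

import "fmt"

// find is a toy options builder demonstrating the nil-receiver setter style.
type find struct {
	limit int64
}

// Limit allocates the receiver if needed, so chains work even from nil.
func (f *find) Limit(n int64) *find {
	if f == nil {
		f = new(find)
	}
	f.limit = n
	return f
}

func main() {
	var f *find // nil is a valid starting point
	f = f.Limit(10)
	fmt.Println(f.limit) // 10
}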
func (c *Command) ReadPreference(readPreference *readpref.ReadPref) *Command { if c == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go index a16f9d716..cb0d80795 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/create_search_indexes.go @@ -15,7 +15,6 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -23,19 +22,18 @@ import ( // CreateSearchIndexes performs a createSearchIndexes operation. type CreateSearchIndexes struct { - indexes bsoncore.Document - session *session.Client - clock *session.ClusterClock - collection string - monitor *event.CommandMonitor - crypt driver.Crypt - database string - deployment driver.Deployment - selector description.ServerSelector - writeConcern *writeconcern.WriteConcern - result CreateSearchIndexesResult - serverAPI *driver.ServerAPIOptions - timeout *time.Duration + indexes bsoncore.Document + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + result CreateSearchIndexesResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration } // CreateSearchIndexResult represents a single search index result in CreateSearchIndexesResult. @@ -109,9 +107,15 @@ func (csi *CreateSearchIndexes) Execute(ctx context.Context) error { return driver.Operation{ CommandFn: csi.command, ProcessResponseFn: csi.processResponse, + Client: csi.session, + Clock: csi.clock, CommandMonitor: csi.monitor, + Crypt: csi.crypt, Database: csi.database, Deployment: csi.deployment, + Selector: csi.selector, + ServerAPI: csi.serverAPI, + Timeout: csi.timeout, }.Execute(ctx) } @@ -214,16 +218,6 @@ func (csi *CreateSearchIndexes) ServerSelector(selector description.ServerSelect return csi } -// WriteConcern sets the write concern for this operation. -func (csi *CreateSearchIndexes) WriteConcern(writeConcern *writeconcern.WriteConcern) *CreateSearchIndexes { - if csi == nil { - csi = new(CreateSearchIndexes) - } - - csi.writeConcern = writeConcern - return csi -} - // ServerAPI sets the server API version for this operation. func (csi *CreateSearchIndexes) ServerAPI(serverAPI *driver.ServerAPIOptions) *CreateSearchIndexes { if csi == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go new file mode 100644 index 000000000..e55b12a74 --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package operation is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. 
The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package operation diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go index 25cde8154..3992c8316 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/drop_search_index.go @@ -14,7 +14,6 @@ import ( "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -22,19 +21,18 @@ import ( // DropSearchIndex performs an dropSearchIndex operation. type DropSearchIndex struct { - index string - session *session.Client - clock *session.ClusterClock - collection string - monitor *event.CommandMonitor - crypt driver.Crypt - database string - deployment driver.Deployment - selector description.ServerSelector - writeConcern *writeconcern.WriteConcern - result DropSearchIndexResult - serverAPI *driver.ServerAPIOptions - timeout *time.Duration + index string + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + result DropSearchIndexResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration } // DropSearchIndexResult represents a dropSearchIndex result returned by the server. @@ -93,7 +91,6 @@ func (dsi *DropSearchIndex) Execute(ctx context.Context) error { Database: dsi.database, Deployment: dsi.deployment, Selector: dsi.selector, - WriteConcern: dsi.writeConcern, ServerAPI: dsi.serverAPI, Timeout: dsi.timeout, }.Execute(ctx) @@ -196,16 +193,6 @@ func (dsi *DropSearchIndex) ServerSelector(selector description.ServerSelector) return dsi } -// WriteConcern sets the write concern for this operation. -func (dsi *DropSearchIndex) WriteConcern(writeConcern *writeconcern.WriteConcern) *DropSearchIndex { - if dsi == nil { - dsi = new(DropSearchIndex) - } - - dsi.writeConcern = writeConcern - return dsi -} - // ServerAPI sets the server API version for this operation. func (dsi *DropSearchIndex) ServerAPI(serverAPI *driver.ServerAPIOptions) *DropSearchIndex { if dsi == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go index 27bb5b4f9..8950fde86 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/find.go @@ -62,6 +62,7 @@ type Find struct { result driver.CursorResponse serverAPI *driver.ServerAPIOptions timeout *time.Duration + omitCSOTMaxTimeMS bool logger *logger.Logger } @@ -110,6 +111,7 @@ func (f *Find) Execute(ctx context.Context) error { Timeout: f.timeout, Logger: f.logger, Name: driverutil.FindOp, + OmitCSOTMaxTimeMS: f.omitCSOTMaxTimeMS, }.Execute(ctx) } @@ -552,6 +554,18 @@ func (f *Find) Timeout(timeout *time.Duration) *Find { return f } +// OmitCSOTMaxTimeMS omits the automatically-calculated "maxTimeMS" from the +// command when CSOT is enabled. 
It does not effect "maxTimeMS" set by +// [Find.MaxTime]. +func (f *Find) OmitCSOTMaxTimeMS(omit bool) *Find { + if f == nil { + f = new(Find) + } + + f.omitCSOTMaxTimeMS = omit + return f +} + // Logger sets the logger for this operation. func (f *Find) Logger(logger *logger.Logger) *Find { if f == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go index 6e750fd03..16f2ebf6c 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go @@ -530,7 +530,7 @@ func (h *Hello) handshakeCommand(dst []byte, desc description.SelectedServer) ([ func (h *Hello) command(dst []byte, desc description.SelectedServer) ([]byte, error) { // Use "hello" if topology is LoadBalanced, API version is declared or server // has responded with "helloOk". Otherwise, use legacy hello. - if desc.Kind == description.LoadBalanced || h.serverAPI != nil || desc.Server.HelloOK { + if h.loadBalanced || h.serverAPI != nil || desc.Server.HelloOK { dst = bsoncore.AppendInt32Element(dst, "hello", 1) } else { dst = bsoncore.AppendInt32Element(dst, handshake.LegacyHello, 1) @@ -575,8 +575,8 @@ func (h *Hello) StreamResponse(ctx context.Context, conn driver.StreamerConnecti // loadBalanced is False. If this is the case, then the drivers MUST use legacy // hello for the first message of the initial handshake with the OP_QUERY // protocol -func isLegacyHandshake(srvAPI *driver.ServerAPIOptions, deployment driver.Deployment) bool { - return srvAPI == nil && deployment.Kind() != description.LoadBalanced +func isLegacyHandshake(srvAPI *driver.ServerAPIOptions, loadbalanced bool) bool { + return srvAPI == nil && !loadbalanced } func (h *Hello) createOperation() driver.Operation { @@ -592,7 +592,7 @@ func (h *Hello) createOperation() driver.Operation { ServerAPI: h.serverAPI, } - if isLegacyHandshake(h.serverAPI, h.d) { + if isLegacyHandshake(h.serverAPI, h.loadBalanced) { op.Legacy = driver.LegacyHandshake } @@ -616,7 +616,7 @@ func (h *Hello) GetHandshakeInformation(ctx context.Context, _ address.Address, ServerAPI: h.serverAPI, } - if isLegacyHandshake(h.serverAPI, deployment) { + if isLegacyHandshake(h.serverAPI, h.loadBalanced) { op.Legacy = driver.LegacyHandshake } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go index ba807986c..64f2da7f6 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/update_search_index.go @@ -14,7 +14,6 @@ import ( "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/mongo/writeconcern" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" "go.mongodb.org/mongo-driver/x/mongo/driver/session" @@ -22,20 +21,19 @@ import ( // UpdateSearchIndex performs a updateSearchIndex operation. 
type UpdateSearchIndex struct { - index string - definition bsoncore.Document - session *session.Client - clock *session.ClusterClock - collection string - monitor *event.CommandMonitor - crypt driver.Crypt - database string - deployment driver.Deployment - selector description.ServerSelector - writeConcern *writeconcern.WriteConcern - result UpdateSearchIndexResult - serverAPI *driver.ServerAPIOptions - timeout *time.Duration + index string + definition bsoncore.Document + session *session.Client + clock *session.ClusterClock + collection string + monitor *event.CommandMonitor + crypt driver.Crypt + database string + deployment driver.Deployment + selector description.ServerSelector + result UpdateSearchIndexResult + serverAPI *driver.ServerAPIOptions + timeout *time.Duration } // UpdateSearchIndexResult represents a single index in the updateSearchIndexResult result. @@ -95,7 +93,6 @@ func (usi *UpdateSearchIndex) Execute(ctx context.Context) error { Database: usi.database, Deployment: usi.deployment, Selector: usi.selector, - WriteConcern: usi.writeConcern, ServerAPI: usi.serverAPI, Timeout: usi.timeout, }.Execute(ctx) @@ -209,16 +206,6 @@ func (usi *UpdateSearchIndex) ServerSelector(selector description.ServerSelector return usi } -// WriteConcern sets the write concern for this operation. -func (usi *UpdateSearchIndex) WriteConcern(writeConcern *writeconcern.WriteConcern) *UpdateSearchIndex { - if usi == nil { - usi = new(UpdateSearchIndex) - } - - usi.writeConcern = writeConcern - return usi -} - // ServerAPI sets the server API version for this operation. func (usi *UpdateSearchIndex) ServerAPI(serverAPI *driver.ServerAPIOptions) *UpdateSearchIndex { if usi == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go new file mode 100644 index 000000000..80b2ac2dd --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/session/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) MongoDB, Inc. 2024-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package session is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +package session diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go index af25b1f68..649e87b3d 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go @@ -18,6 +18,7 @@ import ( "sync/atomic" "time" + "go.mongodb.org/mongo-driver/internal/csot" "go.mongodb.org/mongo-driver/mongo/address" "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" @@ -77,6 +78,10 @@ type connection struct { // TODO(GODRIVER-2824): change driverConnectionID type to int64. 
driverConnectionID uint64 generation uint64 + + // awaitingResponse indicates that the server response was not completely + // read before returning the connection to the pool. + awaitingResponse bool } // newConnection handles the creation of a connection. It does not connect the connection. @@ -314,8 +319,8 @@ func transformNetworkError(ctx context.Context, originalError error, contextDead } // If there was an error and the context was cancelled, we assume it happened due to the cancellation. - if ctx.Err() == context.Canceled { - return context.Canceled + if errors.Is(ctx.Err(), context.Canceled) { + return ctx.Err() } // If there was a timeout error and the context deadline was used, we convert the error into @@ -324,7 +329,7 @@ func transformNetworkError(ctx context.Context, originalError error, contextDead return originalError } if netErr, ok := originalError.(net.Error); ok && netErr.Timeout() { - return context.DeadlineExceeded + return fmt.Errorf("%w: %s", context.DeadlineExceeded, originalError.Error()) } return originalError @@ -337,7 +342,10 @@ func (c *connection) cancellationListenerCallback() { func (c *connection) writeWireMessage(ctx context.Context, wm []byte) error { var err error if atomic.LoadInt64(&c.state) != connConnected { - return ConnectionError{ConnectionID: c.id, message: "connection is closed"} + return ConnectionError{ + ConnectionID: c.id, + message: "connection is closed", + } } var deadline time.Time @@ -388,7 +396,10 @@ func (c *connection) write(ctx context.Context, wm []byte) (err error) { // readWireMessage reads a wiremessage from the connection. The dst parameter will be overwritten. func (c *connection) readWireMessage(ctx context.Context) ([]byte, error) { if atomic.LoadInt64(&c.state) != connConnected { - return nil, ConnectionError{ConnectionID: c.id, message: "connection is closed"} + return nil, ConnectionError{ + ConnectionID: c.id, + message: "connection is closed", + } } var deadline time.Time @@ -408,10 +419,19 @@ func (c *connection) readWireMessage(ctx context.Context) ([]byte, error) { dst, errMsg, err := c.read(ctx) if err != nil { - // We closeConnection the connection because we don't know if there are other bytes left to read. - c.close() + if nerr := net.Error(nil); errors.As(err, &nerr) && nerr.Timeout() && csot.IsTimeoutContext(ctx) { + // If the error was a timeout error and CSOT is enabled, instead of + // closing the connection mark it as awaiting response so the pool + // can read the response before making it available to other + // operations. + c.awaitingResponse = true + } else { + // Otherwise, use the pre-CSOT behavior and close the connection + // because we don't know if there are other bytes left to read. + c.close() + } message := errMsg - if err == io.EOF { + if errors.Is(err, io.EOF) { message = "socket was unexpectedly closed" } return nil, ConnectionError{ @@ -858,7 +878,7 @@ func newCancellListener() *cancellListener { // Listen blocks until the provided context is cancelled or listening is aborted // via the StopListening function. If this detects that the context has been -// cancelled (i.e. ctx.Err() == context.Canceled), the provided callback is +// cancelled (i.e. errors.Is(ctx.Err(), context.Canceled), the provided callback is // called to abort in-progress work. Even if the context expires, this function // will block until StopListening is called. 
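The readWireMessage change above keys its behavior off whether the failure was a network timeout, detected with errors.As against net.Error rather than a direct type assertion, so wrapped timeouts are still recognized. A small sketch of that idiom (timeoutErr is a stand-in type invented for the example):

package main

import (
	"errors"
	"fmt"
	"net"
)

// timeoutErr satisfies net.Error for this sketch.
type timeoutErr struct{}

func (timeoutErr) Error() string   { return "i/o timeout" }
func (timeoutErr) Timeout() bool   { return true }
func (timeoutErr) Temporary() bool { return false }

// isNetTimeout mirrors the errors.As idiom from the connection code above.
func isNetTimeout(err error) bool {
	var nerr net.Error
	return errors.As(err, &nerr) && nerr.Timeout()
}

func main() {
	err := fmt.Errorf("read wire message: %w", timeoutErr{})
	fmt.Println(isNetTimeout(err)) // true, even through the wrap
}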
func (c *cancellListener) Listen(ctx context.Context, abortFn func()) { @@ -866,7 +886,7 @@ func (c *cancellListener) Listen(ctx context.Context, abortFn func()) { select { case <-ctx.Done(): - if ctx.Err() == context.Canceled { + if errors.Is(ctx.Err(), context.Canceled) { c.aborted = true abortFn() } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go index 7ce41864e..a6630aae7 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/errors.go @@ -8,6 +8,7 @@ package topology import ( "context" + "errors" "fmt" "time" @@ -86,9 +87,9 @@ type pinnedConnections struct { // Error implements the error interface. func (w WaitQueueTimeoutError) Error() string { errorMsg := "timed out while checking out a connection from connection pool" - switch w.Wrapped { - case nil: - case context.Canceled: + switch { + case w.Wrapped == nil: + case errors.Is(w.Wrapped, context.Canceled): errorMsg = fmt.Sprintf( "%s: %s", "canceled while checking out a connection from connection pool", diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go index 6e150344d..52461eb68 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool.go @@ -375,6 +375,13 @@ func (p *pool) close(ctx context.Context) { // Empty the idle connections stack and try to deliver ErrPoolClosed to any waiting wantConns // from idleConnWait while holding the idleMu lock. p.idleMu.Lock() + for _, conn := range p.idleConns { + _ = p.removeConnection(conn, reason{ + loggerConn: logger.ReasonConnClosedPoolClosed, + event: event.ReasonPoolClosed, + }, nil) + _ = p.closeConnection(conn) // We don't care about errors while closing the connection. + } p.idleConns = p.idleConns[:0] for { w := p.idleConnWait.popFront() @@ -402,16 +409,6 @@ func (p *pool) close(ctx context.Context) { } p.createConnectionsCond.L.Unlock() - // Now that we're not holding any locks, remove all of the connections we collected from the - // pool. - for _, conn := range conns { - _ = p.removeConnection(conn, reason{ - loggerConn: logger.ReasonConnClosedPoolClosed, - event: event.ReasonPoolClosed, - }, nil) - _ = p.closeConnection(conn) // We don't care about errors while closing the connection. - } - if mustLogPoolMessage(p) { logPoolMessage(p, logger.ConnectionPoolClosed) } @@ -422,6 +419,16 @@ func (p *pool) close(ctx context.Context) { Address: p.address.String(), }) } + + // Now that we're not holding any locks, remove all of the connections we collected from the + // pool. + for _, conn := range conns { + _ = p.removeConnection(conn, reason{ + loggerConn: logger.ReasonConnClosedPoolClosed, + event: event.ReasonPoolClosed, + }, nil) + _ = p.closeConnection(conn) // We don't care about errors while closing the connection. + } } func (p *pool) pinConnectionToCursor() { @@ -460,6 +467,7 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { }) } + start := time.Now() // Check the pool state while holding a stateMu read lock. If the pool state is not "ready", // return an error. Do all of this while holding the stateMu read lock to prevent a state change between // checking the state and entering the wait queue. 
Not holding the stateMu read lock here may @@ -470,8 +478,10 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { case poolClosed: p.stateMu.RUnlock() + duration := time.Since(start) if mustLogPoolMessage(p) { keysAndValues := logger.KeyValues{ + logger.KeyDurationMS, duration.Milliseconds(), logger.KeyReason, logger.ReasonConnCheckoutFailedPoolClosed, } @@ -480,9 +490,10 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ - Type: event.GetFailed, - Address: p.address.String(), - Reason: event.ReasonPoolClosed, + Type: event.GetFailed, + Address: p.address.String(), + Duration: duration, + Reason: event.ReasonPoolClosed, }) } return nil, ErrPoolClosed @@ -490,8 +501,10 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { err := poolClearedError{err: p.lastClearErr, address: p.address} p.stateMu.RUnlock() + duration := time.Since(start) if mustLogPoolMessage(p) { keysAndValues := logger.KeyValues{ + logger.KeyDurationMS, duration.Milliseconds(), logger.KeyReason, logger.ReasonConnCheckoutFailedError, } @@ -500,10 +513,11 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ - Type: event.GetFailed, - Address: p.address.String(), - Reason: event.ReasonConnectionErrored, - Error: err, + Type: event.GetFailed, + Address: p.address.String(), + Duration: duration, + Reason: event.ReasonConnectionErrored, + Error: err, }) } return nil, err @@ -532,9 +546,11 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { // or an error, so unlock the stateMu lock here. p.stateMu.RUnlock() + duration := time.Since(start) if w.err != nil { if mustLogPoolMessage(p) { keysAndValues := logger.KeyValues{ + logger.KeyDurationMS, duration.Milliseconds(), logger.KeyReason, logger.ReasonConnCheckoutFailedError, } @@ -543,18 +559,21 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ - Type: event.GetFailed, - Address: p.address.String(), - Reason: event.ReasonConnectionErrored, - Error: w.err, + Type: event.GetFailed, + Address: p.address.String(), + Duration: duration, + Reason: event.ReasonConnectionErrored, + Error: w.err, }) } return nil, w.err } + duration = time.Since(start) if mustLogPoolMessage(p) { keysAndValues := logger.KeyValues{ logger.KeyDriverConnectionID, w.conn.driverConnectionID, + logger.KeyDurationMS, duration.Milliseconds(), } logPoolMessage(p, logger.ConnectionCheckedOut, keysAndValues...) @@ -565,6 +584,7 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { Type: event.GetSucceeded, Address: p.address.String(), ConnectionID: w.conn.driverConnectionID, + Duration: duration, }) } @@ -577,12 +597,14 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { p.stateMu.RUnlock() // Wait for either the wantConn to be ready or for the Context to time out. 
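The checkOut instrumentation added above records a single start time and reports an elapsed duration on every exit path, successful or not, so pool events and logs always carry how long the caller waited. A trivial sketch of the measure-once, report-everywhere shape (this uses defer for brevity, where the pool code computes the duration inline at each exit):

package main

import (
	"errors"
	"fmt"
	"time"
)

// checkOut is a toy stand-in: whichever path it exits through, it reports
// how long the caller waited.
func checkOut(ready bool) error {
	start := time.Now()
	defer func() {
		fmt.Printf("checkout took %dms\n", time.Since(start).Milliseconds())
	}()

	if !ready {
		return errors.New("pool not ready")
	}
	time.Sleep(5 * time.Millisecond) // stand-in for waiting on a connection
	return nil
}

func main() {
	_ = checkOut(true)
	_ = checkOut(false)
}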
- start := time.Now() + waitQueueStart := time.Now() select { case <-w.ready: if w.err != nil { + duration := time.Since(start) if mustLogPoolMessage(p) { keysAndValues := logger.KeyValues{ + logger.KeyDurationMS, duration.Milliseconds(), logger.KeyReason, logger.ReasonConnCheckoutFailedError, logger.KeyError, w.err.Error(), } @@ -592,19 +614,22 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ - Type: event.GetFailed, - Address: p.address.String(), - Reason: event.ReasonConnectionErrored, - Error: w.err, + Type: event.GetFailed, + Address: p.address.String(), + Duration: duration, + Reason: event.ReasonConnectionErrored, + Error: w.err, }) } return nil, w.err } + duration := time.Since(start) if mustLogPoolMessage(p) { keysAndValues := logger.KeyValues{ logger.KeyDriverConnectionID, w.conn.driverConnectionID, + logger.KeyDurationMS, duration.Milliseconds(), } logPoolMessage(p, logger.ConnectionCheckedOut, keysAndValues...) @@ -615,14 +640,17 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { Type: event.GetSucceeded, Address: p.address.String(), ConnectionID: w.conn.driverConnectionID, + Duration: duration, }) } return w.conn, nil case <-ctx.Done(): - duration := time.Since(start) + waitQueueDuration := time.Since(waitQueueStart) + duration := time.Since(start) if mustLogPoolMessage(p) { keysAndValues := logger.KeyValues{ + logger.KeyDurationMS, duration.Milliseconds(), logger.KeyReason, logger.ReasonConnCheckoutFailedTimout, } @@ -631,10 +659,11 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { if p.monitor != nil { p.monitor.Event(&event.PoolEvent{ - Type: event.GetFailed, - Address: p.address.String(), - Reason: event.ReasonTimedOut, - Error: ctx.Err(), + Type: event.GetFailed, + Address: p.address.String(), + Duration: duration, + Reason: event.ReasonTimedOut, + Error: ctx.Err(), }) } @@ -643,7 +672,7 @@ func (p *pool) checkOut(ctx context.Context) (conn *connection, err error) { maxPoolSize: p.maxSize, totalConnections: p.totalConnectionCount(), availableConnections: p.availableConnectionCount(), - waitDuration: duration, + waitDuration: waitQueueDuration, } if p.loadBalanced { err.pinnedConnections = &pinnedConnections{ @@ -735,6 +764,81 @@ func (p *pool) removeConnection(conn *connection, reason reason, err error) erro return nil } +var ( + // BGReadTimeout is the maximum amount of the to wait when trying to read + // the server reply on a connection after an operation timed out. The + // default is 1 second. + // + // Deprecated: BGReadTimeout is intended for internal use only and may be + // removed or modified at any time. + BGReadTimeout = 1 * time.Second + + // BGReadCallback is a callback for monitoring the behavior of the + // background-read-on-timeout connection preserving mechanism. + // + // Deprecated: BGReadCallback is intended for internal use only and may be + // removed or modified at any time. + BGReadCallback func(addr string, start, read time.Time, errs []error, connClosed bool) +) + +// bgRead sets a new read deadline on the provided connection (1 second in the +// future) and tries to read any bytes returned by the server. If successful, it +// checks the connection into the provided pool. If there are any errors, it +// closes the connection. +// +// It calls the package-global BGReadCallback function, if set, with the +// address, timings, and any errors that occurred. 
+func bgRead(pool *pool, conn *connection) { + var start, read time.Time + start = time.Now() + errs := make([]error, 0) + connClosed := false + + defer func() { + // No matter what happens, always check the connection back into the + // pool, which will either make it available for other operations or + // remove it from the pool if it was closed. + err := pool.checkInNoEvent(conn) + if err != nil { + errs = append(errs, fmt.Errorf("error checking in: %w", err)) + } + + if BGReadCallback != nil { + BGReadCallback(conn.addr.String(), start, read, errs, connClosed) + } + }() + + err := conn.nc.SetReadDeadline(time.Now().Add(BGReadTimeout)) + if err != nil { + errs = append(errs, fmt.Errorf("error setting a read deadline: %w", err)) + + connClosed = true + err := conn.close() + if err != nil { + errs = append(errs, fmt.Errorf("error closing conn after setting read deadline: %w", err)) + } + + return + } + + // The context here is only used for cancellation, not deadline timeout, so + // use context.Background(). The read timeout is set by calling + // SetReadDeadline above. + _, _, err = conn.read(context.Background()) + read = time.Now() + if err != nil { + errs = append(errs, fmt.Errorf("error reading: %w", err)) + + connClosed = true + err := conn.close() + if err != nil { + errs = append(errs, fmt.Errorf("error closing conn after reading: %w", err)) + } + + return + } +} + // checkIn returns an idle connection to the pool. If the connection is perished or the pool is // closed, it is removed from the connection pool and closed. func (p *pool) checkIn(conn *connection) error { @@ -774,6 +878,20 @@ func (p *pool) checkInNoEvent(conn *connection) error { return ErrWrongPool } + // If the connection has an awaiting server response, try to read the + // response in another goroutine before checking it back into the pool. + // + // Do this here because we want to publish checkIn events when the operation + // is done with the connection, not when it's ready to be used again. That + // means that connections in "awaiting response" state are checked in but + // not usable, which is not covered by the current pool events. We may need + // to add pool event information in the future to communicate that. + if conn.awaitingResponse { + conn.awaitingResponse = false + go bgRead(p, conn) + return nil + } + // Bump the connection idle deadline here because we're about to make the connection "available". // The idle deadline is used to determine when a connection has reached its max idle time and // should be closed. A connection reaches its max idle time when it has been "available" in the @@ -782,20 +900,16 @@ func (p *pool) checkInNoEvent(conn *connection) error { // connection should never be perished due to max idle time. conn.bumpIdleDeadline() - if reason, perished := connectionPerished(conn); perished { - _ = p.removeConnection(conn, reason, nil) - go func() { - _ = p.closeConnection(conn) - }() - return nil - } - - if conn.pool.getState() == poolClosed { - _ = p.removeConnection(conn, reason{ + r, perished := connectionPerished(conn) + if !perished && conn.pool.getState() == poolClosed { + perished = true + r = reason{ loggerConn: logger.ReasonConnClosedPoolClosed, event: event.ReasonPoolClosed, - }, nil) - + } + } + if perished { + _ = p.removeConnection(conn, r, nil) go func() { _ = p.closeConnection(conn) }() @@ -825,12 +939,37 @@ func (p *pool) checkInNoEvent(conn *connection) error { return nil } +// clear calls clearImpl internally with a false interruptAllConnections value. 
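bgRead above implements connection preservation after a CSOT timeout: rather than discarding the socket, a goroutine gives the server a short grace period to finish its reply, and only a failed read closes the connection. A rough standalone sketch of the idea (drainLateReply is an invented name, and a real implementation would need full wire-message framing rather than a single Read):

package main

import (
	"fmt"
	"net"
	"time"
)

// drainLateReply gives the peer a short grace period to finish a reply that
// arrived after the operation timed out; the connection is kept only if the
// read succeeds, otherwise it is closed because its state is unknown.
func drainLateReply(conn net.Conn, grace time.Duration) (keep bool) {
	if err := conn.SetReadDeadline(time.Now().Add(grace)); err != nil {
		conn.Close()
		return false
	}
	buf := make([]byte, 4096)
	if _, err := conn.Read(buf); err != nil {
		conn.Close()
		return false
	}
	return true // reply drained; safe to hand back to a pool
}

func main() {
	client, server := net.Pipe()
	go func() {
		time.Sleep(10 * time.Millisecond) // the "late" server reply
		server.Write([]byte("reply"))
	}()
	fmt.Println(drainLateReply(client, time.Second)) // true
}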
+func (p *pool) clear(err error, serviceID *primitive.ObjectID) { + p.clearImpl(err, serviceID, false) +} + +// clearAll does same as the "clear" method but interrupts all connections. +func (p *pool) clearAll(err error, serviceID *primitive.ObjectID) { + p.clearImpl(err, serviceID, true) +} + +// interruptConnections interrupts the input connections. +func (p *pool) interruptConnections(conns []*connection) { + for _, conn := range conns { + _ = p.removeConnection(conn, reason{ + loggerConn: logger.ReasonConnClosedStale, + event: event.ReasonStale, + }, nil) + go func(c *connection) { + _ = p.closeConnection(c) + }(conn) + } +} + // clear marks all connections as stale by incrementing the generation number, stops all background // goroutines, removes all requests from idleConnWait and newConnWait, and sets the pool state to // "paused". If serviceID is nil, clear marks all connections as stale. If serviceID is not nil, // clear marks only connections associated with the given serviceID stale (for use in load balancer // mode). -func (p *pool) clear(err error, serviceID *primitive.ObjectID) { +// If interruptAllConnections is true, this function calls interruptConnections to interrupt all +// non-idle connections. +func (p *pool) clearImpl(err error, serviceID *primitive.ObjectID, interruptAllConnections bool) { if p.getState() == poolClosed { return } @@ -854,7 +993,51 @@ func (p *pool) clear(err error, serviceID *primitive.ObjectID) { } p.lastClearErr = err p.stateMu.Unlock() + } + + if mustLogPoolMessage(p) { + keysAndValues := logger.KeyValues{ + logger.KeyServiceID, serviceID, + } + + logPoolMessage(p, logger.ConnectionPoolCleared, keysAndValues...) + } + if sendEvent && p.monitor != nil { + event := &event.PoolEvent{ + Type: event.PoolCleared, + Address: p.address.String(), + ServiceID: serviceID, + Interruption: interruptAllConnections, + Error: err, + } + p.monitor.Event(event) + } + + p.removePerishedConns() + if interruptAllConnections { + p.createConnectionsCond.L.Lock() + p.idleMu.Lock() + + idleConns := make(map[*connection]bool, len(p.idleConns)) + for _, idle := range p.idleConns { + idleConns[idle] = true + } + + conns := make([]*connection, 0, len(p.conns)) + for _, conn := range p.conns { + if _, ok := idleConns[conn]; !ok && p.stale(conn) { + conns = append(conns, conn) + } + } + + p.idleMu.Unlock() + p.createConnectionsCond.L.Unlock() + + p.interruptConnections(conns) + } + + if serviceID == nil { pcErr := poolClearedError{err: err, address: p.address} // Clear the idle connections wait queue. @@ -881,23 +1064,6 @@ func (p *pool) clear(err error, serviceID *primitive.ObjectID) { } p.createConnectionsCond.L.Unlock() } - - if mustLogPoolMessage(p) { - keysAndValues := logger.KeyValues{ - logger.KeyServiceID, serviceID, - } - - logPoolMessage(p, logger.ConnectionPoolCleared, keysAndValues...) - } - - if sendEvent && p.monitor != nil { - p.monitor.Event(&event.PoolEvent{ - Type: event.PoolCleared, - Address: p.address.String(), - ServiceID: serviceID, - Error: err, - }) - } } // getOrQueueForIdleConn attempts to deliver an idle connection to the given wantConn. If there is @@ -1030,6 +1196,7 @@ func (p *pool) createConnections(ctx context.Context, wg *sync.WaitGroup) { }) } + start := time.Now() // Pass the createConnections context to connect to allow pool close to cancel connection // establishment so shutdown doesn't block indefinitely if connectTimeout=0. 
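interruptConnections and the clearImpl changes above follow a common lock discipline: snapshot the affected connections while holding the pool locks, release the locks, then do the slow close work. A toy illustration (the pool type and interruptNonIdle are invented for this sketch):

package main

import (
	"fmt"
	"sync"
)

// pool is a toy: conns maps a connection ID to whether it is idle.
type pool struct {
	mu    sync.Mutex
	conns map[int]bool
}

// interruptNonIdle snapshots its victims while holding the mutex and does
// the slow close work only after releasing it.
func (p *pool) interruptNonIdle() {
	p.mu.Lock()
	var victims []int
	for id, idle := range p.conns {
		if !idle {
			victims = append(victims, id)
		}
	}
	p.mu.Unlock()

	for _, id := range victims {
		fmt.Println("closing connection", id) // outside the lock
	}
}

func main() {
	p := &pool{conns: map[int]bool{1: true, 2: false, 3: false}}
	p.interruptNonIdle()
}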
err := conn.connect(ctx) @@ -1056,9 +1223,11 @@ func (p *pool) createConnections(ctx context.Context, wg *sync.WaitGroup) { continue } + duration := time.Since(start) if mustLogPoolMessage(p) { keysAndValues := logger.KeyValues{ logger.KeyDriverConnectionID, conn.driverConnectionID, + logger.KeyDurationMS, duration.Milliseconds(), } logPoolMessage(p, logger.ConnectionReady, keysAndValues...) @@ -1069,6 +1238,7 @@ func (p *pool) createConnections(ctx context.Context, wg *sync.WaitGroup) { Type: event.ConnectionReady, Address: p.address.String(), ConnectionID: conn.driverConnectionID, + Duration: duration, }) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool_generation_counter.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool_generation_counter.go index 47fac2f61..dd10c0ce7 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool_generation_counter.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/pool_generation_counter.go @@ -112,25 +112,21 @@ func (p *poolGenerationMap) stale(serviceIDPtr *primitive.ObjectID, knownGenerat return true } - serviceID := getServiceID(serviceIDPtr) - p.Lock() - defer p.Unlock() - - if stats, ok := p.generationMap[serviceID]; ok { - return knownGeneration < stats.generation + if generation, ok := p.getGeneration(serviceIDPtr); ok { + return knownGeneration < generation } return false } -func (p *poolGenerationMap) getGeneration(serviceIDPtr *primitive.ObjectID) uint64 { +func (p *poolGenerationMap) getGeneration(serviceIDPtr *primitive.ObjectID) (uint64, bool) { serviceID := getServiceID(serviceIDPtr) p.Lock() defer p.Unlock() if stats, ok := p.generationMap[serviceID]; ok { - return stats.generation + return stats.generation, true } - return 0 + return 0, false } func (p *poolGenerationMap) getNumConns(serviceIDPtr *primitive.ObjectID) uint64 { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go index 0934beed8..c7b168dc2 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/rtt_monitor.go @@ -56,7 +56,6 @@ type rttMonitor struct { cfg *rttConfig ctx context.Context cancelFn context.CancelFunc - started bool } var _ driver.RTTMonitor = &rttMonitor{} @@ -83,7 +82,6 @@ func (r *rttMonitor) connect() { r.connMu.Lock() defer r.connMu.Unlock() - r.started = true r.closeWg.Add(1) go func() { @@ -97,10 +95,6 @@ func (r *rttMonitor) disconnect() { r.connMu.Lock() defer r.connMu.Unlock() - if !r.started { - return - } - r.cancelFn() // Wait for the existing connection to complete. 
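getGeneration now returns a (uint64, bool) pair, so callers such as stale and ProcessHandshakeError can tell a missing service entry apart from a legitimate generation of zero. A compact sketch of the comma-ok check (generations is an invented stand-in for poolGenerationMap):

package main

import "fmt"

// generations maps a service ID to its pool generation; the comma-ok form
// distinguishes "unknown service" from "generation zero".
type generations map[string]uint64

func (g generations) stale(service string, known uint64) bool {
	if gen, ok := g[service]; ok {
		return known < gen
	}
	return false // no entry: nothing to compare against, so not stale
}

func main() {
	g := generations{"svc-a": 3}
	fmt.Println(g.stale("svc-a", 2)) // true: connection predates generation 3
	fmt.Println(g.stale("svc-b", 0)) // false: unknown service, not stale
}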
@@ -267,7 +261,7 @@ func percentile(perc float64, samples []time.Duration, minSamples int) time.Dura p, err := stats.Percentile(floatSamples, perc) if err != nil { - panic(fmt.Errorf("x/mongo/driver/topology: error calculating %f percentile RTT: %v for samples:\n%v", perc, err, floatSamples)) + panic(fmt.Errorf("x/mongo/driver/topology: error calculating %f percentile RTT: %w for samples:\n%v", perc, err, floatSamples)) } return time.Duration(p) } @@ -318,11 +312,14 @@ func (r *rttMonitor) Stats() string { var err error stdDev, err = stats.StandardDeviation(floatSamples) if err != nil { - panic(fmt.Errorf("x/mongo/driver/topology: error calculating standard deviation RTT: %v for samples:\n%v", err, floatSamples)) + panic(fmt.Errorf("x/mongo/driver/topology: error calculating standard deviation RTT: %w for samples:\n%v", err, floatSamples)) } } - return fmt.Sprintf(`Round-trip-time monitor statistics:`+"\n"+ - `average RTT: %v, minimum RTT: %v, 90th percentile RTT: %v, standard dev: %v`+"\n", - time.Duration(avg), r.minRTT, r.rtt90, time.Duration(stdDev)) + return fmt.Sprintf( + "network round-trip time stats: avg: %v, min: %v, 90th pct: %v, stddev: %v", + time.Duration(avg), + r.minRTT, + r.rtt90, + time.Duration(stdDev)) } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go index 5823d3d7a..99f8dd618 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go @@ -15,6 +15,7 @@ import ( "sync/atomic" "time" + "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/internal/driverutil" @@ -124,6 +125,7 @@ type Server struct { processErrorLock sync.Mutex rttMonitor *rttMonitor + monitorOnce sync.Once } // updateTopologyCallback is a callback used to create a server that should be called when the parent Topology instance @@ -284,10 +286,10 @@ func (s *Server) Disconnect(ctx context.Context) error { close(s.done) s.cancelCheck() - s.rttMonitor.disconnect() s.pool.close(ctx) s.closewg.Wait() + s.rttMonitor.disconnect() atomic.StoreInt64(&s.state, serverDisconnected) return nil @@ -333,7 +335,7 @@ func (s *Server) ProcessHandshakeError(err error, startingGenerationNumber uint6 return } // Ignore the error if the connection is stale. - if startingGenerationNumber < s.pool.generation.getGeneration(serviceID) { + if generation, _ := s.pool.generation.getGeneration(serviceID); startingGenerationNumber < generation { return } @@ -415,8 +417,8 @@ func (s *Server) RequestImmediateCheck() { // (error, true) if the error is a WriteConcernError and the falls under the requirements for SDAM error // handling and (nil, false) otherwise. 
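The getWriteConcernErrorForProcessing hunk that follows swaps a type assertion for errors.As, so a WriteCommandError is still recognized after being wrapped. A self-contained sketch with a stand-in type (writeCmdError is invented for the example):

package main

import (
	"errors"
	"fmt"
)

// writeCmdError stands in for a concrete error type that used to be
// extracted with a type assertion.
type writeCmdError struct{ msg string }

func (e writeCmdError) Error() string { return e.msg }

func main() {
	err := fmt.Errorf("operation failed: %w", writeCmdError{msg: "write concern violated"})

	// A direct type assertion fails once the error is wrapped:
	_, ok := err.(writeCmdError)
	fmt.Println(ok) // false

	// errors.As walks the chain and fills the target on a match:
	var wce writeCmdError
	fmt.Println(errors.As(err, &wce), wce.msg) // true write concern violated
}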
func getWriteConcernErrorForProcessing(err error) (*driver.WriteConcernError, bool) { - writeCmdErr, ok := err.(driver.WriteCommandError) - if !ok { + var writeCmdErr driver.WriteCommandError + if !errors.As(err, &writeCmdErr) { return nil, false } @@ -525,7 +527,7 @@ func (s *Server) ProcessError(err error, conn driver.Connection) driver.ProcessE if netErr, ok := wrappedConnErr.(net.Error); ok && netErr.Timeout() { return driver.NoChange } - if wrappedConnErr == context.Canceled || wrappedConnErr == context.DeadlineExceeded { + if errors.Is(wrappedConnErr, context.Canceled) || errors.Is(wrappedConnErr, context.DeadlineExceeded) { return driver.NoChange } @@ -549,9 +551,7 @@ func (s *Server) update() { checkNow := s.checkNow done := s.done - defer func() { - _ = recover() - }() + defer logUnexpectedFailure(s.cfg.logger, "Encountered unexpected failure updating server") closeServer := func() { s.subLock.Lock() @@ -603,7 +603,7 @@ func (s *Server) update() { // Perform the next check. desc, err := s.check() - if err == errCheckCancelled { + if errors.Is(err, errCheckCancelled) { if atomic.LoadInt64(&s.state) != serverConnected { continue } @@ -625,7 +625,7 @@ func (s *Server) update() { // Retry after the first timeout before clearing the pool in case of a FAAS pause as // described in GODRIVER-2577. if err := unwrapConnectionError(desc.LastError); err != nil && timeoutCnt < 1 { - if err == context.Canceled || err == context.DeadlineExceeded { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { timeoutCnt++ // We want to immediately retry on timeout error. Continue to next loop. return true @@ -640,7 +640,11 @@ func (s *Server) update() { // Clear the pool once the description has been updated to Unknown. Pass in a nil service ID to clear // because the monitoring routine only runs for non-load balanced deployments in which servers don't return // IDs. - s.pool.clear(err, nil) + if timeoutCnt > 0 { + s.pool.clearAll(err, nil) + } else { + s.pool.clear(err, nil) + } } // We're either not handling a timeout error, or we just handled the 2nd consecutive // timeout error. In either case, reset the timeout count to 0 and return false to @@ -658,8 +662,8 @@ func (s *Server) update() { transitionedFromNetworkError := desc.LastError != nil && unwrapConnectionError(desc.LastError) != nil && previousDescription.Kind != description.Unknown - if isStreamingEnabled(s) && isStreamable(s) && !s.rttMonitor.started { - s.rttMonitor.connect() + if isStreamingEnabled(s) && isStreamable(s) { + s.monitorOnce.Do(s.rttMonitor.connect) } if isStreamable(s) || connectionIsStreaming || transitionedFromNetworkError { @@ -683,10 +687,7 @@ func (s *Server) updateDescription(desc description.Server) { return } - defer func() { - // ¯\_(ツ)_/¯ - _ = recover() - }() + defer logUnexpectedFailure(s.cfg.logger, "Encountered unexpected failure updating server description") // Anytime we update the server description to something other than "unknown", set the pool to // "ready". 
Do this before updating the description so that connections can be checked out as @@ -1060,10 +1061,24 @@ func (s *Server) publishServerHeartbeatSucceededEvent(connectionID string, } if mustLogServerMessage(s) { - logServerMessage(s, logger.TopologyServerHeartbeatStarted, + descRaw, _ := bson.Marshal(struct { + description.Server `bson:",inline"` + Ok int32 + }{ + Server: desc, + Ok: func() int32 { + if desc.LastError != nil { + return 0 + } + + return 1 + }(), + }) + + logServerMessage(s, logger.TopologyServerHeartbeatSucceeded, logger.KeyAwaited, await, logger.KeyDurationMS, duration.Milliseconds(), - logger.KeyReply, desc) + logger.KeyReply, bson.Raw(descRaw).String()) } } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go index bbffbd1da..0fb913d21 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/topology.go @@ -4,10 +4,19 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 -// Package topology contains types that handles the discovery, monitoring, and selection -// of servers. This package is designed to expose enough inner workings of service discovery -// and monitoring to allow low level applications to have fine grained control, while hiding -// most of the detailed implementation of the algorithms. +// Package topology is intended for internal use only. It is made available to +// facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! +// +// Package topology contains types that handles the discovery, monitoring, and +// selection of servers. This package is designed to expose enough inner +// workings of service discovery and monitoring to allow low level applications +// to have fine grained control, while hiding most of the detailed +// implementation of the algorithms. package topology // import "go.mongodb.org/mongo-driver/x/mongo/driver/topology" import ( @@ -15,7 +24,6 @@ import ( "errors" "fmt" "net" - "net/url" "strconv" "strings" "sync" @@ -30,6 +38,7 @@ import ( "go.mongodb.org/mongo-driver/mongo/description" "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/x/mongo/driver" + "go.mongodb.org/mongo-driver/x/mongo/driver/connstring" "go.mongodb.org/mongo-driver/x/mongo/driver/dns" ) @@ -87,6 +96,8 @@ type Topology struct { rescanSRVInterval time.Duration pollHeartbeatTime atomic.Value // holds a bool + hosts []string + updateCallback updateTopologyCallback fsm *fsm @@ -153,7 +164,12 @@ func New(cfg *Config) (*Topology, error) { } if t.cfg.URI != "" { - t.pollingRequired = strings.HasPrefix(t.cfg.URI, "mongodb+srv://") && !t.cfg.LoadBalanced + connStr, err := connstring.Parse(t.cfg.URI) + if err != nil { + return nil, err + } + t.pollingRequired = (connStr.Scheme == connstring.SchemeMongoDBSRV) && !t.cfg.LoadBalanced + t.hosts = connStr.RawHosts } t.publishTopologyOpeningEvent() @@ -269,6 +285,32 @@ func logServerSelectionFailed( logger.KeyFailure, err.Error()) } +// logUnexpectedFailure is a defer-recover function for logging unexpected +// failures encountered while maintaining a topology. 
+// +// Most topology maintenance actions, such as updating a server, should not take +// down a client's application. This function provides a best-effort to log +// unexpected failures. If the logger passed to this function is nil, then the +// recovery will be silent. +func logUnexpectedFailure(log *logger.Logger, msg string, callbacks ...func()) { + r := recover() + if r == nil { + return + } + + defer func() { + for _, clbk := range callbacks { + clbk() + } + }() + + if log == nil { + return + } + + log.Print(logger.LevelInfo, logger.ComponentTopology, fmt.Sprintf("%s: %v", msg, r)) +} + // Connect initializes a Topology and starts the monitoring process. This function // must be called to properly monitor the topology. func (t *Topology) Connect() error { @@ -351,26 +393,21 @@ func (t *Topology) Connect() error { } t.serversLock.Unlock() - uri, err := url.Parse(t.cfg.URI) - if err != nil { - return err - } - parsedHosts := strings.Split(uri.Host, ",") if mustLogTopologyMessage(t, logger.LevelInfo) { - logTopologyThirdPartyUsage(t, parsedHosts) + logTopologyThirdPartyUsage(t, t.hosts) } if t.pollingRequired { // sanity check before passing the hostname to resolver - if len(parsedHosts) != 1 { + if len(t.hosts) != 1 { return fmt.Errorf("URI with SRV must include one and only one hostname") } - _, _, err = net.SplitHostPort(uri.Host) + _, _, err = net.SplitHostPort(t.hosts[0]) if err == nil { // we were able to successfully extract a port from the host, // but should not be able to when using SRV return fmt.Errorf("URI with srv must not include a port number") } - go t.pollSRVRecords(uri.Host) + go t.pollSRVRecords(t.hosts[0]) t.pollingwg.Add(1) } @@ -546,7 +583,7 @@ func (t *Topology) SelectServer(ctx context.Context, ss description.ServerSelect return nil, err } - defer t.Unsubscribe(sub) + defer func() { _ = t.Unsubscribe(sub) }() } suitable, selectErr = t.selectServerFromSubscription(ctx, sub.Updates, selectionState) @@ -768,12 +805,11 @@ func (t *Topology) pollSRVRecords(hosts string) { defer pollTicker.Stop() t.pollHeartbeatTime.Store(false) var doneOnce bool - defer func() { - // ¯\_(ツ)_/¯ - if r := recover(); r != nil && !doneOnce { + defer logUnexpectedFailure(t.cfg.logger, "Encountered unexpected failure polling SRV records", func() { + if !doneOnce { <-t.pollingDone } - }() + }) for { select { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go index abf09c15b..2199f855b 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/wiremessage/wiremessage.go @@ -4,10 +4,18 @@ // not use this file except in compliance with the License. You may obtain // a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// Package wiremessage is intended for internal use only. It is made available +// to facilitate use cases that require access to internal MongoDB driver +// functionality and state. The API of this package is not stable and there is +// no backward compatibility guarantee. +// +// WARNING: THIS PACKAGE IS EXPERIMENTAL AND MAY BE MODIFIED OR REMOVED WITHOUT +// NOTICE! USE WITH EXTREME CAUTION! 
package wiremessage import ( "bytes" + "encoding/binary" "strings" "sync/atomic" @@ -231,10 +239,11 @@ func ReadHeader(src []byte) (length, requestID, responseTo int32, opcode OpCode, if len(src) < 16 { return 0, 0, 0, 0, src, false } - length = (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24) - requestID = (int32(src[4]) | int32(src[5])<<8 | int32(src[6])<<16 | int32(src[7])<<24) - responseTo = (int32(src[8]) | int32(src[9])<<8 | int32(src[10])<<16 | int32(src[11])<<24) - opcode = OpCode(int32(src[12]) | int32(src[13])<<8 | int32(src[14])<<16 | int32(src[15])<<24) + + length = readi32unsafe(src) + requestID = readi32unsafe(src[4:]) + responseTo = readi32unsafe(src[8:]) + opcode = OpCode(readi32unsafe(src[12:])) return length, requestID, responseTo, opcode, src[16:], true } @@ -486,7 +495,7 @@ func ReadReplyCursorID(src []byte) (cursorID int64, rem []byte, ok bool) { return readi64(src) } -// ReadReplyStartingFrom reads the starting from from src. +// ReadReplyStartingFrom reads the starting from src. func ReadReplyStartingFrom(src []byte) (startingFrom int32, rem []byte, ok bool) { return readi32(src) } @@ -570,12 +579,16 @@ func ReadKillCursorsCursorIDs(src []byte, numIDs int32) (cursorIDs []int64, rem return cursorIDs, src, true } -func appendi32(dst []byte, i32 int32) []byte { - return append(dst, byte(i32), byte(i32>>8), byte(i32>>16), byte(i32>>24)) +func appendi32(dst []byte, x int32) []byte { + b := []byte{0, 0, 0, 0} + binary.LittleEndian.PutUint32(b, uint32(x)) + return append(dst, b...) } -func appendi64(b []byte, i int64) []byte { - return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24), byte(i>>32), byte(i>>40), byte(i>>48), byte(i>>56)) +func appendi64(dst []byte, x int64) []byte { + b := []byte{0, 0, 0, 0, 0, 0, 0, 0} + binary.LittleEndian.PutUint64(b, uint64(x)) + return append(dst, b...) } func appendCString(b []byte, str string) []byte { @@ -587,21 +600,18 @@ func readi32(src []byte) (int32, []byte, bool) { if len(src) < 4 { return 0, src, false } - - return (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24), src[4:], true + return readi32unsafe(src), src[4:], true } func readi32unsafe(src []byte) int32 { - return (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24) + return int32(binary.LittleEndian.Uint32(src)) } func readi64(src []byte) (int64, []byte, bool) { if len(src) < 8 { return 0, src, false } - i64 := (int64(src[0]) | int64(src[1])<<8 | int64(src[2])<<16 | int64(src[3])<<24 | - int64(src[4])<<32 | int64(src[5])<<40 | int64(src[6])<<48 | int64(src[7])<<56) - return i64, src[8:], true + return int64(binary.LittleEndian.Uint64(src)), src[8:], true } func readcstring(src []byte) (string, []byte, bool) { diff --git a/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/golang.org/x/crypto/ocsp/ocsp.go index bf2259537..e6c645e7c 100644 --- a/vendor/golang.org/x/crypto/ocsp/ocsp.go +++ b/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -5,7 +5,7 @@ // Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses // are signed messages attesting to the validity of a certificate for a small // period of time. This is used to manage revocation for X.509 certificates. 
-package ocsp // import "golang.org/x/crypto/ocsp" +package ocsp import ( "crypto" diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go index 904b57e01..28cd99c7f 100644 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -16,7 +16,7 @@ Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To choose, you can pass the `New` functions from the different SHA packages to pbkdf2.Key. */ -package pbkdf2 // import "golang.org/x/crypto/pbkdf2" +package pbkdf2 import ( "crypto/hmac" diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go index c971a99fa..76fa40fb2 100644 --- a/vendor/golang.org/x/crypto/scrypt/scrypt.go +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -5,7 +5,7 @@ // Package scrypt implements the scrypt key derivation function as defined in // Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard // Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). -package scrypt // import "golang.org/x/crypto/scrypt" +package scrypt import ( "crypto/sha256" diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go new file mode 100644 index 000000000..fbf1934a0 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/cmp.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// min is a version of the predeclared function from the Go 1.21 release. +func min[T constraints.Ordered](a, b T) T { + if a < b || isNaN(a) { + return a + } + return b +} + +// max is a version of the predeclared function from the Go 1.21 release. +func max[T constraints.Ordered](a, b T) T { + if a > b || isNaN(a) { + return a + } + return b +} + +// cmpLess is a copy of cmp.Less from the Go 1.21 release. +func cmpLess[T constraints.Ordered](x, y T) bool { + return (isNaN(x) && !isNaN(y)) || x < y +} + +// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. +func cmpCompare[T constraints.Ordered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN && yNaN { + return 0 + } + if xNaN || x < y { + return -1 + } + if yNaN || x > y { + return +1 + } + return 0 +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index cff0cd49e..46ceac343 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -3,23 +3,20 @@ // license that can be found in the LICENSE file. // Package slices defines various functions useful with slices of any type. -// Unless otherwise specified, these functions all apply to the elements -// of a slice at index 0 <= i < len(s). -// -// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a -// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), -// or the sorting may fail to sort correctly. A common case is when sorting slices of -// floating-point numbers containing NaN values. package slices -import "golang.org/x/exp/constraints" +import ( + "unsafe" + + "golang.org/x/exp/constraints" +) // Equal reports whether two slices are equal: the same length and all // elements equal. If the lengths are different, Equal returns false. 
// Otherwise, the elements are compared in increasing index order, and the // comparison stops at the first unequal pair. // Floating point NaNs are not considered equal. -func Equal[E comparable](s1, s2 []E) bool { +func Equal[S ~[]E, E comparable](s1, s2 S) bool { if len(s1) != len(s2) { return false } @@ -31,12 +28,12 @@ func Equal[E comparable](s1, s2 []E) bool { return true } -// EqualFunc reports whether two slices are equal using a comparison +// EqualFunc reports whether two slices are equal using an equality // function on each pair of elements. If the lengths are different, // EqualFunc returns false. Otherwise, the elements are compared in // increasing index order, and the comparison stops at the first index // for which eq returns false. -func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { +func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { if len(s1) != len(s2) { return false } @@ -49,45 +46,37 @@ func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { return true } -// Compare compares the elements of s1 and s2. -// The elements are compared sequentially, starting at index 0, +// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair +// of elements. The elements are compared sequentially, starting at index 0, // until one element is not equal to the other. // The result of comparing the first non-matching elements is returned. // If both slices are equal until one of them ends, the shorter slice is // considered less than the longer one. // The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. -// Comparisons involving floating point NaNs are ignored. -func Compare[E constraints.Ordered](s1, s2 []E) int { - s2len := len(s2) +func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { for i, v1 := range s1 { - if i >= s2len { + if i >= len(s2) { return +1 } v2 := s2[i] - switch { - case v1 < v2: - return -1 - case v1 > v2: - return +1 + if c := cmpCompare(v1, v2); c != 0 { + return c } } - if len(s1) < s2len { + if len(s1) < len(s2) { return -1 } return 0 } -// CompareFunc is like Compare but uses a comparison function -// on each pair of elements. The elements are compared in increasing -// index order, and the comparisons stop after the first time cmp -// returns non-zero. +// CompareFunc is like [Compare] but uses a custom comparison function on each +// pair of elements. // The result is the first non-zero result of cmp; if cmp always // returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), // and +1 if len(s1) > len(s2). -func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { - s2len := len(s2) +func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { for i, v1 := range s1 { - if i >= s2len { + if i >= len(s2) { return +1 } v2 := s2[i] @@ -95,7 +84,7 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { return c } } - if len(s1) < s2len { + if len(s1) < len(s2) { return -1 } return 0 @@ -103,9 +92,9 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { // Index returns the index of the first occurrence of v in s, // or -1 if not present. 
-func Index[E comparable](s []E, v E) int { - for i, vs := range s { - if v == vs { +func Index[S ~[]E, E comparable](s S, v E) int { + for i := range s { + if v == s[i] { return i } } @@ -114,9 +103,9 @@ func Index[E comparable](s []E, v E) int { // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. -func IndexFunc[E any](s []E, f func(E) bool) int { - for i, v := range s { - if f(v) { +func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { return i } } @@ -124,66 +113,237 @@ func IndexFunc[E any](s []E, f func(E) bool) int { } // Contains reports whether v is present in s. -func Contains[E comparable](s []E, v E) bool { +func Contains[S ~[]E, E comparable](s S, v E) bool { return Index(s, v) >= 0 } // ContainsFunc reports whether at least one // element e of s satisfies f(e). -func ContainsFunc[E any](s []E, f func(E) bool) bool { +func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { return IndexFunc(s, f) >= 0 } // Insert inserts the values v... into s at index i, // returning the modified slice. -// In the returned slice r, r[i] == v[0]. +// The elements at s[i:] are shifted up to make room. +// In the returned slice r, r[i] == v[0], +// and r[i+len(v)] == value originally at r[i]. // Insert panics if i is out of range. // This function is O(len(s) + len(v)). func Insert[S ~[]E, E any](s S, i int, v ...E) S { - tot := len(s) + len(v) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[i:]) + m := len(v) + if m == 0 { + return s + } + n := len(s) + if i == n { + return append(s, v...) + } + if n+m > cap(s) { + // Use append rather than make so that we bump the size of + // the slice up to the next storage class. + // This is what Grow does but we don't call Grow because + // that might copy the values twice. + s2 := append(s[:i], make(S, n+m-i)...) copy(s2[i:], v) + copy(s2[i+m:], s[i:]) return s2 } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[i:]) - return s2 + s = s[:n+m] + + // before: + // s: aaaaaaaabbbbccccccccdddd + // ^ ^ ^ ^ + // i i+m n n+m + // after: + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // + // a are the values that don't move in s. + // v are the values copied in from v. + // b and c are the values from s that are shifted up in index. + // d are the values that get overwritten, never to be seen again. + + if !overlaps(v, s[i+m:]) { + // Easy case - v does not overlap either the c or d regions. + // (It might be in some of a or b, or elsewhere entirely.) + // The data we copy up doesn't write to v at all, so just do it. + + copy(s[i+m:], s[i:]) + + // Now we have + // s: aaaaaaaabbbbbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // Note the b values are duplicated. + + copy(s[i:], v) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s + } + + // The hard case - v overlaps c or d. We can't just shift up + // the data because we'd move or clobber the values we're trying + // to insert. + // So instead, write v on top of d, then rotate. + copy(s[n:], v) + + // Now we have + // s: aaaaaaaabbbbccccccccvvvv + // ^ ^ ^ ^ + // i i+m n n+m + + rotateRight(s[i:], m) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s +} + +// clearSlice sets all elements up to the length of s to the zero value of E. +// We may use the builtin clear func instead, and remove clearSlice, when upgrading +// to Go 1.21+. 
+func clearSlice[S ~[]E, E any](s S) { + var zero E + for i := range s { + s[i] = zero + } } // Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if s[i:j] is not a valid slice of s. -// Delete modifies the contents of the slice s; it does not create a new slice. -// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. +// Delete is O(len(s)-i), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. -// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those -// elements contain pointers you might consider zeroing those elements so that -// objects they reference can be garbage collected. +// Delete zeroes the elements s[len(s)-(j-i):len(s)]. func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j] // bounds check + _ = s[i:j:len(s)] // bounds check + + if i == j { + return s + } - return append(s[:i], s[j:]...) + oldlen := len(s) + s = append(s[:i], s[j:]...) + clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC + return s +} + +// DeleteFunc removes any elements from s for which del returns true, +// returning the modified slice. +// DeleteFunc zeroes the elements between the new length and the original length. +func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] } // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. +// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { _ = s[i:j] // verify that i:j is a valid subslice + + if i == j { + return Insert(s, i, v...) + } + if j == len(s) { + return append(s[:i], v...) + } + tot := len(s[:i]) + len(v) + len(s[j:]) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[j:]) + if tot > cap(s) { + // Too big to fit, allocate and copy over. + s2 := append(s[:i], make(S, tot-i)...) // See Insert copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) return s2 } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[j:]) - return s2 + + r := s[:tot] + + if i+len(v) <= j { + // Easy, as v fits in the deleted portion. + copy(r[i:], v) + if i+len(v) != j { + copy(r[i+len(v):], s[j:]) + } + clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC + return r + } + + // We are expanding (v is bigger than j-i). + // The situation is something like this: + // (example has i=4,j=8,len(s)=16,len(v)=6) + // s: aaaaxxxxbbbbbbbbyy + // ^ ^ ^ ^ + // i j len(s) tot + // a: prefix of s + // x: deleted range + // b: more of s + // y: area to expand into + + if !overlaps(r[i+len(v):], v) { + // Easy, as v is not clobbered by the first copy. + copy(r[i+len(v):], s[j:]) + copy(r[i:], v) + return r + } + + // This is a situation where we don't have a single place to which + // we can copy v. Parts of it need to go to two different places. + // We want to copy the prefix of v into y and the suffix into x, then + // rotate |y| spots to the right. 
+ // + // v[2:] v[:2] + // | | + // s: aaaavvvvbbbbbbbbvv + // ^ ^ ^ ^ + // i j len(s) tot + // + // If either of those two destinations don't alias v, then we're good. + y := len(v) - (j - i) // length of y portion + + if !overlaps(r[i:j], v) { + copy(r[i:j], v[y:]) + copy(r[len(s):], v[:y]) + rotateRight(r[i:], y) + return r + } + if !overlaps(r[len(s):], v) { + copy(r[len(s):], v[:y]) + copy(r[i:j], v[y:]) + rotateRight(r[i:], y) + return r + } + + // Now we know that v overlaps both x and y. + // That means that the entirety of b is *inside* v. + // So we don't need to preserve b at all; instead we + // can copy v first, then copy the b part of v out of + // v to the right destination. + k := startIdx(v, s[j:]) + copy(r[i:], v) + copy(r[i+len(v):], r[i+k:]) + return r } // Clone returns a copy of the slice. @@ -198,40 +358,43 @@ func Clone[S ~[]E, E any](s S) S { // Compact replaces consecutive runs of equal elements with a single copy. // This is like the uniq command found on Unix. -// Compact modifies the contents of the slice s; it does not create a new slice. -// When Compact discards m elements in total, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage collected. +// Compact modifies the contents of the slice s and returns the modified slice, +// which may have a smaller length. +// Compact zeroes the elements between the new length and the original length. func Compact[S ~[]E, E comparable](s S) S { if len(s) < 2 { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if v != last { - s[i] = v + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } i++ - last = v } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } -// CompactFunc is like Compact but uses a comparison function. +// CompactFunc is like [Compact] but uses an equality function to compare elements. +// For runs of elements that compare equal, CompactFunc keeps the first one. +// CompactFunc zeroes the elements between the new length and the original length. func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { if len(s) < 2 { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if !eq(v, last) { - s[i] = v + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } i++ - last = v } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } @@ -256,3 +419,97 @@ func Grow[S ~[]E, E any](s S, n int) S { func Clip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } + +// Rotation algorithm explanation: +// +// rotate left by 2 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join first parts +// 89234567 01 +// recursively rotate first left part by 2 +// 23456789 01 +// join at the end +// 2345678901 +// +// rotate left by 8 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join last parts +// 89 23456701 +// recursively rotate second part left by 6 +// 89 01234567 +// join at the end +// 8901234567 + +// TODO: There are other rotate algorithms. +// This algorithm has the desirable property that it moves each element exactly twice. +// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. +// The follow-cycles algorithm can be 1-write but it is not very cache friendly. 
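These rotation helpers exist so that Insert and Replace stay correct even when v aliases s itself, which is what the overlaps/rotateRight machinery above handles. A small sketch (example values assumed, not from the patch):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{0, 1, 2, 3, 4, 5}
	// v (= s[3:]) aliases s's own backing array; the overlap checks and
	// rotate-based copying keep the result well defined anyway.
	s = slices.Insert(s, 2, s[3:]...)
	fmt.Println(s) // [0 1 3 4 5 2 3 4 5]
}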
+ +// rotateLeft rotates b left by n spaces. +// s_final[i] = s_orig[i+r], wrapping around. +func rotateLeft[E any](s []E, r int) { + for r != 0 && r != len(s) { + if r*2 <= len(s) { + swap(s[:r], s[len(s)-r:]) + s = s[:len(s)-r] + } else { + swap(s[:len(s)-r], s[r:]) + s, r = s[len(s)-r:], r*2-len(s) + } + } +} +func rotateRight[E any](s []E, r int) { + rotateLeft(s, len(s)-r) +} + +// swap swaps the contents of x and y. x and y must be equal length and disjoint. +func swap[E any](x, y []E) { + for i := 0; i < len(x); i++ { + x[i], y[i] = y[i], x[i] + } +} + +// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. +func overlaps[E any](a, b []E) bool { + if len(a) == 0 || len(b) == 0 { + return false + } + elemSize := unsafe.Sizeof(a[0]) + if elemSize == 0 { + return false + } + // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. + // Also see crypto/internal/alias/alias.go:AnyOverlap + return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && + uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) +} + +// startIdx returns the index in haystack where the needle starts. +// prerequisite: the needle must be aliased entirely inside the haystack. +func startIdx[E any](haystack, needle []E) int { + p := &needle[0] + for i := range haystack { + if p == &haystack[i] { + return i + } + } + // TODO: what if the overlap is by a non-integral number of Es? + panic("needle not found") +} + +// Reverse reverses the elements of the slice in place. +func Reverse[S ~[]E, E any](s S) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index f14f40da7..b67897f76 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp + package slices import ( @@ -11,57 +13,116 @@ import ( ) // Sort sorts a slice of any ordered type in ascending order. -// Sort may fail to sort correctly when sorting slices of floating-point -// numbers containing Not-a-number (NaN) values. -// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) -// instead if the input may contain NaNs. -func Sort[E constraints.Ordered](x []E) { +// When sorting floating-point numbers, NaNs are ordered before other values. +func Sort[S ~[]E, E constraints.Ordered](x S) { n := len(x) pdqsortOrdered(x, 0, n, bits.Len(uint(n))) } -// SortFunc sorts the slice x in ascending order as determined by the less function. -// This sort is not guaranteed to be stable. +// SortFunc sorts the slice x in ascending order as determined by the cmp +// function. This sort is not guaranteed to be stable. +// cmp(a, b) should return a negative number when a < b, a positive number when +// a > b and zero when a == b. // -// SortFunc requires that less is a strict weak ordering. +// SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. 
-func SortFunc[E any](x []E, less func(a, b E) bool) { +func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { n := len(x) - pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) + pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) } // SortStableFunc sorts the slice x while keeping the original order of equal -// elements, using less to compare elements. -func SortStableFunc[E any](x []E, less func(a, b E) bool) { - stableLessFunc(x, len(x), less) +// elements, using cmp to compare elements in the same way as [SortFunc]. +func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + stableCmpFunc(x, len(x), cmp) } // IsSorted reports whether x is sorted in ascending order. -func IsSorted[E constraints.Ordered](x []E) bool { +func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { for i := len(x) - 1; i > 0; i-- { - if x[i] < x[i-1] { + if cmpLess(x[i], x[i-1]) { return false } } return true } -// IsSortedFunc reports whether x is sorted in ascending order, with less as the -// comparison function. -func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { +// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the +// comparison function as defined by [SortFunc]. +func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { for i := len(x) - 1; i > 0; i-- { - if less(x[i], x[i-1]) { + if cmp(x[i], x[i-1]) < 0 { return false } } return true } +// Min returns the minimal value in x. It panics if x is empty. +// For floating-point numbers, Min propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Min[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Min: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = min(m, x[i]) + } + return m +} + +// MinFunc returns the minimal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one minimal element +// according to the cmp function, MinFunc returns the first one. +func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MinFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) < 0 { + m = x[i] + } + } + return m +} + +// Max returns the maximal value in x. It panics if x is empty. +// For floating-point E, Max propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Max[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Max: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = max(m, x[i]) + } + return m +} + +// MaxFunc returns the maximal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one maximal element +// according to the cmp function, MaxFunc returns the first one. +func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MaxFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) > 0 { + m = x[i] + } + } + return m +} + // BinarySearch searches for target in a sorted slice and returns the position // where target is found, or the position where target would appear in the // sort order; it also returns a bool saying whether the target is really found // in the slice. The slice must be sorted in increasing order. -func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { +func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { // Inlining is faster than calling BinarySearchFunc with a lambda. n := len(x) // Define x[-1] < target and x[n] >= target. 
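For reference, the position-plus-found contract that this BinarySearch refactor preserves, in a quick usage sketch (assumed values, not from the patch):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	x := []int{2, 4, 8, 16} // must already be sorted in increasing order
	i, found := slices.BinarySearch(x, 8)
	fmt.Println(i, found) // 2 true
	j, found := slices.BinarySearch(x, 5)
	fmt.Println(j, found) // 2 false: 5 would be inserted at index 2
}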
@@ -70,22 +131,24 @@ func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { for i < j { h := int(uint(i+j) >> 1) // avoid overflow when computing h // i ≤ h < j - if x[h] < target { + if cmpLess(x[h], target) { i = h + 1 // preserves x[i-1] < target } else { j = h // preserves x[j] >= target } } // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. - return i, i < n && x[i] == target + return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) } -// BinarySearchFunc works like BinarySearch, but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" is -// defined by cmp. cmp(a, b) is expected to return an integer comparing the two -// parameters: 0 if a == b, a negative number if a < b and a positive number if -// a > b. -func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { +// BinarySearchFunc works like [BinarySearch], but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { n := len(x) // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. @@ -124,3 +187,9 @@ func (r *xorshift) Next() uint64 { func nextPowerOfTwo(length int) uint { return 1 << bits.Len(uint(length)) } + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. +func isNaN[T constraints.Ordered](x T) bool { + return x != x +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go similarity index 64% rename from vendor/golang.org/x/exp/slices/zsortfunc.go rename to vendor/golang.org/x/exp/slices/zsortanyfunc.go index 2a632476c..06f2c7a24 100644 --- a/vendor/golang.org/x/exp/slices/zsortfunc.go +++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go @@ -6,28 +6,28 @@ package slices -// insertionSortLessFunc sorts data[a:b] using insertion sort. -func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +// insertionSortCmpFunc sorts data[a:b] using insertion sort. +func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { for i := a + 1; i < b; i++ { - for j := i; j > a && less(data[j], data[j-1]); j-- { + for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { data[j], data[j-1] = data[j-1], data[j] } } } -// siftDownLessFunc implements the heap property on data[lo:hi]. +// siftDownCmpFunc implements the heap property on data[lo:hi]. // first is an offset into the array where the root of the heap lies. 
-func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { +func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { root := lo for { child := 2*root + 1 if child >= hi { break } - if child+1 < hi && less(data[first+child], data[first+child+1]) { + if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { child++ } - if !less(data[first+root], data[first+child]) { + if !(cmp(data[first+root], data[first+child]) < 0) { return } data[first+root], data[first+child] = data[first+child], data[first+root] @@ -35,30 +35,30 @@ func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool } } -func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { first := a lo := 0 hi := b - a // Build heap with greatest element at top. for i := (hi - 1) / 2; i >= 0; i-- { - siftDownLessFunc(data, i, hi, first, less) + siftDownCmpFunc(data, i, hi, first, cmp) } // Pop elements, largest first, into end of data. for i := hi - 1; i >= 0; i-- { data[first], data[first+i] = data[first+i], data[first] - siftDownLessFunc(data, lo, i, first, less) + siftDownCmpFunc(data, lo, i, first, cmp) } } -// pdqsortLessFunc sorts data[a:b]. +// pdqsortCmpFunc sorts data[a:b]. // The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. // pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf // C++ implementation: https://github.com/orlp/pdqsort // Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ // limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { +func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { const maxInsertion = 12 var ( @@ -70,25 +70,25 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { length := b - a if length <= maxInsertion { - insertionSortLessFunc(data, a, b, less) + insertionSortCmpFunc(data, a, b, cmp) return } // Fall back to heapsort if too many bad choices were made. if limit == 0 { - heapSortLessFunc(data, a, b, less) + heapSortCmpFunc(data, a, b, cmp) return } // If the last partitioning was imbalanced, we need to breaking patterns. if !wasBalanced { - breakPatternsLessFunc(data, a, b, less) + breakPatternsCmpFunc(data, a, b, cmp) limit-- } - pivot, hint := choosePivotLessFunc(data, a, b, less) + pivot, hint := choosePivotCmpFunc(data, a, b, cmp) if hint == decreasingHint { - reverseRangeLessFunc(data, a, b, less) + reverseRangeCmpFunc(data, a, b, cmp) // The chosen pivot was pivot-a elements after the start of the array. // After reversing it is pivot-a elements before the end of the array. // The idea came from Rust's implementation. @@ -98,48 +98,48 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { // The slice is likely already sorted. if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortLessFunc(data, a, b, less) { + if partialInsertionSortCmpFunc(data, a, b, cmp) { return } } // Probably the slice contains many duplicate elements, partition the slice into // elements equal to and elements greater than the pivot. 
- if a > 0 && !less(data[a-1], data[pivot]) { - mid := partitionEqualLessFunc(data, a, b, pivot, less) + if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { + mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) a = mid continue } - mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) wasPartitioned = alreadyPartitioned leftLen, rightLen := mid-a, b-mid balanceThreshold := length / 8 if leftLen < rightLen { wasBalanced = leftLen >= balanceThreshold - pdqsortLessFunc(data, a, mid, limit, less) + pdqsortCmpFunc(data, a, mid, limit, cmp) a = mid + 1 } else { wasBalanced = rightLen >= balanceThreshold - pdqsortLessFunc(data, mid+1, b, limit, less) + pdqsortCmpFunc(data, mid+1, b, limit, cmp) b = mid } } } -// partitionLessFunc does one quicksort partition. +// partitionCmpFunc does one quicksort partition. // Let p = data[pivot] // Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot. // On return, data[newpivot] = p -func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { +func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) { data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - for i <= j && less(data[i], data[a]) { + for i <= j && (cmp(data[i], data[a]) < 0) { i++ } - for i <= j && !less(data[j], data[a]) { + for i <= j && !(cmp(data[j], data[a]) < 0) { j-- } if i > j { @@ -151,10 +151,10 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) j-- for { - for i <= j && less(data[i], data[a]) { + for i <= j && (cmp(data[i], data[a]) < 0) { i++ } - for i <= j && !less(data[j], data[a]) { + for i <= j && !(cmp(data[j], data[a]) < 0) { j-- } if i > j { @@ -168,17 +168,17 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) return j, false } -// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. // It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { +func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) { data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned for { - for i <= j && !less(data[a], data[i]) { + for i <= j && !(cmp(data[a], data[i]) < 0) { i++ } - for i <= j && less(data[a], data[j]) { + for i <= j && (cmp(data[a], data[j]) < 0) { j-- } if i > j { @@ -191,15 +191,15 @@ func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) return i } -// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { +// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool { const ( maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted shortestShifting = 50 // don't shift any elements on short arrays ) i := a + 1 for j := 0; j < maxSteps; j++ { - for i < b && !less(data[i], data[i-1]) { + for i < b && !(cmp(data[i], data[i-1]) < 0) { i++ } @@ -216,7 +216,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b // Shift the smaller one to the left. if i-a >= 2 { for j := i - 1; j >= 1; j-- { - if !less(data[j], data[j-1]) { + if !(cmp(data[j], data[j-1]) < 0) { break } data[j], data[j-1] = data[j-1], data[j] @@ -225,7 +225,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b // Shift the greater one to the right.
if b-i >= 2 { for j := i + 1; j < b; j++ { - if !less(data[j], data[j-1]) { + if !(cmp(data[j], data[j-1]) < 0) { break } data[j], data[j-1] = data[j-1], data[j] @@ -235,9 +235,9 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b return false } -// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns // that might cause imbalanced partitions in quicksort. -func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { length := b - a if length >= 8 { random := xorshift(length) @@ -253,12 +253,12 @@ func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { } } -// choosePivotLessFunc chooses a pivot in data[a:b]. +// choosePivotCmpFunc chooses a pivot in data[a:b]. // // [0,8): chooses a static pivot. // [8,shortestNinther): uses the simple median-of-three method. // [shortestNinther,∞): uses the Tukey ninther method. -func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { +func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { const ( shortestNinther = 50 maxSwaps = 4 * 3 @@ -276,12 +276,12 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv if l >= 8 { if l >= shortestNinther { // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentLessFunc(data, i, &swaps, less) - j = medianAdjacentLessFunc(data, j, &swaps, less) - k = medianAdjacentLessFunc(data, k, &swaps, less) + i = medianAdjacentCmpFunc(data, i, &swaps, cmp) + j = medianAdjacentCmpFunc(data, j, &swaps, cmp) + k = medianAdjacentCmpFunc(data, k, &swaps, cmp) } // Find the median among i, j, k and stores it into j. - j = medianLessFunc(data, i, j, k, &swaps, less) + j = medianCmpFunc(data, i, j, k, &swaps, cmp) } switch swaps { @@ -294,29 +294,29 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv } } -// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { - if less(data[b], data[a]) { +// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { + if cmp(data[b], data[a]) < 0 { *swaps++ return b, a } return a, b } -// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { - a, b = order2LessFunc(data, a, b, swaps, less) - b, c = order2LessFunc(data, b, c, swaps, less) - a, b = order2LessFunc(data, a, b, swaps, less) +// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { + a, b = order2CmpFunc(data, a, b, swaps, cmp) + b, c = order2CmpFunc(data, b, c, swaps, cmp) + a, b = order2CmpFunc(data, a, b, swaps, cmp) return b } -// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. 
-func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { - return medianLessFunc(data, a-1, a, a+1, swaps, less) +// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { + return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) } -func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { i := a j := b - 1 for i < j { @@ -326,37 +326,37 @@ func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { } } -func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { +func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { for i := 0; i < n; i++ { data[a+i], data[b+i] = data[b+i], data[a+i] } } -func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { +func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { blockSize := 20 // must be > 0 a, b := 0, blockSize for b <= n { - insertionSortLessFunc(data, a, b, less) + insertionSortCmpFunc(data, a, b, cmp) a = b b += blockSize } - insertionSortLessFunc(data, a, n, less) + insertionSortCmpFunc(data, a, n, cmp) for blockSize < n { a, b = 0, 2*blockSize for b <= n { - symMergeLessFunc(data, a, a+blockSize, b, less) + symMergeCmpFunc(data, a, a+blockSize, b, cmp) a = b b += 2 * blockSize } if m := a + blockSize; m < n { - symMergeLessFunc(data, a, m, n, less) + symMergeCmpFunc(data, a, m, n, cmp) } blockSize *= 2 } } -// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using // the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum // Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz // Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in @@ -375,7 +375,7 @@ func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { // symMerge assumes non-degenerate arguments: a < m && m < b. // Having the caller check this condition eliminates many leaf recursion calls, // which improves performance. -func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { +func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { // Avoid unnecessary recursions of symMerge // by direct insertion of data[a] into data[m:b] // if data[a:m] only contains one element. 
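The less-to-cmp renames above track the exported API change in sort.go: comparators now return an int (negative, zero, or positive) instead of a bool. A minimal usage sketch (assumed values, not from the patch):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

func main() {
	words := []string{"banana", "Apple", "cherry"}
	// cmp-style comparator: negative if a precedes b, zero if equal, positive otherwise.
	slices.SortFunc(words, func(a, b string) int {
		return strings.Compare(strings.ToLower(a), strings.ToLower(b))
	})
	fmt.Println(words) // [Apple banana cherry]
}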
@@ -387,7 +387,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { j := b for i < j { h := int(uint(i+j) >> 1) - if less(data[h], data[a]) { + if cmp(data[h], data[a]) < 0 { i = h + 1 } else { j = h @@ -411,7 +411,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { j := m for i < j { h := int(uint(i+j) >> 1) - if !less(data[m], data[h]) { + if !(cmp(data[m], data[h]) < 0) { i = h + 1 } else { j = h @@ -438,7 +438,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { for start < r { c := int(uint(start+r) >> 1) - if !less(data[p-c], data[c]) { + if !(cmp(data[p-c], data[c]) < 0) { start = c + 1 } else { r = c @@ -447,33 +447,33 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { end := n - start if start < m && m < end { - rotateLessFunc(data, start, m, end, less) + rotateCmpFunc(data, start, m, end, cmp) } if a < start && start < mid { - symMergeLessFunc(data, a, start, mid, less) + symMergeCmpFunc(data, a, start, mid, cmp) } if mid < end && end < b { - symMergeLessFunc(data, mid, end, b, less) + symMergeCmpFunc(data, mid, end, b, cmp) } } -// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: // Data of the form 'x u v y' is changed to 'x v u y'. // rotate performs at most b-a many calls to data.Swap, // and it assumes non-degenerate arguments: a < m && m < b. -func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { +func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { i := m - a j := b - m for i != j { if i > j { - swapRangeLessFunc(data, m-i, m, j, less) + swapRangeCmpFunc(data, m-i, m, j, cmp) i -= j } else { - swapRangeLessFunc(data, m-i, m+j-i, i, less) + swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) j -= i } } // i == j - swapRangeLessFunc(data, m-i, m, i, less) + swapRangeCmpFunc(data, m-i, m, i, cmp) } diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go index efaa1c8b7..99b47c398 100644 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -11,7 +11,7 @@ import "golang.org/x/exp/constraints" // insertionSortOrdered sorts data[a:b] using insertion sort. func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j] < data[j-1]); j-- { + for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { data[j], data[j-1] = data[j-1], data[j] } } @@ -26,10 +26,10 @@ func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { if child >= hi { break } - if child+1 < hi && (data[first+child] < data[first+child+1]) { + if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { child++ } - if !(data[first+root] < data[first+child]) { + if !cmpLess(data[first+root], data[first+child]) { return } data[first+root], data[first+child] = data[first+child], data[first+root] @@ -107,7 +107,7 @@ func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { // Probably the slice contains many duplicate elements, partition the slice into // elements equal to and elements greater than the pivot. 
- if a > 0 && !(data[a-1] < data[pivot]) { + if a > 0 && !cmpLess(data[a-1], data[pivot]) { mid := partitionEqualOrdered(data, a, b, pivot) a = mid continue @@ -138,10 +138,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - for i <= j && (data[i] < data[a]) { + for i <= j && cmpLess(data[i], data[a]) { i++ } - for i <= j && !(data[j] < data[a]) { + for i <= j && !cmpLess(data[j], data[a]) { j-- } if i > j { @@ -153,10 +153,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo j-- for { - for i <= j && (data[i] < data[a]) { + for i <= j && cmpLess(data[i], data[a]) { i++ } - for i <= j && !(data[j] < data[a]) { + for i <= j && !cmpLess(data[j], data[a]) { j-- } if i > j { @@ -177,10 +177,10 @@ func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (ne i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned for { - for i <= j && !(data[a] < data[i]) { + for i <= j && !cmpLess(data[a], data[i]) { i++ } - for i <= j && (data[a] < data[j]) { + for i <= j && cmpLess(data[a], data[j]) { j-- } if i > j { @@ -201,7 +201,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool ) i := a + 1 for j := 0; j < maxSteps; j++ { - for i < b && !(data[i] < data[i-1]) { + for i < b && !cmpLess(data[i], data[i-1]) { i++ } @@ -218,7 +218,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool // Shift the smaller one to the left. if i-a >= 2 { for j := i - 1; j >= 1; j-- { - if !(data[j] < data[j-1]) { + if !cmpLess(data[j], data[j-1]) { break } data[j], data[j-1] = data[j-1], data[j] @@ -227,7 +227,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool // Shift the greater one to the right. if b-i >= 2 { for j := i + 1; j < b; j++ { - if !(data[j] < data[j-1]) { + if !cmpLess(data[j], data[j-1]) { break } data[j], data[j-1] = data[j-1], data[j] @@ -298,7 +298,7 @@ func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, h // order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. 
func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if data[b] < data[a] { + if cmpLess(data[b], data[a]) { *swaps++ return b, a } @@ -389,7 +389,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { j := b for i < j { h := int(uint(i+j) >> 1) - if data[h] < data[a] { + if cmpLess(data[h], data[a]) { i = h + 1 } else { j = h @@ -413,7 +413,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { j := m for i < j { h := int(uint(i+j) >> 1) - if !(data[m] < data[h]) { + if !cmpLess(data[m], data[h]) { i = h + 1 } else { j = h @@ -440,7 +440,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { for start < r { c := int(uint(start+r) >> 1) - if !(data[p-c] < data[c]) { + if !cmpLess(data[p-c], data[c]) { start = c + 1 } else { r = c diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go index 6e071e852..9b4de9401 100644 --- a/vendor/golang.org/x/net/http/httpguts/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -12,7 +12,7 @@ import ( "golang.org/x/net/idna" ) -var isTokenTable = [127]bool{ +var isTokenTable = [256]bool{ '!': true, '#': true, '$': true, @@ -93,12 +93,7 @@ var isTokenTable = [127]bool{ } func IsTokenRune(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !IsTokenRune(r) + return r < utf8.RuneSelf && isTokenTable[byte(r)] } // HeaderValuesContainsToken reports whether any string in values @@ -202,8 +197,8 @@ func ValidHeaderFieldName(v string) bool { if len(v) == 0 { return false } - for _, r := range v { - if !IsTokenRune(r) { + for i := 0; i < len(v); i++ { + if !isTokenTable[v[i]] { return false } } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 43557ab7e..105c3b279 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -490,6 +490,9 @@ func terminalReadFrameError(err error) bool { // returned error is ErrFrameTooLarge. Other errors may be of type // ConnectionError, StreamError, or anything else from the underlying // reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. func (fr *Framer) ReadFrame() (Frame, error) { fr.errDetail = nil if fr.lastFrame != nil { @@ -1521,7 +1524,7 @@ func (fr *Framer) maxHeaderStringLen() int { // readMetaFrame returns 0 or more CONTINUATION frames from fr and // merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. -func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { +func (fr *Framer) readMetaFrame(hf *HeadersFrame) (Frame, error) { if fr.AllowIllegalReads { return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") } @@ -1592,7 +1595,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { } // It would be nice to send a RST_STREAM before sending the GOAWAY, // but the structure of the server's frame writer makes this difficult. 
- return nil, ConnectionError(ErrCodeProtocol)
+ return mh, ConnectionError(ErrCodeProtocol)
 }
 
 // Also close the connection after any CONTINUATION frame following an
@@ -1604,11 +1607,11 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
 }
 // It would be nice to send a RST_STREAM before sending the GOAWAY,
 // but the structure of the server's frame writer makes this difficult.
- return nil, ConnectionError(ErrCodeProtocol)
+ return mh, ConnectionError(ErrCodeProtocol)
 }
 
 if _, err := hdec.Write(frag); err != nil {
- return nil, ConnectionError(ErrCodeCompression)
+ return mh, ConnectionError(ErrCodeCompression)
 }
 
 if hc.HeadersEnded() {
@@ -1625,7 +1628,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
 mh.HeadersFrame.invalidate()
 
 if err := hdec.Close(); err != nil {
- return nil, ConnectionError(ErrCodeCompression)
+ return mh, ConnectionError(ErrCodeCompression)
 }
 if invalid != nil {
 fr.errDetail = invalid
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index ce2e8b40e..c5d081081 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -732,11 +732,7 @@ func isClosedConnError(err error) bool {
 return false
 }
 
- // TODO: remove this string search and be more like the Windows
- // case below. That might involve modifying the standard library
- // to return better error types.
- str := err.Error()
- if strings.Contains(str, "use of closed network connection") {
+ if errors.Is(err, net.ErrClosed) {
 return true
 }
 
@@ -1482,6 +1478,11 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
 sc.goAway(ErrCodeFlowControl)
 return true
 case ConnectionError:
+ if res.f != nil {
+ if id := res.f.Header().StreamID; id > sc.maxClientStreamID {
+ sc.maxClientStreamID = id
+ }
+ }
 sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
 sc.goAway(ErrCode(ev))
 return true // goAway will handle shutdown
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index ce375c8c7..2fa49490c 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -936,7 +936,20 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
 }
 last := f.LastStreamID
 for streamID, cs := range cc.streams {
- if streamID > last {
+ if streamID <= last {
+ // The server's GOAWAY indicates that it received this stream.
+ // It will either finish processing it, or close the connection
+ // without doing so. Either way, leave the stream alone for now.
+ continue
+ }
+ if streamID == 1 && cc.goAway.ErrCode != ErrCodeNo {
+ // Don't retry the first stream on a connection if we get a non-NO error.
+ // If the server is sending an error on a new connection,
+ // retrying the request on a new one probably isn't going to work.
+ cs.abortStreamLocked(fmt.Errorf("http2: Transport received GOAWAY from server ErrCode:%v", cc.goAway.ErrCode))
+ } else {
+ // Aborting the stream with errClientConnGotGoAway indicates that
+ // the request should be retried on a new connection.
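+ // (Per the HTTP/2 GOAWAY contract, stream IDs above LastStreamID
+ // should never have been processed by the server, so replaying them
+ // on a fresh connection is safe.)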
cs.abortStreamLocked(errClientConnGotGoAway) } } diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 4756ad5f7..8fa707aa4 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -103,6 +103,7 @@ var ARM64 struct { HasASIMDDP bool // Advanced SIMD double precision instruction set HasSHA512 bool // SHA512 hardware implementation HasSVE bool // Scalable Vector Extensions + HasSVE2 bool // Scalable Vector Extensions 2 HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go index f3eb993bf..0e27a21e1 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -28,6 +28,7 @@ func initOptions() { {Name: "sm3", Feature: &ARM64.HasSM3}, {Name: "sm4", Feature: &ARM64.HasSM4}, {Name: "sve", Feature: &ARM64.HasSVE}, + {Name: "sve2", Feature: &ARM64.HasSVE2}, {Name: "crc32", Feature: &ARM64.HasCRC32}, {Name: "atomics", Feature: &ARM64.HasATOMICS}, {Name: "asimdhp", Feature: &ARM64.HasASIMDHP}, @@ -164,6 +165,15 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) { switch extractBits(pfr0, 32, 35) { case 1: ARM64.HasSVE = true + + parseARM64SVERegister(getzfr0()) + } +} + +func parseARM64SVERegister(zfr0 uint64) { + switch extractBits(zfr0, 0, 3) { + case 1: + ARM64.HasSVE2 = true } } diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index fcb9a3888..22cc99844 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -29,3 +29,11 @@ TEXT ·getpfr0(SB),NOSPLIT,$0-8 WORD $0xd5380400 MOVD R0, ret+0(FP) RET + +// func getzfr0() uint64 +TEXT ·getzfr0(SB),NOSPLIT,$0-8 + // get SVE Feature Register 0 into x0 + // mrs x0, ID_AA64ZFR0_EL1 = d5380480 + WORD $0xd5380480 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index a8acd3e32..6ac6e1efb 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -9,3 +9,4 @@ package cpu func getisar0() uint64 func getisar1() uint64 func getpfr0() uint64 +func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index a968b80fa..3d386d0fc 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -35,6 +35,8 @@ const ( hwcap_SHA512 = 1 << 21 hwcap_SVE = 1 << 22 hwcap_ASIMDFHM = 1 << 23 + + hwcap2_SVE2 = 1 << 1 ) // linuxKernelCanEmulateCPUID reports whether we're running @@ -104,6 +106,9 @@ func doinit() { ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) ARM64.HasSVE = isSet(hwCap, hwcap_SVE) ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) + + // HWCAP2 feature bits + ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) } func isSet(hwc uint, value uint) bool { diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s index 2f67ba86d..813dfad7d 100644 --- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s @@ -9,9 +9,11 @@ #define PSALAA 1208(R0) #define GTAB64(x) 80(x) #define LCA64(x) 88(x) +#define SAVSTACK_ASYNC(x) 336(x) // in the LCA #define CAA(x) 8(x) -#define EDCHPXV(x) 1016(x) // in the CAA -#define SAVSTACK_ASYNC(x) 336(x) // in the LCA +#define CEECAATHDID(x) 976(x) // in the CAA +#define 
EDCHPXV(x) 1016(x) // in the CAA +#define GOCB(x) 1104(x) // in the CAA // SS_*, where x=SAVSTACK_ASYNC #define SS_LE(x) 0(x) @@ -19,405 +21,362 @@ #define SS_ERRNO(x) 16(x) #define SS_ERRNOJR(x) 20(x) -#define LE_CALL BYTE $0x0D; BYTE $0x76; // BL R7, R6 +// Function Descriptor Offsets +#define __errno 0x156*16 +#define __err2ad 0x16C*16 -TEXT ·clearErrno(SB),NOSPLIT,$0-0 - BL addrerrno<>(SB) - MOVD $0, 0(R3) +// Call Instructions +#define LE_CALL BYTE $0x0D; BYTE $0x76 // BL R7, R6 +#define SVC_LOAD BYTE $0x0A; BYTE $0x08 // SVC 08 LOAD +#define SVC_DELETE BYTE $0x0A; BYTE $0x09 // SVC 09 DELETE + +DATA zosLibVec<>(SB)/8, $0 +GLOBL zosLibVec<>(SB), NOPTR, $8 + +TEXT ·initZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R8 + MOVD EDCHPXV(R8), R8 + MOVD R8, zosLibVec<>(SB) + RET + +TEXT ·GetZosLibVec(SB), NOSPLIT|NOFRAME, $0-0 + MOVD zosLibVec<>(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·clearErrno(SB), NOSPLIT, $0-0 + BL addrerrno<>(SB) + MOVD $0, 0(R3) RET // Returns the address of errno in R3. -TEXT addrerrno<>(SB),NOSPLIT|NOFRAME,$0-0 +TEXT addrerrno<>(SB), NOSPLIT|NOFRAME, $0-0 // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 // Get __errno FuncDesc. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - ADD $(0x156*16), R9 - LMG 0(R9), R5, R6 + MOVD CAA(R8), R9 + MOVD EDCHPXV(R9), R9 + ADD $(__errno), R9 + LMG 0(R9), R5, R6 // Switch to saved LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R4 + MOVD $0, 0(R9) // Call __errno function. LE_CALL NOPH // Switch back to Go stack. - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. + XOR R0, R0 // Restore R0 to $0. + MOVD R4, 0(R9) // Save stack pointer. RET -TEXT ·syscall_syscall(SB),NOSPLIT,$0-56 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 +// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) +TEXT ·svcCall(SB), NOSPLIT, $0 + BL runtime·save_g(SB) // Save g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD R15, 0(R9) - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVD argv+8(FP), R1 // Move function arguments into registers + MOVD dsa+16(FP), g + MOVD fnptr+0(FP), R15 - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 + BYTE $0x0D // Branch to function + BYTE $0xEF - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) + BL runtime·load_g(SB) // Restore g and stack pointer + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD SAVSTACK_ASYNC(R8), R9 + MOVD 0(R9), R15 - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+32(FP) - MOVD R0, r2+40(FP) - MOVD R0, err+48(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) -done: - BL runtime·exitsyscall(SB) RET -TEXT ·syscall_rawsyscall(SB),NOSPLIT,$0-56 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. 
- MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// func svcLoad(name *byte) unsafe.Pointer +TEXT ·svcLoad(SB), NOSPLIT, $0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD $0x80000000, R1 + MOVD $0, R15 + SVC_LOAD + MOVW R15, R3 // Save return code from SVC + MOVD R2, R15 // Restore go stack pointer + CMP R3, $0 // Check SVC return code + BNE error + + MOVD $-2, R3 // Reset last bit of entry point to zero + AND R0, R3 + MOVD R3, ret+8(FP) // Return entry point returned by SVC + CMP R0, R3 // Check if last bit of entry point was set + BNE done + + MOVD R15, R2 // Save go stack pointer + MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) + SVC_DELETE + MOVD R2, R15 // Restore go stack pointer - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) +error: + MOVD $0, ret+8(FP) // Return 0 on failure - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+32(FP) - MOVD R0, r2+40(FP) - MOVD R0, err+48(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+48(FP) done: + XOR R0, R0 // Reset r0 to 0 RET -TEXT ·syscall_syscall6(SB),NOSPLIT,$0-80 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 +// func svcUnload(name *byte, fnptr unsafe.Pointer) int64 +TEXT ·svcUnload(SB), NOSPLIT, $0 + MOVD R15, R2 // Save go stack pointer + MOVD name+0(FP), R0 // Move SVC args into registers + MOVD fnptr+8(FP), R15 + SVC_DELETE + XOR R0, R0 // Reset r0 to 0 + MOVD R15, R1 // Save SVC return code + MOVD R2, R15 // Restore go stack pointer + MOVD R1, ret+16(FP) // Return SVC return code + RET +// func gettid() uint64 +TEXT ·gettid(SB), NOSPLIT, $0 // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 + // Get CEECAATHDID + MOVD CAA(R8), R9 + MOVD CEECAATHDID(R9), R9 + MOVD R9, ret+0(FP) - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - - // Call function. - LE_CALL - NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+56(FP) - MOVD R0, r2+64(FP) - MOVD R0, err+72(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+72(FP) -done: - BL runtime·exitsyscall(SB) RET -TEXT ·syscall_rawsyscall6(SB),NOSPLIT,$0-80 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// +// Call LE function, if the return is -1 +// errno and errno2 is retrieved +// +TEXT ·CallLeFuncWithErr(SB), NOSPLIT, $0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R9 + MOVD g, GOCB(R9) // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. 
- MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - - // Call function. - LE_CALL + MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address + MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer + + MOVD parms_base+8(FP), R7 // R7 -> argument array + MOVD parms_len+16(FP), R8 // R8 number of arguments + + // arg 1 ---> R1 + CMP R8, $0 + BEQ docall + SUB $1, R8 + MOVD 0(R7), R1 + + // arg 2 ---> R2 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R2 + + // arg 3 --> R3 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R3 + + CMP R8, $0 + BEQ docall + MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument + +repeat: + ADD $8, R7 + MOVD 0(R7), R0 // advance arg pointer by 8 byte + ADD $8, R6 // advance LE argument address by 8 byte + MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame + SUB $1, R8 + CMP R8, $0 + BNE repeat + +docall: + MOVD funcdesc+0(FP), R8 // R8-> function descriptor + LMG 0(R8), R5, R6 + MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC + LE_CALL // balr R7, R6 (return #1) + NOPH + MOVD R3, ret+32(FP) + CMP R3, $-1 // compare result to -1 + BNE done + + // retrieve errno and errno2 + MOVD zosLibVec<>(SB), R8 + ADD $(__errno), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __errno (return #3) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+56(FP) - MOVD R0, r2+64(FP) - MOVD R0, err+72(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL ·rrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+72(FP) + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) + MOVD zosLibVec<>(SB), R8 + ADD $(__err2ad), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __err2ad (return #2) + NOPH + MOVW (R3), R2 // retrieve errno2 + MOVD R2, errno2+40(FP) // store in return area + done: + MOVD R4, 0(R9) // Save stack pointer. RET -TEXT ·syscall_syscall9(SB),NOSPLIT,$0 - BL runtime·entersyscall(SB) - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 +// +// Call LE function, if the return is 0 +// errno and errno2 is retrieved +// +TEXT ·CallLeFuncWithPtrReturn(SB), NOSPLIT, $0 + MOVW PSALAA, R8 + MOVD LCA64(R8), R8 + MOVD CAA(R8), R9 + MOVD g, GOCB(R9) // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - MOVD a7+56(FP), R12 - MOVD R12, (2176+48)(R4) - MOVD a8+64(FP), R12 - MOVD R12, (2176+56)(R4) - MOVD a9+72(FP), R12 - MOVD R12, (2176+64)(R4) - - // Call function. 
- LE_CALL + MOVD SAVSTACK_ASYNC(R8), R9 // R9-> LE stack frame saving address + MOVD 0(R9), R4 // R4-> restore previously saved stack frame pointer + + MOVD parms_base+8(FP), R7 // R7 -> argument array + MOVD parms_len+16(FP), R8 // R8 number of arguments + + // arg 1 ---> R1 + CMP R8, $0 + BEQ docall + SUB $1, R8 + MOVD 0(R7), R1 + + // arg 2 ---> R2 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R2 + + // arg 3 --> R3 + CMP R8, $0 + BEQ docall + SUB $1, R8 + ADD $8, R7 + MOVD 0(R7), R3 + + CMP R8, $0 + BEQ docall + MOVD $2176+16, R6 // starting LE stack address-8 to store 4th argument + +repeat: + ADD $8, R7 + MOVD 0(R7), R0 // advance arg pointer by 8 byte + ADD $8, R6 // advance LE argument address by 8 byte + MOVD R0, (R4)(R6*1) // copy argument from go-slice to le-frame + SUB $1, R8 + CMP R8, $0 + BNE repeat + +docall: + MOVD funcdesc+0(FP), R8 // R8-> function descriptor + LMG 0(R8), R5, R6 + MOVD $0, 0(R9) // R9 address of SAVSTACK_ASYNC + LE_CALL // balr R7, R6 (return #1) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. - - MOVD R3, r1+80(FP) - MOVD R0, r2+88(FP) - MOVD R0, err+96(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+96(FP) -done: - BL runtime·exitsyscall(SB) - RET - -TEXT ·syscall_rawsyscall9(SB),NOSPLIT,$0 - MOVD a1+8(FP), R1 - MOVD a2+16(FP), R2 - MOVD a3+24(FP), R3 - - // Get library control area (LCA). - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - - // Get function. - MOVD CAA(R8), R9 - MOVD EDCHPXV(R9), R9 - MOVD trap+0(FP), R5 - SLD $4, R5 - ADD R5, R9 - LMG 0(R9), R5, R6 - - // Restore LE stack. - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R4 - MOVD $0, 0(R9) - - // Fill in parameter list. - MOVD a4+32(FP), R12 - MOVD R12, (2176+24)(R4) - MOVD a5+40(FP), R12 - MOVD R12, (2176+32)(R4) - MOVD a6+48(FP), R12 - MOVD R12, (2176+40)(R4) - MOVD a7+56(FP), R12 - MOVD R12, (2176+48)(R4) - MOVD a8+64(FP), R12 - MOVD R12, (2176+56)(R4) - MOVD a9+72(FP), R12 - MOVD R12, (2176+64)(R4) - - // Call function. - LE_CALL + MOVD R3, ret+32(FP) + CMP R3, $0 // compare result to 0 + BNE done + + // retrieve errno and errno2 + MOVD zosLibVec<>(SB), R8 + ADD $(__errno), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __errno (return #3) NOPH - XOR R0, R0 // Restore R0 to $0. - MOVD R4, 0(R9) // Save stack pointer. 
- - MOVD R3, r1+80(FP) - MOVD R0, r2+88(FP) - MOVD R0, err+96(FP) - MOVW R3, R4 - CMP R4, $-1 - BNE done - BL addrerrno<>(SB) - MOVWZ 0(R3), R3 - MOVD R3, err+96(FP) -done: - RET - -// func svcCall(fnptr unsafe.Pointer, argv *unsafe.Pointer, dsa *uint64) -TEXT ·svcCall(SB),NOSPLIT,$0 - BL runtime·save_g(SB) // Save g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD R15, 0(R9) - - MOVD argv+8(FP), R1 // Move function arguments into registers - MOVD dsa+16(FP), g - MOVD fnptr+0(FP), R15 - - BYTE $0x0D // Branch to function - BYTE $0xEF - - BL runtime·load_g(SB) // Restore g and stack pointer - MOVW PSALAA, R8 - MOVD LCA64(R8), R8 - MOVD SAVSTACK_ASYNC(R8), R9 - MOVD 0(R9), R15 - - RET - -// func svcLoad(name *byte) unsafe.Pointer -TEXT ·svcLoad(SB),NOSPLIT,$0 - MOVD R15, R2 // Save go stack pointer - MOVD name+0(FP), R0 // Move SVC args into registers - MOVD $0x80000000, R1 - MOVD $0, R15 - BYTE $0x0A // SVC 08 LOAD - BYTE $0x08 - MOVW R15, R3 // Save return code from SVC - MOVD R2, R15 // Restore go stack pointer - CMP R3, $0 // Check SVC return code - BNE error - - MOVD $-2, R3 // Reset last bit of entry point to zero - AND R0, R3 - MOVD R3, addr+8(FP) // Return entry point returned by SVC - CMP R0, R3 // Check if last bit of entry point was set - BNE done - - MOVD R15, R2 // Save go stack pointer - MOVD $0, R15 // Move SVC args into registers (entry point still in r0 from SVC 08) - BYTE $0x0A // SVC 09 DELETE - BYTE $0x09 - MOVD R2, R15 // Restore go stack pointer + MOVWZ 0(R3), R3 + MOVD R3, err+48(FP) + MOVD zosLibVec<>(SB), R8 + ADD $(__err2ad), R8 + LMG 0(R8), R5, R6 + LE_CALL // balr R7, R6 __err2ad (return #2) + NOPH + MOVW (R3), R2 // retrieve errno2 + MOVD R2, errno2+40(FP) // store in return area + XOR R2, R2 + MOVWZ R2, (R3) // clear errno2 -error: - MOVD $0, addr+8(FP) // Return 0 on failure done: - XOR R0, R0 // Reset r0 to 0 + MOVD R4, 0(R9) // Save stack pointer. 
 RET

-// func svcUnload(name *byte, fnptr unsafe.Pointer) int64
-TEXT ·svcUnload(SB),NOSPLIT,$0
- MOVD R15, R2 // Save go stack pointer
- MOVD name+0(FP), R0 // Move SVC args into registers
- MOVD addr+8(FP), R15
- BYTE $0x0A // SVC 09
- BYTE $0x09
- XOR R0, R0 // Reset r0 to 0
- MOVD R15, R1 // Save SVC return code
- MOVD R2, R15 // Restore go stack pointer
- MOVD R1, rc+0(FP) // Return SVC return code
+//
+// function to test if a pointer can be safely dereferenced (content read)
+// return 0 for success
+//
+TEXT ·ptrtest(SB), NOSPLIT, $0-16
+ MOVD arg+0(FP), R10 // test pointer in R10
+
+ // set up R2 to point to CEECAADMC
+ BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208
+ BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2
+ BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2)
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2)
+ BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2)
+
+ // set up R5 to point to the "shunt" path which sets R3 to 1 (failure)
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3
+ BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1
+ BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1
+
+ // if r3 is not zero (failed) then branch to finish
+ BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3
+ BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2
+
+ // atomic store of shunt address in R5 into CEECAADMC
+ BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2)
+
+ // now try reading from the test pointer in R10; if it fails, it branches to the "lghi" instruction above
+ BYTE $0xE3; BYTE $0x9A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 9,0(10)
+
+ // finish here, restore 0 into CEECAADMC
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9
+ BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2)
+ MOVD R3, ret+8(FP) // result in R3
 RET

-// func gettid() uint64
-TEXT ·gettid(SB), NOSPLIT, $0
- // Get library control area (LCA).
- MOVW PSALAA, R8
- MOVD LCA64(R8), R8
-
- // Get CEECAATHDID
- MOVD CAA(R8), R9
- MOVD 0x3D0(R9), R9
- MOVD R9, ret+0(FP)
-
+//
+// function to test if a uintptr can be loaded from a pointer
+// return 1: the 8-byte content
+// 2: 0 for success, 1 for failure
+//
+// func safeload(ptr uintptr) (value uintptr, error uintptr)
TEXT ·safeload(SB), NOSPLIT, $0-24
+ MOVD ptr+0(FP), R10 // test pointer in R10
+ MOVD $0x0, R6
+ // same CEECAADMC shunt setup as in ptrtest above; a failed load sets R3 to 1
+ BYTE $0xE3; BYTE $0x20; BYTE $0x04; BYTE $0xB8; BYTE $0x00; BYTE $0x17 // llgt 2,1208
+ BYTE $0xB9; BYTE $0x17; BYTE $0x00; BYTE $0x22 // llgtr 2,2
+ BYTE $0xA5; BYTE $0x26; BYTE $0x7F; BYTE $0xFF // nilh 2,32767
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x58; BYTE $0x00; BYTE $0x04 // lg 2,88(2)
+ BYTE $0xE3; BYTE $0x22; BYTE $0x00; BYTE $0x08; BYTE $0x00; BYTE $0x04 // lg 2,8(2)
+ BYTE $0x41; BYTE $0x22; BYTE $0x03; BYTE $0x68 // la 2,872(2)
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x33 // xgr 3,3
+ BYTE $0xA7; BYTE $0x55; BYTE $0x00; BYTE $0x04 // bras 5,lbl1
+ BYTE $0xA7; BYTE $0x39; BYTE $0x00; BYTE $0x01 // lghi 3,1
+ BYTE $0xB9; BYTE $0x02; BYTE $0x00; BYTE $0x33 // lbl1 ltgr 3,3
+ BYTE $0xA7; BYTE $0x74; BYTE $0x00; BYTE $0x08 // brc b'0111',lbl2
+ BYTE $0xE3; BYTE $0x52; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 5,0(2)
+ BYTE $0xE3; BYTE $0x6A; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x04 // lg 6,0(10)
+ BYTE $0xB9; BYTE $0x82; BYTE $0x00; BYTE $0x99 // lbl2 xgr 9,9
+ BYTE $0xE3; BYTE $0x92; BYTE $0x00; BYTE $0x00; BYTE $0x00; BYTE $0x24 // stg 9,0(2)
+ MOVD R6, value+8(FP) // result in R6
+ MOVD R3, error+16(FP) // error in R3
 RET
diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.go b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go
new file mode 100644
index 000000000..39d647d86
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.go
@@ -0,0 +1,657 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +//go:build zos + +package unix + +import ( + "bytes" + "fmt" + "unsafe" +) + +//go:noescape +func bpxcall(plist []unsafe.Pointer, bpx_offset int64) + +//go:noescape +func A2e([]byte) + +//go:noescape +func E2a([]byte) + +const ( + BPX4STA = 192 // stat + BPX4FST = 104 // fstat + BPX4LST = 132 // lstat + BPX4OPN = 156 // open + BPX4CLO = 72 // close + BPX4CHR = 500 // chattr + BPX4FCR = 504 // fchattr + BPX4LCR = 1180 // lchattr + BPX4CTW = 492 // cond_timed_wait + BPX4GTH = 1056 // __getthent + BPX4PTQ = 412 // pthread_quiesc + BPX4PTR = 320 // ptrace +) + +const ( + //options + //byte1 + BPX_OPNFHIGH = 0x80 + //byte2 + BPX_OPNFEXEC = 0x80 + //byte3 + BPX_O_NOLARGEFILE = 0x08 + BPX_O_LARGEFILE = 0x04 + BPX_O_ASYNCSIG = 0x02 + BPX_O_SYNC = 0x01 + //byte4 + BPX_O_CREXCL = 0xc0 + BPX_O_CREAT = 0x80 + BPX_O_EXCL = 0x40 + BPX_O_NOCTTY = 0x20 + BPX_O_TRUNC = 0x10 + BPX_O_APPEND = 0x08 + BPX_O_NONBLOCK = 0x04 + BPX_FNDELAY = 0x04 + BPX_O_RDWR = 0x03 + BPX_O_RDONLY = 0x02 + BPX_O_WRONLY = 0x01 + BPX_O_ACCMODE = 0x03 + BPX_O_GETFL = 0x0f + + //mode + // byte1 (file type) + BPX_FT_DIR = 1 + BPX_FT_CHARSPEC = 2 + BPX_FT_REGFILE = 3 + BPX_FT_FIFO = 4 + BPX_FT_SYMLINK = 5 + BPX_FT_SOCKET = 6 + //byte3 + BPX_S_ISUID = 0x08 + BPX_S_ISGID = 0x04 + BPX_S_ISVTX = 0x02 + BPX_S_IRWXU1 = 0x01 + BPX_S_IRUSR = 0x01 + //byte4 + BPX_S_IRWXU2 = 0xc0 + BPX_S_IWUSR = 0x80 + BPX_S_IXUSR = 0x40 + BPX_S_IRWXG = 0x38 + BPX_S_IRGRP = 0x20 + BPX_S_IWGRP = 0x10 + BPX_S_IXGRP = 0x08 + BPX_S_IRWXOX = 0x07 + BPX_S_IROTH = 0x04 + BPX_S_IWOTH = 0x02 + BPX_S_IXOTH = 0x01 + + CW_INTRPT = 1 + CW_CONDVAR = 32 + CW_TIMEOUT = 64 + + PGTHA_NEXT = 2 + PGTHA_CURRENT = 1 + PGTHA_FIRST = 0 + PGTHA_LAST = 3 + PGTHA_PROCESS = 0x80 + PGTHA_CONTTY = 0x40 + PGTHA_PATH = 0x20 + PGTHA_COMMAND = 0x10 + PGTHA_FILEDATA = 0x08 + PGTHA_THREAD = 0x04 + PGTHA_PTAG = 0x02 + PGTHA_COMMANDLONG = 0x01 + PGTHA_THREADFAST = 0x80 + PGTHA_FILEPATH = 0x40 + PGTHA_THDSIGMASK = 0x20 + // thread quiece mode + QUIESCE_TERM int32 = 1 + QUIESCE_FORCE int32 = 2 + QUIESCE_QUERY int32 = 3 + QUIESCE_FREEZE int32 = 4 + QUIESCE_UNFREEZE int32 = 5 + FREEZE_THIS_THREAD int32 = 6 + FREEZE_EXIT int32 = 8 + QUIESCE_SRB int32 = 9 +) + +type Pgtha struct { + Pid uint32 // 0 + Tid0 uint32 // 4 + Tid1 uint32 + Accesspid byte // C + Accesstid byte // D + Accessasid uint16 // E + Loginname [8]byte // 10 + Flag1 byte // 18 + Flag1b2 byte // 19 +} + +type Bpxystat_t struct { // DSECT BPXYSTAT + St_id [4]uint8 // 0 + St_length uint16 // 0x4 + St_version uint16 // 0x6 + St_mode uint32 // 0x8 + St_ino uint32 // 0xc + St_dev uint32 // 0x10 + St_nlink uint32 // 0x14 + St_uid uint32 // 0x18 + St_gid uint32 // 0x1c + St_size uint64 // 0x20 + St_atime uint32 // 0x28 + St_mtime uint32 // 0x2c + St_ctime uint32 // 0x30 + St_rdev uint32 // 0x34 + St_auditoraudit uint32 // 0x38 + St_useraudit uint32 // 0x3c + St_blksize uint32 // 0x40 + St_createtime uint32 // 0x44 + St_auditid [4]uint32 // 0x48 + St_res01 uint32 // 0x58 + Ft_ccsid uint16 // 0x5c + Ft_flags uint16 // 0x5e + St_res01a [2]uint32 // 0x60 + St_res02 uint32 // 0x68 + St_blocks uint32 // 0x6c + St_opaque [3]uint8 // 0x70 + St_visible uint8 // 0x73 + St_reftime uint32 // 0x74 + St_fid uint64 // 0x78 + St_filefmt uint8 // 0x80 + St_fspflag2 uint8 // 0x81 + St_res03 [2]uint8 // 0x82 + St_ctimemsec uint32 // 0x84 + St_seclabel [8]uint8 // 0x88 + St_res04 [4]uint8 // 0x90 + // end of version 1 + _ uint32 // 0x94 + St_atime64 uint64 // 0x98 + St_mtime64 uint64 // 0xa0 + St_ctime64 uint64 // 0xa8 + St_createtime64 uint64 // 0xb0 + 
St_reftime64 uint64 // 0xb8 + _ uint64 // 0xc0 + St_res05 [16]uint8 // 0xc8 + // end of version 2 +} + +type BpxFilestatus struct { + Oflag1 byte + Oflag2 byte + Oflag3 byte + Oflag4 byte +} + +type BpxMode struct { + Ftype byte + Mode1 byte + Mode2 byte + Mode3 byte +} + +// Thr attribute structure for extended attributes +type Bpxyatt_t struct { // DSECT BPXYATT + Att_id [4]uint8 + Att_version uint16 + Att_res01 [2]uint8 + Att_setflags1 uint8 + Att_setflags2 uint8 + Att_setflags3 uint8 + Att_setflags4 uint8 + Att_mode uint32 + Att_uid uint32 + Att_gid uint32 + Att_opaquemask [3]uint8 + Att_visblmaskres uint8 + Att_opaque [3]uint8 + Att_visibleres uint8 + Att_size_h uint32 + Att_size_l uint32 + Att_atime uint32 + Att_mtime uint32 + Att_auditoraudit uint32 + Att_useraudit uint32 + Att_ctime uint32 + Att_reftime uint32 + // end of version 1 + Att_filefmt uint8 + Att_res02 [3]uint8 + Att_filetag uint32 + Att_res03 [8]uint8 + // end of version 2 + Att_atime64 uint64 + Att_mtime64 uint64 + Att_ctime64 uint64 + Att_reftime64 uint64 + Att_seclabel [8]uint8 + Att_ver3res02 [8]uint8 + // end of version 3 +} + +func BpxOpen(name string, options *BpxFilestatus, mode *BpxMode) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(options) + parms[3] = unsafe.Pointer(mode) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4OPN) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxClose(fd int32) (rv int32, rc int32, rn int32) { + var parms [4]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&rv) + parms[2] = unsafe.Pointer(&rc) + parms[3] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CLO) + return rv, rc, rn +} + +func BpxFileFStat(fd int32, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [6]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&stat_sz) + parms[2] = unsafe.Pointer(st) + parms[3] = unsafe.Pointer(&rv) + parms[4] = unsafe.Pointer(&rc) + parms[5] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4FST) + return rv, rc, rn +} + +func BpxFileStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&stat_sz) + parms[3] = unsafe.Pointer(st) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4STA) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxFileLStat(name string, st *Bpxystat_t) (rv int32, rc int32, rn int32) { + if len(name) < 1024 { + var namebuf [1024]byte + sz := int32(copy(namebuf[:], name)) + A2e(namebuf[:sz]) + st.St_id = [4]uint8{0xe2, 0xe3, 0xc1, 0xe3} + st.St_version = 2 + stat_sz := uint32(unsafe.Sizeof(*st)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&stat_sz) + parms[3] = unsafe.Pointer(st) + parms[4] = 
unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4LST) + return rv, rc, rn + } + return -1, -1, -1 +} + +func BpxChattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + if len(path) >= 1024 { + return -1, -1, -1 + } + var namebuf [1024]byte + sz := int32(copy(namebuf[:], path)) + A2e(namebuf[:sz]) + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&attr_sz) + parms[3] = unsafe.Pointer(attr) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CHR) + return rv, rc, rn +} + +func BpxLchattr(path string, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + if len(path) >= 1024 { + return -1, -1, -1 + } + var namebuf [1024]byte + sz := int32(copy(namebuf[:], path)) + A2e(namebuf[:sz]) + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [7]unsafe.Pointer + parms[0] = unsafe.Pointer(&sz) + parms[1] = unsafe.Pointer(&namebuf[0]) + parms[2] = unsafe.Pointer(&attr_sz) + parms[3] = unsafe.Pointer(attr) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4LCR) + return rv, rc, rn +} + +func BpxFchattr(fd int32, attr *Bpxyatt_t) (rv int32, rc int32, rn int32) { + attr_sz := uint32(unsafe.Sizeof(*attr)) + var parms [6]unsafe.Pointer + parms[0] = unsafe.Pointer(&fd) + parms[1] = unsafe.Pointer(&attr_sz) + parms[2] = unsafe.Pointer(attr) + parms[3] = unsafe.Pointer(&rv) + parms[4] = unsafe.Pointer(&rc) + parms[5] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4FCR) + return rv, rc, rn +} + +func BpxCondTimedWait(sec uint32, nsec uint32, events uint32, secrem *uint32, nsecrem *uint32) (rv int32, rc int32, rn int32) { + var parms [8]unsafe.Pointer + parms[0] = unsafe.Pointer(&sec) + parms[1] = unsafe.Pointer(&nsec) + parms[2] = unsafe.Pointer(&events) + parms[3] = unsafe.Pointer(secrem) + parms[4] = unsafe.Pointer(nsecrem) + parms[5] = unsafe.Pointer(&rv) + parms[6] = unsafe.Pointer(&rc) + parms[7] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4CTW) + return rv, rc, rn +} +func BpxGetthent(in *Pgtha, outlen *uint32, out unsafe.Pointer) (rv int32, rc int32, rn int32) { + var parms [7]unsafe.Pointer + inlen := uint32(26) // nothing else will work. 
Go says Pgtha is 28-byte because of alignment, but Pgtha is "packed" and must be 26-byte + parms[0] = unsafe.Pointer(&inlen) + parms[1] = unsafe.Pointer(&in) + parms[2] = unsafe.Pointer(outlen) + parms[3] = unsafe.Pointer(&out) + parms[4] = unsafe.Pointer(&rv) + parms[5] = unsafe.Pointer(&rc) + parms[6] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4GTH) + return rv, rc, rn +} +func ZosJobname() (jobname string, err error) { + var pgtha Pgtha + pgtha.Pid = uint32(Getpid()) + pgtha.Accesspid = PGTHA_CURRENT + pgtha.Flag1 = PGTHA_PROCESS + var out [256]byte + var outlen uint32 + outlen = 256 + rv, rc, rn := BpxGetthent(&pgtha, &outlen, unsafe.Pointer(&out[0])) + if rv == 0 { + gthc := []byte{0x87, 0xa3, 0x88, 0x83} // 'gthc' in ebcdic + ix := bytes.Index(out[:], gthc) + if ix == -1 { + err = fmt.Errorf("BPX4GTH: gthc return data not found") + return + } + jn := out[ix+80 : ix+88] // we didn't declare Pgthc, but jobname is 8-byte at offset 80 + E2a(jn) + jobname = string(bytes.TrimRight(jn, " ")) + + } else { + err = fmt.Errorf("BPX4GTH: rc=%d errno=%d reason=code=0x%x", rv, rc, rn) + } + return +} +func Bpx4ptq(code int32, data string) (rv int32, rc int32, rn int32) { + var userdata [8]byte + var parms [5]unsafe.Pointer + copy(userdata[:], data+" ") + A2e(userdata[:]) + parms[0] = unsafe.Pointer(&code) + parms[1] = unsafe.Pointer(&userdata[0]) + parms[2] = unsafe.Pointer(&rv) + parms[3] = unsafe.Pointer(&rc) + parms[4] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4PTQ) + return rv, rc, rn +} + +const ( + PT_TRACE_ME = 0 // Debug this process + PT_READ_I = 1 // Read a full word + PT_READ_D = 2 // Read a full word + PT_READ_U = 3 // Read control info + PT_WRITE_I = 4 //Write a full word + PT_WRITE_D = 5 //Write a full word + PT_CONTINUE = 7 //Continue the process + PT_KILL = 8 //Terminate the process + PT_READ_GPR = 11 // Read GPR, CR, PSW + PT_READ_FPR = 12 // Read FPR + PT_READ_VR = 13 // Read VR + PT_WRITE_GPR = 14 // Write GPR, CR, PSW + PT_WRITE_FPR = 15 // Write FPR + PT_WRITE_VR = 16 // Write VR + PT_READ_BLOCK = 17 // Read storage + PT_WRITE_BLOCK = 19 // Write storage + PT_READ_GPRH = 20 // Read GPRH + PT_WRITE_GPRH = 21 // Write GPRH + PT_REGHSET = 22 // Read all GPRHs + PT_ATTACH = 30 // Attach to a process + PT_DETACH = 31 // Detach from a process + PT_REGSET = 32 // Read all GPRs + PT_REATTACH = 33 // Reattach to a process + PT_LDINFO = 34 // Read loader info + PT_MULTI = 35 // Multi process mode + PT_LD64INFO = 36 // RMODE64 Info Area + PT_BLOCKREQ = 40 // Block request + PT_THREAD_INFO = 60 // Read thread info + PT_THREAD_MODIFY = 61 + PT_THREAD_READ_FOCUS = 62 + PT_THREAD_WRITE_FOCUS = 63 + PT_THREAD_HOLD = 64 + PT_THREAD_SIGNAL = 65 + PT_EXPLAIN = 66 + PT_EVENTS = 67 + PT_THREAD_INFO_EXTENDED = 68 + PT_REATTACH2 = 71 + PT_CAPTURE = 72 + PT_UNCAPTURE = 73 + PT_GET_THREAD_TCB = 74 + PT_GET_ALET = 75 + PT_SWAPIN = 76 + PT_EXTENDED_EVENT = 98 + PT_RECOVER = 99 // Debug a program check + PT_GPR0 = 0 // General purpose register 0 + PT_GPR1 = 1 // General purpose register 1 + PT_GPR2 = 2 // General purpose register 2 + PT_GPR3 = 3 // General purpose register 3 + PT_GPR4 = 4 // General purpose register 4 + PT_GPR5 = 5 // General purpose register 5 + PT_GPR6 = 6 // General purpose register 6 + PT_GPR7 = 7 // General purpose register 7 + PT_GPR8 = 8 // General purpose register 8 + PT_GPR9 = 9 // General purpose register 9 + PT_GPR10 = 10 // General purpose register 10 + PT_GPR11 = 11 // General purpose register 11 + PT_GPR12 = 12 // General purpose register 12 + PT_GPR13 = 13 // General 
purpose register 13 + PT_GPR14 = 14 // General purpose register 14 + PT_GPR15 = 15 // General purpose register 15 + PT_FPR0 = 16 // Floating point register 0 + PT_FPR1 = 17 // Floating point register 1 + PT_FPR2 = 18 // Floating point register 2 + PT_FPR3 = 19 // Floating point register 3 + PT_FPR4 = 20 // Floating point register 4 + PT_FPR5 = 21 // Floating point register 5 + PT_FPR6 = 22 // Floating point register 6 + PT_FPR7 = 23 // Floating point register 7 + PT_FPR8 = 24 // Floating point register 8 + PT_FPR9 = 25 // Floating point register 9 + PT_FPR10 = 26 // Floating point register 10 + PT_FPR11 = 27 // Floating point register 11 + PT_FPR12 = 28 // Floating point register 12 + PT_FPR13 = 29 // Floating point register 13 + PT_FPR14 = 30 // Floating point register 14 + PT_FPR15 = 31 // Floating point register 15 + PT_FPC = 32 // Floating point control register + PT_PSW = 40 // PSW + PT_PSW0 = 40 // Left half of the PSW + PT_PSW1 = 41 // Right half of the PSW + PT_CR0 = 42 // Control register 0 + PT_CR1 = 43 // Control register 1 + PT_CR2 = 44 // Control register 2 + PT_CR3 = 45 // Control register 3 + PT_CR4 = 46 // Control register 4 + PT_CR5 = 47 // Control register 5 + PT_CR6 = 48 // Control register 6 + PT_CR7 = 49 // Control register 7 + PT_CR8 = 50 // Control register 8 + PT_CR9 = 51 // Control register 9 + PT_CR10 = 52 // Control register 10 + PT_CR11 = 53 // Control register 11 + PT_CR12 = 54 // Control register 12 + PT_CR13 = 55 // Control register 13 + PT_CR14 = 56 // Control register 14 + PT_CR15 = 57 // Control register 15 + PT_GPRH0 = 58 // GP High register 0 + PT_GPRH1 = 59 // GP High register 1 + PT_GPRH2 = 60 // GP High register 2 + PT_GPRH3 = 61 // GP High register 3 + PT_GPRH4 = 62 // GP High register 4 + PT_GPRH5 = 63 // GP High register 5 + PT_GPRH6 = 64 // GP High register 6 + PT_GPRH7 = 65 // GP High register 7 + PT_GPRH8 = 66 // GP High register 8 + PT_GPRH9 = 67 // GP High register 9 + PT_GPRH10 = 68 // GP High register 10 + PT_GPRH11 = 69 // GP High register 11 + PT_GPRH12 = 70 // GP High register 12 + PT_GPRH13 = 71 // GP High register 13 + PT_GPRH14 = 72 // GP High register 14 + PT_GPRH15 = 73 // GP High register 15 + PT_VR0 = 74 // Vector register 0 + PT_VR1 = 75 // Vector register 1 + PT_VR2 = 76 // Vector register 2 + PT_VR3 = 77 // Vector register 3 + PT_VR4 = 78 // Vector register 4 + PT_VR5 = 79 // Vector register 5 + PT_VR6 = 80 // Vector register 6 + PT_VR7 = 81 // Vector register 7 + PT_VR8 = 82 // Vector register 8 + PT_VR9 = 83 // Vector register 9 + PT_VR10 = 84 // Vector register 10 + PT_VR11 = 85 // Vector register 11 + PT_VR12 = 86 // Vector register 12 + PT_VR13 = 87 // Vector register 13 + PT_VR14 = 88 // Vector register 14 + PT_VR15 = 89 // Vector register 15 + PT_VR16 = 90 // Vector register 16 + PT_VR17 = 91 // Vector register 17 + PT_VR18 = 92 // Vector register 18 + PT_VR19 = 93 // Vector register 19 + PT_VR20 = 94 // Vector register 20 + PT_VR21 = 95 // Vector register 21 + PT_VR22 = 96 // Vector register 22 + PT_VR23 = 97 // Vector register 23 + PT_VR24 = 98 // Vector register 24 + PT_VR25 = 99 // Vector register 25 + PT_VR26 = 100 // Vector register 26 + PT_VR27 = 101 // Vector register 27 + PT_VR28 = 102 // Vector register 28 + PT_VR29 = 103 // Vector register 29 + PT_VR30 = 104 // Vector register 30 + PT_VR31 = 105 // Vector register 31 + PT_PSWG = 106 // PSWG + PT_PSWG0 = 106 // Bytes 0-3 + PT_PSWG1 = 107 // Bytes 4-7 + PT_PSWG2 = 108 // Bytes 8-11 (IA high word) + PT_PSWG3 = 109 // Bytes 12-15 (IA low word) +) + +func 
Bpx4ptr(request int32, pid int32, addr unsafe.Pointer, data unsafe.Pointer, buffer unsafe.Pointer) (rv int32, rc int32, rn int32) { + var parms [8]unsafe.Pointer + parms[0] = unsafe.Pointer(&request) + parms[1] = unsafe.Pointer(&pid) + parms[2] = unsafe.Pointer(&addr) + parms[3] = unsafe.Pointer(&data) + parms[4] = unsafe.Pointer(&buffer) + parms[5] = unsafe.Pointer(&rv) + parms[6] = unsafe.Pointer(&rc) + parms[7] = unsafe.Pointer(&rn) + bpxcall(parms[:], BPX4PTR) + return rv, rc, rn +} + +func copyU8(val uint8, dest []uint8) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU8Arr(src, dest []uint8) int { + if len(dest) < len(src) { + return 0 + } + for i, v := range src { + dest[i] = v + } + return len(src) +} + +func copyU16(val uint16, dest []uint16) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU32(val uint32, dest []uint32) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} + +func copyU32Arr(src, dest []uint32) int { + if len(dest) < len(src) { + return 0 + } + for i, v := range src { + dest[i] = v + } + return len(src) +} + +func copyU64(val uint64, dest []uint64) int { + if len(dest) < 1 { + return 0 + } + dest[0] = val + return 1 +} diff --git a/vendor/golang.org/x/sys/unix/bpxsvc_zos.s b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s new file mode 100644 index 000000000..4bd4a1798 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/bpxsvc_zos.s @@ -0,0 +1,192 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "go_asm.h" +#include "textflag.h" + +// function to call USS assembly language services +// +// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bit64env.htm +// +// arg1 unsafe.Pointer array that ressembles an OS PLIST +// +// arg2 function offset as in +// doc: https://www.ibm.com/support/knowledgecenter/en/SSLTBW_3.1.0/com.ibm.zos.v3r1.bpxb100/bpx2cr_List_of_offsets.htm +// +// func bpxcall(plist []unsafe.Pointer, bpx_offset int64) + +TEXT ·bpxcall(SB), NOSPLIT|NOFRAME, $0 + MOVD plist_base+0(FP), R1 // r1 points to plist + MOVD bpx_offset+24(FP), R2 // r2 offset to BPX vector table + MOVD R14, R7 // save r14 + MOVD R15, R8 // save r15 + MOVWZ 16(R0), R9 + MOVWZ 544(R9), R9 + MOVWZ 24(R9), R9 // call vector in r9 + ADD R2, R9 // add offset to vector table + MOVWZ (R9), R9 // r9 points to entry point + BYTE $0x0D // BL R14,R9 --> basr r14,r9 + BYTE $0xE9 // clobbers 0,1,14,15 + MOVD R8, R15 // restore 15 + JMP R7 // return via saved return address + +// func A2e(arr [] byte) +// code page conversion from 819 to 1047 +TEXT ·A2e(SB), NOSPLIT|NOFRAME, $0 + MOVD arg_base+0(FP), R2 // pointer to arry of characters + MOVD arg_len+8(FP), R3 // count + XOR R0, R0 + XOR R1, R1 + BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) + + // ASCII -> EBCDIC conversion table: + BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 + BYTE $0x37; BYTE $0x2d; BYTE $0x2e; BYTE $0x2f + BYTE $0x16; BYTE $0x05; BYTE $0x15; BYTE $0x0b + BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f + BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 + BYTE $0x3c; BYTE $0x3d; BYTE $0x32; BYTE $0x26 + BYTE $0x18; BYTE $0x19; BYTE $0x3f; BYTE $0x27 + BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f + BYTE $0x40; BYTE $0x5a; BYTE $0x7f; BYTE $0x7b + BYTE $0x5b; BYTE $0x6c; BYTE $0x50; BYTE $0x7d + BYTE $0x4d; BYTE $0x5d; BYTE $0x5c; BYTE $0x4e + BYTE $0x6b; BYTE 
$0x60; BYTE $0x4b; BYTE $0x61 + BYTE $0xf0; BYTE $0xf1; BYTE $0xf2; BYTE $0xf3 + BYTE $0xf4; BYTE $0xf5; BYTE $0xf6; BYTE $0xf7 + BYTE $0xf8; BYTE $0xf9; BYTE $0x7a; BYTE $0x5e + BYTE $0x4c; BYTE $0x7e; BYTE $0x6e; BYTE $0x6f + BYTE $0x7c; BYTE $0xc1; BYTE $0xc2; BYTE $0xc3 + BYTE $0xc4; BYTE $0xc5; BYTE $0xc6; BYTE $0xc7 + BYTE $0xc8; BYTE $0xc9; BYTE $0xd1; BYTE $0xd2 + BYTE $0xd3; BYTE $0xd4; BYTE $0xd5; BYTE $0xd6 + BYTE $0xd7; BYTE $0xd8; BYTE $0xd9; BYTE $0xe2 + BYTE $0xe3; BYTE $0xe4; BYTE $0xe5; BYTE $0xe6 + BYTE $0xe7; BYTE $0xe8; BYTE $0xe9; BYTE $0xad + BYTE $0xe0; BYTE $0xbd; BYTE $0x5f; BYTE $0x6d + BYTE $0x79; BYTE $0x81; BYTE $0x82; BYTE $0x83 + BYTE $0x84; BYTE $0x85; BYTE $0x86; BYTE $0x87 + BYTE $0x88; BYTE $0x89; BYTE $0x91; BYTE $0x92 + BYTE $0x93; BYTE $0x94; BYTE $0x95; BYTE $0x96 + BYTE $0x97; BYTE $0x98; BYTE $0x99; BYTE $0xa2 + BYTE $0xa3; BYTE $0xa4; BYTE $0xa5; BYTE $0xa6 + BYTE $0xa7; BYTE $0xa8; BYTE $0xa9; BYTE $0xc0 + BYTE $0x4f; BYTE $0xd0; BYTE $0xa1; BYTE $0x07 + BYTE $0x20; BYTE $0x21; BYTE $0x22; BYTE $0x23 + BYTE $0x24; BYTE $0x25; BYTE $0x06; BYTE $0x17 + BYTE $0x28; BYTE $0x29; BYTE $0x2a; BYTE $0x2b + BYTE $0x2c; BYTE $0x09; BYTE $0x0a; BYTE $0x1b + BYTE $0x30; BYTE $0x31; BYTE $0x1a; BYTE $0x33 + BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x08 + BYTE $0x38; BYTE $0x39; BYTE $0x3a; BYTE $0x3b + BYTE $0x04; BYTE $0x14; BYTE $0x3e; BYTE $0xff + BYTE $0x41; BYTE $0xaa; BYTE $0x4a; BYTE $0xb1 + BYTE $0x9f; BYTE $0xb2; BYTE $0x6a; BYTE $0xb5 + BYTE $0xbb; BYTE $0xb4; BYTE $0x9a; BYTE $0x8a + BYTE $0xb0; BYTE $0xca; BYTE $0xaf; BYTE $0xbc + BYTE $0x90; BYTE $0x8f; BYTE $0xea; BYTE $0xfa + BYTE $0xbe; BYTE $0xa0; BYTE $0xb6; BYTE $0xb3 + BYTE $0x9d; BYTE $0xda; BYTE $0x9b; BYTE $0x8b + BYTE $0xb7; BYTE $0xb8; BYTE $0xb9; BYTE $0xab + BYTE $0x64; BYTE $0x65; BYTE $0x62; BYTE $0x66 + BYTE $0x63; BYTE $0x67; BYTE $0x9e; BYTE $0x68 + BYTE $0x74; BYTE $0x71; BYTE $0x72; BYTE $0x73 + BYTE $0x78; BYTE $0x75; BYTE $0x76; BYTE $0x77 + BYTE $0xac; BYTE $0x69; BYTE $0xed; BYTE $0xee + BYTE $0xeb; BYTE $0xef; BYTE $0xec; BYTE $0xbf + BYTE $0x80; BYTE $0xfd; BYTE $0xfe; BYTE $0xfb + BYTE $0xfc; BYTE $0xba; BYTE $0xae; BYTE $0x59 + BYTE $0x44; BYTE $0x45; BYTE $0x42; BYTE $0x46 + BYTE $0x43; BYTE $0x47; BYTE $0x9c; BYTE $0x48 + BYTE $0x54; BYTE $0x51; BYTE $0x52; BYTE $0x53 + BYTE $0x58; BYTE $0x55; BYTE $0x56; BYTE $0x57 + BYTE $0x8c; BYTE $0x49; BYTE $0xcd; BYTE $0xce + BYTE $0xcb; BYTE $0xcf; BYTE $0xcc; BYTE $0xe1 + BYTE $0x70; BYTE $0xdd; BYTE $0xde; BYTE $0xdb + BYTE $0xdc; BYTE $0x8d; BYTE $0x8e; BYTE $0xdf + +retry: + WORD $0xB9931022 // TROO 2,2,b'0001' + BVS retry + RET + +// func e2a(arr [] byte) +// code page conversion from 1047 to 819 +TEXT ·E2a(SB), NOSPLIT|NOFRAME, $0 + MOVD arg_base+0(FP), R2 // pointer to arry of characters + MOVD arg_len+8(FP), R3 // count + XOR R0, R0 + XOR R1, R1 + BYTE $0xA7; BYTE $0x15; BYTE $0x00; BYTE $0x82 // BRAS 1,(2+(256/2)) + + // EBCDIC -> ASCII conversion table: + BYTE $0x00; BYTE $0x01; BYTE $0x02; BYTE $0x03 + BYTE $0x9c; BYTE $0x09; BYTE $0x86; BYTE $0x7f + BYTE $0x97; BYTE $0x8d; BYTE $0x8e; BYTE $0x0b + BYTE $0x0c; BYTE $0x0d; BYTE $0x0e; BYTE $0x0f + BYTE $0x10; BYTE $0x11; BYTE $0x12; BYTE $0x13 + BYTE $0x9d; BYTE $0x0a; BYTE $0x08; BYTE $0x87 + BYTE $0x18; BYTE $0x19; BYTE $0x92; BYTE $0x8f + BYTE $0x1c; BYTE $0x1d; BYTE $0x1e; BYTE $0x1f + BYTE $0x80; BYTE $0x81; BYTE $0x82; BYTE $0x83 + BYTE $0x84; BYTE $0x85; BYTE $0x17; BYTE $0x1b + BYTE $0x88; BYTE $0x89; BYTE $0x8a; BYTE $0x8b + BYTE $0x8c; BYTE $0x05; BYTE 
$0x06; BYTE $0x07 + BYTE $0x90; BYTE $0x91; BYTE $0x16; BYTE $0x93 + BYTE $0x94; BYTE $0x95; BYTE $0x96; BYTE $0x04 + BYTE $0x98; BYTE $0x99; BYTE $0x9a; BYTE $0x9b + BYTE $0x14; BYTE $0x15; BYTE $0x9e; BYTE $0x1a + BYTE $0x20; BYTE $0xa0; BYTE $0xe2; BYTE $0xe4 + BYTE $0xe0; BYTE $0xe1; BYTE $0xe3; BYTE $0xe5 + BYTE $0xe7; BYTE $0xf1; BYTE $0xa2; BYTE $0x2e + BYTE $0x3c; BYTE $0x28; BYTE $0x2b; BYTE $0x7c + BYTE $0x26; BYTE $0xe9; BYTE $0xea; BYTE $0xeb + BYTE $0xe8; BYTE $0xed; BYTE $0xee; BYTE $0xef + BYTE $0xec; BYTE $0xdf; BYTE $0x21; BYTE $0x24 + BYTE $0x2a; BYTE $0x29; BYTE $0x3b; BYTE $0x5e + BYTE $0x2d; BYTE $0x2f; BYTE $0xc2; BYTE $0xc4 + BYTE $0xc0; BYTE $0xc1; BYTE $0xc3; BYTE $0xc5 + BYTE $0xc7; BYTE $0xd1; BYTE $0xa6; BYTE $0x2c + BYTE $0x25; BYTE $0x5f; BYTE $0x3e; BYTE $0x3f + BYTE $0xf8; BYTE $0xc9; BYTE $0xca; BYTE $0xcb + BYTE $0xc8; BYTE $0xcd; BYTE $0xce; BYTE $0xcf + BYTE $0xcc; BYTE $0x60; BYTE $0x3a; BYTE $0x23 + BYTE $0x40; BYTE $0x27; BYTE $0x3d; BYTE $0x22 + BYTE $0xd8; BYTE $0x61; BYTE $0x62; BYTE $0x63 + BYTE $0x64; BYTE $0x65; BYTE $0x66; BYTE $0x67 + BYTE $0x68; BYTE $0x69; BYTE $0xab; BYTE $0xbb + BYTE $0xf0; BYTE $0xfd; BYTE $0xfe; BYTE $0xb1 + BYTE $0xb0; BYTE $0x6a; BYTE $0x6b; BYTE $0x6c + BYTE $0x6d; BYTE $0x6e; BYTE $0x6f; BYTE $0x70 + BYTE $0x71; BYTE $0x72; BYTE $0xaa; BYTE $0xba + BYTE $0xe6; BYTE $0xb8; BYTE $0xc6; BYTE $0xa4 + BYTE $0xb5; BYTE $0x7e; BYTE $0x73; BYTE $0x74 + BYTE $0x75; BYTE $0x76; BYTE $0x77; BYTE $0x78 + BYTE $0x79; BYTE $0x7a; BYTE $0xa1; BYTE $0xbf + BYTE $0xd0; BYTE $0x5b; BYTE $0xde; BYTE $0xae + BYTE $0xac; BYTE $0xa3; BYTE $0xa5; BYTE $0xb7 + BYTE $0xa9; BYTE $0xa7; BYTE $0xb6; BYTE $0xbc + BYTE $0xbd; BYTE $0xbe; BYTE $0xdd; BYTE $0xa8 + BYTE $0xaf; BYTE $0x5d; BYTE $0xb4; BYTE $0xd7 + BYTE $0x7b; BYTE $0x41; BYTE $0x42; BYTE $0x43 + BYTE $0x44; BYTE $0x45; BYTE $0x46; BYTE $0x47 + BYTE $0x48; BYTE $0x49; BYTE $0xad; BYTE $0xf4 + BYTE $0xf6; BYTE $0xf2; BYTE $0xf3; BYTE $0xf5 + BYTE $0x7d; BYTE $0x4a; BYTE $0x4b; BYTE $0x4c + BYTE $0x4d; BYTE $0x4e; BYTE $0x4f; BYTE $0x50 + BYTE $0x51; BYTE $0x52; BYTE $0xb9; BYTE $0xfb + BYTE $0xfc; BYTE $0xf9; BYTE $0xfa; BYTE $0xff + BYTE $0x5c; BYTE $0xf7; BYTE $0x53; BYTE $0x54 + BYTE $0x55; BYTE $0x56; BYTE $0x57; BYTE $0x58 + BYTE $0x59; BYTE $0x5a; BYTE $0xb2; BYTE $0xd4 + BYTE $0xd6; BYTE $0xd2; BYTE $0xd3; BYTE $0xd5 + BYTE $0x30; BYTE $0x31; BYTE $0x32; BYTE $0x33 + BYTE $0x34; BYTE $0x35; BYTE $0x36; BYTE $0x37 + BYTE $0x38; BYTE $0x39; BYTE $0xb3; BYTE $0xdb + BYTE $0xdc; BYTE $0xd9; BYTE $0xda; BYTE $0x9f + +retry: + WORD $0xB9931022 // TROO 2,2,b'0001' + BVS retry + RET diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go deleted file mode 100644 index 7753fddea..000000000 --- a/vendor/golang.org/x/sys/unix/epoll_zos.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x - -package unix - -import ( - "sync" -) - -// This file simulates epoll on z/OS using poll. - -// Analogous to epoll_event on Linux. -// TODO(neeilan): Pad is because the Linux kernel expects a 96-bit struct. We never pass this to the kernel; remove? 
-type EpollEvent struct { - Events uint32 - Fd int32 - Pad int32 -} - -const ( - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDNORM = 0x40 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - // The following constants are part of the epoll API, but represent - // currently unsupported functionality on z/OS. - // EPOLL_CLOEXEC = 0x80000 - // EPOLLET = 0x80000000 - // EPOLLONESHOT = 0x40000000 - // EPOLLRDHUP = 0x2000 // Typically used with edge-triggered notis - // EPOLLEXCLUSIVE = 0x10000000 // Exclusive wake-up mode - // EPOLLWAKEUP = 0x20000000 // Relies on Linux's BLOCK_SUSPEND capability -) - -// TODO(neeilan): We can eliminate these epToPoll / pToEpoll calls by using identical mask values for POLL/EPOLL -// constants where possible The lower 16 bits of epoll events (uint32) can fit any system poll event (int16). - -// epToPollEvt converts epoll event field to poll equivalent. -// In epoll, Events is a 32-bit field, while poll uses 16 bits. -func epToPollEvt(events uint32) int16 { - var ep2p = map[uint32]int16{ - EPOLLIN: POLLIN, - EPOLLOUT: POLLOUT, - EPOLLHUP: POLLHUP, - EPOLLPRI: POLLPRI, - EPOLLERR: POLLERR, - } - - var pollEvts int16 = 0 - for epEvt, pEvt := range ep2p { - if (events & epEvt) != 0 { - pollEvts |= pEvt - } - } - - return pollEvts -} - -// pToEpollEvt converts 16 bit poll event bitfields to 32-bit epoll event fields. -func pToEpollEvt(revents int16) uint32 { - var p2ep = map[int16]uint32{ - POLLIN: EPOLLIN, - POLLOUT: EPOLLOUT, - POLLHUP: EPOLLHUP, - POLLPRI: EPOLLPRI, - POLLERR: EPOLLERR, - } - - var epollEvts uint32 = 0 - for pEvt, epEvt := range p2ep { - if (revents & pEvt) != 0 { - epollEvts |= epEvt - } - } - - return epollEvts -} - -// Per-process epoll implementation. -type epollImpl struct { - mu sync.Mutex - epfd2ep map[int]*eventPoll - nextEpfd int -} - -// eventPoll holds a set of file descriptors being watched by the process. A process can have multiple epoll instances. -// On Linux, this is an in-kernel data structure accessed through a fd. -type eventPoll struct { - mu sync.Mutex - fds map[int]*EpollEvent -} - -// epoll impl for this process. -var impl epollImpl = epollImpl{ - epfd2ep: make(map[int]*eventPoll), - nextEpfd: 0, -} - -func (e *epollImpl) epollcreate(size int) (epfd int, err error) { - e.mu.Lock() - defer e.mu.Unlock() - epfd = e.nextEpfd - e.nextEpfd++ - - e.epfd2ep[epfd] = &eventPoll{ - fds: make(map[int]*EpollEvent), - } - return epfd, nil -} - -func (e *epollImpl) epollcreate1(flag int) (fd int, err error) { - return e.epollcreate(4) -} - -func (e *epollImpl) epollctl(epfd int, op int, fd int, event *EpollEvent) (err error) { - e.mu.Lock() - defer e.mu.Unlock() - - ep, ok := e.epfd2ep[epfd] - if !ok { - - return EBADF - } - - switch op { - case EPOLL_CTL_ADD: - // TODO(neeilan): When we make epfds and fds disjoint, detect epoll - // loops here (instances watching each other) and return ELOOP. 
- if _, ok := ep.fds[fd]; ok { - return EEXIST - } - ep.fds[fd] = event - case EPOLL_CTL_MOD: - if _, ok := ep.fds[fd]; !ok { - return ENOENT - } - ep.fds[fd] = event - case EPOLL_CTL_DEL: - if _, ok := ep.fds[fd]; !ok { - return ENOENT - } - delete(ep.fds, fd) - - } - return nil -} - -// Must be called while holding ep.mu -func (ep *eventPoll) getFds() []int { - fds := make([]int, len(ep.fds)) - for fd := range ep.fds { - fds = append(fds, fd) - } - return fds -} - -func (e *epollImpl) epollwait(epfd int, events []EpollEvent, msec int) (n int, err error) { - e.mu.Lock() // in [rare] case of concurrent epollcreate + epollwait - ep, ok := e.epfd2ep[epfd] - - if !ok { - e.mu.Unlock() - return 0, EBADF - } - - pollfds := make([]PollFd, 4) - for fd, epollevt := range ep.fds { - pollfds = append(pollfds, PollFd{Fd: int32(fd), Events: epToPollEvt(epollevt.Events)}) - } - e.mu.Unlock() - - n, err = Poll(pollfds, msec) - if err != nil { - return n, err - } - - i := 0 - for _, pFd := range pollfds { - if pFd.Revents != 0 { - events[i] = EpollEvent{Fd: pFd.Fd, Events: pToEpollEvt(pFd.Revents)} - i++ - } - - if i == n { - break - } - } - - return n, nil -} - -func EpollCreate(size int) (fd int, err error) { - return impl.epollcreate(size) -} - -func EpollCreate1(flag int) (fd int, err error) { - return impl.epollcreate1(flag) -} - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - return impl.epollctl(epfd, op, fd, event) -} - -// Because EpollWait mutates events, the caller is expected to coordinate -// concurrent access if calling with the same epfd from multiple goroutines. -func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { - return impl.epollwait(epfd, events, msec) -} diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go deleted file mode 100644 index c8bde601e..000000000 --- a/vendor/golang.org/x/sys/unix/fstatfs_zos.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build zos && s390x - -package unix - -import ( - "unsafe" -) - -// This file simulates fstatfs on z/OS using fstatvfs and w_getmntent. 
- -func Fstatfs(fd int, stat *Statfs_t) (err error) { - var stat_v Statvfs_t - err = Fstatvfs(fd, &stat_v) - if err == nil { - // populate stat - stat.Type = 0 - stat.Bsize = stat_v.Bsize - stat.Blocks = stat_v.Blocks - stat.Bfree = stat_v.Bfree - stat.Bavail = stat_v.Bavail - stat.Files = stat_v.Files - stat.Ffree = stat_v.Ffree - stat.Fsid = stat_v.Fsid - stat.Namelen = stat_v.Namemax - stat.Frsize = stat_v.Frsize - stat.Flags = stat_v.Flag - for passn := 0; passn < 5; passn++ { - switch passn { - case 0: - err = tryGetmntent64(stat) - break - case 1: - err = tryGetmntent128(stat) - break - case 2: - err = tryGetmntent256(stat) - break - case 3: - err = tryGetmntent512(stat) - break - case 4: - err = tryGetmntent1024(stat) - break - default: - break - } - //proceed to return if: err is nil (found), err is nonnil but not ERANGE (another error occurred) - if err == nil || err != nil && err != ERANGE { - break - } - } - } - return err -} - -func tryGetmntent64(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [64]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent128(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [128]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent256(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [256]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent512(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [512]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} - -func tryGetmntent1024(stat *Statfs_t) (err error) { - var mnt_ent_buffer struct { - header W_Mnth - filesys_info [1024]W_Mntent - } - var buffer_size int = int(unsafe.Sizeof(mnt_ent_buffer)) - fs_count, err := W_Getmntent((*byte)(unsafe.Pointer(&mnt_ent_buffer)), 
buffer_size) - if err != nil { - return err - } - err = ERANGE //return ERANGE if no match is found in this batch - for i := 0; i < fs_count; i++ { - if stat.Fsid == uint64(mnt_ent_buffer.filesys_info[i].Dev) { - stat.Type = uint32(mnt_ent_buffer.filesys_info[i].Fstname[0]) - err = nil - break - } - } - return err -} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index fdcaa974d..4ed2e488b 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -263,6 +263,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -549,6 +550,7 @@ ccflags="$@" $2 !~ "NLA_TYPE_MASK" && $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || + $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index 4b68e5978..7f602ffd2 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fd45fe529..3a5e776f8 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [ func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) { return mapper.Mremap(oldData, newLength, flags) } + +func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr)) + return unsafe.Pointer(xaddr), err +} diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 4d0a3430e..0482408d7 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 130398b6b..b903c0060 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin +//go:build darwin || zos package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_zos.go b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go new file mode 100644 index 000000000..3e53dbc02 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sockcmsg_zos.go @@ -0,0 +1,58 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Socket control messages + +package unix + +import "unsafe" + +// UnixCredentials encodes credentials into a socket control message +// for sending to another process. This can be used for +// authentication. +func UnixCredentials(ucred *Ucred) []byte { + b := make([]byte, CmsgSpace(SizeofUcred)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_SOCKET + h.Type = SCM_CREDENTIALS + h.SetLen(CmsgLen(SizeofUcred)) + *(*Ucred)(h.data(0)) = *ucred + return b +} + +// ParseUnixCredentials decodes a socket control message that contains +// credentials in a Ucred structure. To receive such a message, the +// SO_PASSCRED option must be enabled on the socket. +func ParseUnixCredentials(m *SocketControlMessage) (*Ucred, error) { + if m.Header.Level != SOL_SOCKET { + return nil, EINVAL + } + if m.Header.Type != SCM_CREDENTIALS { + return nil, EINVAL + } + ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) + return &ucred, nil +} + +// PktInfo4 encodes Inet4Pktinfo into a socket control message of type IP_PKTINFO. +func PktInfo4(info *Inet4Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet4Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IP + h.Type = IP_PKTINFO + h.SetLen(CmsgLen(SizeofInet4Pktinfo)) + *(*Inet4Pktinfo)(h.data(0)) = *info + return b +} + +// PktInfo6 encodes Inet6Pktinfo into a socket control message of type IPV6_PKTINFO. +func PktInfo6(info *Inet6Pktinfo) []byte { + b := make([]byte, CmsgSpace(SizeofInet6Pktinfo)) + h := (*Cmsghdr)(unsafe.Pointer(&b[0])) + h.Level = SOL_IPV6 + h.Type = IPV6_PKTINFO + h.SetLen(CmsgLen(SizeofInet6Pktinfo)) + *(*Inet6Pktinfo)(h.data(0)) = *info + return b +} diff --git a/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s new file mode 100644 index 000000000..3c4f33cb6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/symaddr_zos_s390x.s @@ -0,0 +1,75 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build zos && s390x && gc + +#include "textflag.h" + +// provide the address of function variable to be fixed up. 
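Aside: each TEXT stanza in this file just returns the link-time address of a package-level function variable (·Pipe2, ·Flock, ·Getxattr, and so on), so the Go side can overwrite that variable on first use with either the real implementation or an ENOSYS fallback. A pure-Go sketch of the other half of the pattern, using a hypothetical Frobnicate symbol and wiring the variable up in init() instead of through the assembly getter:

package main

import "errors"

// Frobnicate is a hypothetical trampoline target; the real symbols are
// Pipe2, Flock, Getxattr, etc. The real package obtains &Frobnicate via
// a //go:nosplit assembly getter; this sketch assigns in init() instead.
var Frobnicate func() error

var errNoSys = errors.New("ENOSYS") // stand-in for unix.ENOSYS

func init() { Frobnicate = enterFrobnicate }

func probeSucceeds() bool { return false } // stands in for funcptrtest

func implFrobnicate() error  { return nil }      // the real wrapper would live here
func errorFrobnicate() error { return errNoSys } // fallback for old system levels

// enterFrobnicate runs once: it probes, rewires Frobnicate, then forwards.
func enterFrobnicate() error {
	if probeSucceeds() {
		Frobnicate = implFrobnicate
	} else {
		Frobnicate = errorFrobnicate
	}
	return Frobnicate()
}

func main() {
	_ = Frobnicate() // first call probes and rewires
	_ = Frobnicate() // later calls dispatch directly
}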
+ +TEXT ·getPipe2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Pipe2(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_FlockAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Flock(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_GetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Getxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_NanosleepAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Nanosleep(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_SetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Setxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_Wait4Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Wait4(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_MountAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mount(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UnmountAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unmount(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UtimesNanoAtAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·UtimesNanoAt(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_UtimesNanoAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·UtimesNano(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_MkfifoatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mkfifoat(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_ChtagAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Chtag(SB), R8 + MOVD R8, ret+0(FP) + RET + +TEXT ·get_ReadlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Readlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 59542a897..4cc7b0059 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -542,6 +542,18 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { } } +//sys pthread_chdir_np(path string) (err error) + +func PthreadChdir(path string) (err error) { + return pthread_chdir_np(path) +} + +//sys pthread_fchdir_np(fd int) (err error) + +func PthreadFchdir(fd int) (err error) { + return pthread_fchdir_np(fd) +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 77081de8c..4e92e5aa4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) { return mapper.Munmap(b) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset) + return unsafe.Pointer(xaddr), err +} + +func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) { + return mapper.munmap(uintptr(addr), length) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index b473038c6..312ae6ac1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -4,11 +4,21 @@ //go:build zos && s390x +// Many of the following syscalls are not available on all versions of z/OS. +// Some missing calls have legacy implementations/simulations but others +// will be missing completely. 
To achieve consistent failing behaviour on +// legacy systems, we first test the function pointer via a safeloading +// mechanism to see if the function exists on a given system. Then execution +// is branched to either continue the function call, or return an error. + package unix import ( "bytes" "fmt" + "os" + "reflect" + "regexp" "runtime" "sort" "strings" @@ -17,17 +27,205 @@ import ( "unsafe" ) +//go:noescape +func initZosLibVec() + +//go:noescape +func GetZosLibVec() uintptr + +func init() { + initZosLibVec() + r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACE\x00"))[0]))) + if r0 != 0 { + n, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) + ZosTraceLevel = int(n) + r0, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS_____GETENV_A<<4, uintptr(unsafe.Pointer(&([]byte("__ZOS_XSYSTRACEFD\x00"))[0]))) + if r0 != 0 { + fd, _, _ := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___ATOI_A<<4, r0) + f := os.NewFile(fd, "zostracefile") + if f != nil { + ZosTracefile = f + } + } + + } +} + +//go:noescape +func CallLeFuncWithErr(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) + +//go:noescape +func CallLeFuncWithPtrReturn(funcdesc uintptr, parms ...uintptr) (ret, errno2 uintptr, err Errno) + +// ------------------------------- +// pointer validity test +// good pointer returns 0 +// bad pointer returns 1 +// +//go:nosplit +func ptrtest(uintptr) uint64 + +// Load memory at ptr location with error handling if the location is invalid +// +//go:noescape +func safeload(ptr uintptr) (value uintptr, error uintptr) + const ( - O_CLOEXEC = 0 // Dummy value (not supported). - AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX + entrypointLocationOffset = 8 // From function descriptor + + xplinkEyecatcher = 0x00c300c500c500f1 // ".C.E.E.1" + eyecatcherOffset = 16 // From function entrypoint (negative) + ppa1LocationOffset = 8 // From function entrypoint (negative) + + nameLenOffset = 0x14 // From PPA1 start + nameOffset = 0x16 // From PPA1 start ) -func syscall_syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall_syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) -func syscall_rawsyscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) +func getPpaOffset(funcptr uintptr) int64 { + entrypoint, err := safeload(funcptr + entrypointLocationOffset) + if err != 0 { + return -1 + } + + // XPLink functions have ".C.E.E.1" as the first 8 bytes (EBCDIC) + val, err := safeload(entrypoint - eyecatcherOffset) + if err != 0 { + return -1 + } + if val != xplinkEyecatcher { + return -1 + } + + ppaoff, err := safeload(entrypoint - ppa1LocationOffset) + if err != 0 { + return -1 + } + + ppaoff >>= 32 + return int64(ppaoff) +} + +//------------------------------- +// function descriptor pointer validity test +// good pointer returns 0 +// bad pointer returns 1 + +// TODO: currently mksyscall_zos_s390x.go generate empty string for funcName +// have correct funcName pass to the funcptrtest function +func funcptrtest(funcptr uintptr, funcName string) uint64 { + entrypoint, err := safeload(funcptr + entrypointLocationOffset) + if err != 0 { + return 1 + } + + ppaoff := 
getPpaOffset(funcptr) + if ppaoff == -1 { + return 1 + } + + // PPA1 offset value is from the start of the entire function block, not the entrypoint + ppa1 := (entrypoint - eyecatcherOffset) + uintptr(ppaoff) + + nameLen, err := safeload(ppa1 + nameLenOffset) + if err != 0 { + return 1 + } + + nameLen >>= 48 + if nameLen > 128 { + return 1 + } + + // no function name input to argument end here + if funcName == "" { + return 0 + } + + var funcname [128]byte + for i := 0; i < int(nameLen); i += 8 { + v, err := safeload(ppa1 + nameOffset + uintptr(i)) + if err != 0 { + return 1 + } + funcname[i] = byte(v >> 56) + funcname[i+1] = byte(v >> 48) + funcname[i+2] = byte(v >> 40) + funcname[i+3] = byte(v >> 32) + funcname[i+4] = byte(v >> 24) + funcname[i+5] = byte(v >> 16) + funcname[i+6] = byte(v >> 8) + funcname[i+7] = byte(v) + } + + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l + []uintptr{uintptr(unsafe.Pointer(&funcname[0])), nameLen}) + + name := string(funcname[:nameLen]) + if name != funcName { + return 1 + } + + return 0 +} + +// For detection of capabilities on a system. +// Is function descriptor f a valid function? +func isValidLeFunc(f uintptr) error { + ret := funcptrtest(f, "") + if ret != 0 { + return fmt.Errorf("Bad pointer, not an LE function ") + } + return nil +} + +// Retrieve function name from descriptor +func getLeFuncName(f uintptr) (string, error) { + // assume it has been checked, only check ppa1 validity here + entry := ((*[2]uintptr)(unsafe.Pointer(f)))[1] + preamp := ((*[4]uint32)(unsafe.Pointer(entry - eyecatcherOffset))) + + offsetPpa1 := preamp[2] + if offsetPpa1 > 0x0ffff { + return "", fmt.Errorf("PPA1 offset seems too big 0x%x\n", offsetPpa1) + } + + ppa1 := uintptr(unsafe.Pointer(preamp)) + uintptr(offsetPpa1) + res := ptrtest(ppa1) + if res != 0 { + return "", fmt.Errorf("PPA1 address not valid") + } + + size := *(*uint16)(unsafe.Pointer(ppa1 + nameLenOffset)) + if size > 128 { + return "", fmt.Errorf("Function name seems too long, length=%d\n", size) + } + + var name [128]byte + funcname := (*[128]byte)(unsafe.Pointer(ppa1 + nameOffset)) + copy(name[0:size], funcname[0:size]) + + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, // __e2a_l + []uintptr{uintptr(unsafe.Pointer(&name[0])), uintptr(size)}) + + return string(name[:size]), nil +} + +// Check z/OS version +func zosLeVersion() (version, release uint32) { + p1 := (*(*uintptr)(unsafe.Pointer(uintptr(1208)))) >> 32 + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 88))) + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 8))) + p1 = *(*uintptr)(unsafe.Pointer(uintptr(p1 + 984))) + vrm := *(*uint32)(unsafe.Pointer(p1 + 80)) + version = (vrm & 0x00ff0000) >> 16 + release = (vrm & 0x0000ff00) >> 8 + return +} + +// returns a zos C FILE * for stdio fd 0, 1, 2 +func ZosStdioFilep(fd int32) uintptr { + return uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(*(*uint64)(unsafe.Pointer(uintptr(uint64(*(*uint32)(unsafe.Pointer(uintptr(1208)))) + 80))) + uint64((fd+2)<<3)))))))) +} func copyStat(stat *Stat_t, statLE *Stat_LE_t) { stat.Dev = uint64(statLE.Dev) @@ -65,6 +263,21 @@ func (d *Dirent) NameString() string { } } +func DecodeData(dest []byte, sz int, val uint64) { + for i := 0; i < sz; i++ { + dest[sz-1-i] = byte((val >> (uint64(i * 8))) & 0xff) + } +} + +func EncodeData(data []byte) uint64 { + var value uint64 + sz := len(data) + for i := 0; i < sz; i++ { + value |= uint64(data[i]) << uint64(((sz - i - 1) * 8)) + } + return value +} + func (sa 
*SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { if sa.Port < 0 || sa.Port > 0xFFFF { return nil, 0, EINVAL @@ -74,7 +287,9 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) { p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) - sa.raw.Addr = sa.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -88,7 +303,9 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) { p[0] = byte(sa.Port >> 8) p[1] = byte(sa.Port) sa.raw.Scope_id = sa.ZoneId - sa.raw.Addr = sa.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil } @@ -146,7 +363,9 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { sa := new(SockaddrInet4) p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) - sa.Addr = pp.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } return sa, nil case AF_INET6: @@ -155,7 +374,9 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) { p := (*[2]byte)(unsafe.Pointer(&pp.Port)) sa.Port = int(p[0])<<8 + int(p[1]) sa.ZoneId = pp.Scope_id - sa.Addr = pp.Addr + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } return sa, nil } return nil, EAFNOSUPPORT @@ -177,6 +398,43 @@ func Accept(fd int) (nfd int, sa Sockaddr, err error) { return } +func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) { + var rsa RawSockaddrAny + var len _Socklen = SizeofSockaddrAny + nfd, err = accept4(fd, &rsa, &len, flags) + if err != nil { + return + } + if len > SizeofSockaddrAny { + panic("RawSockaddrAny too small") + } + // TODO(neeilan): Remove 0 in call + sa, err = anyToSockaddr(0, &rsa) + if err != nil { + Close(nfd) + nfd = 0 + } + return +} + +func Ctermid() (tty string, err error) { + var termdev [1025]byte + runtime.EnterSyscall() + r0, err2, err1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___CTERMID_A<<4, uintptr(unsafe.Pointer(&termdev[0]))) + runtime.ExitSyscall() + if r0 == 0 { + return "", fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) + } + s := string(termdev[:]) + idx := strings.Index(s, string(rune(0))) + if idx == -1 { + tty = s + } else { + tty = s[:idx] + } + return +} + func (iov *Iovec) SetLen(length int) { iov.Len = uint64(length) } @@ -190,10 +448,16 @@ func (cmsg *Cmsghdr) SetLen(length int) { } //sys fcntl(fd int, cmd int, arg int) (val int, err error) +//sys Flistxattr(fd int, dest []byte) (sz int, err error) = SYS___FLISTXATTR_A +//sys Fremovexattr(fd int, attr string) (err error) = SYS___FREMOVEXATTR_A //sys read(fd int, p []byte) (n int, err error) //sys write(fd int, p []byte) (n int, err error) +//sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) = SYS___FGETXATTR_A +//sys Fsetxattr(fd int, attr string, data []byte, flag int) (err error) = SYS___FSETXATTR_A + //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A +//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) = SYS___ACCEPT4_A //sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___BIND_A //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = SYS___CONNECT_A //sysnb getgroups(n int, list *_Gid_t) (nn int, err error) @@ -204,6 +468,7 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sysnb socketpair(domain int, typ int, proto int, fd 
*[2]int32) (err error) //sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETPEERNAME_A //sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) = SYS___GETSOCKNAME_A +//sys Removexattr(path string, attr string) (err error) = SYS___REMOVEXATTR_A //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = SYS___RECVFROM_A //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = SYS___SENDTO_A //sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error) = SYS___RECVMSG_A @@ -212,6 +477,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys munmap(addr uintptr, length uintptr) (err error) = SYS_MUNMAP //sys ioctl(fd int, req int, arg uintptr) (err error) = SYS_IOCTL //sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = SYS_IOCTL +//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) = SYS_SHMAT +//sys shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) = SYS_SHMCTL64 +//sys shmdt(addr uintptr) (err error) = SYS_SHMDT +//sys shmget(key int, size int, flag int) (id int, err error) = SYS_SHMGET //sys Access(path string, mode uint32) (err error) = SYS___ACCESS_A //sys Chdir(path string) (err error) = SYS___CHDIR_A @@ -220,14 +489,31 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys Creat(path string, mode uint32) (fd int, err error) = SYS___CREAT_A //sys Dup(oldfd int) (fd int, err error) //sys Dup2(oldfd int, newfd int) (err error) +//sys Dup3(oldfd int, newfd int, flags int) (err error) = SYS_DUP3 +//sys Dirfd(dirp uintptr) (fd int, err error) = SYS_DIRFD +//sys EpollCreate(size int) (fd int, err error) = SYS_EPOLL_CREATE +//sys EpollCreate1(flags int) (fd int, err error) = SYS_EPOLL_CREATE1 +//sys EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) = SYS_EPOLL_CTL +//sys EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) = SYS_EPOLL_PWAIT +//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_WAIT //sys Errno2() (er2 int) = SYS___ERRNO2 -//sys Err2ad() (eadd *int) = SYS___ERR2AD +//sys Eventfd(initval uint, flags int) (fd int, err error) = SYS_EVENTFD //sys Exit(code int) +//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FACCESSAT_A + +func Faccessat2(dirfd int, path string, mode uint32, flags int) (err error) { + return Faccessat(dirfd, path, mode, flags) +} + //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) = SYS___FCHMODAT_A //sys Fchown(fd int, uid int, gid int) (err error) +//sys Fchownat(fd int, path string, uid int, gid int, flags int) (err error) = SYS___FCHOWNAT_A //sys FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) = SYS_FCNTL +//sys Fdatasync(fd int) (err error) = SYS_FDATASYNC //sys fstat(fd int, stat *Stat_LE_t) (err error) +//sys fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) = SYS___FSTATAT_A func Fstat(fd int, stat *Stat_t) (err error) { var statLE Stat_LE_t @@ -236,28 +522,208 @@ func Fstat(fd int, stat *Stat_t) (err error) { return } +func Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) { + var statLE Stat_LE_t + err = fstatat(dirfd, path, &statLE, flags) + copyStat(stat, &statLE) + return +} + +func impl_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_GetxattrAddr() *(func(path string, attr string, dest []byte) (sz int, err error)) + +var Getxattr = enter_Getxattr + +func enter_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + funcref := get_GetxattrAddr() + if validGetxattr() { + *funcref = impl_Getxattr + } else { + *funcref = error_Getxattr + } + return (*funcref)(path, attr, dest) +} + +func error_Getxattr(path string, attr string, dest []byte) (sz int, err error) { + return -1, ENOSYS +} + +func validGetxattr() bool { + if funcptrtest(GetZosLibVec()+SYS___GETXATTR_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___GETXATTR_A<<4); err == nil { + return name == "__getxattr_a" + } + } + return false +} + +//sys Lgetxattr(link string, attr string, dest []byte) (sz int, err error) = SYS___LGETXATTR_A +//sys Lsetxattr(path string, attr string, data []byte, flags int) (err error) = SYS___LSETXATTR_A + +func impl_Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) + +var Setxattr = enter_Setxattr + +func enter_Setxattr(path string, attr string, data []byte, flags int) (err error) { + funcref := get_SetxattrAddr() + if validSetxattr() { + *funcref = impl_Setxattr + } else { + *funcref = error_Setxattr + } + return (*funcref)(path, attr, data, flags) +} + +func error_Setxattr(path string, attr string, data []byte, flags int) (err error) { + return ENOSYS +} + +func validSetxattr() bool { + if funcptrtest(GetZosLibVec()+SYS___SETXATTR_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___SETXATTR_A<<4); err == nil { + return name == "__setxattr_a" + } + } + return false +} + +//sys Fstatfs(fd int, buf *Statfs_t) (err error) = SYS_FSTATFS //sys Fstatvfs(fd int, stat *Statvfs_t) (err error) = SYS_FSTATVFS //sys Fsync(fd int) (err error) +//sys Futimes(fd int, tv []Timeval) (err error) = SYS_FUTIMES +//sys Futimesat(dirfd int, path string, tv []Timeval) (err error) = SYS___FUTIMESAT_A //sys Ftruncate(fd int, length int64) (err error) -//sys Getpagesize() (pgsize int) = SYS_GETPAGESIZE +//sys Getrandom(buf []byte, flags int) (n int, err error) = SYS_GETRANDOM +//sys InotifyInit() (fd int, err error) = SYS_INOTIFY_INIT +//sys InotifyInit1(flags int) (fd int, err error) = SYS_INOTIFY_INIT1 +//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) = SYS___INOTIFY_ADD_WATCH_A 
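Aside: the Getxattr/Setxattr wrappers above are the template repeated throughout this file. The exported name starts out pointing at an enter_* probe; the first call checks the LE function descriptor (funcptrtest, then getLeFuncName comparing the EBCDIC-decoded PPA1 name), and the function variable is overwritten with either the real impl_* or an ENOSYS stub, so every later call dispatches directly. Callers never see the probe; a sketch of ordinary use (path and attribute name are illustrative):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	buf := make([]byte, 256)
	sz, err := unix.Getxattr("/tmp/file", "user.tag", buf)
	switch err {
	case nil:
		fmt.Printf("value: %q\n", buf[:sz])
	case unix.ENOSYS: // this system level lacks __getxattr_a
		fmt.Println("xattrs not supported here")
	default:
		fmt.Println("getxattr:", err)
	}
}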
+//sys InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) = SYS_INOTIFY_RM_WATCH +//sys Listxattr(path string, dest []byte) (sz int, err error) = SYS___LISTXATTR_A +//sys Llistxattr(path string, dest []byte) (sz int, err error) = SYS___LLISTXATTR_A +//sys Lremovexattr(path string, attr string) (err error) = SYS___LREMOVEXATTR_A +//sys Lutimes(path string, tv []Timeval) (err error) = SYS___LUTIMES_A //sys Mprotect(b []byte, prot int) (err error) = SYS_MPROTECT //sys Msync(b []byte, flags int) (err error) = SYS_MSYNC +//sys Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) = SYS___CONSOLE2 + +// Pipe2 begin + +//go:nosplit +func getPipe2Addr() *(func([]int, int) error) + +var Pipe2 = pipe2Enter + +func pipe2Enter(p []int, flags int) (err error) { + if funcptrtest(GetZosLibVec()+SYS_PIPE2<<4, "") == 0 { + *getPipe2Addr() = pipe2Impl + } else { + *getPipe2Addr() = pipe2Error + } + return (*getPipe2Addr())(p, flags) +} + +func pipe2Impl(p []int, flags int) (err error) { + var pp [2]_C_int + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE2<<4, uintptr(unsafe.Pointer(&pp[0])), uintptr(flags)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } else { + p[0] = int(pp[0]) + p[1] = int(pp[1]) + } + return +} +func pipe2Error(p []int, flags int) (err error) { + return fmt.Errorf("Pipe2 is not available on this system") +} + +// Pipe2 end + //sys Poll(fds []PollFd, timeout int) (n int, err error) = SYS_POLL + +func Readdir(dir uintptr) (dirent *Dirent, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_A<<4, uintptr(dir)) + runtime.ExitSyscall() + dirent = (*Dirent)(unsafe.Pointer(r0)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//sys Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) = SYS___READDIR_R_A +//sys Statfs(path string, buf *Statfs_t) (err error) = SYS___STATFS_A +//sys Syncfs(fd int) (err error) = SYS_SYNCFS //sys Times(tms *Tms) (ticks uintptr, err error) = SYS_TIMES //sys W_Getmntent(buff *byte, size int) (lastsys int, err error) = SYS_W_GETMNTENT //sys W_Getmntent_A(buff *byte, size int) (lastsys int, err error) = SYS___W_GETMNTENT_A //sys mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) = SYS___MOUNT_A -//sys unmount(filesystem string, mtm int) (err error) = SYS___UMOUNT_A +//sys unmount_LE(filesystem string, mtm int) (err error) = SYS___UMOUNT_A //sys Chroot(path string) (err error) = SYS___CHROOT_A //sys Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) = SYS_SELECT -//sysnb Uname(buf *Utsname) (err error) = SYS___UNAME_A +//sysnb Uname(buf *Utsname) (err error) = SYS_____OSNAME_A +//sys Unshare(flags int) (err error) = SYS_UNSHARE func Ptsname(fd int) (name string, err error) { - r0, _, e1 := syscall_syscall(SYS___PTSNAME_A, uintptr(fd), 0, 0) - name = u2s(unsafe.Pointer(r0)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___PTSNAME_A<<4, uintptr(fd)) + runtime.ExitSyscall() + if r0 == 0 { + err = errnoErr2(e1, e2) + } else { + name = u2s(unsafe.Pointer(r0)) } return } @@ -272,13 +738,19 @@ func u2s(cstr unsafe.Pointer) string { } func Close(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) + runtime.ExitSyscall() for i := 0; e1 == EAGAIN && i < 10; 
i++ { - _, _, _ = syscall_syscall(SYS_USLEEP, uintptr(10), 0, 0) - _, _, e1 = syscall_syscall(SYS_CLOSE, uintptr(fd), 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_USLEEP<<4, uintptr(10)) + runtime.ExitSyscall() + runtime.EnterSyscall() + r0, e2, e1 = CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSE<<4, uintptr(fd)) + runtime.ExitSyscall() } - if e1 != 0 { - err = errnoErr(e1) + if r0 != 0 { + err = errnoErr2(e1, e2) } return } @@ -288,9 +760,15 @@ func Madvise(b []byte, advice int) (err error) { return } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + //sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A -//sysnb Getegid() (egid int) -//sysnb Geteuid() (uid int) //sysnb Getgid() (gid int) //sysnb Getpid() (pid int) //sysnb Getpgid(pid int) (pgid int, err error) = SYS_GETPGID @@ -317,11 +795,14 @@ func Getrusage(who int, rusage *Rusage) (err error) { return } +//sys Getegid() (egid int) = SYS_GETEGID +//sys Geteuid() (euid int) = SYS_GETEUID //sysnb Getsid(pid int) (sid int, err error) = SYS_GETSID //sysnb Getuid() (uid int) //sysnb Kill(pid int, sig Signal) (err error) //sys Lchown(path string, uid int, gid int) (err error) = SYS___LCHOWN_A //sys Link(path string, link string) (err error) = SYS___LINK_A +//sys Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) = SYS___LINKAT_A //sys Listen(s int, n int) (err error) //sys lstat(path string, stat *Stat_LE_t) (err error) = SYS___LSTAT_A @@ -332,15 +813,150 @@ func Lstat(path string, stat *Stat_t) (err error) { return } +// for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/ +func isSpecialPath(path []byte) (v bool) { + var special = [4][8]byte{ + [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'}, + [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'}, + [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'}, + [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}} + + var i, j int + for i = 0; i < len(special); i++ { + for j = 0; j < len(special[i]); j++ { + if path[j] != special[i][j] { + break + } + } + if j == len(special[i]) { + return true + } + } + return false +} + +func realpath(srcpath string, abspath []byte) (pathlen int, errno int) { + var source [1024]byte + copy(source[:], srcpath) + source[len(srcpath)] = 0 + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___REALPATH_A<<4, //__realpath_a() + []uintptr{uintptr(unsafe.Pointer(&source[0])), + uintptr(unsafe.Pointer(&abspath[0]))}) + if ret != 0 { + index := bytes.IndexByte(abspath[:], byte(0)) + if index != -1 { + return index, 0 + } + } else { + errptr := (*int)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) //__errno() + return 0, *errptr + } + return 0, 245 // EBADDATA 245 +} + +func Readlink(path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + n = int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___READLINK_A<<4, + []uintptr{uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))})) + runtime.KeepAlive(unsafe.Pointer(_p0)) + if n == -1 { + value := *(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, []uintptr{}))) + err = errnoErr(Errno(value)) + } 
else { + if buf[0] == '$' { + if isSpecialPath(buf[1:9]) { + cnt, err1 := realpath(path, buf) + if err1 == 0 { + n = cnt + } + } + } + } + return +} + +func impl_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + return n, err + } else { + if buf[0] == '$' { + if isSpecialPath(buf[1:9]) { + cnt, err1 := realpath(path, buf) + if err1 == 0 { + n = cnt + } + } + } + } + return +} + +//go:nosplit +func get_ReadlinkatAddr() *(func(dirfd int, path string, buf []byte) (n int, err error)) + +var Readlinkat = enter_Readlinkat + +func enter_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + funcref := get_ReadlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___READLINKAT_A<<4, "") == 0 { + *funcref = impl_Readlinkat + } else { + *funcref = error_Readlinkat + } + return (*funcref)(dirfd, path, buf) +} + +func error_Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + n = -1 + err = ENOSYS + return +} + //sys Mkdir(path string, mode uint32) (err error) = SYS___MKDIR_A +//sys Mkdirat(dirfd int, path string, mode uint32) (err error) = SYS___MKDIRAT_A //sys Mkfifo(path string, mode uint32) (err error) = SYS___MKFIFO_A //sys Mknod(path string, mode uint32, dev int) (err error) = SYS___MKNOD_A +//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) = SYS___MKNODAT_A +//sys PivotRoot(newroot string, oldroot string) (err error) = SYS___PIVOT_ROOT_A //sys Pread(fd int, p []byte, offset int64) (n int, err error) //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) -//sys Readlink(path string, buf []byte) (n int, err error) = SYS___READLINK_A +//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) = SYS___PRCTL_A +//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT //sys Rename(from string, to string) (err error) = SYS___RENAME_A +//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) = SYS___RENAMEAT_A +//sys Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) = SYS___RENAMEAT2_A //sys Rmdir(path string) (err error) = SYS___RMDIR_A //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK +//sys Setegid(egid int) (err error) = SYS_SETEGID +//sys Seteuid(euid int) (err error) = SYS_SETEUID +//sys Sethostname(p []byte) (err error) = SYS___SETHOSTNAME_A +//sys Setns(fd int, nstype int) (err error) = SYS_SETNS //sys Setpriority(which int, who int, prio int) (err error) //sysnb Setpgid(pid int, pgid int) (err error) = SYS_SETPGID //sysnb Setrlimit(resource int, lim *Rlimit) (err error) @@ -360,32 +976,57 @@ func Stat(path string, sta *Stat_t) (err error) { } //sys Symlink(path string, link string) (err error) = SYS___SYMLINK_A +//sys Symlinkat(oldPath string, dirfd int, newPath string) (err error) = SYS___SYMLINKAT_A //sys Sync() = SYS_SYNC //sys Truncate(path string, length int64) (err error) = SYS___TRUNCATE_A //sys Tcgetattr(fildes int, termptr *Termios) (err error) = SYS_TCGETATTR //sys 
Tcsetattr(fildes int, when int, termptr *Termios) (err error) = SYS_TCSETATTR //sys Umask(mask int) (oldmask int) //sys Unlink(path string) (err error) = SYS___UNLINK_A +//sys Unlinkat(dirfd int, path string, flags int) (err error) = SYS___UNLINKAT_A //sys Utime(path string, utim *Utimbuf) (err error) = SYS___UTIME_A //sys open(path string, mode int, perm uint32) (fd int, err error) = SYS___OPEN_A func Open(path string, mode int, perm uint32) (fd int, err error) { + if mode&O_ACCMODE == 0 { + mode |= O_RDONLY + } return open(path, mode, perm) } -func Mkfifoat(dirfd int, path string, mode uint32) (err error) { - wd, err := Getwd() - if err != nil { - return err +//sys openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) = SYS___OPENAT_A + +func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + if flags&O_ACCMODE == 0 { + flags |= O_RDONLY } + return openat(dirfd, path, flags, mode) +} - if err := Fchdir(dirfd); err != nil { - return err +//sys openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) = SYS___OPENAT2_A + +func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) { + if how.Flags&O_ACCMODE == 0 { + how.Flags |= O_RDONLY } - defer Chdir(wd) + return openat2(dirfd, path, how, SizeofOpenHow) +} - return Mkfifo(path, mode) +func ZosFdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + runtime.EnterSyscall() + ret, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_W_IOCTL<<4, uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))) + runtime.ExitSyscall() + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + CallLeFuncWithErr(GetZosLibVec()+SYS___E2A_L<<4, uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)) + return string(buffer[:zb]), nil + } + return "", errnoErr2(e1, e2) } //sys remove(path string) (err error) @@ -403,10 +1044,12 @@ func Getcwd(buf []byte) (n int, err error) { } else { p = unsafe.Pointer(&_zero) } - _, _, e := syscall_syscall(SYS___GETCWD_A, uintptr(p), uintptr(len(buf)), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___GETCWD_A<<4, uintptr(p), uintptr(len(buf))) + runtime.ExitSyscall() n = clen(buf) + 1 - if e != 0 { - err = errnoErr(e) + if r0 == 0 { + err = errnoErr2(e1, e2) } return } @@ -520,9 +1163,41 @@ func (w WaitStatus) StopSignal() Signal { func (w WaitStatus) TrapCause() int { return -1 } +//sys waitid(idType int, id int, info *Siginfo, options int) (err error) + +func Waitid(idType int, id int, info *Siginfo, options int, rusage *Rusage) (err error) { + return waitid(idType, id, info, options) +} + //sys waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) -func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { +func impl_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAIT4<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage))) + runtime.ExitSyscall() + wpid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_Wait4Addr() *(func(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error)) + +var Wait4 = enter_Wait4 + +func enter_Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { + funcref := get_Wait4Addr() + if 
funcptrtest(GetZosLibVec()+SYS_WAIT4<<4, "") == 0 { + *funcref = impl_Wait4 + } else { + *funcref = legacyWait4 + } + return (*funcref)(pid, wstatus, options, rusage) +} + +func legacyWait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) { // TODO(mundaym): z/OS doesn't have wait4. I don't think getrusage does what we want. // At the moment rusage will not be touched. var status _C_int @@ -571,23 +1246,62 @@ func Pipe(p []int) (err error) { } var pp [2]_C_int err = pipe(&pp) - if err == nil { - p[0] = int(pp[0]) - p[1] = int(pp[1]) - } + p[0] = int(pp[0]) + p[1] = int(pp[1]) return } //sys utimes(path string, timeval *[2]Timeval) (err error) = SYS___UTIMES_A func Utimes(path string, tv []Timeval) (err error) { + if tv == nil { + return utimes(path, nil) + } if len(tv) != 2 { return EINVAL } return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -func UtimesNano(path string, ts []Timespec) error { +//sys utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) = SYS___UTIMENSAT_A + +func validUtimensat() bool { + if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___UTIMENSAT_A<<4); err == nil { + return name == "__utimensat_a" + } + } + return false +} + +// Begin UtimesNano + +//go:nosplit +func get_UtimesNanoAddr() *(func(path string, ts []Timespec) (err error)) + +var UtimesNano = enter_UtimesNano + +func enter_UtimesNano(path string, ts []Timespec) (err error) { + funcref := get_UtimesNanoAddr() + if validUtimensat() { + *funcref = utimesNanoImpl + } else { + *funcref = legacyUtimesNano + } + return (*funcref)(path, ts) +} + +func utimesNanoImpl(path string, ts []Timespec) (err error) { + if ts == nil { + return utimensat(AT_FDCWD, path, nil, 0) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0) +} + +func legacyUtimesNano(path string, ts []Timespec) (err error) { if len(ts) != 2 { return EINVAL } @@ -600,6 +1314,70 @@ func UtimesNano(path string, ts []Timespec) error { return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } +// End UtimesNano + +// Begin UtimesNanoAt + +//go:nosplit +func get_UtimesNanoAtAddr() *(func(dirfd int, path string, ts []Timespec, flags int) (err error)) + +var UtimesNanoAt = enter_UtimesNanoAt + +func enter_UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { + funcref := get_UtimesNanoAtAddr() + if validUtimensat() { + *funcref = utimesNanoAtImpl + } else { + *funcref = legacyUtimesNanoAt + } + return (*funcref)(dirfd, path, ts, flags) +} + +func utimesNanoAtImpl(dirfd int, path string, ts []Timespec, flags int) (err error) { + if ts == nil { + return utimensat(dirfd, path, nil, flags) + } + if len(ts) != 2 { + return EINVAL + } + return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), flags) +} + +func legacyUtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) (err error) { + if path[0] != '/' { + dirPath, err := ZosFdToPath(dirfd) + if err != nil { + return err + } + path = dirPath + "/" + path + } + if flags == AT_SYMLINK_NOFOLLOW { + if len(ts) != 2 { + return EINVAL + } + + if ts[0].Nsec >= 5e8 { + ts[0].Sec++ + } + ts[0].Nsec = 0 + if ts[1].Nsec >= 5e8 { + ts[1].Sec++ + } + ts[1].Nsec = 0 + + // Not as efficient as it could be because Timespec and + // Timeval have different types in the different OSes + tv := []Timeval{ + NsecToTimeval(TimespecToNsec(ts[0])), + NsecToTimeval(TimespecToNsec(ts[1])), + } + return 
Lutimes(path, tv) + } + return UtimesNano(path, ts) +} + +// End UtimesNanoAt + func Getsockname(fd int) (sa Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny @@ -1186,67 +1964,46 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) return n, nil } -func Opendir(name string) (uintptr, error) { - p, err := BytePtrFromString(name) - if err != nil { - return 0, err - } - dir, _, e := syscall_syscall(SYS___OPENDIR_A, uintptr(unsafe.Pointer(p)), 0, 0) - runtime.KeepAlive(unsafe.Pointer(p)) - if e != 0 { - err = errnoErr(e) - } - return dir, err -} - -// clearsyscall.Errno resets the errno value to 0. -func clearErrno() - -func Readdir(dir uintptr) (*Dirent, error) { - var ent Dirent - var res uintptr - // __readdir_r_a returns errno at the end of the directory stream, rather than 0. - // Therefore to avoid false positives we clear errno before calling it. - - // TODO(neeilan): Commented this out to get sys/unix compiling on z/OS. Uncomment and fix. Error: "undefined: clearsyscall" - //clearsyscall.Errno() // TODO(mundaym): check pre-emption rules. - - e, _, _ := syscall_syscall(SYS___READDIR_R_A, dir, uintptr(unsafe.Pointer(&ent)), uintptr(unsafe.Pointer(&res))) - var err error - if e != 0 { - err = errnoErr(Errno(e)) - } - if res == 0 { - return nil, err - } - return &ent, err -} - -func readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { - r0, _, e1 := syscall_syscall(SYS___READDIR_R_A, dirp, uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) - if int64(r0) == -1 { - err = errnoErr(Errno(e1)) +func Opendir(name string) (uintptr, error) { + p, err := BytePtrFromString(name) + if err != nil { + return 0, err } - return + err = nil + runtime.EnterSyscall() + dir, e2, e1 := CallLeFuncWithPtrReturn(GetZosLibVec()+SYS___OPENDIR_A<<4, uintptr(unsafe.Pointer(p))) + runtime.ExitSyscall() + runtime.KeepAlive(unsafe.Pointer(p)) + if dir == 0 { + err = errnoErr2(e1, e2) + } + return dir, err } +// clearsyscall.Errno resets the errno value to 0. 
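Aside: the rewritten Opendir above shows the calling convention now used by every wrapper in this file. CallLeFuncWithErr / CallLeFuncWithPtrReturn hand back the C result plus two error words, and errnoErr2 folds errno together with errno2, the extra z/OS reason code that makes failures diagnosable. An in-package sketch of one wrapper's shape, with SYS_EXAMPLE as a hypothetical libvec offset:

// In-package sketch; SYS_EXAMPLE is a placeholder, not a real constant.
func exampleWrapper(fd int) (err error) {
	runtime.EnterSyscall()
	r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EXAMPLE<<4, uintptr(fd))
	runtime.ExitSyscall()
	if int64(r0) == -1 {
		err = errnoErr2(e1, e2) // carries both errno and errno2
	}
	return
}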
+func clearErrno() + func Closedir(dir uintptr) error { - _, _, e := syscall_syscall(SYS_CLOSEDIR, dir, 0, 0) - if e != 0 { - return errnoErr(e) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_CLOSEDIR<<4, dir) + runtime.ExitSyscall() + if r0 != 0 { + return errnoErr2(e1, e2) } return nil } func Seekdir(dir uintptr, pos int) { - _, _, _ = syscall_syscall(SYS_SEEKDIR, dir, uintptr(pos), 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_SEEKDIR<<4, dir, uintptr(pos)) + runtime.ExitSyscall() } func Telldir(dir uintptr) (int, error) { - p, _, e := syscall_syscall(SYS_TELLDIR, dir, 0, 0) + p, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TELLDIR<<4, dir) pos := int(p) - if pos == -1 { - return pos, errnoErr(e) + if int64(p) == -1 { + return pos, errnoErr2(e1, e2) } return pos, nil } @@ -1261,19 +2018,55 @@ func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { *(*int64)(unsafe.Pointer(&flock[4])) = lk.Start *(*int64)(unsafe.Pointer(&flock[12])) = lk.Len *(*int32)(unsafe.Pointer(&flock[20])) = lk.Pid - _, _, errno := syscall_syscall(SYS_FCNTL, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, fd, uintptr(cmd), uintptr(unsafe.Pointer(&flock))) + runtime.ExitSyscall() lk.Type = *(*int16)(unsafe.Pointer(&flock[0])) lk.Whence = *(*int16)(unsafe.Pointer(&flock[2])) lk.Start = *(*int64)(unsafe.Pointer(&flock[4])) lk.Len = *(*int64)(unsafe.Pointer(&flock[12])) lk.Pid = *(*int32)(unsafe.Pointer(&flock[20])) - if errno == 0 { + if r0 == 0 { return nil } - return errno + return errnoErr2(e1, e2) +} + +func impl_Flock(fd int, how int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FLOCK<<4, uintptr(fd), uintptr(how)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FlockAddr() *(func(fd int, how int) (err error)) + +var Flock = enter_Flock + +func validFlock(fp uintptr) bool { + if funcptrtest(GetZosLibVec()+SYS_FLOCK<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS_FLOCK<<4); err == nil { + return name == "flock" + } + } + return false +} + +func enter_Flock(fd int, how int) (err error) { + funcref := get_FlockAddr() + if validFlock(GetZosLibVec() + SYS_FLOCK<<4) { + *funcref = impl_Flock + } else { + *funcref = legacyFlock + } + return (*funcref)(fd, how) } -func Flock(fd int, how int) error { +func legacyFlock(fd int, how int) error { var flock_type int16 var fcntl_cmd int @@ -1307,41 +2100,51 @@ func Flock(fd int, how int) error { } func Mlock(b []byte) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Mlock2(b []byte, flags int) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Mlockall(flags int) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_NONSWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_NONSWAP) + 
runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Munlock(b []byte) (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } func Munlockall() (err error) { - _, _, e1 := syscall_syscall(SYS___MLOCKALL, _BPX_SWAP, 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MLOCKALL<<4, _BPX_SWAP) + runtime.ExitSyscall() + if r0 != 0 { + err = errnoErr2(e1, e2) } return } @@ -1372,15 +2175,104 @@ func ClockGettime(clockid int32, ts *Timespec) error { return nil } -func Statfs(path string, stat *Statfs_t) (err error) { - fd, err := open(path, O_RDONLY, 0) - defer Close(fd) - if err != nil { - return err +// Chtag + +//go:nosplit +func get_ChtagAddr() *(func(path string, ccsid uint64, textbit uint64) error) + +var Chtag = enter_Chtag + +func enter_Chtag(path string, ccsid uint64, textbit uint64) error { + funcref := get_ChtagAddr() + if validSetxattr() { + *funcref = impl_Chtag + } else { + *funcref = legacy_Chtag + } + return (*funcref)(path, ccsid, textbit) +} + +func legacy_Chtag(path string, ccsid uint64, textbit uint64) error { + tag := ccsid<<16 | textbit<<15 + var tag_buff [8]byte + DecodeData(tag_buff[:], 8, tag) + return Setxattr(path, "filetag", tag_buff[:], XATTR_REPLACE) +} + +func impl_Chtag(path string, ccsid uint64, textbit uint64) error { + tag := ccsid<<16 | textbit<<15 + var tag_buff [4]byte + DecodeData(tag_buff[:], 4, tag) + return Setxattr(path, "system.filetag", tag_buff[:], XATTR_REPLACE) +} + +// End of Chtag + +// Nanosleep + +//go:nosplit +func get_NanosleepAddr() *(func(time *Timespec, leftover *Timespec) error) + +var Nanosleep = enter_Nanosleep + +func enter_Nanosleep(time *Timespec, leftover *Timespec) error { + funcref := get_NanosleepAddr() + if funcptrtest(GetZosLibVec()+SYS_NANOSLEEP<<4, "") == 0 { + *funcref = impl_Nanosleep + } else { + *funcref = legacyNanosleep + } + return (*funcref)(time, leftover) +} + +func impl_Nanosleep(time *Timespec, leftover *Timespec) error { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_NANOSLEEP<<4, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover))) + runtime.ExitSyscall() + if int64(r0) == -1 { + return errnoErr2(e1, e2) + } + return nil +} + +func legacyNanosleep(time *Timespec, leftover *Timespec) error { + t0 := runtime.Nanotime1() + var secrem uint32 + var nsecrem uint32 + total := time.Sec*1000000000 + time.Nsec + elapsed := runtime.Nanotime1() - t0 + var rv int32 + var rc int32 + var err error + // repeatedly sleep for 1 second until less than 1 second left + for total-elapsed > 1000000000 { + rv, rc, _ = BpxCondTimedWait(uint32(1), uint32(0), uint32(CW_CONDVAR), &secrem, &nsecrem) + if rv != 0 && rc != 112 { // 112 is EAGAIN + if leftover != nil && rc == 120 { // 120 is EINTR + leftover.Sec = int64(secrem) + leftover.Nsec = int64(nsecrem) + } + err = Errno(rc) + return err + } + elapsed = runtime.Nanotime1() - t0 } - return Fstatfs(fd, stat) + // sleep the remainder + if total > elapsed { + rv, rc, _ = BpxCondTimedWait(uint32(0), uint32(total-elapsed), uint32(CW_CONDVAR), &secrem, &nsecrem) + } + if leftover != nil && rc == 120 { + leftover.Sec = int64(secrem) + leftover.Nsec = int64(nsecrem) + } + if rv != 0 && rc != 112 { + err = Errno(rc) + } + 
return err } +// End of Nanosleep + var ( Stdin = 0 Stdout = 1 @@ -1395,6 +2287,9 @@ var ( errENOENT error = syscall.ENOENT ) +var ZosTraceLevel int +var ZosTracefile *os.File + var ( signalNameMapOnce sync.Once signalNameMap map[string]syscall.Signal @@ -1416,6 +2311,56 @@ func errnoErr(e Errno) error { return e } +var reg *regexp.Regexp + +// enhanced with zos specific errno2 +func errnoErr2(e Errno, e2 uintptr) error { + switch e { + case 0: + return nil + case EAGAIN: + return errEAGAIN + /* + Allow the retrieval of errno2 for EINVAL and ENOENT on zos + case EINVAL: + return errEINVAL + case ENOENT: + return errENOENT + */ + } + if ZosTraceLevel > 0 { + var name string + if reg == nil { + reg = regexp.MustCompile("(^unix\\.[^/]+$|.*\\/unix\\.[^/]+$)") + } + i := 1 + pc, file, line, ok := runtime.Caller(i) + if ok { + name = runtime.FuncForPC(pc).Name() + } + for ok && reg.MatchString(runtime.FuncForPC(pc).Name()) { + i += 1 + pc, file, line, ok = runtime.Caller(i) + } + if ok { + if ZosTracefile == nil { + ZosConsolePrintf("From %s:%d\n", file, line) + ZosConsolePrintf("%s: %s (errno2=0x%x)\n", name, e.Error(), e2) + } else { + fmt.Fprintf(ZosTracefile, "From %s:%d\n", file, line) + fmt.Fprintf(ZosTracefile, "%s: %s (errno2=0x%x)\n", name, e.Error(), e2) + } + } else { + if ZosTracefile == nil { + ZosConsolePrintf("%s (errno2=0x%x)\n", e.Error(), e2) + } else { + fmt.Fprintf(ZosTracefile, "%s (errno2=0x%x)\n", e.Error(), e2) + } + } + } + return e +} + // ErrnoName returns the error name for error number e. func ErrnoName(e Errno) string { i := sort.Search(len(errorList), func(i int) bool { @@ -1474,6 +2419,9 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d return nil, EINVAL } + // Set __MAP_64 by default + flags |= __MAP_64 + // Map the requested memory. 
addr, errno := m.mmap(0, uintptr(length), prot, flags, fd, offset) if errno != nil { @@ -1778,83 +2726,170 @@ func Exec(argv0 string, argv []string, envv []string) error { return syscall.Exec(argv0, argv, envv) } -func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { +func Getag(path string) (ccsid uint16, flag uint16, err error) { + var val [8]byte + sz, err := Getxattr(path, "ccsid", val[:]) + if err != nil { + return + } + ccsid = uint16(EncodeData(val[0:sz])) + sz, err = Getxattr(path, "flags", val[:]) + if err != nil { + return + } + flag = uint16(EncodeData(val[0:sz]) >> 15) + return +} + +// Mount begin +func impl_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(data) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT1_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(_p3))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MountAddr() *(func(source string, target string, fstype string, flags uintptr, data string) (err error)) + +var Mount = enter_Mount + +func enter_Mount(source string, target string, fstype string, flags uintptr, data string) (err error) { + funcref := get_MountAddr() + if validMount() { + *funcref = impl_Mount + } else { + *funcref = legacyMount + } + return (*funcref)(source, target, fstype, flags, data) +} + +func legacyMount(source string, target string, fstype string, flags uintptr, data string) (err error) { if needspace := 8 - len(fstype); needspace <= 0 { - fstype = fstype[:8] + fstype = fstype[0:8] } else { - fstype += " "[:needspace] + fstype += " "[0:needspace] } return mount_LE(target, source, fstype, uint32(flags), int32(len(data)), data) } -func Unmount(name string, mtm int) (err error) { +func validMount() bool { + if funcptrtest(GetZosLibVec()+SYS___MOUNT1_A<<4, "") == 0 { + if name, err := getLeFuncName(GetZosLibVec() + SYS___MOUNT1_A<<4); err == nil { + return name == "__mount1_a" + } + } + return false +} + +// Mount end + +// Unmount begin +func impl_Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT2_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnmountAddr() *(func(target string, flags int) (err error)) + +var Unmount = enter_Unmount + +func enter_Unmount(target string, flags int) (err error) { + funcref := get_UnmountAddr() + if funcptrtest(GetZosLibVec()+SYS___UMOUNT2_A<<4, "") == 0 { + *funcref = impl_Unmount + } else { + *funcref = legacyUnmount + } + return (*funcref)(target, flags) +} + +func legacyUnmount(name string, mtm int) (err error) { // mountpoint is always a full path and starts with a '/' // check if input string is not a mountpoint but a filesystem name if name[0] != '/' { - return unmount(name, mtm) + return unmount_LE(name, mtm) } // treat 
name as mountpoint b2s := func(arr []byte) string { - nulli := bytes.IndexByte(arr, 0) - if nulli == -1 { - return string(arr) - } else { - return string(arr[:nulli]) + var str string + for i := 0; i < len(arr); i++ { + if arr[i] == 0 { + str = string(arr[:i]) + break + } } + return str } var buffer struct { header W_Mnth fsinfo [64]W_Mntent } - fsCount, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) - if err != nil { - return err - } - if fsCount == 0 { - return EINVAL - } - for i := 0; i < fsCount; i++ { - if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { - err = unmount(b2s(buffer.fsinfo[i].Fsname[:]), mtm) - break + fs_count, err := W_Getmntent_A((*byte)(unsafe.Pointer(&buffer)), int(unsafe.Sizeof(buffer))) + if err == nil { + err = EINVAL + for i := 0; i < fs_count; i++ { + if b2s(buffer.fsinfo[i].Mountpoint[:]) == name { + err = unmount_LE(b2s(buffer.fsinfo[i].Fsname[:]), mtm) + break + } } + } else if fs_count == 0 { + err = EINVAL } return err } -func fdToPath(dirfd int) (path string, err error) { - var buffer [1024]byte - // w_ctrl() - ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, - []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - // __e2a_l() - runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, - []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) - return string(buffer[:zb]), nil - } - // __errno() - errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, - []uintptr{})))) - // __errno2() - errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, - []uintptr{})) - // strerror_r() - ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, - []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) - if ret == 0 { - zb := bytes.IndexByte(buffer[:], 0) - if zb == -1 { - zb = len(buffer) - } - return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) - } else { - return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) +// Unmount end + +func direntIno(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +} + +func direntReclen(buf []byte) (uint64, bool) { + return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +} + +func direntNamlen(buf []byte) (uint64, bool) { + reclen, ok := direntReclen(buf) + if !ok { + return 0, false } + return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true } func direntLeToDirentUnix(dirent *direntLE, dir uintptr, path string) (Dirent, error) { @@ -1896,7 +2931,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { } // Get path from fd to avoid unavailable call (fdopendir) - path, err := fdToPath(fd) + path, err := ZosFdToPath(fd) if err != nil { return 0, err } @@ -1910,7 +2945,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { for { var entryLE direntLE var entrypLE *direntLE - e := readdir_r(d, &entryLE, &entrypLE) + e := Readdir_r(d, &entryLE, &entrypLE) if e != nil { return n, e } @@ -1956,23 +2991,127 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { return n, nil } -func ReadDirent(fd int, buf []byte) (n int, err error) { - var base = (*uintptr)(unsafe.Pointer(new(uint64))) - return Getdirentries(fd, buf, base) +func Err2ad() (eadd *int) { + r0, _, _ := 
CallLeFuncWithErr(GetZosLibVec() + SYS___ERR2AD<<4) + eadd = (*int)(unsafe.Pointer(r0)) + return } -func direntIno(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Ino), unsafe.Sizeof(Dirent{}.Ino)) +func ZosConsolePrintf(format string, v ...interface{}) (int, error) { + type __cmsg struct { + _ uint16 + _ [2]uint8 + __msg_length uint32 + __msg uintptr + _ [4]uint8 + } + msg := fmt.Sprintf(format, v...) + strptr := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&msg)).Data) + len := (*reflect.StringHeader)(unsafe.Pointer(&msg)).Len + cmsg := __cmsg{__msg_length: uint32(len), __msg: uintptr(strptr)} + cmd := uint32(0) + runtime.EnterSyscall() + rc, err2, err1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____CONSOLE_A<<4, uintptr(unsafe.Pointer(&cmsg)), 0, uintptr(unsafe.Pointer(&cmd))) + runtime.ExitSyscall() + if rc != 0 { + return 0, fmt.Errorf("%s (errno2=0x%x)\n", err1.Error(), err2) + } + return 0, nil +} +func ZosStringToEbcdicBytes(str string, nullterm bool) (ebcdicBytes []byte) { + if nullterm { + ebcdicBytes = []byte(str + "\x00") + } else { + ebcdicBytes = []byte(str) + } + A2e(ebcdicBytes) + return +} +func ZosEbcdicBytesToString(b []byte, trimRight bool) (str string) { + res := make([]byte, len(b)) + copy(res, b) + E2a(res) + if trimRight { + str = string(bytes.TrimRight(res, " \x00")) + } else { + str = string(res) + } + return } -func direntReclen(buf []byte) (uint64, bool) { - return readInt(buf, unsafe.Offsetof(Dirent{}.Reclen), unsafe.Sizeof(Dirent{}.Reclen)) +func fdToPath(dirfd int) (path string, err error) { + var buffer [1024]byte + // w_ctrl() + ret := runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_W_IOCTL<<4, + []uintptr{uintptr(dirfd), 17, 1024, uintptr(unsafe.Pointer(&buffer[0]))}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + // __e2a_l() + runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___E2A_L<<4, + []uintptr{uintptr(unsafe.Pointer(&buffer[0])), uintptr(zb)}) + return string(buffer[:zb]), nil + } + // __errno() + errno := int(*(*int32)(unsafe.Pointer(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO<<4, + []uintptr{})))) + // __errno2() + errno2 := int(runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS___ERRNO2<<4, + []uintptr{})) + // strerror_r() + ret = runtime.CallLeFuncByPtr(runtime.XplinkLibvec+SYS_STRERROR_R<<4, + []uintptr{uintptr(errno), uintptr(unsafe.Pointer(&buffer[0])), 1024}) + if ret == 0 { + zb := bytes.IndexByte(buffer[:], 0) + if zb == -1 { + zb = len(buffer) + } + return "", fmt.Errorf("%s (errno2=0x%x)", buffer[:zb], errno2) + } else { + return "", fmt.Errorf("fdToPath errno %d (errno2=0x%x)", errno, errno2) + } } -func direntNamlen(buf []byte) (uint64, bool) { - reclen, ok := direntReclen(buf) - if !ok { - return 0, false +func impl_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return } - return reclen - uint64(unsafe.Offsetof(Dirent{}.Name)), true + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFOAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MkfifoatAddr() *(func(dirfd int, path string, mode uint32) (err error)) + +var Mkfifoat = enter_Mkfifoat + +func enter_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + funcref := get_MkfifoatAddr() + if funcptrtest(GetZosLibVec()+SYS___MKFIFOAT_A<<4, 
"") == 0 { + *funcref = impl_Mkfifoat + } else { + *funcref = legacy_Mkfifoat + } + return (*funcref)(dirfd, path, mode) +} + +func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) { + dirname, err := ZosFdToPath(dirfd) + if err != nil { + return err + } + return Mkfifo(dirname+"/"+path, mode) } + +//sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT +//sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT +//sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 79a84f18b..672d6b0a8 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (darwin && !ios) || linux +//go:build (darwin && !ios) || linux || zos package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go index 9eb0db664..8b7977a28 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build darwin && !ios +//go:build (darwin && !ios) || zos package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 36bf8399f..877a62b47 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -491,6 +491,7 @@ const ( BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 @@ -501,6 +502,7 @@ const ( BPF_IMM = 0x0 BPF_IND = 0x40 BPF_JA = 0x0 + BPF_JCOND = 0xe0 BPF_JEQ = 0x10 BPF_JGE = 0x30 BPF_JGT = 0x20 @@ -656,6 +658,9 @@ const ( CAN_NPROTO = 0x8 CAN_RAW = 0x1 CAN_RAW_FILTER_MAX = 0x200 + CAN_RAW_XL_VCID_RX_FILTER = 0x4 + CAN_RAW_XL_VCID_TX_PASS = 0x2 + CAN_RAW_XL_VCID_TX_SET = 0x1 CAN_RTR_FLAG = 0x40000000 CAN_SFF_ID_BITS = 0xb CAN_SFF_MASK = 0x7ff @@ -1338,6 +1343,7 @@ const ( F_OFD_SETLK = 0x25 F_OFD_SETLKW = 0x26 F_OK = 0x0 + F_SEAL_EXEC = 0x20 F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 @@ -1626,6 +1632,7 @@ const ( IP_FREEBIND = 0xf IP_HDRINCL = 0x3 IP_IPSEC_POLICY = 0x10 + IP_LOCAL_PORT_RANGE = 0x33 IP_MAXPACKET = 0xffff IP_MAX_MEMBERSHIPS = 0x14 IP_MF = 0x2000 @@ -1652,6 +1659,7 @@ const ( IP_PMTUDISC_OMIT = 0x5 IP_PMTUDISC_PROBE = 0x3 IP_PMTUDISC_WANT = 0x1 + IP_PROTOCOL = 0x34 IP_RECVERR = 0xb IP_RECVERR_RFC4884 = 0x1a IP_RECVFRAGSIZE = 0x19 @@ -1697,6 +1705,7 @@ const ( KEXEC_ARCH_S390 = 0x160000 KEXEC_ARCH_SH = 0x2a0000 KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_DEBUG = 0x8 KEXEC_FILE_NO_INITRAMFS = 0x4 KEXEC_FILE_ON_CRASH = 0x2 KEXEC_FILE_UNLOAD = 0x1 @@ -1898,6 +1907,7 @@ const ( MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MNT_ID_REQ_SIZE_VER0 = 0x18 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2166,7 +2176,7 @@ const ( NFT_SECMARK_CTX_MAXLEN = 0x100 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 - NFT_TABLE_F_MASK = 0x3 + NFT_TABLE_F_MASK = 0x7 NFT_TABLE_MAXNAMELEN = 0x100 NFT_TRACETYPE_MAX = 0x3 NFT_TUNNEL_F_MASK = 0x7 @@ -2302,6 +2312,7 @@ const ( PERF_AUX_FLAG_PARTIAL = 0x4 
PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK = 0xff00 PERF_AUX_FLAG_TRUNCATED = 0x1 + PERF_BRANCH_ENTRY_INFO_BITS_MAX = 0x21 PERF_BR_ARM64_DEBUG_DATA = 0x7 PERF_BR_ARM64_DEBUG_EXIT = 0x5 PERF_BR_ARM64_DEBUG_HALT = 0x4 @@ -2399,6 +2410,7 @@ const ( PERF_RECORD_MISC_USER = 0x2 PERF_SAMPLE_BRANCH_PLM_ALL = 0x7 PERF_SAMPLE_WEIGHT_TYPE = 0x1004000 + PID_FS_MAGIC = 0x50494446 PIPEFS_MAGIC = 0x50495045 PPPIOCGNPMODE = 0xc008744c PPPIOCNEWUNIT = 0xc004743e @@ -2892,8 +2904,9 @@ const ( RWF_APPEND = 0x10 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 + RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x1f + RWF_SUPPORTED = 0x3f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -2914,7 +2927,9 @@ const ( SCHED_RESET_ON_FORK = 0x40000000 SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 + SCM_PIDFD = 0x4 SCM_RIGHTS = 0x1 + SCM_SECURITY = 0x3 SCM_TIMESTAMP = 0x1d SC_LOG_FLUSH = 0x100000 SECCOMP_ADDFD_FLAG_SEND = 0x2 @@ -3047,6 +3062,8 @@ const ( SIOCSMIIREG = 0x8949 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SK_DIAG_BPF_STORAGE_MAX = 0x3 + SK_DIAG_BPF_STORAGE_REQ_MAX = 0x1 SMACK_MAGIC = 0x43415d53 SMART_AUTOSAVE = 0xd2 SMART_AUTO_OFFLINE = 0xdb @@ -3067,6 +3084,8 @@ const ( SOCKFS_MAGIC = 0x534f434b SOCK_BUF_LOCK_MASK = 0x3 SOCK_DCCP = 0x6 + SOCK_DESTROY = 0x15 + SOCK_DIAG_BY_FAMILY = 0x14 SOCK_IOC_TYPE = 0x89 SOCK_PACKET = 0xa SOCK_RAW = 0x3 @@ -3168,6 +3187,7 @@ const ( STATX_GID = 0x10 STATX_INO = 0x100 STATX_MNT_ID = 0x1000 + STATX_MNT_ID_UNIQUE = 0x4000 STATX_MODE = 0x2 STATX_MTIME = 0x40 STATX_NLINK = 0x4 @@ -3255,6 +3275,7 @@ const ( TCP_MAX_WINSHIFT = 0xe TCP_MD5SIG = 0xe TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_IFINDEX = 0x2 TCP_MD5SIG_FLAG_PREFIX = 0x1 TCP_MD5SIG_MAXKEYLEN = 0x50 TCP_MSS = 0x200 @@ -3562,12 +3583,16 @@ const ( XDP_RX_RING = 0x2 XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 + XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_TIMESTAMP = 0x1 + XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 XDP_UMEM_COMPLETION_RING = 0x6 XDP_UMEM_FILL_RING = 0x5 XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 XDP_USE_SG = 0x10 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 42ff8c3c1..e4bc0bd57 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -118,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index dca436004..689317afd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -118,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 MAP_32BIT = 0x40 + MAP_ABOVE4G = 0x80 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d8cae6d15..14270508b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -87,6 +87,7 @@ const ( FICLONE = 0x40049409 FICLONERANGE = 0x4020940d FLUSHO = 0x1000 + FPMR_MAGIC = 0x46504d52 FPSIMD_MAGIC = 0x46508001 FS_IOC_ENABLE_VERITY = 0x40806685 FS_IOC_GETFLAGS = 0x80086601 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go 
b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index 4dfd2e051..da08b2ab3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -10,41 +10,99 @@ package unix const ( - BRKINT = 0x0001 - CLOCK_MONOTONIC = 0x1 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_THREAD_CPUTIME_ID = 0x3 - CS8 = 0x0030 - CSIZE = 0x0030 - ECHO = 0x00000008 - ECHONL = 0x00000001 - FD_CLOEXEC = 0x01 - FD_CLOFORK = 0x02 - FNDELAY = 0x04 - F_CLOSFD = 9 - F_CONTROL_CVT = 13 - F_DUPFD = 0 - F_DUPFD2 = 8 - F_GETFD = 1 - F_GETFL = 259 - F_GETLK = 5 - F_GETOWN = 10 - F_OK = 0x0 - F_RDLCK = 1 - F_SETFD = 2 - F_SETFL = 4 - F_SETLK = 6 - F_SETLKW = 7 - F_SETOWN = 11 - F_SETTAG = 12 - F_UNLCK = 3 - F_WRLCK = 2 - FSTYPE_ZFS = 0xe9 //"Z" - FSTYPE_HFS = 0xc8 //"H" - FSTYPE_NFS = 0xd5 //"N" - FSTYPE_TFS = 0xe3 //"T" - FSTYPE_AUTOMOUNT = 0xc1 //"A" + BRKINT = 0x0001 + CLOCAL = 0x1 + CLOCK_MONOTONIC = 0x1 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLONE_NEWIPC = 0x08000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x00020000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWUTS = 0x04000000 + CLONE_PARENT = 0x00008000 + CS8 = 0x0030 + CSIZE = 0x0030 + ECHO = 0x00000008 + ECHONL = 0x00000001 + EFD_SEMAPHORE = 0x00002000 + EFD_CLOEXEC = 0x00001000 + EFD_NONBLOCK = 0x00000004 + EPOLL_CLOEXEC = 0x00001000 + EPOLL_CTL_ADD = 0 + EPOLL_CTL_MOD = 1 + EPOLL_CTL_DEL = 2 + EPOLLRDNORM = 0x0001 + EPOLLRDBAND = 0x0002 + EPOLLIN = 0x0003 + EPOLLOUT = 0x0004 + EPOLLWRBAND = 0x0008 + EPOLLPRI = 0x0010 + EPOLLERR = 0x0020 + EPOLLHUP = 0x0040 + EPOLLEXCLUSIVE = 0x20000000 + EPOLLONESHOT = 0x40000000 + FD_CLOEXEC = 0x01 + FD_CLOFORK = 0x02 + FD_SETSIZE = 0x800 + FNDELAY = 0x04 + F_CLOSFD = 9 + F_CONTROL_CVT = 13 + F_DUPFD = 0 + F_DUPFD2 = 8 + F_GETFD = 1 + F_GETFL = 259 + F_GETLK = 5 + F_GETOWN = 10 + F_OK = 0x0 + F_RDLCK = 1 + F_SETFD = 2 + F_SETFL = 4 + F_SETLK = 6 + F_SETLKW = 7 + F_SETOWN = 11 + F_SETTAG = 12 + F_UNLCK = 3 + F_WRLCK = 2 + FSTYPE_ZFS = 0xe9 //"Z" + FSTYPE_HFS = 0xc8 //"H" + FSTYPE_NFS = 0xd5 //"N" + FSTYPE_TFS = 0xe3 //"T" + FSTYPE_AUTOMOUNT = 0xc1 //"A" + GRND_NONBLOCK = 1 + GRND_RANDOM = 2 + HUPCL = 0x0100 // Hang up on last close + IN_CLOEXEC = 0x00001000 + IN_NONBLOCK = 0x00000004 + IN_ACCESS = 0x00000001 + IN_MODIFY = 0x00000002 + IN_ATTRIB = 0x00000004 + IN_CLOSE_WRITE = 0x00000008 + IN_CLOSE_NOWRITE = 0x00000010 + IN_OPEN = 0x00000020 + IN_MOVED_FROM = 0x00000040 + IN_MOVED_TO = 0x00000080 + IN_CREATE = 0x00000100 + IN_DELETE = 0x00000200 + IN_DELETE_SELF = 0x00000400 + IN_MOVE_SELF = 0x00000800 + IN_UNMOUNT = 0x00002000 + IN_Q_OVERFLOW = 0x00004000 + IN_IGNORED = 0x00008000 + IN_CLOSE = (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE) + IN_MOVE = (IN_MOVED_FROM | IN_MOVED_TO) + IN_ALL_EVENTS = (IN_ACCESS | IN_MODIFY | IN_ATTRIB | + IN_CLOSE | IN_OPEN | IN_MOVE | + IN_CREATE | IN_DELETE | IN_DELETE_SELF | + IN_MOVE_SELF) + IN_ONLYDIR = 0x01000000 + IN_DONT_FOLLOW = 0x02000000 + IN_EXCL_UNLINK = 0x04000000 + IN_MASK_CREATE = 0x10000000 + IN_MASK_ADD = 0x20000000 + IN_ISDIR = 0x40000000 + IN_ONESHOT = 0x80000000 IP6F_MORE_FRAG = 0x0001 IP6F_OFF_MASK = 0xfff8 IP6F_RESERVED_MASK = 0x0006 @@ -152,10 +210,18 @@ const ( IP_PKTINFO = 101 IP_RECVPKTINFO = 102 IP_TOS = 2 - IP_TTL = 3 + IP_TTL = 14 IP_UNBLOCK_SOURCE = 11 + ICMP6_FILTER = 1 + MCAST_INCLUDE = 0 + MCAST_EXCLUDE = 1 + MCAST_JOIN_GROUP = 40 + MCAST_LEAVE_GROUP = 41 + MCAST_JOIN_SOURCE_GROUP = 42 + MCAST_LEAVE_SOURCE_GROUP = 43 + MCAST_BLOCK_SOURCE = 44 + 
MCAST_UNBLOCK_SOURCE = 46 ICANON = 0x0010 - ICMP6_FILTER = 0x26 ICRNL = 0x0002 IEXTEN = 0x0020 IGNBRK = 0x0004 @@ -165,10 +231,10 @@ const ( ISTRIP = 0x0080 IXON = 0x0200 IXOFF = 0x0100 - LOCK_SH = 0x1 // Not exist on zOS - LOCK_EX = 0x2 // Not exist on zOS - LOCK_NB = 0x4 // Not exist on zOS - LOCK_UN = 0x8 // Not exist on zOS + LOCK_SH = 0x1 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_UN = 0x8 POLLIN = 0x0003 POLLOUT = 0x0004 POLLPRI = 0x0010 @@ -182,15 +248,29 @@ const ( MAP_PRIVATE = 0x1 // changes are private MAP_SHARED = 0x2 // changes are shared MAP_FIXED = 0x4 // place exactly - MCAST_JOIN_GROUP = 40 - MCAST_LEAVE_GROUP = 41 - MCAST_JOIN_SOURCE_GROUP = 42 - MCAST_LEAVE_SOURCE_GROUP = 43 - MCAST_BLOCK_SOURCE = 44 - MCAST_UNBLOCK_SOURCE = 45 + __MAP_MEGA = 0x8 + __MAP_64 = 0x10 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 MS_SYNC = 0x1 // msync - synchronous writes MS_ASYNC = 0x2 // asynchronous writes MS_INVALIDATE = 0x4 // invalidate mappings + MS_BIND = 0x00001000 + MS_MOVE = 0x00002000 + MS_NOSUID = 0x00000002 + MS_PRIVATE = 0x00040000 + MS_REC = 0x00004000 + MS_REMOUNT = 0x00008000 + MS_RDONLY = 0x00000001 + MS_UNBINDABLE = 0x00020000 + MNT_DETACH = 0x00000004 + ZOSDSFS_SUPER_MAGIC = 0x44534653 // zOS DSFS + NFS_SUPER_MAGIC = 0x6969 // NFS + NSFS_MAGIC = 0x6e736673 // PROCNS + PROC_SUPER_MAGIC = 0x9fa0 // proc FS + ZOSTFS_SUPER_MAGIC = 0x544653 // zOS TFS + ZOSUFS_SUPER_MAGIC = 0x554653 // zOS UFS + ZOSZFS_SUPER_MAGIC = 0x5A4653 // zOS ZFS MTM_RDONLY = 0x80000000 MTM_RDWR = 0x40000000 MTM_UMOUNT = 0x10000000 @@ -205,13 +285,20 @@ const ( MTM_REMOUNT = 0x00000100 MTM_NOSECURITY = 0x00000080 NFDBITS = 0x20 + ONLRET = 0x0020 // NL performs CR function O_ACCMODE = 0x03 O_APPEND = 0x08 O_ASYNCSIG = 0x0200 O_CREAT = 0x80 + O_DIRECT = 0x00002000 + O_NOFOLLOW = 0x00004000 + O_DIRECTORY = 0x00008000 + O_PATH = 0x00080000 + O_CLOEXEC = 0x00001000 O_EXCL = 0x40 O_GETFL = 0x0F O_LARGEFILE = 0x0400 + O_NDELAY = 0x4 O_NONBLOCK = 0x04 O_RDONLY = 0x02 O_RDWR = 0x03 @@ -248,6 +335,7 @@ const ( AF_IUCV = 17 AF_LAT = 14 AF_LINK = 18 + AF_LOCAL = AF_UNIX // AF_LOCAL is an alias for AF_UNIX AF_MAX = 30 AF_NBS = 7 AF_NDD = 23 @@ -285,15 +373,33 @@ const ( RLIMIT_AS = 5 RLIMIT_NOFILE = 6 RLIMIT_MEMLIMIT = 7 + RLIMIT_MEMLOCK = 0x8 RLIM_INFINITY = 2147483647 + SCHED_FIFO = 0x2 + SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x01 SF_CLOSE = 0x00000002 SF_REUSE = 0x00000001 + SHM_RND = 0x2 + SHM_RDONLY = 0x1 + SHMLBA = 0x1000 + IPC_STAT = 0x3 + IPC_SET = 0x2 + IPC_RMID = 0x1 + IPC_PRIVATE = 0x0 + IPC_CREAT = 0x1000000 + __IPC_MEGA = 0x4000000 + __IPC_SHAREAS = 0x20000000 + __IPC_BELOWBAR = 0x10000000 + IPC_EXCL = 0x2000000 + __IPC_GIGA = 0x8000000 SHUT_RD = 0 SHUT_RDWR = 2 SHUT_WR = 1 + SOCK_CLOEXEC = 0x00001000 SOCK_CONN_DGRAM = 6 SOCK_DGRAM = 2 + SOCK_NONBLOCK = 0x800 SOCK_RAW = 3 SOCK_RDM = 4 SOCK_SEQPACKET = 5 @@ -378,8 +484,6 @@ const ( S_IFMST = 0x00FF0000 TCP_KEEPALIVE = 0x8 TCP_NODELAY = 0x1 - TCP_INFO = 0xb - TCP_USER_TIMEOUT = 0x1 TIOCGWINSZ = 0x4008a368 TIOCSWINSZ = 0x8008a367 TIOCSBRK = 0x2000a77b @@ -427,7 +531,10 @@ const ( VSUSP = 9 VTIME = 10 WCONTINUED = 0x4 + WEXITED = 0x8 WNOHANG = 0x1 + WNOWAIT = 0x20 + WSTOPPED = 0x10 WUNTRACED = 0x2 _BPX_SWAP = 1 _BPX_NONSWAP = 2 @@ -452,8 +559,28 @@ const ( MADV_FREE = 15 // for Linux compatibility -- no zos semantics MADV_WIPEONFORK = 16 // for Linux compatibility -- no zos semantics MADV_KEEPONFORK = 17 // for Linux compatibility -- no zos semantics - AT_SYMLINK_NOFOLLOW = 1 // for Unix compatibility -- no zos semantics - AT_FDCWD = 2 // for Unix compatibility -- no 
zos semantics + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + P_PID = 0 + P_PGID = 1 + P_ALL = 2 + PR_SET_NAME = 15 + PR_GET_NAME = 16 + PR_SET_NO_NEW_PRIVS = 38 + PR_GET_NO_NEW_PRIVS = 39 + PR_SET_DUMPABLE = 4 + PR_GET_DUMPABLE = 3 + PR_SET_PDEATHSIG = 1 + PR_GET_PDEATHSIG = 2 + PR_SET_CHILD_SUBREAPER = 36 + PR_GET_CHILD_SUBREAPER = 37 + AT_FDCWD = -100 + AT_EACCESS = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_REMOVEDIR = 0x200 + RENAME_NOREPLACE = 1 << 0 ) const ( @@ -476,6 +603,7 @@ const ( EMLINK = Errno(125) ENAMETOOLONG = Errno(126) ENFILE = Errno(127) + ENOATTR = Errno(265) ENODEV = Errno(128) ENOENT = Errno(129) ENOEXEC = Errno(130) @@ -700,7 +828,7 @@ var errorList = [...]struct { {145, "EDC5145I", "The parameter list is too long, or the message to receive was too large for the buffer."}, {146, "EDC5146I", "Too many levels of symbolic links."}, {147, "EDC5147I", "Illegal byte sequence."}, - {148, "", ""}, + {148, "EDC5148I", "The named attribute or data not available."}, {149, "EDC5149I", "Value Overflow Error."}, {150, "EDC5150I", "UNIX System Services is not active."}, {151, "EDC5151I", "Dynamic allocation error."}, @@ -743,6 +871,7 @@ var errorList = [...]struct { {259, "EDC5259I", "A CUN_RS_NO_CONVERSION error was issued by Unicode Services."}, {260, "EDC5260I", "A CUN_RS_TABLE_NOT_ALIGNED error was issued by Unicode Services."}, {262, "EDC5262I", "An iconv() function encountered an unexpected error while using Unicode Services."}, + {265, "EDC5265I", "The named attribute not available."}, {1000, "EDC8000I", "A bad socket-call constant was found in the IUCV header."}, {1001, "EDC8001I", "An error was found in the IUCV header."}, {1002, "EDC8002I", "A socket descriptor is out of range."}, diff --git a/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s new file mode 100644 index 000000000..b77ff5db9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsymaddr_zos_s390x.s @@ -0,0 +1,364 @@ +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s +// Code generated by the command above; see README.md. DO NOT EDIT. + +//go:build zos && s390x +#include "textflag.h" + +// provide the address of function variable to be fixed up. 
+ +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Flistxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fremovexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fgetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fsetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_accept4Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·accept4(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_RemovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Removexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_Dup3Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Dup3(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_DirfdAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Dirfd(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCreateAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCreate(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCreate1Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCreate1(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollCtlAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollCtl(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollPwaitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollPwait(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EpollWaitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·EpollWait(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_EventfdAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Eventfd(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FaccessatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Faccessat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FchmodatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fchmodat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FchownatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fchownat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FdatasyncAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fdatasync(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_fstatatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·fstatat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LgetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lgetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +TEXT ·get_LsetxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lsetxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FstatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Fstatfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Futimes(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_FutimesatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Futimesat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_GetrandomAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Getrandom(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyInitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyInit(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyInit1Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyInit1(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyAddWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyAddWatch(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_InotifyRmWatchAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·InotifyRmWatch(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_ListxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Listxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LlistxattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Llistxattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LremovexattrAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lremovexattr(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LutimesAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Lutimes(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_StatfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Statfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SyncfsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Syncfs(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_UnshareAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unshare(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_LinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Linkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_MkdiratAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mkdirat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_MknodatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Mknodat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PivotRootAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·PivotRoot(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT 
·get_PrctlAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Prctl(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_PrlimitAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Prlimit(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_RenameatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Renameat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_Renameat2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Renameat2(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SethostnameAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Sethostname(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SetnsAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Setns(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_SymlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Symlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_UnlinkatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·Unlinkat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_openatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·openat(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_openat2Addr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·openat2(SB), R8 + MOVD R8, ret+0(FP) + RET + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +TEXT ·get_utimensatAddr(SB), NOSPLIT|NOFRAME, $0-8 + MOVD $·utimensat(SB), R8 + MOVD R8, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index ccb02f240..07642c308 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s 
b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 8b8bb2840..923e08cb7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 1b40b997b..7d73dda64 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func pthread_chdir_np(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_chdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pthread_fchdir_np(fd int) (err error) { + _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pthread_fchdir_np_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 08362c1ab..057700111 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_chdir_np(SB) +GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB) + +TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pthread_fchdir_np(SB) +GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, 
$libc_pthread_fchdir_np_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 94f011238..7ccf66b7e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags zos,s390x syscall_zos_s390x.go +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s // Code generated by the command above; see README.md. DO NOT EDIT. //go:build zos && s390x @@ -6,17 +6,100 @@ package unix import ( + "runtime" + "syscall" "unsafe" ) +var _ syscall.Errno + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.ExitSyscall() val = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FLISTXATTR_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FlistxattrAddr() *(func(fd int, dest []byte) (sz int, err error)) + +var Flistxattr = enter_Flistxattr + +func enter_Flistxattr(fd int, dest []byte) (sz int, err error) { + funcref := get_FlistxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FLISTXATTR_A<<4, "") == 0 { + *funcref = impl_Flistxattr + } else { + *funcref = error_Flistxattr + } + return (*funcref)(fd, dest) +} + +func error_Flistxattr(fd int, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FremovexattrAddr() *(func(fd int, attr string) (err error)) + +var Fremovexattr = enter_Fremovexattr + +func enter_Fremovexattr(fd int, attr string) (err error) { + funcref := get_FremovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FREMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Fremovexattr + } else { + *funcref = error_Fremovexattr } + return (*funcref)(fd, attr) +} + +func error_Fremovexattr(fd int, attr string) (err error) { + err = ENOSYS return } @@ -29,10 +112,12 @@ func read(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS_READ<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -46,31 +131,159 @@ func write(fd int, p []byte) (n int, err error) { } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FGETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FgetxattrAddr() *(func(fd int, attr string, dest []byte) (sz int, err error)) + +var Fgetxattr = enter_Fgetxattr + +func enter_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + funcref := get_FgetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FGETXATTR_A<<4, "") == 0 { + *funcref = impl_Fgetxattr + } else { + *funcref = error_Fgetxattr + } + return (*funcref)(fd, attr, dest) +} + +func error_Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(data) > 0 { + _p1 = unsafe.Pointer(&data[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSETXATTR_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(data)), uintptr(flag)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FsetxattrAddr() *(func(fd int, attr string, data []byte, flag int) (err error)) + +var Fsetxattr = enter_Fsetxattr + +func enter_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + funcref := get_FsetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___FSETXATTR_A<<4, "") == 0 { + *funcref = impl_Fsetxattr + } else { + *funcref = error_Fsetxattr } + return (*funcref)(fd, attr, data, flag) +} + +func error_Fsetxattr(fd int, attr string, data []byte, flag int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { - r0, _, e1 := syscall_syscall(SYS___ACCEPT_A, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = 
errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCEPT4_A<<4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_accept4Addr() *(func(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)) + +var accept4 = enter_accept4 + +func enter_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + funcref := get_accept4Addr() + if funcptrtest(GetZosLibVec()+SYS___ACCEPT4_A<<4, "") == 0 { + *funcref = impl_accept4 + } else { + *funcref = error_accept4 } + return (*funcref)(s, rsa, addrlen, flags) +} + +func error_accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(SYS___BIND_A, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___BIND_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -78,9 +291,11 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { - _, _, e1 := syscall_syscall(SYS___CONNECT_A, uintptr(s), uintptr(addr), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONNECT_A<<4, uintptr(s), uintptr(addr), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -88,10 +303,10 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(n int, list *_Gid_t) (nn int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) nn = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -99,9 +314,9 @@ func getgroups(n int, list *_Gid_t) (nn int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(n int, list *_Gid_t) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGROUPS<<4, uintptr(n), uintptr(unsafe.Pointer(list))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -109,9 +324,11 @@ func setgroups(n int, list *_Gid_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { - _, _, e1 := syscall_syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), 
uintptr(unsafe.Pointer(vallen)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -119,9 +336,11 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { - _, _, e1 := syscall_syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETSOCKOPT<<4, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -129,10 +348,10 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKET<<4, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -140,9 +359,9 @@ func socket(domain int, typ int, proto int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { - _, _, e1 := syscall_rawsyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SOCKETPAIR<<4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -150,9 +369,9 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___GETPEERNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETPEERNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -160,10 +379,52 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___GETSOCKNAME_A, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETSOCKNAME_A<<4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Removexattr(path string, attr string) (err error) { + var _p0 *byte + 
_p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_RemovexattrAddr() *(func(path string, attr string) (err error)) + +var Removexattr = enter_Removexattr + +func enter_Removexattr(path string, attr string) (err error) { + funcref := get_RemovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___REMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Removexattr + } else { + *funcref = error_Removexattr } + return (*funcref)(path, attr) +} + +func error_Removexattr(path string, attr string) (err error) { + err = ENOSYS return } @@ -176,10 +437,12 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS___RECVFROM_A, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVFROM_A<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -193,9 +456,11 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall6(SYS___SENDTO_A, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDTO_A<<4, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -203,10 +468,12 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS___RECVMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RECVMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -214,10 +481,12 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS___SENDMSG_A, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SENDMSG_A<<4, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -225,10 +494,12 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, 
length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { - r0, _, e1 := syscall_syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MMAP<<4, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) + runtime.ExitSyscall() ret = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -236,9 +507,11 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := syscall_syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MUNMAP<<4, uintptr(addr), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -246,9 +519,11 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctl(fd int, req int, arg uintptr) (err error) { - _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -256,9 +531,62 @@ func ioctl(fd int, req int, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) { - _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_IOCTL<<4, uintptr(fd), uintptr(req), uintptr(arg)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmat(id int, addr uintptr, flag int) (ret uintptr, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMAT<<4, uintptr(id), uintptr(addr), uintptr(flag)) + runtime.ExitSyscall() + ret = uintptr(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmctl(id int, cmd int, buf *SysvShmDesc) (result int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMCTL64<<4, uintptr(id), uintptr(cmd), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + result = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmdt(addr uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMDT<<4, uintptr(addr)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func shmget(key int, size int, flag int) (id int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHMGET<<4, uintptr(key), uintptr(size), uintptr(flag)) + runtime.ExitSyscall() + id = int(r0) + if int64(r0) == -1 { + 
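+	// By convention a -1 return from the LE callable service signals
+	// failure; errnoErr2 folds the two error codes it reports (errno and
+	// the z/OS secondary errno2) into a single Go error.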
err = errnoErr2(e1, e2) } return } @@ -271,9 +599,11 @@ func Access(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___ACCESS_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___ACCESS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -286,9 +616,11 @@ func Chdir(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHDIR_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -301,9 +633,11 @@ func Chown(path string, uid int, gid int) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -316,9 +650,11 @@ func Chmod(path string, mode uint32) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHMOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHMOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -331,10 +667,12 @@ func Creat(path string, mode uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(SYS___CREAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CREAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -342,10 +680,12 @@ func Creat(path string, mode uint32) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := syscall_syscall(SYS_DUP, uintptr(oldfd), 0, 0) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP<<4, uintptr(oldfd)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -353,617 +693,2216 @@ func Dup(oldfd int) (fd int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup2(oldfd int, newfd int) (err error) { - _, _, e1 := syscall_syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DUP2<<4, uintptr(oldfd), uintptr(newfd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Errno2() (er2 int) { - uer2, _, _ := syscall_syscall(SYS___ERRNO2, 0, 0, 0) - er2 = int(uer2) +func impl_Dup3(oldfd int, newfd int, flags int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := 
CallLeFuncWithErr(GetZosLibVec()+SYS_DUP3<<4, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_Dup3Addr() *(func(oldfd int, newfd int, flags int) (err error)) -func Err2ad() (eadd *int) { - ueadd, _, _ := syscall_syscall(SYS___ERR2AD, 0, 0, 0) - eadd = (*int)(unsafe.Pointer(ueadd)) - return -} +var Dup3 = enter_Dup3 -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func enter_Dup3(oldfd int, newfd int, flags int) (err error) { + funcref := get_Dup3Addr() + if funcptrtest(GetZosLibVec()+SYS_DUP3<<4, "") == 0 { + *funcref = impl_Dup3 + } else { + *funcref = error_Dup3 + } + return (*funcref)(oldfd, newfd, flags) +} -func Exit(code int) { - syscall_syscall(SYS_EXIT, uintptr(code), 0, 0) +func error_Dup3(oldfd int, newfd int, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchdir(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Dirfd(dirp uintptr) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_DIRFD<<4, uintptr(dirp)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_DirfdAddr() *(func(dirp uintptr) (fd int, err error)) -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) +var Dirfd = enter_Dirfd + +func enter_Dirfd(dirp uintptr) (fd int, err error) { + funcref := get_DirfdAddr() + if funcptrtest(GetZosLibVec()+SYS_DIRFD<<4, "") == 0 { + *funcref = impl_Dirfd + } else { + *funcref = error_Dirfd } + return (*funcref)(dirp) +} + +func error_Dirfd(dirp uintptr) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fchown(fd int, uid int, gid int) (err error) { - _, _, e1 := syscall_syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCreate(size int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE<<4, uintptr(size)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCreateAddr() *(func(size int) (fd int, err error)) -func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { - r0, _, e1 := syscall_syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - retval = int(r0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCreate = enter_EpollCreate + +func enter_EpollCreate(size int) (fd int, err error) { + funcref := get_EpollCreateAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE<<4, "") == 0 { + *funcref = impl_EpollCreate + } else { + *funcref = error_EpollCreate } + return (*funcref)(size) +} + +func error_EpollCreate(size int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fstat(fd int, stat *Stat_LE_t) (err error) { - _, _, e1 := syscall_syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = 
errnoErr(e1) +func impl_EpollCreate1(flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCreate1Addr() *(func(flags int) (fd int, err error)) -func Fstatvfs(fd int, stat *Statvfs_t) (err error) { - _, _, e1 := syscall_syscall(SYS_FSTATVFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCreate1 = enter_EpollCreate1 + +func enter_EpollCreate1(flags int) (fd int, err error) { + funcref := get_EpollCreate1Addr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CREATE1<<4, "") == 0 { + *funcref = impl_EpollCreate1 + } else { + *funcref = error_EpollCreate1 } + return (*funcref)(flags) +} + +func error_EpollCreate1(flags int) (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Fsync(fd int) (err error) { - _, _, e1 := syscall_syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_CTL<<4, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollCtlAddr() *(func(epfd int, op int, fd int, event *EpollEvent) (err error)) -func Ftruncate(fd int, length int64) (err error) { - _, _, e1 := syscall_syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) +var EpollCtl = enter_EpollCtl + +func enter_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + funcref := get_EpollCtlAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_CTL<<4, "") == 0 { + *funcref = impl_EpollCtl + } else { + *funcref = error_EpollCtl } - return + return (*funcref)(epfd, op, fd, event) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpagesize() (pgsize int) { - r0, _, _ := syscall_syscall(SYS_GETPAGESIZE, 0, 0, 0) - pgsize = int(r0) +func error_EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mprotect(b []byte, prot int) (err error) { +func impl_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), uintptr(unsafe.Pointer(sigmask))) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollPwaitAddr() *(func(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error)) -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { 
- _p0 = unsafe.Pointer(&b[0]) +var EpollPwait = enter_EpollPwait + +func enter_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + funcref := get_EpollPwaitAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_PWAIT<<4, "") == 0 { + *funcref = impl_EpollPwait } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) + *funcref = error_EpollPwait } + return (*funcref)(epfd, events, msec, sigmask) +} + +func error_EpollPwait(epfd int, events []EpollEvent, msec int, sigmask *int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Poll(fds []PollFd, timeout int) (n int, err error) { +func impl_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer - if len(fds) > 0 { - _p0 = unsafe.Pointer(&fds[0]) + if len(events) > 0 { + _p0 = unsafe.Pointer(&events[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall(SYS_POLL, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EPOLL_WAIT<<4, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec)) + runtime.ExitSyscall() n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_EpollWaitAddr() *(func(epfd int, events []EpollEvent, msec int) (n int, err error)) -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := syscall_syscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) +var EpollWait = enter_EpollWait + +func enter_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + funcref := get_EpollWaitAddr() + if funcptrtest(GetZosLibVec()+SYS_EPOLL_WAIT<<4, "") == 0 { + *funcref = impl_EpollWait + } else { + *funcref = error_EpollWait } + return (*funcref)(epfd, events, msec) +} + +func error_EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func W_Getmntent(buff *byte, size int) (lastsys int, err error) { - r0, _, e1 := syscall_syscall(SYS_W_GETMNTENT, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) - lastsys = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func Errno2() (er2 int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS___ERRNO2<<4) + runtime.ExitSyscall() + er2 = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { - r0, _, e1 := syscall_syscall(SYS___W_GETMNTENT_A, uintptr(unsafe.Pointer(buff)), uintptr(size), 0) - lastsys = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Eventfd(initval uint, flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_EVENTFD<<4, uintptr(initval), uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } +//go:nosplit +func get_EventfdAddr() *(func(initval uint, flags int) (fd int, err error)) + +var Eventfd = enter_Eventfd + +func enter_Eventfd(initval uint, flags int) (fd int, err error) { + funcref := get_EventfdAddr() + if funcptrtest(GetZosLibVec()+SYS_EVENTFD<<4, 
"") == 0 { + *funcref = impl_Eventfd + } else { + *funcref = error_Eventfd + } + return (*funcref)(initval, flags) +} + +func error_Eventfd(initval uint, flags int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { +func Exit(code int) { + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec()+SYS_EXIT<<4, uintptr(code)) + runtime.ExitSyscall() + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - var _p1 *byte - _p1, err = BytePtrFromString(filesystem) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - var _p3 *byte - _p3, err = BytePtrFromString(parm) - if err != nil { - return - } - _, _, e1 := syscall_syscall6(SYS___MOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FACCESSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_FaccessatAddr() *(func(dirfd int, path string, mode uint32, flags int) (err error)) -func unmount(filesystem string, mtm int) (err error) { +var Faccessat = enter_Faccessat + +func enter_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + funcref := get_FaccessatAddr() + if funcptrtest(GetZosLibVec()+SYS___FACCESSAT_A<<4, "") == 0 { + *funcref = impl_Faccessat + } else { + *funcref = error_Faccessat + } + return (*funcref)(dirfd, path, mode, flags) +} + +func error_Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHDIR<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHMOD<<4, uintptr(fd), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(filesystem) + _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UMOUNT_A, uintptr(unsafe.Pointer(_p0)), uintptr(mtm), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHMODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FchmodatAddr() *(func(dirfd int, 
path string, mode uint32, flags int) (err error)) + +var Fchmodat = enter_Fchmodat + +func enter_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + funcref := get_FchmodatAddr() + if funcptrtest(GetZosLibVec()+SYS___FCHMODAT_A<<4, "") == 0 { + *funcref = impl_Fchmodat + } else { + *funcref = error_Fchmodat + } + return (*funcref)(dirfd, path, mode, flags) +} + +func error_Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchown(fd int, uid int, gid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCHOWN<<4, uintptr(fd), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Chroot(path string) (err error) { +func impl_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___CHROOT_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FCHOWNAT_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FchownatAddr() *(func(fd int, path string, uid int, gid int, flags int) (err error)) + +var Fchownat = enter_Fchownat + +func enter_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + funcref := get_FchownatAddr() + if funcptrtest(GetZosLibVec()+SYS___FCHOWNAT_A<<4, "") == 0 { + *funcref = impl_Fchownat + } else { + *funcref = error_Fchownat } + return (*funcref)(fd, path, uid, gid, flags) +} + +func error_Fchownat(fd int, path string, uid int, gid int, flags int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Uname(buf *Utsname) (err error) { - _, _, e1 := syscall_rawsyscall(SYS___UNAME_A, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func FcntlInt(fd uintptr, cmd int, arg int) (retval int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), uintptr(arg)) + runtime.ExitSyscall() + retval = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Gethostname(buf []byte) (err error) { +func impl_Fdatasync(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FDATASYNC<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FdatasyncAddr() *(func(fd int) (err error)) + +var Fdatasync = enter_Fdatasync + +func enter_Fdatasync(fd int) (err error) { + funcref := get_FdatasyncAddr() + if funcptrtest(GetZosLibVec()+SYS_FDATASYNC<<4, "") == 0 { + *funcref = impl_Fdatasync + } else { + *funcref = error_Fdatasync + } + return (*funcref)(fd) +} + +func error_Fdatasync(fd int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fstat(fd int, stat *Stat_LE_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTAT<<4, uintptr(fd), 
uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FSTATAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_fstatatAddr() *(func(dirfd int, path string, stat *Stat_LE_t, flags int) (err error)) + +var fstatat = enter_fstatat + +func enter_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + funcref := get_fstatatAddr() + if funcptrtest(GetZosLibVec()+SYS___FSTATAT_A<<4, "") == 0 { + *funcref = impl_fstatat + } else { + *funcref = error_fstatat + } + return (*funcref)(dirfd, path, stat, flags) +} + +func error_fstatat(dirfd int, path string, stat *Stat_LE_t, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(link) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LGETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LgetxattrAddr() *(func(link string, attr string, dest []byte) (sz int, err error)) + +var Lgetxattr = enter_Lgetxattr + +func enter_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + funcref := get_LgetxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LGETXATTR_A<<4, "") == 0 { + *funcref = impl_Lgetxattr + } else { + *funcref = error_Lgetxattr + } + return (*funcref)(link, attr, dest) +} + +func error_Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSETXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LsetxattrAddr() *(func(path string, attr string, data []byte, flags int) (err error)) + +var Lsetxattr = enter_Lsetxattr + +func enter_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + funcref := get_LsetxattrAddr() + if 
funcptrtest(GetZosLibVec()+SYS___LSETXATTR_A<<4, "") == 0 { + *funcref = impl_Lsetxattr + } else { + *funcref = error_Lsetxattr + } + return (*funcref)(path, attr, data, flags) +} + +func error_Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Fstatfs(fd int, buf *Statfs_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATFS<<4, uintptr(fd), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FstatfsAddr() *(func(fd int, buf *Statfs_t) (err error)) + +var Fstatfs = enter_Fstatfs + +func enter_Fstatfs(fd int, buf *Statfs_t) (err error) { + funcref := get_FstatfsAddr() + if funcptrtest(GetZosLibVec()+SYS_FSTATFS<<4, "") == 0 { + *funcref = impl_Fstatfs + } else { + *funcref = error_Fstatfs + } + return (*funcref)(fd, buf) +} + +func error_Fstatfs(fd int, buf *Statfs_t) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fstatvfs(fd int, stat *Statvfs_t) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSTATVFS<<4, uintptr(fd), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FSYNC<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Futimes(fd int, tv []Timeval) (err error) { var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + if len(tv) > 0 { + _p0 = unsafe.Pointer(&tv[0]) } else { _p0 = unsafe.Pointer(&_zero) } - _, _, e1 := syscall_syscall(SYS___GETHOSTNAME_A, uintptr(_p0), uintptr(len(buf)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FUTIMES<<4, uintptr(fd), uintptr(_p0), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_FutimesAddr() *(func(fd int, tv []Timeval) (err error)) -func Getegid() (egid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETEGID, 0, 0, 0) - egid = int(r0) +var Futimes = enter_Futimes + +func enter_Futimes(fd int, tv []Timeval) (err error) { + funcref := get_FutimesAddr() + if funcptrtest(GetZosLibVec()+SYS_FUTIMES<<4, "") == 0 { + *funcref = impl_Futimes + } else { + *funcref = error_Futimes + } + return (*funcref)(fd, tv) +} + +func error_Futimes(fd int, tv []Timeval) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Geteuid() (uid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETEUID, 0, 0, 0) - uid = int(r0) +func impl_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(tv) > 0 { + _p1 = unsafe.Pointer(&tv[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___FUTIMESAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), 
uintptr(_p1), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_FutimesatAddr() *(func(dirfd int, path string, tv []Timeval) (err error)) + +var Futimesat = enter_Futimesat + +func enter_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + funcref := get_FutimesatAddr() + if funcptrtest(GetZosLibVec()+SYS___FUTIMESAT_A<<4, "") == 0 { + *funcref = impl_Futimesat + } else { + *funcref = error_Futimesat + } + return (*funcref)(dirfd, path, tv) +} + +func error_Futimesat(dirfd int, path string, tv []Timeval) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getgid() (gid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETGID, 0, 0, 0) - gid = int(r0) +func Ftruncate(fd int, length int64) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FTRUNCATE<<4, uintptr(fd), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpid() (pid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETPID, 0, 0, 0) - pid = int(r0) +func impl_Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRANDOM<<4, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_GetrandomAddr() *(func(buf []byte, flags int) (n int, err error)) + +var Getrandom = enter_Getrandom + +func enter_Getrandom(buf []byte, flags int) (n int, err error) { + funcref := get_GetrandomAddr() + if funcptrtest(GetZosLibVec()+SYS_GETRANDOM<<4, "") == 0 { + *funcref = impl_Getrandom + } else { + *funcref = error_Getrandom + } + return (*funcref)(buf, flags) +} + +func error_Getrandom(buf []byte, flags int) (n int, err error) { + n = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_InotifyInit() (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_INOTIFY_INIT<<4) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyInitAddr() *(func() (fd int, err error)) + +var InotifyInit = enter_InotifyInit + +func enter_InotifyInit() (fd int, err error) { + funcref := get_InotifyInitAddr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT<<4, "") == 0 { + *funcref = impl_InotifyInit + } else { + *funcref = error_InotifyInit } + return (*funcref)() +} + +func error_InotifyInit() (fd int, err error) { + fd = -1 + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getppid() (pid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETPPID, 0, 0, 0) - pid = int(r0) +func impl_InotifyInit1(flags int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, uintptr(flags)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func 
get_InotifyInit1Addr() *(func(flags int) (fd int, err error)) + +var InotifyInit1 = enter_InotifyInit1 + +func enter_InotifyInit1(flags int) (fd int, err error) { + funcref := get_InotifyInit1Addr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_INIT1<<4, "") == 0 { + *funcref = impl_InotifyInit1 + } else { + *funcref = error_InotifyInit1 + } + return (*funcref)(flags) +} + +func error_InotifyInit1(flags int) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + runtime.ExitSyscall() + watchdesc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyAddWatchAddr() *(func(fd int, pathname string, mask uint32) (watchdesc int, err error)) + +var InotifyAddWatch = enter_InotifyAddWatch + +func enter_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + funcref := get_InotifyAddWatchAddr() + if funcptrtest(GetZosLibVec()+SYS___INOTIFY_ADD_WATCH_A<<4, "") == 0 { + *funcref = impl_InotifyAddWatch + } else { + *funcref = error_InotifyAddWatch + } + return (*funcref)(fd, pathname, mask) +} + +func error_InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + watchdesc = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, uintptr(fd), uintptr(watchdesc)) + runtime.ExitSyscall() + success = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_InotifyRmWatchAddr() *(func(fd int, watchdesc uint32) (success int, err error)) + +var InotifyRmWatch = enter_InotifyRmWatch + +func enter_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + funcref := get_InotifyRmWatchAddr() + if funcptrtest(GetZosLibVec()+SYS_INOTIFY_RM_WATCH<<4, "") == 0 { + *funcref = impl_InotifyRmWatch + } else { + *funcref = error_InotifyRmWatch + } + return (*funcref)(fd, watchdesc) +} + +func error_InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + success = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_ListxattrAddr() *(func(path string, dest []byte) (sz int, err error)) + +var Listxattr = enter_Listxattr + +func enter_Listxattr(path string, dest []byte) (sz int, err error) { + funcref := get_ListxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LISTXATTR_A<<4, "") == 0 { + *funcref = impl_Listxattr + 
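+	// funcptrtest returns 0 when __listxattr_a is present in the LE
+	// vector; the resolved implementation is cached through the pointer
+	// from get_ListxattrAddr so the probe is not repeated on every call.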
} else { + *funcref = error_Listxattr + } + return (*funcref)(path, dest) +} + +func error_Listxattr(path string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LLISTXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + runtime.ExitSyscall() + sz = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LlistxattrAddr() *(func(path string, dest []byte) (sz int, err error)) + +var Llistxattr = enter_Llistxattr + +func enter_Llistxattr(path string, dest []byte) (sz int, err error) { + funcref := get_LlistxattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LLISTXATTR_A<<4, "") == 0 { + *funcref = impl_Llistxattr + } else { + *funcref = error_Llistxattr + } + return (*funcref)(path, dest) +} + +func error_Llistxattr(path string, dest []byte) (sz int, err error) { + sz = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LremovexattrAddr() *(func(path string, attr string) (err error)) + +var Lremovexattr = enter_Lremovexattr + +func enter_Lremovexattr(path string, attr string) (err error) { + funcref := get_LremovexattrAddr() + if funcptrtest(GetZosLibVec()+SYS___LREMOVEXATTR_A<<4, "") == 0 { + *funcref = impl_Lremovexattr + } else { + *funcref = error_Lremovexattr + } + return (*funcref)(path, attr) +} + +func error_Lremovexattr(path string, attr string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Lutimes(path string, tv []Timeval) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(tv) > 0 { + _p1 = unsafe.Pointer(&tv[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LUTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(tv))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LutimesAddr() *(func(path string, tv []Timeval) (err error)) + +var Lutimes = enter_Lutimes + +func enter_Lutimes(path string, tv []Timeval) (err error) { + funcref := get_LutimesAddr() + if funcptrtest(GetZosLibVec()+SYS___LUTIMES_A<<4, "") == 0 { + *funcref = impl_Lutimes + } else { + *funcref = error_Lutimes + } + return (*funcref)(path, tv) +} + +func error_Lutimes(path string, tv []Timeval) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + 
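+	// An empty slice has no backing array to point at, so the wrapper
+	// passes the address of the package-level _zero sentinel instead,
+	// giving the syscall a valid (if unused) buffer address.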
var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MPROTECT<<4, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_MSYNC<<4, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Console2(cmsg *ConsMsg2, modstr *byte, concmd *uint32) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CONSOLE2<<4, uintptr(unsafe.Pointer(cmsg)), uintptr(unsafe.Pointer(modstr)), uintptr(unsafe.Pointer(concmd))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Poll(fds []PollFd, timeout int) (n int, err error) { + var _p0 unsafe.Pointer + if len(fds) > 0 { + _p0 = unsafe.Pointer(&fds[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POLL<<4, uintptr(_p0), uintptr(len(fds)), uintptr(timeout)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readdir_r(dirp uintptr, entry *direntLE, result **direntLE) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___READDIR_R_A<<4, uintptr(dirp), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Statfs(path string, buf *Statfs_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STATFS_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_StatfsAddr() *(func(path string, buf *Statfs_t) (err error)) + +var Statfs = enter_Statfs + +func enter_Statfs(path string, buf *Statfs_t) (err error) { + funcref := get_StatfsAddr() + if funcptrtest(GetZosLibVec()+SYS___STATFS_A<<4, "") == 0 { + *funcref = impl_Statfs + } else { + *funcref = error_Statfs + } + return (*funcref)(path, buf) +} + +func error_Statfs(path string, buf *Statfs_t) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Syncfs(fd int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SYNCFS<<4, uintptr(fd)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_SyncfsAddr() *(func(fd int) (err error)) + +var Syncfs = enter_Syncfs + +func enter_Syncfs(fd int) (err error) { + funcref := get_SyncfsAddr() + if 
funcptrtest(GetZosLibVec()+SYS_SYNCFS<<4, "") == 0 { + *funcref = impl_Syncfs + } else { + *funcref = error_Syncfs + } + return (*funcref)(fd) +} + +func error_Syncfs(fd int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TIMES<<4, uintptr(unsafe.Pointer(tms))) + runtime.ExitSyscall() + ticks = uintptr(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent(buff *byte, size int) (lastsys int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_W_GETMNTENT<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) + runtime.ExitSyscall() + lastsys = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func W_Getmntent_A(buff *byte, size int) (lastsys int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___W_GETMNTENT_A<<4, uintptr(unsafe.Pointer(buff)), uintptr(size)) + runtime.ExitSyscall() + lastsys = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount_LE(path string, filesystem string, fstype string, mtm uint32, parmlen int32, parm string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(filesystem) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + var _p3 *byte + _p3, err = BytePtrFromString(parm) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(mtm), uintptr(parmlen), uintptr(unsafe.Pointer(_p3))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unmount_LE(filesystem string, mtm int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(filesystem) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UMOUNT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mtm)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___CHROOT_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SELECT<<4, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout))) + runtime.ExitSyscall() + ret = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return 
+} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_____OSNAME_A<<4, uintptr(unsafe.Pointer(buf))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Unshare(flags int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNSHARE<<4, uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnshareAddr() *(func(flags int) (err error)) + +var Unshare = enter_Unshare + +func enter_Unshare(flags int) (err error) { + funcref := get_UnshareAddr() + if funcptrtest(GetZosLibVec()+SYS_UNSHARE<<4, "") == 0 { + *funcref = impl_Unshare + } else { + *funcref = error_Unshare + } + return (*funcref)(flags) +} + +func error_Unshare(flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gethostname(buf []byte) (err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___GETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(buf))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getgid() (gid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETGID<<4) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPID<<4) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPGID<<4, uintptr(pid)) + pgid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (pid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETPPID<<4) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETPRIORITY<<4, uintptr(which), uintptr(who)) + runtime.ExitSyscall() + prio = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrlimit(resource int, rlim *Rlimit) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(rlim))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getrusage(who int, rusage *rusage_zos) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETRUSAGE<<4, uintptr(who), uintptr(unsafe.Pointer(rusage))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getegid() (egid int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEGID<<4) + runtime.ExitSyscall() + egid = int(r0) + return +} + +// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Geteuid() (euid int) { + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETEUID<<4) + runtime.ExitSyscall() + euid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) (sid int, err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETSID<<4, uintptr(pid)) + sid = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getuid() (uid int) { + r0, _, _ := CallLeFuncWithErr(GetZosLibVec() + SYS_GETUID<<4) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig Signal) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_KILL<<4, uintptr(pid), uintptr(sig)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lchown(path string, uid int, gid int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LCHOWN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Link(path string, link string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(link) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldPath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newPath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LINKAT_A<<4, uintptr(oldDirFd), uintptr(unsafe.Pointer(_p0)), uintptr(newDirFd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_LinkatAddr() *(func(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error)) + +var Linkat = enter_Linkat + +func enter_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + funcref := get_LinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___LINKAT_A<<4, "") == 0 { + *funcref = impl_Linkat + } else { + *funcref = error_Linkat + } + return (*funcref)(oldDirFd, oldPath, newDirFd, newPath, flags) +} + +func error_Linkat(oldDirFd int, oldPath string, newDirFd int, newPath string, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listen(s int, n int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LISTEN<<4, uintptr(s), uintptr(n)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +func lstat(path string, stat *Stat_LE_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___LSTAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdir(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIR_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKDIRAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MkdiratAddr() *(func(dirfd int, path string, mode uint32) (err error)) + +var Mkdirat = enter_Mkdirat + +func enter_Mkdirat(dirfd int, path string, mode uint32) (err error) { + funcref := get_MkdiratAddr() + if funcptrtest(GetZosLibVec()+SYS___MKDIRAT_A<<4, "") == 0 { + *funcref = impl_Mkdirat + } else { + *funcref = error_Mkdirat + } + return (*funcref)(dirfd, path, mode) +} + +func error_Mkdirat(dirfd int, path string, mode uint32) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkfifo(path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKFIFO_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknod(path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNOD_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___MKNODAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_MknodatAddr() *(func(dirfd int, path string, mode uint32, dev int) (err error)) + +var Mknodat = enter_Mknodat + +func enter_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + funcref := get_MknodatAddr() + if funcptrtest(GetZosLibVec()+SYS___MKNODAT_A<<4, "") == 0 { + *funcref = impl_Mknodat + } else { + *funcref = 
error_Mknodat + } + return (*funcref)(dirfd, path, mode, dev) +} + +func error_Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_PivotRoot(newroot string, oldroot string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(oldroot) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_PivotRootAddr() *(func(newroot string, oldroot string) (err error)) + +var PivotRoot = enter_PivotRoot + +func enter_PivotRoot(newroot string, oldroot string) (err error) { + funcref := get_PivotRootAddr() + if funcptrtest(GetZosLibVec()+SYS___PIVOT_ROOT_A<<4, "") == 0 { + *funcref = impl_PivotRoot + } else { + *funcref = error_PivotRoot + } + return (*funcref)(newroot, oldroot) +} + +func error_PivotRoot(newroot string, oldroot string) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pread(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PREAD<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := syscall_syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func Pwrite(fd int, p []byte, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PWRITE<<4, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset)) + runtime.ExitSyscall() + n = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrlimit(resource int, rlim *Rlimit) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___PRCTL_A<<4, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_PrctlAddr() *(func(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)) -func getrusage(who int, rusage *rusage_zos) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) +var Prctl = enter_Prctl + +func enter_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err 
error) { + funcref := get_PrctlAddr() + if funcptrtest(GetZosLibVec()+SYS___PRCTL_A<<4, "") == 0 { + *funcref = impl_Prctl + } else { + *funcref = error_Prctl } - return + return (*funcref)(option, arg2, arg3, arg4, arg5) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } +func error_Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getuid() (uid int) { - r0, _, _ := syscall_rawsyscall(SYS_GETUID, 0, 0, 0) - uid = int(r0) +func impl_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PRLIMIT<<4, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_PrlimitAddr() *(func(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error)) -func Kill(pid int, sig Signal) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) +var Prlimit = enter_Prlimit + +func enter_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + funcref := get_PrlimitAddr() + if funcptrtest(GetZosLibVec()+SYS_PRLIMIT<<4, "") == 0 { + *funcref = impl_Prlimit + } else { + *funcref = error_Prlimit } + return (*funcref)(pid, resource, newlimit, old) +} + +func error_Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Lchown(path string, uid int, gid int) (err error) { +func Rename(from string, to string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(from) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(to) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LCHOWN_A, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Link(path string, link string) (err error) { +func impl_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(oldpath) if err != nil { return } var _p1 *byte - _p1, err = BytePtrFromString(link) + _p1, err = BytePtrFromString(newpath) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_RenameatAddr() 
*(func(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)) -func Listen(s int, n int) (err error) { - _, _, e1 := syscall_syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0) - if e1 != 0 { - err = errnoErr(e1) +var Renameat = enter_Renameat + +func enter_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + funcref := get_RenameatAddr() + if funcptrtest(GetZosLibVec()+SYS___RENAMEAT_A<<4, "") == 0 { + *funcref = impl_Renameat + } else { + *funcref = error_Renameat } + return (*funcref)(olddirfd, oldpath, newdirfd, newpath) +} + +func error_Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func lstat(path string, stat *Stat_LE_t) (err error) { +func impl_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { var _p0 *byte - _p0, err = BytePtrFromString(path) + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___LSTAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RENAMEAT2_A<<4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_Renameat2Addr() *(func(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error)) -func Mkdir(path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___MKDIR_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) +var Renameat2 = enter_Renameat2 + +func enter_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + funcref := get_Renameat2Addr() + if funcptrtest(GetZosLibVec()+SYS___RENAMEAT2_A<<4, "") == 0 { + *funcref = impl_Renameat2 + } else { + *funcref = error_Renameat2 } + return (*funcref)(olddirfd, oldpath, newdirfd, newpath, flags) +} + +func error_Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mkfifo(path string, mode uint32) (err error) { +func Rmdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } - _, _, e1 := syscall_syscall(SYS___MKFIFO_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___RMDIR_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Mknod(path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___MKNOD_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) - if e1 != 0 { - err = errnoErr(e1) +func Seek(fd int, offset int64, whence int) (off int64, err error) { + 
runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_LSEEK<<4, uintptr(fd), uintptr(offset), uintptr(whence)) + runtime.ExitSyscall() + off = int64(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pread(fd int, p []byte, offset int64) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) +func Setegid(egid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEGID<<4, uintptr(egid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } - r0, _, e1 := syscall_syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Seteuid(euid int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETEUID<<4, uintptr(euid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Pwrite(fd int, p []byte, offset int64) (n int, err error) { +func impl_Sethostname(p []byte) (err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := syscall_syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, uintptr(_p0), uintptr(len(p))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_SethostnameAddr() *(func(p []byte) (err error)) -func Readlink(path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) +var Sethostname = enter_Sethostname + +func enter_Sethostname(p []byte) (err error) { + funcref := get_SethostnameAddr() + if funcptrtest(GetZosLibVec()+SYS___SETHOSTNAME_A<<4, "") == 0 { + *funcref = impl_Sethostname } else { - _p1 = unsafe.Pointer(&_zero) + *funcref = error_Sethostname } - r0, _, e1 := syscall_syscall(SYS___READLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return + return (*funcref)(p) } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Rename(from string, to string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(from) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(to) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___RENAME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } +func error_Sethostname(p []byte) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Rmdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := syscall_syscall(SYS___RMDIR_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) +func impl_Setns(fd int, nstype int) (err error) { + 
runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETNS<<4, uintptr(fd), uintptr(nstype)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +//go:nosplit +func get_SetnsAddr() *(func(fd int, nstype int) (err error)) -func Seek(fd int, offset int64, whence int) (off int64, err error) { - r0, _, e1 := syscall_syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) - off = int64(r0) - if e1 != 0 { - err = errnoErr(e1) +var Setns = enter_Setns + +func enter_Setns(fd int, nstype int) (err error) { + funcref := get_SetnsAddr() + if funcptrtest(GetZosLibVec()+SYS_SETNS<<4, "") == 0 { + *funcref = impl_Setns + } else { + *funcref = error_Setns } + return (*funcref)(fd, nstype) +} + +func error_Setns(fd int, nstype int) (err error) { + err = ENOSYS return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPRIORITY<<4, uintptr(which), uintptr(who), uintptr(prio)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -971,9 +2910,9 @@ func Setpriority(which int, who int, prio int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETPGID<<4, uintptr(pid), uintptr(pgid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -981,9 +2920,9 @@ func Setpgid(pid int, pgid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(resource int, lim *Rlimit) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(lim)), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETRLIMIT<<4, uintptr(resource), uintptr(unsafe.Pointer(lim))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -991,9 +2930,9 @@ func Setrlimit(resource int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREGID<<4, uintptr(rgid), uintptr(egid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1001,9 +2940,9 @@ func Setregid(rgid int, egid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETREUID<<4, uintptr(ruid), uintptr(euid)) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1011,10 +2950,10 @@ func Setreuid(ruid int, euid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { - r0, _, e1 := syscall_rawsyscall(SYS_SETSID, 0, 0, 0) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec() + SYS_SETSID<<4) pid = 
int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1022,9 +2961,11 @@ func Setsid() (pid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETUID<<4, uintptr(uid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1032,9 +2973,11 @@ func Setuid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(uid int) (err error) { - _, _, e1 := syscall_syscall(SYS_SETGID, uintptr(uid), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SETGID<<4, uintptr(uid)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1042,9 +2985,11 @@ func Setgid(uid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(fd int, how int) (err error) { - _, _, e1 := syscall_syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_SHUTDOWN<<4, uintptr(fd), uintptr(how)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1057,9 +3002,11 @@ func stat(path string, statLE *Stat_LE_t) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___STAT_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___STAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(statLE))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1077,17 +3024,63 @@ func Symlink(path string, link string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___SYMLINK_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINK_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldPath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newPath) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___SYMLINKAT_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(dirfd), uintptr(unsafe.Pointer(_p1))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } +//go:nosplit +func get_SymlinkatAddr() *(func(oldPath string, dirfd int, newPath string) (err error)) + +var Symlinkat = enter_Symlinkat + +func enter_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + funcref := get_SymlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___SYMLINKAT_A<<4, "") == 0 { + *funcref = impl_Symlinkat + } else { + *funcref = error_Symlinkat + } + return (*funcref)(oldPath, dirfd, newPath) +} + +func error_Symlinkat(oldPath string, dirfd int, newPath string) (err error) { + err = 
ENOSYS + return +} + // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() { - syscall_syscall(SYS_SYNC, 0, 0, 0) + runtime.EnterSyscall() + CallLeFuncWithErr(GetZosLibVec() + SYS_SYNC<<4) + runtime.ExitSyscall() return } @@ -1099,9 +3092,11 @@ func Truncate(path string, length int64) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___TRUNCATE_A, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___TRUNCATE_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(length)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1109,9 +3104,11 @@ func Truncate(path string, length int64) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcgetattr(fildes int, termptr *Termios) (err error) { - _, _, e1 := syscall_syscall(SYS_TCGETATTR, uintptr(fildes), uintptr(unsafe.Pointer(termptr)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCGETATTR<<4, uintptr(fildes), uintptr(unsafe.Pointer(termptr))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1119,9 +3116,11 @@ func Tcgetattr(fildes int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { - _, _, e1 := syscall_syscall(SYS_TCSETATTR, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_TCSETATTR<<4, uintptr(fildes), uintptr(when), uintptr(unsafe.Pointer(termptr))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1129,7 +3128,9 @@ func Tcsetattr(fildes int, when int, termptr *Termios) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(mask int) (oldmask int) { - r0, _, _ := syscall_syscall(SYS_UMASK, uintptr(mask), 0, 0) + runtime.EnterSyscall() + r0, _, _ := CallLeFuncWithErr(GetZosLibVec()+SYS_UMASK<<4, uintptr(mask)) + runtime.ExitSyscall() oldmask = int(r0) return } @@ -1142,10 +3143,49 @@ func Unlink(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UNLINK_A, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINK_A<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UNLINKAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_UnlinkatAddr() *(func(dirfd int, path string, flags int) (err error)) + +var Unlinkat = enter_Unlinkat + +func enter_Unlinkat(dirfd int, path string, flags int) (err error) { + funcref := get_UnlinkatAddr() + if funcptrtest(GetZosLibVec()+SYS___UNLINKAT_A<<4, "") == 0 { + *funcref = impl_Unlinkat + } else { + *funcref = error_Unlinkat } + return 
(*funcref)(dirfd, path, flags) +} + +func error_Unlinkat(dirfd int, path string, flags int) (err error) { + err = ENOSYS return } @@ -1157,9 +3197,11 @@ func Utime(path string, utim *Utimbuf) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UTIME_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIME_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(utim))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1172,11 +3214,91 @@ func open(path string, mode int, perm uint32) (fd int, err error) { if err != nil { return } - r0, _, e1 := syscall_syscall(SYS___OPEN_A, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPEN_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_openatAddr() *(func(dirfd int, path string, flags int, mode uint32) (fd int, err error)) + +var openat = enter_openat + +func enter_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + funcref := get_openatAddr() + if funcptrtest(GetZosLibVec()+SYS___OPENAT_A<<4, "") == 0 { + *funcref = impl_openat + } else { + *funcref = error_openat + } + return (*funcref)(dirfd, path, flags, mode) +} + +func error_openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + fd = -1 + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func impl_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___OPENAT2_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(open_how)), uintptr(size)) + runtime.ExitSyscall() fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_openat2Addr() *(func(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error)) + +var openat2 = enter_openat2 + +func enter_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + funcref := get_openat2Addr() + if funcptrtest(GetZosLibVec()+SYS___OPENAT2_A<<4, "") == 0 { + *funcref = impl_openat2 + } else { + *funcref = error_openat2 } + return (*funcref)(dirfd, path, open_how, size) +} + +func error_openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err error) { + fd = -1 + err = ENOSYS return } @@ -1188,9 +3310,23 @@ func remove(path string) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + 
runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_REMOVE<<4, uintptr(unsafe.Pointer(_p0))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func waitid(idType int, id int, info *Siginfo, options int) (err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITID<<4, uintptr(idType), uintptr(id), uintptr(unsafe.Pointer(info)), uintptr(options)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1198,10 +3334,12 @@ func remove(path string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) { - r0, _, e1 := syscall_syscall(SYS_WAITPID, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_WAITPID<<4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options)) + runtime.ExitSyscall() wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1209,9 +3347,9 @@ func waitpid(pid int, wstatus *_C_int, options int) (wpid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func gettimeofday(tv *timeval_zos) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GETTIMEOFDAY<<4, uintptr(unsafe.Pointer(tv))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1219,9 +3357,9 @@ func gettimeofday(tv *timeval_zos) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe(p *[2]_C_int) (err error) { - _, _, e1 := syscall_rawsyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_PIPE<<4, uintptr(unsafe.Pointer(p))) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } @@ -1234,20 +3372,87 @@ func utimes(path string, timeval *[2]Timeval) (err error) { if err != nil { return } - _, _, e1 := syscall_syscall(SYS___UTIMES_A, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) - if e1 != 0 { - err = errnoErr(e1) + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMES_A<<4, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval))) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nmsgsfds int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (ret int, err error) { - r0, _, e1 := syscall_syscall6(SYS_SELECT, uintptr(nmsgsfds), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) +func impl_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS___UTIMENSAT_A<<4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(ts)), uintptr(flags)) + runtime.ExitSyscall() + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +//go:nosplit +func get_utimensatAddr() *(func(dirfd int, path 
string, ts *[2]Timespec, flags int) (err error)) + +var utimensat = enter_utimensat + +func enter_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + funcref := get_utimensatAddr() + if funcptrtest(GetZosLibVec()+SYS___UTIMENSAT_A<<4, "") == 0 { + *funcref = impl_utimensat + } else { + *funcref = error_utimensat + } + return (*funcref)(dirfd, path, ts, flags) +} + +func error_utimensat(dirfd int, path string, ts *[2]Timespec, flags int) (err error) { + err = ENOSYS + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Posix_openpt(oflag int) (fd int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_POSIX_OPENPT<<4, uintptr(oflag)) + runtime.ExitSyscall() + fd = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Grantpt(fildes int) (rc int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_GRANTPT<<4, uintptr(fildes)) + runtime.ExitSyscall() + rc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlockpt(fildes int) (rc int, err error) { + runtime.EnterSyscall() + r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_UNLOCKPT<<4, uintptr(fildes)) + runtime.ExitSyscall() + rc = int(r0) + if int64(r0) == -1 { + err = errnoErr2(e1, e2) } return } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 0cc3ce496..53aef5dc5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -452,4 +452,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 856d92d69..71d524763 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -374,4 +374,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 8d467094c..c74770613 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -416,4 +416,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index edc173244..f96e214f6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -319,4 +319,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 445eba206..28425346c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -313,4 +313,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index adba01bca..d0953018d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -436,4 +436,9 @@ const ( SYS_FUTEX_WAKE = 4454 SYS_FUTEX_WAIT = 4455 SYS_FUTEX_REQUEUE = 4456 + SYS_STATMOUNT = 4457 + SYS_LISTMOUNT = 4458 + SYS_LSM_GET_SELF_ATTR = 4459 + SYS_LSM_SET_SELF_ATTR = 4460 + SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 014c4e9c7..295c7f4b8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -366,4 +366,9 @@ const ( SYS_FUTEX_WAKE = 5454 SYS_FUTEX_WAIT = 5455 SYS_FUTEX_REQUEUE = 5456 + SYS_STATMOUNT = 5457 + SYS_LISTMOUNT = 5458 + SYS_LSM_GET_SELF_ATTR = 5459 + SYS_LSM_SET_SELF_ATTR = 5460 + SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index ccc97d74d..d1a9eaca7 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -366,4 +366,9 @@ const ( SYS_FUTEX_WAKE = 5454 SYS_FUTEX_WAIT = 5455 SYS_FUTEX_REQUEUE = 5456 + SYS_STATMOUNT = 5457 + SYS_LISTMOUNT = 5458 + SYS_LSM_GET_SELF_ATTR = 5459 + SYS_LSM_SET_SELF_ATTR = 5460 + SYS_LSM_LIST_MODULES = 5461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index ec2b64a95..bec157c39 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -436,4 +436,9 @@ const ( SYS_FUTEX_WAKE = 4454 SYS_FUTEX_WAIT = 4455 SYS_FUTEX_REQUEUE = 4456 + SYS_STATMOUNT = 4457 + SYS_LISTMOUNT = 4458 + SYS_LSM_GET_SELF_ATTR = 4459 + SYS_LSM_SET_SELF_ATTR = 4460 + SYS_LSM_LIST_MODULES = 4461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 21a839e33..7ee7bdc43 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -443,4 +443,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index c11121ec3..fad1f25b4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -415,4 +415,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 909b631fc..7d3e16357 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -415,4 +415,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index e49bed16e..0ed53ad9f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -320,4 +320,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 66017d2d3..2fba04ad5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -381,4 +381,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 47bab18dc..621d00d74 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -394,4 +394,9 @@ const ( SYS_FUTEX_WAKE = 454 SYS_FUTEX_WAIT = 455 SYS_FUTEX_REQUEUE = 456 + SYS_STATMOUNT = 457 + SYS_LISTMOUNT = 458 + SYS_LSM_GET_SELF_ATTR = 459 + SYS_LSM_SET_SELF_ATTR = 460 + SYS_LSM_LIST_MODULES = 461 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go index b2e308581..5e8c263ca 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -1,2669 +1,2852 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// go run mksyscall_zos_s390x.go -o_sysnum zsysnum_zos_s390x.go -o_syscall zsyscall_zos_s390x.go -i_syscall syscall_zos_s390x.go -o_asm zsymaddr_zos_s390x.s +// Code generated by the command above; see README.md. DO NOT EDIT. //go:build zos && s390x package unix -// TODO: auto-generate. 
- const ( - SYS_ACOSD128 = 0xB80 - SYS_ACOSD32 = 0xB7E - SYS_ACOSD64 = 0xB7F - SYS_ACOSHD128 = 0xB83 - SYS_ACOSHD32 = 0xB81 - SYS_ACOSHD64 = 0xB82 - SYS_AIO_FSYNC = 0xC69 - SYS_ASCTIME = 0x0AE - SYS_ASCTIME64 = 0xCD7 - SYS_ASCTIME64_R = 0xCD8 - SYS_ASIND128 = 0xB86 - SYS_ASIND32 = 0xB84 - SYS_ASIND64 = 0xB85 - SYS_ASINHD128 = 0xB89 - SYS_ASINHD32 = 0xB87 - SYS_ASINHD64 = 0xB88 - SYS_ATAN2D128 = 0xB8F - SYS_ATAN2D32 = 0xB8D - SYS_ATAN2D64 = 0xB8E - SYS_ATAND128 = 0xB8C - SYS_ATAND32 = 0xB8A - SYS_ATAND64 = 0xB8B - SYS_ATANHD128 = 0xB92 - SYS_ATANHD32 = 0xB90 - SYS_ATANHD64 = 0xB91 - SYS_BIND2ADDRSEL = 0xD59 - SYS_C16RTOMB = 0xD40 - SYS_C32RTOMB = 0xD41 - SYS_CBRTD128 = 0xB95 - SYS_CBRTD32 = 0xB93 - SYS_CBRTD64 = 0xB94 - SYS_CEILD128 = 0xB98 - SYS_CEILD32 = 0xB96 - SYS_CEILD64 = 0xB97 - SYS_CLEARENV = 0x0C9 - SYS_CLEARERR_UNLOCKED = 0xCA1 - SYS_CLOCK = 0x0AA - SYS_CLOGL = 0xA00 - SYS_CLRMEMF = 0x0BD - SYS_CONJ = 0xA03 - SYS_CONJF = 0xA06 - SYS_CONJL = 0xA09 - SYS_COPYSIGND128 = 0xB9E - SYS_COPYSIGND32 = 0xB9C - SYS_COPYSIGND64 = 0xB9D - SYS_COSD128 = 0xBA1 - SYS_COSD32 = 0xB9F - SYS_COSD64 = 0xBA0 - SYS_COSHD128 = 0xBA4 - SYS_COSHD32 = 0xBA2 - SYS_COSHD64 = 0xBA3 - SYS_CPOW = 0xA0C - SYS_CPOWF = 0xA0F - SYS_CPOWL = 0xA12 - SYS_CPROJ = 0xA15 - SYS_CPROJF = 0xA18 - SYS_CPROJL = 0xA1B - SYS_CREAL = 0xA1E - SYS_CREALF = 0xA21 - SYS_CREALL = 0xA24 - SYS_CSIN = 0xA27 - SYS_CSINF = 0xA2A - SYS_CSINH = 0xA30 - SYS_CSINHF = 0xA33 - SYS_CSINHL = 0xA36 - SYS_CSINL = 0xA2D - SYS_CSNAP = 0x0C5 - SYS_CSQRT = 0xA39 - SYS_CSQRTF = 0xA3C - SYS_CSQRTL = 0xA3F - SYS_CTAN = 0xA42 - SYS_CTANF = 0xA45 - SYS_CTANH = 0xA4B - SYS_CTANHF = 0xA4E - SYS_CTANHL = 0xA51 - SYS_CTANL = 0xA48 - SYS_CTIME = 0x0AB - SYS_CTIME64 = 0xCD9 - SYS_CTIME64_R = 0xCDA - SYS_CTRACE = 0x0C6 - SYS_DIFFTIME = 0x0A7 - SYS_DIFFTIME64 = 0xCDB - SYS_DLADDR = 0xC82 - SYS_DYNALLOC = 0x0C3 - SYS_DYNFREE = 0x0C2 - SYS_ERFCD128 = 0xBAA - SYS_ERFCD32 = 0xBA8 - SYS_ERFCD64 = 0xBA9 - SYS_ERFD128 = 0xBA7 - SYS_ERFD32 = 0xBA5 - SYS_ERFD64 = 0xBA6 - SYS_EXP2D128 = 0xBB0 - SYS_EXP2D32 = 0xBAE - SYS_EXP2D64 = 0xBAF - SYS_EXPD128 = 0xBAD - SYS_EXPD32 = 0xBAB - SYS_EXPD64 = 0xBAC - SYS_EXPM1D128 = 0xBB3 - SYS_EXPM1D32 = 0xBB1 - SYS_EXPM1D64 = 0xBB2 - SYS_FABSD128 = 0xBB6 - SYS_FABSD32 = 0xBB4 - SYS_FABSD64 = 0xBB5 - SYS_FDELREC_UNLOCKED = 0xCA2 - SYS_FDIMD128 = 0xBB9 - SYS_FDIMD32 = 0xBB7 - SYS_FDIMD64 = 0xBB8 - SYS_FDOPEN_UNLOCKED = 0xCFC - SYS_FECLEAREXCEPT = 0xAEA - SYS_FEGETENV = 0xAEB - SYS_FEGETEXCEPTFLAG = 0xAEC - SYS_FEGETROUND = 0xAED - SYS_FEHOLDEXCEPT = 0xAEE - SYS_FEOF_UNLOCKED = 0xCA3 - SYS_FERAISEEXCEPT = 0xAEF - SYS_FERROR_UNLOCKED = 0xCA4 - SYS_FESETENV = 0xAF0 - SYS_FESETEXCEPTFLAG = 0xAF1 - SYS_FESETROUND = 0xAF2 - SYS_FETCHEP = 0x0BF - SYS_FETESTEXCEPT = 0xAF3 - SYS_FEUPDATEENV = 0xAF4 - SYS_FE_DEC_GETROUND = 0xBBA - SYS_FE_DEC_SETROUND = 0xBBB - SYS_FFLUSH_UNLOCKED = 0xCA5 - SYS_FGETC_UNLOCKED = 0xC80 - SYS_FGETPOS64 = 0xCEE - SYS_FGETPOS64_UNLOCKED = 0xCF4 - SYS_FGETPOS_UNLOCKED = 0xCA6 - SYS_FGETS_UNLOCKED = 0xC7C - SYS_FGETWC_UNLOCKED = 0xCA7 - SYS_FGETWS_UNLOCKED = 0xCA8 - SYS_FILENO_UNLOCKED = 0xCA9 - SYS_FLDATA = 0x0C1 - SYS_FLDATA_UNLOCKED = 0xCAA - SYS_FLOCATE_UNLOCKED = 0xCAB - SYS_FLOORD128 = 0xBBE - SYS_FLOORD32 = 0xBBC - SYS_FLOORD64 = 0xBBD - SYS_FMA = 0xA63 - SYS_FMAD128 = 0xBC1 - SYS_FMAD32 = 0xBBF - SYS_FMAD64 = 0xBC0 - SYS_FMAF = 0xA66 - SYS_FMAL = 0xA69 - SYS_FMAX = 0xA6C - SYS_FMAXD128 = 0xBC4 - SYS_FMAXD32 = 0xBC2 - SYS_FMAXD64 = 0xBC3 - SYS_FMAXF = 0xA6F - SYS_FMAXL = 0xA72 - SYS_FMIN = 0xA75 - SYS_FMIND128 = 0xBC7 - 
SYS_FMIND32 = 0xBC5 - SYS_FMIND64 = 0xBC6 - SYS_FMINF = 0xA78 - SYS_FMINL = 0xA7B - SYS_FMODD128 = 0xBCA - SYS_FMODD32 = 0xBC8 - SYS_FMODD64 = 0xBC9 - SYS_FOPEN64 = 0xD49 - SYS_FOPEN64_UNLOCKED = 0xD4A - SYS_FOPEN_UNLOCKED = 0xCFA - SYS_FPRINTF_UNLOCKED = 0xCAC - SYS_FPUTC_UNLOCKED = 0xC81 - SYS_FPUTS_UNLOCKED = 0xC7E - SYS_FPUTWC_UNLOCKED = 0xCAD - SYS_FPUTWS_UNLOCKED = 0xCAE - SYS_FREAD_NOUPDATE = 0xCEC - SYS_FREAD_NOUPDATE_UNLOCKED = 0xCED - SYS_FREAD_UNLOCKED = 0xC7B - SYS_FREEIFADDRS = 0xCE6 - SYS_FREOPEN64 = 0xD4B - SYS_FREOPEN64_UNLOCKED = 0xD4C - SYS_FREOPEN_UNLOCKED = 0xCFB - SYS_FREXPD128 = 0xBCE - SYS_FREXPD32 = 0xBCC - SYS_FREXPD64 = 0xBCD - SYS_FSCANF_UNLOCKED = 0xCAF - SYS_FSEEK64 = 0xCEF - SYS_FSEEK64_UNLOCKED = 0xCF5 - SYS_FSEEKO64 = 0xCF0 - SYS_FSEEKO64_UNLOCKED = 0xCF6 - SYS_FSEEKO_UNLOCKED = 0xCB1 - SYS_FSEEK_UNLOCKED = 0xCB0 - SYS_FSETPOS64 = 0xCF1 - SYS_FSETPOS64_UNLOCKED = 0xCF7 - SYS_FSETPOS_UNLOCKED = 0xCB3 - SYS_FTELL64 = 0xCF2 - SYS_FTELL64_UNLOCKED = 0xCF8 - SYS_FTELLO64 = 0xCF3 - SYS_FTELLO64_UNLOCKED = 0xCF9 - SYS_FTELLO_UNLOCKED = 0xCB5 - SYS_FTELL_UNLOCKED = 0xCB4 - SYS_FUPDATE = 0x0B5 - SYS_FUPDATE_UNLOCKED = 0xCB7 - SYS_FWIDE_UNLOCKED = 0xCB8 - SYS_FWPRINTF_UNLOCKED = 0xCB9 - SYS_FWRITE_UNLOCKED = 0xC7A - SYS_FWSCANF_UNLOCKED = 0xCBA - SYS_GETDATE64 = 0xD4F - SYS_GETIFADDRS = 0xCE7 - SYS_GETIPV4SOURCEFILTER = 0xC77 - SYS_GETSOURCEFILTER = 0xC79 - SYS_GETSYNTX = 0x0FD - SYS_GETS_UNLOCKED = 0xC7D - SYS_GETTIMEOFDAY64 = 0xD50 - SYS_GETWCHAR_UNLOCKED = 0xCBC - SYS_GETWC_UNLOCKED = 0xCBB - SYS_GMTIME = 0x0B0 - SYS_GMTIME64 = 0xCDC - SYS_GMTIME64_R = 0xCDD - SYS_HYPOTD128 = 0xBD1 - SYS_HYPOTD32 = 0xBCF - SYS_HYPOTD64 = 0xBD0 - SYS_ILOGBD128 = 0xBD4 - SYS_ILOGBD32 = 0xBD2 - SYS_ILOGBD64 = 0xBD3 - SYS_ILOGBF = 0xA7E - SYS_ILOGBL = 0xA81 - SYS_INET6_IS_SRCADDR = 0xD5A - SYS_ISBLANK = 0x0FE - SYS_ISWALNUM = 0x0FF - SYS_LDEXPD128 = 0xBD7 - SYS_LDEXPD32 = 0xBD5 - SYS_LDEXPD64 = 0xBD6 - SYS_LGAMMAD128 = 0xBDA - SYS_LGAMMAD32 = 0xBD8 - SYS_LGAMMAD64 = 0xBD9 - SYS_LIO_LISTIO = 0xC6A - SYS_LLRINT = 0xA84 - SYS_LLRINTD128 = 0xBDD - SYS_LLRINTD32 = 0xBDB - SYS_LLRINTD64 = 0xBDC - SYS_LLRINTF = 0xA87 - SYS_LLRINTL = 0xA8A - SYS_LLROUND = 0xA8D - SYS_LLROUNDD128 = 0xBE0 - SYS_LLROUNDD32 = 0xBDE - SYS_LLROUNDD64 = 0xBDF - SYS_LLROUNDF = 0xA90 - SYS_LLROUNDL = 0xA93 - SYS_LOCALTIM = 0x0B1 - SYS_LOCALTIME = 0x0B1 - SYS_LOCALTIME64 = 0xCDE - SYS_LOCALTIME64_R = 0xCDF - SYS_LOG10D128 = 0xBE6 - SYS_LOG10D32 = 0xBE4 - SYS_LOG10D64 = 0xBE5 - SYS_LOG1PD128 = 0xBE9 - SYS_LOG1PD32 = 0xBE7 - SYS_LOG1PD64 = 0xBE8 - SYS_LOG2D128 = 0xBEC - SYS_LOG2D32 = 0xBEA - SYS_LOG2D64 = 0xBEB - SYS_LOGBD128 = 0xBEF - SYS_LOGBD32 = 0xBED - SYS_LOGBD64 = 0xBEE - SYS_LOGBF = 0xA96 - SYS_LOGBL = 0xA99 - SYS_LOGD128 = 0xBE3 - SYS_LOGD32 = 0xBE1 - SYS_LOGD64 = 0xBE2 - SYS_LRINT = 0xA9C - SYS_LRINTD128 = 0xBF2 - SYS_LRINTD32 = 0xBF0 - SYS_LRINTD64 = 0xBF1 - SYS_LRINTF = 0xA9F - SYS_LRINTL = 0xAA2 - SYS_LROUNDD128 = 0xBF5 - SYS_LROUNDD32 = 0xBF3 - SYS_LROUNDD64 = 0xBF4 - SYS_LROUNDL = 0xAA5 - SYS_MBLEN = 0x0AF - SYS_MBRTOC16 = 0xD42 - SYS_MBRTOC32 = 0xD43 - SYS_MEMSET = 0x0A3 - SYS_MKTIME = 0x0AC - SYS_MKTIME64 = 0xCE0 - SYS_MODFD128 = 0xBF8 - SYS_MODFD32 = 0xBF6 - SYS_MODFD64 = 0xBF7 - SYS_NAN = 0xAA8 - SYS_NAND128 = 0xBFB - SYS_NAND32 = 0xBF9 - SYS_NAND64 = 0xBFA - SYS_NANF = 0xAAA - SYS_NANL = 0xAAC - SYS_NEARBYINT = 0xAAE - SYS_NEARBYINTD128 = 0xBFE - SYS_NEARBYINTD32 = 0xBFC - SYS_NEARBYINTD64 = 0xBFD - SYS_NEARBYINTF = 0xAB1 - SYS_NEARBYINTL = 0xAB4 - SYS_NEXTAFTERD128 = 0xC01 - SYS_NEXTAFTERD32 = 0xBFF - 
[machine-generated diff hunk, flattened during extraction: the old unsorted z/OS SYS_* syscall constant definitions (removed lines, SYS_NEXTAFTERD64 = 0xC00 through SYS___CACOS_H = 0x999) are replaced by a regenerated table sorted by call number, with each constant's decimal value in a trailing comment (added lines, SYS_LOG = 0x17 // 23 through SYS_HSEARCH = 0x2C8 // 712, continuing past this excerpt); alias pairs such as SYS_SETLOCAL/SYS_SETLOCALE share a single value]
// 712 + SYS_LFIND = 0x2C9 // 713 + SYS_LSEARCH = 0x2CA // 714 + SYS_TDELETE = 0x2CB // 715 + SYS_TFIND = 0x2CC // 716 + SYS_TSEARCH = 0x2CD // 717 + SYS_TWALK = 0x2CE // 718 + SYS_INSQUE = 0x2CF // 719 + SYS_REMQUE = 0x2D0 // 720 + SYS_POPEN = 0x2D1 // 721 + SYS_PCLOSE = 0x2D2 // 722 + SYS_SWAB = 0x2D3 // 723 + SYS_MEMCCPY = 0x2D4 // 724 + SYS_GETPAGESIZE = 0x2D8 // 728 + SYS_FCHDIR = 0x2D9 // 729 + SYS___OCLCK = 0x2DA // 730 + SYS___ATOE = 0x2DB // 731 + SYS___ATOE_L = 0x2DC // 732 + SYS___ETOA = 0x2DD // 733 + SYS___ETOA_L = 0x2DE // 734 + SYS_SETUTXENT = 0x2DF // 735 + SYS_GETUTXENT = 0x2E0 // 736 + SYS_ENDUTXENT = 0x2E1 // 737 + SYS_GETUTXID = 0x2E2 // 738 + SYS_GETUTXLINE = 0x2E3 // 739 + SYS_PUTUTXLINE = 0x2E4 // 740 + SYS_FMTMSG = 0x2E5 // 741 + SYS_JRAND48 = 0x2E6 // 742 + SYS_LRAND48 = 0x2E7 // 743 + SYS_MRAND48 = 0x2E8 // 744 + SYS_NRAND48 = 0x2E9 // 745 + SYS_LCONG48 = 0x2EA // 746 + SYS_SRAND48 = 0x2EB // 747 + SYS_SEED48 = 0x2EC // 748 + SYS_ISASCII = 0x2ED // 749 + SYS_TOASCII = 0x2EE // 750 + SYS_A64L = 0x2EF // 751 + SYS_L64A = 0x2F0 // 752 + SYS_UALARM = 0x2F1 // 753 + SYS_USLEEP = 0x2F2 // 754 + SYS___UTXTRM = 0x2F3 // 755 + SYS___SRCTRM = 0x2F4 // 756 + SYS_FTIME = 0x2F5 // 757 + SYS_GETTIMEOFDAY = 0x2F6 // 758 + SYS_DBM_CLEARERR = 0x2F7 // 759 + SYS_DBM_CLOSE = 0x2F8 // 760 + SYS_DBM_DELETE = 0x2F9 // 761 + SYS_DBM_ERROR = 0x2FA // 762 + SYS_DBM_FETCH = 0x2FB // 763 + SYS_DBM_FIRSTKEY = 0x2FC // 764 + SYS_DBM_NEXTKEY = 0x2FD // 765 + SYS_DBM_OPEN = 0x2FE // 766 + SYS_DBM_STORE = 0x2FF // 767 + SYS___NDMTRM = 0x300 // 768 + SYS_FTOK = 0x301 // 769 + SYS_BASENAME = 0x302 // 770 + SYS_DIRNAME = 0x303 // 771 + SYS_GETDTABLESIZE = 0x304 // 772 + SYS_MKSTEMP = 0x305 // 773 + SYS_MKTEMP = 0x306 // 774 + SYS_NFTW = 0x307 // 775 + SYS_GETWD = 0x308 // 776 + SYS_LOCKF = 0x309 // 777 + SYS__LONGJMP = 0x30D // 781 + SYS__SETJMP = 0x30E // 782 + SYS_VFORK = 0x30F // 783 + SYS_WORDEXP = 0x310 // 784 + SYS_WORDFREE = 0x311 // 785 + SYS_GETPGID = 0x312 // 786 + SYS_GETSID = 0x313 // 787 + SYS___UTMPXNAME = 0x314 // 788 + SYS_CUSERID = 0x315 // 789 + SYS_GETPASS = 0x316 // 790 + SYS_FNMATCH = 0x317 // 791 + SYS_FTW = 0x318 // 792 + SYS_GETW = 0x319 // 793 + SYS_GLOB = 0x31A // 794 + SYS_GLOBFREE = 0x31B // 795 + SYS_PUTW = 0x31C // 796 + SYS_SEEKDIR = 0x31D // 797 + SYS_TELLDIR = 0x31E // 798 + SYS_TEMPNAM = 0x31F // 799 + SYS_ACOSH = 0x320 // 800 + SYS_ASINH = 0x321 // 801 + SYS_ATANH = 0x322 // 802 + SYS_CBRT = 0x323 // 803 + SYS_EXPM1 = 0x324 // 804 + SYS_ILOGB = 0x325 // 805 + SYS_LOGB = 0x326 // 806 + SYS_LOG1P = 0x327 // 807 + SYS_NEXTAFTER = 0x328 // 808 + SYS_RINT = 0x329 // 809 + SYS_REMAINDER = 0x32A // 810 + SYS_SCALB = 0x32B // 811 + SYS_LGAMMA = 0x32C // 812 + SYS_TTYSLOT = 0x32D // 813 + SYS_GETTIMEOFDAY_R = 0x32E // 814 + SYS_SYNC = 0x32F // 815 + SYS_SPAWN = 0x330 // 816 + SYS_SPAWNP = 0x331 // 817 + SYS_GETLOGIN_UU = 0x332 // 818 + SYS_ECVT = 0x333 // 819 + SYS_FCVT = 0x334 // 820 + SYS_GCVT = 0x335 // 821 + SYS_ACCEPT = 0x336 // 822 + SYS_BIND = 0x337 // 823 + SYS_CONNECT = 0x338 // 824 + SYS_ENDHOSTENT = 0x339 // 825 + SYS_ENDPROTOENT = 0x33A // 826 + SYS_ENDSERVENT = 0x33B // 827 + SYS_GETHOSTBYADDR_R = 0x33C // 828 + SYS_GETHOSTBYADDR = 0x33D // 829 + SYS_GETHOSTBYNAME_R = 0x33E // 830 + SYS_GETHOSTBYNAME = 0x33F // 831 + SYS_GETHOSTENT = 0x340 // 832 + SYS_GETHOSTID = 0x341 // 833 + SYS_GETHOSTNAME = 0x342 // 834 + SYS_GETNETBYADDR = 0x343 // 835 + SYS_GETNETBYNAME = 0x344 // 836 + SYS_GETNETENT = 0x345 // 837 + SYS_GETPEERNAME = 0x346 // 838 + SYS_GETPROTOBYNAME 
= 0x347 // 839 + SYS_GETPROTOBYNUMBER = 0x348 // 840 + SYS_GETPROTOENT = 0x349 // 841 + SYS_GETSERVBYNAME = 0x34A // 842 + SYS_GETSERVBYPORT = 0x34B // 843 + SYS_GETSERVENT = 0x34C // 844 + SYS_GETSOCKNAME = 0x34D // 845 + SYS_GETSOCKOPT = 0x34E // 846 + SYS_INET_ADDR = 0x34F // 847 + SYS_INET_LNAOF = 0x350 // 848 + SYS_INET_MAKEADDR = 0x351 // 849 + SYS_INET_NETOF = 0x352 // 850 + SYS_INET_NETWORK = 0x353 // 851 + SYS_INET_NTOA = 0x354 // 852 + SYS_IOCTL = 0x355 // 853 + SYS_LISTEN = 0x356 // 854 + SYS_READV = 0x357 // 855 + SYS_RECV = 0x358 // 856 + SYS_RECVFROM = 0x359 // 857 + SYS_SELECT = 0x35B // 859 + SYS_SELECTEX = 0x35C // 860 + SYS_SEND = 0x35D // 861 + SYS_SENDTO = 0x35F // 863 + SYS_SETHOSTENT = 0x360 // 864 + SYS_SETNETENT = 0x361 // 865 + SYS_SETPEER = 0x362 // 866 + SYS_SETPROTOENT = 0x363 // 867 + SYS_SETSERVENT = 0x364 // 868 + SYS_SETSOCKOPT = 0x365 // 869 + SYS_SHUTDOWN = 0x366 // 870 + SYS_SOCKET = 0x367 // 871 + SYS_SOCKETPAIR = 0x368 // 872 + SYS_WRITEV = 0x369 // 873 + SYS_CHROOT = 0x36A // 874 + SYS_W_STATVFS = 0x36B // 875 + SYS_ULIMIT = 0x36C // 876 + SYS_ISNAN = 0x36D // 877 + SYS_UTIMES = 0x36E // 878 + SYS___H_ERRNO = 0x36F // 879 + SYS_ENDNETENT = 0x370 // 880 + SYS_CLOSELOG = 0x371 // 881 + SYS_OPENLOG = 0x372 // 882 + SYS_SETLOGMASK = 0x373 // 883 + SYS_SYSLOG = 0x374 // 884 + SYS_PTSNAME = 0x375 // 885 + SYS_SETREUID = 0x376 // 886 + SYS_SETREGID = 0x377 // 887 + SYS_REALPATH = 0x378 // 888 + SYS___SIGNGAM = 0x379 // 889 + SYS_GRANTPT = 0x37A // 890 + SYS_UNLOCKPT = 0x37B // 891 + SYS_TCGETSID = 0x37C // 892 + SYS___TCGETCP = 0x37D // 893 + SYS___TCSETCP = 0x37E // 894 + SYS___TCSETTABLES = 0x37F // 895 + SYS_POLL = 0x380 // 896 + SYS_REXEC = 0x381 // 897 + SYS___ISASCII2 = 0x382 // 898 + SYS___TOASCII2 = 0x383 // 899 + SYS_CHPRIORITY = 0x384 // 900 + SYS_PTHREAD_ATTR_SETSYNCTYPE_NP = 0x385 // 901 + SYS_PTHREAD_ATTR_GETSYNCTYPE_NP = 0x386 // 902 + SYS_PTHREAD_SET_LIMIT_NP = 0x387 // 903 + SYS___STNETENT = 0x388 // 904 + SYS___STPROTOENT = 0x389 // 905 + SYS___STSERVENT = 0x38A // 906 + SYS___STHOSTENT = 0x38B // 907 + SYS_NLIST = 0x38C // 908 + SYS___IPDBCS = 0x38D // 909 + SYS___IPDSPX = 0x38E // 910 + SYS___IPMSGC = 0x38F // 911 + SYS___SELECT1 = 0x390 // 912 + SYS_PTHREAD_SECURITY_NP = 0x391 // 913 + SYS___CHECK_RESOURCE_AUTH_NP = 0x392 // 914 + SYS___CONVERT_ID_NP = 0x393 // 915 + SYS___OPENVMREL = 0x394 // 916 + SYS_WMEMCHR = 0x395 // 917 + SYS_WMEMCMP = 0x396 // 918 + SYS_WMEMCPY = 0x397 // 919 + SYS_WMEMMOVE = 0x398 // 920 + SYS_WMEMSET = 0x399 // 921 + SYS___FPUTWC = 0x400 // 1024 + SYS___PUTWC = 0x401 // 1025 + SYS___PWCHAR = 0x402 // 1026 + SYS___WCSFTM = 0x403 // 1027 + SYS___WCSTOK = 0x404 // 1028 + SYS___WCWDTH = 0x405 // 1029 + SYS_T_ACCEPT = 0x409 // 1033 + SYS_T_ALLOC = 0x40A // 1034 + SYS_T_BIND = 0x40B // 1035 + SYS_T_CLOSE = 0x40C // 1036 + SYS_T_CONNECT = 0x40D // 1037 + SYS_T_ERROR = 0x40E // 1038 + SYS_T_FREE = 0x40F // 1039 + SYS_T_GETINFO = 0x410 // 1040 + SYS_T_GETPROTADDR = 0x411 // 1041 + SYS_T_GETSTATE = 0x412 // 1042 + SYS_T_LISTEN = 0x413 // 1043 + SYS_T_LOOK = 0x414 // 1044 + SYS_T_OPEN = 0x415 // 1045 + SYS_T_OPTMGMT = 0x416 // 1046 + SYS_T_RCV = 0x417 // 1047 + SYS_T_RCVCONNECT = 0x418 // 1048 + SYS_T_RCVDIS = 0x419 // 1049 + SYS_T_RCVREL = 0x41A // 1050 + SYS_T_RCVUDATA = 0x41B // 1051 + SYS_T_RCVUDERR = 0x41C // 1052 + SYS_T_SND = 0x41D // 1053 + SYS_T_SNDDIS = 0x41E // 1054 + SYS_T_SNDREL = 0x41F // 1055 + SYS_T_SNDUDATA = 0x420 // 1056 + SYS_T_STRERROR = 0x421 // 1057 + SYS_T_SYNC = 0x422 // 1058 + SYS_T_UNBIND = 0x423 
// 1059 + SYS___T_ERRNO = 0x424 // 1060 + SYS___RECVMSG2 = 0x425 // 1061 + SYS___SENDMSG2 = 0x426 // 1062 + SYS_FATTACH = 0x427 // 1063 + SYS_FDETACH = 0x428 // 1064 + SYS_GETMSG = 0x429 // 1065 + SYS_GETPMSG = 0x42A // 1066 + SYS_ISASTREAM = 0x42B // 1067 + SYS_PUTMSG = 0x42C // 1068 + SYS_PUTPMSG = 0x42D // 1069 + SYS___ISPOSIXON = 0x42E // 1070 + SYS___OPENMVSREL = 0x42F // 1071 + SYS_GETCONTEXT = 0x430 // 1072 + SYS_SETCONTEXT = 0x431 // 1073 + SYS_MAKECONTEXT = 0x432 // 1074 + SYS_SWAPCONTEXT = 0x433 // 1075 + SYS_PTHREAD_GETSPECIFIC_D8_NP = 0x434 // 1076 + SYS_GETCLIENTID = 0x470 // 1136 + SYS___GETCLIENTID = 0x471 // 1137 + SYS_GETSTABLESIZE = 0x472 // 1138 + SYS_GETIBMOPT = 0x473 // 1139 + SYS_GETIBMSOCKOPT = 0x474 // 1140 + SYS_GIVESOCKET = 0x475 // 1141 + SYS_IBMSFLUSH = 0x476 // 1142 + SYS_MAXDESC = 0x477 // 1143 + SYS_SETIBMOPT = 0x478 // 1144 + SYS_SETIBMSOCKOPT = 0x479 // 1145 + SYS_SOCK_DEBUG = 0x47A // 1146 + SYS_SOCK_DO_TESTSTOR = 0x47D // 1149 + SYS_TAKESOCKET = 0x47E // 1150 + SYS___SERVER_INIT = 0x47F // 1151 + SYS___SERVER_PWU = 0x480 // 1152 + SYS_PTHREAD_TAG_NP = 0x481 // 1153 + SYS___CONSOLE = 0x482 // 1154 + SYS___WSINIT = 0x483 // 1155 + SYS___IPTCPN = 0x489 // 1161 + SYS___SMF_RECORD = 0x48A // 1162 + SYS___IPHOST = 0x48B // 1163 + SYS___IPNODE = 0x48C // 1164 + SYS___SERVER_CLASSIFY_CREATE = 0x48D // 1165 + SYS___SERVER_CLASSIFY_DESTROY = 0x48E // 1166 + SYS___SERVER_CLASSIFY_RESET = 0x48F // 1167 + SYS___SERVER_CLASSIFY = 0x490 // 1168 + SYS___HEAPRPT = 0x496 // 1174 + SYS___FNWSA = 0x49B // 1179 + SYS___SPAWN2 = 0x49D // 1181 + SYS___SPAWNP2 = 0x49E // 1182 + SYS___GDRR = 0x4A1 // 1185 + SYS___HRRNO = 0x4A2 // 1186 + SYS___OPRG = 0x4A3 // 1187 + SYS___OPRR = 0x4A4 // 1188 + SYS___OPND = 0x4A5 // 1189 + SYS___OPPT = 0x4A6 // 1190 + SYS___SIGGM = 0x4A7 // 1191 + SYS___DGHT = 0x4A8 // 1192 + SYS___TZNE = 0x4A9 // 1193 + SYS___TZZN = 0x4AA // 1194 + SYS___TRRNO = 0x4AF // 1199 + SYS___ENVN = 0x4B0 // 1200 + SYS___MLOCKALL = 0x4B1 // 1201 + SYS_CREATEWO = 0x4B2 // 1202 + SYS_CREATEWORKUNIT = 0x4B2 // 1202 + SYS_CONTINUE = 0x4B3 // 1203 + SYS_CONTINUEWORKUNIT = 0x4B3 // 1203 + SYS_CONNECTW = 0x4B4 // 1204 + SYS_CONNECTWORKMGR = 0x4B4 // 1204 + SYS_CONNECTS = 0x4B5 // 1205 + SYS_CONNECTSERVER = 0x4B5 // 1205 + SYS_DISCONNE = 0x4B6 // 1206 + SYS_DISCONNECTSERVER = 0x4B6 // 1206 + SYS_JOINWORK = 0x4B7 // 1207 + SYS_JOINWORKUNIT = 0x4B7 // 1207 + SYS_LEAVEWOR = 0x4B8 // 1208 + SYS_LEAVEWORKUNIT = 0x4B8 // 1208 + SYS_DELETEWO = 0x4B9 // 1209 + SYS_DELETEWORKUNIT = 0x4B9 // 1209 + SYS_QUERYMET = 0x4BA // 1210 + SYS_QUERYMETRICS = 0x4BA // 1210 + SYS_QUERYSCH = 0x4BB // 1211 + SYS_QUERYSCHENV = 0x4BB // 1211 + SYS_CHECKSCH = 0x4BC // 1212 + SYS_CHECKSCHENV = 0x4BC // 1212 + SYS___PID_AFFINITY = 0x4BD // 1213 + SYS___ASINH_B = 0x4BE // 1214 + SYS___ATAN_B = 0x4BF // 1215 + SYS___CBRT_B = 0x4C0 // 1216 + SYS___CEIL_B = 0x4C1 // 1217 + SYS_COPYSIGN = 0x4C2 // 1218 + SYS___COS_B = 0x4C3 // 1219 + SYS___ERF_B = 0x4C4 // 1220 + SYS___ERFC_B = 0x4C5 // 1221 + SYS___EXPM1_B = 0x4C6 // 1222 + SYS___FABS_B = 0x4C7 // 1223 + SYS_FINITE = 0x4C8 // 1224 + SYS___FLOOR_B = 0x4C9 // 1225 + SYS___FREXP_B = 0x4CA // 1226 + SYS___ILOGB_B = 0x4CB // 1227 + SYS___ISNAN_B = 0x4CC // 1228 + SYS___LDEXP_B = 0x4CD // 1229 + SYS___LOG1P_B = 0x4CE // 1230 + SYS___LOGB_B = 0x4CF // 1231 + SYS_MATHERR = 0x4D0 // 1232 + SYS___MODF_B = 0x4D1 // 1233 + SYS___NEXTAFTER_B = 0x4D2 // 1234 + SYS___RINT_B = 0x4D3 // 1235 + SYS_SCALBN = 0x4D4 // 1236 + SYS_SIGNIFIC = 0x4D5 // 1237 + SYS_SIGNIFICAND = 0x4D5 // 
1237 + SYS___SIN_B = 0x4D6 // 1238 + SYS___TAN_B = 0x4D7 // 1239 + SYS___TANH_B = 0x4D8 // 1240 + SYS___ACOS_B = 0x4D9 // 1241 + SYS___ACOSH_B = 0x4DA // 1242 + SYS___ASIN_B = 0x4DB // 1243 + SYS___ATAN2_B = 0x4DC // 1244 + SYS___ATANH_B = 0x4DD // 1245 + SYS___COSH_B = 0x4DE // 1246 + SYS___EXP_B = 0x4DF // 1247 + SYS___FMOD_B = 0x4E0 // 1248 + SYS___GAMMA_B = 0x4E1 // 1249 + SYS_GAMMA_R = 0x4E2 // 1250 + SYS___HYPOT_B = 0x4E3 // 1251 + SYS___J0_B = 0x4E4 // 1252 + SYS___Y0_B = 0x4E5 // 1253 + SYS___J1_B = 0x4E6 // 1254 + SYS___Y1_B = 0x4E7 // 1255 + SYS___JN_B = 0x4E8 // 1256 + SYS___YN_B = 0x4E9 // 1257 + SYS___LGAMMA_B = 0x4EA // 1258 + SYS_LGAMMA_R = 0x4EB // 1259 + SYS___LOG_B = 0x4EC // 1260 + SYS___LOG10_B = 0x4ED // 1261 + SYS___POW_B = 0x4EE // 1262 + SYS___REMAINDER_B = 0x4EF // 1263 + SYS___SCALB_B = 0x4F0 // 1264 + SYS___SINH_B = 0x4F1 // 1265 + SYS___SQRT_B = 0x4F2 // 1266 + SYS___OPENDIR2 = 0x4F3 // 1267 + SYS___READDIR2 = 0x4F4 // 1268 + SYS___LOGIN = 0x4F5 // 1269 + SYS___OPEN_STAT = 0x4F6 // 1270 + SYS_ACCEPT_AND_RECV = 0x4F7 // 1271 + SYS___FP_SETMODE = 0x4F8 // 1272 + SYS___SIGACTIONSET = 0x4FB // 1275 + SYS___UCREATE = 0x4FC // 1276 + SYS___UMALLOC = 0x4FD // 1277 + SYS___UFREE = 0x4FE // 1278 + SYS___UHEAPREPORT = 0x4FF // 1279 + SYS___ISBFP = 0x500 // 1280 + SYS___FP_CAST = 0x501 // 1281 + SYS___CERTIFICATE = 0x502 // 1282 + SYS_SEND_FILE = 0x503 // 1283 + SYS_AIO_CANCEL = 0x504 // 1284 + SYS_AIO_ERROR = 0x505 // 1285 + SYS_AIO_READ = 0x506 // 1286 + SYS_AIO_RETURN = 0x507 // 1287 + SYS_AIO_SUSPEND = 0x508 // 1288 + SYS_AIO_WRITE = 0x509 // 1289 + SYS_PTHREAD_MUTEXATTR_GETPSHARED = 0x50A // 1290 + SYS_PTHREAD_MUTEXATTR_SETPSHARED = 0x50B // 1291 + SYS_PTHREAD_RWLOCK_DESTROY = 0x50C // 1292 + SYS_PTHREAD_RWLOCK_INIT = 0x50D // 1293 + SYS_PTHREAD_RWLOCK_RDLOCK = 0x50E // 1294 + SYS_PTHREAD_RWLOCK_TRYRDLOCK = 0x50F // 1295 + SYS_PTHREAD_RWLOCK_TRYWRLOCK = 0x510 // 1296 + SYS_PTHREAD_RWLOCK_UNLOCK = 0x511 // 1297 + SYS_PTHREAD_RWLOCK_WRLOCK = 0x512 // 1298 + SYS_PTHREAD_RWLOCKATTR_GETPSHARED = 0x513 // 1299 + SYS_PTHREAD_RWLOCKATTR_SETPSHARED = 0x514 // 1300 + SYS_PTHREAD_RWLOCKATTR_INIT = 0x515 // 1301 + SYS_PTHREAD_RWLOCKATTR_DESTROY = 0x516 // 1302 + SYS___CTTBL = 0x517 // 1303 + SYS_PTHREAD_MUTEXATTR_SETTYPE = 0x518 // 1304 + SYS_PTHREAD_MUTEXATTR_GETTYPE = 0x519 // 1305 + SYS___FP_CLR_FLAG = 0x51A // 1306 + SYS___FP_READ_FLAG = 0x51B // 1307 + SYS___FP_RAISE_XCP = 0x51C // 1308 + SYS___FP_CLASS = 0x51D // 1309 + SYS___FP_FINITE = 0x51E // 1310 + SYS___FP_ISNAN = 0x51F // 1311 + SYS___FP_UNORDERED = 0x520 // 1312 + SYS___FP_READ_RND = 0x521 // 1313 + SYS___FP_READ_RND_B = 0x522 // 1314 + SYS___FP_SWAP_RND = 0x523 // 1315 + SYS___FP_SWAP_RND_B = 0x524 // 1316 + SYS___FP_LEVEL = 0x525 // 1317 + SYS___FP_BTOH = 0x526 // 1318 + SYS___FP_HTOB = 0x527 // 1319 + SYS___FPC_RD = 0x528 // 1320 + SYS___FPC_WR = 0x529 // 1321 + SYS___FPC_RW = 0x52A // 1322 + SYS___FPC_SM = 0x52B // 1323 + SYS___FPC_RS = 0x52C // 1324 + SYS_SIGTIMEDWAIT = 0x52D // 1325 + SYS_SIGWAITINFO = 0x52E // 1326 + SYS___CHKBFP = 0x52F // 1327 + SYS___W_PIOCTL = 0x59E // 1438 + SYS___OSENV = 0x59F // 1439 + SYS_EXPORTWO = 0x5A1 // 1441 + SYS_EXPORTWORKUNIT = 0x5A1 // 1441 + SYS_UNDOEXPO = 0x5A2 // 1442 + SYS_UNDOEXPORTWORKUNIT = 0x5A2 // 1442 + SYS_IMPORTWO = 0x5A3 // 1443 + SYS_IMPORTWORKUNIT = 0x5A3 // 1443 + SYS_UNDOIMPO = 0x5A4 // 1444 + SYS_UNDOIMPORTWORKUNIT = 0x5A4 // 1444 + SYS_EXTRACTW = 0x5A5 // 1445 + SYS_EXTRACTWORKUNIT = 0x5A5 // 1445 + SYS___CPL = 0x5A6 // 1446 + SYS___MAP_INIT = 0x5A7 // 1447 
+ SYS___MAP_SERVICE = 0x5A8 // 1448 + SYS_SIGQUEUE = 0x5A9 // 1449 + SYS___MOUNT = 0x5AA // 1450 + SYS___GETUSERID = 0x5AB // 1451 + SYS___IPDOMAINNAME = 0x5AC // 1452 + SYS_QUERYENC = 0x5AD // 1453 + SYS_QUERYWORKUNITCLASSIFICATION = 0x5AD // 1453 + SYS_CONNECTE = 0x5AE // 1454 + SYS_CONNECTEXPORTIMPORT = 0x5AE // 1454 + SYS___FP_SWAPMODE = 0x5AF // 1455 + SYS_STRTOLL = 0x5B0 // 1456 + SYS_STRTOULL = 0x5B1 // 1457 + SYS___DSA_PREV = 0x5B2 // 1458 + SYS___EP_FIND = 0x5B3 // 1459 + SYS___SERVER_THREADS_QUERY = 0x5B4 // 1460 + SYS___MSGRCV_TIMED = 0x5B7 // 1463 + SYS___SEMOP_TIMED = 0x5B8 // 1464 + SYS___GET_CPUID = 0x5B9 // 1465 + SYS___GET_SYSTEM_SETTINGS = 0x5BA // 1466 + SYS_FTELLO = 0x5C8 // 1480 + SYS_FSEEKO = 0x5C9 // 1481 + SYS_LLDIV = 0x5CB // 1483 + SYS_WCSTOLL = 0x5CC // 1484 + SYS_WCSTOULL = 0x5CD // 1485 + SYS_LLABS = 0x5CE // 1486 + SYS___CONSOLE2 = 0x5D2 // 1490 + SYS_INET_NTOP = 0x5D3 // 1491 + SYS_INET_PTON = 0x5D4 // 1492 + SYS___RES = 0x5D6 // 1494 + SYS_RES_MKQUERY = 0x5D7 // 1495 + SYS_RES_INIT = 0x5D8 // 1496 + SYS_RES_QUERY = 0x5D9 // 1497 + SYS_RES_SEARCH = 0x5DA // 1498 + SYS_RES_SEND = 0x5DB // 1499 + SYS_RES_QUERYDOMAIN = 0x5DC // 1500 + SYS_DN_EXPAND = 0x5DD // 1501 + SYS_DN_SKIPNAME = 0x5DE // 1502 + SYS_DN_COMP = 0x5DF // 1503 + SYS_ASCTIME_R = 0x5E0 // 1504 + SYS_CTIME_R = 0x5E1 // 1505 + SYS_GMTIME_R = 0x5E2 // 1506 + SYS_LOCALTIME_R = 0x5E3 // 1507 + SYS_RAND_R = 0x5E4 // 1508 + SYS_STRTOK_R = 0x5E5 // 1509 + SYS_READDIR_R = 0x5E6 // 1510 + SYS_GETGRGID_R = 0x5E7 // 1511 + SYS_GETGRNAM_R = 0x5E8 // 1512 + SYS_GETLOGIN_R = 0x5E9 // 1513 + SYS_GETPWNAM_R = 0x5EA // 1514 + SYS_GETPWUID_R = 0x5EB // 1515 + SYS_TTYNAME_R = 0x5EC // 1516 + SYS_PTHREAD_ATFORK = 0x5ED // 1517 + SYS_PTHREAD_ATTR_GETGUARDSIZE = 0x5EE // 1518 + SYS_PTHREAD_ATTR_GETSTACKADDR = 0x5EF // 1519 + SYS_PTHREAD_ATTR_SETGUARDSIZE = 0x5F0 // 1520 + SYS_PTHREAD_ATTR_SETSTACKADDR = 0x5F1 // 1521 + SYS_PTHREAD_CONDATTR_GETPSHARED = 0x5F2 // 1522 + SYS_PTHREAD_CONDATTR_SETPSHARED = 0x5F3 // 1523 + SYS_PTHREAD_GETCONCURRENCY = 0x5F4 // 1524 + SYS_PTHREAD_KEY_DELETE = 0x5F5 // 1525 + SYS_PTHREAD_SETCONCURRENCY = 0x5F6 // 1526 + SYS_PTHREAD_SIGMASK = 0x5F7 // 1527 + SYS___DISCARDDATA = 0x5F8 // 1528 + SYS_PTHREAD_ATTR_GETSCHEDPARAM = 0x5F9 // 1529 + SYS_PTHREAD_ATTR_SETSCHEDPARAM = 0x5FA // 1530 + SYS_PTHREAD_ATTR_GETDETACHSTATE_U98 = 0x5FB // 1531 + SYS_PTHREAD_ATTR_SETDETACHSTATE_U98 = 0x5FC // 1532 + SYS_PTHREAD_DETACH_U98 = 0x5FD // 1533 + SYS_PTHREAD_GETSPECIFIC_U98 = 0x5FE // 1534 + SYS_PTHREAD_SETCANCELSTATE = 0x5FF // 1535 + SYS_PTHREAD_SETCANCELTYPE = 0x600 // 1536 + SYS_PTHREAD_TESTCANCEL = 0x601 // 1537 + SYS___ATANF_B = 0x602 // 1538 + SYS___ATANL_B = 0x603 // 1539 + SYS___CEILF_B = 0x604 // 1540 + SYS___CEILL_B = 0x605 // 1541 + SYS___COSF_B = 0x606 // 1542 + SYS___COSL_B = 0x607 // 1543 + SYS___FABSF_B = 0x608 // 1544 + SYS___FABSL_B = 0x609 // 1545 + SYS___FLOORF_B = 0x60A // 1546 + SYS___FLOORL_B = 0x60B // 1547 + SYS___FREXPF_B = 0x60C // 1548 + SYS___FREXPL_B = 0x60D // 1549 + SYS___LDEXPF_B = 0x60E // 1550 + SYS___LDEXPL_B = 0x60F // 1551 + SYS___SINF_B = 0x610 // 1552 + SYS___SINL_B = 0x611 // 1553 + SYS___TANF_B = 0x612 // 1554 + SYS___TANL_B = 0x613 // 1555 + SYS___TANHF_B = 0x614 // 1556 + SYS___TANHL_B = 0x615 // 1557 + SYS___ACOSF_B = 0x616 // 1558 + SYS___ACOSL_B = 0x617 // 1559 + SYS___ASINF_B = 0x618 // 1560 + SYS___ASINL_B = 0x619 // 1561 + SYS___ATAN2F_B = 0x61A // 1562 + SYS___ATAN2L_B = 0x61B // 1563 + SYS___COSHF_B = 0x61C // 1564 + SYS___COSHL_B = 0x61D // 1565 + 
SYS___EXPF_B = 0x61E // 1566 + SYS___EXPL_B = 0x61F // 1567 + SYS___LOGF_B = 0x620 // 1568 + SYS___LOGL_B = 0x621 // 1569 + SYS___LOG10F_B = 0x622 // 1570 + SYS___LOG10L_B = 0x623 // 1571 + SYS___POWF_B = 0x624 // 1572 + SYS___POWL_B = 0x625 // 1573 + SYS___SINHF_B = 0x626 // 1574 + SYS___SINHL_B = 0x627 // 1575 + SYS___SQRTF_B = 0x628 // 1576 + SYS___SQRTL_B = 0x629 // 1577 + SYS___ABSF_B = 0x62A // 1578 + SYS___ABS_B = 0x62B // 1579 + SYS___ABSL_B = 0x62C // 1580 + SYS___FMODF_B = 0x62D // 1581 + SYS___FMODL_B = 0x62E // 1582 + SYS___MODFF_B = 0x62F // 1583 + SYS___MODFL_B = 0x630 // 1584 + SYS_ABSF = 0x631 // 1585 + SYS_ABSL = 0x632 // 1586 + SYS_ACOSF = 0x633 // 1587 + SYS_ACOSL = 0x634 // 1588 + SYS_ASINF = 0x635 // 1589 + SYS_ASINL = 0x636 // 1590 + SYS_ATAN2F = 0x637 // 1591 + SYS_ATAN2L = 0x638 // 1592 + SYS_ATANF = 0x639 // 1593 + SYS_ATANL = 0x63A // 1594 + SYS_CEILF = 0x63B // 1595 + SYS_CEILL = 0x63C // 1596 + SYS_COSF = 0x63D // 1597 + SYS_COSL = 0x63E // 1598 + SYS_COSHF = 0x63F // 1599 + SYS_COSHL = 0x640 // 1600 + SYS_EXPF = 0x641 // 1601 + SYS_EXPL = 0x642 // 1602 + SYS_TANHF = 0x643 // 1603 + SYS_TANHL = 0x644 // 1604 + SYS_LOG10F = 0x645 // 1605 + SYS_LOG10L = 0x646 // 1606 + SYS_LOGF = 0x647 // 1607 + SYS_LOGL = 0x648 // 1608 + SYS_POWF = 0x649 // 1609 + SYS_POWL = 0x64A // 1610 + SYS_SINF = 0x64B // 1611 + SYS_SINL = 0x64C // 1612 + SYS_SQRTF = 0x64D // 1613 + SYS_SQRTL = 0x64E // 1614 + SYS_SINHF = 0x64F // 1615 + SYS_SINHL = 0x650 // 1616 + SYS_TANF = 0x651 // 1617 + SYS_TANL = 0x652 // 1618 + SYS_FABSF = 0x653 // 1619 + SYS_FABSL = 0x654 // 1620 + SYS_FLOORF = 0x655 // 1621 + SYS_FLOORL = 0x656 // 1622 + SYS_FMODF = 0x657 // 1623 + SYS_FMODL = 0x658 // 1624 + SYS_FREXPF = 0x659 // 1625 + SYS_FREXPL = 0x65A // 1626 + SYS_LDEXPF = 0x65B // 1627 + SYS_LDEXPL = 0x65C // 1628 + SYS_MODFF = 0x65D // 1629 + SYS_MODFL = 0x65E // 1630 + SYS_BTOWC = 0x65F // 1631 + SYS___CHATTR = 0x660 // 1632 + SYS___FCHATTR = 0x661 // 1633 + SYS___TOCCSID = 0x662 // 1634 + SYS___CSNAMETYPE = 0x663 // 1635 + SYS___TOCSNAME = 0x664 // 1636 + SYS___CCSIDTYPE = 0x665 // 1637 + SYS___AE_CORRESTBL_QUERY = 0x666 // 1638 + SYS___AE_AUTOCONVERT_STATE = 0x667 // 1639 + SYS_DN_FIND = 0x668 // 1640 + SYS___GETHOSTBYADDR_A = 0x669 // 1641 + SYS___GETHOSTBYNAME_A = 0x66A // 1642 + SYS___RES_INIT_A = 0x66B // 1643 + SYS___GETHOSTBYADDR_R_A = 0x66C // 1644 + SYS___GETHOSTBYNAME_R_A = 0x66D // 1645 + SYS___CHARMAP_INIT_A = 0x66E // 1646 + SYS___MBLEN_A = 0x66F // 1647 + SYS___MBLEN_SB_A = 0x670 // 1648 + SYS___MBLEN_STD_A = 0x671 // 1649 + SYS___MBLEN_UTF = 0x672 // 1650 + SYS___MBSTOWCS_A = 0x673 // 1651 + SYS___MBSTOWCS_STD_A = 0x674 // 1652 + SYS___MBTOWC_A = 0x675 // 1653 + SYS___MBTOWC_ISO1 = 0x676 // 1654 + SYS___MBTOWC_SBCS = 0x677 // 1655 + SYS___MBTOWC_MBCS = 0x678 // 1656 + SYS___MBTOWC_UTF = 0x679 // 1657 + SYS___WCSTOMBS_A = 0x67A // 1658 + SYS___WCSTOMBS_STD_A = 0x67B // 1659 + SYS___WCSWIDTH_A = 0x67C // 1660 + SYS___GETGRGID_R_A = 0x67D // 1661 + SYS___WCSWIDTH_STD_A = 0x67E // 1662 + SYS___WCSWIDTH_ASIA = 0x67F // 1663 + SYS___CSID_A = 0x680 // 1664 + SYS___CSID_STD_A = 0x681 // 1665 + SYS___WCSID_A = 0x682 // 1666 + SYS___WCSID_STD_A = 0x683 // 1667 + SYS___WCTOMB_A = 0x684 // 1668 + SYS___WCTOMB_ISO1 = 0x685 // 1669 + SYS___WCTOMB_STD_A = 0x686 // 1670 + SYS___WCTOMB_UTF = 0x687 // 1671 + SYS___WCWIDTH_A = 0x688 // 1672 + SYS___GETGRNAM_R_A = 0x689 // 1673 + SYS___WCWIDTH_STD_A = 0x68A // 1674 + SYS___WCWIDTH_ASIA = 0x68B // 1675 + SYS___GETPWNAM_R_A = 0x68C // 1676 + SYS___GETPWUID_R_A = 
0x68D // 1677 + SYS___GETLOGIN_R_A = 0x68E // 1678 + SYS___TTYNAME_R_A = 0x68F // 1679 + SYS___READDIR_R_A = 0x690 // 1680 + SYS___E2A_S = 0x691 // 1681 + SYS___FNMATCH_A = 0x692 // 1682 + SYS___FNMATCH_C_A = 0x693 // 1683 + SYS___EXECL_A = 0x694 // 1684 + SYS___FNMATCH_STD_A = 0x695 // 1685 + SYS___REGCOMP_A = 0x696 // 1686 + SYS___REGCOMP_STD_A = 0x697 // 1687 + SYS___REGERROR_A = 0x698 // 1688 + SYS___REGERROR_STD_A = 0x699 // 1689 + SYS___REGEXEC_A = 0x69A // 1690 + SYS___REGEXEC_STD_A = 0x69B // 1691 + SYS___REGFREE_A = 0x69C // 1692 + SYS___REGFREE_STD_A = 0x69D // 1693 + SYS___STRCOLL_A = 0x69E // 1694 + SYS___STRCOLL_C_A = 0x69F // 1695 + SYS___EXECLE_A = 0x6A0 // 1696 + SYS___STRCOLL_STD_A = 0x6A1 // 1697 + SYS___STRXFRM_A = 0x6A2 // 1698 + SYS___STRXFRM_C_A = 0x6A3 // 1699 + SYS___EXECLP_A = 0x6A4 // 1700 + SYS___STRXFRM_STD_A = 0x6A5 // 1701 + SYS___WCSCOLL_A = 0x6A6 // 1702 + SYS___WCSCOLL_C_A = 0x6A7 // 1703 + SYS___WCSCOLL_STD_A = 0x6A8 // 1704 + SYS___WCSXFRM_A = 0x6A9 // 1705 + SYS___WCSXFRM_C_A = 0x6AA // 1706 + SYS___WCSXFRM_STD_A = 0x6AB // 1707 + SYS___COLLATE_INIT_A = 0x6AC // 1708 + SYS___WCTYPE_A = 0x6AD // 1709 + SYS___GET_WCTYPE_STD_A = 0x6AE // 1710 + SYS___CTYPE_INIT_A = 0x6AF // 1711 + SYS___ISWCTYPE_A = 0x6B0 // 1712 + SYS___EXECV_A = 0x6B1 // 1713 + SYS___IS_WCTYPE_STD_A = 0x6B2 // 1714 + SYS___TOWLOWER_A = 0x6B3 // 1715 + SYS___TOWLOWER_STD_A = 0x6B4 // 1716 + SYS___TOWUPPER_A = 0x6B5 // 1717 + SYS___TOWUPPER_STD_A = 0x6B6 // 1718 + SYS___LOCALE_INIT_A = 0x6B7 // 1719 + SYS___LOCALECONV_A = 0x6B8 // 1720 + SYS___LOCALECONV_STD_A = 0x6B9 // 1721 + SYS___NL_LANGINFO_A = 0x6BA // 1722 + SYS___NL_LNAGINFO_STD_A = 0x6BB // 1723 + SYS___MONETARY_INIT_A = 0x6BC // 1724 + SYS___STRFMON_A = 0x6BD // 1725 + SYS___STRFMON_STD_A = 0x6BE // 1726 + SYS___GETADDRINFO_A = 0x6BF // 1727 + SYS___CATGETS_A = 0x6C0 // 1728 + SYS___EXECVE_A = 0x6C1 // 1729 + SYS___EXECVP_A = 0x6C2 // 1730 + SYS___SPAWN_A = 0x6C3 // 1731 + SYS___GETNAMEINFO_A = 0x6C4 // 1732 + SYS___SPAWNP_A = 0x6C5 // 1733 + SYS___NUMERIC_INIT_A = 0x6C6 // 1734 + SYS___RESP_INIT_A = 0x6C7 // 1735 + SYS___RPMATCH_A = 0x6C8 // 1736 + SYS___RPMATCH_C_A = 0x6C9 // 1737 + SYS___RPMATCH_STD_A = 0x6CA // 1738 + SYS___TIME_INIT_A = 0x6CB // 1739 + SYS___STRFTIME_A = 0x6CC // 1740 + SYS___STRFTIME_STD_A = 0x6CD // 1741 + SYS___STRPTIME_A = 0x6CE // 1742 + SYS___STRPTIME_STD_A = 0x6CF // 1743 + SYS___WCSFTIME_A = 0x6D0 // 1744 + SYS___WCSFTIME_STD_A = 0x6D1 // 1745 + SYS_____SPAWN2_A = 0x6D2 // 1746 + SYS_____SPAWNP2_A = 0x6D3 // 1747 + SYS___SYNTAX_INIT_A = 0x6D4 // 1748 + SYS___TOD_INIT_A = 0x6D5 // 1749 + SYS___NL_CSINFO_A = 0x6D6 // 1750 + SYS___NL_MONINFO_A = 0x6D7 // 1751 + SYS___NL_NUMINFO_A = 0x6D8 // 1752 + SYS___NL_RESPINFO_A = 0x6D9 // 1753 + SYS___NL_TIMINFO_A = 0x6DA // 1754 + SYS___IF_NAMETOINDEX_A = 0x6DB // 1755 + SYS___IF_INDEXTONAME_A = 0x6DC // 1756 + SYS___PRINTF_A = 0x6DD // 1757 + SYS___ICONV_OPEN_A = 0x6DE // 1758 + SYS___DLLLOAD_A = 0x6DF // 1759 + SYS___DLLQUERYFN_A = 0x6E0 // 1760 + SYS___DLLQUERYVAR_A = 0x6E1 // 1761 + SYS_____CHATTR_A = 0x6E2 // 1762 + SYS___E2A_L = 0x6E3 // 1763 + SYS_____TOCCSID_A = 0x6E4 // 1764 + SYS_____TOCSNAME_A = 0x6E5 // 1765 + SYS_____CCSIDTYPE_A = 0x6E6 // 1766 + SYS_____CSNAMETYPE_A = 0x6E7 // 1767 + SYS___CHMOD_A = 0x6E8 // 1768 + SYS___MKDIR_A = 0x6E9 // 1769 + SYS___STAT_A = 0x6EA // 1770 + SYS___STAT_O_A = 0x6EB // 1771 + SYS___MKFIFO_A = 0x6EC // 1772 + SYS_____OPEN_STAT_A = 0x6ED // 1773 + SYS___LSTAT_A = 0x6EE // 1774 + SYS___LSTAT_O_A = 0x6EF // 1775 + 
SYS___MKNOD_A = 0x6F0 // 1776 + SYS___MOUNT_A = 0x6F1 // 1777 + SYS___UMOUNT_A = 0x6F2 // 1778 + SYS___CHAUDIT_A = 0x6F4 // 1780 + SYS___W_GETMNTENT_A = 0x6F5 // 1781 + SYS___CREAT_A = 0x6F6 // 1782 + SYS___OPEN_A = 0x6F7 // 1783 + SYS___SETLOCALE_A = 0x6F9 // 1785 + SYS___FPRINTF_A = 0x6FA // 1786 + SYS___SPRINTF_A = 0x6FB // 1787 + SYS___VFPRINTF_A = 0x6FC // 1788 + SYS___VPRINTF_A = 0x6FD // 1789 + SYS___VSPRINTF_A = 0x6FE // 1790 + SYS___VSWPRINTF_A = 0x6FF // 1791 + SYS___SWPRINTF_A = 0x700 // 1792 + SYS___FSCANF_A = 0x701 // 1793 + SYS___SCANF_A = 0x702 // 1794 + SYS___SSCANF_A = 0x703 // 1795 + SYS___SWSCANF_A = 0x704 // 1796 + SYS___ATOF_A = 0x705 // 1797 + SYS___ATOI_A = 0x706 // 1798 + SYS___ATOL_A = 0x707 // 1799 + SYS___STRTOD_A = 0x708 // 1800 + SYS___STRTOL_A = 0x709 // 1801 + SYS___STRTOUL_A = 0x70A // 1802 + SYS_____AE_CORRESTBL_QUERY_A = 0x70B // 1803 + SYS___A64L_A = 0x70C // 1804 + SYS___ECVT_A = 0x70D // 1805 + SYS___FCVT_A = 0x70E // 1806 + SYS___GCVT_A = 0x70F // 1807 + SYS___L64A_A = 0x710 // 1808 + SYS___STRERROR_A = 0x711 // 1809 + SYS___PERROR_A = 0x712 // 1810 + SYS___FETCH_A = 0x713 // 1811 + SYS___GETENV_A = 0x714 // 1812 + SYS___MKSTEMP_A = 0x717 // 1815 + SYS___PTSNAME_A = 0x718 // 1816 + SYS___PUTENV_A = 0x719 // 1817 + SYS___REALPATH_A = 0x71A // 1818 + SYS___SETENV_A = 0x71B // 1819 + SYS___SYSTEM_A = 0x71C // 1820 + SYS___GETOPT_A = 0x71D // 1821 + SYS___CATOPEN_A = 0x71E // 1822 + SYS___ACCESS_A = 0x71F // 1823 + SYS___CHDIR_A = 0x720 // 1824 + SYS___CHOWN_A = 0x721 // 1825 + SYS___CHROOT_A = 0x722 // 1826 + SYS___GETCWD_A = 0x723 // 1827 + SYS___GETWD_A = 0x724 // 1828 + SYS___LCHOWN_A = 0x725 // 1829 + SYS___LINK_A = 0x726 // 1830 + SYS___PATHCONF_A = 0x727 // 1831 + SYS___IF_NAMEINDEX_A = 0x728 // 1832 + SYS___READLINK_A = 0x729 // 1833 + SYS___RMDIR_A = 0x72A // 1834 + SYS___STATVFS_A = 0x72B // 1835 + SYS___SYMLINK_A = 0x72C // 1836 + SYS___TRUNCATE_A = 0x72D // 1837 + SYS___UNLINK_A = 0x72E // 1838 + SYS___GAI_STRERROR_A = 0x72F // 1839 + SYS___EXTLINK_NP_A = 0x730 // 1840 + SYS___ISALNUM_A = 0x731 // 1841 + SYS___ISALPHA_A = 0x732 // 1842 + SYS___A2E_S = 0x733 // 1843 + SYS___ISCNTRL_A = 0x734 // 1844 + SYS___ISDIGIT_A = 0x735 // 1845 + SYS___ISGRAPH_A = 0x736 // 1846 + SYS___ISLOWER_A = 0x737 // 1847 + SYS___ISPRINT_A = 0x738 // 1848 + SYS___ISPUNCT_A = 0x739 // 1849 + SYS___ISSPACE_A = 0x73A // 1850 + SYS___ISUPPER_A = 0x73B // 1851 + SYS___ISXDIGIT_A = 0x73C // 1852 + SYS___TOLOWER_A = 0x73D // 1853 + SYS___TOUPPER_A = 0x73E // 1854 + SYS___ISWALNUM_A = 0x73F // 1855 + SYS___ISWALPHA_A = 0x740 // 1856 + SYS___A2E_L = 0x741 // 1857 + SYS___ISWCNTRL_A = 0x742 // 1858 + SYS___ISWDIGIT_A = 0x743 // 1859 + SYS___ISWGRAPH_A = 0x744 // 1860 + SYS___ISWLOWER_A = 0x745 // 1861 + SYS___ISWPRINT_A = 0x746 // 1862 + SYS___ISWPUNCT_A = 0x747 // 1863 + SYS___ISWSPACE_A = 0x748 // 1864 + SYS___ISWUPPER_A = 0x749 // 1865 + SYS___ISWXDIGIT_A = 0x74A // 1866 + SYS___CONFSTR_A = 0x74B // 1867 + SYS___FTOK_A = 0x74C // 1868 + SYS___MKTEMP_A = 0x74D // 1869 + SYS___FDOPEN_A = 0x74E // 1870 + SYS___FLDATA_A = 0x74F // 1871 + SYS___REMOVE_A = 0x750 // 1872 + SYS___RENAME_A = 0x751 // 1873 + SYS___TMPNAM_A = 0x752 // 1874 + SYS___FOPEN_A = 0x753 // 1875 + SYS___FREOPEN_A = 0x754 // 1876 + SYS___CUSERID_A = 0x755 // 1877 + SYS___POPEN_A = 0x756 // 1878 + SYS___TEMPNAM_A = 0x757 // 1879 + SYS___FTW_A = 0x758 // 1880 + SYS___GETGRENT_A = 0x759 // 1881 + SYS___GETGRGID_A = 0x75A // 1882 + SYS___GETGRNAM_A = 0x75B // 1883 + SYS___GETGROUPSBYNAME_A = 0x75C // 1884 + 
SYS___GETHOSTENT_A = 0x75D // 1885 + SYS___GETHOSTNAME_A = 0x75E // 1886 + SYS___GETLOGIN_A = 0x75F // 1887 + SYS___INET_NTOP_A = 0x760 // 1888 + SYS___GETPASS_A = 0x761 // 1889 + SYS___GETPWENT_A = 0x762 // 1890 + SYS___GETPWNAM_A = 0x763 // 1891 + SYS___GETPWUID_A = 0x764 // 1892 + SYS_____CHECK_RESOURCE_AUTH_NP_A = 0x765 // 1893 + SYS___CHECKSCHENV_A = 0x766 // 1894 + SYS___CONNECTSERVER_A = 0x767 // 1895 + SYS___CONNECTWORKMGR_A = 0x768 // 1896 + SYS_____CONSOLE_A = 0x769 // 1897 + SYS___CREATEWORKUNIT_A = 0x76A // 1898 + SYS___CTERMID_A = 0x76B // 1899 + SYS___FMTMSG_A = 0x76C // 1900 + SYS___INITGROUPS_A = 0x76D // 1901 + SYS_____LOGIN_A = 0x76E // 1902 + SYS___MSGRCV_A = 0x76F // 1903 + SYS___MSGSND_A = 0x770 // 1904 + SYS___MSGXRCV_A = 0x771 // 1905 + SYS___NFTW_A = 0x772 // 1906 + SYS_____PASSWD_A = 0x773 // 1907 + SYS___PTHREAD_SECURITY_NP_A = 0x774 // 1908 + SYS___QUERYMETRICS_A = 0x775 // 1909 + SYS___QUERYSCHENV = 0x776 // 1910 + SYS___READV_A = 0x777 // 1911 + SYS_____SERVER_CLASSIFY_A = 0x778 // 1912 + SYS_____SERVER_INIT_A = 0x779 // 1913 + SYS_____SERVER_PWU_A = 0x77A // 1914 + SYS___STRCASECMP_A = 0x77B // 1915 + SYS___STRNCASECMP_A = 0x77C // 1916 + SYS___TTYNAME_A = 0x77D // 1917 + SYS___UNAME_A = 0x77E // 1918 + SYS___UTIMES_A = 0x77F // 1919 + SYS___W_GETPSENT_A = 0x780 // 1920 + SYS___WRITEV_A = 0x781 // 1921 + SYS___W_STATFS_A = 0x782 // 1922 + SYS___W_STATVFS_A = 0x783 // 1923 + SYS___FPUTC_A = 0x784 // 1924 + SYS___PUTCHAR_A = 0x785 // 1925 + SYS___PUTS_A = 0x786 // 1926 + SYS___FGETS_A = 0x787 // 1927 + SYS___GETS_A = 0x788 // 1928 + SYS___FPUTS_A = 0x789 // 1929 + SYS___FREAD_A = 0x78A // 1930 + SYS___FWRITE_A = 0x78B // 1931 + SYS___OPEN_O_A = 0x78C // 1932 + SYS___ISASCII = 0x78D // 1933 + SYS___CREAT_O_A = 0x78E // 1934 + SYS___ENVNA = 0x78F // 1935 + SYS___PUTC_A = 0x790 // 1936 + SYS___AE_THREAD_SETMODE = 0x791 // 1937 + SYS___AE_THREAD_SWAPMODE = 0x792 // 1938 + SYS___GETNETBYADDR_A = 0x793 // 1939 + SYS___GETNETBYNAME_A = 0x794 // 1940 + SYS___GETNETENT_A = 0x795 // 1941 + SYS___GETPROTOBYNAME_A = 0x796 // 1942 + SYS___GETPROTOBYNUMBER_A = 0x797 // 1943 + SYS___GETPROTOENT_A = 0x798 // 1944 + SYS___GETSERVBYNAME_A = 0x799 // 1945 + SYS___GETSERVBYPORT_A = 0x79A // 1946 + SYS___GETSERVENT_A = 0x79B // 1947 + SYS___ASCTIME_A = 0x79C // 1948 + SYS___CTIME_A = 0x79D // 1949 + SYS___GETDATE_A = 0x79E // 1950 + SYS___TZSET_A = 0x79F // 1951 + SYS___UTIME_A = 0x7A0 // 1952 + SYS___ASCTIME_R_A = 0x7A1 // 1953 + SYS___CTIME_R_A = 0x7A2 // 1954 + SYS___STRTOLL_A = 0x7A3 // 1955 + SYS___STRTOULL_A = 0x7A4 // 1956 + SYS___FPUTWC_A = 0x7A5 // 1957 + SYS___PUTWC_A = 0x7A6 // 1958 + SYS___PUTWCHAR_A = 0x7A7 // 1959 + SYS___FPUTWS_A = 0x7A8 // 1960 + SYS___UNGETWC_A = 0x7A9 // 1961 + SYS___FGETWC_A = 0x7AA // 1962 + SYS___GETWC_A = 0x7AB // 1963 + SYS___GETWCHAR_A = 0x7AC // 1964 + SYS___FGETWS_A = 0x7AD // 1965 + SYS___GETTIMEOFDAY_A = 0x7AE // 1966 + SYS___GMTIME_A = 0x7AF // 1967 + SYS___GMTIME_R_A = 0x7B0 // 1968 + SYS___LOCALTIME_A = 0x7B1 // 1969 + SYS___LOCALTIME_R_A = 0x7B2 // 1970 + SYS___MKTIME_A = 0x7B3 // 1971 + SYS___TZZNA = 0x7B4 // 1972 + SYS_UNATEXIT = 0x7B5 // 1973 + SYS___CEE3DMP_A = 0x7B6 // 1974 + SYS___CDUMP_A = 0x7B7 // 1975 + SYS___CSNAP_A = 0x7B8 // 1976 + SYS___CTEST_A = 0x7B9 // 1977 + SYS___CTRACE_A = 0x7BA // 1978 + SYS___VSWPRNTF2_A = 0x7BB // 1979 + SYS___INET_PTON_A = 0x7BC // 1980 + SYS___SYSLOG_A = 0x7BD // 1981 + SYS___CRYPT_A = 0x7BE // 1982 + SYS_____OPENDIR2_A = 0x7BF // 1983 + SYS_____READDIR2_A = 0x7C0 // 1984 + SYS___OPENDIR_A = 
0x7C2 // 1986 + SYS___READDIR_A = 0x7C3 // 1987 + SYS_PREAD = 0x7C7 // 1991 + SYS_PWRITE = 0x7C8 // 1992 + SYS_M_CREATE_LAYOUT = 0x7C9 // 1993 + SYS_M_DESTROY_LAYOUT = 0x7CA // 1994 + SYS_M_GETVALUES_LAYOUT = 0x7CB // 1995 + SYS_M_SETVALUES_LAYOUT = 0x7CC // 1996 + SYS_M_TRANSFORM_LAYOUT = 0x7CD // 1997 + SYS_M_WTRANSFORM_LAYOUT = 0x7CE // 1998 + SYS_FWPRINTF = 0x7D1 // 2001 + SYS_WPRINTF = 0x7D2 // 2002 + SYS_VFWPRINT = 0x7D3 // 2003 + SYS_VFWPRINTF = 0x7D3 // 2003 + SYS_VWPRINTF = 0x7D4 // 2004 + SYS_FWSCANF = 0x7D5 // 2005 + SYS_WSCANF = 0x7D6 // 2006 + SYS_WCTRANS = 0x7D7 // 2007 + SYS_TOWCTRAN = 0x7D8 // 2008 + SYS_TOWCTRANS = 0x7D8 // 2008 + SYS___WCSTOD_A = 0x7D9 // 2009 + SYS___WCSTOL_A = 0x7DA // 2010 + SYS___WCSTOUL_A = 0x7DB // 2011 + SYS___BASENAME_A = 0x7DC // 2012 + SYS___DIRNAME_A = 0x7DD // 2013 + SYS___GLOB_A = 0x7DE // 2014 + SYS_FWIDE = 0x7DF // 2015 + SYS___OSNAME = 0x7E0 // 2016 + SYS_____OSNAME_A = 0x7E1 // 2017 + SYS___BTOWC_A = 0x7E4 // 2020 + SYS___WCTOB_A = 0x7E5 // 2021 + SYS___DBM_OPEN_A = 0x7E6 // 2022 + SYS___VFPRINTF2_A = 0x7E7 // 2023 + SYS___VPRINTF2_A = 0x7E8 // 2024 + SYS___VSPRINTF2_A = 0x7E9 // 2025 + SYS___CEIL_H = 0x7EA // 2026 + SYS___FLOOR_H = 0x7EB // 2027 + SYS___MODF_H = 0x7EC // 2028 + SYS___FABS_H = 0x7ED // 2029 + SYS___J0_H = 0x7EE // 2030 + SYS___J1_H = 0x7EF // 2031 + SYS___JN_H = 0x7F0 // 2032 + SYS___Y0_H = 0x7F1 // 2033 + SYS___Y1_H = 0x7F2 // 2034 + SYS___YN_H = 0x7F3 // 2035 + SYS___CEILF_H = 0x7F4 // 2036 + SYS___CEILL_H = 0x7F5 // 2037 + SYS___FLOORF_H = 0x7F6 // 2038 + SYS___FLOORL_H = 0x7F7 // 2039 + SYS___MODFF_H = 0x7F8 // 2040 + SYS___MODFL_H = 0x7F9 // 2041 + SYS___FABSF_H = 0x7FA // 2042 + SYS___FABSL_H = 0x7FB // 2043 + SYS___MALLOC24 = 0x7FC // 2044 + SYS___MALLOC31 = 0x7FD // 2045 + SYS_ACL_INIT = 0x7FE // 2046 + SYS_ACL_FREE = 0x7FF // 2047 + SYS_ACL_FIRST_ENTRY = 0x800 // 2048 + SYS_ACL_GET_ENTRY = 0x801 // 2049 + SYS_ACL_VALID = 0x802 // 2050 + SYS_ACL_CREATE_ENTRY = 0x803 // 2051 + SYS_ACL_DELETE_ENTRY = 0x804 // 2052 + SYS_ACL_UPDATE_ENTRY = 0x805 // 2053 + SYS_ACL_DELETE_FD = 0x806 // 2054 + SYS_ACL_DELETE_FILE = 0x807 // 2055 + SYS_ACL_GET_FD = 0x808 // 2056 + SYS_ACL_GET_FILE = 0x809 // 2057 + SYS_ACL_SET_FD = 0x80A // 2058 + SYS_ACL_SET_FILE = 0x80B // 2059 + SYS_ACL_FROM_TEXT = 0x80C // 2060 + SYS_ACL_TO_TEXT = 0x80D // 2061 + SYS_ACL_SORT = 0x80E // 2062 + SYS___SHUTDOWN_REGISTRATION = 0x80F // 2063 + SYS___ERFL_B = 0x810 // 2064 + SYS___ERFCL_B = 0x811 // 2065 + SYS___LGAMMAL_B = 0x812 // 2066 + SYS___SETHOOKEVENTS = 0x813 // 2067 + SYS_IF_NAMETOINDEX = 0x814 // 2068 + SYS_IF_INDEXTONAME = 0x815 // 2069 + SYS_IF_NAMEINDEX = 0x816 // 2070 + SYS_IF_FREENAMEINDEX = 0x817 // 2071 + SYS_GETADDRINFO = 0x818 // 2072 + SYS_GETNAMEINFO = 0x819 // 2073 + SYS_FREEADDRINFO = 0x81A // 2074 + SYS_GAI_STRERROR = 0x81B // 2075 + SYS_REXEC_AF = 0x81C // 2076 + SYS___POE = 0x81D // 2077 + SYS___DYNALLOC_A = 0x81F // 2079 + SYS___DYNFREE_A = 0x820 // 2080 + SYS___RES_QUERY_A = 0x821 // 2081 + SYS___RES_SEARCH_A = 0x822 // 2082 + SYS___RES_QUERYDOMAIN_A = 0x823 // 2083 + SYS___RES_MKQUERY_A = 0x824 // 2084 + SYS___RES_SEND_A = 0x825 // 2085 + SYS___DN_EXPAND_A = 0x826 // 2086 + SYS___DN_SKIPNAME_A = 0x827 // 2087 + SYS___DN_COMP_A = 0x828 // 2088 + SYS___DN_FIND_A = 0x829 // 2089 + SYS___NLIST_A = 0x82A // 2090 + SYS_____TCGETCP_A = 0x82B // 2091 + SYS_____TCSETCP_A = 0x82C // 2092 + SYS_____W_PIOCTL_A = 0x82E // 2094 + SYS___INET_ADDR_A = 0x82F // 2095 + SYS___INET_NTOA_A = 0x830 // 2096 + SYS___INET_NETWORK_A = 0x831 // 2097 + 
SYS___ACCEPT_A = 0x832 // 2098 + SYS___ACCEPT_AND_RECV_A = 0x833 // 2099 + SYS___BIND_A = 0x834 // 2100 + SYS___CONNECT_A = 0x835 // 2101 + SYS___GETPEERNAME_A = 0x836 // 2102 + SYS___GETSOCKNAME_A = 0x837 // 2103 + SYS___RECVFROM_A = 0x838 // 2104 + SYS___SENDTO_A = 0x839 // 2105 + SYS___SENDMSG_A = 0x83A // 2106 + SYS___RECVMSG_A = 0x83B // 2107 + SYS_____LCHATTR_A = 0x83C // 2108 + SYS___CABEND = 0x83D // 2109 + SYS___LE_CIB_GET = 0x83E // 2110 + SYS___SET_LAA_FOR_JIT = 0x83F // 2111 + SYS___LCHATTR = 0x840 // 2112 + SYS___WRITEDOWN = 0x841 // 2113 + SYS_PTHREAD_MUTEX_INIT2 = 0x842 // 2114 + SYS___ACOSHF_B = 0x843 // 2115 + SYS___ACOSHL_B = 0x844 // 2116 + SYS___ASINHF_B = 0x845 // 2117 + SYS___ASINHL_B = 0x846 // 2118 + SYS___ATANHF_B = 0x847 // 2119 + SYS___ATANHL_B = 0x848 // 2120 + SYS___CBRTF_B = 0x849 // 2121 + SYS___CBRTL_B = 0x84A // 2122 + SYS___COPYSIGNF_B = 0x84B // 2123 + SYS___COPYSIGNL_B = 0x84C // 2124 + SYS___COTANF_B = 0x84D // 2125 + SYS___COTAN_B = 0x84E // 2126 + SYS___COTANL_B = 0x84F // 2127 + SYS___EXP2F_B = 0x850 // 2128 + SYS___EXP2L_B = 0x851 // 2129 + SYS___EXPM1F_B = 0x852 // 2130 + SYS___EXPM1L_B = 0x853 // 2131 + SYS___FDIMF_B = 0x854 // 2132 + SYS___FDIM_B = 0x855 // 2133 + SYS___FDIML_B = 0x856 // 2134 + SYS___HYPOTF_B = 0x857 // 2135 + SYS___HYPOTL_B = 0x858 // 2136 + SYS___LOG1PF_B = 0x859 // 2137 + SYS___LOG1PL_B = 0x85A // 2138 + SYS___LOG2F_B = 0x85B // 2139 + SYS___LOG2_B = 0x85C // 2140 + SYS___LOG2L_B = 0x85D // 2141 + SYS___REMAINDERF_B = 0x85E // 2142 + SYS___REMAINDERL_B = 0x85F // 2143 + SYS___REMQUOF_B = 0x860 // 2144 + SYS___REMQUO_B = 0x861 // 2145 + SYS___REMQUOL_B = 0x862 // 2146 + SYS___TGAMMAF_B = 0x863 // 2147 + SYS___TGAMMA_B = 0x864 // 2148 + SYS___TGAMMAL_B = 0x865 // 2149 + SYS___TRUNCF_B = 0x866 // 2150 + SYS___TRUNC_B = 0x867 // 2151 + SYS___TRUNCL_B = 0x868 // 2152 + SYS___LGAMMAF_B = 0x869 // 2153 + SYS___LROUNDF_B = 0x86A // 2154 + SYS___LROUND_B = 0x86B // 2155 + SYS___ERFF_B = 0x86C // 2156 + SYS___ERFCF_B = 0x86D // 2157 + SYS_ACOSHF = 0x86E // 2158 + SYS_ACOSHL = 0x86F // 2159 + SYS_ASINHF = 0x870 // 2160 + SYS_ASINHL = 0x871 // 2161 + SYS_ATANHF = 0x872 // 2162 + SYS_ATANHL = 0x873 // 2163 + SYS_CBRTF = 0x874 // 2164 + SYS_CBRTL = 0x875 // 2165 + SYS_COPYSIGNF = 0x876 // 2166 + SYS_CPYSIGNF = 0x876 // 2166 + SYS_COPYSIGNL = 0x877 // 2167 + SYS_CPYSIGNL = 0x877 // 2167 + SYS_COTANF = 0x878 // 2168 + SYS___COTANF = 0x878 // 2168 + SYS_COTAN = 0x879 // 2169 + SYS___COTAN = 0x879 // 2169 + SYS_COTANL = 0x87A // 2170 + SYS___COTANL = 0x87A // 2170 + SYS_EXP2F = 0x87B // 2171 + SYS_EXP2L = 0x87C // 2172 + SYS_EXPM1F = 0x87D // 2173 + SYS_EXPM1L = 0x87E // 2174 + SYS_FDIMF = 0x87F // 2175 + SYS_FDIM = 0x881 // 2177 + SYS_FDIML = 0x882 // 2178 + SYS_HYPOTF = 0x883 // 2179 + SYS_HYPOTL = 0x884 // 2180 + SYS_LOG1PF = 0x885 // 2181 + SYS_LOG1PL = 0x886 // 2182 + SYS_LOG2F = 0x887 // 2183 + SYS_LOG2 = 0x888 // 2184 + SYS_LOG2L = 0x889 // 2185 + SYS_REMAINDERF = 0x88A // 2186 + SYS_REMAINDF = 0x88A // 2186 + SYS_REMAINDERL = 0x88B // 2187 + SYS_REMAINDL = 0x88B // 2187 + SYS_REMQUOF = 0x88C // 2188 + SYS_REMQUO = 0x88D // 2189 + SYS_REMQUOL = 0x88E // 2190 + SYS_TGAMMAF = 0x88F // 2191 + SYS_TGAMMA = 0x890 // 2192 + SYS_TGAMMAL = 0x891 // 2193 + SYS_TRUNCF = 0x892 // 2194 + SYS_TRUNC = 0x893 // 2195 + SYS_TRUNCL = 0x894 // 2196 + SYS_LGAMMAF = 0x895 // 2197 + SYS_LGAMMAL = 0x896 // 2198 + SYS_LROUNDF = 0x897 // 2199 + SYS_LROUND = 0x898 // 2200 + SYS_ERFF = 0x899 // 2201 + SYS_ERFL = 0x89A // 2202 + SYS_ERFCF = 0x89B // 2203 + 
SYS_ERFCL = 0x89C // 2204 + SYS___EXP2_B = 0x89D // 2205 + SYS_EXP2 = 0x89E // 2206 + SYS___FAR_JUMP = 0x89F // 2207 + SYS___TCGETATTR_A = 0x8A1 // 2209 + SYS___TCSETATTR_A = 0x8A2 // 2210 + SYS___SUPERKILL = 0x8A4 // 2212 + SYS___LE_CONDITION_TOKEN_BUILD = 0x8A5 // 2213 + SYS___LE_MSG_ADD_INSERT = 0x8A6 // 2214 + SYS___LE_MSG_GET = 0x8A7 // 2215 + SYS___LE_MSG_GET_AND_WRITE = 0x8A8 // 2216 + SYS___LE_MSG_WRITE = 0x8A9 // 2217 + SYS___ITOA = 0x8AA // 2218 + SYS___UTOA = 0x8AB // 2219 + SYS___LTOA = 0x8AC // 2220 + SYS___ULTOA = 0x8AD // 2221 + SYS___LLTOA = 0x8AE // 2222 + SYS___ULLTOA = 0x8AF // 2223 + SYS___ITOA_A = 0x8B0 // 2224 + SYS___UTOA_A = 0x8B1 // 2225 + SYS___LTOA_A = 0x8B2 // 2226 + SYS___ULTOA_A = 0x8B3 // 2227 + SYS___LLTOA_A = 0x8B4 // 2228 + SYS___ULLTOA_A = 0x8B5 // 2229 + SYS_____GETENV_A = 0x8C3 // 2243 + SYS___REXEC_A = 0x8C4 // 2244 + SYS___REXEC_AF_A = 0x8C5 // 2245 + SYS___GETUTXENT_A = 0x8C6 // 2246 + SYS___GETUTXID_A = 0x8C7 // 2247 + SYS___GETUTXLINE_A = 0x8C8 // 2248 + SYS___PUTUTXLINE_A = 0x8C9 // 2249 + SYS_____UTMPXNAME_A = 0x8CA // 2250 + SYS___PUTC_UNLOCKED_A = 0x8CB // 2251 + SYS___PUTCHAR_UNLOCKED_A = 0x8CC // 2252 + SYS___SNPRINTF_A = 0x8CD // 2253 + SYS___VSNPRINTF_A = 0x8CE // 2254 + SYS___DLOPEN_A = 0x8D0 // 2256 + SYS___DLSYM_A = 0x8D1 // 2257 + SYS___DLERROR_A = 0x8D2 // 2258 + SYS_FLOCKFILE = 0x8D3 // 2259 + SYS_FTRYLOCKFILE = 0x8D4 // 2260 + SYS_FUNLOCKFILE = 0x8D5 // 2261 + SYS_GETC_UNLOCKED = 0x8D6 // 2262 + SYS_GETCHAR_UNLOCKED = 0x8D7 // 2263 + SYS_PUTC_UNLOCKED = 0x8D8 // 2264 + SYS_PUTCHAR_UNLOCKED = 0x8D9 // 2265 + SYS_SNPRINTF = 0x8DA // 2266 + SYS_VSNPRINTF = 0x8DB // 2267 + SYS_DLOPEN = 0x8DD // 2269 + SYS_DLSYM = 0x8DE // 2270 + SYS_DLCLOSE = 0x8DF // 2271 + SYS_DLERROR = 0x8E0 // 2272 + SYS___SET_EXCEPTION_HANDLER = 0x8E2 // 2274 + SYS___RESET_EXCEPTION_HANDLER = 0x8E3 // 2275 + SYS___VHM_EVENT = 0x8E4 // 2276 + SYS___ABS_H = 0x8E6 // 2278 + SYS___ABSF_H = 0x8E7 // 2279 + SYS___ABSL_H = 0x8E8 // 2280 + SYS___ACOS_H = 0x8E9 // 2281 + SYS___ACOSF_H = 0x8EA // 2282 + SYS___ACOSL_H = 0x8EB // 2283 + SYS___ACOSH_H = 0x8EC // 2284 + SYS___ASIN_H = 0x8ED // 2285 + SYS___ASINF_H = 0x8EE // 2286 + SYS___ASINL_H = 0x8EF // 2287 + SYS___ASINH_H = 0x8F0 // 2288 + SYS___ATAN_H = 0x8F1 // 2289 + SYS___ATANF_H = 0x8F2 // 2290 + SYS___ATANL_H = 0x8F3 // 2291 + SYS___ATANH_H = 0x8F4 // 2292 + SYS___ATANHF_H = 0x8F5 // 2293 + SYS___ATANHL_H = 0x8F6 // 2294 + SYS___ATAN2_H = 0x8F7 // 2295 + SYS___ATAN2F_H = 0x8F8 // 2296 + SYS___ATAN2L_H = 0x8F9 // 2297 + SYS___CBRT_H = 0x8FA // 2298 + SYS___COPYSIGNF_H = 0x8FB // 2299 + SYS___COPYSIGNL_H = 0x8FC // 2300 + SYS___COS_H = 0x8FD // 2301 + SYS___COSF_H = 0x8FE // 2302 + SYS___COSL_H = 0x8FF // 2303 + SYS___COSHF_H = 0x900 // 2304 + SYS___COSHL_H = 0x901 // 2305 + SYS___COTAN_H = 0x902 // 2306 + SYS___COTANF_H = 0x903 // 2307 + SYS___COTANL_H = 0x904 // 2308 + SYS___ERF_H = 0x905 // 2309 + SYS___ERFF_H = 0x906 // 2310 + SYS___ERFL_H = 0x907 // 2311 + SYS___ERFC_H = 0x908 // 2312 + SYS___ERFCF_H = 0x909 // 2313 + SYS___ERFCL_H = 0x90A // 2314 + SYS___EXP_H = 0x90B // 2315 + SYS___EXPF_H = 0x90C // 2316 + SYS___EXPL_H = 0x90D // 2317 + SYS___EXPM1_H = 0x90E // 2318 + SYS___FDIM_H = 0x90F // 2319 + SYS___FDIMF_H = 0x910 // 2320 + SYS___FDIML_H = 0x911 // 2321 + SYS___FMOD_H = 0x912 // 2322 + SYS___FMODF_H = 0x913 // 2323 + SYS___FMODL_H = 0x914 // 2324 + SYS___GAMMA_H = 0x915 // 2325 + SYS___HYPOT_H = 0x916 // 2326 + SYS___ILOGB_H = 0x917 // 2327 + SYS___LGAMMA_H = 0x918 // 2328 + SYS___LGAMMAF_H = 0x919 // 2329 
+ SYS___LOG_H = 0x91A // 2330 + SYS___LOGF_H = 0x91B // 2331 + SYS___LOGL_H = 0x91C // 2332 + SYS___LOGB_H = 0x91D // 2333 + SYS___LOG2_H = 0x91E // 2334 + SYS___LOG2F_H = 0x91F // 2335 + SYS___LOG2L_H = 0x920 // 2336 + SYS___LOG1P_H = 0x921 // 2337 + SYS___LOG10_H = 0x922 // 2338 + SYS___LOG10F_H = 0x923 // 2339 + SYS___LOG10L_H = 0x924 // 2340 + SYS___LROUND_H = 0x925 // 2341 + SYS___LROUNDF_H = 0x926 // 2342 + SYS___NEXTAFTER_H = 0x927 // 2343 + SYS___POW_H = 0x928 // 2344 + SYS___POWF_H = 0x929 // 2345 + SYS___POWL_H = 0x92A // 2346 + SYS___REMAINDER_H = 0x92B // 2347 + SYS___RINT_H = 0x92C // 2348 + SYS___SCALB_H = 0x92D // 2349 + SYS___SIN_H = 0x92E // 2350 + SYS___SINF_H = 0x92F // 2351 + SYS___SINL_H = 0x930 // 2352 + SYS___SINH_H = 0x931 // 2353 + SYS___SINHF_H = 0x932 // 2354 + SYS___SINHL_H = 0x933 // 2355 + SYS___SQRT_H = 0x934 // 2356 + SYS___SQRTF_H = 0x935 // 2357 + SYS___SQRTL_H = 0x936 // 2358 + SYS___TAN_H = 0x937 // 2359 + SYS___TANF_H = 0x938 // 2360 + SYS___TANL_H = 0x939 // 2361 + SYS___TANH_H = 0x93A // 2362 + SYS___TANHF_H = 0x93B // 2363 + SYS___TANHL_H = 0x93C // 2364 + SYS___TGAMMA_H = 0x93D // 2365 + SYS___TGAMMAF_H = 0x93E // 2366 + SYS___TRUNC_H = 0x93F // 2367 + SYS___TRUNCF_H = 0x940 // 2368 + SYS___TRUNCL_H = 0x941 // 2369 + SYS___COSH_H = 0x942 // 2370 + SYS___LE_DEBUG_SET_RESUME_MCH = 0x943 // 2371 + SYS_VFSCANF = 0x944 // 2372 + SYS_VSCANF = 0x946 // 2374 + SYS_VSSCANF = 0x948 // 2376 + SYS_VFWSCANF = 0x94A // 2378 + SYS_VWSCANF = 0x94C // 2380 + SYS_VSWSCANF = 0x94E // 2382 + SYS_IMAXABS = 0x950 // 2384 + SYS_IMAXDIV = 0x951 // 2385 + SYS_STRTOIMAX = 0x952 // 2386 + SYS_STRTOUMAX = 0x953 // 2387 + SYS_WCSTOIMAX = 0x954 // 2388 + SYS_WCSTOUMAX = 0x955 // 2389 + SYS_ATOLL = 0x956 // 2390 + SYS_STRTOF = 0x957 // 2391 + SYS_STRTOLD = 0x958 // 2392 + SYS_WCSTOF = 0x959 // 2393 + SYS_WCSTOLD = 0x95A // 2394 + SYS_INET6_RTH_SPACE = 0x95B // 2395 + SYS_INET6_RTH_INIT = 0x95C // 2396 + SYS_INET6_RTH_ADD = 0x95D // 2397 + SYS_INET6_RTH_REVERSE = 0x95E // 2398 + SYS_INET6_RTH_SEGMENTS = 0x95F // 2399 + SYS_INET6_RTH_GETADDR = 0x960 // 2400 + SYS_INET6_OPT_INIT = 0x961 // 2401 + SYS_INET6_OPT_APPEND = 0x962 // 2402 + SYS_INET6_OPT_FINISH = 0x963 // 2403 + SYS_INET6_OPT_SET_VAL = 0x964 // 2404 + SYS_INET6_OPT_NEXT = 0x965 // 2405 + SYS_INET6_OPT_FIND = 0x966 // 2406 + SYS_INET6_OPT_GET_VAL = 0x967 // 2407 + SYS___POW_I = 0x987 // 2439 + SYS___POW_I_B = 0x988 // 2440 + SYS___POW_I_H = 0x989 // 2441 + SYS___POW_II = 0x98A // 2442 + SYS___POW_II_B = 0x98B // 2443 + SYS___POW_II_H = 0x98C // 2444 + SYS_CABS = 0x98E // 2446 + SYS___CABS_B = 0x98F // 2447 + SYS___CABS_H = 0x990 // 2448 + SYS_CABSF = 0x991 // 2449 + SYS___CABSF_B = 0x992 // 2450 + SYS___CABSF_H = 0x993 // 2451 + SYS_CABSL = 0x994 // 2452 + SYS___CABSL_B = 0x995 // 2453 + SYS___CABSL_H = 0x996 // 2454 + SYS_CACOS = 0x997 // 2455 + SYS___CACOS_B = 0x998 // 2456 + SYS___CACOS_H = 0x999 // 2457 + SYS_CACOSF = 0x99A // 2458 + SYS___CACOSF_B = 0x99B // 2459 + SYS___CACOSF_H = 0x99C // 2460 + SYS_CACOSL = 0x99D // 2461 + SYS___CACOSL_B = 0x99E // 2462 + SYS___CACOSL_H = 0x99F // 2463 + SYS_CACOSH = 0x9A0 // 2464 + SYS___CACOSH_B = 0x9A1 // 2465 + SYS___CACOSH_H = 0x9A2 // 2466 + SYS_CACOSHF = 0x9A3 // 2467 + SYS___CACOSHF_B = 0x9A4 // 2468 + SYS___CACOSHF_H = 0x9A5 // 2469 + SYS_CACOSHL = 0x9A6 // 2470 + SYS___CACOSHL_B = 0x9A7 // 2471 + SYS___CACOSHL_H = 0x9A8 // 2472 + SYS_CARG = 0x9A9 // 2473 + SYS___CARG_B = 0x9AA // 2474 + SYS___CARG_H = 0x9AB // 2475 + SYS_CARGF = 0x9AC // 2476 + SYS___CARGF_B = 0x9AD // 
2477 + SYS___CARGF_H = 0x9AE // 2478 + SYS_CARGL = 0x9AF // 2479 + SYS___CARGL_B = 0x9B0 // 2480 + SYS___CARGL_H = 0x9B1 // 2481 + SYS_CASIN = 0x9B2 // 2482 + SYS___CASIN_B = 0x9B3 // 2483 + SYS___CASIN_H = 0x9B4 // 2484 + SYS_CASINF = 0x9B5 // 2485 + SYS___CASINF_B = 0x9B6 // 2486 + SYS___CASINF_H = 0x9B7 // 2487 + SYS_CASINL = 0x9B8 // 2488 + SYS___CASINL_B = 0x9B9 // 2489 + SYS___CASINL_H = 0x9BA // 2490 + SYS_CASINH = 0x9BB // 2491 + SYS___CASINH_B = 0x9BC // 2492 + SYS___CASINH_H = 0x9BD // 2493 + SYS_CASINHF = 0x9BE // 2494 + SYS___CASINHF_B = 0x9BF // 2495 + SYS___CASINHF_H = 0x9C0 // 2496 + SYS_CASINHL = 0x9C1 // 2497 + SYS___CASINHL_B = 0x9C2 // 2498 + SYS___CASINHL_H = 0x9C3 // 2499 + SYS_CATAN = 0x9C4 // 2500 + SYS___CATAN_B = 0x9C5 // 2501 + SYS___CATAN_H = 0x9C6 // 2502 + SYS_CATANF = 0x9C7 // 2503 + SYS___CATANF_B = 0x9C8 // 2504 + SYS___CATANF_H = 0x9C9 // 2505 + SYS_CATANL = 0x9CA // 2506 + SYS___CATANL_B = 0x9CB // 2507 + SYS___CATANL_H = 0x9CC // 2508 + SYS_CATANH = 0x9CD // 2509 + SYS___CATANH_B = 0x9CE // 2510 + SYS___CATANH_H = 0x9CF // 2511 + SYS_CATANHF = 0x9D0 // 2512 + SYS___CATANHF_B = 0x9D1 // 2513 + SYS___CATANHF_H = 0x9D2 // 2514 + SYS_CATANHL = 0x9D3 // 2515 + SYS___CATANHL_B = 0x9D4 // 2516 + SYS___CATANHL_H = 0x9D5 // 2517 + SYS_CCOS = 0x9D6 // 2518 + SYS___CCOS_B = 0x9D7 // 2519 + SYS___CCOS_H = 0x9D8 // 2520 + SYS_CCOSF = 0x9D9 // 2521 + SYS___CCOSF_B = 0x9DA // 2522 + SYS___CCOSF_H = 0x9DB // 2523 + SYS_CCOSL = 0x9DC // 2524 + SYS___CCOSL_B = 0x9DD // 2525 + SYS___CCOSL_H = 0x9DE // 2526 + SYS_CCOSH = 0x9DF // 2527 + SYS___CCOSH_B = 0x9E0 // 2528 + SYS___CCOSH_H = 0x9E1 // 2529 + SYS_CCOSHF = 0x9E2 // 2530 + SYS___CCOSHF_B = 0x9E3 // 2531 + SYS___CCOSHF_H = 0x9E4 // 2532 + SYS_CCOSHL = 0x9E5 // 2533 + SYS___CCOSHL_B = 0x9E6 // 2534 + SYS___CCOSHL_H = 0x9E7 // 2535 + SYS_CEXP = 0x9E8 // 2536 + SYS___CEXP_B = 0x9E9 // 2537 + SYS___CEXP_H = 0x9EA // 2538 + SYS_CEXPF = 0x9EB // 2539 + SYS___CEXPF_B = 0x9EC // 2540 + SYS___CEXPF_H = 0x9ED // 2541 + SYS_CEXPL = 0x9EE // 2542 + SYS___CEXPL_B = 0x9EF // 2543 + SYS___CEXPL_H = 0x9F0 // 2544 + SYS_CIMAG = 0x9F1 // 2545 + SYS___CIMAG_B = 0x9F2 // 2546 + SYS___CIMAG_H = 0x9F3 // 2547 + SYS_CIMAGF = 0x9F4 // 2548 + SYS___CIMAGF_B = 0x9F5 // 2549 + SYS___CIMAGF_H = 0x9F6 // 2550 + SYS_CIMAGL = 0x9F7 // 2551 + SYS___CIMAGL_B = 0x9F8 // 2552 + SYS___CIMAGL_H = 0x9F9 // 2553 + SYS___CLOG = 0x9FA // 2554 + SYS___CLOG_B = 0x9FB // 2555 + SYS___CLOG_H = 0x9FC // 2556 + SYS_CLOGF = 0x9FD // 2557 + SYS___CLOGF_B = 0x9FE // 2558 + SYS___CLOGF_H = 0x9FF // 2559 + SYS_CLOGL = 0xA00 // 2560 + SYS___CLOGL_B = 0xA01 // 2561 + SYS___CLOGL_H = 0xA02 // 2562 + SYS_CONJ = 0xA03 // 2563 + SYS___CONJ_B = 0xA04 // 2564 + SYS___CONJ_H = 0xA05 // 2565 + SYS_CONJF = 0xA06 // 2566 + SYS___CONJF_B = 0xA07 // 2567 + SYS___CONJF_H = 0xA08 // 2568 + SYS_CONJL = 0xA09 // 2569 + SYS___CONJL_B = 0xA0A // 2570 + SYS___CONJL_H = 0xA0B // 2571 + SYS_CPOW = 0xA0C // 2572 + SYS___CPOW_B = 0xA0D // 2573 + SYS___CPOW_H = 0xA0E // 2574 + SYS_CPOWF = 0xA0F // 2575 + SYS___CPOWF_B = 0xA10 // 2576 + SYS___CPOWF_H = 0xA11 // 2577 + SYS_CPOWL = 0xA12 // 2578 + SYS___CPOWL_B = 0xA13 // 2579 + SYS___CPOWL_H = 0xA14 // 2580 + SYS_CPROJ = 0xA15 // 2581 + SYS___CPROJ_B = 0xA16 // 2582 + SYS___CPROJ_H = 0xA17 // 2583 + SYS_CPROJF = 0xA18 // 2584 + SYS___CPROJF_B = 0xA19 // 2585 + SYS___CPROJF_H = 0xA1A // 2586 + SYS_CPROJL = 0xA1B // 2587 + SYS___CPROJL_B = 0xA1C // 2588 + SYS___CPROJL_H = 0xA1D // 2589 + SYS_CREAL = 0xA1E // 2590 + SYS___CREAL_B = 0xA1F // 2591 + 
SYS___CREAL_H = 0xA20 // 2592 + SYS_CREALF = 0xA21 // 2593 + SYS___CREALF_B = 0xA22 // 2594 + SYS___CREALF_H = 0xA23 // 2595 + SYS_CREALL = 0xA24 // 2596 + SYS___CREALL_B = 0xA25 // 2597 + SYS___CREALL_H = 0xA26 // 2598 + SYS_CSIN = 0xA27 // 2599 + SYS___CSIN_B = 0xA28 // 2600 + SYS___CSIN_H = 0xA29 // 2601 + SYS_CSINF = 0xA2A // 2602 + SYS___CSINF_B = 0xA2B // 2603 + SYS___CSINF_H = 0xA2C // 2604 + SYS_CSINL = 0xA2D // 2605 + SYS___CSINL_B = 0xA2E // 2606 + SYS___CSINL_H = 0xA2F // 2607 + SYS_CSINH = 0xA30 // 2608 + SYS___CSINH_B = 0xA31 // 2609 + SYS___CSINH_H = 0xA32 // 2610 + SYS_CSINHF = 0xA33 // 2611 + SYS___CSINHF_B = 0xA34 // 2612 + SYS___CSINHF_H = 0xA35 // 2613 + SYS_CSINHL = 0xA36 // 2614 + SYS___CSINHL_B = 0xA37 // 2615 + SYS___CSINHL_H = 0xA38 // 2616 + SYS_CSQRT = 0xA39 // 2617 + SYS___CSQRT_B = 0xA3A // 2618 + SYS___CSQRT_H = 0xA3B // 2619 + SYS_CSQRTF = 0xA3C // 2620 + SYS___CSQRTF_B = 0xA3D // 2621 + SYS___CSQRTF_H = 0xA3E // 2622 + SYS_CSQRTL = 0xA3F // 2623 + SYS___CSQRTL_B = 0xA40 // 2624 + SYS___CSQRTL_H = 0xA41 // 2625 + SYS_CTAN = 0xA42 // 2626 + SYS___CTAN_B = 0xA43 // 2627 + SYS___CTAN_H = 0xA44 // 2628 + SYS_CTANF = 0xA45 // 2629 + SYS___CTANF_B = 0xA46 // 2630 + SYS___CTANF_H = 0xA47 // 2631 + SYS_CTANL = 0xA48 // 2632 + SYS___CTANL_B = 0xA49 // 2633 + SYS___CTANL_H = 0xA4A // 2634 + SYS_CTANH = 0xA4B // 2635 + SYS___CTANH_B = 0xA4C // 2636 + SYS___CTANH_H = 0xA4D // 2637 + SYS_CTANHF = 0xA4E // 2638 + SYS___CTANHF_B = 0xA4F // 2639 + SYS___CTANHF_H = 0xA50 // 2640 + SYS_CTANHL = 0xA51 // 2641 + SYS___CTANHL_B = 0xA52 // 2642 + SYS___CTANHL_H = 0xA53 // 2643 + SYS___ACOSHF_H = 0xA54 // 2644 + SYS___ACOSHL_H = 0xA55 // 2645 + SYS___ASINHF_H = 0xA56 // 2646 + SYS___ASINHL_H = 0xA57 // 2647 + SYS___CBRTF_H = 0xA58 // 2648 + SYS___CBRTL_H = 0xA59 // 2649 + SYS___COPYSIGN_B = 0xA5A // 2650 + SYS___EXPM1F_H = 0xA5B // 2651 + SYS___EXPM1L_H = 0xA5C // 2652 + SYS___EXP2_H = 0xA5D // 2653 + SYS___EXP2F_H = 0xA5E // 2654 + SYS___EXP2L_H = 0xA5F // 2655 + SYS___LOG1PF_H = 0xA60 // 2656 + SYS___LOG1PL_H = 0xA61 // 2657 + SYS___LGAMMAL_H = 0xA62 // 2658 + SYS_FMA = 0xA63 // 2659 + SYS___FMA_B = 0xA64 // 2660 + SYS___FMA_H = 0xA65 // 2661 + SYS_FMAF = 0xA66 // 2662 + SYS___FMAF_B = 0xA67 // 2663 + SYS___FMAF_H = 0xA68 // 2664 + SYS_FMAL = 0xA69 // 2665 + SYS___FMAL_B = 0xA6A // 2666 + SYS___FMAL_H = 0xA6B // 2667 + SYS_FMAX = 0xA6C // 2668 + SYS___FMAX_B = 0xA6D // 2669 + SYS___FMAX_H = 0xA6E // 2670 + SYS_FMAXF = 0xA6F // 2671 + SYS___FMAXF_B = 0xA70 // 2672 + SYS___FMAXF_H = 0xA71 // 2673 + SYS_FMAXL = 0xA72 // 2674 + SYS___FMAXL_B = 0xA73 // 2675 + SYS___FMAXL_H = 0xA74 // 2676 + SYS_FMIN = 0xA75 // 2677 + SYS___FMIN_B = 0xA76 // 2678 + SYS___FMIN_H = 0xA77 // 2679 + SYS_FMINF = 0xA78 // 2680 + SYS___FMINF_B = 0xA79 // 2681 + SYS___FMINF_H = 0xA7A // 2682 + SYS_FMINL = 0xA7B // 2683 + SYS___FMINL_B = 0xA7C // 2684 + SYS___FMINL_H = 0xA7D // 2685 + SYS_ILOGBF = 0xA7E // 2686 + SYS___ILOGBF_B = 0xA7F // 2687 + SYS___ILOGBF_H = 0xA80 // 2688 + SYS_ILOGBL = 0xA81 // 2689 + SYS___ILOGBL_B = 0xA82 // 2690 + SYS___ILOGBL_H = 0xA83 // 2691 + SYS_LLRINT = 0xA84 // 2692 + SYS___LLRINT_B = 0xA85 // 2693 + SYS___LLRINT_H = 0xA86 // 2694 + SYS_LLRINTF = 0xA87 // 2695 + SYS___LLRINTF_B = 0xA88 // 2696 + SYS___LLRINTF_H = 0xA89 // 2697 + SYS_LLRINTL = 0xA8A // 2698 + SYS___LLRINTL_B = 0xA8B // 2699 + SYS___LLRINTL_H = 0xA8C // 2700 + SYS_LLROUND = 0xA8D // 2701 + SYS___LLROUND_B = 0xA8E // 2702 + SYS___LLROUND_H = 0xA8F // 2703 + SYS_LLROUNDF = 0xA90 // 2704 + SYS___LLROUNDF_B = 0xA91 // 
2705 + SYS___LLROUNDF_H = 0xA92 // 2706 + SYS_LLROUNDL = 0xA93 // 2707 + SYS___LLROUNDL_B = 0xA94 // 2708 + SYS___LLROUNDL_H = 0xA95 // 2709 + SYS_LOGBF = 0xA96 // 2710 + SYS___LOGBF_B = 0xA97 // 2711 + SYS___LOGBF_H = 0xA98 // 2712 + SYS_LOGBL = 0xA99 // 2713 + SYS___LOGBL_B = 0xA9A // 2714 + SYS___LOGBL_H = 0xA9B // 2715 + SYS_LRINT = 0xA9C // 2716 + SYS___LRINT_B = 0xA9D // 2717 + SYS___LRINT_H = 0xA9E // 2718 + SYS_LRINTF = 0xA9F // 2719 + SYS___LRINTF_B = 0xAA0 // 2720 + SYS___LRINTF_H = 0xAA1 // 2721 + SYS_LRINTL = 0xAA2 // 2722 + SYS___LRINTL_B = 0xAA3 // 2723 + SYS___LRINTL_H = 0xAA4 // 2724 + SYS_LROUNDL = 0xAA5 // 2725 + SYS___LROUNDL_B = 0xAA6 // 2726 + SYS___LROUNDL_H = 0xAA7 // 2727 + SYS_NAN = 0xAA8 // 2728 + SYS___NAN_B = 0xAA9 // 2729 + SYS_NANF = 0xAAA // 2730 + SYS___NANF_B = 0xAAB // 2731 + SYS_NANL = 0xAAC // 2732 + SYS___NANL_B = 0xAAD // 2733 + SYS_NEARBYINT = 0xAAE // 2734 + SYS___NEARBYINT_B = 0xAAF // 2735 + SYS___NEARBYINT_H = 0xAB0 // 2736 + SYS_NEARBYINTF = 0xAB1 // 2737 + SYS___NEARBYINTF_B = 0xAB2 // 2738 + SYS___NEARBYINTF_H = 0xAB3 // 2739 + SYS_NEARBYINTL = 0xAB4 // 2740 + SYS___NEARBYINTL_B = 0xAB5 // 2741 + SYS___NEARBYINTL_H = 0xAB6 // 2742 + SYS_NEXTAFTERF = 0xAB7 // 2743 + SYS___NEXTAFTERF_B = 0xAB8 // 2744 + SYS___NEXTAFTERF_H = 0xAB9 // 2745 + SYS_NEXTAFTERL = 0xABA // 2746 + SYS___NEXTAFTERL_B = 0xABB // 2747 + SYS___NEXTAFTERL_H = 0xABC // 2748 + SYS_NEXTTOWARD = 0xABD // 2749 + SYS___NEXTTOWARD_B = 0xABE // 2750 + SYS___NEXTTOWARD_H = 0xABF // 2751 + SYS_NEXTTOWARDF = 0xAC0 // 2752 + SYS___NEXTTOWARDF_B = 0xAC1 // 2753 + SYS___NEXTTOWARDF_H = 0xAC2 // 2754 + SYS_NEXTTOWARDL = 0xAC3 // 2755 + SYS___NEXTTOWARDL_B = 0xAC4 // 2756 + SYS___NEXTTOWARDL_H = 0xAC5 // 2757 + SYS___REMAINDERF_H = 0xAC6 // 2758 + SYS___REMAINDERL_H = 0xAC7 // 2759 + SYS___REMQUO_H = 0xAC8 // 2760 + SYS___REMQUOF_H = 0xAC9 // 2761 + SYS___REMQUOL_H = 0xACA // 2762 + SYS_RINTF = 0xACB // 2763 + SYS___RINTF_B = 0xACC // 2764 + SYS_RINTL = 0xACD // 2765 + SYS___RINTL_B = 0xACE // 2766 + SYS_ROUND = 0xACF // 2767 + SYS___ROUND_B = 0xAD0 // 2768 + SYS___ROUND_H = 0xAD1 // 2769 + SYS_ROUNDF = 0xAD2 // 2770 + SYS___ROUNDF_B = 0xAD3 // 2771 + SYS___ROUNDF_H = 0xAD4 // 2772 + SYS_ROUNDL = 0xAD5 // 2773 + SYS___ROUNDL_B = 0xAD6 // 2774 + SYS___ROUNDL_H = 0xAD7 // 2775 + SYS_SCALBLN = 0xAD8 // 2776 + SYS___SCALBLN_B = 0xAD9 // 2777 + SYS___SCALBLN_H = 0xADA // 2778 + SYS_SCALBLNF = 0xADB // 2779 + SYS___SCALBLNF_B = 0xADC // 2780 + SYS___SCALBLNF_H = 0xADD // 2781 + SYS_SCALBLNL = 0xADE // 2782 + SYS___SCALBLNL_B = 0xADF // 2783 + SYS___SCALBLNL_H = 0xAE0 // 2784 + SYS___SCALBN_B = 0xAE1 // 2785 + SYS___SCALBN_H = 0xAE2 // 2786 + SYS_SCALBNF = 0xAE3 // 2787 + SYS___SCALBNF_B = 0xAE4 // 2788 + SYS___SCALBNF_H = 0xAE5 // 2789 + SYS_SCALBNL = 0xAE6 // 2790 + SYS___SCALBNL_B = 0xAE7 // 2791 + SYS___SCALBNL_H = 0xAE8 // 2792 + SYS___TGAMMAL_H = 0xAE9 // 2793 + SYS_FECLEAREXCEPT = 0xAEA // 2794 + SYS_FEGETENV = 0xAEB // 2795 + SYS_FEGETEXCEPTFLAG = 0xAEC // 2796 + SYS_FEGETROUND = 0xAED // 2797 + SYS_FEHOLDEXCEPT = 0xAEE // 2798 + SYS_FERAISEEXCEPT = 0xAEF // 2799 + SYS_FESETENV = 0xAF0 // 2800 + SYS_FESETEXCEPTFLAG = 0xAF1 // 2801 + SYS_FESETROUND = 0xAF2 // 2802 + SYS_FETESTEXCEPT = 0xAF3 // 2803 + SYS_FEUPDATEENV = 0xAF4 // 2804 + SYS___COPYSIGN_H = 0xAF5 // 2805 + SYS___HYPOTF_H = 0xAF6 // 2806 + SYS___HYPOTL_H = 0xAF7 // 2807 + SYS___CLASS = 0xAFA // 2810 + SYS___CLASS_B = 0xAFB // 2811 + SYS___CLASS_H = 0xAFC // 2812 + SYS___ISBLANK_A = 0xB2E // 2862 + SYS___ISWBLANK_A = 0xB2F // 2863 + 
SYS___LROUND_FIXUP = 0xB30 // 2864 + SYS___LROUNDF_FIXUP = 0xB31 // 2865 + SYS_SCHED_YIELD = 0xB32 // 2866 + SYS_STRERROR_R = 0xB33 // 2867 + SYS_UNSETENV = 0xB34 // 2868 + SYS___LGAMMA_H_C99 = 0xB38 // 2872 + SYS___LGAMMA_B_C99 = 0xB39 // 2873 + SYS___LGAMMA_R_C99 = 0xB3A // 2874 + SYS___FTELL2 = 0xB3B // 2875 + SYS___FSEEK2 = 0xB3C // 2876 + SYS___STATIC_REINIT = 0xB3D // 2877 + SYS_PTHREAD_ATTR_GETSTACK = 0xB3E // 2878 + SYS_PTHREAD_ATTR_SETSTACK = 0xB3F // 2879 + SYS___TGAMMA_H_C99 = 0xB78 // 2936 + SYS___TGAMMAF_H_C99 = 0xB79 // 2937 + SYS___LE_TRACEBACK = 0xB7A // 2938 + SYS___MUST_STAY_CLEAN = 0xB7C // 2940 + SYS___O_ENV = 0xB7D // 2941 + SYS_ACOSD32 = 0xB7E // 2942 + SYS_ACOSD64 = 0xB7F // 2943 + SYS_ACOSD128 = 0xB80 // 2944 + SYS_ACOSHD32 = 0xB81 // 2945 + SYS_ACOSHD64 = 0xB82 // 2946 + SYS_ACOSHD128 = 0xB83 // 2947 + SYS_ASIND32 = 0xB84 // 2948 + SYS_ASIND64 = 0xB85 // 2949 + SYS_ASIND128 = 0xB86 // 2950 + SYS_ASINHD32 = 0xB87 // 2951 + SYS_ASINHD64 = 0xB88 // 2952 + SYS_ASINHD128 = 0xB89 // 2953 + SYS_ATAND32 = 0xB8A // 2954 + SYS_ATAND64 = 0xB8B // 2955 + SYS_ATAND128 = 0xB8C // 2956 + SYS_ATAN2D32 = 0xB8D // 2957 + SYS_ATAN2D64 = 0xB8E // 2958 + SYS_ATAN2D128 = 0xB8F // 2959 + SYS_ATANHD32 = 0xB90 // 2960 + SYS_ATANHD64 = 0xB91 // 2961 + SYS_ATANHD128 = 0xB92 // 2962 + SYS_CBRTD32 = 0xB93 // 2963 + SYS_CBRTD64 = 0xB94 // 2964 + SYS_CBRTD128 = 0xB95 // 2965 + SYS_CEILD32 = 0xB96 // 2966 + SYS_CEILD64 = 0xB97 // 2967 + SYS_CEILD128 = 0xB98 // 2968 + SYS___CLASS2 = 0xB99 // 2969 + SYS___CLASS2_B = 0xB9A // 2970 + SYS___CLASS2_H = 0xB9B // 2971 + SYS_COPYSIGND32 = 0xB9C // 2972 + SYS_COPYSIGND64 = 0xB9D // 2973 + SYS_COPYSIGND128 = 0xB9E // 2974 + SYS_COSD32 = 0xB9F // 2975 + SYS_COSD64 = 0xBA0 // 2976 + SYS_COSD128 = 0xBA1 // 2977 + SYS_COSHD32 = 0xBA2 // 2978 + SYS_COSHD64 = 0xBA3 // 2979 + SYS_COSHD128 = 0xBA4 // 2980 + SYS_ERFD32 = 0xBA5 // 2981 + SYS_ERFD64 = 0xBA6 // 2982 + SYS_ERFD128 = 0xBA7 // 2983 + SYS_ERFCD32 = 0xBA8 // 2984 + SYS_ERFCD64 = 0xBA9 // 2985 + SYS_ERFCD128 = 0xBAA // 2986 + SYS_EXPD32 = 0xBAB // 2987 + SYS_EXPD64 = 0xBAC // 2988 + SYS_EXPD128 = 0xBAD // 2989 + SYS_EXP2D32 = 0xBAE // 2990 + SYS_EXP2D64 = 0xBAF // 2991 + SYS_EXP2D128 = 0xBB0 // 2992 + SYS_EXPM1D32 = 0xBB1 // 2993 + SYS_EXPM1D64 = 0xBB2 // 2994 + SYS_EXPM1D128 = 0xBB3 // 2995 + SYS_FABSD32 = 0xBB4 // 2996 + SYS_FABSD64 = 0xBB5 // 2997 + SYS_FABSD128 = 0xBB6 // 2998 + SYS_FDIMD32 = 0xBB7 // 2999 + SYS_FDIMD64 = 0xBB8 // 3000 + SYS_FDIMD128 = 0xBB9 // 3001 + SYS_FE_DEC_GETROUND = 0xBBA // 3002 + SYS_FE_DEC_SETROUND = 0xBBB // 3003 + SYS_FLOORD32 = 0xBBC // 3004 + SYS_FLOORD64 = 0xBBD // 3005 + SYS_FLOORD128 = 0xBBE // 3006 + SYS_FMAD32 = 0xBBF // 3007 + SYS_FMAD64 = 0xBC0 // 3008 + SYS_FMAD128 = 0xBC1 // 3009 + SYS_FMAXD32 = 0xBC2 // 3010 + SYS_FMAXD64 = 0xBC3 // 3011 + SYS_FMAXD128 = 0xBC4 // 3012 + SYS_FMIND32 = 0xBC5 // 3013 + SYS_FMIND64 = 0xBC6 // 3014 + SYS_FMIND128 = 0xBC7 // 3015 + SYS_FMODD32 = 0xBC8 // 3016 + SYS_FMODD64 = 0xBC9 // 3017 + SYS_FMODD128 = 0xBCA // 3018 + SYS___FP_CAST_D = 0xBCB // 3019 + SYS_FREXPD32 = 0xBCC // 3020 + SYS_FREXPD64 = 0xBCD // 3021 + SYS_FREXPD128 = 0xBCE // 3022 + SYS_HYPOTD32 = 0xBCF // 3023 + SYS_HYPOTD64 = 0xBD0 // 3024 + SYS_HYPOTD128 = 0xBD1 // 3025 + SYS_ILOGBD32 = 0xBD2 // 3026 + SYS_ILOGBD64 = 0xBD3 // 3027 + SYS_ILOGBD128 = 0xBD4 // 3028 + SYS_LDEXPD32 = 0xBD5 // 3029 + SYS_LDEXPD64 = 0xBD6 // 3030 + SYS_LDEXPD128 = 0xBD7 // 3031 + SYS_LGAMMAD32 = 0xBD8 // 3032 + SYS_LGAMMAD64 = 0xBD9 // 3033 + SYS_LGAMMAD128 = 0xBDA // 3034 + SYS_LLRINTD32 = 
0xBDB // 3035 + SYS_LLRINTD64 = 0xBDC // 3036 + SYS_LLRINTD128 = 0xBDD // 3037 + SYS_LLROUNDD32 = 0xBDE // 3038 + SYS_LLROUNDD64 = 0xBDF // 3039 + SYS_LLROUNDD128 = 0xBE0 // 3040 + SYS_LOGD32 = 0xBE1 // 3041 + SYS_LOGD64 = 0xBE2 // 3042 + SYS_LOGD128 = 0xBE3 // 3043 + SYS_LOG10D32 = 0xBE4 // 3044 + SYS_LOG10D64 = 0xBE5 // 3045 + SYS_LOG10D128 = 0xBE6 // 3046 + SYS_LOG1PD32 = 0xBE7 // 3047 + SYS_LOG1PD64 = 0xBE8 // 3048 + SYS_LOG1PD128 = 0xBE9 // 3049 + SYS_LOG2D32 = 0xBEA // 3050 + SYS_LOG2D64 = 0xBEB // 3051 + SYS_LOG2D128 = 0xBEC // 3052 + SYS_LOGBD32 = 0xBED // 3053 + SYS_LOGBD64 = 0xBEE // 3054 + SYS_LOGBD128 = 0xBEF // 3055 + SYS_LRINTD32 = 0xBF0 // 3056 + SYS_LRINTD64 = 0xBF1 // 3057 + SYS_LRINTD128 = 0xBF2 // 3058 + SYS_LROUNDD32 = 0xBF3 // 3059 + SYS_LROUNDD64 = 0xBF4 // 3060 + SYS_LROUNDD128 = 0xBF5 // 3061 + SYS_MODFD32 = 0xBF6 // 3062 + SYS_MODFD64 = 0xBF7 // 3063 + SYS_MODFD128 = 0xBF8 // 3064 + SYS_NAND32 = 0xBF9 // 3065 + SYS_NAND64 = 0xBFA // 3066 + SYS_NAND128 = 0xBFB // 3067 + SYS_NEARBYINTD32 = 0xBFC // 3068 + SYS_NEARBYINTD64 = 0xBFD // 3069 + SYS_NEARBYINTD128 = 0xBFE // 3070 + SYS_NEXTAFTERD32 = 0xBFF // 3071 + SYS_NEXTAFTERD64 = 0xC00 // 3072 + SYS_NEXTAFTERD128 = 0xC01 // 3073 + SYS_NEXTTOWARDD32 = 0xC02 // 3074 + SYS_NEXTTOWARDD64 = 0xC03 // 3075 + SYS_NEXTTOWARDD128 = 0xC04 // 3076 + SYS_POWD32 = 0xC05 // 3077 + SYS_POWD64 = 0xC06 // 3078 + SYS_POWD128 = 0xC07 // 3079 + SYS_QUANTIZED32 = 0xC08 // 3080 + SYS_QUANTIZED64 = 0xC09 // 3081 + SYS_QUANTIZED128 = 0xC0A // 3082 + SYS_REMAINDERD32 = 0xC0B // 3083 + SYS_REMAINDERD64 = 0xC0C // 3084 + SYS_REMAINDERD128 = 0xC0D // 3085 + SYS___REMQUOD32 = 0xC0E // 3086 + SYS___REMQUOD64 = 0xC0F // 3087 + SYS___REMQUOD128 = 0xC10 // 3088 + SYS_RINTD32 = 0xC11 // 3089 + SYS_RINTD64 = 0xC12 // 3090 + SYS_RINTD128 = 0xC13 // 3091 + SYS_ROUNDD32 = 0xC14 // 3092 + SYS_ROUNDD64 = 0xC15 // 3093 + SYS_ROUNDD128 = 0xC16 // 3094 + SYS_SAMEQUANTUMD32 = 0xC17 // 3095 + SYS_SAMEQUANTUMD64 = 0xC18 // 3096 + SYS_SAMEQUANTUMD128 = 0xC19 // 3097 + SYS_SCALBLND32 = 0xC1A // 3098 + SYS_SCALBLND64 = 0xC1B // 3099 + SYS_SCALBLND128 = 0xC1C // 3100 + SYS_SCALBND32 = 0xC1D // 3101 + SYS_SCALBND64 = 0xC1E // 3102 + SYS_SCALBND128 = 0xC1F // 3103 + SYS_SIND32 = 0xC20 // 3104 + SYS_SIND64 = 0xC21 // 3105 + SYS_SIND128 = 0xC22 // 3106 + SYS_SINHD32 = 0xC23 // 3107 + SYS_SINHD64 = 0xC24 // 3108 + SYS_SINHD128 = 0xC25 // 3109 + SYS_SQRTD32 = 0xC26 // 3110 + SYS_SQRTD64 = 0xC27 // 3111 + SYS_SQRTD128 = 0xC28 // 3112 + SYS_STRTOD32 = 0xC29 // 3113 + SYS_STRTOD64 = 0xC2A // 3114 + SYS_STRTOD128 = 0xC2B // 3115 + SYS_TAND32 = 0xC2C // 3116 + SYS_TAND64 = 0xC2D // 3117 + SYS_TAND128 = 0xC2E // 3118 + SYS_TANHD32 = 0xC2F // 3119 + SYS_TANHD64 = 0xC30 // 3120 + SYS_TANHD128 = 0xC31 // 3121 + SYS_TGAMMAD32 = 0xC32 // 3122 + SYS_TGAMMAD64 = 0xC33 // 3123 + SYS_TGAMMAD128 = 0xC34 // 3124 + SYS_TRUNCD32 = 0xC3E // 3134 + SYS_TRUNCD64 = 0xC3F // 3135 + SYS_TRUNCD128 = 0xC40 // 3136 + SYS_WCSTOD32 = 0xC41 // 3137 + SYS_WCSTOD64 = 0xC42 // 3138 + SYS_WCSTOD128 = 0xC43 // 3139 + SYS___CODEPAGE_INFO = 0xC64 // 3172 + SYS_POSIX_OPENPT = 0xC66 // 3174 + SYS_PSELECT = 0xC67 // 3175 + SYS_SOCKATMARK = 0xC68 // 3176 + SYS_AIO_FSYNC = 0xC69 // 3177 + SYS_LIO_LISTIO = 0xC6A // 3178 + SYS___ATANPID32 = 0xC6B // 3179 + SYS___ATANPID64 = 0xC6C // 3180 + SYS___ATANPID128 = 0xC6D // 3181 + SYS___COSPID32 = 0xC6E // 3182 + SYS___COSPID64 = 0xC6F // 3183 + SYS___COSPID128 = 0xC70 // 3184 + SYS___SINPID32 = 0xC71 // 3185 + SYS___SINPID64 = 0xC72 // 3186 + SYS___SINPID128 = 0xC73 // 3187 
+ SYS_SETIPV4SOURCEFILTER = 0xC76 // 3190 + SYS_GETIPV4SOURCEFILTER = 0xC77 // 3191 + SYS_SETSOURCEFILTER = 0xC78 // 3192 + SYS_GETSOURCEFILTER = 0xC79 // 3193 + SYS_FWRITE_UNLOCKED = 0xC7A // 3194 + SYS_FREAD_UNLOCKED = 0xC7B // 3195 + SYS_FGETS_UNLOCKED = 0xC7C // 3196 + SYS_GETS_UNLOCKED = 0xC7D // 3197 + SYS_FPUTS_UNLOCKED = 0xC7E // 3198 + SYS_PUTS_UNLOCKED = 0xC7F // 3199 + SYS_FGETC_UNLOCKED = 0xC80 // 3200 + SYS_FPUTC_UNLOCKED = 0xC81 // 3201 + SYS_DLADDR = 0xC82 // 3202 + SYS_SHM_OPEN = 0xC8C // 3212 + SYS_SHM_UNLINK = 0xC8D // 3213 + SYS___CLASS2F = 0xC91 // 3217 + SYS___CLASS2L = 0xC92 // 3218 + SYS___CLASS2F_B = 0xC93 // 3219 + SYS___CLASS2F_H = 0xC94 // 3220 + SYS___CLASS2L_B = 0xC95 // 3221 + SYS___CLASS2L_H = 0xC96 // 3222 + SYS___CLASS2D32 = 0xC97 // 3223 + SYS___CLASS2D64 = 0xC98 // 3224 + SYS___CLASS2D128 = 0xC99 // 3225 + SYS___TOCSNAME2 = 0xC9A // 3226 + SYS___D1TOP = 0xC9B // 3227 + SYS___D2TOP = 0xC9C // 3228 + SYS___D4TOP = 0xC9D // 3229 + SYS___PTOD1 = 0xC9E // 3230 + SYS___PTOD2 = 0xC9F // 3231 + SYS___PTOD4 = 0xCA0 // 3232 + SYS_CLEARERR_UNLOCKED = 0xCA1 // 3233 + SYS_FDELREC_UNLOCKED = 0xCA2 // 3234 + SYS_FEOF_UNLOCKED = 0xCA3 // 3235 + SYS_FERROR_UNLOCKED = 0xCA4 // 3236 + SYS_FFLUSH_UNLOCKED = 0xCA5 // 3237 + SYS_FGETPOS_UNLOCKED = 0xCA6 // 3238 + SYS_FGETWC_UNLOCKED = 0xCA7 // 3239 + SYS_FGETWS_UNLOCKED = 0xCA8 // 3240 + SYS_FILENO_UNLOCKED = 0xCA9 // 3241 + SYS_FLDATA_UNLOCKED = 0xCAA // 3242 + SYS_FLOCATE_UNLOCKED = 0xCAB // 3243 + SYS_FPRINTF_UNLOCKED = 0xCAC // 3244 + SYS_FPUTWC_UNLOCKED = 0xCAD // 3245 + SYS_FPUTWS_UNLOCKED = 0xCAE // 3246 + SYS_FSCANF_UNLOCKED = 0xCAF // 3247 + SYS_FSEEK_UNLOCKED = 0xCB0 // 3248 + SYS_FSEEKO_UNLOCKED = 0xCB1 // 3249 + SYS_FSETPOS_UNLOCKED = 0xCB3 // 3251 + SYS_FTELL_UNLOCKED = 0xCB4 // 3252 + SYS_FTELLO_UNLOCKED = 0xCB5 // 3253 + SYS_FUPDATE_UNLOCKED = 0xCB7 // 3255 + SYS_FWIDE_UNLOCKED = 0xCB8 // 3256 + SYS_FWPRINTF_UNLOCKED = 0xCB9 // 3257 + SYS_FWSCANF_UNLOCKED = 0xCBA // 3258 + SYS_GETWC_UNLOCKED = 0xCBB // 3259 + SYS_GETWCHAR_UNLOCKED = 0xCBC // 3260 + SYS_PERROR_UNLOCKED = 0xCBD // 3261 + SYS_PRINTF_UNLOCKED = 0xCBE // 3262 + SYS_PUTWC_UNLOCKED = 0xCBF // 3263 + SYS_PUTWCHAR_UNLOCKED = 0xCC0 // 3264 + SYS_REWIND_UNLOCKED = 0xCC1 // 3265 + SYS_SCANF_UNLOCKED = 0xCC2 // 3266 + SYS_UNGETC_UNLOCKED = 0xCC3 // 3267 + SYS_UNGETWC_UNLOCKED = 0xCC4 // 3268 + SYS_VFPRINTF_UNLOCKED = 0xCC5 // 3269 + SYS_VFSCANF_UNLOCKED = 0xCC7 // 3271 + SYS_VFWPRINTF_UNLOCKED = 0xCC9 // 3273 + SYS_VFWSCANF_UNLOCKED = 0xCCB // 3275 + SYS_VPRINTF_UNLOCKED = 0xCCD // 3277 + SYS_VSCANF_UNLOCKED = 0xCCF // 3279 + SYS_VWPRINTF_UNLOCKED = 0xCD1 // 3281 + SYS_VWSCANF_UNLOCKED = 0xCD3 // 3283 + SYS_WPRINTF_UNLOCKED = 0xCD5 // 3285 + SYS_WSCANF_UNLOCKED = 0xCD6 // 3286 + SYS_ASCTIME64 = 0xCD7 // 3287 + SYS_ASCTIME64_R = 0xCD8 // 3288 + SYS_CTIME64 = 0xCD9 // 3289 + SYS_CTIME64_R = 0xCDA // 3290 + SYS_DIFFTIME64 = 0xCDB // 3291 + SYS_GMTIME64 = 0xCDC // 3292 + SYS_GMTIME64_R = 0xCDD // 3293 + SYS_LOCALTIME64 = 0xCDE // 3294 + SYS_LOCALTIME64_R = 0xCDF // 3295 + SYS_MKTIME64 = 0xCE0 // 3296 + SYS_TIME64 = 0xCE1 // 3297 + SYS___LOGIN_APPLID = 0xCE2 // 3298 + SYS___PASSWD_APPLID = 0xCE3 // 3299 + SYS_PTHREAD_SECURITY_APPLID_NP = 0xCE4 // 3300 + SYS___GETTHENT = 0xCE5 // 3301 + SYS_FREEIFADDRS = 0xCE6 // 3302 + SYS_GETIFADDRS = 0xCE7 // 3303 + SYS_POSIX_FALLOCATE = 0xCE8 // 3304 + SYS_POSIX_MEMALIGN = 0xCE9 // 3305 + SYS_SIZEOF_ALLOC = 0xCEA // 3306 + SYS_RESIZE_ALLOC = 0xCEB // 3307 + SYS_FREAD_NOUPDATE = 0xCEC // 3308 + SYS_FREAD_NOUPDATE_UNLOCKED = 
0xCED // 3309 + SYS_FGETPOS64 = 0xCEE // 3310 + SYS_FSEEK64 = 0xCEF // 3311 + SYS_FSEEKO64 = 0xCF0 // 3312 + SYS_FSETPOS64 = 0xCF1 // 3313 + SYS_FTELL64 = 0xCF2 // 3314 + SYS_FTELLO64 = 0xCF3 // 3315 + SYS_FGETPOS64_UNLOCKED = 0xCF4 // 3316 + SYS_FSEEK64_UNLOCKED = 0xCF5 // 3317 + SYS_FSEEKO64_UNLOCKED = 0xCF6 // 3318 + SYS_FSETPOS64_UNLOCKED = 0xCF7 // 3319 + SYS_FTELL64_UNLOCKED = 0xCF8 // 3320 + SYS_FTELLO64_UNLOCKED = 0xCF9 // 3321 + SYS_FOPEN_UNLOCKED = 0xCFA // 3322 + SYS_FREOPEN_UNLOCKED = 0xCFB // 3323 + SYS_FDOPEN_UNLOCKED = 0xCFC // 3324 + SYS_TMPFILE_UNLOCKED = 0xCFD // 3325 + SYS___MOSERVICES = 0xD3D // 3389 + SYS___GETTOD = 0xD3E // 3390 + SYS_C16RTOMB = 0xD40 // 3392 + SYS_C32RTOMB = 0xD41 // 3393 + SYS_MBRTOC16 = 0xD42 // 3394 + SYS_MBRTOC32 = 0xD43 // 3395 + SYS_QUANTEXPD32 = 0xD44 // 3396 + SYS_QUANTEXPD64 = 0xD45 // 3397 + SYS_QUANTEXPD128 = 0xD46 // 3398 + SYS___LOCALE_CTL = 0xD47 // 3399 + SYS___SMF_RECORD2 = 0xD48 // 3400 + SYS_FOPEN64 = 0xD49 // 3401 + SYS_FOPEN64_UNLOCKED = 0xD4A // 3402 + SYS_FREOPEN64 = 0xD4B // 3403 + SYS_FREOPEN64_UNLOCKED = 0xD4C // 3404 + SYS_TMPFILE64 = 0xD4D // 3405 + SYS_TMPFILE64_UNLOCKED = 0xD4E // 3406 + SYS_GETDATE64 = 0xD4F // 3407 + SYS_GETTIMEOFDAY64 = 0xD50 // 3408 + SYS_BIND2ADDRSEL = 0xD59 // 3417 + SYS_INET6_IS_SRCADDR = 0xD5A // 3418 + SYS___GETGRGID1 = 0xD5B // 3419 + SYS___GETGRNAM1 = 0xD5C // 3420 + SYS___FBUFSIZE = 0xD60 // 3424 + SYS___FPENDING = 0xD61 // 3425 + SYS___FLBF = 0xD62 // 3426 + SYS___FREADABLE = 0xD63 // 3427 + SYS___FWRITABLE = 0xD64 // 3428 + SYS___FREADING = 0xD65 // 3429 + SYS___FWRITING = 0xD66 // 3430 + SYS___FSETLOCKING = 0xD67 // 3431 + SYS__FLUSHLBF = 0xD68 // 3432 + SYS___FPURGE = 0xD69 // 3433 + SYS___FREADAHEAD = 0xD6A // 3434 + SYS___FSETERR = 0xD6B // 3435 + SYS___FPENDING_UNLOCKED = 0xD6C // 3436 + SYS___FREADING_UNLOCKED = 0xD6D // 3437 + SYS___FWRITING_UNLOCKED = 0xD6E // 3438 + SYS__FLUSHLBF_UNLOCKED = 0xD6F // 3439 + SYS___FPURGE_UNLOCKED = 0xD70 // 3440 + SYS___FREADAHEAD_UNLOCKED = 0xD71 // 3441 + SYS___LE_CEEGTJS = 0xD72 // 3442 + SYS___LE_RECORD_DUMP = 0xD73 // 3443 + SYS_FSTAT64 = 0xD74 // 3444 + SYS_LSTAT64 = 0xD75 // 3445 + SYS_STAT64 = 0xD76 // 3446 + SYS___READDIR2_64 = 0xD77 // 3447 + SYS___OPEN_STAT64 = 0xD78 // 3448 + SYS_FTW64 = 0xD79 // 3449 + SYS_NFTW64 = 0xD7A // 3450 + SYS_UTIME64 = 0xD7B // 3451 + SYS_UTIMES64 = 0xD7C // 3452 + SYS___GETIPC64 = 0xD7D // 3453 + SYS_MSGCTL64 = 0xD7E // 3454 + SYS_SEMCTL64 = 0xD7F // 3455 + SYS_SHMCTL64 = 0xD80 // 3456 + SYS_MSGXRCV64 = 0xD81 // 3457 + SYS___MGXR64 = 0xD81 // 3457 + SYS_W_GETPSENT64 = 0xD82 // 3458 + SYS_PTHREAD_COND_TIMEDWAIT64 = 0xD83 // 3459 + SYS_FTIME64 = 0xD85 // 3461 + SYS_GETUTXENT64 = 0xD86 // 3462 + SYS_GETUTXID64 = 0xD87 // 3463 + SYS_GETUTXLINE64 = 0xD88 // 3464 + SYS_PUTUTXLINE64 = 0xD89 // 3465 + SYS_NEWLOCALE = 0xD8A // 3466 + SYS_FREELOCALE = 0xD8B // 3467 + SYS_USELOCALE = 0xD8C // 3468 + SYS_DUPLOCALE = 0xD8D // 3469 + SYS___CHATTR64 = 0xD9C // 3484 + SYS___LCHATTR64 = 0xD9D // 3485 + SYS___FCHATTR64 = 0xD9E // 3486 + SYS_____CHATTR64_A = 0xD9F // 3487 + SYS_____LCHATTR64_A = 0xDA0 // 3488 + SYS___LE_CEEUSGD = 0xDA1 // 3489 + SYS___LE_IFAM_CON = 0xDA2 // 3490 + SYS___LE_IFAM_DSC = 0xDA3 // 3491 + SYS___LE_IFAM_GET = 0xDA4 // 3492 + SYS___LE_IFAM_QRY = 0xDA5 // 3493 + SYS_ALIGNED_ALLOC = 0xDA6 // 3494 + SYS_ACCEPT4 = 0xDA7 // 3495 + SYS___ACCEPT4_A = 0xDA8 // 3496 + SYS_COPYFILERANGE = 0xDA9 // 3497 + SYS_GETLINE = 0xDAA // 3498 + SYS___GETLINE_A = 0xDAB // 3499 + SYS_DIRFD = 0xDAC // 3500 + SYS_CLOCK_GETTIME = 
0xDAD // 3501 + SYS_DUP3 = 0xDAE // 3502 + SYS_EPOLL_CREATE = 0xDAF // 3503 + SYS_EPOLL_CREATE1 = 0xDB0 // 3504 + SYS_EPOLL_CTL = 0xDB1 // 3505 + SYS_EPOLL_WAIT = 0xDB2 // 3506 + SYS_EPOLL_PWAIT = 0xDB3 // 3507 + SYS_EVENTFD = 0xDB4 // 3508 + SYS_STATFS = 0xDB5 // 3509 + SYS___STATFS_A = 0xDB6 // 3510 + SYS_FSTATFS = 0xDB7 // 3511 + SYS_INOTIFY_INIT = 0xDB8 // 3512 + SYS_INOTIFY_INIT1 = 0xDB9 // 3513 + SYS_INOTIFY_ADD_WATCH = 0xDBA // 3514 + SYS___INOTIFY_ADD_WATCH_A = 0xDBB // 3515 + SYS_INOTIFY_RM_WATCH = 0xDBC // 3516 + SYS_PIPE2 = 0xDBD // 3517 + SYS_PIVOT_ROOT = 0xDBE // 3518 + SYS___PIVOT_ROOT_A = 0xDBF // 3519 + SYS_PRCTL = 0xDC0 // 3520 + SYS_PRLIMIT = 0xDC1 // 3521 + SYS_SETHOSTNAME = 0xDC2 // 3522 + SYS___SETHOSTNAME_A = 0xDC3 // 3523 + SYS_SETRESUID = 0xDC4 // 3524 + SYS_SETRESGID = 0xDC5 // 3525 + SYS_PTHREAD_CONDATTR_GETCLOCK = 0xDC6 // 3526 + SYS_FLOCK = 0xDC7 // 3527 + SYS_FGETXATTR = 0xDC8 // 3528 + SYS___FGETXATTR_A = 0xDC9 // 3529 + SYS_FLISTXATTR = 0xDCA // 3530 + SYS___FLISTXATTR_A = 0xDCB // 3531 + SYS_FREMOVEXATTR = 0xDCC // 3532 + SYS___FREMOVEXATTR_A = 0xDCD // 3533 + SYS_FSETXATTR = 0xDCE // 3534 + SYS___FSETXATTR_A = 0xDCF // 3535 + SYS_GETXATTR = 0xDD0 // 3536 + SYS___GETXATTR_A = 0xDD1 // 3537 + SYS_LGETXATTR = 0xDD2 // 3538 + SYS___LGETXATTR_A = 0xDD3 // 3539 + SYS_LISTXATTR = 0xDD4 // 3540 + SYS___LISTXATTR_A = 0xDD5 // 3541 + SYS_LLISTXATTR = 0xDD6 // 3542 + SYS___LLISTXATTR_A = 0xDD7 // 3543 + SYS_LREMOVEXATTR = 0xDD8 // 3544 + SYS___LREMOVEXATTR_A = 0xDD9 // 3545 + SYS_LSETXATTR = 0xDDA // 3546 + SYS___LSETXATTR_A = 0xDDB // 3547 + SYS_REMOVEXATTR = 0xDDC // 3548 + SYS___REMOVEXATTR_A = 0xDDD // 3549 + SYS_SETXATTR = 0xDDE // 3550 + SYS___SETXATTR_A = 0xDDF // 3551 + SYS_FDATASYNC = 0xDE0 // 3552 + SYS_SYNCFS = 0xDE1 // 3553 + SYS_FUTIMES = 0xDE2 // 3554 + SYS_FUTIMESAT = 0xDE3 // 3555 + SYS___FUTIMESAT_A = 0xDE4 // 3556 + SYS_LUTIMES = 0xDE5 // 3557 + SYS___LUTIMES_A = 0xDE6 // 3558 + SYS_INET_ATON = 0xDE7 // 3559 + SYS_GETRANDOM = 0xDE8 // 3560 + SYS_GETTID = 0xDE9 // 3561 + SYS_MEMFD_CREATE = 0xDEA // 3562 + SYS___MEMFD_CREATE_A = 0xDEB // 3563 + SYS_FACCESSAT = 0xDEC // 3564 + SYS___FACCESSAT_A = 0xDED // 3565 + SYS_FCHMODAT = 0xDEE // 3566 + SYS___FCHMODAT_A = 0xDEF // 3567 + SYS_FCHOWNAT = 0xDF0 // 3568 + SYS___FCHOWNAT_A = 0xDF1 // 3569 + SYS_FSTATAT = 0xDF2 // 3570 + SYS___FSTATAT_A = 0xDF3 // 3571 + SYS_LINKAT = 0xDF4 // 3572 + SYS___LINKAT_A = 0xDF5 // 3573 + SYS_MKDIRAT = 0xDF6 // 3574 + SYS___MKDIRAT_A = 0xDF7 // 3575 + SYS_MKFIFOAT = 0xDF8 // 3576 + SYS___MKFIFOAT_A = 0xDF9 // 3577 + SYS_MKNODAT = 0xDFA // 3578 + SYS___MKNODAT_A = 0xDFB // 3579 + SYS_OPENAT = 0xDFC // 3580 + SYS___OPENAT_A = 0xDFD // 3581 + SYS_READLINKAT = 0xDFE // 3582 + SYS___READLINKAT_A = 0xDFF // 3583 + SYS_RENAMEAT = 0xE00 // 3584 + SYS___RENAMEAT_A = 0xE01 // 3585 + SYS_RENAMEAT2 = 0xE02 // 3586 + SYS___RENAMEAT2_A = 0xE03 // 3587 + SYS_SYMLINKAT = 0xE04 // 3588 + SYS___SYMLINKAT_A = 0xE05 // 3589 + SYS_UNLINKAT = 0xE06 // 3590 + SYS___UNLINKAT_A = 0xE07 // 3591 + SYS_SYSINFO = 0xE08 // 3592 + SYS_WAIT4 = 0xE0A // 3594 + SYS_CLONE = 0xE0B // 3595 + SYS_UNSHARE = 0xE0C // 3596 + SYS_SETNS = 0xE0D // 3597 + SYS_CAPGET = 0xE0E // 3598 + SYS_CAPSET = 0xE0F // 3599 + SYS_STRCHRNUL = 0xE10 // 3600 + SYS_PTHREAD_CONDATTR_SETCLOCK = 0xE12 // 3602 + SYS_OPEN_BY_HANDLE_AT = 0xE13 // 3603 + SYS___OPEN_BY_HANDLE_AT_A = 0xE14 // 3604 + SYS___INET_ATON_A = 0xE15 // 3605 + SYS_MOUNT1 = 0xE16 // 3606 + SYS___MOUNT1_A = 0xE17 // 3607 + SYS_UMOUNT1 = 0xE18 // 3608 + SYS___UMOUNT1_A = 
0xE19 // 3609 + SYS_UMOUNT2 = 0xE1A // 3610 + SYS___UMOUNT2_A = 0xE1B // 3611 + SYS___PRCTL_A = 0xE1C // 3612 + SYS_LOCALTIME_R2 = 0xE1D // 3613 + SYS___LOCALTIME_R2_A = 0xE1E // 3614 + SYS_OPENAT2 = 0xE1F // 3615 + SYS___OPENAT2_A = 0xE20 // 3616 + SYS___LE_CEEMICT = 0xE21 // 3617 + SYS_GETENTROPY = 0xE22 // 3618 + SYS_NANOSLEEP = 0xE23 // 3619 + SYS_UTIMENSAT = 0xE24 // 3620 + SYS___UTIMENSAT_A = 0xE25 // 3621 + SYS_ASPRINTF = 0xE26 // 3622 + SYS___ASPRINTF_A = 0xE27 // 3623 + SYS_VASPRINTF = 0xE28 // 3624 + SYS___VASPRINTF_A = 0xE29 // 3625 + SYS_DPRINTF = 0xE2A // 3626 + SYS___DPRINTF_A = 0xE2B // 3627 + SYS_GETOPT_LONG = 0xE2C // 3628 + SYS___GETOPT_LONG_A = 0xE2D // 3629 + SYS_PSIGNAL = 0xE2E // 3630 + SYS___PSIGNAL_A = 0xE2F // 3631 + SYS_PSIGNAL_UNLOCKED = 0xE30 // 3632 + SYS___PSIGNAL_UNLOCKED_A = 0xE31 // 3633 + SYS_FSTATAT_O = 0xE32 // 3634 + SYS___FSTATAT_O_A = 0xE33 // 3635 + SYS_FSTATAT64 = 0xE34 // 3636 + SYS___FSTATAT64_A = 0xE35 // 3637 + SYS___CHATTRAT = 0xE36 // 3638 + SYS_____CHATTRAT_A = 0xE37 // 3639 + SYS___CHATTRAT64 = 0xE38 // 3640 + SYS_____CHATTRAT64_A = 0xE39 // 3641 + SYS_MADVISE = 0xE3A // 3642 + SYS___AUTHENTICATE = 0xE3B // 3643 + ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index eff6bcdef..4740b8348 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1178,7 +1178,8 @@ const ( PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 0x10 PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 0x11 PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 0x12 - PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x13 + PERF_SAMPLE_BRANCH_COUNTERS = 0x80000 + PERF_SAMPLE_BRANCH_MAX_SHIFT = 0x14 PERF_SAMPLE_BRANCH_USER = 0x1 PERF_SAMPLE_BRANCH_KERNEL = 0x2 PERF_SAMPLE_BRANCH_HV = 0x4 @@ -1198,7 +1199,7 @@ const ( PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 PERF_SAMPLE_BRANCH_HW_INDEX = 0x20000 PERF_SAMPLE_BRANCH_PRIV_SAVE = 0x40000 - PERF_SAMPLE_BRANCH_MAX = 0x80000 + PERF_SAMPLE_BRANCH_MAX = 0x100000 PERF_BR_UNKNOWN = 0x0 PERF_BR_COND = 0x1 PERF_BR_UNCOND = 0x2 @@ -2481,6 +2482,15 @@ type XDPMmapOffsets struct { Cr XDPRingOffset } +type XDPUmemReg struct { + Addr uint64 + Len uint64 + Chunk_size uint32 + Headroom uint32 + Flags uint32 + Tx_metadata_len uint32 +} + type XDPStatistics struct { Rx_dropped uint64 Rx_invalid_descs uint64 @@ -2935,7 +2945,7 @@ const ( BPF_TCP_LISTEN = 0xa BPF_TCP_CLOSING = 0xb BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd + BPF_TCP_MAX_STATES = 0xe TCP_BPF_IW = 0x3e9 TCP_BPF_SNDCWND_CLAMP = 0x3ea TCP_BPF_DELACK_MAX = 0x3eb @@ -3211,7 +3221,7 @@ const ( DEVLINK_CMD_LINECARD_NEW = 0x50 DEVLINK_CMD_LINECARD_DEL = 0x51 DEVLINK_CMD_SELFTESTS_GET = 0x52 - DEVLINK_CMD_MAX = 0x53 + DEVLINK_CMD_MAX = 0x54 DEVLINK_PORT_TYPE_NOTSET = 0x0 DEVLINK_PORT_TYPE_AUTO = 0x1 DEVLINK_PORT_TYPE_ETH = 0x2 @@ -4595,7 +4605,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x146 + NL80211_ATTR_MAX = 0x14a NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4861,7 +4871,7 @@ const ( NL80211_BSS_FREQUENCY_OFFSET = 0x14 NL80211_BSS_INFORMATION_ELEMENTS = 0x6 NL80211_BSS_LAST_SEEN_BOOTTIME = 0xf - NL80211_BSS_MAX = 0x16 + NL80211_BSS_MAX = 0x18 NL80211_BSS_MLD_ADDR = 0x16 NL80211_BSS_MLO_LINK_ID = 0x15 NL80211_BSS_PAD = 0x10 @@ -4965,7 +4975,7 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x9a + 
NL80211_CMD_MAX = 0x9b NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5199,7 +5209,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x1c + NL80211_FREQUENCY_ATTR_MAX = 0x20 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5693,7 +5703,7 @@ const ( NL80211_STA_FLAG_ASSOCIATED = 0x7 NL80211_STA_FLAG_AUTHENTICATED = 0x5 NL80211_STA_FLAG_AUTHORIZED = 0x1 - NL80211_STA_FLAG_MAX = 0x7 + NL80211_STA_FLAG_MAX = 0x8 NL80211_STA_FLAG_MAX_OLD_API = 0x6 NL80211_STA_FLAG_MFP = 0x4 NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2 @@ -5991,3 +6001,34 @@ type CachestatRange struct { Off uint64 Len uint64 } + +const ( + SK_MEMINFO_RMEM_ALLOC = 0x0 + SK_MEMINFO_RCVBUF = 0x1 + SK_MEMINFO_WMEM_ALLOC = 0x2 + SK_MEMINFO_SNDBUF = 0x3 + SK_MEMINFO_FWD_ALLOC = 0x4 + SK_MEMINFO_WMEM_QUEUED = 0x5 + SK_MEMINFO_OPTMEM = 0x6 + SK_MEMINFO_BACKLOG = 0x7 + SK_MEMINFO_DROPS = 0x8 + SK_MEMINFO_VARS = 0x9 + SKNLGRP_NONE = 0x0 + SKNLGRP_INET_TCP_DESTROY = 0x1 + SKNLGRP_INET_UDP_DESTROY = 0x2 + SKNLGRP_INET6_TCP_DESTROY = 0x3 + SKNLGRP_INET6_UDP_DESTROY = 0x4 + SK_DIAG_BPF_STORAGE_REQ_NONE = 0x0 + SK_DIAG_BPF_STORAGE_REQ_MAP_FD = 0x1 + SK_DIAG_BPF_STORAGE_REP_NONE = 0x0 + SK_DIAG_BPF_STORAGE = 0x1 + SK_DIAG_BPF_STORAGE_NONE = 0x0 + SK_DIAG_BPF_STORAGE_PAD = 0x1 + SK_DIAG_BPF_STORAGE_MAP_ID = 0x2 + SK_DIAG_BPF_STORAGE_MAP_VALUE = 0x3 +) + +type SockDiagReq struct { + Family uint8 + Protocol uint8 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 438a30aff..fd402da43 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -477,14 +477,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index adceca355..eb7a5e186 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -492,15 +492,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index eeaa00a37..d78ac108b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -470,15 +470,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 6739aa91d..cd06d47f1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -471,15 +471,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 
Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 9920ef631..2f28fe26c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -472,15 +472,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 2923b799a..71d6cac2f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index ce2750ee4..8596d4535 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -474,15 +474,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 3038811d7..cd60ea186 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -474,15 +474,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index efc6fed18..b0ae420c4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 9a654b75a..835972875 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -482,15 +482,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 40d358e33..69eb6a5c6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -481,15 +481,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 148c6ceb8..5f583cb62 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -481,15 +481,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 72ba81543..15adc0414 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -499,15 +499,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 71e765508..cf3ce9003 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -495,15 +495,6 @@ const ( BLKPG = 0x1269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 4abbdb9de..590b56739 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -476,15 +476,6 @@ const ( BLKPG = 0x20001269 ) -type XDPUmemReg struct { - Addr uint64 - Len uint64 - Size uint32 - Headroom uint32 - Flags uint32 - _ [4]byte -} - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index 54f31be63..d9a13af46 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -25,10 +25,13 @@ const ( SizeofIPv6Mreq = 20 SizeofICMPv6Filter = 32 SizeofIPv6MTUInfo = 32 + SizeofInet4Pktinfo = 8 + SizeofInet6Pktinfo = 20 SizeofLinger = 8 SizeofSockaddrInet4 = 16 SizeofSockaddrInet6 = 28 SizeofTCPInfo = 0x68 + SizeofUcred = 12 ) type ( @@ -69,12 +72,17 @@ type Utimbuf struct { } type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte + Sysname [16]byte + Nodename [32]byte + Release [8]byte + Version [8]byte + Machine [16]byte +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 } type RawSockaddrInet4 struct { @@ -325,7 +333,7 @@ type Statvfs_t struct { } type Statfs_t struct { - Type uint32 + Type uint64 Bsize uint64 Blocks uint64 Bfree uint64 @@ -336,6 +344,7 @@ type Statfs_t struct { Namelen uint64 Frsize uint64 Flags uint64 + _ [4]uint64 } type direntLE struct { @@ -412,3 +421,126 @@ type W_Mntent struct { Quiesceowner [8]byte _ [38]byte } + +type EpollEvent struct { + Events uint32 + _ int32 + Fd int32 + Pad int32 +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 + Name string +} + +const ( + SizeofInotifyEvent = 0x10 +) + +type ConsMsg2 struct { + Cm2Format uint16 + Cm2R1 uint16 + Cm2Msglength uint32 + Cm2Msg *byte + Cm2R2 
[4]byte
+	Cm2R3           [4]byte
+	Cm2Routcde      *uint32
+	Cm2Descr        *uint32
+	Cm2Msgflag      uint32
+	Cm2Token        uint32
+	Cm2Msgid        *uint32
+	Cm2R4           [4]byte
+	Cm2DomToken     uint32
+	Cm2DomMsgid     *uint32
+	Cm2ModCartptr   *byte
+	Cm2ModConsidptr *byte
+	Cm2MsgCart      [8]byte
+	Cm2MsgConsid    [4]byte
+	Cm2R5           [12]byte
+}
+
+const (
+	CC_modify        = 1
+	CC_stop          = 2
+	CONSOLE_FORMAT_2 = 2
+	CONSOLE_FORMAT_3 = 3
+	CONSOLE_HRDCPY   = 0x80000000
+)
+
+type OpenHow struct {
+	Flags   uint64
+	Mode    uint64
+	Resolve uint64
+}
+
+const SizeofOpenHow = 0x18
+
+const (
+	RESOLVE_CACHED        = 0x20
+	RESOLVE_BENEATH       = 0x8
+	RESOLVE_IN_ROOT       = 0x10
+	RESOLVE_NO_MAGICLINKS = 0x2
+	RESOLVE_NO_SYMLINKS   = 0x4
+	RESOLVE_NO_XDEV       = 0x1
+)
+
+type Siginfo struct {
+	Signo int32
+	Errno int32
+	Code  int32
+	Pid   int32
+	Uid   uint32
+	_     [44]byte
+}
+
+type SysvIpcPerm struct {
+	Uid  uint32
+	Gid  uint32
+	Cuid uint32
+	Cgid uint32
+	Mode int32
+}
+
+type SysvShmDesc struct {
+	Perm   SysvIpcPerm
+	_      [4]byte
+	Lpid   int32
+	Cpid   int32
+	Nattch uint32
+	_      [4]byte
+	_      [4]byte
+	_      [4]byte
+	_      int32
+	_      uint8
+	_      uint8
+	_      uint16
+	_      *byte
+	Segsz  uint64
+	Atime  Time_t
+	Dtime  Time_t
+	Ctime  Time_t
+}
+
+type SysvShmDesc64 struct {
+	Perm   SysvIpcPerm
+	_      [4]byte
+	Lpid   int32
+	Cpid   int32
+	Nattch uint32
+	_      [4]byte
+	_      [4]byte
+	_      [4]byte
+	_      int32
+	_      byte
+	_      uint8
+	_      uint16
+	_      *byte
+	Segsz  uint64
+	Atime  int64
+	Dtime  int64
+	Ctime  int64
+}
diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go
index ce2d713d6..16f90560a 100644
--- a/vendor/golang.org/x/sys/windows/aliases.go
+++ b/vendor/golang.org/x/sys/windows/aliases.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build windows && go1.9
+//go:build windows
 
 package windows
diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s
deleted file mode 100644
index ba64caca5..000000000
--- a/vendor/golang.org/x/sys/windows/empty.s
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.12
-
-// This file is here to allow bodyless functions with go:linkname for Go 1.11
-// and earlier (see https://golang.org/issue/23311).
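Among the x/sys changes above, the per-architecture XDPUmemReg definitions (each carrying a Size field) are removed in favor of a single definition in ztypes_linux.go, which renames Size to Chunk_size and adds a new Tx_metadata_len field. Below is a rough linux/amd64 sketch of registering an AF_XDP UMEM with the updated struct; the buffer geometry is made up, and the raw setsockopt call is an assumption, since x/sys does not appear to ship a typed helper for XDP_UMEM_REG:

```go
package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	// AF_XDP sockets require CAP_NET_RAW (typically root).
	fd, err := unix.Socket(unix.AF_XDP, unix.SOCK_RAW, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Anonymous, page-aligned UMEM area: 64 chunks of 2048 bytes
	// (sizes chosen for illustration only).
	const chunkSize, numChunks = 2048, 64
	mem, err := unix.Mmap(-1, 0, chunkSize*numChunks,
		unix.PROT_READ|unix.PROT_WRITE,
		unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
	if err != nil {
		panic(err)
	}

	reg := unix.XDPUmemReg{
		Addr:       uint64(uintptr(unsafe.Pointer(&mem[0]))),
		Len:        uint64(len(mem)),
		Chunk_size: chunkSize,
		Headroom:   0,
		// Tx_metadata_len is the field this update introduces; leaving it
		// zero requests no per-frame TX metadata headroom.
	}

	// Registration goes through the raw syscall interface.
	if _, _, errno := unix.Syscall6(unix.SYS_SETSOCKOPT,
		uintptr(fd), uintptr(unix.SOL_XDP), uintptr(unix.XDP_UMEM_REG),
		uintptr(unsafe.Pointer(&reg)), unsafe.Sizeof(reg), 0); errno != 0 {
		panic(errno)
	}
	fmt.Println("UMEM registered")
}
```

On the kernel side, a non-zero Tx_metadata_len reserves space in front of each TX frame for AF_XDP TX metadata; zero keeps the pre-existing behavior.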
diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go
index 26be94a8a..97651b5bd 100644
--- a/vendor/golang.org/x/sys/windows/security_windows.go
+++ b/vendor/golang.org/x/sys/windows/security_windows.go
@@ -68,6 +68,7 @@ type UserInfo10 struct {
 //sys	NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo
 //sys	NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation
 //sys	NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree
+//sys	NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) = netapi32.NetUserEnum
 
 const (
 	// do not reorder
@@ -893,7 +894,7 @@ type ACL struct {
 	aclRevision byte
 	sbz1        byte
 	aclSize     uint16
-	aceCount    uint16
+	AceCount    uint16
 	sbz2        uint16
 }
 
@@ -1086,6 +1087,27 @@ type EXPLICIT_ACCESS struct {
 	Trustee          TRUSTEE
 }
 
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header
+type ACE_HEADER struct {
+	AceType  uint8
+	AceFlags uint8
+	AceSize  uint16
+}
+
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace
+type ACCESS_ALLOWED_ACE struct {
+	Header   ACE_HEADER
+	Mask     ACCESS_MASK
+	SidStart uint32
+}
+
+const (
+	// Constants for AceType
+	// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header
+	ACCESS_ALLOWED_ACE_TYPE = 0
+	ACCESS_DENIED_ACE_TYPE  = 1
+)
+
 // This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions.
 type TrusteeValue uintptr
 
@@ -1157,6 +1179,7 @@ type OBJECTS_AND_NAME struct {
 //sys	makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD
 //sys	setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW
+//sys	GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) = advapi32.GetAce
 
 // Control returns the security descriptor control bits.
 func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) {
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 6395a031d..6525c62f3 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -165,6 +165,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys	CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW
 //sys	CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW
 //sys	ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error)
+//sys	DisconnectNamedPipe(pipe Handle) (err error)
 //sys	GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error)
 //sys	GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
 //sys	SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState
@@ -348,8 +349,19 @@
 //sys	SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost
 //sys	GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32)
 //sys	SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error)
+//sys	ClearCommBreak(handle Handle) (err error)
+//sys	ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error)
+//sys	EscapeCommFunction(handle Handle, dwFunc uint32) (err error)
+//sys	GetCommState(handle Handle, lpDCB *DCB) (err error)
+//sys	GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error)
 //sys	GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error)
+//sys	PurgeComm(handle Handle, dwFlags uint32) (err error)
+//sys	SetCommBreak(handle Handle) (err error)
+//sys	SetCommMask(handle Handle, dwEvtMask uint32) (err error)
+//sys	SetCommState(handle Handle, lpDCB *DCB) (err error)
 //sys	SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error)
+//sys	SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error)
+//sys	WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error)
 //sys	GetActiveProcessorCount(groupNumber uint16) (ret uint32)
 //sys	GetMaximumProcessorCount(groupNumber uint16) (ret uint32)
 //sys	EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows
@@ -1834,3 +1846,73 @@ func ResizePseudoConsole(pconsole Handle, size Coord) error {
 	// accept arguments that can be casted to uintptr, and Coord can't.
 	return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size))))
 }
+
+// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb.
+const ( + CBR_110 = 110 + CBR_300 = 300 + CBR_600 = 600 + CBR_1200 = 1200 + CBR_2400 = 2400 + CBR_4800 = 4800 + CBR_9600 = 9600 + CBR_14400 = 14400 + CBR_19200 = 19200 + CBR_38400 = 38400 + CBR_57600 = 57600 + CBR_115200 = 115200 + CBR_128000 = 128000 + CBR_256000 = 256000 + + DTR_CONTROL_DISABLE = 0x00000000 + DTR_CONTROL_ENABLE = 0x00000010 + DTR_CONTROL_HANDSHAKE = 0x00000020 + + RTS_CONTROL_DISABLE = 0x00000000 + RTS_CONTROL_ENABLE = 0x00001000 + RTS_CONTROL_HANDSHAKE = 0x00002000 + RTS_CONTROL_TOGGLE = 0x00003000 + + NOPARITY = 0 + ODDPARITY = 1 + EVENPARITY = 2 + MARKPARITY = 3 + SPACEPARITY = 4 + + ONESTOPBIT = 0 + ONE5STOPBITS = 1 + TWOSTOPBITS = 2 +) + +// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction. +const ( + SETXOFF = 1 + SETXON = 2 + SETRTS = 3 + CLRRTS = 4 + SETDTR = 5 + CLRDTR = 6 + SETBREAK = 8 + CLRBREAK = 9 +) + +// PurgeComm constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm. +const ( + PURGE_TXABORT = 0x0001 + PURGE_RXABORT = 0x0002 + PURGE_TXCLEAR = 0x0004 + PURGE_RXCLEAR = 0x0008 +) + +// SetCommMask constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask. +const ( + EV_RXCHAR = 0x0001 + EV_RXFLAG = 0x0002 + EV_TXEMPTY = 0x0004 + EV_CTS = 0x0008 + EV_DSR = 0x0010 + EV_RLSD = 0x0020 + EV_BREAK = 0x0040 + EV_ERR = 0x0080 + EV_RING = 0x0100 +) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 359780f6a..d8cb71db0 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -3380,3 +3380,27 @@ type BLOB struct { Size uint32 BlobData *byte } + +type ComStat struct { + Flags uint32 + CBInQue uint32 + CBOutQue uint32 +} + +type DCB struct { + DCBlength uint32 + BaudRate uint32 + Flags uint32 + wReserved uint16 + XonLim uint16 + XoffLim uint16 + ByteSize uint8 + Parity uint8 + StopBits uint8 + XonChar byte + XoffChar byte + ErrorChar byte + EofChar byte + EvtChar byte + wReserved1 uint16 +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index e8791c82c..eba761018 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -91,6 +91,7 @@ var ( procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") procEqualSid = modadvapi32.NewProc("EqualSid") procFreeSid = modadvapi32.NewProc("FreeSid") + procGetAce = modadvapi32.NewProc("GetAce") procGetLengthSid = modadvapi32.NewProc("GetLengthSid") procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") @@ -188,6 +189,8 @@ var ( procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procClearCommBreak = modkernel32.NewProc("ClearCommBreak") + procClearCommError = modkernel32.NewProc("ClearCommError") procCloseHandle = modkernel32.NewProc("CloseHandle") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") @@ -212,7 +215,9 @@ var ( procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") 
procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procEscapeCommFunction = modkernel32.NewProc("EscapeCommFunction") procExitProcess = modkernel32.NewProc("ExitProcess") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") @@ -236,6 +241,8 @@ var ( procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetACP = modkernel32.NewProc("GetACP") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procGetCommModemStatus = modkernel32.NewProc("GetCommModemStatus") + procGetCommState = modkernel32.NewProc("GetCommState") procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") @@ -322,6 +329,7 @@ var ( procProcess32NextW = modkernel32.NewProc("Process32NextW") procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") procPulseEvent = modkernel32.NewProc("PulseEvent") + procPurgeComm = modkernel32.NewProc("PurgeComm") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") @@ -335,6 +343,9 @@ var ( procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommBreak = modkernel32.NewProc("SetCommBreak") + procSetCommMask = modkernel32.NewProc("SetCommMask") + procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") @@ -342,7 +353,6 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -351,6 +361,7 @@ var ( procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") @@ -361,6 +372,7 @@ var ( procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSetupComm = modkernel32.NewProc("SetupComm") procSizeofResource = modkernel32.NewProc("SizeofResource") procSleepEx = modkernel32.NewProc("SleepEx") procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") @@ -379,6 +391,7 @@ var ( procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = 
modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") + procWaitCommEvent = modkernel32.NewProc("WaitCommEvent") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -389,6 +402,7 @@ var ( procTransmitFile = modmswsock.NewProc("TransmitFile") procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetUserEnum = modnetapi32.NewProc("NetUserEnum") procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") procNtCreateFile = modntdll.NewProc("NtCreateFile") procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") @@ -1211,6 +1225,14 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE return } +func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) { + r0, _, _ := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce))) + if r0 == 0 { + ret = GetLastError() + } + return +} + func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) { r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor))) if r1 == 0 { @@ -1641,6 +1663,22 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { return } +func ClearCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CloseHandle(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -1845,6 +1883,14 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff return } +func DisconnectNamedPipe(pipe Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { var _p0 uint32 if bInheritHandle { @@ -1857,6 +1903,14 @@ func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP return } +func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitProcess(exitcode uint32) { syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return @@ -2058,6 +2112,22 @@ func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { return } +func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + if r1 == 0 { + err = errnoErr(e1) + 
} + return +} + +func GetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2810,6 +2880,14 @@ func PulseEvent(event Handle) (err error) { return } +func PurgeComm(handle Handle, dwFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) @@ -2924,6 +3002,30 @@ func ResumeThread(thread Handle) (ret uint32, err error) { return } +func SetCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2989,14 +3091,6 @@ func SetEndOfFile(handle Handle) (err error) { return } -func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { @@ -3060,6 +3154,14 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { @@ -3145,6 +3247,14 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) size = 
uint32(r0) @@ -3291,6 +3401,14 @@ func WTSGetActiveConsoleSessionId() (sessionID uint32) { return } +func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { @@ -3378,6 +3496,14 @@ func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (nete return } +func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) { + r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) if r0 != 0 { diff --git a/vendor/modules.txt b/vendor/modules.txt index 8b532001e..e60454e67 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -55,7 +55,7 @@ github.com/alecthomas/template/parse # github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 ## explicit; go 1.15 github.com/alecthomas/units -# github.com/aws/aws-sdk-go v1.50.31 +# github.com/aws/aws-sdk-go v1.55.1 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn @@ -176,7 +176,7 @@ github.com/jessevdk/go-flags # github.com/jmespath/go-jmespath v0.4.0 ## explicit; go 1.14 github.com/jmespath/go-jmespath -# github.com/klauspost/compress v1.17.7 +# github.com/klauspost/compress v1.17.8 ## explicit; go 1.20 github.com/klauspost/compress github.com/klauspost/compress/flate @@ -207,8 +207,8 @@ github.com/mitchellh/go-homedir github.com/moby/docker-image-spec/specs-go/v1 # github.com/moby/term v0.5.0 ## explicit; go 1.18 -# github.com/mongodb/mongo-tools v0.0.0-20231117185435-bf0bef9e9f19 -## explicit; go 1.20 +# github.com/mongodb/mongo-tools v0.0.0-20240723193119-837c2bc263f4 +## explicit; go 1.21 github.com/mongodb/mongo-tools/common/archive github.com/mongodb/mongo-tools/common/auth github.com/mongodb/mongo-tools/common/bsonutil @@ -228,7 +228,7 @@ github.com/mongodb/mongo-tools/common/util github.com/mongodb/mongo-tools/mongodump github.com/mongodb/mongo-tools/mongorestore github.com/mongodb/mongo-tools/mongorestore/ns -# github.com/montanaflynn/stats v0.6.6 +# github.com/montanaflynn/stats v0.7.1 ## explicit; go 1.13 github.com/montanaflynn/stats # github.com/morikuni/aec v1.0.0 @@ -259,8 +259,8 @@ github.com/xdg-go/stringprep # github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a ## explicit; go 1.12 github.com/youmark/pkcs8 -# go.mongodb.org/mongo-driver v1.13.0 -## explicit; go 1.13 +# go.mongodb.org/mongo-driver v1.16.0 +## explicit; go 1.18 go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec go.mongodb.org/mongo-driver/bson/bsonoptions @@ -336,42 +336,42 @@ 
go.opentelemetry.io/otel/metric/embedded ## explicit; go 1.20 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded -# golang.org/x/crypto v0.21.0 -## explicit; go 1.18 +# golang.org/x/crypto v0.25.0 +## explicit; go 1.20 golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b golang.org/x/crypto/ocsp golang.org/x/crypto/pbkdf2 golang.org/x/crypto/scrypt golang.org/x/crypto/ssh/terminal -# golang.org/x/exp v0.0.0-20230321023759-10a507213a29 -## explicit; go 1.18 +# golang.org/x/exp v0.0.0-20240529005216-23cca8864a10 +## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/mod v0.16.0 +# golang.org/x/mod v0.19.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.23.0 +# golang.org/x/net v0.25.0 ## explicit; go 1.18 golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/publicsuffix -# golang.org/x/sync v0.6.0 +# golang.org/x/sync v0.7.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/singleflight -# golang.org/x/sys v0.18.0 +# golang.org/x/sys v0.22.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.18.0 +# golang.org/x/term v0.22.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.14.0 +# golang.org/x/text v0.16.0 ## explicit; go 1.18 golang.org/x/text/secure/bidirule golang.org/x/text/transform From afdb48c63db221d6db80f9680857452d8e4c8d70 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 24 Jul 2024 12:06:11 +0200 Subject: [PATCH 133/203] Change explicit priority check It has to have at least one priority member specified in order to be marked as explicit priority. --- pbm/prio/priority.go | 2 +- pbm/prio/priority_test.go | 12 ------------ 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/pbm/prio/priority.go b/pbm/prio/priority.go index 231cbd694..b22d841ef 100644 --- a/pbm/prio/priority.go +++ b/pbm/prio/priority.go @@ -89,7 +89,7 @@ func CalcPriorityForAgent( cfgPrio config.Priority, coeffRules map[string]float64, ) float64 { - if cfgPrio != nil || len(cfgPrio) > 0 { + if len(cfgPrio) > 0 { // apply config level priorities return explicitPrioCalc(agent, cfgPrio) } diff --git a/pbm/prio/priority_test.go b/pbm/prio/priority_test.go index c08e42dec..cd4007f06 100644 --- a/pbm/prio/priority_test.go +++ b/pbm/prio/priority_test.go @@ -227,18 +227,6 @@ func TestCalcNodesPriority(t *testing.T) { {"rs01"}, }, }, - { - desc: "all defaults", - agents: []topo.AgentStat{ - newP("rs0", "rs01"), - newS("rs0", "rs02"), - newS("rs0", "rs03"), - }, - expPrio: config.Priority{}, - res: [][]string{ - {"rs01", "rs02", "rs03"}, - }, - }, { desc: "priorities are not defined -> implicit are applied", agents: []topo.AgentStat{ From bbeaf7a976c6a32307195306e508b033745b376f Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 24 Jul 2024 12:10:07 +0200 Subject: [PATCH 134/203] Remove unnecessary e2e test file --- e2e-tests/docker/conf/p2.yaml | 12 ------------ 1 file changed, 12 deletions(-) delete mode 100644 e2e-tests/docker/conf/p2.yaml diff --git a/e2e-tests/docker/conf/p2.yaml b/e2e-tests/docker/conf/p2.yaml deleted file mode 100644 index b8946f518..000000000 --- a/e2e-tests/docker/conf/p2.yaml +++ /dev/null @@ -1,12 +0,0 @@ -storage: - type: s3 - s3: - endpointUrl: http://minio:9000 - bucket: bcp - prefix: pbme2etest - credentials: - access-key-id: "minio1234" - secret-access-key: "minio1234" -pitr: - enabled: true - From 
b7682f589166e847cb3aec64b5992d476348c3a9 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Thu, 25 Jul 2024 21:42:06 +0200
Subject: [PATCH 135/203] Fix handling for pitr streaming error

OpMovedError doesn't need special handling anymore.
---
 cmd/pbm-agent/pitr.go | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go
index 58bb302d0..46ec75893 100644
--- a/cmd/pbm-agent/pitr.go
+++ b/cmd/pbm-agent/pitr.go
@@ -345,15 +345,11 @@ func (a *Agent) pitr(ctx context.Context) error {
 		cfg.PITR.CompressionLevel,
 		cfg.Backup.Timeouts)
 	if streamErr != nil {
-		out := l.Error
-		if errors.Is(streamErr, slicer.OpMovedError{}) {
-			out = l.Info
-		}
-		retErr := errors.Wrap(streamErr, "streaming oplog: %v")
+		l.Error("streaming oplog: %v", streamErr)
+		retErr := errors.Wrap(streamErr, "streaming oplog")
 		if err := oplog.SetErrorRSStatus(ctx, a.leadConn, nodeInfo.SetName, nodeInfo.Me, retErr.Error()); err != nil {
-			l.Error("setting RS status to status error, err = %v", err)
+			l.Error("setting RS status to StatusError: %v", err)
 		}
-		out(retErr.Error())
 	}
 
 	if err := lck.Release(); err != nil {

From 72dff78ac4b1d61a97f121e9b9e57104eb76882b Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Thu, 25 Jul 2024 21:50:30 +0200
Subject: [PATCH 136/203] Inject starting node info into slicer

... instead of querying a new one inside the slicer
---
 cmd/pbm-agent/pitr.go | 7 +++++--
 pbm/slicer/slicer.go  | 8 ++------
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go
index 46ec75893..ac664d08d 100644
--- a/cmd/pbm-agent/pitr.go
+++ b/cmd/pbm-agent/pitr.go
@@ -338,12 +338,15 @@ func (a *Agent) pitr(ctx context.Context) error {
 		}
 	}()
 
-	streamErr := s.Stream(ctx,
+	streamErr := s.Stream(
+		ctx,
+		nodeInfo,
 		stopC,
 		w,
 		cfg.PITR.Compression,
 		cfg.PITR.CompressionLevel,
-		cfg.Backup.Timeouts)
+		cfg.Backup.Timeouts,
+	)
 	if streamErr != nil {
 		l.Error("streaming oplog: %v", streamErr)
 		retErr := errors.Wrap(streamErr, "streaming oplog")
diff --git a/pbm/slicer/slicer.go b/pbm/slicer/slicer.go
index 5cf149d63..82c27d38e 100644
--- a/pbm/slicer/slicer.go
+++ b/pbm/slicer/slicer.go
@@ -277,6 +277,7 @@ const LogStartMsg = "start_ok"
 // Stream streaming (saving) chunks of the oplog to the given storage
 func (s *Slicer) Stream(
 	ctx context.Context,
+	startingNode *topo.NodeInfo,
 	stopC <-chan struct{},
 	backupSig <-chan ctrl.OPID,
 	compression compress.CompressionType,
@@ -292,11 +293,6 @@ func (s *Slicer) Stream(
 	tk := time.NewTicker(cspan)
 	defer tk.Stop()
 
-	nodeInfo, err := topo.GetNodeInfoExt(ctx, s.node)
-	if err != nil {
-		return errors.Wrap(err, "get NodeInfo data")
-	}
-
 	// early check for the log sufficiency to display error
 	// before the timer clicks (not to wait minutes to report)
 	ok, err := s.oplog.IsSufficient(s.lastTS)
@@ -392,7 +388,7 @@ func (s *Slicer) Stream(
 		if ld.Type != ctrl.CmdPITR {
 			return errors.Errorf("another operation is running: %v", ld)
 		}
-		if ld.Node != nodeInfo.Me {
+		if ld.Node != startingNode.Me {
 			return OpMovedError{ld.Node}
 		}
 		if sliceTo.IsZero() {
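To see the shape of this change in isolation, here is a minimal runnable sketch. NodeInfo and OpMovedError are simplified stand-ins for PBM's topo.NodeInfo and slicer.OpMovedError, and stream() models only the lock-ownership check that Stream performs against the injected startingNode; it is not PBM's actual code.

package main

import (
	"errors"
	"fmt"
)

// NodeInfo stands in for topo.NodeInfo; only the field the check needs.
type NodeInfo struct {
	Me string // host:port of this node
}

// OpMovedError mirrors the idea of slicer.OpMovedError: the PITR lock is
// now held by a different replica-set member, so this slicer must stop.
type OpMovedError struct{ Node string }

func (e OpMovedError) Error() string {
	return fmt.Sprintf("pitr slicing moved to node %s", e.Node)
}

// stream receives the starting node's info from the caller (as Stream now
// receives startingNode) instead of re-querying it inside the loop, so the
// comparison is always against the node that actually began slicing.
func stream(startingNode *NodeInfo, lockHolder string) error {
	if lockHolder != startingNode.Me {
		return OpMovedError{Node: lockHolder}
	}
	return nil // keep slicing
}

func main() {
	start := &NodeInfo{Me: "node1:27017"}

	// Lock still held by the starting node: no error, keep slicing.
	fmt.Println(stream(start, "node1:27017"))

	// Lock taken over by another member: stop with OpMovedError.
	var moved OpMovedError
	if err := stream(start, "node2:27017"); errors.As(err, &moved) {
		fmt.Println("moved to:", moved.Node)
	}
}

The design point is that the ownership comparison stays pinned to the node that acquired the PITR lock when slicing began, rather than to whatever a fresh topology query returns mid-stream.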
"github.com/percona/percona-backup-mongodb/pbm/topo" ) -const defaultScore = 1.0 +const ( + defaultScore = 1.0 + scoreForPrimary = defaultScore / 2 + scoreForSecondary = defaultScore * 1 + scoreForHidden = defaultScore * 2 +) // NodesPriority groups nodes by priority according to // provided scores. Basically nodes are grouped and sorted by @@ -106,9 +111,9 @@ func implicitPrioCalc(a topo.AgentStat, rule map[string]float64) float64 { if coeff, ok := rule[a.Node]; ok && rule != nil { return defaultScore * coeff } else if a.State == defs.NodeStatePrimary { - return defaultScore / 2 + return scoreForPrimary } else if a.Hidden { - return defaultScore * 2 + return scoreForHidden } return defaultScore } From 95e1d7c4dd4b95f694f21fb29ff47674fb6eb43a Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Thu, 25 Jul 2024 22:25:37 +0200 Subject: [PATCH 138/203] Add priority calculation based on node info --- pbm/prio/priority.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/pbm/prio/priority.go b/pbm/prio/priority.go index b9d41ffe5..2dcc7d532 100644 --- a/pbm/prio/priority.go +++ b/pbm/prio/priority.go @@ -104,6 +104,18 @@ func CalcPriorityForAgent( return implicitPrioCalc(agent, coeffRules) } +// CalcPriorityForNode returns implicit priority based on node info. +func CalcPriorityForNode(node *topo.NodeInfo) float64 { + if node.IsPrimary { + return scoreForPrimary + } else if node.Secondary { + return scoreForSecondary + } else if node.Hidden { + return scoreForHidden + } + return defaultScore +} + // implicitPrioCalc provides priority calculation based on topology rules. // Instead of using explicitly specified priority numbers, topology rules are // applied for primary, secondary and hidden member. From 45d5e1205e9e6056c33426df2b257006f809480f Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Fri, 26 Jul 2024 11:04:57 +0200 Subject: [PATCH 139/203] Add monitoring prio logic within slicer --- cmd/pbm-agent/pitr.go | 4 ++++ pbm/slicer/slicer.go | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index ac664d08d..4a2ef79f7 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -338,6 +338,9 @@ func (a *Agent) pitr(ctx context.Context) error { } }() + // monitor implicit priority changes (secondary->primary) + monitorPrio := cfg.PITR.Priority == nil + streamErr := s.Stream( ctx, nodeInfo, @@ -346,6 +349,7 @@ func (a *Agent) pitr(ctx context.Context) error { cfg.PITR.Compression, cfg.PITR.CompressionLevel, cfg.Backup.Timeouts, + monitorPrio, ) if streamErr != nil { l.Error("streaming oplog: %v", streamErr) diff --git a/pbm/slicer/slicer.go b/pbm/slicer/slicer.go index 82c27d38e..c651d86ee 100644 --- a/pbm/slicer/slicer.go +++ b/pbm/slicer/slicer.go @@ -19,6 +19,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/lock" "github.com/percona/percona-backup-mongodb/pbm/log" "github.com/percona/percona-backup-mongodb/pbm/oplog" + "github.com/percona/percona-backup-mongodb/pbm/prio" "github.com/percona/percona-backup-mongodb/pbm/restore" "github.com/percona/percona-backup-mongodb/pbm/storage" "github.com/percona/percona-backup-mongodb/pbm/topo" @@ -283,6 +284,7 @@ func (s *Slicer) Stream( compression compress.CompressionType, level *int, timeouts *config.BackupTimeouts, + monitorPrio bool, ) error { if s.lastTS.T == 0 { return errors.New("no starting point defined") @@ -391,6 +393,12 @@ func (s *Slicer) Stream( if ld.Node != startingNode.Me { return OpMovedError{ld.Node} } + if monitorPrio && + 
From 45d5e1205e9e6056c33426df2b257006f809480f Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Fri, 26 Jul 2024 11:04:57 +0200
Subject: [PATCH 139/203] Add monitoring prio logic within slicer

---
 cmd/pbm-agent/pitr.go | 4 ++++
 pbm/slicer/slicer.go  | 8 ++++++++
 2 files changed, 12 insertions(+)

diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go
index ac664d08d..4a2ef79f7 100644
--- a/cmd/pbm-agent/pitr.go
+++ b/cmd/pbm-agent/pitr.go
@@ -338,6 +338,9 @@ func (a *Agent) pitr(ctx context.Context) error {
 		}
 	}()
 
+	// monitor implicit priority changes (secondary->primary)
+	monitorPrio := cfg.PITR.Priority == nil
+
 	streamErr := s.Stream(
 		ctx,
 		nodeInfo,
@@ -346,6 +349,7 @@ func (a *Agent) pitr(ctx context.Context) error {
 		cfg.PITR.Compression,
 		cfg.PITR.CompressionLevel,
 		cfg.Backup.Timeouts,
+		monitorPrio,
 	)
 	if streamErr != nil {
 		l.Error("streaming oplog: %v", streamErr)
diff --git a/pbm/slicer/slicer.go b/pbm/slicer/slicer.go
index 82c27d38e..c651d86ee 100644
--- a/pbm/slicer/slicer.go
+++ b/pbm/slicer/slicer.go
@@ -19,6 +19,7 @@ import (
 	"github.com/percona/percona-backup-mongodb/pbm/lock"
 	"github.com/percona/percona-backup-mongodb/pbm/log"
 	"github.com/percona/percona-backup-mongodb/pbm/oplog"
+	"github.com/percona/percona-backup-mongodb/pbm/prio"
 	"github.com/percona/percona-backup-mongodb/pbm/restore"
 	"github.com/percona/percona-backup-mongodb/pbm/storage"
 	"github.com/percona/percona-backup-mongodb/pbm/topo"
@@ -283,6 +284,7 @@ func (s *Slicer) Stream(
 	compression compress.CompressionType,
 	level *int,
 	timeouts *config.BackupTimeouts,
+	monitorPrio bool,
 ) error {
 	if s.lastTS.T == 0 {
 		return errors.New("no starting point defined")
@@ -391,6 +393,12 @@ func (s *Slicer) Stream(
 		if ld.Node != startingNode.Me {
 			return OpMovedError{ld.Node}
 		}
+		if monitorPrio &&
+			prio.CalcPriorityForNode(startingNode) > prio.CalcPriorityForNode(ninf) {
+			return errors.Errorf("node priority has changed %.1f->%.1f",
+				prio.CalcPriorityForNode(startingNode),
+				prio.CalcPriorityForNode(ninf))
+		}
 		if sliceTo.IsZero() {
 			majority, err := topo.IsWriteMajorityRequested(ctx, s.node, s.leadClient.MongoOptions().WriteConcern)
 			if err != nil {

From 85bb1610653a7a536b1a4c527ef9efca1399e827 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Fri, 26 Jul 2024 11:08:29 +0200
Subject: [PATCH 140/203] Increase error resolution in case of catchup/config
 error

---
 cmd/pbm-agent/pitr.go | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go
index 4a2ef79f7..a762b0c8d 100644
--- a/cmd/pbm-agent/pitr.go
+++ b/cmd/pbm-agent/pitr.go
@@ -277,7 +277,8 @@ func (a *Agent) pitr(ctx context.Context) error {
 		if err := lck.Release(); err != nil {
 			l.Error("release lock: %v", err)
 		}
-		return errors.Wrap(err, "unable to get storage configuration")
+		err = errors.Wrap(err, "unable to get storage configuration")
+		return err
 	}
 
 	s := slicer.NewSlicer(a.brief.SetName, a.leadConn, a.nodeConn, stg, cfg, log.FromContext(ctx))
@@ -292,7 +293,8 @@ func (a *Agent) pitr(ctx context.Context) error {
 		if err := lck.Release(); err != nil {
 			l.Error("release lock: %v", err)
 		}
-		return errors.Wrap(err, "catchup")
+		err = errors.Wrap(err, "catchup")
+		return err
 	}
 
 	go func() {

From cb8f0345b0e9f3da1f1108f9f0f0b1bc52f41271 Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Fri, 26 Jul 2024 11:45:42 +0200
Subject: [PATCH 141/203] Clean up debugging logs from the PITR noise

---
 cmd/pbm-agent/pitr.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go
index a762b0c8d..f5ffd04f3 100644
--- a/cmd/pbm-agent/pitr.go
+++ b/cmd/pbm-agent/pitr.go
@@ -203,7 +203,6 @@ func (a *Agent) pitr(ctx context.Context) error {
 		return errors.Wrap(err, "check if already run")
 	}
 	if !moveOn {
-		l.Debug("pitr running on another RS member")
 		return nil
 	}

From 33a748c1371954085538e6a7bf3d85c9e6d580b6 Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Fri, 26 Jul 2024 11:57:49 +0200
Subject: [PATCH 142/203] [PBM-1353] print error if any

---
 cmd/pbm/profile.go |  6 +++---
 pbm/log/history.go | 23 +++++++++++++++++++++++
 sdk/sdk.go         | 25 +++++++++++++++++--------
 3 files changed, 43 insertions(+), 11 deletions(-)

diff --git a/cmd/pbm/profile.go b/cmd/pbm/profile.go
index 4f9ceaeb5..7aae35b75 100644
--- a/cmd/pbm/profile.go
+++ b/cmd/pbm/profile.go
@@ -120,7 +120,7 @@ func handleAddConfigProfile(
 		if err != nil {
 			return nil, errors.Wrap(err, "clear profile list")
 		}
-		err = sdk.WaitForRemoveProfile(ctx, pbm, cid)
+		err = sdk.WaitForCommandWithErrorLog(ctx, pbm, cid)
 		if err != nil {
 			return nil, errors.Wrap(err, "wait")
 		}
@@ -130,7 +130,7 @@ func handleAddConfigProfile(
 	if err != nil {
 		return nil, errors.Wrap(err, "add config profile")
 	}
-	err = sdk.WaitForAddProfile(ctx, pbm, cid)
+	err = sdk.WaitForCommandWithErrorLog(ctx, pbm, cid)
 	if err != nil {
 		return nil, errors.Wrap(err, "wait")
 	}
@@ -173,7 +173,7 @@ func handleRemoveConfigProfile(
 	if err != nil {
 		return nil, errors.Wrap(err, "sdk: remove config profile")
 	}
-	err = sdk.WaitForRemoveProfile(ctx, pbm, cid)
+	err = sdk.WaitForCommandWithErrorLog(ctx, pbm, cid)
 	if err != nil {
 		return nil, errors.Wrap(err, "wait")
 	}
diff --git a/pbm/log/history.go b/pbm/log/history.go
index d0205745a..c95cca9fc 100644
--- a/pbm/log/history.go
+++ b/pbm/log/history.go
@@ -9,6 +9,7 @@ import (
"go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" "github.com/percona/percona-backup-mongodb/pbm/connect" @@ -181,6 +182,28 @@ func fetch( return e, nil } +func CommandLastError(ctx context.Context, cc connect.Client, cid string) (string, error) { + filter := buildLogFilter(&LogRequest{LogKeys: LogKeys{OPID: cid, Severity: Error}}, false) + opts := options.FindOne(). + SetSort(bson.D{{"$natural", -1}}). + SetProjection(bson.D{{"msg", 1}}) + res := cc.LogCollection().FindOne(ctx, filter, opts) + if err := res.Err(); err != nil { + if errors.Is(err, mongo.ErrNoDocuments) { + return "", nil + } + return "", errors.Wrap(err, "find one") + } + + l := Entry{} + err := res.Decode(&l) + if err != nil { + return "", errors.Wrap(err, "message decode") + } + + return l.Msg, nil +} + func Follow( ctx context.Context, cc connect.Client, diff --git a/sdk/sdk.go b/sdk/sdk.go index cfb54cbe0..447f92fbc 100644 --- a/sdk/sdk.go +++ b/sdk/sdk.go @@ -2,6 +2,8 @@ package sdk import ( "context" + "errors" + "fmt" "go.mongodb.org/mongo-driver/bson/primitive" @@ -11,8 +13,8 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/ctrl" "github.com/percona/percona-backup-mongodb/pbm/defs" - "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/lock" + "github.com/percona/percona-backup-mongodb/pbm/log" "github.com/percona/percona-backup-mongodb/pbm/oplog" "github.com/percona/percona-backup-mongodb/pbm/restore" "github.com/percona/percona-backup-mongodb/pbm/topo" @@ -146,14 +148,21 @@ func NewClient(ctx context.Context, uri string) (*clientImpl, error) { return &clientImpl{conn: conn}, nil } -func WaitForAddProfile(ctx context.Context, client Client, cid CommandID) error { - lck := &lock.LockHeader{Type: ctrl.CmdAddConfigProfile, OPID: string(cid)} - return waitOp(ctx, client.(*clientImpl).conn, lck) -} +func WaitForCommandWithErrorLog(ctx context.Context, client Client, cid CommandID) error { + err := waitOp(ctx, client.(*clientImpl).conn, &lock.LockHeader{OPID: string(cid)}) + if err != nil { + return err + } -func WaitForRemoveProfile(ctx context.Context, client Client, cid CommandID) error { - lck := &lock.LockHeader{Type: ctrl.CmdRemoveConfigProfile, OPID: string(cid)} - return waitOp(ctx, client.(*clientImpl).conn, lck) + errorMessage, err := log.CommandLastError(ctx, client.(*clientImpl).conn, string(cid)) + if err != nil { + return fmt.Errorf("read error log: %w", err) + } + if errorMessage != "" { + return errors.New(errorMessage) + } + + return nil } func WaitForCleanup(ctx context.Context, client Client) error { From 66e94d48d3398a72a1fcf38cff29fd032199e67c Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Fri, 26 Jul 2024 12:05:08 +0200 Subject: [PATCH 143/203] disable linter for specific lines --- pbm/storage/s3/download.go | 2 +- pbm/storage/s3/s3.go | 2 ++ sdk/sdk.go | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pbm/storage/s3/download.go b/pbm/storage/s3/download.go index b3254aeed..36fbdf950 100644 --- a/pbm/storage/s3/download.go +++ b/pbm/storage/s3/download.go @@ -448,7 +448,7 @@ func (pr *partReader) getChunk(buf *arena, s *s3.S3, start, end int64) (io.ReadC if err != nil { // if object size is undefined, we would read // until HTTP code 416 (Requested Range Not Satisfiable) - rerr, ok := err.(awserr.RequestFailure) + rerr, ok := err.(awserr.RequestFailure) 
//nolint:errorlint if ok && rerr.StatusCode() == http.StatusRequestedRangeNotSatisfiable { return nil, io.EOF } diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go index 439b50208..d5a52cd2f 100644 --- a/pbm/storage/s3/s3.go +++ b/pbm/storage/s3/s3.go @@ -555,6 +555,7 @@ func (s *S3) FileStat(name string) (storage.FileInfo, error) { h, err := s.s3s.HeadObject(headOpts) if err != nil { + //nolint:errorlint if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { return inf, storage.ErrNotExist } @@ -582,6 +583,7 @@ func (s *S3) Delete(name string) error { Key: aws.String(path.Join(s.opts.Prefix, name)), }) if err != nil { + //nolint:errorlint if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey { return storage.ErrNotExist } diff --git a/sdk/sdk.go b/sdk/sdk.go index 447f92fbc..a77adad98 100644 --- a/sdk/sdk.go +++ b/sdk/sdk.go @@ -159,7 +159,7 @@ func WaitForCommandWithErrorLog(ctx context.Context, client Client, cid CommandI return fmt.Errorf("read error log: %w", err) } if errorMessage != "" { - return errors.New(errorMessage) + return errors.New(errorMessage) //nolint:err113 } return nil From 45ea522f732324a3b9ed5fa35dfa7fcd4ecc0021 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 29 Jul 2024 14:58:55 +0200 Subject: [PATCH 144/203] fix agent version in pbm status --- cmd/pbm/status.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go index e75114383..81ad99d56 100644 --- a/cmd/pbm/status.go +++ b/cmd/pbm/status.go @@ -204,10 +204,10 @@ func (n node) String() string { var s string if len(n.PrioBcp) == 0 || len(n.PrioPITR) == 0 { - s = fmt.Sprintf("%s [%s]: pbm-agent [%s]", n.Host, role, n.Ver) + s = fmt.Sprintf("%s [%s]: pbm-agent [%s]", n.Host, role, ver) } else { s = fmt.Sprintf("%s [%s], Bkp Prio: [%s], PITR Prio: [%s]: pbm-agent [%s]", - n.Host, role, n.PrioBcp, n.PrioPITR, n.Ver) + n.Host, role, n.PrioBcp, n.PrioPITR, ver) } if n.OK { s += " OK" From b5bfb8a34452d0abcc969c6a4b0a9d948ec17764 Mon Sep 17 00:00:00 2001 From: Sandra Romanchenko <53295797+sandraromanchenko@users.noreply.github.com> Date: Mon, 29 Jul 2024 17:22:08 +0300 Subject: [PATCH 145/203] PBM. Remove outdated operator files (#968) --- docker/Dockerfile | 20 -------------- docker/Dockerfile.k8s | 58 ---------------------------------------- docker/start-agent.sh | 62 ------------------------------------------- 3 files changed, 140 deletions(-) delete mode 100644 docker/Dockerfile delete mode 100644 docker/Dockerfile.k8s delete mode 100755 docker/start-agent.sh diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index 2e0220e45..000000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM registry.access.redhat.com/ubi8/go-toolset:1.22 -WORKDIR /opt/pbm -COPY . . -RUN make install - -FROM registry.access.redhat.com/ubi8/ubi-minimal - -LABEL org.opencontainers.image.title="Percona Backup for MongoDB" -LABEL org.opencontainers.image.vendor="Percona" -LABEL org.opencontainers.image.description="Percona Backup for MongoDB is a distributed, \ - low-impact solution for achieving consistent backups of MongoDB Sharded Clusters and Replica Sets." 
-LABEL org.opencontainers.image.authors="info@percona.com" - -COPY LICENSE /licenses/ - -COPY --from=0 /opt/app-root/src/go/bin/pbm /opt/app-root/src/go/bin/pbm-agent /opt/app-root/src/go/bin/pbm-speed-test /usr/local/bin/ - -USER nobody - -CMD ["pbm-agent"] diff --git a/docker/Dockerfile.k8s b/docker/Dockerfile.k8s deleted file mode 100644 index 227043a0a..000000000 --- a/docker/Dockerfile.k8s +++ /dev/null @@ -1,58 +0,0 @@ -FROM registry.access.redhat.com/ubi8/go-toolset:1.22 -WORKDIR /opt/pbm -COPY . . -RUN make install-k8s - -FROM registry.access.redhat.com/ubi8/ubi-minimal -RUN microdnf update -y && microdnf clean all - -LABEL name="Percona Backup for MongoDB" \ - vendor="Percona" \ - summary="Percona Backup for MongoDB" \ - description="Percona Backup for MongoDB is a distributed, \ - low-impact solution for achieving consistent backups of MongoDB Sharded Clusters and Replica Sets." \ - org.opencontainers.image.authors="info@percona.com" - -COPY LICENSE /licenses/ - -# check repository package signature in secure way -RUN set -ex; \ - export GNUPGHOME="$(mktemp -d)"; \ - gpg --batch --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A; \ - gpg --batch --keyserver keyserver.ubuntu.com --recv-keys 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5; \ - gpg --batch --keyserver keyserver.ubuntu.com --recv-keys 91E97D7C4A5E96F17F3E888F6A2FAEA2352C64E5; \ - gpg --batch --keyserver keyserver.ubuntu.com --recv-keys 99DB70FAE1D7CE227FB6488205B555B38483C65D; \ - \ - gpg --batch --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona; \ - gpg --batch --export --armor 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 > ${GNUPGHOME}/RPM-GPG-KEY-CentOS-7; \ - gpg --batch --export --armor 99DB70FAE1D7CE227FB6488205B555B38483C65D > ${GNUPGHOME}/RPM-GPG-KEY-centosofficial; \ - gpg --batch --export --armor 91E97D7C4A5E96F17F3E888F6A2FAEA2352C64E5 > ${GNUPGHOME}/RPM-GPG-KEY-EPEL-7; \ - rpmkeys --import ${GNUPGHOME}/RPM-GPG-KEY-Percona ${GNUPGHOME}/RPM-GPG-KEY-CentOS-7 ${GNUPGHOME}/RPM-GPG-KEY-EPEL-7 ${GNUPGHOME}/RPM-GPG-KEY-centosofficial; \ - \ - microdnf -y install findutils; \ - curl -Lf -o /tmp/percona-release.rpm https://repo.percona.com/yum/percona-release-latest.noarch.rpm; \ - rpmkeys --checksig /tmp/percona-release.rpm; \ - rpm -i /tmp/percona-release.rpm; \ - rm -rf "$GNUPGHOME" /tmp/percona-release.rpm; \ - rpm --import /etc/pki/rpm-gpg/PERCONA-PACKAGING-KEY; \ - percona-release setup psmdb-50; \ - percona-release enable psmdb-50 release - -RUN set -ex; \ - curl -Lf -o /tmp/jq.rpm https://download.fedoraproject.org/pub/epel/7/x86_64/Packages/j/jq-1.6-2.el7.x86_64.rpm; \ - curl -Lf -o /tmp/oniguruma.rpm http://vault.centos.org/centos/8/AppStream/x86_64/os/Packages/oniguruma-6.8.2-2.el8.x86_64.rpm; \ - rpmkeys --checksig /tmp/jq.rpm /tmp/oniguruma.rpm; \ - \ - rpm -i /tmp/jq.rpm /tmp/oniguruma.rpm; \ - rm -rf /tmp/jq.rpm /tmp/oniguruma.rpm - -RUN microdnf install percona-server-mongodb-shell - -COPY --from=0 /opt/app-root/src/go/bin/pbm /opt/app-root/src/go/bin/pbm-agent /opt/app-root/src/go/bin/pbm-agent-entrypoint /opt/app-root/src/go/bin/pbm-speed-test /usr/bin/ -COPY ./docker/start-agent.sh /start-agent.sh - -USER nobody - -ENV PBM_AGENT_SIDECAR=true -ENTRYPOINT ["/start-agent.sh"] -CMD ["pbm-agent-entrypoint"] diff --git a/docker/start-agent.sh b/docker/start-agent.sh deleted file mode 100755 index f18326abd..000000000 --- a/docker/start-agent.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -set -o xtrace - 
-PBM_MONGODB_URI="mongodb://${PBM_AGENT_MONGODB_USERNAME}:${PBM_AGENT_MONGODB_PASSWORD}@localhost:${PBM_MONGODB_PORT}/?replicaSet=${PBM_MONGODB_REPLSET}"
-
-PBM_MONGO_OPTS=""
-MONGO_SSL_DIR=/etc/mongodb-ssl
-if [[ -f "${MONGO_SSL_DIR}/tls.crt" ]] && [[ -f "${MONGO_SSL_DIR}/tls.key" ]]; then
-    cat "${MONGO_SSL_DIR}/tls.key" "${MONGO_SSL_DIR}/tls.crt" >/tmp/tls.pem
-    PBM_MONGO_OPTS="--tls --tlsAllowInvalidHostnames --tlsCertificateKeyFile=/tmp/tls.pem --tlsCAFile=${MONGO_SSL_DIR}/ca.crt"
-    PBM_MONGODB_URI="${PBM_MONGODB_URI}&tls=true&tlsCertificateKeyFile=%2Ftmp%2Ftls.pem&tlsCAFile=${MONGO_SSL_DIR}%2Fca.crt&tlsAllowInvalidCertificates=true"
-fi
-
-export PBM_MONGODB_URI
-
-if [ "${1:0:9}" = "pbm-agent" ]; then
-    OUT="$(mktemp)"
-    OUT_CFG="$(mktemp)"
-    timeout=5
-    for i in {1..10}; do
-        if [ "${SHARDED}" ]; then
-            echo "waiting for sharded scluster"
-
-            # check in case if shard has role 'shardsrv'
-            set +o xtrace
-            mongo ${PBM_MONGO_OPTS} "${PBM_MONGODB_URI}" --eval="db.isMaster().\$configServerState.opTime.ts" --quiet | tee "$OUT"
-            set -o xtrace
-            exit_status=$?
-
-            # check in case if shard has role 'configsrv'
-            set +o xtrace
-            mongo ${PBM_MONGO_OPTS} "${PBM_MONGODB_URI}" --eval="db.isMaster().configsvr" --quiet | tail -n 1 | tee "$OUT_CFG"
-            set -o xtrace
-            exit_status_cfg=$?
-
-            ts=$(grep -E '^Timestamp\([0-9]+, [0-9]+\)$' "$OUT")
-            isCfg=$(grep -E '^2$' "$OUT_CFG")
-
-            if [[ ${exit_status} == 0 && ${ts} ]] || [[ ${exit_status_cfg} == 0 && ${isCfg} ]]; then
-                break
-            else
-                sleep "$((timeout * i))"
-            fi
-        else
-            set +o xtrace
-            mongo ${PBM_MONGO_OPTS} "${PBM_MONGODB_URI}" --eval="(db.isMaster().hosts).length" --quiet | tee "$OUT"
-            set -o xtrace
-            exit_status=$?
-            rs_size=$(grep -E '^([0-9]+)$' "$OUT")
-            if [[ ${exit_status} == 0 ]] && [[ $rs_size -ge 1 ]]; then
-                break
-            else
-                sleep "$((timeout * i))"
-            fi
-        fi
-    done
-
-    rm "$OUT"
-fi
-
-exec "$@"

From 5be2495e13f12351bb6d759be566f19968119abd Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Wed, 31 Jul 2024 15:53:40 +0200
Subject: [PATCH 146/203] [PBM-1350] hide mongodb uri in stderr (#970)

---
 pbm/connect/connect.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pbm/connect/connect.go b/pbm/connect/connect.go
index a886eaceb..692c6e186 100644
--- a/pbm/connect/connect.go
+++ b/pbm/connect/connect.go
@@ -239,7 +239,7 @@ func Connect(ctx context.Context, uri, appName string) (*clientImpl, error) {
 
 	curi, err := url.Parse(uri)
 	if err != nil {
-		return nil, errors.Wrapf(err, "parse mongo-uri '%s'", uri)
+		return nil, errors.Wrap(err, "parse mongo-uri")
 	}
 
 	// Preserving the `replicaSet` parameter will cause an error
@@ -247,7 +247,7 @@ func Connect(ctx context.Context, uri, appName string) (*clientImpl, error) {
 	curi.Host = chost[1]
 	client, err = MongoConnect(ctx, curi.String(), AppName(appName), NoRS())
 	if err != nil {
-		return nil, errors.Wrapf(err, "create mongo connection to configsvr with connection string '%s'", curi)
+		return nil, errors.Wrap(err, "create mongo connection to configsvr")
 	}
 
 	return &clientImpl{
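The fix above keeps the URI out of the wrapped errors entirely. An alternative, sketched below, is to keep the URI in the message but mask the credentials first; redactURI is a hypothetical helper for illustration, not a function in PBM:

package main

import (
	"fmt"
	"net/url"
)

// redactURI masks the userinfo part of a connection string so that a
// password can never leak into stderr or the logs.
func redactURI(raw string) string {
	u, err := url.Parse(raw)
	if err != nil {
		// Unparseable input may itself embed a password; never echo it.
		return "<unparseable mongodb uri>"
	}
	if u.User != nil {
		// Replace user:pass with fixed placeholders.
		u.User = url.UserPassword("xxx", "xxx")
	}
	return u.String()
}

func main() {
	uri := "mongodb://pbm:s3cret@localhost:27017/?replicaSet=rs0"
	// Prints: mongodb://xxx:xxx@localhost:27017/?replicaSet=rs0
	fmt.Println(redactURI(uri))
}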
From a029f136638a5883626cfe069358d2d571d0d697 Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Wed, 31 Jul 2024 15:57:14 +0200
Subject: [PATCH 147/203] delete filelist.pbm file (#965)

---
 pbm/backup/storage.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/pbm/backup/storage.go b/pbm/backup/storage.go
index 0ed7df098..cccd5a9d9 100644
--- a/pbm/backup/storage.go
+++ b/pbm/backup/storage.go
@@ -259,6 +259,12 @@ func deletePhysicalBackupFiles(meta *BackupMeta, stg storage.Storage) error {
 			return errors.Wrapf(err, "delete %s", fname)
 		}
 	}
+	if version.HasFilelistFile(meta.PBMVersion) {
+		err := stg.Delete(path.Join(meta.Name, r.Name, FilelistName))
+		if err != nil && !errors.Is(err, storage.ErrNotExist) {
+			return errors.Wrapf(err, "delete %s", path.Join(meta.Name, r.Name, FilelistName))
+		}
+	}
 	}
 
 	err := stg.Delete(meta.Name + defs.MetadataFileSuffix)

From cfaf14d2752bca580d7d1c287832b3cbc186ee93 Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Wed, 31 Jul 2024 18:05:48 +0200
Subject: [PATCH 148/203] return arch png (removed in v1.7.0)

---
 README.md            |   2 +-
 pbm-architecture.png | Bin 0 -> 58276 bytes
 2 files changed, 1 insertion(+), 1 deletion(-)
 create mode 100644 pbm-architecture.png

diff --git a/README.md b/README.md
index 34c3a24b9..291dc2916 100644
--- a/README.md
+++ b/README.md
@@ -30,7 +30,7 @@ Percona Backup for MongoDB consists of the following components:
 - **PBM Control collections** are special collections in MongoDB that store the configuration data and backup states
 - Remote backup storage as either s3-compatible or filesystem type storage
 
-![Architecture](https://github.com/percona/pbm-docs/blob/main/docs/_images/pbm-architecture.png)
+![Architecture](pbm-architecture.png)
 
 [Read more about PBM architecture](https://docs.percona.com/percona-backup-mongodb/details/architecture.html).
 
diff --git a/pbm-architecture.png b/pbm-architecture.png
new file mode 100644
index 0000000000000000000000000000000000000000..17d13957db1fccc07572d967f94b5310ba539353
GIT binary patch
literal 58276
[58,276 bytes of base85-encoded binary data for pbm-architecture.png omitted]
zR39C$KZohW9iUzs9UbilhzQxcDoPIuO9Zq$UTH_5QBY7s0Mgv{y_3n-4L{+^n(mJp zQy}kndBL5(#(56cdmSV(!qS)20A35c*w+2op`@azp{>oF{zC^k(AXy&ZNEnV)emxSE5&7*%+}VvFxuP)22bnxMC#rpu zBZ#hWP)yg0p+_)dU8`6IxaRGV?(^u_dIZ4|kL_YrKy*272%5Llv^8H&wg|op#C-W& zyg7ALbv#f`})v#>h!OqN$5oR$VR$=SpyM*{n*jGW%JXzI&+_XqM*tVCBh$}D`PG%>9%hRp(o{H=!Fy!E+>O~|t=r>LibY?bQI2Wqmt9Ip7!;9V+Q48t3% z8hp=RnUhSCwU6qp{uo>{S>ScX3BiM^tJQ)gud$qW_3(mB*WpMgNIoaw7s?q`r>Bii}`2y&f{sSvQ=1jF=eJpeyk~y*W5@S zJ(VP^Hlb^xRcBY%73*v@#-B%-rD*wp z{y~7qomkL2oozr~P9ELLBTM=*;-RZ7>ZFykeMiZ|c5q7j)5Ey}6pZp0;OieU%r)l8 zYSYuxy8?2`w>DV(C$9kNY|po&mrMvP_eY&gZs%n{jD<5=C4n(iVF?G~0*K&wJw4qY z9~3wnvTO?MqxHCNnIn$JRwzYj`uC52m@IUnwYIRaw>-4Op_oTQs;H?!1F3{EO3YIq zQWs_x<5Xn4Bn2R6AL#GbI?I~iS0P#eqR3}x3;qab+$zA$m-M;CX^bY#ZR3(J(|VMu zqxM+T9?n8mT z8FyC3{61Hj_eI`t&GC@O<5g92%ft_4Rqa#Wrn3Tya zpt@4Z6PjyD9dni3arLi7I_88T^_5ioMRt=}*gZdZc*RO3e|g@D&t11XwFq<<3-}VR z9NdJKI@~K9PB#Ab5wT+G*W7r%6Mf0xW^Yh!xOR@pgL3-10D#hglaOkSS9~*AIxD9a zca9sy>t+uHEUGV+H4kbFV=Z(RB00B;qF=@XKYaYC*+?V72K!MM{G@RB?%lgtcKM)( zuRC-EDN+uoaX4hrw zM7N27w-=3cI9R2cavaX@OFIHv92BQB#8dNvYpduqa4i;t@HNR~gF2#>2YqzK_8gkD z4V#6m*yK0Jj3UL6JP6~|@mGd_D%Ac)QrM(jF^5v1?{T0uKjsGgn4m^>c=+$`kdUSM ze0StN=uyY>t%fX!?2~LesZN9kONihF0xWgti^!kbF_>a>BQQ^3&h*!PwTXt$-QzJ zzUIiMrMy^TctdyVVnSvEpqheGL`GI028VmC;NQ2u~!X z5Css}vX}L0^wMC3G`bJZnRm2g7>h(q5uc}2JO6H_e8P=)ir*pUMlZqCI}F{NnO!`1 zMPkp$99pC7ZiuUlhC4GqKVMB4LQDOcmnYvsZpGw^CZAQht56~Oj_mlWhK>9kIchGO zyBnvECcQs;kbAa+g)=AWU&P2Qwm+)9q>)R?)c+q?T{E)4LVrpXCtGxh?r!(5j@?D7mLxAsvksFvijmG zWu%r_bP40`7GwCVKYOgl@DhN2GUkJhc<=7>a^mRx^tsFxscYB~(BGaCP2cwu zgfWScLJH_Re$9Z~d^|eK^;a_NO-pTEdd^2_v21BQBxLuw)S1kb3(rBS8nLvx;-~cJ z0O6xO4M!ukEG3p$^uFiRX}37}?dvgb;oJszM$9w@5BQ4&+|3E~1UwVMrJR;IVRgcu zpu?P&OPN%Dp8Y-;kS^HCTO6mRl>bLP)=-p84tCj*9^ehI+ve43q{RTTNDz+69I+6= zN~(lZQ;R3=?>+dmZtE88gIG$g)vOw*R5rF2sK$sGu$jYF(nmsl$;ugX@G{fYXpwRcvi!)_3NOY(x!f#jrOI(I?Q@KF)%yy~D z;&q;EQb%k44{1E~A5B>R?yQ%kY*&`WO$3nUitIGLI$oxHK`}@h}to=fb0n-A&=3v-jA2+f*qnx7U(ujr{cBd&=^{ABQY4zqJPXCb8W(o zeFHVrJ6s>r)4$Txf=Cuz9nO$n#!y86B}d@FZ3Wc`JitwG+>|`w^?b{{7G!5r{ z=sSrX@}&W(sWF{xN)^Y(nvJXLB!RZ+-UnCmj29a}#PvLmb6s;Xx*i(+XEtjIKg zNlh=ek|E`VJ*MAC)4&_BJx@#ubrg>u_EjF--%j^QOU->jr8OQkgpHs8A{EK8@}mmU z;=)_zya}Q(baef=2sb|>Bez{=@Q4Bp<-0sbOWF-MpJCuYTi=9q2|&}!n?p^*}a=WP}D zyKE2ino%o5?R=Z3yJ00k_+I@s?m;wVn9pBNk7Z|*xYJ){TJ4RX17A8Qo7sk9*n5YZ zIWG2X)T{5`2W%gE@8_NJ6W_->#KS}8rNfK}p5yPSfhb>PxarN|+yp1e-}`g$ zjE%C;0AZJ7hU*^~>=B4@YDZ8@Pb)0K9l;op_ zhx6;EBK)#4aYnv0-`hGfu6?y1h8ZNSImxP=CJ)!%(_%w~^pWk5WOUTTg1BY1ElZU= zQ=2N#TZmabBO=Xpf^tzH4R2ul?mUI9k6} znTBUt*ceuLsA-lu*wu20Lf7|EWIYFOfkXEi=4sdj8vRXIrLg;$@*zAr5#!6`N`y$X zv#DSM8uTU0lw4v2Mf5k%gBcbIM&ZQ4c|4l)=IbTUt-xbAe>y$yv@Z;thR5*0z&a;h zP}tx)yn1M+oEE#quBy3}a;PM0v&$r9(k#8z1^@!7$DnSYif!<9Qva%WZv{Ag<;bB4 zM7r`Vn$|}avrEsJ3M$l>FHO!Okim(dUM2L=~J_;O}}w^ z2-F61kQ-+aO0Y(%yunC1<5yuXjkwFURK=@_&hkI){5H!vjKyHUF#RVc&3I`HsPQ&w_U6q~ zD`ICuH30fo_Kgjhu-#K6>DFO^f9zpTFHUUyjS8_y2jikOT|%`}?xKCAS$?53RrM_5 zKnfYVM4y1XVRekc>8xr>1Vs^deD)j2eDO*h=yUJ#1*{v=e5*Z+WcJ`DCk^Fr%K_iu z*$`_P&8o*F2fKmo&g~6hj^?q#Z5Z62Nv^3*sol`JlRhhDb7hMC?@lOrJ;Q9S+(2;r zpe*kpt@Y@ZtmXr$Gb7pwvP5wnVxxR<_so)dc41+3>fZxPd z-qc!}{7QQ#79=ej^U0OJvtH)re?kH}Q0g~{EvFA0k;G5Wtlbh?iB};hRb_=hj2#PYSo zW;jO^lrR*AGwBsL<$fZ}+;uzzJqa5^85ZBas1{jiuBmEy!vaC0iIr6Vl~d6P=6x{Q zPbMPc@tgQ*ftG|pH!4+k{OaQD*9NxjoCCD*_GgBG1yb(2I8+H}(?~)Nb6KCt$WZ%v z*54I+grBy`1*mX^OY3<}!>YXdk;>A_g06E+T+?rx+MPyHCPV65MmT_;$k#Tyo9_J} z)$Vsu+{-A}88v!J^qQwR_x0ScZtMwMaG-*?n-flYJB8CeX7t`inhMT&s z`q+V*UUb*i>2A5hyi?(^tS=NDRII}q1J2Y<(MFUY#Q#o9&PD`n7}g|hDB#nAY^C%D zR3!sEg0mLR+^C#vgDNa52C+7+Ygy7W=IO=8g0Tr)W)p7=XXfk-x)a6DuBG^)MW1vH 
zznDEw!5{KUgKs^Ns(QB#_QWraIl$NX)w$v-L0O@e@O2@^GUKI0IBxXmPT8W%dE+CYRvB?4K)VpO5rhrwD(YbPh0NVBar+i0@m!1G?@B!|nI}k?!P<1qu-> zuwj9RHG%uQ)$O0Z)M?6w@phjO^Lvv|eEG$4I>S;z{`_xwI*9Q@iBxow0#H~I9qf;R=782; zTnM_F;m}2Kpz|L7N9UOaaRi}0I$!gRvVSLn;${B2N)>6ow@;Z0&O5BWe>mYHLflZy-g#CZT%GC=u z;&abg%5FQ4S2vdLfN1T%R@{|{DgmNK*Cfmvh9!+IO4<3;2E7mcn4n^2)DI3(nvNu6 z-jC&bJ6uG#oIykLn|sP7S>avMJq>i~bn!Ioelb3Ys$2F(SU$DPWFt!fGFhdfQTJV1 zwRr7-I(H7Woqyb&{gfLJL)RJ}afWUeC}nfU#+H2Pdy|t^E?~RzIe+n|07!S7z=tas z{>4z8iVCSeS`rISOYnVj4r12;1=+uMpkbM+di7Uvo zMcs1nLg)v6a#9nc>{<3%cz3I$a3&*5euD;dM>VBE4hk4K@0K4UmegB^?6hO zOQrG`Ugn@{U}_`-5-GQtyONo7dDMO@er!?kW$jgn2q$AI{kkPZjuMu(<<3q+{%+5@ zds?7lMpM7)(o$=t!wE4INqU|L81Hx38w7pEeZ{?PHB~|@>$rcV_(@3EiE#MwGbzT; z1Hjjl9NWU)fY;M$sT+WjqUw5#@$&Jg3;!#bK@BrY?^?P|b!bI7ca*#=ftNsO`<~%PCuhz4xzp$1<2qTE~ z^xEdoY30yoP2N)(EOx3Gmb}m!u$ai$nwos8iRHmW6@8K!DjLaybRMo$jvIlYIg*D(Es$l+>F6+^~VP*plk+$z^KpDFW(Y&hJ_NMNtr`&2_g#K8Zbb8 zqkLt7dM8jx!T{McWHe~9LNQU0hqON7yzwgmnx^G{01t4N7z~qse1L@-VIZRr$pNSz zT+xp&7-^INQB@qEt3FG%ng9C=knrz6U-7NhfcpX~9t%u)_%T$VOSC#zan#@;T_o7J z%wS-VQU3rEnz6I*A22+JG$=>sYjwnM^nGZQ{$>El-`%WC_yBR@dkP=uX=SKxD4=~$ zRRn(^aM!+7X;sR;$g>J)jo0K1!$agmDr<5$OvA5%Griaj+9fbR4+8^2+2Z^2_U-Rf zLcH`!XGMd#lcRwK>g_B-FQ-6(tjy2q2m(`Tq61aSPsd4cjI%5wmF1VUZxvE4E_#4#H zwb;+}pBBWALM{oCrnrDN?x?BY4Kjc?5FnR&Uta2M@^u-g_gW%V$)f*V+ zHmz}7t@=ZBqt510+R4|&9Q%Q6V!gdgJrEY0m^++!hzc<5P@dzplFgSf^{vjCx_C(R zP}EZkIeEne@5+~%S&l+nkKfBsBA!vpMzF{&t1Ofi<0K7~&bzIB&%B`AUp-3{V^x2dbXH^ZCf#sd#Hu60=r z(ERTQ2x7lB=FwFfm1;4KX*!t#gPP`Tcg`yLuXBz~FAlaFR}V7&zUIQ1ZY(x8NCnzc zGwep_!jS%-{+7d^a-u7Q1dR_9OzpX=7IG9677JZl_%@WJ0r7T@@KJM;3=4Bn0Uz+_ z%*@}O7D={zJk~g|?qEUAi*mozVhkX2OGa6CB!j+4VO4;%1zP+#Cgv3?_n(86f~<5@ zZwp&{MO$wxbqVV0H=(;Pv-M(2^jB}w%L+GGEP+QZ5HsbVV^q(s%Oar0ucj+55Ytp6 zPp@^hSL{&A4GfTmiT671(JNdr)`eI1$R+2?pg%YuYH)UjH_}EdFAm2Lspe28UXYVB z&%uIg#=*dl+ooE6CFk{ylJwop^!Gib{6YzaX1vKMks&5j64fFOTZfvE#!3Nx9kmlj zwe~PUYs?d87l5Fl&cT5qm`9fuF9%+!gklW$QaoKA|W zt;5$-1wvX6ZoawX1Fnoed6@B27lmSOY7Ebi0_E(gFBp_qNbs&a96n8{Jio(Uxn9fW z*0c<-1f1KVTooFh;Ess+OL-qD?D zy0l<^;z)P;=8YghB{NVg`c)XAX(1IXWIZG#ut>&mgr=;l7qC|G9WPOx>-kfWcvWaxoLTX+&&BD_f8J(K6@lTnH zkMC`xUzV$6Wu7k$Et^bKiV~`a2f{=1BKFEUs*epJr<}I-n^+G$C2D)Lk5THX5e*IQ z7busXkFga35s+;M>L~^>ppM5LPznT#nQ)A7hODycO4D#KyR}jve8bBUB)|mCJqy_X zc_krFnAL)VjB+BXk%5$4C1&vvhBYaFJMJx8Ej+3=72_O|9ZL&D_ovZs_(2?Kj;_z; z0xJ6`;kHh>%FJ2~Vc=J*neW)ejcq+>5U!&U(eQ3WJFftU(dpG+ISQ%%;Oq*N35mdqzneo> z;0A-o{*R@<51W(v+W_jW^Y ze`eNt*eT-@UK7(8zs-pN*^D-pXhzl`Rd2}~ZT$CwweeU8AB)*{ANX5TBsycS`Uu@;q?)%y_!RiVM+ znK(}U&*{Tq_Qv5thsWzO&LeoV|F0Z`_Pgq1ETauyMKbP$An)iFBD~jhSvGwq4B2$H zr}_4_Z*yOD-Xj_78zH7OSo}s8&{iOXg?6Wc9Q>UC$c8isRal4vP=-S7M)y#kqhE#C z2Zdx~Jx>A_KU%u5#~`hZqL^kWy%(*k z=vV1xYHMG$&AqoOoMtudkE*fXY7bb|iAjPCgD_C|z3+1Pp1NIQ%7qTviqROfxomLS zZFByHi+51EbO(7u90zaVbrX(0)1} z7mhvl%ae0^!;hjFk)+$_)-OnKugR2BS}sW9Cr$s;^&VqDccHAjq&wpy!p+VT!i?5ti#Hh z$w*SY4%y-AK|x?_K(pqd&qT9o(Y~Tuwy>!jIbHTwQz!ENqDi_^$fn1eSkBGEyhuW? 
z_q>}Z8Zw%OR{Ome`@#W;tCNK)KYsjZBX?Qw12FGY+;y`$=HXqFhF&%7_?(=mb-X`% zL+?sPP@U+(;5`J06Uwl%;o_3l^yGK$9VDMWD304gQ{OT0oNFLnde(1MNDZ%DHa&wbH#heNAXA+J+!>?mqeahl0BN!Q{B-=x z3?wAr=FyzCKvoI4>}ztpsSo-g@cprsH$&jy;Y=2Ysa5dBYxZM3d9Ug<{JFYWz8r%z zvd%!6A;m=!44oCX`N2=R%f5FG%+yI7*aj#~7~zm=E>>TZUO6i6*GapyBKKrXC~G;F zb7}V$5~H=%Fwa$$R&K;)`Gj!P&7VfY{QZ6Y9PEm%Jq(DdA1O(b0-$DPUj2|Qas z9X9$uoxyh<4NejOCGQ2me**9*0-g?m%&GgOtrT2KuJ6Pnsv@X_^Dy^~XOVFB7PVW0 z-hHo?q{`mO@vSbow7oGAoCG8O+NCTg`X2cbK07)kWsinI*4XF5*;I4NNL(`X;u}V> zMut>pXC}4*q_?#`+VsD}seHH7)YR0rw&u&%pIaN559a^^hJa9TggkIjfDap(;!afy zjV_15{eEqU53_$1-RfKIZ9h!^r@MUett{!1-Pvmq!_@TjNPx(;l5Eq`4uJ4neXAlL zfqZo$nPr|mKu+&RXiohUL0JN`vB}b+BOTgAaLCzG5j`RBxg>Jn-y2PQEJ%9Rm^-ic z=!=K}JTJ)$Y&wBSKLSW2VY>|EWIC|rbUD&-b$9j@csy#m0HJ9lKaml$et`k~9OsLQ zi_FUt|FvT_HwPH8e^EHf0D_b^>_SphIJOXB;GGLurJnxKozU|w7P6wMdE{M5E^+^NqHWxs*bnrE1rJrW+I8KcHE+@f~4V0(U0j z*L`&8Mtln!3#m$QsOR+iErFjrigX9eO|g&P|Ku}4c6Yu4oI2a{A+Acoy5{C)zsm1R zLkN%|k0-})JpgDFsP*_Ls>O1wtIs2-9QpO^tC!<0=T%-B>nC^s%ccu&)iU}k2< z78hz4{0WE-)`O)>fo*kI)HEDXJJQ7PA%m)pujssA z25QDOVYe~rJ=ox$wn6%2+@)}{b&hp1KW7y)@2Fay)r1cBwh7xO5BrFsd$!UN1H_QS zMcKdhW{mi(s9Rz!<_(lIBVZ#+MKy^0bhhq&38ANeQ@R_J5UNao>gErGXEn^GLMXk7 zA_eaXhTZdYgxlug%z94BQ8ZkHgFPa*jXvjaK@X*RCNtl7LI!#kfqR1$q5DK@Uo7RJ zXnX^Y3PviQHOArdlc%UXJlkXGr{b<kDvIuYha;?2L_?XrxK?_(5sMz(oyV1DgDD`vcF zBXQEUz@7c*dD-|#R$b(jiL~x3Nr@L%$mH)NA_7-Z??`6d@JQi#_cVWVaF1_mXV-PE zAntG{1B$ZF4VOIya9PP-lftkt2!=YUDJeB1d& ztQ9|r^eq{i2N1`S>#2oF`q1Axl-Sd?WErK>v6WyRwy5X?|AEq!fXLRTpnK2FSut*h z55%!~gKd?iO>hcK$DHf58m+Y(rs!n+c>B{OUs|66PWI+0vlLptnPp-vd69qg(F+c~ zbC*qGd9BcTLUV9%5aq}Z6GqvHMH|s&y!R{V=6ZsuV}3r71LR+w+=!&-WsP%o?r?gR ze7v4lpLqIsj|89j=MV0*z5s^)V;%Tj^Ku@`*4}=LFyyyjx$9vn77p=Z{v!UDvMVQ- z&qmU3NJx^4$>!E}w|q5?J8pICWPgNoJr52RwYAYvk_l@1vcSPcC20GtdOdK977KO} zl|i@fw)6mQ-GW)!`6-&l0Cy|=$x$zDM(Hx}AN&+u9lPtt=WZ)BN(1xy`o{G% zBtpaO8lRKZMjNU(xyYvym}}iUY(!P+dX=w6KDoaV3%YeZntHCt(R01SIgpZ+Fcss; z3n%ZTYPBDRMYM9$ByPEPB5%2Kq;I*|@wvH5!{ja*t!>g$- z%Ho0uf>fzeMHHk-k&YBmKuTzW^b%TxP^C$YfPkQY^xiR2LN7t2BOo2&GX_HM5FqqU zGS8V=vu3`vCVxQQ&C0#+y|ec|zvHtshm-MUDNFV^9VeQyFf&EVGOeFvps%IM+k-L*tB8=XAC=P#1M z-f=G|<<$Sk(d7!F;Jb|uoBJcJ9xdl3K>kwgq;IFFR3?{Y&X$z0xI|c}qnYj8IhBaJ zk^4sOPLg`hVn=k{?vXrpE!}Q3n(A zK5^HEWA>r?YCOd6USZ&g9q3>d!))QbRwZ;syO8kft;YgLCV9GL^kj>krZ!3{qq6iz zr2qT@&e(SHPhK_Za%gaP&Eoea>*2;qn3DANGb6|Jm0 z9&K2kK+?oL;Joh2NX`1>X6uDS-y>aJt-fD8>SvS8Oq`Knm&6w0D~Z$>MZ&VqUYDny z42(?NLPC^9eQ}WnrE{(^KcZO?5ntFfo; z{%H#~2EMm0bsli3SIvu}UUPjX_)7Ue!Ju0@w;Y5KG&`6GJ9726x|tN)XI66D)WsQY znEILwNI|~D96K9)VfPO@x@l*#R3^!I(7LQ;+67_W8a9kB zHl6s`4dKEwLv(nT;@eO1o68bm?MKSuT|9-JLU>U}9nM)|=oAJui$obH>%1vgVCMRb z-O85qkHTsGTwU@he31s!y56P@#uJ$~!KY(jcG(~j&u5FLr?Vp*@(b(pAN1|i7Pcnq zeh&Bdw45Dyh@ag*-k#}ucYL`2qfr<`%RxVEgWt)*&F}1txQ-i)d+aZ3Ob20y$6iaE zf+uNM#B50nsRh`lVYEk7M@<{6S#nzM}E(KXI~<*0N5&#i|_Aocy?oe-dHNJUdG5s}xLd$Wp+%hnR)yFYzuOG3Rz|rYKoU z=xhspyy>`K<)`&dGUp0TU6#1Do%pF_7%x{tdMOpeIhzHKuh zOj}7?!vewU)dd#u^PtapkKL)e&ucT%xJ5-j6J1>)=j=wNWgk9z)SEE@5q0gAa7#-6 z{L}R&s@%B34cTsLJk&L+boJ?%o!aAKtTUEn_fY(c=p4s*gUa;!+|Rot&7-i6Kx>DseQ?Fnk?xYPY8qn3DW)}584$ePD2 z871U}#R~@9WYs51)^RoTyvT&~S9O(T2>U3h%>zu{ADhz>PZ#p0H57ET;iPBTDXq@w zs;VOD^vOw5{PZx4$MtHPrp^8!2#r5>V!ZfdlG!2`yi@gc`7zT@Lly|c$!ObD_Fz?^Lw^?3KsO2PMsNNYscD=`} z(=&W&Zl+jHR`tL-@6r-`89Cfg=`NajJy5@9QC{gJR>#c9lwW-}{^W;IS=hZ3=H!?vZD(iEA_#?wxA9F5(n{0U#9M({Yapiyeb8{{g(k}D``A3GAv5? 
zQyu{Uf!%Q}kp-alySppm*4~WYT*EDB*}-OcmG}@qULck~t;g~=eV=EV97E|aX@4>YQq?^Fop1|PJe=6588Hu{<-10oP2N`3j+f&MSm(xH{IQ0`!mhB^`NemE2`6r$ zk_sIff`s*=_+4OciEER$Z9Ab1qYCr%gZ6=+8-4+32dXYELIFpcH^!T9AVUAE6yEA^ zc+JhC39!@Bw5OKY)pqY!WM;SjtU>UYq%ow~oK()Fdu~uRHGi{RI*)xQA_6fkd7SC)q3C8LH2QfGRLV}t(`?|grgWhB{W3XN)V|mIzb~Ru2F-Z7-djgY|Zfv z45C46iL@qrnJ1B28XEo(<$UZ&sTR*UJVoD`BC%-s-531xGq-Sc+x~!HLVEho+16zG ztd+`Tmd7^d950`Komr!a+)AOcBIK0ZF^-9MzWQE(N^oe79`luGGq zW>j5O29+O;-|<~tx!m`xK2+@=qbv#5qA{IZhBK$d=nKg(?cKnC_K)+Xz?L+KdqO3Cmf zN1Z8OH8oP4xzgEk62c!zhAppbj=><2TotXU?;2XFQIMlwADK*5w_LX8w+iL=ZgG&8 zmsWzu##7mx#!LB2(?d4A+gu=5zZB(MSMKn5SG;RbtaBsv`MZR~|6T&G9sq~#1}0uU zb{y&eU}|)%tOIb5usxs!B0dn;%Dim62js%`2^vXwKsGS90XmJY)&SAty6%%b5>%D- zs98W@H7e{>WjND{cD-{3>26(MTC`?Ufzy0YO*lWOVqup~=NMj$|6%u|gziI3+&EIu z6bqCJ0wcLN1RofrW7AHk&F+pjIDD-2h#0j$T4_=3)(V*kNH#M;&erkX1TwO0fX;w! z4fMN(NJ>f)3wm28EvnE0Ovt9Ar5$^O0N9EHxpT`aAov@(#20WsaU}GlCc@EGu^KKb zBJ|LnOf{h37A1xK%eT6Ku92Hdlu+26(>>ttIMhBJrYVq#*`y3e1J{GC7tWJ_~z z)cYSe&T{?f%4``k&+BXh5J8_)Q+ZoDSFClI{uBXHGRVoV^Sp@A8l9IG+|vhti2ivM zuNaH1%#Bo#M|$)k90aBEi<@YH@-ELVLA~#=2PhMV*Wmnb(BpnqJ)glv)%$4nAtyJly0r z#xzS$Dl+-AIdc)uY0rVfhSQorI?g-c-*e8g9GfU%x%W`(zWAy<3mqnPc@X`rad=?Og!-Y?-b9gH1*5?jC zFjC6Opbkulk#vfAfFCL(+dW-OQ)aDrRi~pR8;hxIry%E7WoHN*;WfA&iwiLwycnwh zw}uSkOx_+?j3hG3-=!XG!!wkJT!exZz${L`;}=)u@jZ4$$yr~%^rZXE2y-}6^9awO zSL~f1!Kvz8_~h&ccf0U^m(}#IWnIlJiuv19TI3i7G$vc+NX!evs=CKVQ|mMWI*ne6 zVmJM>S?p|zr6IV&tM#Y9+VY32Zn-sc$Twcb2I88i_aGFWuL(O+)$sdV$yYJ#!vmTh zJr5e{WP*8D5<@OK%_Ke1$QsYac>@N9*al)3PFl#sTv7O;p8JO z)~Ll-9T1i7KtT$(=$H~c%LoAC3NQ1x75b2#FkWW~sSjA_lZyD;Z^lAAb$pq!I{PoO z#69(Gsl@YuQnI=z7bZq zMTZqIq4Ub{}?Dg*Ya;C-}zXN{ZSO9JUf$I z@o{K}(1w#b+>pL@3rCP1O=HjKKFm1%G;P2xmm7_7JPt2UYE=ZOT5x6Ll)uqfsh3t*S_&j`yuX$}d9()k`(j)nqN2PvgS%qeVVDMeO{nT* z^l zN*EhQ_(9J^;ynO{RQ5&7qO;4B&(XcS=9C}E-^WM!M-qzB16k6Q-8(tvCgyn|lPT+L z^3~>F5&tqt9dc{8UKRK(*b<{GY0b)c;nYx8XE#%(GD)LdAIVv%9I$~zE9wU0q=$hV zPltjD5Cwp%x6yIPWyH@!f7|u;Q`0UHxorI|trs5TH-4qC!f3Q;s79nYP$ufAd@b@g zRb1D8H(s8#zOxvPC`3qHPcu3bm_5+Dk1l z0sl?e&<&J#n*D8&3jXE1Oww4ev|DIJim#?jxy8c5dbd)8YR42Y-}fSO&C!aE{-N9V zA<12{Rzxs&X_C{X+Fnzx|1n?V+ z_B0Un357;OqnA+w&gcIWwv?s9UKy6^n-xUHM_ngw$<9=o9l3ev8<`{O$>#5)fd%CL z%xt-`2mk9T1~i`%u2~tr3SWF986)VdZ=nBiJjN{9=yyR&8MO|zrc7C~haP`^tvenq z$f(E9s3)*(Q&k^s0KOHG!vWZB)pd11q(m4;NbH}9R9bWYq;oOh4;2SZCW<6&jZ z)SDaoa2^SSLD}@bI=xeycxMJu0IbQ93Mjq=a4cVzKW#9(E)5B6lE-v+8{N<~W^7qs zTD|7(r%trWX`*f*!fzvNWU&1Q@Qt-YR>#abrq~_(fAqf4t0?)EUlw`IG+v;ARFYEr zTtXQ{>rf&kZXro&asK-0^bZ?eUH)0v5W)klRj$oI9;`niDfiJ=U!%GG& zIA2m5UKA4|Fu#=#c$dT@ahgqX0^ex#hjfWap!$?a9g965sj$lRQcmQTN(4K%-ztm6 R)YpKIhN{l Date: Wed, 31 Jul 2024 18:06:00 +0200 Subject: [PATCH 149/203] fix links for pbm commands --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 291dc2916..743de9549 100644 --- a/README.md +++ b/README.md @@ -61,8 +61,8 @@ As a general rule of thumb, please try to create bug reports that are: When submitting a bug report or a feature, please attach the following information: -- The output of the [`pbm status`](https://docs.percona.com/percona-backup-mongodb/status.htm) command -- The output of the [`pbm logs`](https://docs.percona.com/percona-backup-mongodb/running.html#pbm-logs) command. Use the following filters: +- The output of the [`pbm status`](https://docs.percona.com/percona-backup-mongodb/reference/pbm-commands.html#pbm-status) command +- The output of the [`pbm logs`](https://docs.percona.com/percona-backup-mongodb/reference/pbm-commands.html#pbm-logs) command. 
Use the following filters: ```sh $ pbm logs -x -s D -t 0 From d4b1835eaa9486907444249b25ce7fe3a69418a3 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 31 Jul 2024 18:12:49 +0200 Subject: [PATCH 150/203] fix links in CONTRIBUTING.md --- CONTRIBUTING.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index befa2ab9a..b982eff5b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,8 +12,8 @@ Percona Backup for MongoDB (PBM) is a distributed, low-impact solution for achie You can contribute in one of the following ways: -1. Reach us on our [Forums](https://forums.percona.com) and [Discord]([https://discord.gg/mQEyGPkNbR](https://discord.gg/mQEyGPkNbR)). -2. [Submit a bug report or a feature request](https://github.com/percona/percona-backup-mongodb/blob/main/README.md) +1. Reach us on our [Forums](https://forums.percona.com/c/mongodb/percona-backup-for-mongodb). +2. [Submit a bug report or a feature request](https://jira.percona.com/projects/PBM) 3. Submit a pull request (PR) with the code patch 4. Contribute to documentation @@ -23,7 +23,7 @@ Before submitting code contributions, we ask you to complete the following prere ### 1. Sign the CLA -Before you can contribute, we kindly ask you to sign our [Contributor License Agreement](https://cla-assistant.percona.com/<linktoCLA>) (CLA). You can do this in on click using your GitHub account. +Before you can contribute, we kindly ask you to sign our [Contributor License Agreement](https://cla-assistant.percona.com/percona/percona-backup-mongodb) (CLA). You can do this in on click using your GitHub account. **Note**: You can sign it later, when submitting your first pull request. The CLA assistant validates the PR and asks you to sign the CLA to proceed. @@ -53,7 +53,7 @@ Otherwise, we will contact you for additional information or with the request to To build Percona Backup for MongoDB from source code, you require the following: -* Go 1.11 or above. See [Installing and setting up Go tools]( +* Go 1.22 or above. See [Installing and setting up Go tools]( https://golang.org/doc/install) for more information * make * ``krb5-devel`` for Red Hat Enterprise Linux / CentOS or ``libkrb5-dev`` for Debian / Ubuntu. This package is required for Kerberos authentication in Percona Server for MongoDB. @@ -116,11 +116,11 @@ You can run tests on your local machine with whatever operating system you have. ## Contributing to documentation -We welcome contributions to our [documentation](https://docs.percona.com/percona-backup-mongodb/index.html). +We welcome contributions to our [documentation](https://docs.percona.com/percona-backup-mongodb). Documentation source files are in the [dedicated docs repository](https://github.com/percona/pbm-docs). The contents of the `doc` folder is outdated and will be removed. -Please follow the [Docs contributing guidelines](https://github.com/percona/pbm-docs/CONTRBUTING.md) for how to contribute to documentation. +Please follow the [Docs contributing guidelines](https://github.com/percona/pbm-docs/blob/main/CONTRIBUTING.md) for how to contribute to documentation. 
## After your pull request is merged From 9904ff7d5eb6359f7ee8a19d180040f1b4ce7cf5 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 31 Jul 2024 16:22:53 +0200 Subject: [PATCH 151/203] [PBM-1361] 'add profile' command returns OK despite profile wasn't added --- cmd/pbm-agent/profile.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/pbm-agent/profile.go b/cmd/pbm-agent/profile.go index 787a67592..0f9e88d8f 100644 --- a/cmd/pbm-agent/profile.go +++ b/cmd/pbm-agent/profile.go @@ -69,7 +69,7 @@ func (a *Agent) handleAddConfigProfile( return } if !got { - err = errors.Wrap(err, "lock not acquired") + err = errors.New("lock not acquired") return } defer func() { From 7750b345e50f6982e1d001f5dc8362b503e3c49a Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 31 Jul 2024 17:20:07 +0200 Subject: [PATCH 152/203] use go1.22.5 for build --- e2e-tests/docker/pbm.dockerfile | 2 +- e2e-tests/docker/tests.dockerfile | 2 +- packaging/scripts/mongodb-backup_builder.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/e2e-tests/docker/pbm.dockerfile b/e2e-tests/docker/pbm.dockerfile index 4c733609a..503bd1e1f 100644 --- a/e2e-tests/docker/pbm.dockerfile +++ b/e2e-tests/docker/pbm.dockerfile @@ -12,7 +12,7 @@ COPY --from=mongo_image /bin/mongod /bin/ RUN dnf install epel-release && dnf update && dnf install make gcc krb5-devel iproute-tc libfaketime RUN arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && \ -curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.4.linux-${arch}.tar.gz && \ +curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.5.linux-${arch}.tar.gz && \ rm -rf /usr/local/go && tar -C /usr/local -xzf /tmp/golang.tar.gz && rm /tmp/golang.tar.gz ENV PATH=$PATH:/usr/local/go/bin diff --git a/e2e-tests/docker/tests.dockerfile b/e2e-tests/docker/tests.dockerfile index 1b16ca2be..cb8d43417 100644 --- a/e2e-tests/docker/tests.dockerfile +++ b/e2e-tests/docker/tests.dockerfile @@ -3,7 +3,7 @@ WORKDIR /build RUN dnf update && dnf install make gcc krb5-devel RUN arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) && \ -curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.4.linux-${arch}.tar.gz && \ +curl -sL -o /tmp/golang.tar.gz https://go.dev/dl/go1.22.5.linux-${arch}.tar.gz && \ rm -rf /usr/local/go && tar -C /usr/local -xzf /tmp/golang.tar.gz && rm /tmp/golang.tar.gz ENV PATH=$PATH:/usr/local/go/bin diff --git a/packaging/scripts/mongodb-backup_builder.sh b/packaging/scripts/mongodb-backup_builder.sh index cbf8e72d6..a7a20a93a 100644 --- a/packaging/scripts/mongodb-backup_builder.sh +++ b/packaging/scripts/mongodb-backup_builder.sh @@ -141,7 +141,7 @@ install_golang() { elif [ x"$ARCH" = "xaarch64" ]; then GO_ARCH="arm64" fi - wget https://go.dev/dl/go1.22.4.linux-${GO_ARCH}.tar.gz -O /tmp/go1.22.tar.gz + wget https://go.dev/dl/go1.22.5.linux-${GO_ARCH}.tar.gz -O /tmp/go1.22.tar.gz tar --transform=s,go,go1.22, -zxf /tmp/go1.22.tar.gz rm -rf /usr/local/go* mv go1.22 /usr/local/ From 5eca738cb8ada98f86ce1cd18ed5d0577ec47830 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 31 Jul 2024 17:09:46 +0200 Subject: [PATCH 153/203] drop support MongoDB v4.4 --- CONTRIBUTING.md | 6 +- Jenkinsfile | 63 ------------------- Makefile | 2 +- README.md | 2 +- e2e-tests/README.md | 8 +-- .../docker/docker-compose-remapping.yaml | 8 +-- e2e-tests/docker/docker-compose-rs.yaml | 16 ++--- e2e-tests/docker/docker-compose-single.yaml | 6 +- e2e-tests/docker/docker-compose.yaml | 42 ++++++------- e2e-tests/docker/mongodb.dockerfile | 2 +- 
e2e-tests/docker/pbm.dockerfile | 2 +- e2e-tests/functions | 4 +- e2e-tests/run-new-cluster | 2 +- e2e-tests/run-remapping | 2 +- e2e-tests/run-rs | 2 +- e2e-tests/run-sharded | 2 +- e2e-tests/run-single | 2 +- e2e-tests/start-cluster | 2 +- e2e-tests/start-replset | 2 +- pbm/version/version.go | 6 +- 20 files changed, 57 insertions(+), 124 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b982eff5b..c08b8956d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -101,15 +101,15 @@ You can find the tests in the ``e2e-tests`` directory. To save time on tests execution during development, we recommend running general and consistency tests for a sharded cluster: ```sh -$ MONGODB_VERSION=4.4 ./run-sharded +$ MONGODB_VERSION=5.0 ./run-sharded ``` -``$ MONGODB_VERSION`` stands for the Percona Server for MongoDB version Percona Backup for MongoDB is running with. Default is 4.4. +``$ MONGODB_VERSION`` stands for the Percona Server for MongoDB version Percona Backup for MongoDB is running with. Default is 5.0. After the development is complete and you are ready to submit a pull request, run all tests using the following command: ```sh -$ MONGODB_VERSION=4.4 ./run-all +$ MONGODB_VERSION=5.0 ./run-all ``` You can run tests on your local machine with whatever operating system you have. After you submit the pull request, we will check your patch on multiple operating systems. diff --git a/Jenkinsfile b/Jenkinsfile index 143d5bf83..2b3e92ccd 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -83,15 +83,6 @@ pipeline { } } parallel { - stage('New cluster 4.4 logical') { - agent { - label 'docker' - } - steps { - prepareCluster('sharded', '44-newc-logic', '4.4') - runTest('New cluster', 'run-new-cluster', '4.4', 'logical') - } - } stage('New cluster 5.0 logical') { agent { label 'docker' @@ -120,15 +111,6 @@ pipeline { } } - stage('Sharded 4.4 logical') { - agent { - label 'docker-32gb' - } - steps { - prepareCluster('sharded', '44-shrd-logic', '4.4') - runTest('Sharded', 'run-sharded', '4.4', 'logical') - } - } stage('Sharded 5.0 logical') { agent { label 'docker-32gb' @@ -157,15 +139,6 @@ pipeline { } } - stage('Non-sharded 4.4 logical') { - agent { - label 'docker' - } - steps { - prepareCluster('rs', '44-rs-logic', '4.4') - runTest('Non-sharded', 'run-rs', '4.4', 'logical') - } - } stage('Non-sharded 5.0 logical') { agent { label 'docker' @@ -194,15 +167,6 @@ pipeline { } } - stage('Single-node 4.4 logical') { - agent { - label 'docker' - } - steps { - prepareCluster('single', '44-single-logic', '4.4') - runTest('Single-node', 'run-single', '4.4', 'logical') - } - } stage('Single-node 5.0 logical') { agent { label 'docker' @@ -231,15 +195,6 @@ pipeline { } } - stage('Sharded 4.4 physical') { - agent { - label 'docker-32gb' - } - steps { - prepareCluster('sharded', '44-shrd-phys', '4.4') - runTest('Sharded', 'run-sharded', '4.4', 'physical') - } - } stage('Sharded 5.0 physical') { agent { label 'docker-32gb' @@ -268,15 +223,6 @@ pipeline { } } - stage('Non-sharded 4.4 physical') { - agent { - label 'docker' - } - steps { - prepareCluster('rs', '44-rs-phys', '4.4') - runTest('Non-sharded', 'run-rs', '4.4', 'physical') - } - } stage('Non-sharded 5.0 physical') { agent { label 'docker' @@ -305,15 +251,6 @@ pipeline { } } - stage('Single-node 4.4 physical') { - agent { - label 'docker' - } - steps { - prepareCluster('single', '44-single-phys', '4.4') - runTest('Single-node', 'run-single', '4.4', 'physical') - } - } stage('Single-node 5.0 physical') { agent { label 'docker' diff --git a/Makefile 
b/Makefile index 9d9478b13..d88df31ca 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ CGO_ENABLED?=0 GITCOMMIT?=$(shell git rev-parse HEAD 2>/dev/null) GITBRANCH?=$(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) BUILDTIME?=$(shell TZ=UTC date "+%Y-%m-%d_%H:%M_UTC") -MONGO_TEST_VERSION?=4.4 +MONGO_TEST_VERSION?=5.0 define ENVS GO111MODULE=$(GOMOD) \ diff --git a/README.md b/README.md index 743de9549..d40e9fadb 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ ![PBM logo](backup-mongo.jpeg) Percona Backup for MongoDB (PBM) is a distributed, low-impact solution for achieving -consistent backups of MongoDB sharded clusters and replica sets. Percona Backup for MongoDB supports Percona Server for MongoDB and MongoDB Community Edition v4.4 and higher. +consistent backups of MongoDB sharded clusters and replica sets. Percona Backup for MongoDB supports Percona Server for MongoDB and MongoDB Community Edition v5.0 and higher. For more information about PBM components and how to use it, see [Percona Backup for MongoDB documentation](https://docs.percona.com/percona-backup-mongodb/) diff --git a/e2e-tests/README.md b/e2e-tests/README.md index 47f5b781a..62de9f9e5 100644 --- a/e2e-tests/README.md +++ b/e2e-tests/README.md @@ -3,9 +3,9 @@ ## Run tests Run all tests ``` -$ MONGODB_VERSION=4.4 ./run-all +$ MONGODB_VERSION=5.0 ./run-all ``` -`MONGODB_VERSION` is a PSMDB version (e.g. 4.4/5.0/6.0/7.0). Default is `4.4` +`MONGODB_VERSION` is a PSMDB version (e.g. 5.0/6.0/7.0). Default is `5.0` `./run-all` would run all tests both on a sharded cluster and a non-sharded replica set. @@ -20,9 +20,9 @@ $ MONGODB_VERSION=4.4 ./run-all ## Start test cluster To start tests with a running pbm-agent and minio storage: ``` -$ MONGODB_VERSION=4.4 ./start-cluster +$ MONGODB_VERSION=5.0 ./start-cluster ``` -`MONGODB_VERSION` is a PSMDB version (e.g. 4.4/5.0/6.0/7.0). Default is `4.4` +`MONGODB_VERSION` is a PSMDB version (e.g. 5.0/6.0/7.0). Default is `5.0` `./start-replset` - to start a non-sharded replica set. diff --git a/e2e-tests/docker/docker-compose-remapping.yaml b/e2e-tests/docker/docker-compose-remapping.yaml index 77ebd36a8..886458e2d 100644 --- a/e2e-tests/docker/docker-compose-remapping.yaml +++ b/e2e-tests/docker/docker-compose-remapping.yaml @@ -24,7 +24,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: rs101 labels: @@ -54,7 +54,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} volumes: - ./conf:/etc/pbm @@ -69,7 +69,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: rs201 labels: @@ -99,7 +99,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. 
args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} volumes: - ./conf:/etc/pbm diff --git a/e2e-tests/docker/docker-compose-rs.yaml b/e2e-tests/docker/docker-compose-rs.yaml index cc880f71d..2701aee29 100644 --- a/e2e-tests/docker/docker-compose-rs.yaml +++ b/e2e-tests/docker/docker-compose-rs.yaml @@ -28,7 +28,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} volumes: - ./conf:/etc/pbm @@ -41,7 +41,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: rs101 labels: @@ -51,7 +51,7 @@ services: - MONGO_USER=dba - BACKUP_USER=bcp - MONGO_PASS=test1234 - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} command: mongod --replSet rs1 --directoryperdb --port 27017 --dbpath=/data/db/ --storageEngine wiredTiger --keyFile /opt/keyFile --wiredTigerCacheSizeGB 1 volumes: - data-rs101:/data/db @@ -61,7 +61,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: rs102 labels: @@ -74,7 +74,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: rs103 labels: @@ -98,7 +98,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} volumes: - ./conf:/etc/pbm @@ -123,7 +123,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} command: pbm-agent cap_add: @@ -147,7 +147,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} command: pbm-agent cap_add: diff --git a/e2e-tests/docker/docker-compose-single.yaml b/e2e-tests/docker/docker-compose-single.yaml index fbf414b25..62f25d5c2 100644 --- a/e2e-tests/docker/docker-compose-single.yaml +++ b/e2e-tests/docker/docker-compose-single.yaml @@ -24,7 +24,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. 
args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: rs101 labels: @@ -35,7 +35,7 @@ services: - BACKUP_USER=bcp - MONGO_PASS=test1234 - SINGLE_NODE=true - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} command: mongod --replSet rs1 --port 27017 --storageEngine wiredTiger --keyFile /opt/keyFile --wiredTigerCacheSizeGB 1 volumes: - data-rs101:/data/db @@ -55,7 +55,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} volumes: - ./conf:/etc/pbm diff --git a/e2e-tests/docker/docker-compose.yaml b/e2e-tests/docker/docker-compose.yaml index f304e69c9..90a728455 100644 --- a/e2e-tests/docker/docker-compose.yaml +++ b/e2e-tests/docker/docker-compose.yaml @@ -30,7 +30,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} volumes: - ./conf:/etc/pbm @@ -43,7 +43,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: cfg01 labels: @@ -54,7 +54,7 @@ services: - MONGO_USER=dba - BACKUP_USER=bcp - MONGO_PASS=test1234 - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} command: mongod --configsvr --dbpath /data/db --replSet cfg --bind_ip_all --port 27017 --keyFile /opt/keyFile --storageEngine wiredTiger --wiredTigerCacheSizeGB 1 volumes: - ./scripts/start.sh:/opt/start.sh @@ -64,7 +64,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: cfg02 labels: @@ -77,7 +77,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: cfg03 labels: @@ -99,7 +99,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} command: pbm-agent cap_add: @@ -121,7 +121,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} command: pbm-agent cap_add: @@ -143,7 +143,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} command: pbm-agent cap_add: @@ -157,7 +157,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. 
args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: rs101 labels: @@ -177,7 +177,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: rs102 labels: @@ -190,7 +190,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: rs103 labels: @@ -212,7 +212,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} volumes: - ./conf:/etc/pbm @@ -235,7 +235,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} command: pbm-agent cap_add: @@ -257,7 +257,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} command: pbm-agent cap_add: @@ -271,7 +271,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: rs201 labels: @@ -291,7 +291,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: rs202 labels: @@ -304,7 +304,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: rs203 labels: @@ -326,7 +326,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} command: pbm-agent cap_add: @@ -348,7 +348,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} command: pbm-agent cap_add: @@ -370,7 +370,7 @@ services: dockerfile: ./e2e-tests/docker/pbm.dockerfile context: ../.. args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} command: pbm-agent cap_add: @@ -384,7 +384,7 @@ services: dockerfile: ./e2e-tests/docker/mongodb.dockerfile context: ../.. 
args: - - MONGODB_VERSION=${MONGODB_VERSION:-4.4} + - MONGODB_VERSION=${MONGODB_VERSION:-5.0} - MONGODB_IMAGE=${MONGODB_IMAGE:-percona/percona-server-mongodb} hostname: mongos labels: diff --git a/e2e-tests/docker/mongodb.dockerfile b/e2e-tests/docker/mongodb.dockerfile index d68fe861c..417c1027c 100644 --- a/e2e-tests/docker/mongodb.dockerfile +++ b/e2e-tests/docker/mongodb.dockerfile @@ -1,4 +1,4 @@ -ARG MONGODB_VERSION=4.4 +ARG MONGODB_VERSION=5.0 ARG MONGODB_IMAGE=percona/percona-server-mongodb FROM ${MONGODB_IMAGE}:${MONGODB_VERSION}-multi USER root diff --git a/e2e-tests/docker/pbm.dockerfile b/e2e-tests/docker/pbm.dockerfile index 503bd1e1f..4583659ee 100644 --- a/e2e-tests/docker/pbm.dockerfile +++ b/e2e-tests/docker/pbm.dockerfile @@ -1,4 +1,4 @@ -ARG MONGODB_VERSION=4.4 +ARG MONGODB_VERSION=5.0 ARG MONGODB_IMAGE=percona/percona-server-mongodb FROM ${MONGODB_IMAGE}:${MONGODB_VERSION}-multi as mongo_image diff --git a/e2e-tests/functions b/e2e-tests/functions index 9dc66dcf1..3d4fa189f 100644 --- a/e2e-tests/functions +++ b/e2e-tests/functions @@ -169,7 +169,7 @@ start_cluster() { mkdir "${test_dir}/docker/backups" chmod -R 777 "${test_dir}/docker/backups" fi - export MONGODB_VERSION=${mongo_version:-"4.4"} + export MONGODB_VERSION=${mongo_version:-"5.0"} export MONGODB_IMAGE=${MONGODB_IMAGE:-"percona/percona-server-mongodb"} docker compose -f $COMPOSE_PATH up --quiet-pull --no-color -d \ cfg01 cfg02 cfg03 rs101 rs102 rs103 rs201 rs202 rs203 mongos minio createbucket @@ -210,7 +210,7 @@ start_replset() { chmod -R 777 "${test_dir}/docker/backups" fi - export MONGODB_VERSION=${mongo_version:-"4.4"} + export MONGODB_VERSION=${mongo_version:-"5.0"} export MONGODB_IMAGE=${MONGODB_IMAGE:-"percona/percona-server-mongodb"} docker compose -f $compose up --quiet-pull --no-color -d \ $nodes diff --git a/e2e-tests/run-new-cluster b/e2e-tests/run-new-cluster index e5eada9d7..88b09bbef 100755 --- a/e2e-tests/run-new-cluster +++ b/e2e-tests/run-new-cluster @@ -6,7 +6,7 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . ${test_dir}/functions -MONGO_VERSION=${MONGODB_VERSION:-"4.4"} +MONGO_VERSION=${MONGODB_VERSION:-"5.0"} desc 'RUN RESTORE ON NEW CLUSTER TEST' diff --git a/e2e-tests/run-remapping b/e2e-tests/run-remapping index b9480433e..ec0d35a36 100755 --- a/e2e-tests/run-remapping +++ b/e2e-tests/run-remapping @@ -6,7 +6,7 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . ${test_dir}/functions -MONGO_VERSION=${MONGODB_VERSION:-"4.4"} +MONGO_VERSION=${MONGODB_VERSION:-"5.0"} desc 'RUN REMAPPING TESTS' diff --git a/e2e-tests/run-rs b/e2e-tests/run-rs index ce10fd261..1599af3f6 100755 --- a/e2e-tests/run-rs +++ b/e2e-tests/run-rs @@ -6,7 +6,7 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . ${test_dir}/functions -MONGO_VERSION=${MONGODB_VERSION:-"4.4"} +MONGO_VERSION=${MONGODB_VERSION:-"5.0"} desc 'RUN REPLICA SET TESTS' diff --git a/e2e-tests/run-sharded b/e2e-tests/run-sharded index 39b9f63d7..8f2f4eecf 100755 --- a/e2e-tests/run-sharded +++ b/e2e-tests/run-sharded @@ -6,7 +6,7 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . ${test_dir}/functions -MONGO_VERSION=${MONGODB_VERSION:-"4.4"} +MONGO_VERSION=${MONGODB_VERSION:-"5.0"} desc 'RUN SHARDED CLUTER TESTS' diff --git a/e2e-tests/run-single b/e2e-tests/run-single index 1f68921b1..257abe4cb 100755 --- a/e2e-tests/run-single +++ b/e2e-tests/run-single @@ -6,7 +6,7 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . 
${test_dir}/functions -MONGO_VERSION=${MONGODB_VERSION:-"4.4"} +MONGO_VERSION=${MONGODB_VERSION:-"5.0"} desc 'RUN REPLICA SET TESTS' diff --git a/e2e-tests/start-cluster b/e2e-tests/start-cluster index 2a95a12b9..66f6c6bd5 100755 --- a/e2e-tests/start-cluster +++ b/e2e-tests/start-cluster @@ -6,7 +6,7 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . ${test_dir}/functions -MONGO_VERSION=${MONGODB_VERSION:-"4.4"} +MONGO_VERSION=${MONGODB_VERSION:-"5.0"} desc 'Start cluster' start_cluster $MONGO_VERSION diff --git a/e2e-tests/start-replset b/e2e-tests/start-replset index 3cc7af616..31cc056e6 100755 --- a/e2e-tests/start-replset +++ b/e2e-tests/start-replset @@ -6,7 +6,7 @@ set -o xtrace test_dir=$(realpath $(dirname $0)) . ${test_dir}/functions -MONGO_VERSION=${MONGODB_VERSION:-"4.4"} +MONGO_VERSION=${MONGODB_VERSION:-"5.0"} desc 'Start replicaset' start_replset $MONGO_VERSION "$COMPOSE_RS_PATH" diff --git a/pbm/version/version.go b/pbm/version/version.go index 0ace69d11..2b11cce86 100644 --- a/pbm/version/version.go +++ b/pbm/version/version.go @@ -208,15 +208,11 @@ type FeatureSupport MongoVersion func (f FeatureSupport) PBMSupport() error { v := MongoVersion(f) - if v.Version[0] == 4 && v.Version[1] == 4 { - return nil - } - if (v.Version[0] >= 5 && v.Version[0] <= 7) && v.Version[1] == 0 { return nil } - return errors.New("Unsupported MongoDB version. PBM works with v4.4, v5.0, v6.0, v7.0") + return errors.New("Unsupported MongoDB version. PBM works with v5.0, v6.0, v7.0") } func (f FeatureSupport) FullPhysicalBackup() bool { From c1c12e9ed76d707d0acaede4aba63e4dfaad1cbd Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 31 Jul 2024 17:15:53 +0200 Subject: [PATCH 154/203] remove Jenkinsfile --- Jenkinsfile | 315 ---------------------------------------------------- 1 file changed, 315 deletions(-) delete mode 100644 Jenkinsfile diff --git a/Jenkinsfile b/Jenkinsfile deleted file mode 100644 index 2b3e92ccd..000000000 --- a/Jenkinsfile +++ /dev/null @@ -1,315 +0,0 @@ -def skipBranchBulds = true -if ( env.CHANGE_URL ) { - skipBranchBulds = false -} - -TestsReport = '| Test name | Logical | Physical |\\r\\n| ------------- | ------------- | ------------- |' -testsReportMap = [:] - -void makeReport() { - for ( test in testsReportMap ) { - TestsReport = TestsReport + "\\r\\n| ${test.key} | ${test.value.logical} | ${test.value.physical} |" - } -} - -void runTest(String TEST_NAME, String TEST_SCRIPT, String MONGO_VERSION, String BCP_TYPE) { - def mkey = "$TEST_NAME psmdb $MONGO_VERSION" - - if (!testsReportMap[mkey]) { - testsReportMap[mkey]=[:] - testsReportMap[mkey]['logical'] = '-' - testsReportMap[mkey]['physical'] = '-' - } - testsReportMap[mkey][BCP_TYPE] = 'failed' - - sh """ - chmod 777 -R e2e-tests/docker/backups - export MONGODB_VERSION=${MONGO_VERSION} - export TESTS_BCP_TYPE=${BCP_TYPE} - ./e2e-tests/${TEST_SCRIPT} - """ - - testsReportMap[mkey][BCP_TYPE] = 'passed' -} - -void prepareCluster(String CLUSTER_TYPE, String TEST_TYPE, String MONGO_VERSION) { - def compose = 'docker-compose.yaml' - - switch(CLUSTER_TYPE) { - case 'rs': - compose = 'docker-compose-rs.yaml' - break - case 'single': - compose = 'docker-compose-single.yaml' - break - default: - compose = 'docker-compose.yaml' - break - } - - withCredentials([file(credentialsId: 'PBM-AWS-S3', variable: 'PBM_AWS_S3_YML'), file(credentialsId: 'PBM-GCS-S3', variable: 'PBM_GCS_S3_YML'), file(credentialsId: 'PBM-AZURE', variable: 'PBM_AZURE_YML')]) { - sh """ - sudo curl -L 
"https://github.com/docker/compose/releases/download/1.25.3/docker-compose-\$(uname -s)-\$(uname -m)" -o /usr/local/bin/docker-compose - sudo chmod +x /usr/local/bin/docker-compose - - cp $PBM_AWS_S3_YML ./e2e-tests/docker/conf/aws.yaml - cp $PBM_GCS_S3_YML ./e2e-tests/docker/conf/gcs.yaml - cp $PBM_AZURE_YML ./e2e-tests/docker/conf/azure.yaml - sed -i s:pbme2etest:pbme2etest-${TEST_TYPE}:g ./e2e-tests/docker/conf/aws.yaml - sed -i s:pbme2etest:pbme2etest-${TEST_TYPE}:g ./e2e-tests/docker/conf/gcs.yaml - sed -i s:pbme2etest:pbme2etest-${TEST_TYPE}:g ./e2e-tests/docker/conf/azure.yaml - - chmod 664 ./e2e-tests/docker/conf/aws.yaml - chmod 664 ./e2e-tests/docker/conf/gcs.yaml - chmod 664 ./e2e-tests/docker/conf/azure.yaml - - openssl rand -base64 756 > ./e2e-tests/docker/keyFile - """ - } -} - -pipeline { - environment { - AUTHOR_NAME = sh(script: "echo ${CHANGE_AUTHOR_EMAIL} | awk -F'@' '{print \$1}'", , returnStdout: true).trim() - } - agent { - label 'micro-amazon' - } - stages { - stage('Run tests for PBM') { - when { - expression { - !skipBranchBulds - } - } - parallel { - stage('New cluster 5.0 logical') { - agent { - label 'docker' - } - steps { - prepareCluster('sharded', '50-newc-logic', '5.0') - runTest('New cluster', 'run-new-cluster', '5.0', 'logical') - } - } - stage('New cluster 6.0 logical') { - agent { - label 'docker' - } - steps { - prepareCluster('sharded', '60-newc-logic', '6.0') - runTest('New cluster', 'run-new-cluster', '6.0', 'logical') - } - } - stage('New cluster 7.0 logical') { - agent { - label 'docker' - } - steps { - prepareCluster('sharded', '70-newc-logic', '7.0') - runTest('New cluster', 'run-new-cluster', '7.0', 'logical') - } - } - - stage('Sharded 5.0 logical') { - agent { - label 'docker-32gb' - } - steps { - prepareCluster('sharded', '50-shrd-logic', '5.0') - runTest('Sharded', 'run-sharded', '5.0', 'logical') - } - } - stage('Sharded 6.0 logical') { - agent { - label 'docker-32gb' - } - steps { - prepareCluster('sharded', '60-shrd-logic', '6.0') - runTest('Sharded', 'run-sharded', '6.0', 'logical') - } - } - stage('Sharded 7.0 logical') { - agent { - label 'docker-32gb' - } - steps { - prepareCluster('sharded', '70-shrd-logic', '7.0') - runTest('Sharded', 'run-sharded', '7.0', 'logical') - } - } - - stage('Non-sharded 5.0 logical') { - agent { - label 'docker' - } - steps { - prepareCluster('rs', '50-rs-logic', '5.0') - runTest('Non-sharded', 'run-rs', '5.0', 'logical') - } - } - stage('Non-sharded 6.0 logical') { - agent { - label 'docker' - } - steps { - prepareCluster('rs', '60-rs-logic', '6.0') - runTest('Non-sharded', 'run-rs', '6.0', 'logical') - } - } - stage('Non-sharded 7.0 logical') { - agent { - label 'docker' - } - steps { - prepareCluster('rs', '70-rs-logic', '7.0') - runTest('Non-sharded', 'run-rs', '7.0', 'logical') - } - } - - stage('Single-node 5.0 logical') { - agent { - label 'docker' - } - steps { - prepareCluster('single', '50-single-logic', '5.0') - runTest('Single-node', 'run-single', '5.0', 'logical') - } - } - stage('Single-node 6.0 logical') { - agent { - label 'docker' - } - steps { - prepareCluster('single', '60-single-logic', '6.0') - runTest('Single-node', 'run-single', '6.0', 'logical') - } - } - stage('Single-node 7.0 logical') { - agent { - label 'docker' - } - steps { - prepareCluster('single', '70-single-logic', '7.0') - runTest('Single-node', 'run-single', '7.0', 'logical') - } - } - - stage('Sharded 5.0 physical') { - agent { - label 'docker-32gb' - } - steps { - prepareCluster('sharded', '50-shrd-phys', '5.0') - 
runTest('Sharded', 'run-sharded', '5.0', 'physical') - } - } - stage('Sharded 6.0 physical') { - agent { - label 'docker-32gb' - } - steps { - prepareCluster('sharded', '60-shrd-phys', '6.0') - runTest('Sharded', 'run-sharded', '6.0', 'physical') - } - } - stage('Sharded 7.0 physical') { - agent { - label 'docker-32gb' - } - steps { - prepareCluster('sharded', '70-shrd-phys', '7.0') - runTest('Sharded', 'run-sharded', '7.0', 'physical') - } - } - - stage('Non-sharded 5.0 physical') { - agent { - label 'docker' - } - steps { - prepareCluster('rs', '50-rs-phys', '5.0') - runTest('Non-sharded', 'run-rs', '5.0', 'physical') - } - } - stage('Non-sharded 6.0 physical') { - agent { - label 'docker' - } - steps { - prepareCluster('rs', '60-rs-phys', '6.0') - runTest('Non-sharded', 'run-rs', '6.0', 'physical') - } - } - stage('Non-sharded 7.0 physical') { - agent { - label 'docker' - } - steps { - prepareCluster('rs', '70-rs-phys', '7.0') - runTest('Non-sharded', 'run-rs', '7.0', 'physical') - } - } - - stage('Single-node 5.0 physical') { - agent { - label 'docker' - } - steps { - prepareCluster('single', '50-single-phys', '5.0') - runTest('Single-node', 'run-single', '5.0', 'physical') - } - } - stage('Single-node 6.0 physical') { - agent { - label 'docker' - } - steps { - prepareCluster('single', '60-single-phys', '6.0') - runTest('Single-node', 'run-single', '6.0', 'physical') - } - } - stage('Single-node 7.0 physical') { - agent { - label 'docker' - } - steps { - prepareCluster('single', '70-single-phys', '7.0') - runTest('Single-node', 'run-single', '7.0', 'physical') - } - } - } - } - } - post { - always { - script { - if (env.CHANGE_URL) { - withCredentials([string(credentialsId: 'GITHUB_API_TOKEN', variable: 'GITHUB_API_TOKEN')]) { - makeReport() - sh """ - curl -v -X POST \ - -H "Authorization: token ${GITHUB_API_TOKEN}" \ - -d "{\\"body\\":\\"${TestsReport}\\"}" \ - "https://api.github.com/repos/\$(echo $CHANGE_URL | cut -d '/' -f 4-5)/issues/${CHANGE_ID}/comments" - """ - } - } - } - sh ''' - sudo docker rmi -f \$(sudo docker images -q) || true - sudo rm -rf ./* - ''' - deleteDir() - } - failure { - script { - try { - slackSend channel: "@${AUTHOR_NAME}", color: '#FF0000', message: "[${JOB_NAME}]: build ${currentBuild.result}, ${BUILD_URL} owner: @${AUTHOR_NAME}" - } catch (exc) { - slackSend channel: '#cloud-dev-ci', color: '#FF0000', message: "[${JOB_NAME}]: build ${currentBuild.result}, ${BUILD_URL} owner: @${AUTHOR_NAME}" - } - } - } - } -} From 4f5adec2102814b95c6d57bda3b701a0ce58300b Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 5 Aug 2024 10:00:49 +0200 Subject: [PATCH 155/203] add check for nil --- cmd/pbm-agent/agent.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 31e7fc8df..0ebbb46f0 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -303,7 +303,7 @@ func (a *Agent) HbStatus(ctx context.Context) { } } - if inf.ArbiterOnly { + if inf != nil && inf.ArbiterOnly { hb.State = defs.NodeStateArbiter hb.StateStr = "ARBITER" } else { From e56282879b7d7fad70f74857e2616009ca416afe Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 5 Aug 2024 10:05:48 +0200 Subject: [PATCH 156/203] [PBM-1303] add --wait-time param to configure wait time (#969) --- cmd/pbm/backup.go | 10 ++++++++ cmd/pbm/common.go | 3 +++ cmd/pbm/config.go | 25 ++++++++++-------- cmd/pbm/delete.go | 40 +++++++++++++++++++++-------- cmd/pbm/main.go | 18 +++++++++++++ cmd/pbm/oplog.go | 21 ++++++++++++---- 
cmd/pbm/profile.go | 63 ++++++++++++++++++++++++++++++++++++---------- cmd/pbm/restore.go | 10 ++++++++ 8 files changed, 150 insertions(+), 40 deletions(-) diff --git a/cmd/pbm/backup.go b/cmd/pbm/backup.go index ad71a3fe8..52bbd5989 100644 --- a/cmd/pbm/backup.go +++ b/cmd/pbm/backup.go @@ -36,6 +36,7 @@ type backupOpts struct { profile string ns string wait bool + waitTime time.Duration externList bool } @@ -191,11 +192,20 @@ func runBackup( } if b.wait { + if b.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, b.waitTime) + defer cancel() + } + fmt.Printf("\nWaiting for '%s' backup...", b.name) s, err := waitBackup(ctx, conn, b.name, defs.StatusDone) if s != nil { fmt.Printf(" %s\n", *s) } + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } return outMsg{}, err } diff --git a/cmd/pbm/common.go b/cmd/pbm/common.go index c1f472d8b..77fb6eb86 100644 --- a/cmd/pbm/common.go +++ b/cmd/pbm/common.go @@ -6,8 +6,11 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/ctrl" + "github.com/percona/percona-backup-mongodb/pbm/errors" ) +var errWaitTimeout = errors.New("Operation is in progress. Check pbm status and logs") + func sendCmd(ctx context.Context, conn connect.Client, cmd ctrl.Cmd) error { cmd.TS = time.Now().UTC().Unix() _, err := conn.CmdStreamCollection().InsertOne(ctx, cmd) diff --git a/cmd/pbm/config.go b/cmd/pbm/config.go index ded50a133..fd1e3160b 100644 --- a/cmd/pbm/config.go +++ b/cmd/pbm/config.go @@ -17,15 +17,14 @@ import ( "github.com/percona/percona-backup-mongodb/sdk" ) -const resyncWaitDuration = 30 * time.Second - type configOpts struct { - rsync bool - wait bool - list bool - file string - set map[string]string - key string + rsync bool + wait bool + waitTime time.Duration + list bool + file string + set map[string]string + key string } type confKV struct { @@ -87,14 +86,18 @@ func runConfig(ctx context.Context, conn connect.Client, pbm sdk.Client, c *conf return outMsg{"Storage resync started"}, nil } - ctx, cancel := context.WithTimeout(ctx, resyncWaitDuration) - defer cancel() + if c.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, c.waitTime) + defer cancel() + } err = sdk.WaitForResync(ctx, pbm, cid) if err != nil { if errors.Is(err, context.DeadlineExceeded) { - err = errors.New("timeout") + err = errWaitTimeout } + return nil, errors.Wrapf(err, "waiting for resync [opid %q]", cid) } diff --git a/cmd/pbm/delete.go b/cmd/pbm/delete.go index e77f32586..0fd16ac7a 100644 --- a/cmd/pbm/delete.go +++ b/cmd/pbm/delete.go @@ -153,6 +153,7 @@ type deletePitrOpts struct { yes bool all bool wait bool + waitTime time.Duration dryRun bool } @@ -222,13 +223,24 @@ func deletePITR( return outMsg{"Processing by agents. Please check status later"}, nil } - return waitForDelete(ctx, conn, pbm, cid) + if d.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, d.waitTime) + defer cancel() + } + + rv, err := waitForDelete(ctx, conn, pbm, cid) + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } + return rv, err } type cleanupOptions struct { olderThan string yes bool wait bool + waitTime time.Duration dryRun bool } @@ -274,7 +286,17 @@ func doCleanup(ctx context.Context, conn connect.Client, pbm sdk.Client, d *clea return outMsg{"Processing by agents. 
Please check status later"}, nil } - return waitForDelete(ctx, conn, pbm, cid) + if d.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, d.waitTime) + defer cancel() + } + + rv, err := waitForDelete(ctx, conn, pbm, cid) + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } + return rv, err } func parseOlderThan(s string) (primitive.Timestamp, error) { @@ -401,7 +423,7 @@ func askConfirmation(question string) error { } func waitForDelete(ctx context.Context, conn connect.Client, pbm sdk.Client, cid sdk.CommandID) (fmt.Stringer, error) { - progressCtx, stopProgress := context.WithCancel(ctx) + commandCtx, stopProgress := context.WithCancel(ctx) defer stopProgress() go func() { @@ -411,13 +433,13 @@ func waitForDelete(ctx context.Context, conn connect.Client, pbm sdk.Client, cid select { case <-tick.C: fmt.Print(".") - case <-progressCtx.Done(): + case <-commandCtx.Done(): return } } }() - cmd, err := pbm.CommandInfo(progressCtx, cid) + cmd, err := pbm.CommandInfo(commandCtx, cid) if err != nil { return nil, errors.Wrap(err, "get command info") } @@ -434,17 +456,13 @@ func waitForDelete(ctx context.Context, conn connect.Client, pbm sdk.Client, cid return nil, errors.New("wrong command") } - waitCtx, stopWaiting := context.WithTimeout(progressCtx, time.Minute) - err = waitFn(waitCtx, pbm) - stopWaiting() + err = waitFn(commandCtx, pbm) if err != nil { if !errors.Is(err, context.DeadlineExceeded) { return nil, err } - waitCtx, stopWaiting := context.WithTimeout(progressCtx, time.Minute) - msg, err := sdk.WaitForErrorLog(waitCtx, pbm, cmd) - stopWaiting() + msg, err := sdk.WaitForErrorLog(ctx, pbm, cmd) if err != nil { return nil, errors.Wrap(err, "read agents log") } diff --git a/cmd/pbm/main.go b/cmd/pbm/main.go index 532c45c78..78091d826 100644 --- a/cmd/pbm/main.go +++ b/cmd/pbm/main.go @@ -98,6 +98,8 @@ func main() { configCmd.Flag("wait", "Wait for finish"). Short('w'). BoolVar(&cfg.wait) + configCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&cfg.waitTime) configProfileCmd := pbmCmd. Command("profile", "Configuration profiles") @@ -132,6 +134,8 @@ func main() { Flag("wait", "Wait for done by agents"). Short('w'). BoolVar(&addConfigProfileOpts.wait) + addConfigProfileCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&addConfigProfileOpts.waitTime) removeConfigProfileOpts := removeConfigProfileOptions{} removeConfigProfileCmd := configProfileCmd. @@ -144,6 +148,8 @@ func main() { Flag("wait", "Wait for done by agents"). Short('w'). BoolVar(&removeConfigProfileOpts.wait) + removeConfigProfileCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&removeConfigProfileOpts.waitTime) syncConfigProfileOpts := syncConfigProfileOptions{} syncConfigProfileCmd := configProfileCmd. @@ -161,6 +167,8 @@ func main() { Flag("wait", "Wait for done by agents"). Short('w'). BoolVar(&syncConfigProfileOpts.wait) + syncConfigProfileCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&syncConfigProfileOpts.waitTime) backupCmd := pbmCmd.Command("backup", "Make backup") backupOptions := backupOpts{} @@ -196,6 +204,8 @@ func main() { backupCmd.Flag("wait", "Wait for the backup to finish"). Short('w'). BoolVar(&backupOptions.wait) + backupCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&backupOptions.waitTime) backupCmd.Flag("list-files", "Shows the list of files per node to copy (only for external backups)"). Short('l'). 
BoolVar(&backupOptions.externList) @@ -239,6 +249,8 @@ func main() { restoreCmd.Flag("wait", "Wait for the restore to finish."). Short('w'). BoolVar(&restore.wait) + restoreCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&restore.waitTime) restoreCmd.Flag("external", "External restore."). Short('x'). BoolVar(&restore.extern) @@ -263,6 +275,8 @@ func main() { replayCmd.Flag("wait", "Wait for the restore to finish."). Short('w'). BoolVar(&replayOpts.wait) + replayCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&replayOpts.waitTime) replayCmd.Flag(RSMappingFlag, RSMappingDoc). Envar(RSMappingEnvVar). StringVar(&replayOpts.rsMap) @@ -341,6 +355,8 @@ func main() { deletePitrCmd.Flag("wait", "Wait for deletion done"). Short('w'). BoolVar(&deletePitr.wait) + deletePitrCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&deletePitr.waitTime) deletePitrCmd.Flag("dry-run", "Report but do not delete"). BoolVar(&deletePitr.dryRun) @@ -357,6 +373,8 @@ func main() { cleanupCmd.Flag("wait", "Wait for deletion done"). Short('w'). BoolVar(&cleanupOpts.wait) + cleanupCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&cleanupOpts.waitTime) cleanupCmd.Flag("dry-run", "Report but do not delete"). BoolVar(&cleanupOpts.dryRun) diff --git a/cmd/pbm/oplog.go b/cmd/pbm/oplog.go index da7502abd..c72be995d 100644 --- a/cmd/pbm/oplog.go +++ b/cmd/pbm/oplog.go @@ -13,10 +13,11 @@ import ( ) type replayOptions struct { - start string - end string - wait bool - rsMap string + start string + end string + wait bool + waitTime time.Duration + rsMap string } type oplogReplayResult struct { @@ -91,10 +92,20 @@ func replayOplog(ctx context.Context, conn connect.Client, o replayOptions, outf return oplogReplayResult{Name: name}, nil } + if o.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, o.waitTime) + defer cancel() + } + fmt.Print("Started.\nWaiting to finish") err = waitRestore(ctx, conn, m, defs.StatusDone, 0) if err != nil { - return oplogReplayResult{err: err.Error()}, nil //nolint:nilerr + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } + + return oplogReplayResult{err: err.Error()}, nil } return oplogReplayResult{Name: name, done: true}, nil diff --git a/cmd/pbm/profile.go b/cmd/pbm/profile.go index 7aae35b75..17a6e243a 100644 --- a/cmd/pbm/profile.go +++ b/cmd/pbm/profile.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "strings" + "time" "go.mongodb.org/mongo-driver/mongo" @@ -18,22 +19,25 @@ type showConfigProfileOptions struct { } type addConfigProfileOptions struct { - name string - file *os.File - sync bool - wait bool + name string + file *os.File + sync bool + wait bool + waitTime time.Duration } type removeConfigProfileOptions struct { - name string - wait bool + name string + wait bool + waitTime time.Duration } type syncConfigProfileOptions struct { - name string - all bool - clear bool - wait bool + name string + all bool + clear bool + wait bool + waitTime time.Duration } type configProfileList struct { @@ -142,8 +146,18 @@ func handleAddConfigProfile( } if opts.wait { + if opts.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, opts.waitTime) + defer cancel() + } + err = sdk.WaitForResync(ctx, pbm, cid) if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } + return nil, errors.Wrap(err, "wait") } } @@ -173,9 +187,22 @@ func handleRemoveConfigProfile( if err != nil { return nil, errors.Wrap(err, "sdk: remove config profile") } 
- err = sdk.WaitForCommandWithErrorLog(ctx, pbm, cid) - if err != nil { - return nil, errors.Wrap(err, "wait") + + if opts.wait { + if opts.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, opts.waitTime) + defer cancel() + } + + err = sdk.WaitForCommandWithErrorLog(ctx, pbm, cid) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } + + return nil, errors.Wrap(err, "wait") + } } return &outMsg{"OK"}, nil @@ -214,8 +241,18 @@ func handleSyncConfigProfile( } if opts.wait { + if opts.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, opts.waitTime) + defer cancel() + } + err = sdk.WaitForResync(ctx, pbm, cid) if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } + return nil, errors.Wrap(err, "wait") } } diff --git a/cmd/pbm/restore.go b/cmd/pbm/restore.go index 648dbf4eb..e9d76dd55 100644 --- a/cmd/pbm/restore.go +++ b/cmd/pbm/restore.go @@ -33,6 +33,7 @@ type restoreOpts struct { pitr string pitrBase string wait bool + waitTime time.Duration extern bool ns string usersAndRoles bool @@ -141,6 +142,12 @@ func runRestore(ctx context.Context, conn connect.Client, o *restoreOpts, outf o }, nil } + if o.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, o.waitTime) + defer cancel() + } + typ := " logical restore.\nWaiting to finish" if m.Type == defs.PhysicalBackup { typ = " physical restore.\nWaiting to finish" @@ -158,6 +165,9 @@ func runRestore(ctx context.Context, conn connect.Client, o *restoreOpts, outf o if errors.Is(err, restoreFailedError{}) { return restoreRet{err: err.Error()}, nil } + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } return restoreRet{err: fmt.Sprintf("%s.\n Try to check logs on node %s", err.Error(), m.Leader)}, nil } From 7e29a80bb14f837c6ee24ea99e121a81c5e3a38c Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 5 Aug 2024 10:06:13 +0200 Subject: [PATCH 157/203] [PBM-1344] skip chunk between last chunk and last backup if there is no such (#975) --- pbm/slicer/slicer.go | 14 ++++++++++---- pbm/storage/storage.go | 13 +++++++++++++ 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/pbm/slicer/slicer.go b/pbm/slicer/slicer.go index c651d86ee..4dd90a264 100644 --- a/pbm/slicer/slicer.go +++ b/pbm/slicer/slicer.go @@ -145,11 +145,17 @@ func (s *Slicer) Catchup(ctx context.Context) error { cfg.PITR.Compression, cfg.PITR.CompressionLevel) if err != nil { - return err - } + var rangeErr oplog.InsuffRangeError + if !errors.As(err, &rangeErr) { + return err + } - s.l.Info("uploaded chunk %s - %s", formatts(lastChunk.EndTS), formatts(rs.FirstWriteTS)) - s.lastTS = rs.FirstWriteTS + s.l.Warning("skip chunk %s - %s: %v", + formatts(lastChunk.EndTS), formatts(rs.FirstWriteTS), rangeErr) + } else { + s.l.Info("uploaded chunk %s - %s", formatts(lastChunk.EndTS), formatts(rs.FirstWriteTS)) + s.lastTS = rs.FirstWriteTS + } } err = s.copyReplsetOplog(ctx, rs) diff --git a/pbm/storage/storage.go b/pbm/storage/storage.go index ded8ad6f2..25f223977 100644 --- a/pbm/storage/storage.go +++ b/pbm/storage/storage.go @@ -172,6 +172,19 @@ func (rwe rwError) Error() string { return r } +func (rwe rwError) Unwrap() error { + if rwe.read != nil { + return rwe.read + } + if rwe.write != nil { + return rwe.write + } + if rwe.compress != nil { + return rwe.compress + } + return nil +} + func (rwe rwError) nil() bool { return rwe.read == nil && 
rwe.compress == nil && rwe.write == nil } From ba9aa4770b1d3d5475cfb6ab0cb279effeb5e685 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 5 Aug 2024 10:05:48 +0200 Subject: [PATCH 158/203] [PBM-1303] add --wait-time param to configure wait time (#969) --- cmd/pbm/backup.go | 10 ++++++++ cmd/pbm/common.go | 3 +++ cmd/pbm/config.go | 25 ++++++++++-------- cmd/pbm/delete.go | 47 +++++++++++++++++++++++++--------- cmd/pbm/main.go | 18 +++++++++++++ cmd/pbm/oplog.go | 21 ++++++++++++---- cmd/pbm/profile.go | 63 ++++++++++++++++++++++++++++++++++++---------- cmd/pbm/restore.go | 10 ++++++++ 8 files changed, 156 insertions(+), 41 deletions(-) diff --git a/cmd/pbm/backup.go b/cmd/pbm/backup.go index 0845a1db7..dd0ed0c24 100644 --- a/cmd/pbm/backup.go +++ b/cmd/pbm/backup.go @@ -36,6 +36,7 @@ type backupOpts struct { profile string ns string wait bool + waitTime time.Duration externList bool } @@ -191,11 +192,20 @@ func runBackup( } if b.wait { + if b.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, b.waitTime) + defer cancel() + } + fmt.Printf("\nWaiting for '%s' backup...", b.name) s, err := waitBackup(ctx, conn, b.name, defs.StatusDone) if s != nil { fmt.Printf(" %s\n", *s) } + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } return outMsg{}, err } diff --git a/cmd/pbm/common.go b/cmd/pbm/common.go index c1f472d8b..77fb6eb86 100644 --- a/cmd/pbm/common.go +++ b/cmd/pbm/common.go @@ -6,8 +6,11 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/ctrl" + "github.com/percona/percona-backup-mongodb/pbm/errors" ) +var errWaitTimeout = errors.New("Operation is in progress. Check pbm status and logs") + func sendCmd(ctx context.Context, conn connect.Client, cmd ctrl.Cmd) error { cmd.TS = time.Now().UTC().Unix() _, err := conn.CmdStreamCollection().InsertOne(ctx, cmd) diff --git a/cmd/pbm/config.go b/cmd/pbm/config.go index a7e500a33..64fa9c0f5 100644 --- a/cmd/pbm/config.go +++ b/cmd/pbm/config.go @@ -17,15 +17,14 @@ import ( "github.com/percona/percona-backup-mongodb/sdk" ) -const resyncWaitDuration = 30 * time.Second - type configOpts struct { - rsync bool - wait bool - list bool - file string - set map[string]string - key string + rsync bool + wait bool + waitTime time.Duration + list bool + file string + set map[string]string + key string } type confKV struct { @@ -87,14 +86,18 @@ func runConfig(ctx context.Context, conn connect.Client, pbm *sdk.Client, c *con return outMsg{"Storage resync started"}, nil } - ctx, cancel := context.WithTimeout(ctx, resyncWaitDuration) - defer cancel() + if c.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, c.waitTime) + defer cancel() + } err = sdk.WaitForResync(ctx, pbm, cid) if err != nil { if errors.Is(err, context.DeadlineExceeded) { - err = errors.New("timeout") + err = errWaitTimeout } + return nil, errors.Wrapf(err, "waiting for resync [opid %q]", cid) } diff --git a/cmd/pbm/delete.go b/cmd/pbm/delete.go index 0ac071275..923496fd7 100644 --- a/cmd/pbm/delete.go +++ b/cmd/pbm/delete.go @@ -153,6 +153,7 @@ type deletePitrOpts struct { yes bool all bool wait bool + waitTime time.Duration dryRun bool } @@ -222,13 +223,24 @@ func deletePITR( return outMsg{"Processing by agents. 
Please check status later"}, nil } - return waitForDelete(ctx, conn, pbm, cid) + if d.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, d.waitTime) + defer cancel() + } + + rv, err := waitForDelete(ctx, conn, pbm, cid) + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } + return rv, err } type cleanupOptions struct { olderThan string yes bool wait bool + waitTime time.Duration dryRun bool } @@ -274,7 +286,17 @@ func doCleanup(ctx context.Context, conn connect.Client, pbm *sdk.Client, d *cle return outMsg{"Processing by agents. Please check status later"}, nil } - return waitForDelete(ctx, conn, pbm, cid) + if d.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, d.waitTime) + defer cancel() + } + + rv, err := waitForDelete(ctx, conn, pbm, cid) + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } + return rv, err } func parseOlderThan(s string) (primitive.Timestamp, error) { @@ -400,8 +422,13 @@ func askConfirmation(question string) error { return errUserCanceled } -func waitForDelete(ctx context.Context, conn connect.Client, pbm *sdk.Client, cid sdk.CommandID) (fmt.Stringer, error) { - progressCtx, stopProgress := context.WithCancel(ctx) +func waitForDelete( + ctx context.Context, + conn connect.Client, + pbm *sdk.Client, + cid sdk.CommandID, +) (fmt.Stringer, error) { + commandCtx, stopProgress := context.WithCancel(ctx) defer stopProgress() go func() { @@ -411,13 +438,13 @@ func waitForDelete(ctx context.Context, conn connect.Client, pbm *sdk.Client, ci select { case <-tick.C: fmt.Print(".") - case <-progressCtx.Done(): + case <-commandCtx.Done(): return } } }() - cmd, err := pbm.CommandInfo(progressCtx, cid) + cmd, err := pbm.CommandInfo(commandCtx, cid) if err != nil { return nil, errors.Wrap(err, "get command info") } @@ -434,17 +461,13 @@ func waitForDelete(ctx context.Context, conn connect.Client, pbm *sdk.Client, ci return nil, errors.New("wrong command") } - waitCtx, stopWaiting := context.WithTimeout(progressCtx, time.Minute) - err = waitFn(waitCtx, pbm) - stopWaiting() + err = waitFn(commandCtx, pbm) if err != nil { if !errors.Is(err, context.DeadlineExceeded) { return nil, err } - waitCtx, stopWaiting := context.WithTimeout(progressCtx, time.Minute) - msg, err := sdk.WaitForErrorLog(waitCtx, pbm, cmd) - stopWaiting() + msg, err := sdk.WaitForErrorLog(ctx, pbm, cmd) if err != nil { return nil, errors.Wrap(err, "read agents log") } diff --git a/cmd/pbm/main.go b/cmd/pbm/main.go index 58b338dea..a71143d35 100644 --- a/cmd/pbm/main.go +++ b/cmd/pbm/main.go @@ -97,6 +97,8 @@ func main() { configCmd.Flag("wait", "Wait for finish"). Short('w'). BoolVar(&cfg.wait) + configCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&cfg.waitTime) configProfileCmd := pbmCmd. Command("profile", "Configuration profiles") @@ -131,6 +133,8 @@ func main() { Flag("wait", "Wait for done by agents"). Short('w'). BoolVar(&addConfigProfileOpts.wait) + addConfigProfileCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&addConfigProfileOpts.waitTime) removeConfigProfileOpts := removeConfigProfileOptions{} removeConfigProfileCmd := configProfileCmd. @@ -143,6 +147,8 @@ func main() { Flag("wait", "Wait for done by agents"). Short('w'). BoolVar(&removeConfigProfileOpts.wait) + removeConfigProfileCmd.Flag("wait-time", "Maximum wait time"). 
+ DurationVar(&removeConfigProfileOpts.waitTime) syncConfigProfileOpts := syncConfigProfileOptions{} syncConfigProfileCmd := configProfileCmd. @@ -160,6 +166,8 @@ func main() { Flag("wait", "Wait for done by agents"). Short('w'). BoolVar(&syncConfigProfileOpts.wait) + syncConfigProfileCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&syncConfigProfileOpts.waitTime) backupCmd := pbmCmd.Command("backup", "Make backup") backupOptions := backupOpts{} @@ -195,6 +203,8 @@ func main() { backupCmd.Flag("wait", "Wait for the backup to finish"). Short('w'). BoolVar(&backupOptions.wait) + backupCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&backupOptions.waitTime) backupCmd.Flag("list-files", "Shows the list of files per node to copy (only for external backups)"). Short('l'). BoolVar(&backupOptions.externList) @@ -238,6 +248,8 @@ func main() { restoreCmd.Flag("wait", "Wait for the restore to finish."). Short('w'). BoolVar(&restore.wait) + restoreCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&restore.waitTime) restoreCmd.Flag("external", "External restore."). Short('x'). BoolVar(&restore.extern) @@ -262,6 +274,8 @@ func main() { replayCmd.Flag("wait", "Wait for the restore to finish."). Short('w'). BoolVar(&replayOpts.wait) + replayCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&replayOpts.waitTime) replayCmd.Flag(RSMappingFlag, RSMappingDoc). Envar(RSMappingEnvVar). StringVar(&replayOpts.rsMap) @@ -340,6 +354,8 @@ func main() { deletePitrCmd.Flag("wait", "Wait for deletion done"). Short('w'). BoolVar(&deletePitr.wait) + deletePitrCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&deletePitr.waitTime) deletePitrCmd.Flag("dry-run", "Report but do not delete"). BoolVar(&deletePitr.dryRun) @@ -356,6 +372,8 @@ func main() { cleanupCmd.Flag("wait", "Wait for deletion done"). Short('w'). BoolVar(&cleanupOpts.wait) + cleanupCmd.Flag("wait-time", "Maximum wait time"). + DurationVar(&cleanupOpts.waitTime) cleanupCmd.Flag("dry-run", "Report but do not delete"). 
BoolVar(&cleanupOpts.dryRun) diff --git a/cmd/pbm/oplog.go b/cmd/pbm/oplog.go index da7502abd..c72be995d 100644 --- a/cmd/pbm/oplog.go +++ b/cmd/pbm/oplog.go @@ -13,10 +13,11 @@ import ( ) type replayOptions struct { - start string - end string - wait bool - rsMap string + start string + end string + wait bool + waitTime time.Duration + rsMap string } type oplogReplayResult struct { @@ -91,10 +92,20 @@ func replayOplog(ctx context.Context, conn connect.Client, o replayOptions, outf return oplogReplayResult{Name: name}, nil } + if o.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, o.waitTime) + defer cancel() + } + fmt.Print("Started.\nWaiting to finish") err = waitRestore(ctx, conn, m, defs.StatusDone, 0) if err != nil { - return oplogReplayResult{err: err.Error()}, nil //nolint:nilerr + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } + + return oplogReplayResult{err: err.Error()}, nil } return oplogReplayResult{Name: name, done: true}, nil diff --git a/cmd/pbm/profile.go b/cmd/pbm/profile.go index f2eb9e8b3..f6617cc8e 100644 --- a/cmd/pbm/profile.go +++ b/cmd/pbm/profile.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "strings" + "time" "go.mongodb.org/mongo-driver/mongo" @@ -18,22 +19,25 @@ type showConfigProfileOptions struct { } type addConfigProfileOptions struct { - name string - file *os.File - sync bool - wait bool + name string + file *os.File + sync bool + wait bool + waitTime time.Duration } type removeConfigProfileOptions struct { - name string - wait bool + name string + wait bool + waitTime time.Duration } type syncConfigProfileOptions struct { - name string - all bool - clear bool - wait bool + name string + all bool + clear bool + wait bool + waitTime time.Duration } type configProfileList struct { @@ -142,8 +146,18 @@ func handleAddConfigProfile( } if opts.wait { + if opts.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, opts.waitTime) + defer cancel() + } + err = sdk.WaitForResync(ctx, pbm, cid) if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } + return nil, errors.Wrap(err, "wait") } } @@ -173,9 +187,22 @@ func handleRemoveConfigProfile( if err != nil { return nil, errors.Wrap(err, "sdk: remove config profile") } - err = sdk.WaitForCommandWithErrorLog(ctx, pbm, cid) - if err != nil { - return nil, errors.Wrap(err, "wait") + + if opts.wait { + if opts.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, opts.waitTime) + defer cancel() + } + + err = sdk.WaitForCommandWithErrorLog(ctx, pbm, cid) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } + + return nil, errors.Wrap(err, "wait") + } } return &outMsg{"OK"}, nil @@ -214,8 +241,18 @@ func handleSyncConfigProfile( } if opts.wait { + if opts.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, opts.waitTime) + defer cancel() + } + err = sdk.WaitForResync(ctx, pbm, cid) if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } + return nil, errors.Wrap(err, "wait") } } diff --git a/cmd/pbm/restore.go b/cmd/pbm/restore.go index 648dbf4eb..e9d76dd55 100644 --- a/cmd/pbm/restore.go +++ b/cmd/pbm/restore.go @@ -33,6 +33,7 @@ type restoreOpts struct { pitr string pitrBase string wait bool + waitTime time.Duration extern bool ns string usersAndRoles bool @@ -141,6 +142,12 @@ func runRestore(ctx context.Context, 
conn connect.Client, o *restoreOpts, outf o }, nil } + if o.waitTime > time.Second { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, o.waitTime) + defer cancel() + } + typ := " logical restore.\nWaiting to finish" if m.Type == defs.PhysicalBackup { typ = " physical restore.\nWaiting to finish" @@ -158,6 +165,9 @@ func runRestore(ctx context.Context, conn connect.Client, o *restoreOpts, outf o if errors.Is(err, restoreFailedError{}) { return restoreRet{err: err.Error()}, nil } + if errors.Is(err, context.DeadlineExceeded) { + err = errWaitTimeout + } return restoreRet{err: fmt.Sprintf("%s.\n Try to check logs on node %s", err.Error(), m.Leader)}, nil } From 1a91de6ab384134d3b0c21a3f59a8310b862e9ff Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 5 Aug 2024 10:06:13 +0200 Subject: [PATCH 159/203] [PBM-1344] skip chunk between last chunk and last backup if there is no such (#975) --- pbm/slicer/slicer.go | 14 ++++++++++---- pbm/storage/storage.go | 13 +++++++++++++ 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/pbm/slicer/slicer.go b/pbm/slicer/slicer.go index c651d86ee..4dd90a264 100644 --- a/pbm/slicer/slicer.go +++ b/pbm/slicer/slicer.go @@ -145,11 +145,17 @@ func (s *Slicer) Catchup(ctx context.Context) error { cfg.PITR.Compression, cfg.PITR.CompressionLevel) if err != nil { - return err - } + var rangeErr oplog.InsuffRangeError + if !errors.As(err, &rangeErr) { + return err + } - s.l.Info("uploaded chunk %s - %s", formatts(lastChunk.EndTS), formatts(rs.FirstWriteTS)) - s.lastTS = rs.FirstWriteTS + s.l.Warning("skip chunk %s - %s: %v", + formatts(lastChunk.EndTS), formatts(rs.FirstWriteTS), rangeErr) + } else { + s.l.Info("uploaded chunk %s - %s", formatts(lastChunk.EndTS), formatts(rs.FirstWriteTS)) + s.lastTS = rs.FirstWriteTS + } } err = s.copyReplsetOplog(ctx, rs) diff --git a/pbm/storage/storage.go b/pbm/storage/storage.go index ded8ad6f2..25f223977 100644 --- a/pbm/storage/storage.go +++ b/pbm/storage/storage.go @@ -172,6 +172,19 @@ func (rwe rwError) Error() string { return r } +func (rwe rwError) Unwrap() error { + if rwe.read != nil { + return rwe.read + } + if rwe.write != nil { + return rwe.write + } + if rwe.compress != nil { + return rwe.compress + } + return nil +} + func (rwe rwError) nil() bool { return rwe.read == nil && rwe.compress == nil && rwe.write == nil } From 870dd42b3877e01327a98121f640710b5642b56d Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 5 Aug 2024 10:22:46 +0200 Subject: [PATCH 160/203] Add stale lock check within PITR main loop (#977) --- cmd/pbm-agent/pitr.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index f5ffd04f3..ece27b8c2 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -122,6 +122,11 @@ func (a *Agent) PITR(ctx context.Context) { // canSlicingNow returns lock.ConcurrentOpError if there is a parallel operation. // Only physical backups (full, incremental, external) is allowed. 
func canSlicingNow(ctx context.Context, conn connect.Client, stgCfg *config.StorageConf) error { + ts, err := topo.GetClusterTime(ctx, conn) + if err != nil { + return errors.Wrap(err, "read cluster time") + } + locks, err := lock.GetLocks(ctx, conn, &lock.LockHeader{}) if err != nil { return errors.Wrap(err, "get locks data") @@ -130,6 +135,11 @@ func canSlicingNow(ctx context.Context, conn connect.Client, stgCfg *config.Stor for i := range locks { l := &locks[i] + if l.Heartbeat.T+defs.StaleFrameSec < ts.T { + // lock is stale, PITR can ignore it + continue + } + if l.Type != ctrl.CmdBackup { return lock.ConcurrentOpError{l.LockHeader} } From ceff3eaa1065d6309a9da7e0f58ab4cda44cf059 Mon Sep 17 00:00:00 2001 From: Daniel Oliver Date: Tue, 25 Jun 2024 10:13:13 +0100 Subject: [PATCH 161/203] PBM-1329 Write to temporary file name and sync before renaming --- pbm/storage/fs/fs.go | 38 ++++++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/pbm/storage/fs/fs.go b/pbm/storage/fs/fs.go index 4a42b5eaa..290b7ecd1 100644 --- a/pbm/storage/fs/fs.go +++ b/pbm/storage/fs/fs.go @@ -80,7 +80,8 @@ func (*FS) Type() storage.Type { } func (fs *FS) Save(name string, data io.Reader, _ int64) error { - filepath := path.Join(fs.root, name) + filepath := path.Join(fs.root, name+".tmp") + finalpath := path.Join(fs.root, name) err := os.MkdirAll(path.Dir(filepath), os.ModeDir|0o755) if err != nil { @@ -103,7 +104,18 @@ func (fs *FS) Save(name string, data io.Reader, _ int64) error { return errors.Wrapf(err, "copy file <%s>", filepath) } - return errors.Wrap(fw.Sync(), "write to file") + err = fw.Sync() + if err != nil { + return errors.Wrapf(err, "sync file <%s>", filepath) + } + + err = fw.Close() + if err != nil { + return errors.Wrapf(err, "close file <%s>", filepath) + } + + err = os.Rename(filepath, finalpath) + return errors.Wrapf(err, "rename <%s> to <%s>", filepath, finalpath) } func (fs *FS) SourceReader(name string) (io.ReadCloser, error) { @@ -174,7 +186,8 @@ func (fs *FS) Copy(src, dst string) error { return errors.Wrap(err, "open src") } - destFilename := path.Join(fs.root, dst) + destFilename := path.Join(fs.root, dst+".tmp") + finalFilename := path.Join(fs.root, dst) err = os.MkdirAll(path.Dir(destFilename), os.ModeDir|0o755) if err != nil { return errors.Wrap(err, "create dst dir") @@ -184,8 +197,25 @@ func (fs *FS) Copy(src, dst string) error { if err != nil { return errors.Wrap(err, "create dst") } + defer to.Close() + _, err = io.Copy(to, from) - return err + if err != nil { + return errors.Wrapf(err, "copy to <%s>", destFilename) + } + + err = to.Sync() + if err != nil { + return errors.Wrapf(err, "sync file <%s>", destFilename) + } + + err = to.Close() + if err != nil { + return errors.Wrapf(err, "close file <%s>", destFilename) + } + + err = os.Rename(destFilename, finalFilename) + return errors.Wrapf(err, "rename <%s> to <%s>", destFilename, finalFilename) } // Delete deletes given file from FS. 
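Note: the two PBM-1329 patches above apply the standard atomic-write pattern: write to a temporary sibling file, fsync it, close it, and only then rename it over the destination, removing the temporary file on any failure so readers never observe a partially written file. Below is a minimal standalone Go sketch of that pattern for reference; the atomicWrite helper and the paths in main are illustrative assumptions, not part of the PBM code.

package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

// atomicWrite stores data under finalPath so that readers never observe a
// partially written file: the bytes go to a ".tmp" sibling first, are fsynced,
// and only then renamed over the destination. rename(2) is atomic within a
// single filesystem, which is why the temp file must live next to the target.
func atomicWrite(finalPath string, data io.Reader) error {
	tmpPath := finalPath + ".tmp"

	if err := os.MkdirAll(filepath.Dir(finalPath), 0o755); err != nil {
		return fmt.Errorf("create dir: %w", err)
	}

	f, err := os.Create(tmpPath)
	if err != nil {
		return fmt.Errorf("create temp file: %w", err)
	}
	// Ensure the temp file never outlives a failure (mirrors PATCH 162).
	// On success the rename below has already moved it, so this Remove
	// fails harmlessly.
	defer os.Remove(tmpPath)

	if _, err := io.Copy(f, data); err != nil {
		f.Close()
		return fmt.Errorf("write temp file: %w", err)
	}
	// Flush to stable storage before publishing the file.
	if err := f.Sync(); err != nil {
		f.Close()
		return fmt.Errorf("sync temp file: %w", err)
	}
	if err := f.Close(); err != nil {
		return fmt.Errorf("close temp file: %w", err)
	}

	if err := os.Rename(tmpPath, finalPath); err != nil {
		return fmt.Errorf("rename: %w", err)
	}
	return nil
}

func main() {
	if err := atomicWrite("/tmp/demo/out.txt", strings.NewReader("hello\n")); err != nil {
		fmt.Println("error:", err)
	}
}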
From 64b3a069aca637ba35800069d8c9c13122bd8d4d Mon Sep 17 00:00:00 2001 From: Daniel Oliver Date: Mon, 29 Jul 2024 14:03:46 +0100 Subject: [PATCH 162/203] PBM-1329 Ensure temp files are always removed --- pbm/storage/fs/fs.go | 45 ++++++++++++++------------------------------ 1 file changed, 14 insertions(+), 31 deletions(-) diff --git a/pbm/storage/fs/fs.go b/pbm/storage/fs/fs.go index 290b7ecd1..f115b9d71 100644 --- a/pbm/storage/fs/fs.go +++ b/pbm/storage/fs/fs.go @@ -79,10 +79,7 @@ func (*FS) Type() storage.Type { return storage.Filesystem } -func (fs *FS) Save(name string, data io.Reader, _ int64) error { - filepath := path.Join(fs.root, name+".tmp") - finalpath := path.Join(fs.root, name) - +func WriteSync(filepath string, data io.Reader) error { err := os.MkdirAll(path.Dir(filepath), os.ModeDir|0o755) if err != nil { return errors.Wrapf(err, "create path %s", path.Dir(filepath)) @@ -105,13 +102,18 @@ func (fs *FS) Save(name string, data io.Reader, _ int64) error { } err = fw.Sync() - if err != nil { - return errors.Wrapf(err, "sync file <%s>", filepath) - } + return errors.Wrapf(err, "sync file <%s>", filepath) +} + + +func (fs *FS) Save(name string, data io.Reader, _ int64) error { + filepath := path.Join(fs.root, name+".tmp") + finalpath := path.Join(fs.root, name) - err = fw.Close() + err := WriteSync(filepath, data) if err != nil { - return errors.Wrapf(err, "close file <%s>", filepath) + os.Remove(filepath) + return errors.Wrapf(err, "write-sync %s", path.Dir(filepath)) } err = os.Rename(filepath, finalpath) @@ -188,30 +190,11 @@ func (fs *FS) Copy(src, dst string) error { destFilename := path.Join(fs.root, dst+".tmp") finalFilename := path.Join(fs.root, dst) - err = os.MkdirAll(path.Dir(destFilename), os.ModeDir|0o755) - if err != nil { - return errors.Wrap(err, "create dst dir") - } - - to, err := os.Create(destFilename) - if err != nil { - return errors.Wrap(err, "create dst") - } - defer to.Close() - - _, err = io.Copy(to, from) - if err != nil { - return errors.Wrapf(err, "copy to <%s>", destFilename) - } - - err = to.Sync() - if err != nil { - return errors.Wrapf(err, "sync file <%s>", destFilename) - } - err = to.Close() + err = WriteSync(destFilename, from) if err != nil { - return errors.Wrapf(err, "close file <%s>", destFilename) + os.Remove(destFilename) + return errors.Wrapf(err, "write-sync %s", path.Dir(destFilename)) } err = os.Rename(destFilename, finalFilename) From afddb60ea9f57f18315fd4a10e478f2bf9fe301f Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 6 Aug 2024 11:40:08 +0200 Subject: [PATCH 163/203] PBM-557: Fix for handling priority change in case of Hidden member (#978) * Fix priority detection hidden->secondary for PITR * Add tests for CalcPriorityForNode * Remove scoreForSecondary --- pbm/prio/priority.go | 10 ++++------ pbm/prio/priority_test.go | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/pbm/prio/priority.go b/pbm/prio/priority.go index 2dcc7d532..6d25f5daa 100644 --- a/pbm/prio/priority.go +++ b/pbm/prio/priority.go @@ -9,10 +9,9 @@ import ( ) const ( - defaultScore = 1.0 - scoreForPrimary = defaultScore / 2 - scoreForSecondary = defaultScore * 1 - scoreForHidden = defaultScore * 2 + defaultScore = 1.0 + scoreForPrimary = defaultScore / 2 + scoreForHidden = defaultScore * 2 ) // NodesPriority groups nodes by priority according to @@ -108,11 +107,10 @@ func CalcPriorityForAgent( func CalcPriorityForNode(node *topo.NodeInfo) float64 { if node.IsPrimary { return scoreForPrimary - } else 
if node.Secondary { - return scoreForSecondary } else if node.Hidden { return scoreForHidden } + return defaultScore } diff --git a/pbm/prio/priority_test.go b/pbm/prio/priority_test.go index cd4007f06..605847870 100644 --- a/pbm/prio/priority_test.go +++ b/pbm/prio/priority_test.go @@ -378,6 +378,42 @@ func TestCalcNodesPriority(t *testing.T) { }) } +func TestCalcPriorityForNode(t *testing.T) { + t.Run("for primary", func(t *testing.T) { + nodeInfo := &topo.NodeInfo{ + IsPrimary: true, + } + + p := CalcPriorityForNode(nodeInfo) + if p != scoreForPrimary { + t.Errorf("wrong priority for primary: want=%v, got=%v", scoreForPrimary, p) + } + }) + + t.Run("for secondary", func(t *testing.T) { + nodeInfo := &topo.NodeInfo{ + Secondary: true, + } + + p := CalcPriorityForNode(nodeInfo) + if p != defaultScore { + t.Errorf("wrong priority for secondary: want=%v, got=%v", defaultScore, p) + } + }) + + t.Run("for hidden", func(t *testing.T) { + nodeInfo := &topo.NodeInfo{ + Hidden: true, + Secondary: true, // hidden is also secondary + } + + p := CalcPriorityForNode(nodeInfo) + if p != scoreForHidden { + t.Errorf("wrong priority for hidden: want=%v, got=%v", scoreForHidden, p) + } + }) +} + func newP(rs, node string) topo.AgentStat { return newAgent(rs, node, defs.NodeStatePrimary, false) } From f84af204eef7dd51c4bbfd5824303d1b20b4923d Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 7 Aug 2024 10:39:07 +0200 Subject: [PATCH 164/203] [PBM-1341] retry shutdown on ConflictingOperationInProgress --- pbm/restore/physical.go | 34 +++++++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/pbm/restore/physical.go b/pbm/restore/physical.go index ffb46f008..778af441c 100644 --- a/pbm/restore/physical.go +++ b/pbm/restore/physical.go @@ -53,6 +53,8 @@ const ( tryConnCount = 5 tryConnTimeout = 5 * time.Minute + + maxShutdownTriesOnStandaloneRecovery = 10 ) type files struct { @@ -1259,7 +1261,17 @@ func (r *PhysRestore) prepareData() error { } func shutdown(c *mongo.Client, dbpath string) error { - err := c.Database("admin").RunCommand(context.TODO(), bson.D{{"shutdown", 1}}).Err() + return shutdownImpl(c, dbpath, false) +} + +func forceShutdown(c *mongo.Client, dbpath string) error { + return shutdownImpl(c, dbpath, true) +} + +func shutdownImpl(c *mongo.Client, dbpath string, force bool) error { + res := c.Database("admin").RunCommand(context.TODO(), + bson.D{{"shutdown", 1}, {"force", force}}) + err := res.Err() if err != nil && !strings.Contains(err.Error(), "socket was unexpectedly closed") { return err } @@ -1285,12 +1297,24 @@ func (r *PhysRestore) recoverStandalone() error { return errors.Wrap(err, "connect to mongo") } - err = shutdown(c, r.dbpath) - if err != nil { - return errors.Wrap(err, "shutdown mongo") + for i := 0; i != maxShutdownTriesOnStandaloneRecovery; i++ { + err = shutdown(c, r.dbpath) + if err == nil { + return nil // OK + } + + if strings.Contains(err.Error(), "ConflictingOperationInProgress") { + r.log.Warning("retry shutdown in 5 seconds. 
reason: %v", err) + time.Sleep(5 * time.Second) + continue + } + + return errors.Wrap(err, "shutdown mongo") // unexpected } - return nil + r.log.Debug("force shutdown") + err = forceShutdown(c, r.dbpath) + return errors.Wrap(err, "force shutdown mongo") } func (r *PhysRestore) replayOplog( From 85455b0539695afb624fb39262a5b9a511718364 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 7 Aug 2024 12:09:56 +0200 Subject: [PATCH 165/203] [PBM-1360] drop PBM collection on physical restore --- pbm/restore/physical.go | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/pbm/restore/physical.go b/pbm/restore/physical.go index 778af441c..3fd8fe9ba 100644 --- a/pbm/restore/physical.go +++ b/pbm/restore/physical.go @@ -1561,6 +1561,8 @@ func (r *PhysRestore) resetRS() error { if err != nil { return errors.Wrap(err, "turn off pitr") } + + r.dropPBMCollections(ctx, c) } err = shutdown(c, r.dbpath) @@ -1571,6 +1573,40 @@ func (r *PhysRestore) resetRS() error { return nil } +func (r *PhysRestore) dropPBMCollections(ctx context.Context, c *mongo.Client) { + pbmCollections := []string{ + defs.LockCollection, + defs.LogCollection, + // defs.ConfigCollection, + defs.LockCollection, + defs.LockOpCollection, + defs.BcpCollection, + defs.RestoresCollection, + defs.CmdStreamCollection, + defs.PITRChunksCollection, + defs.PITRCollection, + defs.PBMOpLogCollection, + defs.AgentsStatusCollection, + } + + wg := &sync.WaitGroup{} + wg.Add(len(pbmCollections)) + + for _, coll := range pbmCollections { + go func() { + defer wg.Done() + + r.log.Debug("dropping 'admin.%s'", coll) + err := c.Database(defs.DB).Collection(coll).Drop(ctx) + if err != nil { + r.log.Warning("failed to drop 'admin.%s': %v", coll, err) + } + }() + } + + wg.Wait() +} + func (r *PhysRestore) getShardMapping(bcp *backup.BackupMeta) map[string]string { source := make(map[string]string) if bcp != nil && bcp.ShardRemap != nil { From 708b3682dfa730724a62b5b6f5ed34b30e1e8b18 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Wed, 7 Aug 2024 15:00:38 +0200 Subject: [PATCH 166/203] [PBM-1360] ensure an array in pbm logs -o json --- pbm/log/history.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pbm/log/history.go b/pbm/log/history.go index c95cca9fc..1b0d8d86f 100644 --- a/pbm/log/history.go +++ b/pbm/log/history.go @@ -94,7 +94,11 @@ func (e *Entries) SetLocation(l string) error { } func (e Entries) MarshalJSON() ([]byte, error) { - return json.Marshal(e.Data) + data := e.Data + if data == nil { + data = []Entry{} + } + return json.Marshal(data) } func (e Entries) String() string { From 1debffdb340d40d9347ef822b4cfa00958d03972 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 12 Aug 2024 10:16:26 +0200 Subject: [PATCH 167/203] Add db logic for setting PITR heartbeat --- pbm/oplog/nomination.go | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/pbm/oplog/nomination.go b/pbm/oplog/nomination.go index d7ced267a..31853c49c 100644 --- a/pbm/oplog/nomination.go +++ b/pbm/oplog/nomination.go @@ -6,19 +6,22 @@ import ( "time" "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" "github.com/percona/percona-backup-mongodb/pbm/connect" "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/topo" ) // PITRMeta contains all operational data about PITR execution process. 
type PITRMeta struct { - StartTS int64 `bson:"start_ts" json:"start_ts"` - Status Status `bson:"status" json:"status"` - Nomination []PITRNomination `bson:"n" json:"n"` - Replsets []PITRReplset `bson:"replsets" json:"replsets"` + StartTS int64 `bson:"start_ts" json:"start_ts"` + Hb primitive.Timestamp `bson:"hb" json:"hb"` + Status Status `bson:"status" json:"status"` + Nomination []PITRNomination `bson:"n" json:"n"` + Replsets []PITRReplset `bson:"replsets" json:"replsets"` } // PITRNomination is used to choose (nominate and elect) member(s) @@ -265,3 +268,20 @@ func GetAgentsWithACK(ctx context.Context, conn connect.Client) ([]string, error return agents, nil } + +func SetHbForPITR(ctx context.Context, conn connect.Client) error { + ts, err := topo.GetClusterTime(ctx, conn) + if err != nil { + return errors.Wrap(err, "read cluster time") + } + + _, err = conn.PITRCollection().UpdateOne( + ctx, + bson.D{}, + bson.D{ + {"$set", bson.M{"hb": ts}}, + }, + ) + + return errors.Wrap(err, "update pbmPITR") +} From 2da6a80ca80c3c71c570f480f6a3946210cd9131 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 12 Aug 2024 11:04:15 +0200 Subject: [PATCH 168/203] Add PITR job for HB --- cmd/pbm-agent/pitr.go | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index ece27b8c2..016e14623 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -32,6 +32,7 @@ const ( pitrWatchMonitorPollingCycle = 15 * time.Second pitrTopoMonitorPollingCycle = 2 * time.Minute pitrActivityMonitorPollingCycle = 2 * time.Minute + pitrHb = 5 * time.Second ) type currentPitr struct { @@ -76,6 +77,8 @@ func (a *Agent) startMon(ctx context.Context, cfg *config.Config) { go a.pitrErrorMonitor(ctx) go a.pitrTopoMonitor(ctx) go a.pitrActivityMonitor(ctx) + + go a.pitrHb(ctx) } // stopMon stops monitor (watcher) jobs @@ -872,3 +875,28 @@ func (a *Agent) pitrErrorMonitor(ctx context.Context) { } } } + +func (a *Agent) pitrHb(ctx context.Context) { + l := log.LogEventFromContext(ctx) + l.Debug("start pitr hb") + defer l.Debug("stop pitr hb") + + tk := time.NewTicker(pitrHb) + defer tk.Stop() + + for { + select { + case <-tk.C: + err := oplog.SetHbForPITR(ctx, a.leadConn) + if err != nil { + l.Error("error while setting hb for pitr: %v", err) + } + + case <-ctx.Done(): + return + + case <-a.monStopSig: + return + } + } +} From de75e6e0f37714cf49ba8fdd0f03998096937328 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 12 Aug 2024 11:08:13 +0200 Subject: [PATCH 169/203] Add logic for checkibng PITR heartbeat status --- cmd/pbm-agent/pitr.go | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 016e14623..1e20e3f4f 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -334,7 +334,7 @@ func (a *Agent) pitr(ctx context.Context) error { for { select { case <-tk.C: - cStatus := a.getPITRClusterStatus(ctx) + cStatus, isHbStale := a.getPITRClusterAndStaleStatus(ctx) if cStatus == oplog.StatusReconfig { l.Debug("stop slicing because of reconfig") stopSlicing() @@ -345,6 +345,11 @@ func (a *Agent) pitr(ctx context.Context) error { stopSlicing() return } + if isHbStale { + l.Debug("stop slicing because PITR heartbeat is stale") + stopSlicing() + return + } case <-stopSlicingCtx.Done(): return @@ -644,19 +649,28 @@ func (a *Agent) reconcileReadyStatus(ctx context.Context, agents []topo.AgentSta } } -// getPITRClusterStatus gets cluster status from pbmPITR 
collection. -// In case of error, it returns StatusUnset and log the error. -func (a *Agent) getPITRClusterStatus(ctx context.Context) oplog.Status { +// getPITRClusterAndStaleStatus gets cluster and heartbeat stale status from pbmPITR collection. +// In case of error, it returns StatusUnset and HB non stale status, and logs the error. +func (a *Agent) getPITRClusterAndStaleStatus(ctx context.Context) (oplog.Status, bool) { l := log.LogEventFromContext(ctx) + isStale := false meta, err := oplog.GetMeta(ctx, a.leadConn) if err != nil { if !errors.Is(err, errors.ErrNotFound) { l.Error("getting metta for reconfig status check: %v", err) } - return oplog.StatusUnset + return oplog.StatusUnset, isStale + } + + ts, err := topo.GetClusterTime(ctx, a.leadConn) + if err != nil { + l.Error("read cluster time for pitr stale check: %v", err) + return meta.Status, isStale } - return meta.Status + isStale = meta.Hb.T+defs.StaleFrameSec < ts.T + + return meta.Status, isStale } // pitrConfigMonitor watches changes in PITR section within PBM configuration. From d9c30b455de904bb60ba88301451b9e4484070cd Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Mon, 12 Aug 2024 11:41:43 +0200 Subject: [PATCH 170/203] Set initial HB for PITR --- cmd/pbm-agent/pitr.go | 4 +++- pbm/oplog/nomination.go | 8 +++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go index 1e20e3f4f..991f7f120 100644 --- a/cmd/pbm-agent/pitr.go +++ b/cmd/pbm-agent/pitr.go @@ -63,7 +63,7 @@ func (a *Agent) getPitr() *currentPitr { return a.pitrjob } -// startMon starts monitor (watcher) jobs only on cluster leader. +// startMon starts monitor (watcher) and heartbeat jobs only on cluster leader. func (a *Agent) startMon(ctx context.Context, cfg *config.Config) { a.monMx.Lock() defer a.monMx.Unlock() @@ -409,6 +409,7 @@ func (a *Agent) leadNomination( err = oplog.InitMeta(ctx, a.leadConn) if err != nil { l.Error("init meta: %v", err) + return } agents, err := topo.ListAgentStatuses(ctx, a.leadConn) @@ -890,6 +891,7 @@ func (a *Agent) pitrErrorMonitor(ctx context.Context) { } } +// pitrHB job sets PITR heartbeat. func (a *Agent) pitrHb(ctx context.Context) { l := log.LogEventFromContext(ctx) l.Debug("start pitr hb") diff --git a/pbm/oplog/nomination.go b/pbm/oplog/nomination.go index 31853c49c..22766700d 100644 --- a/pbm/oplog/nomination.go +++ b/pbm/oplog/nomination.go @@ -56,12 +56,18 @@ const ( // Init add initial PITR document. 
func InitMeta(ctx context.Context, conn connect.Client) error { + ts, err := topo.GetClusterTime(ctx, conn) + if err != nil { + return errors.Wrap(err, "init pitr meta, read cluster time") + } + pitrMeta := PITRMeta{ StartTS: time.Now().Unix(), Nomination: []PITRNomination{}, Replsets: []PITRReplset{}, + Hb: ts, } - _, err := conn.PITRCollection().ReplaceOne( + _, err = conn.PITRCollection().ReplaceOne( ctx, bson.D{}, pitrMeta, From 1606c2d1b3e25c652022977e9b07e99fc3abd11d Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 13 Aug 2024 08:17:47 +0200 Subject: [PATCH 171/203] (e2e-tests) update docker to 27.1.1 (#981) --- e2e-tests/pkg/pbm/docker.go | 4 +- e2e-tests/pkg/pbm/pbm_ctl.go | 5 +- go.mod | 2 +- go.sum | 4 +- vendor/github.com/docker/docker/AUTHORS | 20 +- vendor/github.com/docker/docker/api/common.go | 2 +- .../github.com/docker/docker/api/swagger.yaml | 714 ++++++++++++++---- .../docker/docker/api/types/client.go | 59 +- .../docker/docker/api/types/configs.go | 18 - .../docker/api/types/container/config.go | 9 - .../docker/api/types/container/container.go | 44 ++ .../api/types/container/create_request.go | 13 + .../docker/docker/api/types/container/exec.go | 43 ++ .../docker/api/types/container/hostconfig.go | 22 +- .../api/types/container/hostconfig_unix.go | 38 +- .../api/types/container/hostconfig_windows.go | 29 +- .../docker/api/types/{ => container}/stats.go | 10 +- .../docker/docker/api/types/events/events.go | 8 + .../docker/docker/api/types/image/image.go | 40 +- .../docker/docker/api/types/image/opts.go | 34 +- .../docker/docker/api/types/mount/mount.go | 6 +- .../api/types/network/create_response.go | 19 + .../docker/api/types/network/endpoint.go | 2 +- .../docker/api/types/network/network.go | 94 +++ .../docker/api/types/registry/registry.go | 26 - .../docker/api/types/registry/search.go | 47 ++ .../docker/api/types/swarm/container.go | 4 +- .../docker/docker/api/types/system/info.go | 40 +- .../docker/docker/api/types/types.go | 159 +--- .../docker/api/types/types_deprecated.go | 211 +++++- .../docker/docker/api/types/volume/options.go | 7 + .../github.com/docker/docker/client/client.go | 28 +- .../docker/docker/client/container_copy.go | 16 +- .../docker/docker/client/container_exec.go | 17 +- .../docker/docker/client/container_prune.go | 6 +- .../docker/docker/client/container_stats.go | 14 +- .../github.com/docker/docker/client/events.go | 5 +- .../docker/docker/client/image_import.go | 3 +- .../docker/docker/client/image_load.go | 8 +- .../docker/docker/client/image_prune.go | 6 +- .../docker/docker/client/image_pull.go | 2 +- .../docker/docker/client/image_push.go | 18 +- .../docker/docker/client/image_search.go | 5 +- .../docker/docker/client/interface.go | 42 +- .../docker/docker/client/network_connect.go | 3 +- .../docker/docker/client/network_create.go | 13 +- .../docker/client/network_disconnect.go | 7 +- .../docker/docker/client/network_inspect.go | 29 +- .../docker/docker/client/network_list.go | 6 +- .../docker/docker/client/network_prune.go | 6 +- .../docker/docker/client/plugin_install.go | 4 +- .../docker/docker/client/request.go | 8 +- .../docker/docker/client/volume_prune.go | 6 +- vendor/modules.txt | 2 +- 54 files changed, 1402 insertions(+), 585 deletions(-) delete mode 100644 vendor/github.com/docker/docker/api/types/configs.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container.go create mode 100644 vendor/github.com/docker/docker/api/types/container/create_request.go create mode 100644 
vendor/github.com/docker/docker/api/types/container/exec.go rename vendor/github.com/docker/docker/api/types/{ => container}/stats.go (96%) create mode 100644 vendor/github.com/docker/docker/api/types/network/create_response.go create mode 100644 vendor/github.com/docker/docker/api/types/registry/search.go diff --git a/e2e-tests/pkg/pbm/docker.go b/e2e-tests/pkg/pbm/docker.go index 9b1fc9942..8556c1cb7 100644 --- a/e2e-tests/pkg/pbm/docker.go +++ b/e2e-tests/pkg/pbm/docker.go @@ -208,7 +208,7 @@ func (d *Docker) RunOnReplSet(rsName string, wait time.Duration, cmd ...string) } func (d *Docker) RunCmd(containerID string, wait time.Duration, cmd ...string) (string, error) { - execConf := types.ExecConfig{ + execConf := container.ExecOptions{ User: "root", Cmd: cmd, Privileged: true, @@ -220,7 +220,7 @@ func (d *Docker) RunCmd(containerID string, wait time.Duration, cmd ...string) ( return "", errors.Wrap(err, "ContainerExecCreate") } - container, err := d.cn.ContainerExecAttach(d.ctx, id.ID, types.ExecStartCheck{}) + container, err := d.cn.ContainerExecAttach(d.ctx, id.ID, container.ExecStartOptions{}) if err != nil { return "", errors.Wrap(err, "attach to failed container") } diff --git a/e2e-tests/pkg/pbm/pbm_ctl.go b/e2e-tests/pkg/pbm/pbm_ctl.go index 94e3e0407..e0acfbe12 100644 --- a/e2e-tests/pkg/pbm/pbm_ctl.go +++ b/e2e-tests/pkg/pbm/pbm_ctl.go @@ -10,7 +10,6 @@ import ( "strings" "time" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" @@ -287,7 +286,7 @@ func (c *Ctl) PITRestoreClusterTime(t, i uint32) error { } func (c *Ctl) RunCmd(cmds ...string) (string, error) { - execConf := types.ExecConfig{ + execConf := container.ExecOptions{ Env: c.env, Cmd: cmds, AttachStderr: true, @@ -298,7 +297,7 @@ func (c *Ctl) RunCmd(cmds ...string) (string, error) { return "", errors.Wrap(err, "ContainerExecCreate") } - container, err := c.cn.ContainerExecAttach(c.ctx, id.ID, types.ExecStartCheck{}) + container, err := c.cn.ContainerExecAttach(c.ctx, id.ID, container.ExecAttachOptions{}) if err != nil { return "", errors.Wrap(err, "attach to failed container") } diff --git a/go.mod b/go.mod index 6fbdd764f..ec1a07560 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 github.com/alecthomas/kingpin v2.2.6+incompatible github.com/aws/aws-sdk-go v1.55.1 - github.com/docker/docker v26.1.2+incompatible + github.com/docker/docker v27.1.1+incompatible github.com/golang/snappy v0.0.4 github.com/google/uuid v1.6.0 github.com/klauspost/compress v1.17.8 diff --git a/go.sum b/go.sum index 617625bee..467a850d6 100644 --- a/go.sum +++ b/go.sum @@ -34,8 +34,8 @@ github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v26.1.2+incompatible h1:UVX5ZOrrfTGZZYEP+ZDq3Xn9PdHNXaSYMFPDumMqG2k= -github.com/docker/docker v26.1.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= +github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= 
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index 36315d429..5f93eeb4e 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -10,6 +10,7 @@ Aaron Huslage Aaron L. Xu Aaron Lehmann Aaron Welch +Aaron Yoshitake Abel Muiño Abhijeet Kasurde Abhinandan Prativadi @@ -62,6 +63,7 @@ alambike Alan Hoyle Alan Scherger Alan Thompson +Alano Terblanche Albert Callarisa Albert Zhang Albin Kerouanton @@ -141,6 +143,7 @@ Andreas Tiefenthaler Andrei Gherzan Andrei Ushakov Andrei Vagin +Andrew Baxter <423qpsxzhh8k3h@s.rendaw.me> Andrew C. Bodine Andrew Clay Shafer Andrew Duckworth @@ -193,6 +196,7 @@ Anton Löfgren Anton Nikitin Anton Polonskiy Anton Tiurin +Antonio Aguilar Antonio Murdaca Antonis Kalipetis Antony Messerli @@ -221,7 +225,6 @@ Avi Das Avi Kivity Avi Miller Avi Vaid -ayoshitake Azat Khuyiyakhmetov Bao Yonglei Bardia Keyoumarsi @@ -316,6 +319,7 @@ Burke Libbey Byung Kang Caleb Spare Calen Pennington +Calvin Liu Cameron Boehmer Cameron Sparr Cameron Spear @@ -362,6 +366,7 @@ Chen Qiu Cheng-mean Liu Chengfei Shang Chengguang Xu +Chentianze Chenyang Yan chenyuzhu Chetan Birajdar @@ -409,6 +414,7 @@ Christopher Crone Christopher Currie Christopher Jones Christopher Latham +Christopher Petito Christopher Rigor Christy Norman Chun Chen @@ -777,6 +783,7 @@ Gabriel L. Somlo Gabriel Linder Gabriel Monroy Gabriel Nicolas Avellaneda +Gabriel Tomitsuka Gaetan de Villele Galen Sampson Gang Qiao @@ -792,6 +799,7 @@ Geoff Levand Geoffrey Bachelet Geon Kim George Kontridze +George Ma George MacRorie George Xie Georgi Hristozov @@ -913,6 +921,7 @@ Illo Abdulrahim Ilya Dmitrichenko Ilya Gusev Ilya Khlopotov +imalasong <2879499479@qq.com> imre Fitos inglesp Ingo Gottwald @@ -930,6 +939,7 @@ J Bruni J. Nunn Jack Danger Canty Jack Laxson +Jack Walker <90711509+j2walker@users.noreply.github.com> Jacob Atzen Jacob Edelman Jacob Tomlinson @@ -989,6 +999,7 @@ Jason Shepherd Jason Smith Jason Sommer Jason Stangroome +Jasper Siepkes Javier Bassi jaxgeller Jay @@ -1100,6 +1111,7 @@ Jon Johnson Jon Surrell Jon Wedaman Jonas Dohse +Jonas Geiler Jonas Heinrich Jonas Pfenniger Jonathan A. Schweder @@ -1267,6 +1279,7 @@ Lakshan Perera Lalatendu Mohanty Lance Chen Lance Kinley +Lars Andringa Lars Butler Lars Kellogg-Stedman Lars R. Damerow @@ -1673,6 +1686,7 @@ Patrick Böänziger Patrick Devine Patrick Haas Patrick Hemmer +Patrick St. laurent Patrick Stapleton Patrik Cyvoct pattichen @@ -1878,6 +1892,7 @@ Royce Remer Rozhnov Alexandr Rudolph Gottesheim Rui Cao +Rui JingAn Rui Lopes Ruilin Li Runshen Zhu @@ -2184,6 +2199,7 @@ Tomek Mańko Tommaso Visconti Tomoya Tabuchi Tomáš Hrčka +Tomáš Virtus tonic Tonny Xu Tony Abboud @@ -2228,6 +2244,7 @@ Victor I. Wood Victor Lyuboslavsky Victor Marmol Victor Palma +Victor Toni Victor Vieux Victoria Bialas Vijaya Kumar K @@ -2279,6 +2296,7 @@ Wassim Dhif Wataru Ishida Wayne Chang Wayne Song +weebney Weerasak Chongnguluam Wei Fu Wei Wu diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index b11c2fe02..f831735f8 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. 
const ( // DefaultVersion of the current REST API. - DefaultVersion = "1.45" + DefaultVersion = "1.46" // MinSupportedAPIVersion is the minimum API version that can be supported // by the API server, specified as "major.minor". Note that the daemon diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index 5677340db..78f0ce1f2 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.45" +basePath: "/v1.46" info: title: "Docker Engine API" - version: "1.45" + version: "1.46" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,8 +55,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.45) is used. - For example, calling `/info` is the same as calling `/v1.45/info`. Using the + If you omit the version-prefix, the current version of the API (v1.46) is used. + For example, calling `/info` is the same as calling `/v1.46/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -442,6 +442,21 @@ definitions: Mode: description: "The permission mode for the tmpfs mount in an integer." type: "integer" + Options: + description: | + The options to be passed to the tmpfs mount. An array of arrays. + Flag options should be provided as 1-length arrays. Other types + should be provided as as 2-length arrays, where the first item is + the key and the second the value. + type: "array" + items: + type: "array" + minItems: 1 + maxItems: 2 + items: + type: "string" + example: + [["noexec"]] RestartPolicy: description: | @@ -1198,13 +1213,6 @@ definitions: ContainerConfig: description: | Configuration for a container that is portable between hosts. - - When used as `ContainerConfig` field in an image, `ContainerConfig` is an - optional field containing the configuration of the container that was last - committed when creating the image. - - Previous versions of Docker builder used this field to store build cache, - and it is not in active use anymore. type: "object" properties: Hostname: @@ -1363,6 +1371,289 @@ definitions: type: "string" example: ["/bin/sh", "-c"] + ImageConfig: + description: | + Configuration of the image. These fields are used as defaults + when starting a container from the image. + type: "object" + properties: + Hostname: + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. + +
<p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always empty. It must not be used, and will be removed in API v1.47. + type: "string" + example: "" + Domainname: + description: | + The domain name to use for the container. + +
<p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always empty. It must not be used, and will be removed in API v1.47. + type: "string" + example: "" + User: + description: "The user that commands are run as inside the container." + type: "string" + example: "web:web" + AttachStdin: + description: | + Whether to attach to `stdin`. + +
<p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.47. + type: "boolean" + default: false + example: false + AttachStdout: + description: | + Whether to attach to `stdout`. + +
<p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.47. + type: "boolean" + default: false + example: false + AttachStderr: + description: | + Whether to attach to `stderr`. + +
<p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.47. + type: "boolean" + default: false + example: false + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + x-nullable: true + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed. + +
<p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.47. + type: "boolean" + default: false + example: false + OpenStdin: + description: | + Open `stdin` + +
<p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.47. + type: "boolean" + default: false + example: false + StdinOnce: + description: | + Close `stdin` after one attached client disconnects. + +
<p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always false. It must not be used, and will be removed in API v1.47. + type: "boolean" + default: false + example: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than to have an empty value. + type: "array" + items: + type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + example: ["/bin/sh"] + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + default: false + example: false + x-nullable: true + Image: + description: | + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. + +
<p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always empty. It must not be used, and will be removed in API v1.47. + type: "string" + default: "" + example: "" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + example: + "/app/data": {} + "/app/config": {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + example: "/public/" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + example: [] + NetworkDisabled: + description: | + Disable networking for the container. + +
<p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always omitted. It must not be used, and will be removed in API v1.47. + type: "boolean" + default: false + example: false + x-nullable: true + MacAddress: + description: | + MAC address of the container. + +
<p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always omitted. It must not be used, and will be removed in API v1.47. + type: "string" + default: "" + example: "" + x-nullable: true + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + x-nullable: true + items: + type: "string" + example: [] + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + example: "SIGTERM" + x-nullable: true + StopTimeout: + description: | + Timeout to stop a container in seconds. + +
<p><br /></p>
+ + > **Deprecated**: this field is not part of the image specification and is + > always omitted. It must not be used, and will be removed in API v1.47. + type: "integer" + default: 10 + x-nullable: true + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. + type: "array" + x-nullable: true + items: + type: "string" + example: ["/bin/sh", "-c"] + # FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed. + example: + "Hostname": "" + "Domainname": "" + "User": "web:web" + "AttachStdin": false + "AttachStdout": false + "AttachStderr": false + "ExposedPorts": { + "80/tcp": {}, + "443/tcp": {} + } + "Tty": false + "OpenStdin": false + "StdinOnce": false + "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] + "Cmd": ["/bin/sh"] + "Healthcheck": { + "Test": ["string"], + "Interval": 0, + "Timeout": 0, + "Retries": 0, + "StartPeriod": 0, + "StartInterval": 0 + } + "ArgsEscaped": true + "Image": "" + "Volumes": { + "/app/data": {}, + "/app/config": {} + } + "WorkingDir": "/public/" + "Entrypoint": [] + "OnBuild": [] + "Labels": { + "com.example.some-label": "some-value", + "com.example.some-other-label": "some-other-value" + } + "StopSignal": "SIGTERM" + "Shell": ["/bin/sh", "-c"] + NetworkingConfig: description: | NetworkingConfig represents the container's networking configuration for @@ -1758,21 +2049,6 @@ definitions: format: "dateTime" x-nullable: true example: "2022-02-04T21:20:12.497794809Z" - Container: - description: | - The ID of the container that was used to create the image. - - Depending on how the image was created, this field may be empty. - - **Deprecated**: this field is kept for backward compatibility, but - will be removed in API v1.45. - type: "string" - example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" - ContainerConfig: - description: | - **Deprecated**: this field is kept for backward compatibility, but - will be removed in API v1.45. - $ref: "#/definitions/ContainerConfig" DockerVersion: description: | The version of Docker that was used to build the image. @@ -1780,7 +2056,7 @@ definitions: Depending on how the image was created, this field may be empty. type: "string" x-nullable: false - example: "20.10.7" + example: "27.0.1" Author: description: | Name of the author that was specified when committing the image, or as @@ -1789,7 +2065,7 @@ definitions: x-nullable: false example: "" Config: - $ref: "#/definitions/ContainerConfig" + $ref: "#/definitions/ImageConfig" Architecture: description: | Hardware CPU architecture that the image runs on. @@ -1866,6 +2142,7 @@ definitions: format: "dateTime" example: "2022-02-28T14:40:02.623929178Z" x-nullable: true + ImageSummary: type: "object" x-go-name: "Summary" @@ -2179,72 +2456,129 @@ definitions: type: "object" properties: Name: + description: | + Name of the network. type: "string" + example: "my_network" Id: + description: | + ID that uniquely identifies a network on a single machine. type: "string" + example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" Created: + description: | + Date and time at which the network was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" + example: "2016-10-19T04:33:30.360899459Z" Scope: + description: | + The level at which the network exists (e.g. 
`swarm` for cluster-wide + or `local` for machine level) type: "string" + example: "local" Driver: + description: | + The name of the driver used to create the network (e.g. `bridge`, + `overlay`). type: "string" + example: "overlay" EnableIPv6: + description: | + Whether the network was created with IPv6 enabled. type: "boolean" + example: false IPAM: $ref: "#/definitions/IPAM" Internal: + description: | + Whether the network is created to only allow internal networking + connectivity. type: "boolean" + default: false + example: false Attachable: + description: | + Wheter a global / swarm scope network is manually attachable by regular + containers from workers in swarm mode. type: "boolean" + default: false + example: false Ingress: + description: | + Whether the network is providing the routing-mesh for the swarm cluster. + type: "boolean" + default: false + example: false + ConfigFrom: + $ref: "#/definitions/ConfigReference" + ConfigOnly: + description: | + Whether the network is a config-only network. Config-only networks are + placeholder networks for network configurations to be used by other + networks. Config-only networks cannot be used directly to run containers + or services. type: "boolean" + default: false Containers: + description: | + Contains endpoints attached to the network. type: "object" additionalProperties: $ref: "#/definitions/NetworkContainer" + example: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" Options: + description: | + Network-specific options uses when creating the network. type: "object" additionalProperties: type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" Labels: + description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" - example: - Name: "net01" - Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" - Created: "2016-10-19T04:33:30.360899459Z" - Scope: "local" - Driver: "bridge" - EnableIPv6: false - IPAM: - Driver: "default" - Config: - - Subnet: "172.19.0.0/16" - Gateway: "172.19.0.1" - Options: - foo: "bar" - Internal: false - Attachable: false - Ingress: false - Containers: - 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: - Name: "test" - EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" - MacAddress: "02:42:ac:13:00:02" - IPv4Address: "172.19.0.2/16" - IPv6Address: "" - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Peers: + description: | + List of peer nodes for an overlay network. This field is only present + for overlay networks, and omitted for other network types. 
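The `Peers` list documented above is only populated for overlay networks. A sketch of reading it through the Go client, assuming the relocated `network.InspectOptions` and `network.Inspect` types that this vendor bump adds further down; package and helper names are illustrative:

package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

// printOverlayPeers inspects a network and prints its overlay peer nodes.
// Peers is nil for non-overlay network types.
func printOverlayPeers(ctx context.Context, cli *client.Client, networkID string) error {
	// Verbose inspect additionally includes service info for swarm-scoped networks.
	nw, err := cli.NetworkInspect(ctx, networkID, network.InspectOptions{Verbose: true})
	if err != nil {
		return err
	}
	for _, p := range nw.Peers {
		fmt.Printf("peer %s at %s\n", p.Name, p.IP)
	}
	return nil
}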
+ type: "array" + items: + $ref: "#/definitions/PeerInfo" + x-nullable: true + # TODO: Add Services (only present when "verbose" is set). + + ConfigReference: + description: | + The config-only network source to provide the configuration for + this network. + type: "object" + properties: + Network: + description: | + The name of the config-only network that provides the network's + configuration. The specified network must be an existing config-only + network. Only network names are allowed, not network IDs. + type: "string" + example: "config_only_network_01" + IPAM: type: "object" properties: @@ -2252,6 +2586,7 @@ definitions: description: "Name of the IPAM driver to use." type: "string" default: "default" + example: "default" Config: description: | List of IPAM configuration options, specified as a map: @@ -2267,16 +2602,21 @@ definitions: type: "object" additionalProperties: type: "string" + example: + foo: "bar" IPAMConfig: type: "object" properties: Subnet: type: "string" + example: "172.20.0.0/16" IPRange: type: "string" + example: "172.20.10.0/24" Gateway: type: "string" + example: "172.20.10.11" AuxiliaryAddresses: type: "object" additionalProperties: @@ -2287,14 +2627,53 @@ definitions: properties: Name: type: "string" + example: "container_1" EndpointID: type: "string" + example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" MacAddress: type: "string" + example: "02:42:ac:13:00:02" IPv4Address: type: "string" + example: "172.19.0.2/16" IPv6Address: type: "string" + example: "" + + PeerInfo: + description: | + PeerInfo represents one peer of an overlay network. + type: "object" + properties: + Name: + description: + ID of the peer-node in the Swarm cluster. + type: "string" + example: "6869d7c1732b" + IP: + description: + IP-address of the peer-node in the Swarm cluster. + type: "string" + example: "10.133.77.91" + + NetworkCreateResponse: + description: "OK response to NetworkCreate operation" + type: "object" + title: "NetworkCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warning] + properties: + Id: + description: "The ID of the created network." + type: "string" + x-nullable: false + example: "b5c4fc71e8022147cd25de22b22173de4e3b170134117172eb595cb91b4e7e5d" + Warning: + description: "Warnings encountered when creating the container" + type: "string" + x-nullable: false + example: "" BuildInfo: type: "object" @@ -2495,6 +2874,17 @@ definitions: example: - "server_x" - "server_y" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" # Operational data NetworkID: @@ -2538,17 +2928,6 @@ definitions: type: "integer" format: "int64" example: 64 - DriverOpts: - description: | - DriverOpts is a mapping of driver options and values. These options - are passed directly to the driver and are driver specific. - type: "object" - x-nullable: true - additionalProperties: - type: "string" - example: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" DNSNames: description: | List of all DNS names an endpoint has on a specific network. This @@ -3720,6 +4099,13 @@ definitions: but this is just provided for lookup/display purposes. The secret in the reference will be identified by its ID. 
type: "string" + OomScoreAdj: + type: "integer" + format: "int64" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 0 Configs: description: | Configs contains references to zero or more configs that will be @@ -3916,7 +4302,7 @@ definitions: `node.platform.os` | Node operating system | `node.platform.os==windows` `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` `node.labels` | User-defined node labels | `node.labels.security==high` - `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04` + `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-24.04` `engine.labels` apply to Docker Engine labels like operating system, drivers, etc. Swarm administrators add `node.labels` for operational @@ -4639,6 +5025,12 @@ definitions: properties: NetworkMode: type: "string" + Annotations: + description: "Arbitrary key-value metadata attached to container" + type: "object" + x-nullable: true + additionalProperties: + type: "string" NetworkSettings: description: "A summary of the container's network settings" type: "object" @@ -4907,7 +5299,7 @@ definitions: Version of the component type: "string" x-nullable: false - example: "19.03.12" + example: "27.0.1" Details: description: | Key/value pairs of strings with additional information about the @@ -4921,17 +5313,17 @@ definitions: Version: description: "The version of the daemon" type: "string" - example: "19.03.12" + example: "27.0.1" ApiVersion: description: | The default (and highest) API version that is supported by the daemon type: "string" - example: "1.40" + example: "1.46" MinAPIVersion: description: | The minimum API version that is supported by the daemon type: "string" - example: "1.12" + example: "1.24" GitCommit: description: | The Git commit of the source code that was used to build the daemon @@ -4942,7 +5334,7 @@ definitions: The version Go used to compile the daemon, and the version of the Go runtime in use. type: "string" - example: "go1.13.14" + example: "go1.21.12" Os: description: | The operating system that the daemon is running on ("linux" or "windows") @@ -4959,7 +5351,7 @@ definitions: This field is omitted when empty. type: "string" - example: "4.19.76-linuxkit" + example: "6.8.0-31-generic" Experimental: description: | Indicates if the daemon is started with experimental features enabled. @@ -5165,13 +5557,13 @@ definitions: information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. type: "string" - example: "4.9.38-moby" + example: "6.8.0-31-generic" OperatingSystem: description: | - Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" + Name of the host's operating system, for example: "Ubuntu 24.04 LTS" or "Windows Server 2016 Datacenter" type: "string" - example: "Alpine Linux v3.5" + example: "Ubuntu 24.04 LTS" OSVersion: description: | Version of the host's operating system @@ -5182,7 +5574,7 @@ definitions: > very existence, and the formatting of values, should not be considered > stable, and may change without notice. type: "string" - example: "16.04" + example: "24.04" OSType: description: | Generic type of the operating system of the host, as returned by the @@ -5284,7 +5676,7 @@ definitions: description: | Version string of the daemon. 
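The `Annotations` map added to the container list summary above can be read through the Go client. A minimal sketch, assuming the v27-era `ContainerList` signature that takes `container.ListOptions`; package and helper names are illustrative:

package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// printAnnotations lists all containers and prints the new HostConfig
// Annotations summary field; the map is nil for containers created
// without annotations.
func printAnnotations(ctx context.Context, cli *client.Client) error {
	list, err := cli.ContainerList(ctx, container.ListOptions{All: true})
	if err != nil {
		return err
	}
	for _, c := range list {
		for k, v := range c.HostConfig.Annotations {
			fmt.Printf("%s %s=%s\n", c.ID[:12], k, v)
		}
	}
	return nil
}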
type: "string" - example: "24.0.2" + example: "27.0.1" Runtimes: description: | List of [OCI compliant](https://github.com/opencontainers/runtime-spec) @@ -5436,6 +5828,58 @@ definitions: example: - "/etc/cdi" - "/var/run/cdi" + Containerd: + $ref: "#/definitions/ContainerdInfo" + x-nullable: true + + ContainerdInfo: + description: | + Information for connecting to the containerd instance that is used by the daemon. + This is included for debugging purposes only. + type: "object" + properties: + Address: + description: "The address of the containerd socket." + type: "string" + example: "/run/containerd/containerd.sock" + Namespaces: + description: | + The namespaces that the daemon uses for running containers and + plugins in containerd. These namespaces can be configured in the + daemon configuration, and are considered to be used exclusively + by the daemon, Tampering with the containerd instance may cause + unexpected behavior. + + As these namespaces are considered to be exclusively accessed + by the daemon, it is not recommended to change these values, + or to change them to a value that is used by other systems, + such as cri-containerd. + type: "object" + properties: + Containers: + description: | + The default containerd namespace used for containers managed + by the daemon. + + The default namespace for containers is "moby", but will be + suffixed with the `.` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "moby" + example: "moby" + Plugins: + description: | + The default containerd namespace used for plugins managed by + the daemon. + + The default namespace for plugins is "plugins.moby", but will be + suffixed with the `.` of the remapped `root` if + user-namespaces are enabled and the containerd image-store + is used. + type: "string" + default: "plugins.moby" + example: "plugins.moby" # PluginsInfo is a temp struct holding Plugins name # registered with docker daemon. It is used by Info struct @@ -6288,6 +6732,8 @@ paths: SizeRootFs: 0 HostConfig: NetworkMode: "default" + Annotations: + io.kubernetes.docker.type: "container" NetworkSettings: Networks: bridge: @@ -6323,6 +6769,9 @@ paths: SizeRootFs: 0 HostConfig: NetworkMode: "default" + Annotations: + io.kubernetes.docker.type: "container" + io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" NetworkSettings: Networks: bridge: @@ -6351,6 +6800,9 @@ paths: SizeRootFs: 0 HostConfig: NetworkMode: "default" + Annotations: + io.kubernetes.image.id: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + io.kubernetes.image.name: "ubuntu:latest" NetworkSettings: Networks: bridge: @@ -6379,6 +6831,8 @@ paths: SizeRootFs: 0 HostConfig: NetworkMode: "default" + Annotations: + io.kubernetes.config.source: "api" NetworkSettings: Networks: bridge: @@ -8656,6 +9110,11 @@ paths: details. type: "string" required: true + - name: "platform" + in: "query" + description: "Select a platform-specific manifest to be pushed. 
OCI platform (JSON encoded)" + type: "string" + x-nullable: true tags: ["Image"] /images/{name}/tag: post: @@ -9104,7 +9563,7 @@ paths: Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` - Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + Images report these events: `create`, `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` @@ -10060,19 +10519,9 @@ paths: - "application/json" responses: 201: - description: "No error" + description: "Network created successfully" schema: - type: "object" - title: "NetworkCreateResponse" - properties: - Id: - description: "The ID of the created network." - type: "string" - Warning: - type: "string" - example: - Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" - Warning: "" + $ref: "#/definitions/NetworkCreateResponse" 400: description: "bad parameter" schema: @@ -10104,14 +10553,17 @@ paths: Name: description: "The network's name." type: "string" - CheckDuplicate: - description: | - Deprecated: CheckDuplicate is now always enabled. - type: "boolean" + example: "my_network" Driver: description: "Name of the network driver plugin to use." type: "string" default: "bridge" + example: "bridge" + Scope: + description: | + The level at which the network exists (e.g. `swarm` for cluster-wide + or `local` for machine level). + type: "string" Internal: description: "Restrict external access to the network." type: "boolean" @@ -10120,55 +10572,55 @@ paths: Globally scoped network is manually attachable by regular containers from workers in swarm mode. type: "boolean" + example: true Ingress: description: | Ingress network is the network which provides the routing-mesh in swarm mode. type: "boolean" + example: false + ConfigOnly: + description: | + Creates a config-only network. Config-only networks are placeholder + networks for network configurations to be used by other networks. + Config-only networks cannot be used directly to run containers + or services. + type: "boolean" + default: false + example: false + ConfigFrom: + description: | + Specifies the source which will provide the configuration for + this network. The specified network must be an existing + config-only network; see ConfigOnly. + $ref: "#/definitions/ConfigReference" IPAM: description: "Optional custom IP scheme for the network." $ref: "#/definitions/IPAM" EnableIPv6: description: "Enable IPv6 on the network." type: "boolean" + example: true Options: description: "Network specific options to be used by the drivers." type: "object" additionalProperties: type: "string" + example: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" Labels: description: "User-defined key/value metadata." 
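A sketch of driving the network-create endpoint documented above from the Go client, assuming the relocated `network.CreateOptions` request type and `network.CreateResponse` result that appear later in this patch; names and values are illustrative, mirroring the swagger examples:

package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

// createBridgeNetwork creates a user-defined bridge network with a custom
// IPAM configuration and labels, matching the request body documented above.
func createBridgeNetwork(ctx context.Context, cli *client.Client) (string, error) {
	resp, err := cli.NetworkCreate(ctx, "my_network", network.CreateOptions{
		Driver: "bridge",
		IPAM: &network.IPAM{
			Driver: "default",
			Config: []network.IPAMConfig{
				{Subnet: "172.20.0.0/16", IPRange: "172.20.10.0/24", Gateway: "172.20.10.11"},
			},
		},
		Labels: map[string]string{"com.example.some-label": "some-value"},
	})
	if err != nil {
		return "", err
	}
	if resp.Warning != "" {
		fmt.Println("warning:", resp.Warning)
	}
	return resp.ID, nil
}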
type: "object" additionalProperties: type: "string" - example: - Name: "isolated_nw" - CheckDuplicate: false - Driver: "bridge" - EnableIPv6: true - IPAM: - Driver: "default" - Config: - - Subnet: "172.20.0.0/16" - IPRange: "172.20.10.0/24" - Gateway: "172.20.10.11" - - Subnet: "2001:db8:abcd::/64" - Gateway: "2001:db8:abcd::1011" - Options: - foo: "bar" - Internal: true - Attachable: false - Ingress: false - Options: - com.docker.network.bridge.default_bridge: "true" - com.docker.network.bridge.enable_icc: "true" - com.docker.network.bridge.enable_ip_masquerade: "true" - com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" - com.docker.network.bridge.name: "docker0" - com.docker.network.driver.mtu: "1500" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" tags: ["Network"] /networks/{id}/connect: @@ -11274,6 +11726,7 @@ paths: Mode: 384 SecretID: "fpjqlhnwb19zds35k8wn80lq9" SecretName: "example_org_domain_key" + OomScoreAdj: 0 LogDriver: Name: "json-file" Options: @@ -11426,6 +11879,7 @@ paths: Image: "busybox" Args: - "top" + OomScoreAdj: 0 Resources: Limits: {} Reservations: {} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 882201f0e..df791f02a 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -2,43 +2,15 @@ package types // import "github.com/docker/docker/api/types" import ( "bufio" + "context" "io" "net" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" - units "github.com/docker/go-units" ) -// ContainerExecInspect holds information returned by exec inspect. -type ContainerExecInspect struct { - ExecID string `json:"ID"` - ContainerID string - Running bool - ExitCode int - Pid int -} - -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool - CopyUIDGID bool -} - -// EventsOptions holds parameters to filter events with. -type EventsOptions struct { - Since string - Until string - Filters filters.Args -} - -// NetworkListOptions holds parameters to filter the list of networks with. -type NetworkListOptions struct { - Filters filters.Args -} - // NewHijackedResponse intializes a HijackedResponse type func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse { return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType} @@ -101,7 +73,7 @@ type ImageBuildOptions struct { NetworkMode string ShmSize int64 Dockerfile string - Ulimits []*units.Ulimit + Ulimits []*container.Ulimit // BuildArgs needs to be a *string instead of just a string so that // we can tell the difference between "" (empty string) and no value // at all (nil). See the parsing of buildArgs in @@ -122,7 +94,7 @@ type ImageBuildOptions struct { Target string SessionID string Platform string - // Version specifies the version of the unerlying builder to use + // Version specifies the version of the underlying builder to use Version BuilderVersion // BuildID is an optional identifier that can be passed together with the // build request. 
The same identifier can be used to gracefully cancel the @@ -157,34 +129,13 @@ type ImageBuildResponse struct { OSType string } -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. - SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. -} - -// ImageLoadResponse returns information to the client about a load process. -type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} - // RequestPrivilegeFunc is a function interface that // clients can supply to retry operations after // getting an authorization error. // This function returns the registry authentication // header value in base 64 format, or an error // if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - -// ImageSearchOptions holds parameters to search images with. -type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args - Limit int -} +type RequestPrivilegeFunc func(context.Context) (string, error) // NodeListOptions holds parameters to list nodes with. type NodeListOptions struct { @@ -289,7 +240,7 @@ type PluginInstallOptions struct { RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry RemoteRef string // RemoteRef is the plugin name on the registry PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + AcceptPermissionsFunc func(context.Context, PluginPrivileges) (bool, error) Args []string } diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go deleted file mode 100644 index 945b6efad..000000000 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ /dev/null @@ -1,18 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard error - AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Env []string // Environment variables - WorkingDir string // Working directory - Cmd []string // Execution commands and args -} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index 86f46b74a..d6b03e8b2 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -1,7 +1,6 @@ package container // import "github.com/docker/docker/api/types/container" import ( - "io" "time" "github.com/docker/docker/api/types/strslice" @@ -36,14 +35,6 @@ type StopOptions struct { // HealthConfig holds configuration settings for the HEALTHCHECK feature. type HealthConfig = dockerspec.HealthcheckConfig -// ExecStartOptions holds the options to start container's exec. 
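The `ExecConfig` and `ExecStartOptions` types removed in this hunk are re-homed as `container.ExecOptions` and `container.ExecAttachOptions` in the new `exec.go` below. A sketch of the updated create-and-attach flow, assuming the v27-era client method signatures; helper names are illustrative:

package example

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// execLs runs `ls -l /` inside a container using the relocated
// container.ExecOptions and container.ExecAttachOptions types.
func execLs(ctx context.Context, cli *client.Client, containerID string) error {
	created, err := cli.ContainerExecCreate(ctx, containerID, container.ExecOptions{
		Cmd:          []string{"ls", "-l", "/"},
		AttachStdout: true,
		AttachStderr: true,
	})
	if err != nil {
		return err
	}
	attach, err := cli.ContainerExecAttach(ctx, created.ID, container.ExecAttachOptions{})
	if err != nil {
		return err
	}
	defer attach.Close()
	// Without a TTY the stream is multiplexed into stdout/stderr frames;
	// a raw copy keeps the sketch short.
	_, err = io.Copy(os.Stdout, attach.Reader)
	return err
}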
-type ExecStartOptions struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer - ConsoleSize *[2]uint `json:",omitempty"` -} - // Config contains the configuration data about a container. // It should hold only portable information about the container. // Here, "portable" means "independent from the host we are running on". diff --git a/vendor/github.com/docker/docker/api/types/container/container.go b/vendor/github.com/docker/docker/api/types/container/container.go new file mode 100644 index 000000000..711af12c9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container.go @@ -0,0 +1,44 @@ +package container + +import ( + "io" + "os" + "time" +) + +// PruneReport contains the response for Engine API: +// POST "/containers/prune" +type PruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// PathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. +type PathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + AllowOverwriteDirWithFile bool + CopyUIDGID bool +} + +// StatsResponseReader wraps an io.ReadCloser to read (a stream of) stats +// for a container, as produced by the GET "/stats" endpoint. +// +// The OSType field is set to the server's platform to allow +// platform-specific handling of the response. +// +// TODO(thaJeztah): remove this wrapper, and make OSType part of [StatsResponse]. +type StatsResponseReader struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/create_request.go b/vendor/github.com/docker/docker/api/types/container/create_request.go new file mode 100644 index 000000000..e98dd6ad4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/create_request.go @@ -0,0 +1,13 @@ +package container + +import "github.com/docker/docker/api/types/network" + +// CreateRequest is the request message sent to the server for container +// create calls. It is a config wrapper that holds the container [Config] +// (portable) and the corresponding [HostConfig] (non-portable) and +// [network.NetworkingConfig]. +type CreateRequest struct { + *Config + HostConfig *HostConfig `json:"HostConfig,omitempty"` + NetworkingConfig *network.NetworkingConfig `json:"NetworkingConfig,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/exec.go b/vendor/github.com/docker/docker/api/types/container/exec.go new file mode 100644 index 000000000..96093eb5c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/exec.go @@ -0,0 +1,43 @@ +package container + +// ExecOptions is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecOptions struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. 
+ ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width] + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} + +// ExecStartOptions is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartOptions struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool + // Terminal size [height, width], unused if Tty == false + ConsoleSize *[2]uint `json:",omitempty"` +} + +// ExecAttachOptions is a temp struct used by execAttach. +// +// TODO(thaJeztah): make this a separate type; ContainerExecAttach does not use the Detach option, and cannot run detached. +type ExecAttachOptions = ExecStartOptions + +// ExecInspect holds information returned by exec inspect. +type ExecInspect struct { + ExecID string `json:"ID"` + ContainerID string + Running bool + ExitCode int + Pid int +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/vendor/github.com/docker/docker/api/types/container/hostconfig.go index efb96266e..727da8839 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig.go @@ -360,6 +360,12 @@ type LogConfig struct { Config map[string]string } +// Ulimit is an alias for [units.Ulimit], which may be moving to a different +// location or become a local type. This alias is to help transitioning. +// +// Users are recommended to use this alias instead of using [units.Ulimit] directly. +type Ulimit = units.Ulimit + // Resources contains container's resources (cgroups config, ulimits...) type Resources struct { // Applicable to all platforms @@ -387,14 +393,14 @@ type Resources struct { // KernelMemory specifies the kernel memory limit (in bytes) for the container. // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes. - KernelMemory int64 `json:",omitempty"` - KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. - Ulimits []*units.Ulimit // List of ulimits to be set in the container + KernelMemory int64 `json:",omitempty"` + KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. 
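With the `Ulimit` alias introduced above, callers can populate `Resources` without importing `go-units` directly. A minimal sketch using only types from this package; the helper name is illustrative:

package example

import "github.com/docker/docker/api/types/container"

// limitedHostConfig builds a HostConfig that caps memory, PIDs, and open
// files, using the new container.Ulimit alias for units.Ulimit.
func limitedHostConfig() *container.HostConfig {
	pids := int64(256)
	return &container.HostConfig{
		Resources: container.Resources{
			Memory:    512 * 1024 * 1024, // hard limit, in bytes
			PidsLimit: &pids,
			Ulimits: []*container.Ulimit{
				{Name: "nofile", Soft: 1024, Hard: 4096},
			},
		},
	}
}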
+ Ulimits []*Ulimit // List of ulimits to be set in the container // Applicable to Windows CPUCount int64 `json:"CpuCount"` // CPU count diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go index 421329237..cdee49ea3 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -9,24 +9,6 @@ func (i Isolation) IsValid() bool { return i.IsDefault() } -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return network.NetworkBridge - } else if n.IsHost() { - return network.NetworkHost - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return network.NetworkNone - } else if n.IsDefault() { - return network.NetworkDefault - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - // IsBridge indicates whether container uses the bridge network stack func (n NetworkMode) IsBridge() bool { return n == network.NetworkBridge @@ -41,3 +23,23 @@ func (n NetworkMode) IsHost() bool { func (n NetworkMode) IsUserDefined() bool { return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() } + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + switch { + case n.IsDefault(): + return network.NetworkDefault + case n.IsBridge(): + return network.NetworkBridge + case n.IsHost(): + return network.NetworkHost + case n.IsNone(): + return network.NetworkNone + case n.IsContainer(): + return "container" + case n.IsUserDefined(): + return n.UserDefined() + default: + return "" + } +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go index 154667f4f..f08545542 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -2,6 +2,11 @@ package container // import "github.com/docker/docker/api/types/container" import "github.com/docker/docker/api/types/network" +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + // IsBridge indicates whether container uses the bridge network stack // in windows it is given the name NAT func (n NetworkMode) IsBridge() bool { @@ -19,24 +24,24 @@ func (n NetworkMode) IsUserDefined() bool { return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() } -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - // NetworkName returns the name of the network stack. func (n NetworkMode) NetworkName() string { - if n.IsDefault() { + switch { + case n.IsDefault(): return network.NetworkDefault - } else if n.IsBridge() { + case n.IsBridge(): return network.NetworkNat - } else if n.IsNone() { + case n.IsHost(): + // Windows currently doesn't support host network-mode, so + // this would currently never happen.. 
+ return network.NetworkHost + case n.IsNone(): return network.NetworkNone - } else if n.IsContainer() { + case n.IsContainer(): return "container" - } else if n.IsUserDefined() { + case n.IsUserDefined(): return n.UserDefined() + default: + return "" } - - return "" } diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/container/stats.go similarity index 96% rename from vendor/github.com/docker/docker/api/types/stats.go rename to vendor/github.com/docker/docker/api/types/container/stats.go index 20daebed1..3b3fb131a 100644 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ b/vendor/github.com/docker/docker/api/types/container/stats.go @@ -1,6 +1,4 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types // import "github.com/docker/docker/api/types" +package container import "time" @@ -169,8 +167,10 @@ type Stats struct { MemoryStats MemoryStats `json:"memory_stats,omitempty"` } -// StatsJSON is newly used Networks -type StatsJSON struct { +// StatsResponse is newly used Networks. +// +// TODO(thaJeztah): unify with [Stats]. This wrapper was to account for pre-api v1.21 changes, see https://github.com/moby/moby/commit/d3379946ec96fb6163cb8c4517d7d5a067045801 +type StatsResponse struct { Stats Name string `json:"name,omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go index 6dbcd9223..e225df4ec 100644 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -1,4 +1,5 @@ package events // import "github.com/docker/docker/api/types/events" +import "github.com/docker/docker/api/types/filters" // Type is used for event-types. type Type string @@ -125,3 +126,10 @@ type Message struct { Time int64 `json:"time,omitempty"` TimeNano int64 `json:"timeNano,omitempty"` } + +// ListOptions holds parameters to filter events with. +type ListOptions struct { + Since string + Until string + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/image/image.go b/vendor/github.com/docker/docker/api/types/image/image.go index 167df28c7..abb7ffd80 100644 --- a/vendor/github.com/docker/docker/api/types/image/image.go +++ b/vendor/github.com/docker/docker/api/types/image/image.go @@ -1,9 +1,47 @@ package image -import "time" +import ( + "io" + "time" +) // Metadata contains engine-local data about the image. type Metadata struct { // LastTagTime is the date and time at which the image was last tagged. LastTagTime time.Time `json:",omitempty"` } + +// PruneReport contains the response for Engine API: +// POST "/images/prune" +type PruneReport struct { + ImagesDeleted []DeleteResponse + SpaceReclaimed uint64 +} + +// LoadResponse returns information to the client about a load process. +// +// TODO(thaJeztah): remove this type, and just use an io.ReadCloser +// +// This type was added in https://github.com/moby/moby/pull/18878, related +// to https://github.com/moby/moby/issues/19177; +// +// Make docker load to output json when the response content type is json +// Swarm hijacks the response from docker load and returns JSON rather +// than plain text like the Engine does. This makes the API library to return +// information to figure that out. 
+// +// However the "load" endpoint unconditionally returns JSON; +// https://github.com/moby/moby/blob/7b9d2ef6e5518a3d3f3cc418459f8df786cfbbd1/api/server/router/image/image_routes.go#L248-L255 +// +// PR https://github.com/moby/moby/pull/21959 made the response-type depend +// on whether "quiet" was set, but this logic got changed in a follow-up +// https://github.com/moby/moby/pull/25557, which made the JSON response-type +// unconditionally, but the output produced depend on whether"quiet" was set. +// +// We should deprecated the "quiet" option, as it's really a client +// responsibility. +type LoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go index c6b1f351b..8e32c9af8 100644 --- a/vendor/github.com/docker/docker/api/types/image/opts.go +++ b/vendor/github.com/docker/docker/api/types/image/opts.go @@ -1,6 +1,18 @@ package image -import "github.com/docker/docker/api/types/filters" +import ( + "context" + "io" + + "github.com/docker/docker/api/types/filters" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ImportSource holds source information for ImageImport +type ImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. +} // ImportOptions holds information to import images from the client host. type ImportOptions struct { @@ -27,12 +39,28 @@ type PullOptions struct { // privilege request fails. // // Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc]. - PrivilegeFunc func() (string, error) + PrivilegeFunc func(context.Context) (string, error) Platform string } // PushOptions holds information to push images. -type PushOptions PullOptions +type PushOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + + // PrivilegeFunc is a function that clients can supply to retry operations + // after getting an authorization error. This function returns the registry + // authentication header value in base64 encoded format, or an error if the + // privilege request fails. + // + // Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc]. + PrivilegeFunc func(context.Context) (string, error) + + // Platform is an optional field that selects a specific platform to push + // when the image is a multi-platform image. + // Using this will only push a single platform-specific manifest. + Platform *ocispec.Platform `json:",omitempty"` +} // ListOptions holds parameters to list images with. type ListOptions struct { diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index 6fe04da25..c68dcf65b 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -119,7 +119,11 @@ type TmpfsOptions struct { SizeBytes int64 `json:",omitempty"` // Mode of the tmpfs upon creation Mode os.FileMode `json:",omitempty"` - + // Options to be passed to the tmpfs mount. An array of arrays. Flag + // options should be provided as 1-length arrays. Other types should be + // provided as 2-length arrays, where the first item is the key and the + // second the value. 
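The new `Options` field follows the shape described in the comment above: flag options are 1-length arrays, key/value options are 2-length arrays. A minimal sketch using only types from this package; the helper name and option values are illustrative:

package example

import "github.com/docker/docker/api/types/mount"

// tmpfsMount declares a tmpfs mount that passes raw options through to the
// mount: "noexec" as a flag, "uid" as a key/value pair.
func tmpfsMount() mount.Mount {
	return mount.Mount{
		Type:   mount.TypeTmpfs,
		Target: "/run",
		TmpfsOptions: &mount.TmpfsOptions{
			SizeBytes: 64 * 1024 * 1024,
			Mode:      0o1777,
			Options: [][]string{
				{"noexec"},      // flag option
				{"uid", "1000"}, // key/value option
			},
		},
	}
}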
+ Options [][]string `json:",omitempty"` // TODO(stevvooe): There are several more tmpfs flags, specified in the // daemon, that are accepted. Only the most basic are added for now. // diff --git a/vendor/github.com/docker/docker/api/types/network/create_response.go b/vendor/github.com/docker/docker/api/types/network/create_response.go new file mode 100644 index 000000000..c32b35bff --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/create_response.go @@ -0,0 +1,19 @@ +package network + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// CreateResponse NetworkCreateResponse +// +// OK response to NetworkCreate operation +// swagger:model CreateResponse +type CreateResponse struct { + + // The ID of the created network. + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warning string `json:"Warning"` +} diff --git a/vendor/github.com/docker/docker/api/types/network/endpoint.go b/vendor/github.com/docker/docker/api/types/network/endpoint.go index 9edd1c38d..0fbb40b35 100644 --- a/vendor/github.com/docker/docker/api/types/network/endpoint.go +++ b/vendor/github.com/docker/docker/api/types/network/endpoint.go @@ -18,6 +18,7 @@ type EndpointSettings struct { // Once the container is running, it becomes operational data (it may contain a // generated address). MacAddress string + DriverOpts map[string]string // Operational data NetworkID string EndpointID string @@ -27,7 +28,6 @@ type EndpointSettings struct { IPv6Gateway string GlobalIPv6Address string GlobalIPv6PrefixLen int - DriverOpts map[string]string // DNSNames holds all the (non fully qualified) DNS names associated to this endpoint. First entry is used to // generate PTR records. DNSNames []string diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go index f1f300f3d..c8db97a7e 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -1,6 +1,8 @@ package network // import "github.com/docker/docker/api/types/network" import ( + "time" + "github.com/docker/docker/api/types/filters" ) @@ -17,6 +19,82 @@ const ( NetworkNat = "nat" ) +// CreateRequest is the request message sent to the server for network create call. +type CreateRequest struct { + CreateOptions + Name string // Name is the requested name of the network. + + // Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client + // package to older daemons. + CheckDuplicate *bool `json:",omitempty"` +} + +// CreateOptions holds options to create a network. +type CreateOptions struct { + Driver string // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`) + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level). + EnableIPv6 *bool `json:",omitempty"` // EnableIPv6 represents whether to enable IPv6. + IPAM *IPAM // IPAM is the network's IP Address Management. + Internal bool // Internal represents if the network is used internal only. + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. 
+ ConfigOnly bool // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + ConfigFrom *ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [CreateOptions.ConfigOnly]. + Options map[string]string // Options specifies the network-specific options to use for when creating the network. + Labels map[string]string // Labels holds metadata specific to the network being created. +} + +// ListOptions holds parameters to filter the list of networks with. +type ListOptions struct { + Filters filters.Args +} + +// InspectOptions holds parameters to inspect network. +type InspectOptions struct { + Scope string + Verbose bool +} + +// ConnectOptions represents the data to be used to connect a container to the +// network. +type ConnectOptions struct { + Container string + EndpointConfig *EndpointSettings `json:",omitempty"` +} + +// DisconnectOptions represents the data to be used to disconnect a container +// from the network. +type DisconnectOptions struct { + Container string + Force bool +} + +// Inspect is the body of the "get network" http response message. +type Inspect struct { + Name string // Name is the name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network created + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is used internal only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigFrom ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. + ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network specific options to use for when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network + Services map[string]ServiceInfo `json:",omitempty"` +} + +// Summary is used as response when listing networks. It currently is an alias +// for [Inspect], but may diverge in the future, as not all information may +// be included when listing networks. +type Summary = Inspect + // Address represents an IP address type Address struct { Addr string @@ -45,6 +123,16 @@ type ServiceInfo struct { Tasks []Task } +// EndpointResource contains network resources allocated and used for a +// container in a network. 
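`DriverOpts` now sits with the user-configurable fields of `EndpointSettings` (see `endpoint.go` above). A sketch of passing driver options when connecting a container, assuming the long-standing `NetworkConnect` client signature; the helper name and option key are illustrative:

package example

import (
	"context"

	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

// connectWithDriverOpts attaches a container to a network, forwarding
// driver-specific options verbatim to the network driver.
func connectWithDriverOpts(ctx context.Context, cli *client.Client, networkID, containerID string) error {
	return cli.NetworkConnect(ctx, networkID, containerID, &network.EndpointSettings{
		DriverOpts: map[string]string{
			"com.example.driver-opt": "some-value", // hypothetical driver option
		},
	})
}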
+type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + // NetworkingConfig represents the container's networking configuration for each of its interfaces // Carries the networking configs specified in the `docker run` and `docker network connect` commands type NetworkingConfig struct { @@ -70,3 +158,9 @@ var acceptedFilters = map[string]bool{ func ValidateFilters(filter filters.Args) error { return filter.Validate(acceptedFilters) } + +// PruneReport contains the response for Engine API: +// POST "/networks/prune" +type PruneReport struct { + NetworksDeleted []string +} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 6bbae93ef..75ee07b15 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -84,32 +84,6 @@ type IndexInfo struct { Official bool } -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. - IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated. - // - // Deprecated: the "is_automated" field is deprecated and will always be "false". - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} - // DistributionInspect describes the result obtained from contacting the // registry to retrieve image metadata type DistributionInspect struct { diff --git a/vendor/github.com/docker/docker/api/types/registry/search.go b/vendor/github.com/docker/docker/api/types/registry/search.go new file mode 100644 index 000000000..a0a1eec54 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/search.go @@ -0,0 +1,47 @@ +package registry + +import ( + "context" + + "github.com/docker/docker/api/types/filters" +) + +// SearchOptions holds parameters to search images with. +type SearchOptions struct { + RegistryAuth string + + // PrivilegeFunc is a [types.RequestPrivilegeFunc] the client can + // supply to retry operations after getting an authorization error. + // + // It must return the registry authentication header value in base64 + // format, or an error if the privilege request fails. + PrivilegeFunc func(context.Context) (string, error) + Filters filters.Args + Limit int +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. 
+ IsOfficial bool `json:"is_official"` + // Name is the name of the repository + Name string `json:"name"` + // IsAutomated indicates whether the result is automated. + // + // Deprecated: the "is_automated" field is deprecated and will always be "false". + IsAutomated bool `json:"is_automated"` + // Description is a textual description of the repository + Description string `json:"description"` +} + +// SearchResults lists a collection search results returned from a registry +type SearchResults struct { + // Query contains the query string that generated the search results + Query string `json:"query"` + // NumResults indicates the number of results the query returned + NumResults int `json:"num_results"` + // Results is a slice containing the actual results for the search + Results []SearchResult `json:"results"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index 65f61d2d2..30e3de70c 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -5,7 +5,6 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/mount" - "github.com/docker/go-units" ) // DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) @@ -115,5 +114,6 @@ type ContainerSpec struct { Sysctls map[string]string `json:",omitempty"` CapabilityAdd []string `json:",omitempty"` CapabilityDrop []string `json:",omitempty"` - Ulimits []*units.Ulimit `json:",omitempty"` + Ulimits []*container.Ulimit `json:",omitempty"` + OomScoreAdj int64 `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/system/info.go b/vendor/github.com/docker/docker/api/types/system/info.go index 89d4a0098..c66a2afb8 100644 --- a/vendor/github.com/docker/docker/api/types/system/info.go +++ b/vendor/github.com/docker/docker/api/types/system/info.go @@ -75,8 +75,7 @@ type Info struct { DefaultAddressPools []NetworkAddressPool `json:",omitempty"` CDISpecDirs []string - // Legacy API fields for older API versions. - legacyFields + Containerd *ContainerdInfo `json:",omitempty"` // Warnings contains a slice of warnings that occurred while collecting // system information. These warnings are intended to be informational @@ -85,8 +84,41 @@ type Info struct { Warnings []string } -type legacyFields struct { - ExecutionDriver string `json:",omitempty"` // Deprecated: deprecated since API v1.25, but returned for older versions. +// ContainerdInfo holds information about the containerd instance used by the daemon. +type ContainerdInfo struct { + // Address is the path to the containerd socket. + Address string `json:",omitempty"` + // Namespaces is the containerd namespaces used by the daemon. + Namespaces ContainerdNamespaces +} + +// ContainerdNamespaces reflects the containerd namespaces used by the daemon. +// +// These namespaces can be configured in the daemon configuration, and are +// considered to be used exclusively by the daemon, +// +// As these namespaces are considered to be exclusively accessed +// by the daemon, it is not recommended to change these values, +// or to change them to a value that is used by other systems, +// such as cri-containerd. +type ContainerdNamespaces struct { + // Containers holds the default containerd namespace used for + // containers managed by the daemon. 
+	//
+	// The default namespace for containers is "moby", but will be
+	// suffixed with the `<uid>.<gid>` of the remapped `root` if
+	// user-namespaces are enabled and the containerd image-store
+	// is used.
+	Containers string
+
+	// Plugins holds the default containerd namespace used for
+	// plugins managed by the daemon.
+	//
+	// The default namespace for plugins is "moby", but will be
+	// suffixed with the `<uid>.<gid>` of the remapped `root` if
+	// user-namespaces are enabled and the containerd image-store
+	// is used.
+	Plugins string
 }
 
 // PluginsInfo is a temp struct holding Plugins name
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go
index ca07162a2..fe99b7439 100644
--- a/vendor/github.com/docker/docker/api/types/types.go
+++ b/vendor/github.com/docker/docker/api/types/types.go
@@ -1,8 +1,6 @@
 package types // import "github.com/docker/docker/api/types"
 
 import (
-	"io"
-	"os"
 	"time"
 
 	"github.com/docker/docker/api/types/container"
@@ -155,36 +153,13 @@ type Container struct {
 	State      string
 	Status     string
 	HostConfig struct {
-		NetworkMode string `json:",omitempty"`
+		NetworkMode string            `json:",omitempty"`
+		Annotations map[string]string `json:",omitempty"`
 	}
 	NetworkSettings *SummaryNetworkSettings
 	Mounts          []MountPoint
 }
 
-// CopyConfig contains request body of Engine API:
-// POST "/containers/"+containerID+"/copy"
-type CopyConfig struct {
-	Resource string
-}
-
-// ContainerPathStat is used to encode the header from
-// GET "/containers/{name:.*}/archive"
-// "Name" is the file or directory name.
-type ContainerPathStat struct {
-	Name       string      `json:"name"`
-	Size       int64       `json:"size"`
-	Mode       os.FileMode `json:"mode"`
-	Mtime      time.Time   `json:"mtime"`
-	LinkTarget string      `json:"linkTarget"`
-}
-
-// ContainerStats contains response of Engine API:
-// GET "/stats"
-type ContainerStats struct {
-	Body   io.ReadCloser `json:"body"`
-	OSType string        `json:"ostype"`
-}
-
 // Ping contains response of Engine API:
 // GET "/_ping"
 type Ping struct {
@@ -230,17 +205,6 @@ type Version struct {
 	BuildTime string `json:",omitempty"`
 }
 
-// ExecStartCheck is a temp struct used by execStart
-// Config fields is part of ExecConfig in runconfig package
-type ExecStartCheck struct {
-	// ExecStart will first check if it's detached
-	Detach bool
-	// Check if there's a tty
-	Tty bool
-	// Terminal size [height, width], unused if Tty == false
-	ConsoleSize *[2]uint `json:",omitempty"`
-}
-
 // HealthcheckResult stores information about a single run of a healthcheck probe
 type HealthcheckResult struct {
 	Start time.Time // Start is the time this check started
@@ -281,18 +245,6 @@ type ContainerState struct {
 	Health *Health `json:",omitempty"`
 }
 
-// ContainerNode stores information about the node that a container
-// is running on. It's only used by the Docker Swarm standalone API
-type ContainerNode struct {
-	ID        string
-	IPAddress string `json:"IP"`
-	Addr      string
-	Name      string
-	Cpus      int
-	Memory    int64
-	Labels    map[string]string
-}
-
 // ContainerJSONBase contains response of Engine API:
 // GET "/containers/{name:.*}/json"
 type ContainerJSONBase struct {
@@ -306,7 +258,7 @@ type ContainerJSONBase struct {
 	HostnamePath    string
 	HostsPath       string
 	LogPath         string
-	Node            *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API
+	Node            *ContainerNode `json:",omitempty"` // Deprecated: Node was only propagated by Docker Swarm standalone API. It will be removed in the next release.
Name string RestartCount int Driver string @@ -423,84 +375,6 @@ type MountPoint struct { Propagation mount.Propagation } -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. - ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. - ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created - Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network - Services map[string]network.ServiceInfo `json:",omitempty"` -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - // Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client - // package to older daemons. - CheckDuplicate bool `json:",omitempty"` - Driver string - Scope string - EnableIPv6 bool - IPAM *network.IPAM - Internal bool - Attachable bool - Ingress bool - ConfigOnly bool - ConfigFrom *network.ConfigReference - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. 
-type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} - -// NetworkInspectOptions holds parameters to inspect network -type NetworkInspectOptions struct { - Scope string - Verbose bool -} - // DiskUsageObject represents an object type used for disk usage query filtering. type DiskUsageObject string @@ -533,27 +407,6 @@ type DiskUsage struct { BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. } -// ContainersPruneReport contains the response for Engine API: -// POST "/containers/prune" -type ContainersPruneReport struct { - ContainersDeleted []string - SpaceReclaimed uint64 -} - -// VolumesPruneReport contains the response for Engine API: -// POST "/volumes/prune" -type VolumesPruneReport struct { - VolumesDeleted []string - SpaceReclaimed uint64 -} - -// ImagesPruneReport contains the response for Engine API: -// POST "/images/prune" -type ImagesPruneReport struct { - ImagesDeleted []image.DeleteResponse - SpaceReclaimed uint64 -} - // BuildCachePruneReport contains the response for Engine API: // POST "/build/prune" type BuildCachePruneReport struct { @@ -561,12 +414,6 @@ type BuildCachePruneReport struct { SpaceReclaimed uint64 } -// NetworksPruneReport contains the response for Engine API: -// POST "/networks/prune" -type NetworksPruneReport struct { - NetworksDeleted []string -} - // SecretCreateResponse contains the information returned to a client // on the creation of a new secret. type SecretCreateResponse struct { diff --git a/vendor/github.com/docker/docker/api/types/types_deprecated.go b/vendor/github.com/docker/docker/api/types/types_deprecated.go index 231a5cca4..43ffe104a 100644 --- a/vendor/github.com/docker/docker/api/types/types_deprecated.go +++ b/vendor/github.com/docker/docker/api/types/types_deprecated.go @@ -1,35 +1,210 @@ package types import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/volume" ) -// ImageImportOptions holds information to import images from the client host. +// ImagesPruneReport contains the response for Engine API: +// POST "/images/prune" // -// Deprecated: use [image.ImportOptions]. -type ImageImportOptions = image.ImportOptions +// Deprecated: use [image.PruneReport]. +type ImagesPruneReport = image.PruneReport -// ImageCreateOptions holds information to create images. +// VolumesPruneReport contains the response for Engine API: +// POST "/volumes/prune". // -// Deprecated: use [image.CreateOptions]. -type ImageCreateOptions = image.CreateOptions +// Deprecated: use [volume.PruneReport]. +type VolumesPruneReport = volume.PruneReport -// ImagePullOptions holds information to pull images. +// NetworkCreateRequest is the request message sent to the server for network create call. 
 //
-// Deprecated: use [image.PullOptions].
-type ImagePullOptions = image.PullOptions
+// Deprecated: use [network.CreateRequest].
+type NetworkCreateRequest = network.CreateRequest
 
-// ImagePushOptions holds information to push images.
+// NetworkCreate is the expected body of the "create network" http request message
 //
-// Deprecated: use [image.PushOptions].
-type ImagePushOptions = image.PushOptions
+// Deprecated: use [network.CreateOptions].
+type NetworkCreate = network.CreateOptions
 
-// ImageListOptions holds parameters to list images with.
+// NetworkListOptions holds parameters to filter the list of networks with.
 //
-// Deprecated: use [image.ListOptions].
-type ImageListOptions = image.ListOptions
+// Deprecated: use [network.ListOptions].
+type NetworkListOptions = network.ListOptions
 
-// ImageRemoveOptions holds parameters to remove images.
+// NetworkCreateResponse is the response message sent by the server for network create call.
 //
-// Deprecated: use [image.RemoveOptions].
-type ImageRemoveOptions = image.RemoveOptions
+// Deprecated: use [network.CreateResponse].
+type NetworkCreateResponse = network.CreateResponse
+
+// NetworkInspectOptions holds parameters to inspect network.
+//
+// Deprecated: use [network.InspectOptions].
+type NetworkInspectOptions = network.InspectOptions
+
+// NetworkConnect represents the data to be used to connect a container to the network
+//
+// Deprecated: use [network.ConnectOptions].
+type NetworkConnect = network.ConnectOptions
+
+// NetworkDisconnect represents the data to be used to disconnect a container from the network
+//
+// Deprecated: use [network.DisconnectOptions].
+type NetworkDisconnect = network.DisconnectOptions
+
+// EndpointResource contains network resources allocated and used for a container in a network.
+//
+// Deprecated: use [network.EndpointResource].
+type EndpointResource = network.EndpointResource
+
+// NetworkResource is the body of the "get network" http response message.
+//
+// Deprecated: use [network.Inspect] or [network.Summary] (for list operations).
+type NetworkResource = network.Inspect
+
+// NetworksPruneReport contains the response for Engine API:
+// POST "/networks/prune"
+//
+// Deprecated: use [network.PruneReport].
+type NetworksPruneReport = network.PruneReport
+
+// ExecConfig is a small subset of the Config struct that holds the configuration
+// for the exec feature of docker.
+//
+// Deprecated: use [container.ExecOptions].
+type ExecConfig = container.ExecOptions
+
+// ExecStartCheck is a temp struct used by execStart
+// Config fields are part of ExecConfig in runconfig package
+//
+// Deprecated: use [container.ExecStartOptions] or [container.ExecAttachOptions].
+type ExecStartCheck = container.ExecStartOptions
+
+// ContainerExecInspect holds information returned by exec inspect.
+//
+// Deprecated: use [container.ExecInspect].
+type ContainerExecInspect = container.ExecInspect
+
+// ContainersPruneReport contains the response for Engine API:
+// POST "/containers/prune"
+//
+// Deprecated: use [container.PruneReport].
+type ContainersPruneReport = container.PruneReport
+
+// ContainerPathStat is used to encode the header from
+// GET "/containers/{name:.*}/archive"
+// "Name" is the file or directory name.
+//
+// Deprecated: use [container.PathStat].
+type ContainerPathStat = container.PathStat
+
+// CopyToContainerOptions holds information
+// about files to copy into a container.
+//
+// Deprecated: use [container.CopyToContainerOptions].
+type CopyToContainerOptions = container.CopyToContainerOptions
+
+// ContainerStats contains response of Engine API:
+// GET "/stats"
+//
+// Deprecated: use [container.StatsResponseReader].
+type ContainerStats = container.StatsResponseReader
+
+// ThrottlingData stores CPU throttling stats of one running container.
+// Not used on Windows.
+//
+// Deprecated: use [container.ThrottlingData].
+type ThrottlingData = container.ThrottlingData
+
+// CPUUsage stores all CPU stats aggregated since container inception.
+//
+// Deprecated: use [container.CPUUsage].
+type CPUUsage = container.CPUUsage
+
+// CPUStats aggregates and wraps all CPU related info of container
+//
+// Deprecated: use [container.CPUStats].
+type CPUStats = container.CPUStats
+
+// MemoryStats aggregates all memory stats since container inception on Linux.
+// Windows returns stats for commit and private working set only.
+//
+// Deprecated: use [container.MemoryStats].
+type MemoryStats = container.MemoryStats
+
+// BlkioStatEntry is one small entity to store a piece of Blkio stats
+// Not used on Windows.
+//
+// Deprecated: use [container.BlkioStatEntry].
+type BlkioStatEntry = container.BlkioStatEntry
+
+// BlkioStats stores all IO service stats for data read and write.
+// This is a Linux specific structure as the differences between expressing
+// block I/O on Windows and Linux are sufficiently significant to make
+// little sense attempting to morph into a combined structure.
+//
+// Deprecated: use [container.BlkioStats].
+type BlkioStats = container.BlkioStats
+
+// StorageStats is the disk I/O stats for read/write on Windows.
+//
+// Deprecated: use [container.StorageStats].
+type StorageStats = container.StorageStats
+
+// NetworkStats aggregates the network stats of one container
+//
+// Deprecated: use [container.NetworkStats].
+type NetworkStats = container.NetworkStats
+
+// PidsStats contains the stats of a container's pids
+//
+// Deprecated: use [container.PidsStats].
+type PidsStats = container.PidsStats
+
+// Stats is the ultimate struct aggregating all types of stats of one container
+//
+// Deprecated: use [container.Stats].
+type Stats = container.Stats
+
+// StatsJSON is the stats response extended with network stats.
+//
+// Deprecated: use [container.StatsResponse].
+type StatsJSON = container.StatsResponse
+
+// EventsOptions holds parameters to filter events with.
+//
+// Deprecated: use [events.ListOptions].
+type EventsOptions = events.ListOptions
+
+// ImageSearchOptions holds parameters to search images with.
+//
+// Deprecated: use [registry.SearchOptions].
+type ImageSearchOptions = registry.SearchOptions
+
+// ImageImportSource holds source information for ImageImport
+//
+// Deprecated: use [image.ImportSource].
+type ImageImportSource = image.ImportSource
+
+// ImageLoadResponse returns information to the client about a load process.
+//
+// Deprecated: use [image.LoadResponse].
+type ImageLoadResponse = image.LoadResponse
+
+// ContainerNode stores information about the node that a container
+// is running on. It's only used by the Docker Swarm standalone API.
+//
+// Deprecated: ContainerNode was used for the classic Docker Swarm standalone API. It will be removed in the next release.
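The aliases above keep code that still imports the old names from api/types compiling; new code should import the split packages directly. For illustration, a minimal migration sketch, assuming a reachable daemon and the v27 Go client (the "until" prune filter is illustrative):

    package main

    import (
        "context"
        "fmt"

        "github.com/docker/docker/api/types/filters"
        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        // The report used to be a types.NetworksPruneReport; the alias keeps
        // that name working, but network.PruneReport is the canonical type now.
        report, err := cli.NetworksPrune(context.Background(),
            filters.NewArgs(filters.Arg("until", "24h")))
        if err != nil {
            panic(err)
        }
        fmt.Println("pruned networks:", report.NetworksDeleted)
    }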
+type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go index 8b0dd1389..0b9645e00 100644 --- a/vendor/github.com/docker/docker/api/types/volume/options.go +++ b/vendor/github.com/docker/docker/api/types/volume/options.go @@ -6,3 +6,10 @@ import "github.com/docker/docker/api/types/filters" type ListOptions struct { Filters filters.Args } + +// PruneReport contains the response for Engine API: +// POST "/volumes/prune" +type PruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index f2eeb6c57..60d91bc65 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -49,6 +49,8 @@ import ( "net/url" "path" "strings" + "sync" + "sync/atomic" "time" "github.com/docker/docker/api" @@ -131,7 +133,10 @@ type Client struct { negotiateVersion bool // negotiated indicates that API version negotiation took place - negotiated bool + negotiated atomic.Bool + + // negotiateLock is used to single-flight the version negotiation process + negotiateLock sync.Mutex tp trace.TracerProvider @@ -266,7 +271,16 @@ func (cli *Client) Close() error { // be negotiated when making the actual requests, and for which cases // we cannot do the negotiation lazily. func (cli *Client) checkVersion(ctx context.Context) error { - if !cli.manualOverride && cli.negotiateVersion && !cli.negotiated { + if !cli.manualOverride && cli.negotiateVersion && !cli.negotiated.Load() { + // Ensure exclusive write access to version and negotiated fields + cli.negotiateLock.Lock() + defer cli.negotiateLock.Unlock() + + // May have been set during last execution of critical zone + if cli.negotiated.Load() { + return nil + } + ping, err := cli.Ping(ctx) if err != nil { return err @@ -312,6 +326,10 @@ func (cli *Client) ClientVersion() string { // added (1.24). func (cli *Client) NegotiateAPIVersion(ctx context.Context) { if !cli.manualOverride { + // Avoid concurrent modification of version-related fields + cli.negotiateLock.Lock() + defer cli.negotiateLock.Unlock() + ping, err := cli.Ping(ctx) if err != nil { // FIXME(thaJeztah): Ping returns an error when failing to connect to the API; we should not swallow the error here, and instead returning it. @@ -336,6 +354,10 @@ func (cli *Client) NegotiateAPIVersion(ctx context.Context) { // added (1.24). func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { if !cli.manualOverride { + // Avoid concurrent modification of version-related fields + cli.negotiateLock.Lock() + defer cli.negotiateLock.Unlock() + cli.negotiateAPIVersionPing(pingResponse) } } @@ -361,7 +383,7 @@ func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { // Store the results, so that automatic API version negotiation (if enabled) // won't be performed on the next request. 
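The negotiation change above swaps a plain bool for sync/atomic plus a mutex, which is the double-checked locking idiom: a lock-free fast path, then a re-check under the lock so that only one goroutine performs the Ping-based negotiation. A self-contained sketch of the idiom; the type and field names are illustrative, not the client's actual fields:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    type once struct {
        mu   sync.Mutex
        done atomic.Bool
    }

    func (o *once) Do(f func()) {
        if o.done.Load() { // fast path: no lock taken after first completion
            return
        }
        o.mu.Lock()
        defer o.mu.Unlock()
        if o.done.Load() { // re-check: a concurrent caller may have won the race
            return
        }
        f()
        o.done.Store(true) // publish only after f has finished
    }

    func main() {
        var o once
        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                o.Do(func() { fmt.Println("negotiated once") })
            }()
        }
        wg.Wait()
    }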
if cli.negotiateVersion { - cli.negotiated = true + cli.negotiated.Store(true) } } diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go index 883be7fa3..8490a3b15 100644 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -11,11 +11,11 @@ import ( "path/filepath" "strings" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerStatPath returns stat information about a path inside the container filesystem. -func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { +func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (container.PathStat, error) { query := url.Values{} query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. @@ -23,14 +23,14 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri response, err := cli.head(ctx, urlStr, query, nil) defer ensureReaderClosed(response) if err != nil { - return types.ContainerPathStat{}, err + return container.PathStat{}, err } return getContainerPathStatFromHeader(response.header) } // CopyToContainer copies content into the container filesystem. // Note that `content` must be a Reader for a TAR archive -func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { +func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options container.CopyToContainerOptions) error { query := url.Values{} query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. @@ -55,14 +55,14 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str // CopyFromContainer gets the content from the container and returns it as a Reader // for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { +func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, container.PathStat, error) { query := make(url.Values, 1) query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. 
	apiPath := "/containers/" + containerID + "/archive"
 
 	response, err := cli.get(ctx, apiPath, query, nil)
 	if err != nil {
-		return nil, types.ContainerPathStat{}, err
+		return nil, container.PathStat{}, err
 	}
 
 	// In order to get the copy behavior right, we need to know information
@@ -78,8 +78,8 @@ func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath s
 	return response.body, stat, err
 }
 
-func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) {
-	var stat types.ContainerPathStat
+func getContainerPathStatFromHeader(header http.Header) (container.PathStat, error) {
+	var stat container.PathStat
 
 	encodedStat := header.Get("X-Docker-Container-Path-Stat")
 	statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat))
diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go
index 526a3876a..9379448d1 100644
--- a/vendor/github.com/docker/docker/client/container_exec.go
+++ b/vendor/github.com/docker/docker/client/container_exec.go
@@ -6,11 +6,12 @@ import (
 	"net/http"
 
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/versions"
 )
 
 // ContainerExecCreate creates a new exec configuration to run an exec process.
-func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
+func (cli *Client) ContainerExecCreate(ctx context.Context, container string, options container.ExecOptions) (types.IDResponse, error) {
 	var response types.IDResponse
 
 	// Make sure we negotiated (if the client is configured to do so),
@@ -22,14 +23,14 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co
 		return response, err
 	}
 
-	if err := cli.NewVersionError(ctx, "1.25", "env"); len(config.Env) != 0 && err != nil {
+	if err := cli.NewVersionError(ctx, "1.25", "env"); len(options.Env) != 0 && err != nil {
 		return response, err
 	}
 	if versions.LessThan(cli.ClientVersion(), "1.42") {
-		config.ConsoleSize = nil
+		options.ConsoleSize = nil
 	}
 
-	resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
+	resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, options, nil)
 	defer ensureReaderClosed(resp)
 	if err != nil {
 		return response, err
@@ -39,7 +40,7 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co
 }
 
 // ContainerExecStart starts an exec process already created in the docker host.
-func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
+func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config container.ExecStartOptions) error {
 	if versions.LessThan(cli.ClientVersion(), "1.42") {
 		config.ConsoleSize = nil
 	}
@@ -52,7 +53,7 @@ func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config
 // It returns a types.HijackedResponse with the hijacked connection
 // and a reader to get output. It's up to the caller to close
 // the hijacked connection by calling types.HijackedResponse.Close.
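From the caller's side, the exec signature changes in this file replace types.ExecConfig and types.ExecStartCheck with container.ExecOptions and container.ExecAttachOptions. A hedged sketch of a full exec round-trip; the container name and command are illustrative:

    package main

    import (
        "context"
        "io"
        "os"

        "github.com/docker/docker/api/types/container"
        "github.com/docker/docker/client"
    )

    func main() {
        ctx := context.Background()
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        created, err := cli.ContainerExecCreate(ctx, "my-container", container.ExecOptions{
            Cmd:          []string{"echo", "hello"},
            AttachStdout: true,
        })
        if err != nil {
            panic(err)
        }

        attach, err := cli.ContainerExecAttach(ctx, created.ID, container.ExecAttachOptions{})
        if err != nil {
            panic(err)
        }
        defer attach.Close()
        _, _ = io.Copy(os.Stdout, attach.Reader) // multiplexed stream unless Tty is set
    }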
-func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { +func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config container.ExecAttachOptions) (types.HijackedResponse, error) { if versions.LessThan(cli.ClientVersion(), "1.42") { config.ConsoleSize = nil } @@ -62,8 +63,8 @@ func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, confi } // ContainerExecInspect returns information about a specific exec process on the docker host. -func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { - var response types.ContainerExecInspect +func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error) { + var response container.ExecInspect resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) if err != nil { return response, err diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go index ca5092384..29c922da7 100644 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -5,13 +5,13 @@ import ( "encoding/json" "fmt" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" ) // ContainersPrune requests the daemon to delete unused data -func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { - var report types.ContainersPruneReport +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error) { + var report container.PruneReport if err := cli.NewVersionError(ctx, "1.25", "container prune"); err != nil { return report, err diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go index 3fabb75f3..b5641daee 100644 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -4,12 +4,12 @@ import ( "context" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerStats returns near realtime stats for a given container. // It's up to the caller to close the io.ReadCloser returned. -func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { +func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (container.StatsResponseReader, error) { query := url.Values{} query.Set("stream", "0") if stream { @@ -18,10 +18,10 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) if err != nil { - return types.ContainerStats{}, err + return container.StatsResponseReader{}, err } - return types.ContainerStats{ + return container.StatsResponseReader{ Body: resp.body, OSType: getDockerOS(resp.header.Get("Server")), }, nil @@ -29,17 +29,17 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea // ContainerStatsOneShot gets a single stat entry from a container. 
// It differs from `ContainerStats` in that the API should not wait to prime the stats -func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) { +func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (container.StatsResponseReader, error) { query := url.Values{} query.Set("stream", "0") query.Set("one-shot", "1") resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) if err != nil { - return types.ContainerStats{}, err + return container.StatsResponseReader{}, err } - return types.ContainerStats{ + return container.StatsResponseReader{ Body: resp.body, OSType: getDockerOS(resp.header.Get("Server")), }, nil diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go index a9c48a928..d3ab26bed 100644 --- a/vendor/github.com/docker/docker/client/events.go +++ b/vendor/github.com/docker/docker/client/events.go @@ -6,7 +6,6 @@ import ( "net/url" "time" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" timetypes "github.com/docker/docker/api/types/time" @@ -16,7 +15,7 @@ import ( // by cancelling the context. Once the stream has been completely read an io.EOF error will // be sent over the error channel. If an error is sent all processing will be stopped. It's up // to the caller to reopen the stream in the event of an error by reinvoking this method. -func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { +func (cli *Client) Events(ctx context.Context, options events.ListOptions) (<-chan events.Message, <-chan error) { messages := make(chan events.Message) errs := make(chan error, 1) @@ -68,7 +67,7 @@ func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-c return messages, errs } -func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { +func buildEventsQueryParams(cliVersion string, options events.ListOptions) (url.Values, error) { query := url.Values{} ref := time.Now() diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index 5a890b0c5..43d55eda8 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -7,13 +7,12 @@ import ( "strings" "github.com/distribution/reference" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/image" ) // ImageImport creates a new image based on the source options. // It returns the JSON content in the response body. 
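The source parameter of ImageImport below moves from types.ImageImportSource to image.ImportSource; the field shape (Source io.Reader, SourceName string) is unchanged. A sketch, assuming an illustrative local tarball:

    package main

    import (
        "context"
        "io"
        "os"

        "github.com/docker/docker/api/types/image"
        "github.com/docker/docker/client"
    )

    func main() {
        ctx := context.Background()
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        f, err := os.Open("rootfs.tar") // illustrative tarball path
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // SourceName "-" tells the daemon to read the image from the stream.
        rd, err := cli.ImageImport(ctx, image.ImportSource{Source: f, SourceName: "-"},
            "example:imported", image.ImportOptions{})
        if err != nil {
            panic(err)
        }
        defer rd.Close()
        _, _ = io.Copy(os.Stdout, rd) // JSON progress stream
    }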
-func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) { +func (cli *Client) ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) { if ref != "" { // Check if the given image name can be resolved if _, err := reference.ParseNormalizedNamed(ref); err != nil { diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go index c825206ea..c68f0013e 100644 --- a/vendor/github.com/docker/docker/client/image_load.go +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -6,13 +6,13 @@ import ( "net/http" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/image" ) // ImageLoad loads an image in the docker host from the client host. // It's up to the caller to close the io.ReadCloser in the // ImageLoadResponse returned by this function. -func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error) { v := url.Values{} v.Set("quiet", "0") if quiet { @@ -22,9 +22,9 @@ func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) ( "Content-Type": {"application/x-tar"}, }) if err != nil { - return types.ImageLoadResponse{}, err + return image.LoadResponse{}, err } - return types.ImageLoadResponse{ + return image.LoadResponse{ Body: resp.body, JSON: resp.header.Get("Content-Type") == "application/json", }, nil diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go index 6b82d6ab6..5ee987e24 100644 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -5,13 +5,13 @@ import ( "encoding/json" "fmt" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" ) // ImagesPrune requests the daemon to delete unused data -func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { - var report types.ImagesPruneReport +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (image.PruneReport, error) { + var report image.PruneReport if err := cli.NewVersionError(ctx, "1.25", "image prune"); err != nil { return report, err diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go index 6438cf6a9..1634c4c80 100644 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -36,7 +36,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() + newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { return nil, privilegeErr } diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go index e6a6b11ee..16f9c4651 100644 --- a/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -2,7 +2,9 @@ package client // import 
"github.com/docker/docker/client" import ( "context" + "encoding/json" "errors" + "fmt" "io" "net/http" "net/url" @@ -36,9 +38,23 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu } } + if options.Platform != nil { + if err := cli.NewVersionError(ctx, "1.46", "platform"); err != nil { + return nil, err + } + + p := *options.Platform + pJson, err := json.Marshal(p) + if err != nil { + return nil, fmt.Errorf("invalid platform: %v", err) + } + + query.Set("platform", string(pJson)) + } + resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() + newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { return nil, privilegeErr } diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index 8971b139a..0a0745757 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -7,7 +7,6 @@ import ( "net/url" "strconv" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" @@ -15,7 +14,7 @@ import ( // ImageSearch makes the docker host search by a term in a remote registry. // The list of results is not sorted in any fashion. -func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { +func (cli *Client) ImageSearch(ctx context.Context, term string, options registry.SearchOptions) ([]registry.SearchResult, error) { var results []registry.SearchResult query := url.Values{} query.Set("term", term) @@ -34,7 +33,7 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) defer ensureReaderClosed(resp) if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() + newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { return results, privilegeErr } diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 45d233f25..cc60a5d13 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -50,11 +50,11 @@ type ContainerAPIClient interface { ContainerCommit(ctx context.Context, container string, options container.CommitOptions) (types.IDResponse, error) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error) - ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) - ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecAttach(ctx context.Context, execID string, options container.ExecAttachOptions) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, options 
container.ExecOptions) (types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (container.ExecInspect, error) ContainerExecResize(ctx context.Context, execID string, options container.ResizeOptions) error - ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExecStart(ctx context.Context, execID string, options container.ExecStartOptions) error ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) @@ -66,18 +66,18 @@ type ContainerAPIClient interface { ContainerRename(ctx context.Context, container, newContainerName string) error ContainerResize(ctx context.Context, container string, options container.ResizeOptions) error ContainerRestart(ctx context.Context, container string, options container.StopOptions) error - ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) - ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) - ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) + ContainerStatPath(ctx context.Context, container, path string) (container.PathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (container.StatsResponseReader, error) + ContainerStatsOneShot(ctx context.Context, container string) (container.StatsResponseReader, error) ContainerStart(ctx context.Context, container string, options container.StartOptions) error ContainerStop(ctx context.Context, container string, options container.StopOptions) error ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) ContainerUnpause(ctx context.Context, container string) error ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) - CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) - CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error - ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, container.PathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options container.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (container.PruneReport, error) } // DistributionAPIClient defines API client methods for the registry @@ -92,29 +92,29 @@ type ImageAPIClient interface { BuildCancel(ctx context.Context, id string) error ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) - ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) + ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) 
ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) - ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error) ImagePull(ctx context.Context, ref string, options image.PullOptions) (io.ReadCloser, error) ImagePush(ctx context.Context, ref string, options image.PushOptions) (io.ReadCloser, error) ImageRemove(ctx context.Context, image string, options image.RemoveOptions) ([]image.DeleteResponse, error) - ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSearch(ctx context.Context, term string, options registry.SearchOptions) ([]registry.SearchResult, error) ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) ImageTag(ctx context.Context, image, ref string) error - ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (image.PruneReport, error) } // NetworkAPIClient defines API client methods for the networks type NetworkAPIClient interface { NetworkConnect(ctx context.Context, network, container string, config *network.EndpointSettings) error - NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkCreate(ctx context.Context, name string, options network.CreateOptions) (network.CreateResponse, error) NetworkDisconnect(ctx context.Context, network, container string, force bool) error - NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) - NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) - NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkInspect(ctx context.Context, network string, options network.InspectOptions) (network.Inspect, error) + NetworkInspectWithRaw(ctx context.Context, network string, options network.InspectOptions) (network.Inspect, []byte, error) + NetworkList(ctx context.Context, options network.ListOptions) ([]network.Summary, error) NetworkRemove(ctx context.Context, network string) error - NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) + NetworksPrune(ctx context.Context, pruneFilter filters.Args) (network.PruneReport, error) } // NodeAPIClient defines API client methods for the nodes @@ -165,7 +165,7 @@ type SwarmAPIClient interface { // SystemAPIClient defines API client methods for the system type SystemAPIClient interface { - Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Events(ctx context.Context, options events.ListOptions) (<-chan events.Message, <-chan error) Info(ctx context.Context) (system.Info, error) RegistryLogin(ctx context.Context, auth registry.AuthConfig) (registry.AuthenticateOKBody, error) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) @@ -179,7 +179,7 @@ type VolumeAPIClient interface { VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) VolumeRemove(ctx context.Context, volumeID 
string, force bool) error - VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) + VolumesPrune(ctx context.Context, pruneFilter filters.Args) (volume.PruneReport, error) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error } diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go index 571894613..8daf89063 100644 --- a/vendor/github.com/docker/docker/client/network_connect.go +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -3,13 +3,12 @@ package client // import "github.com/docker/docker/client" import ( "context" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/network" ) // NetworkConnect connects a container to an existent network in the docker host. func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { - nc := types.NetworkConnect{ + nc := network.ConnectOptions{ Container: containerID, EndpointConfig: config, } diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go index d510feb3d..850e31cc9 100644 --- a/vendor/github.com/docker/docker/client/network_create.go +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -4,13 +4,13 @@ import ( "context" "encoding/json" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/versions" ) // NetworkCreate creates a new network in the docker host. -func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { - var response types.NetworkCreateResponse +func (cli *Client) NetworkCreate(ctx context.Context, name string, options network.CreateOptions) (network.CreateResponse, error) { + var response network.CreateResponse // Make sure we negotiated (if the client is configured to do so), // as code below contains API-version specific handling of options. @@ -21,12 +21,13 @@ func (cli *Client) NetworkCreate(ctx context.Context, name string, options types return response, err } - networkCreateRequest := types.NetworkCreateRequest{ - NetworkCreate: options, + networkCreateRequest := network.CreateRequest{ + CreateOptions: options, Name: name, } if versions.LessThan(cli.version, "1.44") { - networkCreateRequest.CheckDuplicate = true //nolint:staticcheck // ignore SA1019: CheckDuplicate is deprecated since API v1.44. + enabled := true + networkCreateRequest.CheckDuplicate = &enabled //nolint:staticcheck // ignore SA1019: CheckDuplicate is deprecated since API v1.44. } serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go index dd1567665..aaf428d85 100644 --- a/vendor/github.com/docker/docker/client/network_disconnect.go +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -3,12 +3,15 @@ package client // import "github.com/docker/docker/client" import ( "context" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" ) // NetworkDisconnect disconnects a container from an existent network in the docker host. 
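The network client methods now speak the types from api/types/network (CreateOptions, ConnectOptions, DisconnectOptions) instead of the old api/types names. A sketch of the new shapes; the network and container names are illustrative:

    package main

    import (
        "context"
        "fmt"

        "github.com/docker/docker/api/types/network"
        "github.com/docker/docker/client"
    )

    func main() {
        ctx := context.Background()
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        created, err := cli.NetworkCreate(ctx, "app-net", network.CreateOptions{
            Driver: "bridge",
            Labels: map[string]string{"com.example.env": "dev"},
        })
        if err != nil {
            panic(err)
        }
        fmt.Println("network id:", created.ID)

        // Connect/disconnect serialize to network.ConnectOptions and
        // network.DisconnectOptions on the wire; the client methods
        // themselves still take plain arguments.
        if err := cli.NetworkConnect(ctx, created.ID, "my-container", &network.EndpointSettings{}); err != nil {
            panic(err)
        }
        if err := cli.NetworkDisconnect(ctx, created.ID, "my-container", false); err != nil {
            panic(err)
        }
    }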
func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { - nd := types.NetworkDisconnect{Container: containerID, Force: force} + nd := network.DisconnectOptions{ + Container: containerID, + Force: force, + } resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go index 0f90e2bb9..afc47de6f 100644 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -7,25 +7,20 @@ import ( "io" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" ) // NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { +func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options network.InspectOptions) (network.Inspect, error) { networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) return networkResource, err } // NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. -func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { +func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options network.InspectOptions) (network.Inspect, []byte, error) { if networkID == "" { - return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID} + return network.Inspect{}, nil, objectNotFoundError{object: "network", id: networkID} } - var ( - networkResource types.NetworkResource - resp serverResponse - err error - ) query := url.Values{} if options.Verbose { query.Set("verbose", "true") @@ -33,17 +28,19 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, if options.Scope != "" { query.Set("scope", options.Scope) } - resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) + + resp, err := cli.get(ctx, "/networks/"+networkID, query, nil) defer ensureReaderClosed(resp) if err != nil { - return networkResource, nil, err + return network.Inspect{}, nil, err } - body, err := io.ReadAll(resp.body) + raw, err := io.ReadAll(resp.body) if err != nil { - return networkResource, nil, err + return network.Inspect{}, nil, err } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&networkResource) - return networkResource, body, err + + var nw network.Inspect + err = json.NewDecoder(bytes.NewReader(raw)).Decode(&nw) + return nw, raw, err } diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go index ed2acb557..72957d47f 100644 --- a/vendor/github.com/docker/docker/client/network_list.go +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -5,12 +5,12 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" ) // NetworkList returns the list of networks configured in the docker host. 
-func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { +func (cli *Client) NetworkList(ctx context.Context, options network.ListOptions) ([]network.Summary, error) { query := url.Values{} if options.Filters.Len() > 0 { //nolint:staticcheck // ignore SA1019 for old code @@ -21,7 +21,7 @@ func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOpt query.Set("filters", filterJSON) } - var networkResources []types.NetworkResource + var networkResources []network.Summary resp, err := cli.get(ctx, "/networks", query, nil) defer ensureReaderClosed(resp) if err != nil { diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go index 7b5f831ef..708cc61a4 100644 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -5,13 +5,13 @@ import ( "encoding/json" "fmt" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" ) // NetworksPrune requests the daemon to delete unused networks -func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { - var report types.NetworksPruneReport +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (network.PruneReport, error) { + var report network.PruneReport if err := cli.NewVersionError(ctx, "1.25", "network prune"); err != nil { return report, err diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go index 69184619a..a0d8c3500 100644 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -84,7 +84,7 @@ func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { // todo: do inspect before to check existing name before checking privileges - newAuthHeader, privilegeErr := options.PrivilegeFunc() + newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { ensureReaderClosed(resp) return nil, privilegeErr @@ -105,7 +105,7 @@ func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, ensureReaderClosed(resp) if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { - accept, err := options.AcceptPermissionsFunc(privileges) + accept, err := options.AcceptPermissionsFunc(ctx, privileges) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index 50e213b50..6eea9b4e4 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -184,10 +184,10 @@ func (cli *Client) doRequest(req *http.Request) (serverResponse, error) { // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { // Checks if client is running with elevated privileges - if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil { + if f, elevatedErr := os.Open(`\\.\PHYSICALDRIVE0`); elevatedErr != nil { err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client 
must be run with elevated privileges to connect") } else { - f.Close() + _ = f.Close() err = errors.Wrap(err, "this error may indicate that the docker daemon is not running") } } @@ -278,7 +278,7 @@ func encodeData(data interface{}) (*bytes.Buffer, error) { func ensureReaderClosed(response serverResponse) { if response.body != nil { // Drain up to 512 bytes and close the body to let the Transport reuse the connection - io.CopyN(io.Discard, response.body, 512) - response.body.Close() + _, _ = io.CopyN(io.Discard, response.body, 512) + _ = response.body.Close() } } diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go index 9333f6ee7..9b09c30fa 100644 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -5,13 +5,13 @@ import ( "encoding/json" "fmt" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/volume" ) // VolumesPrune requests the daemon to delete unused data -func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { - var report types.VolumesPruneReport +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (volume.PruneReport, error) { + var report volume.PruneReport if err := cli.NewVersionError(ctx, "1.25", "volume prune"); err != nil { return report, err diff --git a/vendor/modules.txt b/vendor/modules.txt index e60454e67..66eae1977 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -115,7 +115,7 @@ github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/distribution/reference v0.5.0 ## explicit; go 1.20 github.com/distribution/reference -# github.com/docker/docker v26.1.2+incompatible +# github.com/docker/docker v27.1.1+incompatible ## explicit github.com/docker/docker/api github.com/docker/docker/api/types From d59b3f5624f7d44ac201b3df4cb4d0b0bf4aa92a Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 13 Aug 2024 08:18:10 +0200 Subject: [PATCH 172/203] [PBM-341] fix invalid S3 url (#985) --- pbm/config/config.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pbm/config/config.go b/pbm/config/config.go index f1bee8bb2..550911f6f 100644 --- a/pbm/config/config.go +++ b/pbm/config/config.go @@ -288,11 +288,11 @@ func (s *StorageConf) Path() string { path := "" switch s.Type { case storage.S3: - path = "s3://" - if s.S3.EndpointURL != "" { - path += s.S3.EndpointURL + "/" + path = s.S3.EndpointURL + if !strings.Contains(path, "://") { + path = "s3://" + path } - path += s.S3.Bucket + path += "/" + s.S3.Bucket if s.S3.Prefix != "" { path += "/" + s.S3.Prefix } From 3afb5113c3a56ad4a5e5e9160ff5eb3ad22b74f4 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Fri, 9 Aug 2024 11:57:35 +0200 Subject: [PATCH 173/203] [PBM-1367] disallow backup with sharded timeseries --- cmd/pbm-agent/agent.go | 30 ++++++++++++++++++++++-------- cmd/pbm-agent/backup.go | 2 +- pbm/backup/logical.go | 33 +++++++++++++++++++++++++++++++++ pbm/topo/cluster.go | 25 +++++++++++++++++++++++++ pbm/topo/node.go | 11 +++++++---- pbm/version/version.go | 4 ++++ 6 files changed, 92 insertions(+), 13 deletions(-) diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go index 0ebbb46f0..4f0544714 100644 --- a/cmd/pbm-agent/agent.go +++ b/cmd/pbm-agent/agent.go @@ -3,6 +3,7 @@ package main import ( "context" "fmt" + "strings" "sync" "sync/atomic" "time" @@ -33,8 +34,6 @@ type Agent 
struct { brief topo.NodeBrief - mongoVersion version.MongoVersion - dumpConns int closeCMD chan struct{} @@ -66,13 +65,14 @@ func newAgent(ctx context.Context, leadConn connect.Client, uri string, dumpConn closeCMD: make(chan struct{}), nodeConn: nodeConn, brief: topo.NodeBrief{ - URI: uri, - SetName: info.SetName, - Me: info.Me, - Sharded: info.IsSharded(), + URI: uri, + SetName: info.SetName, + Me: info.Me, + Sharded: info.IsSharded(), + ConfigSvr: info.IsConfigSrv(), + Version: mongoVersion, }, - mongoVersion: mongoVersion, - dumpConns: dumpConns, + dumpConns: dumpConns, } return a, nil } @@ -113,6 +113,20 @@ func (a *Agent) CanStart(ctx context.Context) error { Warning("", "", "", primitive.Timestamp{}, "WARNING: %v", err) } + if ver.IsShardedTimeseriesSupported() { + tss, err := topo.ListShardedTimeseries(ctx, a.leadConn) + if err != nil { + log.FromContext(ctx). + Error("", "", "", primitive.Timestamp{}, + "failed to list sharded timeseries: %v", err) + } else if len(tss) != 0 { + log.FromContext(ctx). + Warning("", "", "", primitive.Timestamp{}, + "WARNING: cannot backup following sharded timeseries: %s", + strings.Join(tss, ", ")) + } + } + return nil } diff --git a/cmd/pbm-agent/backup.go b/cmd/pbm-agent/backup.go index 27f520808..9a5567d0c 100644 --- a/cmd/pbm-agent/backup.go +++ b/cmd/pbm-agent/backup.go @@ -105,7 +105,7 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, } bcp.SetConfig(cfg) - bcp.SetMongoVersion(a.mongoVersion.VersionString) + bcp.SetMongoVersion(a.brief.Version.VersionString) bcp.SetSlicerInterval(cfg.BackupSlicerInterval()) bcp.SetTimeouts(cfg.Backup.Timeouts) diff --git a/pbm/backup/logical.go b/pbm/backup/logical.go index ef9a51f59..88b1d9422 100644 --- a/pbm/backup/logical.go +++ b/pbm/backup/logical.go @@ -36,6 +36,10 @@ func (b *Backup) doLogical( stg storage.Storage, l log.LogEvent, ) error { + if err := b.checkForTimeseries(ctx, bcp.Namespaces); err != nil { + return errors.Wrap(err, "check for timeseries") + } + var db, coll string if util.IsSelective(bcp.Namespaces) { // for selective backup, configsvr does not hold any data. 
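A note on the namespace form used by the new check: MongoDB registers a sharded timeseries collection in config.collections under its bucket namespace, <db>.system.buckets.<coll>, and the ListShardedTimeseries helper added in the hunks below recovers the user-facing name by cutting that infix with strings.Cut. A minimal, self-contained sketch of the same mapping (the helper name and the sample namespace are illustrative, not part of the patch):

package main

import (
	"fmt"
	"strings"
)

// bucketToUserNS mirrors the strings.Cut step in ListShardedTimeseries:
// it turns the bucket namespace stored in config.collections back into
// the namespace users know.
func bucketToUserNS(ns string) string {
	db, coll, found := strings.Cut(ns, ".system.buckets.")
	if !found {
		return ns // not a bucket namespace, return unchanged
	}
	return db + "." + coll
}

func main() {
	fmt.Println(bucketToUserNS("mydb.system.buckets.weather")) // mydb.weather
}

Unlike this sketch, the patch ignores the found flag, which is safe there because the query already filters config.collections on timeseriesFields, so only bucket namespaces reach the cut.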
@@ -441,3 +445,32 @@ func getNamespacesSize(ctx context.Context, m *mongo.Client, db, coll string) (m err = eg.Wait() return rv, err } + +func (b *Backup) checkForTimeseries(ctx context.Context, nss []string) error { + if !b.brief.Version.IsShardedTimeseriesSupported() { + return nil + } + + tss, err := topo.ListShardedTimeseries(ctx, b.leadConn) + if err != nil { + return errors.Wrap(err, "list sharded timeseries") + } + + if util.IsSelective(nss) { + selected := util.MakeSelectedPred(nss) + origTSs := tss + tss = make([]string, 0, len(tss)) + for _, ts := range origTSs { + if selected(ts) { + tss = append(tss, ts) + } + } + } + + if len(tss) != 0 { + return errors.Errorf("cannot backup following sharded timeseries: %s", + strings.Join(tss, ", ")) + } + + return nil +} diff --git a/pbm/topo/cluster.go b/pbm/topo/cluster.go index 4fc4960fc..37637aa17 100644 --- a/pbm/topo/cluster.go +++ b/pbm/topo/cluster.go @@ -7,6 +7,7 @@ import ( "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" "go.mongodb.org/mongo-driver/mongo/writeconcern" "github.com/percona/percona-backup-mongodb/pbm/connect" @@ -228,3 +229,27 @@ func GetBalancerStatus(ctx context.Context, m connect.Client) (*BalancerStatus, } return inf, nil } + +func ListShardedTimeseries(ctx context.Context, conn connect.Client) ([]string, error) { + cur, err := conn.MongoClient(). + Database("config").Collection("collections"). + Find(ctx, + bson.D{{"timeseriesFields", bson.M{"$exists": 1}}}, + options.Find().SetProjection(bson.D{{"_id", 1}})) + if err != nil { + return nil, errors.Wrap(err, "find") + } + defer cur.Close(context.Background()) + + nss := []string{} + for cur.Next(ctx) { + ns, _ := cur.Current.Lookup("_id").StringValueOK() + db, coll, _ := strings.Cut(ns, ".system.buckets.") + nss = append(nss, db+"."+coll) + } + if err := cur.Err(); err != nil { + return nil, errors.Wrap(err, "cursor") + } + + return nss, nil +} diff --git a/pbm/topo/node.go b/pbm/topo/node.go index 7a7a01a7f..6c99e3b14 100644 --- a/pbm/topo/node.go +++ b/pbm/topo/node.go @@ -10,6 +10,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/version" ) // ReplsetRole is a replicaset role in sharded cluster @@ -57,10 +58,12 @@ type ConfigServerState struct { } type NodeBrief struct { - URI string - SetName string - Me string - Sharded bool + URI string + SetName string + Me string + Sharded bool + ConfigSvr bool + Version version.MongoVersion } // NodeInfo represents the mongo's node info diff --git a/pbm/version/version.go b/pbm/version/version.go index 2b11cce86..f1cbeb5d8 100644 --- a/pbm/version/version.go +++ b/pbm/version/version.go @@ -189,6 +189,10 @@ func (v MongoVersion) Major() int { return v.Version[0] } +func (v MongoVersion) IsShardedTimeseriesSupported() bool { + return v.Version[0] >= 6 // sharded timeseries introduced in 5.1 +} + func GetMongoVersion(ctx context.Context, m *mongo.Client) (MongoVersion, error) { res := m.Database("admin").RunCommand(ctx, bson.D{{"buildInfo", 1}}) if err := res.Err(); err != nil { From 609d32f405726ecc5fcfd86077e2263907dacf7a Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 12 Aug 2024 13:57:51 +0200 Subject: [PATCH 174/203] Update pbm/topo/cluster.go Co-authored-by: Boris Ilijic --- pbm/topo/cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/pbm/topo/cluster.go b/pbm/topo/cluster.go index 37637aa17..1c16a88b3 100644 --- a/pbm/topo/cluster.go +++ b/pbm/topo/cluster.go @@ -239,7 +239,7 @@ func ListShardedTimeseries(ctx context.Context, conn connect.Client) ([]string, if err != nil { return nil, errors.Wrap(err, "find") } - defer cur.Close(context.Background()) + defer cur.Close(ctx) nss := []string{} for cur.Next(ctx) { From 6cd04231cfa7a1d6641c3fcf78e255a6c4e7c1d4 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 12 Aug 2024 14:27:37 +0200 Subject: [PATCH 175/203] [PBM-1367] perform check on configsvr only --- pbm/backup/logical.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pbm/backup/logical.go b/pbm/backup/logical.go index 88b1d9422..15821e365 100644 --- a/pbm/backup/logical.go +++ b/pbm/backup/logical.go @@ -36,8 +36,10 @@ func (b *Backup) doLogical( stg storage.Storage, l log.LogEvent, ) error { - if err := b.checkForTimeseries(ctx, bcp.Namespaces); err != nil { - return errors.Wrap(err, "check for timeseries") + if b.brief.ConfigSvr { + if err := b.checkForTimeseries(ctx, bcp.Namespaces); err != nil { + return errors.Wrap(err, "check for timeseries") + } } var db, coll string @@ -447,7 +449,7 @@ func getNamespacesSize(ctx context.Context, m *mongo.Client, db, coll string) (m } func (b *Backup) checkForTimeseries(ctx context.Context, nss []string) error { - if !b.brief.Version.IsShardedTimeseriesSupported() { + if !b.brief.Version.IsShardedTimeseriesSupported() && b.brief.Sharded { return nil } From 032390b6d5d71a330a313cd54e1704f66b904054 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 12 Aug 2024 14:44:24 +0200 Subject: [PATCH 176/203] [PBM-1367] typo --- pbm/backup/logical.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pbm/backup/logical.go b/pbm/backup/logical.go index 15821e365..ab0cb512f 100644 --- a/pbm/backup/logical.go +++ b/pbm/backup/logical.go @@ -449,7 +449,7 @@ func getNamespacesSize(ctx context.Context, m *mongo.Client, db, coll string) (m } func (b *Backup) checkForTimeseries(ctx context.Context, nss []string) error { - if !b.brief.Version.IsShardedTimeseriesSupported() && b.brief.Sharded { + if !b.brief.Version.IsShardedTimeseriesSupported() || !b.brief.Sharded { return nil } From 6ba7f4a0c288d71be0646a168bd1d010d08c897c Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Wed, 14 Aug 2024 14:29:05 +0200 Subject: [PATCH 177/203] PBM-1208: Add active lock check before running the backup (#982) * Add active lock check before running the backup --- cmd/pbm-agent/backup.go | 42 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/cmd/pbm-agent/backup.go b/cmd/pbm-agent/backup.go index 9a5567d0c..05d3ced83 100644 --- a/cmd/pbm-agent/backup.go +++ b/cmd/pbm-agent/backup.go @@ -65,6 +65,19 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID, } isClusterLeader := nodeInfo.IsClusterLeader() + + if isClusterLeader { + moveOn, err := a.startBcpLockCheck(ctx) + if err != nil { + l.Error("start backup lock check: %v", err) + return + } + if !moveOn { + l.Error("unable to proceed with the backup, active lock is present") + return + } + } + canRunBackup, err := topo.NodeSuitsExt(ctx, a.nodeConn, nodeInfo, cmd.Type) if err != nil { l.Error("node check: %v", err) @@ -314,3 +327,32 @@ func (a *Agent) waitNomination(ctx context.Context, bcp string) (bool, error) { } } } + +// startBcpLockCheck checks if there is any active lock. 
+// It fetches all existing pbm locks, and if any exists, it is also +// checked for staleness. +// false is returned in case a single active lock exists or error happens. +// true means that there's no active locks. +func (a *Agent) startBcpLockCheck(ctx context.Context) (bool, error) { + locks, err := lock.GetLocks(ctx, a.leadConn, &lock.LockHeader{}) + if err != nil { + return false, errors.Wrap(err, "get all locks for backup start") + } + if len(locks) == 0 { + return true, nil + } + + // stale lock check + ts, err := topo.GetClusterTime(ctx, a.leadConn) + if err != nil { + return false, errors.Wrap(err, "read cluster time") + } + + for _, l := range locks { + if l.Heartbeat.T+defs.StaleFrameSec >= ts.T { + return false, nil + } + } + + return true, nil +} From 5285f7975ff636074e9746a9b8da61d7872f819f Mon Sep 17 00:00:00 2001 From: Oleksandr Havryliak <88387200+olexandr-havryliak@users.noreply.github.com> Date: Wed, 14 Aug 2024 16:06:53 +0300 Subject: [PATCH 178/203] PBM-1374 downgrade golang to version 1.22 (#988) --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8696a691c..453a444e5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,7 +36,7 @@ jobs: test: [logical, physical, incremental, external] env: PBM_BRANCH: ${{ github.event.inputs.pbm_branch || 'main' }} - GO_VER: ${{ github.event.inputs.go_ver || 'bullseye' }} + GO_VER: ${{ github.event.inputs.go_ver || '1.22-bullseye' }} PR_NUMBER: ${{ github.event.number|| github.event.inputs.pr_ver }} steps: From c5a9557262d59ad38e9b190bfba908e45137fba0 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 19 Aug 2024 12:44:14 +0200 Subject: [PATCH 179/203] [PBM-1373] force shutdown on ConflictingOperationInProgress (#990) --- pbm/restore/physical.go | 75 ++++++++++++----------------------------- 1 file changed, 22 insertions(+), 53 deletions(-) diff --git a/pbm/restore/physical.go b/pbm/restore/physical.go index 3fd8fe9ba..f61295d16 100644 --- a/pbm/restore/physical.go +++ b/pbm/restore/physical.go @@ -53,8 +53,6 @@ const ( tryConnCount = 5 tryConnTimeout = 5 * time.Minute - - maxShutdownTriesOnStandaloneRecovery = 10 ) type files struct { @@ -1198,12 +1196,8 @@ func (r *PhysRestore) getLasOpTime() (primitive.Timestamp, error) { return ts, errors.Errorf("get the timestamp of record %v", rb) } - err = shutdown(c, r.dbpath) - if err != nil { - return ts, errors.Wrap(err, "shutdown mongo") - } - - return ts, nil + err = r.shutdown(c) + return ts, err } func (r *PhysRestore) prepareData() error { @@ -1252,33 +1246,35 @@ func (r *PhysRestore) prepareData() error { return errors.Wrap(err, "set oplogTruncateAfterPoint") } - err = shutdown(c, r.dbpath) + return r.shutdown(c) +} + +func (r *PhysRestore) shutdown(c *mongo.Client) error { + err := shutdownImpl(c, r.dbpath, false) if err != nil { - return errors.Wrap(err, "shutdown mongo") + if strings.Contains(err.Error(), "ConflictingOperationInProgress") { + r.log.Warning("try force shutdown. 
reason: %v", err) + err = shutdownImpl(c, r.dbpath, true) + return errors.Wrap(err, "force shutdown mongo") + } + + return errors.Wrap(err, "shutdown mongo") // unexpected } return nil } -func shutdown(c *mongo.Client, dbpath string) error { - return shutdownImpl(c, dbpath, false) -} - -func forceShutdown(c *mongo.Client, dbpath string) error { - return shutdownImpl(c, dbpath, true) -} - func shutdownImpl(c *mongo.Client, dbpath string, force bool) error { res := c.Database("admin").RunCommand(context.TODO(), bson.D{{"shutdown", 1}, {"force", force}}) err := res.Err() if err != nil && !strings.Contains(err.Error(), "socket was unexpectedly closed") { - return err + return errors.Wrapf(err, "run shutdown (force: %v)", force) } err = waitMgoShutdown(dbpath) if err != nil { - return errors.Wrap(err, "shutdown") + return errors.Wrap(err, "wait") } return nil @@ -1297,24 +1293,7 @@ func (r *PhysRestore) recoverStandalone() error { return errors.Wrap(err, "connect to mongo") } - for i := 0; i != maxShutdownTriesOnStandaloneRecovery; i++ { - err = shutdown(c, r.dbpath) - if err == nil { - return nil // OK - } - - if strings.Contains(err.Error(), "ConflictingOperationInProgress") { - r.log.Warning("retry shutdown in 5 seconds. reason: %v", err) - time.Sleep(5 * time.Second) - continue - } - - return errors.Wrap(err, "shutdown mongo") // unexpected - } - - r.log.Debug("force shutdown") - err = forceShutdown(c, r.dbpath) - return errors.Wrap(err, "force shutdown mongo") + return r.shutdown(c) } func (r *PhysRestore) replayOplog( @@ -1350,12 +1329,12 @@ func (r *PhysRestore) replayOplog( }, ) if err != nil { - return errors.Wrapf(err, "upate rs.member host to %s", r.nodeInfo.Me) + return errors.Wrapf(err, "update rs.member host to %s", r.nodeInfo.Me) } - err = shutdown(nodeConn, r.dbpath) + err = r.shutdown(nodeConn) if err != nil { - return errors.Wrap(err, "shutdown mongo") + return errors.Wrap(err, "after update member host") } flags := []string{ @@ -1417,12 +1396,7 @@ func (r *PhysRestore) replayOplog( } } - err = shutdown(nodeConn, r.dbpath) - if err != nil { - return errors.Wrap(err, "shutdown mongo") - } - - return nil + return r.shutdown(nodeConn) } func (r *PhysRestore) resetRS() error { @@ -1565,12 +1539,7 @@ func (r *PhysRestore) resetRS() error { r.dropPBMCollections(ctx, c) } - err = shutdown(c, r.dbpath) - if err != nil { - return errors.Wrap(err, "shutdown mongo") - } - - return nil + return r.shutdown(c) } func (r *PhysRestore) dropPBMCollections(ctx context.Context, c *mongo.Client) { From b071ae27a7f1b481c1da349e73e3112d8b008270 Mon Sep 17 00:00:00 2001 From: Boris Ilijic Date: Tue, 20 Aug 2024 09:02:20 +0200 Subject: [PATCH 180/203] PBM-1248: When using `backup` command -w flag does not work with -o json (#987) * Enable backup --wait option together with json fmt * Fix reviewdog issue * Make running backup message generic --------- Co-authored-by: Sandra Romanchenko <53295797+sandraromanchenko@users.noreply.github.com> --- cmd/pbm/backup.go | 44 +++++++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/cmd/pbm/backup.go b/cmd/pbm/backup.go index dd0ed0c24..ec16fe545 100644 --- a/cmd/pbm/backup.go +++ b/cmd/pbm/backup.go @@ -46,7 +46,7 @@ type backupOut struct { } func (b backupOut) String() string { - return fmt.Sprintf("Backup '%s' to remote store '%s' has started", b.Name, b.Storage) + return fmt.Sprintf("Backup '%s' to remote store '%s'", b.Name, b.Storage) } type externBcpOut struct { @@ -151,20 +151,20 @@ func runBackup( return 
nil, errors.Wrap(err, "send command") } - if outf != outText { - return backupOut{b.name, cfg.Storage.Path()}, nil - } + showProgress := outf == outText - fmt.Printf("Starting backup '%s'", b.name) + if showProgress { + fmt.Printf("Starting backup '%s'", b.name) + } startCtx, cancel := context.WithTimeout(ctx, cfg.Backup.Timeouts.StartingStatus()) defer cancel() - err = waitForBcpStatus(startCtx, conn, b.name) + err = waitForBcpStatus(startCtx, conn, b.name, showProgress) if err != nil { return nil, err } if b.typ == string(defs.ExternalBackup) { - s, err := waitBackup(ctx, conn, b.name, defs.StatusCopyReady) + s, err := waitBackup(ctx, conn, b.name, defs.StatusCopyReady, showProgress) if err != nil { return nil, errors.Wrap(err, "waiting for the `copyReady` status") } @@ -198,15 +198,19 @@ func runBackup( defer cancel() } - fmt.Printf("\nWaiting for '%s' backup...", b.name) - s, err := waitBackup(ctx, conn, b.name, defs.StatusDone) - if s != nil { + if showProgress { + fmt.Printf("\nWaiting for '%s' backup...", b.name) + } + s, err := waitBackup(ctx, conn, b.name, defs.StatusDone, showProgress) + if s != nil && showProgress { fmt.Printf(" %s\n", *s) } if errors.Is(err, context.DeadlineExceeded) { err = errWaitTimeout } - return outMsg{}, err + if err != nil { + return nil, err + } } return backupOut{b.name, cfg.Storage.Path()}, nil @@ -228,7 +232,13 @@ func runFinishBcp(ctx context.Context, conn connect.Client, bcp string) (fmt.Str backup.ChangeBackupState(conn, bcp, defs.StatusCopyDone, "") } -func waitBackup(ctx context.Context, conn connect.Client, name string, status defs.Status) (*defs.Status, error) { +func waitBackup( + ctx context.Context, + conn connect.Client, + name string, + status defs.Status, + showProgress bool, +) (*defs.Status, error) { t := time.NewTicker(time.Second) defer t.Stop() @@ -250,11 +260,13 @@ func waitBackup(ctx context.Context, conn connect.Client, name string, status de } } - fmt.Print(".") + if showProgress { + fmt.Print(".") + } } } -func waitForBcpStatus(ctx context.Context, conn connect.Client, bcpName string) error { +func waitForBcpStatus(ctx context.Context, conn connect.Client, bcpName string, showProgress bool) error { tk := time.NewTicker(time.Second) defer tk.Stop() @@ -262,7 +274,9 @@ func waitForBcpStatus(ctx context.Context, conn connect.Client, bcpName string) for { select { case <-tk.C: - fmt.Print(".") + if showProgress { + fmt.Print(".") + } var err error bmeta, err = backup.NewDBManager(conn).GetBackupByName(ctx, bcpName) if errors.Is(err, errors.ErrNotFound) { From 59402cc6297cd70675678593d24020b90552f0d1 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 20 Aug 2024 10:48:56 +0200 Subject: [PATCH 181/203] change upload part size message for S3 (#992) --- cmd/pbm/delete.go | 3 ++- cmd/pbm/status.go | 28 ++-------------------------- pbm/backup/physical.go | 27 +-------------------------- pbm/storage/s3/s3.go | 8 ++++++-- pbm/storage/storage.go | 29 +++++++++++++++++++++++++++++ 5 files changed, 40 insertions(+), 55 deletions(-) diff --git a/cmd/pbm/delete.go b/cmd/pbm/delete.go index 923496fd7..93656cf54 100644 --- a/cmd/pbm/delete.go +++ b/cmd/pbm/delete.go @@ -17,6 +17,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/oplog" + "github.com/percona/percona-backup-mongodb/pbm/storage" "github.com/percona/percona-backup-mongodb/sdk" ) @@ -355,7 +356,7 @@ func printDeleteInfoTo(w io.Writer, backups []backup.BackupMeta, 
chunks []oplog. restoreTime := time.Unix(int64(bcp.LastWriteTS.T), 0).UTC().Format(time.RFC3339) fmt.Fprintf(w, " - %q [size: %s type: <%s>, restore time: %s]\n", - bcp.Name, fmtSize(bcp.Size), t, restoreTime) + bcp.Name, storage.PrettySize(bcp.Size), t, restoreTime) } } diff --git a/cmd/pbm/status.go b/cmd/pbm/status.go index 81ad99d56..f81a4f397 100644 --- a/cmd/pbm/status.go +++ b/cmd/pbm/status.go @@ -143,30 +143,6 @@ func status( return out, err } -func fmtSize(size int64) string { - const ( - _ = iota - KB float64 = 1 << (10 * iota) - MB - GB - TB - ) - - s := float64(size) - - switch { - case s >= TB: - return fmt.Sprintf("%.2fTB", s/TB) - case s >= GB: - return fmt.Sprintf("%.2fGB", s/GB) - case s >= MB: - return fmt.Sprintf("%.2fMB", s/MB) - case s >= KB: - return fmt.Sprintf("%.2fKB", s/KB) - } - return fmt.Sprintf("%.2fB", s) -} - func sprinth(s string) string { return fmt.Sprintf("%s:\n%s", s, strings.Repeat("=", len(s)+1)) } @@ -520,14 +496,14 @@ func (s storageStat) String() string { if ss.StoreName != "" { t += ", *" } - ret += fmt.Sprintf(" %s %s <%s> %s\n", ss.Name, fmtSize(ss.Size), t, status) + ret += fmt.Sprintf(" %s %s <%s> %s\n", ss.Name, storage.PrettySize(ss.Size), t, status) } if len(s.PITR.Ranges) == 0 { return ret } - ret += fmt.Sprintf(" PITR chunks [%s]:\n", fmtSize(s.PITR.Size)) + ret += fmt.Sprintf(" PITR chunks [%s]:\n", storage.PrettySize(s.PITR.Size)) sort.Slice(s.PITR.Ranges, func(i, j int) bool { a, b := s.PITR.Ranges[i], s.PITR.Ranges[j] diff --git a/pbm/backup/physical.go b/pbm/backup/physical.go index 3ee1d81cb..3865ea934 100644 --- a/pbm/backup/physical.go +++ b/pbm/backup/physical.go @@ -524,7 +524,7 @@ func (b *Backup) uploadPhysical( if err != nil { return errors.Wrapf(err, "upload filelist %q", filelistPath) } - l.Info("uploaded: %q %s", filelistPath, fmtSize(flSize)) + l.Info("uploaded: %q %s", filelistPath, storage.PrettySize(flSize)) err = IncBackupSize(ctx, b.leadConn, bcp.Name, size+flSize) if err != nil { @@ -690,7 +690,6 @@ func writeFile( } dst += fmt.Sprintf(".%d-%d", src.Off, src.Len) } - l.Debug("uploading: %s %s", src, fmtSize(sz)) _, err = storage.Upload(ctx, &src, stg, compression, compressLevel, dst, sz) if err != nil { @@ -711,27 +710,3 @@ func writeFile( Len: src.Len, }, nil } - -func fmtSize(size int64) string { - const ( - _ = iota - KB float64 = 1 << (10 * iota) - MB - GB - TB - ) - - s := float64(size) - - switch { - case s >= TB: - return fmt.Sprintf("%.2fTB", s/TB) - case s >= GB: - return fmt.Sprintf("%.2fGB", s/GB) - case s >= MB: - return fmt.Sprintf("%.2fMB", s/MB) - case s >= KB: - return fmt.Sprintf("%.2fKB", s/KB) - } - return fmt.Sprintf("%.2fB", s) -} diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go index d5a52cd2f..1fff9ea14 100644 --- a/pbm/storage/s3/s3.go +++ b/pbm/storage/s3/s3.go @@ -392,8 +392,12 @@ func (s *S3) Save(name string, data io.Reader, sizeb int64) error { } if s.log != nil { - s.log.Info("s3.uploadPartSize is set to %d (~%dMb)", partSize, partSize>>20) - s.log.Info("s3.maxUploadParts is set to %d", s.opts.MaxUploadParts) + s.log.Debug("uploading %q [size hint: %v (%v); part size: %v (%v)]", + name, + sizeb, + storage.PrettySize(sizeb), + partSize, + storage.PrettySize(partSize)) } _, err = s3manager.NewUploader(awsSession, func(u *s3manager.Uploader) { diff --git a/pbm/storage/storage.go b/pbm/storage/storage.go index 25f223977..777232764 100644 --- a/pbm/storage/storage.go +++ b/pbm/storage/storage.go @@ -2,6 +2,7 @@ package storage import ( "context" + "fmt" "io" "strings" @@ -253,3 
+254,31 @@ func Upload( return n, nil } + +func PrettySize(size int64) string { + const ( + _ = iota + KB float64 = 1 << (10 * iota) + MB + GB + TB + ) + + if size < 0 { + return "unknown" + } + + s := float64(size) + + switch { + case s >= TB: + return fmt.Sprintf("%.2fTB", s/TB) + case s >= GB: + return fmt.Sprintf("%.2fGB", s/GB) + case s >= MB: + return fmt.Sprintf("%.2fMB", s/MB) + case s >= KB: + return fmt.Sprintf("%.2fKB", s/KB) + } + return fmt.Sprintf("%.2fB", s) +} From f982d168d3e79d7aa46c41c9eb432a0af48eb31a Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 20 Aug 2024 10:49:37 +0200 Subject: [PATCH 182/203] [PBM-1344] do not try to copy oplog from physical backup (#994) --- pbm/slicer/slicer.go | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/pbm/slicer/slicer.go b/pbm/slicer/slicer.go index 4dd90a264..599564b47 100644 --- a/pbm/slicer/slicer.go +++ b/pbm/slicer/slicer.go @@ -118,13 +118,6 @@ func (s *Slicer) Catchup(ctx context.Context) error { return nil } - if lastBackup.Type != defs.LogicalBackup { - // the backup does not contain complete oplog to copy from - // NOTE: the chunk' last op can be later than backup' first write ts - s.lastTS = lastChunk.EndTS - return nil - } - if !lastChunk.EndTS.Before(rs.LastWriteTS) { // no need to copy oplog from backup s.lastTS = lastChunk.EndTS @@ -150,18 +143,20 @@ func (s *Slicer) Catchup(ctx context.Context) error { return err } - s.l.Warning("skip chunk %s - %s: %v", - formatts(lastChunk.EndTS), formatts(rs.FirstWriteTS), rangeErr) + s.l.Warning("skip chunk %s - %s: oplog has insufficient range", + formatts(lastChunk.EndTS), formatts(rs.FirstWriteTS)) } else { s.l.Info("uploaded chunk %s - %s", formatts(lastChunk.EndTS), formatts(rs.FirstWriteTS)) s.lastTS = rs.FirstWriteTS } } - err = s.copyReplsetOplog(ctx, rs) - if err != nil { - s.l.Error("copy oplog from %q backup: %v", lastBackup.Name, err) - return nil + if lastBackup.Type == defs.LogicalBackup { + err = s.copyReplsetOplog(ctx, rs) + if err != nil { + s.l.Error("copy oplog from %q backup: %v", lastBackup.Name, err) + return nil + } } s.lastTS = rs.LastWriteTS From 9d3317e5e81e21375112dff02220732110640e99 Mon Sep 17 00:00:00 2001 From: Sandra Romanchenko <53295797+sandraromanchenko@users.noreply.github.com> Date: Tue, 20 Aug 2024 11:53:14 +0300 Subject: [PATCH 183/203] PBM-1376. 
Add support for SSE-S3 for S3 providers (#997) --- pbm/storage/s3/s3.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go index 1fff9ea14..55a91e9d1 100644 --- a/pbm/storage/s3/s3.go +++ b/pbm/storage/s3/s3.go @@ -355,7 +355,9 @@ func (s *S3) Save(name string, data io.Reader, sizeb int64) error { sse := s.opts.ServerSideEncryption if sse != nil { - if sse.SseAlgorithm == s3.ServerSideEncryptionAwsKms { + if sse.SseAlgorithm == s3.ServerSideEncryptionAes256 { + uplInput.ServerSideEncryption = aws.String(sse.SseAlgorithm) + } else if sse.SseAlgorithm == s3.ServerSideEncryptionAwsKms { uplInput.ServerSideEncryption = aws.String(sse.SseAlgorithm) uplInput.SSEKMSKeyId = aws.String(sse.KmsKeyID) } else if sse.SseCustomerAlgorithm != "" { From 5cadf8abff1689131e00535352227ab7564b305b Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 13 Aug 2024 17:35:23 +0200 Subject: [PATCH 184/203] [PBM-1371] remove tmp file if os.Rename() fails --- pbm/storage/fs/fs.go | 54 ++++++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 25 deletions(-) diff --git a/pbm/storage/fs/fs.go b/pbm/storage/fs/fs.go index f115b9d71..467c5e208 100644 --- a/pbm/storage/fs/fs.go +++ b/pbm/storage/fs/fs.go @@ -79,8 +79,11 @@ func (*FS) Type() storage.Type { return storage.Filesystem } -func WriteSync(filepath string, data io.Reader) error { - err := os.MkdirAll(path.Dir(filepath), os.ModeDir|0o755) +//nolint:nonamedreturns +func writeSync(finalpath string, data io.Reader) (err error) { + filepath := finalpath + ".tmp" + + err = os.MkdirAll(path.Dir(filepath), os.ModeDir|0o755) if err != nil { return errors.Wrapf(err, "create path %s", path.Dir(filepath)) } @@ -89,7 +92,14 @@ func WriteSync(filepath string, data io.Reader) error { if err != nil { return errors.Wrapf(err, "create destination file <%s>", filepath) } - defer fw.Close() + defer func() { + if err != nil { + if fw != nil { + fw.Close() + } + os.Remove(filepath) + } + }() err = os.Chmod(filepath, 0o644) if err != nil { @@ -102,22 +112,26 @@ func WriteSync(filepath string, data io.Reader) error { } err = fw.Sync() - return errors.Wrapf(err, "sync file <%s>", filepath) -} - - -func (fs *FS) Save(name string, data io.Reader, _ int64) error { - filepath := path.Join(fs.root, name+".tmp") - finalpath := path.Join(fs.root, name) + if err != nil { + return errors.Wrapf(err, "sync file <%s>", filepath) + } - err := WriteSync(filepath, data) + err = fw.Close() if err != nil { - os.Remove(filepath) - return errors.Wrapf(err, "write-sync %s", path.Dir(filepath)) + return errors.Wrapf(err, "close file <%s>", filepath) } + fw = nil err = os.Rename(filepath, finalpath) - return errors.Wrapf(err, "rename <%s> to <%s>", filepath, finalpath) + if err != nil { + return err + } + + return nil +} + +func (fs *FS) Save(name string, data io.Reader, _ int64) error { + return writeSync(path.Join(fs.root, name), data) } func (fs *FS) SourceReader(name string) (io.ReadCloser, error) { @@ -188,17 +202,7 @@ func (fs *FS) Copy(src, dst string) error { return errors.Wrap(err, "open src") } - destFilename := path.Join(fs.root, dst+".tmp") - finalFilename := path.Join(fs.root, dst) - - err = WriteSync(destFilename, from) - if err != nil { - os.Remove(destFilename) - return errors.Wrapf(err, "write-sync %s", path.Dir(destFilename)) - } - - err = os.Rename(destFilename, finalFilename) - return errors.Wrapf(err, "rename <%s> to <%s>", destFilename, finalFilename) + return writeSync(path.Join(fs.root, dst), from) } 
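The fs.go rewrite above funnels both Save and Copy through writeSync, which stages data in a <name>.tmp file, fsyncs it, and only renames it into place once the write fully succeeded, so a crash mid-write cannot leave a truncated file under the final name. A compressed, hedged sketch of the same write-to-temp-then-rename pattern (the function name and path are illustrative, and the patch's MkdirAll/Chmod steps are trimmed):

package main

import (
	"io"
	"os"
	"strings"
)

// atomicWrite stages data in a temp file, forces it to disk, then renames
// it over the final path. os.Rename within one filesystem is atomic, so
// readers observe either the old file or the complete new one.
func atomicWrite(finalpath string, data io.Reader) error {
	tmp := finalpath + ".tmp"

	f, err := os.Create(tmp)
	if err != nil {
		return err
	}

	if _, err := io.Copy(f, data); err != nil {
		f.Close()
		os.Remove(tmp) // drop the partial file, as the patch does on failure
		return err
	}
	if err := f.Sync(); err != nil { // flush to disk before the rename
		f.Close()
		os.Remove(tmp)
		return err
	}
	if err := f.Close(); err != nil {
		os.Remove(tmp)
		return err
	}

	return os.Rename(tmp, finalpath)
}

func main() {
	_ = atomicWrite("pbm-demo.txt", strings.NewReader("hello\n"))
}

Note the ordering the patch is careful about: Sync and Close must both succeed before Rename, since renaming an unsynced file may survive a crash with empty content.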
// Delete deletes given file from FS. From f00ca99cd985772455dc701c45d3158aa5ba0cc8 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 15 Aug 2024 11:47:37 +0200 Subject: [PATCH 185/203] [PBM-1371] return all errors from unclosed files --- pbm/archive/archive.go | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/pbm/archive/archive.go b/pbm/archive/archive.go index 140691592..3556e34d1 100644 --- a/pbm/archive/archive.go +++ b/pbm/archive/archive.go @@ -397,16 +397,27 @@ func (c *consumer) BodyBSON(data []byte) error { } func (c *consumer) End() error { - eg := errgroup.Group{} + errs := []error{} + + wg := &sync.WaitGroup{} + mu := &sync.Mutex{} + wg.Add(len(c.nss)) for ns, w := range c.nss { - ns, w := ns, w - eg.Go(func() error { - return errors.Wrapf(w.Close(), "close: %q", ns) - }) + go func() { + defer wg.Done() + + err := w.Close() + if err != nil { + mu.Lock() + errs = append(errs, errors.Wrapf(err, "close: %q", ns)) + mu.Unlock() + } + }() } - return eg.Wait() + wg.Wait() + return errors.Join(errs...) } func SecureWrite(w io.Writer, data []byte) error { From 8211f28c01f4a20dc9a3e50c8b724ac90c3500ff Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 15 Aug 2024 11:50:42 +0200 Subject: [PATCH 186/203] [PBM-1371] wait for error from Close() --- pbm/snapshot/dump.go | 42 ++++++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/pbm/snapshot/dump.go b/pbm/snapshot/dump.go index 370667873..3abdeaf31 100644 --- a/pbm/snapshot/dump.go +++ b/pbm/snapshot/dump.go @@ -4,7 +4,6 @@ import ( "context" "io" "strings" - "sync" "sync/atomic" "github.com/percona/percona-backup-mongodb/pbm/archive" @@ -29,7 +28,6 @@ type UploadDumpOptions struct { type UploadFunc func(ns, ext string, r io.Reader) error func UploadDump(ctx context.Context, wt io.WriterTo, upload UploadFunc, opts UploadDumpOptions) (int64, error) { - wg := sync.WaitGroup{} pr, pw := io.Pipe() size := int64(0) @@ -49,19 +47,21 @@ func UploadDump(ctx context.Context, wt io.WriterTo, upload UploadFunc, opts Upl newWriter := func(ns string) (io.WriteCloser, error) { pr, pw := io.Pipe() - wg.Add(1) + done := make(chan error) go func() { - defer wg.Done() + defer close(done) ext := "" if ns != archive.MetaFile { - ext += opts.Compression.Suffix() + ext = opts.Compression.Suffix() } rc := &readCounter{r: pr} err := upload(ns, ext, rc) if err != nil { - pr.CloseWithError(errors.Wrapf(err, "upload: %q", ns)) + err = errors.Wrapf(err, "upload: %q", ns) + pr.CloseWithError(err) + done <- err } atomic.AddInt64(&size, rc.n) @@ -72,7 +72,12 @@ func UploadDump(ctx context.Context, wt io.WriterTo, upload UploadFunc, opts Upl } w, err := compress.Compress(pw, opts.Compression, opts.CompressionLevel) - dwc := io.WriteCloser(&delegatedWriteCloser{w, pw}) + dwc := io.WriteCloser(&delegatedWriteCloser{w, funcCloser(func() error { + err0 := w.Close() + err1 := pw.Close() + err2 := <-done + return errors.Join(err0, err1, err2) + })}) return dwc, errors.Wrapf(err, "create compressor: %q", ns) } @@ -84,7 +89,6 @@ func UploadDump(ctx context.Context, wt io.WriterTo, upload UploadFunc, opts Upl } } - wg.Wait() return size, errors.Wrap(err, "decompose") } @@ -134,8 +138,14 @@ func (c *readCounter) Read(p []byte) (int, error) { return n, err } +type funcCloser func() error + +func (f funcCloser) Close() error { + return f() +} + type delegatedWriteCloser struct { - w io.WriteCloser + w io.Writer c io.Closer } @@ -144,17 +154,5 @@ func (d *delegatedWriteCloser) 
Write(b []byte) (int, error) { } func (d *delegatedWriteCloser) Close() error { - we := d.w.Close() - ce := d.c.Close() - - switch { - case we != nil && ce != nil: - return errors.Errorf("writer: %s; closer: %s", we.Error(), ce.Error()) - case we != nil: - return errors.Errorf("writer: %s", we.Error()) - case ce != nil: - return errors.Errorf("closer: %s", ce.Error()) - } - - return nil + return d.c.Close() } From 8351d406dab5eb6856be32b98c41eef966a607d4 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 19 Aug 2024 14:16:08 +0200 Subject: [PATCH 187/203] [PBM-1379] retry to write init file for fs storage --- cmd/pbm-agent/profile.go | 2 +- pbm/backup/backup.go | 2 +- pbm/resync/rsync.go | 4 ++-- pbm/storage/fs/fs.go | 21 ++++++++++++++++++++- pbm/storage/storage.go | 26 -------------------------- pbm/util/storage.go | 38 ++++++++++++++++++++++++++++++++++++++ 6 files changed, 62 insertions(+), 31 deletions(-) diff --git a/cmd/pbm-agent/profile.go b/cmd/pbm-agent/profile.go index 0f9e88d8f..cb27387db 100644 --- a/cmd/pbm-agent/profile.go +++ b/cmd/pbm-agent/profile.go @@ -99,7 +99,7 @@ func (a *Agent) handleAddConfigProfile( return } - err = storage.Initialize(ctx, stg) + err = util.Initialize(ctx, stg) if err != nil { err = errors.Wrap(err, "init storage") return diff --git a/pbm/backup/backup.go b/pbm/backup/backup.go index 44e843642..e366a34ec 100644 --- a/pbm/backup/backup.go +++ b/pbm/backup/backup.go @@ -271,7 +271,7 @@ func (b *Backup) Run(ctx context.Context, bcp *ctrl.BackupCmd, opid ctrl.OPID, l } if inf.IsLeader() { - err = storage.Initialize(ctx, stg) + err = util.Initialize(ctx, stg) if err != nil { return errors.Wrap(err, "init storage") } diff --git a/pbm/resync/rsync.go b/pbm/resync/rsync.go index ecf06f228..9fffc84aa 100644 --- a/pbm/resync/rsync.go +++ b/pbm/resync/rsync.go @@ -41,13 +41,13 @@ func Resync(ctx context.Context, conn connect.Client, cfg *config.StorageConf) e return errors.Wrap(err, "check read access") } - err = storage.Initialize(ctx, stg) + err = util.Initialize(ctx, stg) if err != nil { return errors.Wrap(err, "init storage") } } else { // check write permission and update PBM version - err = storage.Reinitialize(ctx, stg) + err = util.Reinitialize(ctx, stg) if err != nil { return errors.Wrap(err, "reinit storage") } diff --git a/pbm/storage/fs/fs.go b/pbm/storage/fs/fs.go index 467c5e208..984d38857 100644 --- a/pbm/storage/fs/fs.go +++ b/pbm/storage/fs/fs.go @@ -11,6 +11,19 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/storage" ) +type RetryableError struct { + Err error +} + +func (e *RetryableError) Error() string { + return e.Err.Error() +} + +func IsRetryableError(err error) bool { + var e *RetryableError + return errors.As(err, &e) +} + type Config struct { Path string `bson:"path" json:"path" yaml:"path"` } @@ -97,7 +110,13 @@ func writeSync(finalpath string, data io.Reader) (err error) { if fw != nil { fw.Close() } - os.Remove(filepath) + + if os.IsNotExist(err) { + err = &RetryableError{Err: err} + } else { + os.Remove(filepath) + } + } }() diff --git a/pbm/storage/storage.go b/pbm/storage/storage.go index 777232764..228d6f8f9 100644 --- a/pbm/storage/storage.go +++ b/pbm/storage/storage.go @@ -4,13 +4,11 @@ import ( "context" "fmt" "io" - "strings" "github.com/percona/percona-backup-mongodb/pbm/compress" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/log" - "github.com/percona/percona-backup-mongodb/pbm/version" ) var ( 
@@ -127,30 +125,6 @@ func HasReadAccess(ctx context.Context, stg Storage) error { return nil } -// Initialize write current PBM version to PBM init file. -// -// It does not handle "file already exists" error. -func Initialize(ctx context.Context, stg Storage) error { - err := stg.Save(defs.StorInitFile, strings.NewReader(version.Current().Version), 0) - if err != nil { - return errors.Wrap(err, "write init file") - } - - return nil -} - -// Reinitialize delete existing PBM init file and create new once with current PBM version. -// -// It expects that the file exists. -func Reinitialize(ctx context.Context, stg Storage) error { - err := stg.Delete(defs.StorInitFile) - if err != nil { - return errors.Wrap(err, "delete init file") - } - - return Initialize(ctx, stg) -} - // rwError multierror for the read/compress/write-to-store operations set type rwError struct { read error diff --git a/pbm/util/storage.go b/pbm/util/storage.go index e887ba22d..b89e6f081 100644 --- a/pbm/util/storage.go +++ b/pbm/util/storage.go @@ -1,10 +1,12 @@ package util import ( + "bytes" "context" "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/connect" + "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/log" "github.com/percona/percona-backup-mongodb/pbm/storage" @@ -12,6 +14,7 @@ import ( "github.com/percona/percona-backup-mongodb/pbm/storage/blackhole" "github.com/percona/percona-backup-mongodb/pbm/storage/fs" "github.com/percona/percona-backup-mongodb/pbm/storage/s3" + "github.com/percona/percona-backup-mongodb/pbm/version" ) // ErrStorageUndefined is an error for undefined storage @@ -45,3 +48,38 @@ func GetStorage(ctx context.Context, m connect.Client, l log.LogEvent) (storage. return StorageFromConfig(&c.Storage, l) } + +// Initialize write current PBM version to PBM init file. +// +// It does not handle "file already exists" error. +func Initialize(ctx context.Context, stg storage.Storage) error { + err := RetryableWrite(stg, defs.StorInitFile, []byte(version.Current().Version)) + if err != nil { + return errors.Wrap(err, "write init file") + } + + return nil +} + +// Reinitialize delete existing PBM init file and create new once with current PBM version. +// +// It expects that the file exists. 
+func Reinitialize(ctx context.Context, stg storage.Storage) error { + err := stg.Delete(defs.StorInitFile) + if err != nil { + return errors.Wrap(err, "delete init file") + } + + return Initialize(ctx, stg) +} + +func RetryableWrite(stg storage.Storage, name string, data []byte) error { + err := stg.Save(name, bytes.NewBuffer(data), int64(len(data))) + if err != nil && stg.Type() == storage.Filesystem { + if fs.IsRetryableError(err) { + err = stg.Save(name, bytes.NewBuffer(data), int64(len(data))) + } + } + + return err +} From 5b05de80c28bac6d1d74477fcc523d544c6d77e3 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 19 Aug 2024 14:17:54 +0200 Subject: [PATCH 188/203] [PBM-1379] retry to write replset and cluster ts during physical restore --- cmd/pbm/restore.go | 10 ++--- pbm/restore/physical.go | 95 +++++++++++++++++------------------------ 2 files changed, 42 insertions(+), 63 deletions(-) diff --git a/cmd/pbm/restore.go b/cmd/pbm/restore.go index e9d76dd55..3db18c5b7 100644 --- a/cmd/pbm/restore.go +++ b/cmd/pbm/restore.go @@ -1,7 +1,6 @@ package main import ( - "bytes" "context" "encoding/json" "fmt" @@ -429,11 +428,10 @@ func runFinishRestore(o descrRestoreOpts) (fmt.Stringer, error) { } path := fmt.Sprintf("%s/%s/cluster", defs.PhysRestoresDir, o.restore) - return outMsg{"Command sent. Check `pbm describe-restore ...` for the result."}, - stg.Save(path+"."+string(defs.StatusCopyDone), - bytes.NewReader([]byte( - fmt.Sprintf("%d", time.Now().Unix()), - )), -1) + msg := outMsg{"Command sent. Check `pbm describe-restore ...` for the result."} + err = stg.Save(path+"."+string(defs.StatusCopyDone), + strings.NewReader(fmt.Sprintf("%d", time.Now().Unix())), -1) + return msg, err } func parseTS(t string) (primitive.Timestamp, error) { diff --git a/pbm/restore/physical.go b/pbm/restore/physical.go index f61295d16..8d00016a8 100644 --- a/pbm/restore/physical.go +++ b/pbm/restore/physical.go @@ -283,14 +283,13 @@ func (r *PhysRestore) flush(ctx context.Context) error { } if r.nodeInfo.IsPrimary { - err = r.stg.Save(r.syncPathRS+"."+string(defs.StatusDown), - okStatus(), -1) + err = util.RetryableWrite(r.stg, r.syncPathRS+"."+string(defs.StatusDown), okStatus()) if err != nil { return errors.Wrap(err, "write replset StatusDown") } } - r.log.Debug("revome old data") + r.log.Debug("remove old data") err = removeAll(r.dbpath, r.log) if err != nil { return errors.Wrapf(err, "flush dbpath %s", r.dbpath) @@ -384,15 +383,15 @@ func (r *PhysRestore) toState(status defs.Status) (_ defs.Status, err error) { defer func() { if err != nil { if r.nodeInfo.IsPrimary && status != defs.StatusDone { - serr := r.stg.Save(r.syncPathRS+"."+string(defs.StatusError), - errStatus(err), -1) + serr := util.RetryableWrite(r.stg, + r.syncPathRS+"."+string(defs.StatusError), errStatus(err)) if serr != nil { r.log.Error("toState: write replset error state `%v`: %v", err, serr) } } if r.nodeInfo.IsClusterLeader() && status != defs.StatusDone { - serr := r.stg.Save(r.syncPathCluster+"."+string(defs.StatusError), - errStatus(err), -1) + serr := util.RetryableWrite(r.stg, + r.syncPathCluster+"."+string(defs.StatusError), errStatus(err)) if serr != nil { r.log.Error("toState: write cluster error state `%v`: %v", err, serr) } @@ -402,8 +401,7 @@ func (r *PhysRestore) toState(status defs.Status) (_ defs.Status, err error) { r.log.Info("moving to state %s", status) - err = r.stg.Save(r.syncPathNode+"."+string(status), - okStatus(), -1) + err = util.RetryableWrite(r.stg, r.syncPathNode+"."+string(status), okStatus()) if err 
!= nil { return defs.StatusError, errors.Wrap(err, "write node state") } @@ -415,8 +413,7 @@ func (r *PhysRestore) toState(status defs.Status) (_ defs.Status, err error) { return defs.StatusError, errors.Wrap(err, "wait for nodes in rs") } - err = r.stg.Save(r.syncPathRS+"."+string(cstat), - okStatus(), -1) + err = util.RetryableWrite(r.stg, r.syncPathRS+"."+string(cstat), okStatus()) if err != nil { return defs.StatusError, errors.Wrap(err, "write replset state") } @@ -429,10 +426,9 @@ func (r *PhysRestore) toState(status defs.Status) (_ defs.Status, err error) { return defs.StatusError, errors.Wrap(err, "wait for shards") } - err = r.stg.Save(r.syncPathCluster+"."+string(cstat), - okStatus(), -1) + err = util.RetryableWrite(r.stg, r.syncPathCluster+"."+string(cstat), okStatus()) if err != nil { - return defs.StatusError, errors.Wrap(err, "write replset state") + return defs.StatusError, errors.Wrap(err, "write cluster state") } } @@ -479,16 +475,12 @@ func (r *PhysRestore) getTSFromSyncFile(path string) (primitive.Timestamp, error }, nil } -func errStatus(err error) io.Reader { - return bytes.NewReader([]byte( - fmt.Sprintf("%d:%v", time.Now().Unix(), err), - )) +func errStatus(err error) []byte { + return []byte(fmt.Sprintf("%d:%v", time.Now().Unix(), err)) } -func okStatus() io.Reader { - return bytes.NewReader([]byte( - fmt.Sprintf("%d", time.Now().Unix()), - )) +func okStatus() []byte { + return []byte(fmt.Sprintf("%d", time.Now().Unix())) } type nodeError struct { @@ -1024,7 +1016,7 @@ func (r *PhysRestore) writeStat(stat any) error { return errors.Wrap(err, "marshal") } - err = r.stg.Save(r.syncPathNodeStat, bytes.NewBuffer(b), -1) + err = util.RetryableWrite(r.stg, r.syncPathNodeStat, b) if err != nil { return errors.Wrap(err, "write") } @@ -1065,14 +1057,11 @@ func (r *PhysRestore) dumpMeta(meta *RestoreMeta, s defs.Status, msg string) err meta.Error = fmt.Sprintf("%s/%s: %s", r.nodeInfo.SetName, r.nodeInfo.Me, msg) } - var buf bytes.Buffer - enc := json.NewEncoder(&buf) - enc.SetIndent("", "\t") - err = enc.Encode(meta) + buf, err := json.MarshalIndent(meta, "", "\t") if err != nil { return errors.Wrap(err, "encode restore meta") } - err = r.stg.Save(name, &buf, int64(buf.Len())) + err = util.RetryableWrite(r.stg, name, buf) if err != nil { return errors.Wrap(err, "write restore meta") } @@ -1385,12 +1374,11 @@ func (r *PhysRestore) replayOplog( tops = append(tops, t.Oplog...) 
} - var b bytes.Buffer - err := json.NewEncoder(&b).Encode(tops) + buf, err := json.Marshal(tops) if err != nil { return errors.Wrap(err, "encode") } - err = r.stg.Save(r.syncPathRS+".partTxn", &b, int64(b.Len())) + err = util.RetryableWrite(r.stg, r.syncPathRS+".partTxn", buf) if err != nil { return errors.Wrap(err, "write partial transactions") } @@ -1617,12 +1605,11 @@ func (r *PhysRestore) agreeCommonRestoreTS() (primitive.Timestamp, error) { return ts, errors.Wrap(err, "define last op time") } - bts := bytes.NewReader([]byte( - fmt.Sprintf("%d:%d,%d", time.Now().Unix(), cts.T, cts.I), - )) // saving straight for RS as backup for nodes in the RS the same, // hence TS would be the same as well - err = r.stg.Save(r.syncPathRS+"."+string(defs.StatusExtTS), bts, -1) + err = util.RetryableWrite(r.stg, + r.syncPathRS+"."+string(defs.StatusExtTS), + []byte(fmt.Sprintf("%d:%d,%d", time.Now().Unix(), cts.T, cts.I))) if err != nil { return ts, errors.Wrap(err, "write RS timestamp") } @@ -1643,10 +1630,10 @@ func (r *PhysRestore) agreeCommonRestoreTS() (primitive.Timestamp, error) { mints = ts } } - bts := bytes.NewReader([]byte( - fmt.Sprintf("%d:%d,%d", time.Now().Unix(), mints.T, mints.I), - )) - err = r.stg.Save(r.syncPathCluster+"."+string(defs.StatusExtTS), bts, -1) + + err = util.RetryableWrite(r.stg, + r.syncPathCluster+"."+string(defs.StatusExtTS), + []byte(fmt.Sprintf("%d:%d,%d", time.Now().Unix(), mints.T, mints.I))) if err != nil { return ts, errors.Wrap(err, "write") } @@ -1669,14 +1656,11 @@ func (r *PhysRestore) setcommittedTxn(_ context.Context, txn []phys.RestoreTxn) if txn == nil { txn = []phys.RestoreTxn{} } - var b bytes.Buffer - err := json.NewEncoder(&b).Encode(txn) + b, err := json.Marshal(txn) if err != nil { return errors.Wrap(err, "encode") } - return r.stg.Save(r.syncPathRS+".txn", - &b, int64(b.Len()), - ) + return util.RetryableWrite(r.stg, r.syncPathRS+".txn", b) } func (r *PhysRestore) getcommittedTxn(context.Context) (map[string]primitive.Timestamp, error) { @@ -1895,22 +1879,19 @@ func (r *PhysRestore) init(ctx context.Context, name string, opid ctrl.OPID, l l const syncHbSuffix = "hb" func (r *PhysRestore) hb() error { - ts := time.Now().Unix() + now := []byte(strconv.FormatInt(time.Now().Unix(), 10)) - err := r.stg.Save(r.syncPathNode+"."+syncHbSuffix, - bytes.NewReader([]byte(strconv.FormatInt(ts, 10))), -1) + err := util.RetryableWrite(r.stg, r.syncPathNode+"."+syncHbSuffix, now) if err != nil { return errors.Wrap(err, "write node hb") } - err = r.stg.Save(r.syncPathRS+"."+syncHbSuffix, - bytes.NewReader([]byte(strconv.FormatInt(ts, 10))), -1) + err = util.RetryableWrite(r.stg, r.syncPathRS+"."+syncHbSuffix, now) if err != nil { return errors.Wrap(err, "write rs hb") } - err = r.stg.Save(r.syncPathCluster+"."+syncHbSuffix, - bytes.NewReader([]byte(strconv.FormatInt(ts, 10))), -1) + err = util.RetryableWrite(r.stg, r.syncPathCluster+"."+syncHbSuffix, now) if err != nil { return errors.Wrap(err, "write rs hb") } @@ -2307,8 +2288,8 @@ func (r *PhysRestore) MarkFailed(meta *RestoreMeta, e error, markCluster bool) { meta.Replsets[0].Error = e.Error() } - err := r.stg.Save(r.syncPathNode+"."+string(defs.StatusError), - errStatus(e), -1) + err := util.RetryableWrite(r.stg, + r.syncPathNode+"."+string(defs.StatusError), errStatus(e)) if err != nil { r.log.Error("write error state `%v` to storage: %v", e, err) } @@ -2317,15 +2298,15 @@ func (r *PhysRestore) MarkFailed(meta *RestoreMeta, e error, markCluster bool) { // (in `toState` method). 
// Here we are not aware of partlyDone etc so leave it to the `toState`. if r.nodeInfo.IsPrimary && markCluster { - serr := r.stg.Save(r.syncPathRS+"."+string(defs.StatusError), - errStatus(e), -1) + serr := util.RetryableWrite(r.stg, + r.syncPathRS+"."+string(defs.StatusError), errStatus(e)) if serr != nil { r.log.Error("MarkFailed: write replset error state `%v`: %v", e, serr) } } if r.nodeInfo.IsClusterLeader() && markCluster { - serr := r.stg.Save(r.syncPathCluster+"."+string(defs.StatusError), - errStatus(e), -1) + serr := util.RetryableWrite(r.stg, + r.syncPathCluster+"."+string(defs.StatusError), errStatus(e)) if serr != nil { r.log.Error("MarkFailed: write cluster error state `%v`: %v", e, serr) } From d114970179e6bb9d66849b9cdcadec7fa626bae7 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Mon, 19 Aug 2024 14:36:09 +0200 Subject: [PATCH 189/203] [PBM-1379] do not list temp files unless they are not requested explicitly --- pbm/storage/fs/fs.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pbm/storage/fs/fs.go b/pbm/storage/fs/fs.go index 984d38857..36952a6eb 100644 --- a/pbm/storage/fs/fs.go +++ b/pbm/storage/fs/fs.go @@ -24,6 +24,8 @@ func IsRetryableError(err error) bool { return errors.As(err, &e) } +const tmpFileSuffix = ".tmp" + type Config struct { Path string `bson:"path" json:"path" yaml:"path"` } @@ -94,7 +96,7 @@ func (*FS) Type() storage.Type { //nolint:nonamedreturns func writeSync(finalpath string, data io.Reader) (err error) { - filepath := finalpath + ".tmp" + filepath := finalpath + tmpFileSuffix err = os.MkdirAll(path.Dir(filepath), os.ModeDir|0o755) if err != nil { @@ -206,6 +208,11 @@ func (fs *FS) List(prefix, suffix string) ([]storage.FileInfo, error) { if f[0] == '/' { f = f[1:] } + + // ignore temp file unless it is not requested explicitly + if suffix == "" && strings.HasSuffix(f, tmpFileSuffix) { + return nil + } if strings.HasSuffix(f, suffix) { files = append(files, storage.FileInfo{Name: f, Size: info.Size()}) } From c6b73bc95e1adb62e18e563d4e5005e2dd1a438e Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 15 Aug 2024 14:16:09 +0200 Subject: [PATCH 190/203] [PBM-1239] use AWS SDK for any S3-like --- .../tests/sharded/test_backup_cancellation.go | 47 +++- .../pkg/tests/sharded/test_delete_backup.go | 28 ++- pbm/storage/s3/s3.go | 208 ++++++------------ 3 files changed, 121 insertions(+), 162 deletions(-) diff --git a/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go b/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go index 16539dde4..1adabecc1 100644 --- a/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go +++ b/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go @@ -2,18 +2,23 @@ package sharded import ( "context" - "fmt" "log" "net/url" "os" "strings" "time" - "github.com/minio/minio-go" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + awsS3 "github.com/aws/aws-sdk-go/service/s3" + "gopkg.in/yaml.v2" "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/defs" + "github.com/percona/percona-backup-mongodb/pbm/errors" + "github.com/percona/percona-backup-mongodb/pbm/storage/s3" ) func (c *Cluster) BackupCancellation(storage string) { @@ -72,20 +77,40 @@ func checkNoBackupFiles(backupName, conf string) { endopintURL = eu.Host } - mc, err := minio.NewWithRegion(endopintURL, - stg.S3.Credentials.AccessKeyID, stg.S3.Credentials.SecretAccessKey, false, stg.S3.Region) + ss, err := 
newS3Client(endopintURL, stg.S3.Region, &stg.S3.Credentials) if err != nil { - log.Fatalln("Error: NewWithRegion:", err) + log.Fatalf("create S3 client: %v", err) } - for object := range mc.ListObjects(stg.S3.Bucket, stg.S3.Prefix, true, nil) { - if object.Err != nil { - fmt.Println("Error: ListObjects: ", object.Err) - continue - } + res, err := ss.ListObjectsV2(&awsS3.ListObjectsV2Input{ + Bucket: &stg.S3.Bucket, + Prefix: &stg.S3.Prefix, + }) + if err != nil { + log.Fatalf("list files on S3: %v", err) + } - if strings.Contains(object.Key, backupName) { + for _, object := range res.Contents { + s := object.String() + if strings.Contains(s, backupName) { log.Fatalln("Error: failed to delete lefover", object.Key) } } } + +func newS3Client(uri, region string, creds *s3.Credentials) (*awsS3.S3, error) { + sess, err := session.NewSession(&aws.Config{ + Region: ®ion, + Endpoint: &uri, + Credentials: credentials.NewStaticCredentials( + creds.AccessKeyID, + creds.SecretAccessKey, + creds.SessionToken, + ), + }) + if err != nil { + return nil, errors.Wrap(err, "create AWS session") + } + + return awsS3.New(sess), nil +} diff --git a/e2e-tests/pkg/tests/sharded/test_delete_backup.go b/e2e-tests/pkg/tests/sharded/test_delete_backup.go index a6de214c5..217f35096 100644 --- a/e2e-tests/pkg/tests/sharded/test_delete_backup.go +++ b/e2e-tests/pkg/tests/sharded/test_delete_backup.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "github.com/minio/minio-go" + awsS3 "github.com/aws/aws-sdk-go/service/s3" "gopkg.in/yaml.v2" "github.com/percona/percona-backup-mongodb/pbm/config" @@ -197,30 +197,34 @@ func checkArtefacts(conf string, shouldStay map[string]struct{}) { endopintURL = eu.Host } - mc, err := minio.NewWithRegion(endopintURL, - stg.S3.Credentials.AccessKeyID, stg.S3.Credentials.SecretAccessKey, false, stg.S3.Region) + ss, err := newS3Client(endopintURL, stg.S3.Region, &stg.S3.Credentials) if err != nil { - log.Fatalln("ERROR: NewWithRegion:", err) + log.Fatalf("create S3 client: %v", err) } - for object := range mc.ListObjects(stg.S3.Bucket, stg.S3.Prefix, true, nil) { - if strings.Contains(object.Key, defs.StorInitFile) || strings.Contains(object.Key, "/pbmPitr/") { - continue - } - if object.Err != nil { - fmt.Println("ERROR: ListObjects: ", object.Err) + res, err := ss.ListObjectsV2(&awsS3.ListObjectsV2Input{ + Bucket: &stg.S3.Bucket, + Prefix: &stg.S3.Prefix, + }) + if err != nil { + log.Fatalf("list files on S3: %v", err) + } + + for _, object := range res.Contents { + objectKey := *object.Key + if strings.Contains(objectKey, defs.StorInitFile) || strings.Contains(objectKey, "/pbmPitr/") { continue } var ok bool for b := range shouldStay { - if strings.Contains(object.Key, b) { + if strings.Contains(objectKey, b) { ok = true break } } if !ok { - log.Fatalln("ERROR: failed to delete lefover", object.Key) + log.Fatalln("ERROR: failed to delete lefover", objectKey) } } } diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go index 55a91e9d1..f3c2aab6e 100644 --- a/pbm/storage/s3/s3.go +++ b/pbm/storage/s3/s3.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "net/http" - "net/url" "os" "path" "reflect" @@ -27,8 +26,6 @@ import ( "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/aws/aws-sdk-go/service/sts" - "github.com/minio/minio-go" - "github.com/minio/minio-go/pkg/encrypt" "github.com/percona/percona-backup-mongodb/pbm/errors" "github.com/percona/percona-backup-mongodb/pbm/log" @@ -44,7 +41,6 @@ const ( //nolint:lll type Config struct { - Provider 
diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go
index 55a91e9d1..f3c2aab6e 100644
--- a/pbm/storage/s3/s3.go
+++ b/pbm/storage/s3/s3.go
@@ -7,7 +7,6 @@ import (
 	"fmt"
 	"io"
 	"net/http"
-	"net/url"
 	"os"
 	"path"
 	"reflect"
@@ -27,8 +26,6 @@ import (
 	"github.com/aws/aws-sdk-go/service/s3"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
 	"github.com/aws/aws-sdk-go/service/sts"
-	"github.com/minio/minio-go"
-	"github.com/minio/minio-go/pkg/encrypt"
 
 	"github.com/percona/percona-backup-mongodb/pbm/errors"
 	"github.com/percona/percona-backup-mongodb/pbm/log"
@@ -44,7 +41,6 @@ const (
 
 //nolint:lll
 type Config struct {
-	Provider       S3Provider `bson:"provider,omitempty" json:"provider,omitempty" yaml:"provider,omitempty"`
 	Region         string     `bson:"region" json:"region" yaml:"region"`
 	EndpointURL    string     `bson:"endpointUrl,omitempty" json:"endpointUrl" yaml:"endpointUrl,omitempty"`
 	ForcePathStyle *bool      `bson:"forcePathStyle,omitempty" json:"forcePathStyle,omitempty" yaml:"forcePathStyle,omitempty"`
@@ -157,9 +153,6 @@ func (cfg *Config) Equal(other *Config) bool {
 		return cfg == other
 	}
 
-	if cfg.Provider != other.Provider {
-		return false
-	}
 	if cfg.Region != other.Region {
 		return false
 	}
@@ -206,18 +199,6 @@ func (cfg *Config) Cast() error {
 	if cfg.ForcePathStyle == nil {
 		cfg.ForcePathStyle = aws.Bool(true)
 	}
-	if cfg.Provider == S3ProviderUndef {
-		cfg.Provider = S3ProviderAWS
-		if cfg.EndpointURL != "" {
-			eu, err := url.Parse(cfg.EndpointURL)
-			if err != nil {
-				return errors.Wrap(err, "parse EndpointURL")
-			}
-			if eu.Host == GCSEndpointURL {
-				cfg.Provider = S3ProviderGCS
-			}
-		}
-	}
 	if cfg.MaxUploadParts <= 0 {
 		cfg.MaxUploadParts = s3manager.MaxUploadParts
 	}
@@ -283,14 +264,6 @@ type Credentials struct {
 	} `bson:"vault" json:"vault" yaml:"vault,omitempty"`
 }
 
-type S3Provider string
-
-const (
-	S3ProviderUndef S3Provider = ""
-	S3ProviderAWS   S3Provider = "aws"
-	S3ProviderGCS   S3Provider = "gcs"
-)
-
 type S3 struct {
 	opts *Config
 	log  log.LogEvent
@@ -335,131 +308,88 @@ func (*S3) Type() storage.Type {
 }
 
 func (s *S3) Save(name string, data io.Reader, sizeb int64) error {
-	switch s.opts.Provider {
-	default:
-		awsSession, err := s.session()
-		if err != nil {
-			return errors.Wrap(err, "create AWS session")
-		}
-		cc := runtime.NumCPU() / 2
-		if cc == 0 {
-			cc = 1
-		}
+	awsSession, err := s.session()
+	if err != nil {
+		return errors.Wrap(err, "create AWS session")
+	}
+	cc := runtime.NumCPU() / 2
+	if cc == 0 {
+		cc = 1
+	}
 
-		uplInput := &s3manager.UploadInput{
-			Bucket:       aws.String(s.opts.Bucket),
-			Key:          aws.String(path.Join(s.opts.Prefix, name)),
-			Body:         data,
-			StorageClass: &s.opts.StorageClass,
-		}
+	uplInput := &s3manager.UploadInput{
+		Bucket:       aws.String(s.opts.Bucket),
+		Key:          aws.String(path.Join(s.opts.Prefix, name)),
+		Body:         data,
+		StorageClass: &s.opts.StorageClass,
+	}
 
-		sse := s.opts.ServerSideEncryption
-		if sse != nil {
-			if sse.SseAlgorithm == s3.ServerSideEncryptionAes256 {
-				uplInput.ServerSideEncryption = aws.String(sse.SseAlgorithm)
-			} else if sse.SseAlgorithm == s3.ServerSideEncryptionAwsKms {
-				uplInput.ServerSideEncryption = aws.String(sse.SseAlgorithm)
-				uplInput.SSEKMSKeyId = aws.String(sse.KmsKeyID)
-			} else if sse.SseCustomerAlgorithm != "" {
-				uplInput.SSECustomerAlgorithm = aws.String(sse.SseCustomerAlgorithm)
-				decodedKey, err := base64.StdEncoding.DecodeString(sse.SseCustomerKey)
-				uplInput.SSECustomerKey = aws.String(string(decodedKey))
-				if err != nil {
-					return errors.Wrap(err, "SseCustomerAlgorithm specified with invalid SseCustomerKey")
-				}
-				keyMD5 := md5.Sum(decodedKey)
-				uplInput.SSECustomerKeyMD5 = aws.String(base64.StdEncoding.EncodeToString(keyMD5[:]))
+	sse := s.opts.ServerSideEncryption
+	if sse != nil {
+		if sse.SseAlgorithm == s3.ServerSideEncryptionAes256 {
+			uplInput.ServerSideEncryption = aws.String(sse.SseAlgorithm)
+		} else if sse.SseAlgorithm == s3.ServerSideEncryptionAwsKms {
+			uplInput.ServerSideEncryption = aws.String(sse.SseAlgorithm)
+			uplInput.SSEKMSKeyId = aws.String(sse.KmsKeyID)
+		} else if sse.SseCustomerAlgorithm != "" {
+			uplInput.SSECustomerAlgorithm = aws.String(sse.SseCustomerAlgorithm)
+			decodedKey, err := base64.StdEncoding.DecodeString(sse.SseCustomerKey)
+			uplInput.SSECustomerKey = aws.String(string(decodedKey))
+			if err != nil {
+				return errors.Wrap(err, "SseCustomerAlgorithm specified with invalid SseCustomerKey")
 			}
+			keyMD5 := md5.Sum(decodedKey)
+			uplInput.SSECustomerKeyMD5 = aws.String(base64.StdEncoding.EncodeToString(keyMD5[:]))
 		}
+	}
 
-		// MaxUploadParts is 1e4 so with PartSize 10Mb the max allowed file size
-		// would be ~ 97.6Gb. Hence if the file size is bigger we're enlarging PartSize
-		// so PartSize * MaxUploadParts could fit the file.
-		// If calculated PartSize is smaller than the default we leave the default.
-		// If UploadPartSize option was set we use it instead of the default. Even
-		// with the UploadPartSize set the calculated PartSize woulbe used if it's bigger.
-		partSize := defaultPartSize
-		if s.opts.UploadPartSize > 0 {
-			if s.opts.UploadPartSize < int(s3manager.MinUploadPartSize) {
-				s.opts.UploadPartSize = int(s3manager.MinUploadPartSize)
-			}
-
-			partSize = int64(s.opts.UploadPartSize)
-		}
-		if sizeb > 0 {
-			ps := sizeb / s3manager.MaxUploadParts * 11 / 10 // add 10% just in case
-			if ps > partSize {
-				partSize = ps
-			}
+	// MaxUploadParts is 1e4 so with PartSize 10Mb the max allowed file size
+	// would be ~ 97.6Gb. Hence if the file size is bigger we're enlarging PartSize
+	// so PartSize * MaxUploadParts could fit the file.
+	// If calculated PartSize is smaller than the default we leave the default.
+	// If UploadPartSize option was set we use it instead of the default. Even
+	// with the UploadPartSize set the calculated PartSize would be used if it's bigger.
+	partSize := defaultPartSize
+	if s.opts.UploadPartSize > 0 {
+		if s.opts.UploadPartSize < int(s3manager.MinUploadPartSize) {
+			s.opts.UploadPartSize = int(s3manager.MinUploadPartSize)
 		}
 
+		partSize = int64(s.opts.UploadPartSize)
+	}
+	if sizeb > 0 {
+		ps := sizeb / s3manager.MaxUploadParts * 11 / 10 // add 10% just in case
+		if ps > partSize {
+			partSize = ps
+		}
+	}
 
-		if s.log != nil {
-			s.log.Debug("uploading %q [size hint: %v (%v); part size: %v (%v)]",
-				name,
-				sizeb,
-				storage.PrettySize(sizeb),
-				partSize,
-				storage.PrettySize(partSize))
-		}
+	if s.log != nil {
+		s.log.Debug("uploading %q [size hint: %v (%v); part size: %v (%v)]",
+			name,
+			sizeb,
+			storage.PrettySize(sizeb),
+			partSize,
+			storage.PrettySize(partSize))
+	}
 
-		_, err = s3manager.NewUploader(awsSession, func(u *s3manager.Uploader) {
-			u.MaxUploadParts = s.opts.MaxUploadParts
-			u.PartSize = partSize      // 10MB part size
-			u.LeavePartsOnError = true // Don't delete the parts if the upload fails.
-			u.Concurrency = cc
-
-			u.RequestOptions = append(u.RequestOptions, func(r *request.Request) {
-				if s.opts.Retryer != nil {
-					r.Retryer = client.DefaultRetryer{
-						NumMaxRetries: s.opts.Retryer.NumMaxRetries,
-						MinRetryDelay: s.opts.Retryer.MinRetryDelay,
-						MaxRetryDelay: s.opts.Retryer.MaxRetryDelay,
-					}
-				}
-			})
-		}).Upload(uplInput)
-		return errors.Wrap(err, "upload to S3")
-	case S3ProviderGCS:
-		// using minio client with GCS because it
-		// allows to disable chuncks muiltipertition for upload
-		mc, err := minio.NewWithRegion(GCSEndpointURL,
-			s.opts.Credentials.AccessKeyID,
-			s.opts.Credentials.SecretAccessKey,
-			true,
-			s.opts.Region)
-		if err != nil {
-			return errors.Wrap(err, "NewWithRegion")
-		}
-		putOpts := minio.PutObjectOptions{
-			StorageClass: s.opts.StorageClass,
-		}
-
-		// Enable server-side encryption if configured
-		sse := s.opts.ServerSideEncryption
-		if sse != nil {
-			if sse.SseAlgorithm == s3.ServerSideEncryptionAwsKms {
-				sseKms, err := encrypt.NewSSEKMS(sse.KmsKeyID, nil)
-				if err != nil {
-					return errors.Wrap(err, "Could not create SSE KMS")
-				}
-				putOpts.ServerSideEncryption = sseKms
-			} else if sse.SseCustomerAlgorithm != "" {
-				decodedKey, err := base64.StdEncoding.DecodeString(sse.SseCustomerKey)
-				if err != nil {
-					return errors.Wrap(err, "SseCustomerAlgorithm specified with invalid SseCustomerKey")
-				}
-
-				sseCus, err := encrypt.NewSSEC(decodedKey)
-				if err != nil {
-					return errors.Wrap(err, "Could not create SSE-C SSE key")
+	_, err = s3manager.NewUploader(awsSession, func(u *s3manager.Uploader) {
+		u.MaxUploadParts = s.opts.MaxUploadParts
+		u.PartSize = partSize      // 10MB part size
+		u.LeavePartsOnError = true // Don't delete the parts if the upload fails.
+		u.Concurrency = cc
+
+		u.RequestOptions = append(u.RequestOptions, func(r *request.Request) {
+			if s.opts.Retryer != nil {
+				r.Retryer = client.DefaultRetryer{
+					NumMaxRetries: s.opts.Retryer.NumMaxRetries,
+					MinRetryDelay: s.opts.Retryer.MinRetryDelay,
+					MaxRetryDelay: s.opts.Retryer.MaxRetryDelay,
 				}
-				putOpts.ServerSideEncryption = sseCus
 			}
-		}
-
-		_, err = mc.PutObject(s.opts.Bucket, path.Join(s.opts.Prefix, name), data, -1, putOpts)
-		return errors.Wrap(err, "upload to GCS")
-	}
+		})
+	}).Upload(uplInput)
+	return errors.Wrap(err, "upload to S3")
 }
 
 func (s *S3) List(prefix, suffix string) ([]storage.FileInfo, error) {
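A quick arithmetic check of the part-size logic in the Save hunk above (a sketch, not part of the patch; the 500 GiB size hint is made up): with the default 10 MiB parts and the SDK cap of 10,000 parts, an upload tops out around 97.6 GiB, so for bigger objects the size hint scales the part size up, with 10% slack:

    package main

    import "fmt"

    const (
    	maxUploadParts  = 10_000           // s3manager.MaxUploadParts
    	defaultPartSize = 10 * 1024 * 1024 // 10 MiB, as in the patch
    )

    func main() {
    	sizeb := int64(500) * 1024 * 1024 * 1024 // 500 GiB size hint
    	partSize := int64(defaultPartSize)
    	ps := sizeb / maxUploadParts * 11 / 10 // same formula as in Save()
    	if ps > partSize {
    		partSize = ps
    	}
    	// Prints 59055800, i.e. roughly 56 MiB per part, so 10,000 parts
    	// comfortably cover the 500 GiB object.
    	fmt.Printf("part size: %d bytes\n", partSize)
    }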
From a539338e90477e1e065bf3e3f797b4a31eae003a Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Thu, 15 Aug 2024 14:16:29 +0200
Subject: [PATCH 191/203] [PBM-1239] remove minio deps

---
 go.mod                                        |   5 +-
 go.sum                                        |  10 +-
 .../aws/aws-sdk-go/aws/endpoints/defaults.go  |  19 +
 .../aws/aws-sdk-go/aws/request/waiter.go      |  13 +-
 .../github.com/aws/aws-sdk-go/aws/version.go  |   2 +-
 vendor/github.com/go-ini/ini/.editorconfig    |  12 -
 vendor/github.com/go-ini/ini/.gitignore       |   7 -
 vendor/github.com/go-ini/ini/.golangci.yml    |  27 -
 vendor/github.com/go-ini/ini/LICENSE          | 191 ----
 vendor/github.com/go-ini/ini/Makefile         |  15 -
 vendor/github.com/go-ini/ini/README.md        |  43 -
 vendor/github.com/go-ini/ini/codecov.yml      |  16 -
 vendor/github.com/go-ini/ini/data_source.go   |  76 --
 vendor/github.com/go-ini/ini/deprecated.go    |  22 -
 vendor/github.com/go-ini/ini/error.go         |  49 -
 vendor/github.com/go-ini/ini/file.go          | 541 -----------
 vendor/github.com/go-ini/ini/helper.go        |  24 -
 vendor/github.com/go-ini/ini/ini.go           | 176 ----
 vendor/github.com/go-ini/ini/key.go           | 837 ----------------
 vendor/github.com/go-ini/ini/parser.go        | 520 ----------
 vendor/github.com/go-ini/ini/section.go       | 256 -----
 vendor/github.com/go-ini/ini/struct.go        | 747 ---------------
 vendor/github.com/minio/minio-go/.gitignore   |   3 -
 vendor/github.com/minio/minio-go/.travis.yml  |  28 -
 .../github.com/minio/minio-go/CONTRIBUTING.md |  23 -
 vendor/github.com/minio/minio-go/LICENSE      | 202 ----
 .../github.com/minio/minio-go/MAINTAINERS.md  |  35 -
 vendor/github.com/minio/minio-go/Makefile     |  15 -
 vendor/github.com/minio/minio-go/NOTICE       |   2 -
 vendor/github.com/minio/minio-go/README.md    | 239 -----
 .../github.com/minio/minio-go/README_zh_CN.md | 245 -----
 .../minio/minio-go/api-compose-object.go      | 565 -----------
 .../minio/minio-go/api-datatypes.go           |  84 --
 .../minio/minio-go/api-error-response.go      | 282 ------
 .../minio/minio-go/api-get-lifecycle.go       |  77 --
 .../minio/minio-go/api-get-object-acl.go      | 136 ---
 .../minio/minio-go/api-get-object-context.go  |  26 -
 .../minio/minio-go/api-get-object-file.go     | 125 ---
 .../minio/minio-go/api-get-object.go          | 659 -------------
 .../minio/minio-go/api-get-options.go         | 128 ---
 .../minio/minio-go/api-get-policy.go          |  78 --
 vendor/github.com/minio/minio-go/api-list.go  | 715 --------------
 .../minio/minio-go/api-notification.go        | 228 -----
 .../minio/minio-go/api-presigned.go           | 215 -----
 .../minio/minio-go/api-put-bucket.go          | 306 ------
 .../minio/minio-go/api-put-object-common.go   | 111 ---
 .../minio/minio-go/api-put-object-context.go  |  33 -
 .../minio/minio-go/api-put-object-copy.go     |  83 --
 .../minio-go/api-put-object-file-context.go   |  64 --
 .../minio/minio-go/api-put-object-file.go     |  27 -
 .../minio-go/api-put-object-multipart.go      | 372 --------
 .../minio-go/api-put-object-streaming.go      | 417 --------
 .../minio/minio-go/api-put-object.go          | 267 ------
 .../github.com/minio/minio-go/api-remove.go   | 303 ------
 .../minio/minio-go/api-s3-datatypes.go        | 245 -----
 .../github.com/minio/minio-go/api-select.go   | 532 -----------
 vendor/github.com/minio/minio-go/api-stat.go  | 185 ----
 vendor/github.com/minio/minio-go/api.go       | 898 ------------------
 vendor/github.com/minio/minio-go/appveyor.yml |  39 -
 .../github.com/minio/minio-go/bucket-cache.go | 221 -----
 .../minio/minio-go/bucket-notification.go     | 273 ------
 vendor/github.com/minio/minio-go/constants.go |  62 --
 vendor/github.com/minio/minio-go/core.go      | 153 ---
 .../github.com/minio/minio-go/hook-reader.go  |  71 --
 .../minio/minio-go/pkg/credentials/chain.go   |  89 --
 .../pkg/credentials/config.json.sample        |  17 -
 .../minio-go/pkg/credentials/credentials.go   | 175 ----
 .../pkg/credentials/credentials.sample        |  12 -
 .../minio/minio-go/pkg/credentials/doc.go     |  62 --
 .../minio/minio-go/pkg/credentials/env_aws.go |  71 --
 .../minio-go/pkg/credentials/env_minio.go     |  62 --
 .../pkg/credentials/file_aws_credentials.go   | 120 ---
 .../pkg/credentials/file_minio_client.go      | 133 ---
 .../minio/minio-go/pkg/credentials/iam_aws.go | 250 -----
 .../pkg/credentials/signature-type.go         |  77 --
 .../minio/minio-go/pkg/credentials/static.go  |  67 --
 .../pkg/credentials/sts_client_grants.go      | 173 ----
 .../pkg/credentials/sts_web_identity.go       | 169 ----
 .../minio/minio-go/pkg/encrypt/server-side.go | 195 ----
 .../s3signer/request-signature-streaming.go   | 306 ------
 .../pkg/s3signer/request-signature-v2.go      | 316 ------
 .../pkg/s3signer/request-signature-v4.go      | 315 ------
 .../minio/minio-go/pkg/s3signer/utils.go      |  49 -
 .../minio/minio-go/pkg/s3utils/utils.go       | 331 -------
 .../minio/minio-go/pkg/set/stringset.go       | 197 ----
 .../github.com/minio/minio-go/post-policy.go  | 270 ------
 .../minio/minio-go/retry-continous.go         |  69 --
 vendor/github.com/minio/minio-go/retry.go     | 153 ---
 .../github.com/minio/minio-go/s3-endpoints.go |  52 -
 vendor/github.com/minio/minio-go/s3-error.go  |  61 --
 vendor/github.com/minio/minio-go/transport.go |  50 -
 vendor/github.com/minio/minio-go/utils.go     | 272 ------
 .../github.com/mitchellh/go-homedir/LICENSE   |  21 -
 .../github.com/mitchellh/go-homedir/README.md |  14 -
 .../mitchellh/go-homedir/homedir.go           | 167 ----
 vendor/golang.org/x/crypto/argon2/argon2.go   | 283 ------
 vendor/golang.org/x/crypto/argon2/blake2b.go  |  53 --
 .../x/crypto/argon2/blamka_amd64.go           |  60 --
 .../golang.org/x/crypto/argon2/blamka_amd64.s | 243 -----
 .../x/crypto/argon2/blamka_generic.go         | 163 ----
 .../golang.org/x/crypto/argon2/blamka_ref.go  |  15 -
 vendor/golang.org/x/crypto/blake2b/blake2b.go | 291 ------
 .../x/crypto/blake2b/blake2bAVX2_amd64.go     |  37 -
 .../x/crypto/blake2b/blake2bAVX2_amd64.s      | 744 ---------------
 .../x/crypto/blake2b/blake2b_amd64.s          | 278 ------
 .../x/crypto/blake2b/blake2b_generic.go       | 182 ----
 .../x/crypto/blake2b/blake2b_ref.go           |  11 -
 vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 ----
 .../golang.org/x/crypto/blake2b/register.go   |  30 -
 .../x/net/publicsuffix/data/children          | Bin 2976 -> 0 bytes
 .../golang.org/x/net/publicsuffix/data/nodes  | Bin 46610 -> 0 bytes
 .../golang.org/x/net/publicsuffix/data/text   |   1 -
 vendor/golang.org/x/net/publicsuffix/list.go  | 203 ----
 vendor/golang.org/x/net/publicsuffix/table.go |  70 --
 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s   |  17 -
 vendor/golang.org/x/sys/cpu/byteorder.go      |  66 --
 vendor/golang.org/x/sys/cpu/cpu.go            | 291 ------
 vendor/golang.org/x/sys/cpu/cpu_aix.go        |  33 -
 vendor/golang.org/x/sys/cpu/cpu_arm.go        |  73 --
 vendor/golang.org/x/sys/cpu/cpu_arm64.go      | 182 ----
 vendor/golang.org/x/sys/cpu/cpu_arm64.s       |  39 -
 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go   |  12 -
 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go   |  21 -
 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go     |  15 -
 .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go   |  11 -
 .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go   |  22 -
 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c   |  37 -
 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go  |  31 -
 vendor/golang.org/x/sys/cpu/cpu_linux.go      |  15 -
 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go  |  39 -
 .../golang.org/x/sys/cpu/cpu_linux_arm64.go   | 116 ---
 .../golang.org/x/sys/cpu/cpu_linux_mips64x.go |  22 -
 .../golang.org/x/sys/cpu/cpu_linux_noinit.go  |   9 -
 .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go  |  30 -
 .../golang.org/x/sys/cpu/cpu_linux_s390x.go   |  40 -
 vendor/golang.org/x/sys/cpu/cpu_loong64.go    |  12 -
 vendor/golang.org/x/sys/cpu/cpu_mips64x.go    |  15 -
 vendor/golang.org/x/sys/cpu/cpu_mipsx.go      |  11 -
 .../golang.org/x/sys/cpu/cpu_netbsd_arm64.go  | 173 ----
 .../golang.org/x/sys/cpu/cpu_openbsd_arm64.go |  65 --
 .../golang.org/x/sys/cpu/cpu_openbsd_arm64.s  |  11 -
 vendor/golang.org/x/sys/cpu/cpu_other_arm.go  |   9 -
 .../golang.org/x/sys/cpu/cpu_other_arm64.go   |   9 -
 .../golang.org/x/sys/cpu/cpu_other_mips64x.go |  11 -
 .../golang.org/x/sys/cpu/cpu_other_ppc64x.go  |  12 -
 .../golang.org/x/sys/cpu/cpu_other_riscv64.go |  11 -
 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go     |  16 -
 vendor/golang.org/x/sys/cpu/cpu_riscv64.go    |  11 -
 vendor/golang.org/x/sys/cpu/cpu_s390x.go      | 172 ----
 vendor/golang.org/x/sys/cpu/cpu_s390x.s       |  57 --
 vendor/golang.org/x/sys/cpu/cpu_wasm.go       |  17 -
 vendor/golang.org/x/sys/cpu/cpu_x86.go        | 151 ---
 vendor/golang.org/x/sys/cpu/cpu_x86.s         |  26 -
 vendor/golang.org/x/sys/cpu/cpu_zos.go        |  10 -
 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go  |  25 -
 vendor/golang.org/x/sys/cpu/endian_big.go     |  10 -
 vendor/golang.org/x/sys/cpu/endian_little.go  |  10 -
 vendor/golang.org/x/sys/cpu/hwcap_linux.go    |  71 --
 vendor/golang.org/x/sys/cpu/parse.go          |  43 -
 .../x/sys/cpu/proc_cpuinfo_linux.go           |  53 --
 vendor/golang.org/x/sys/cpu/runtime_auxv.go   |  16 -
 .../x/sys/cpu/runtime_auxv_go121.go           |  18 -
 .../golang.org/x/sys/cpu/syscall_aix_gccgo.go |  26 -
 .../x/sys/cpu/syscall_aix_ppc64_gc.go         |  35 -
 vendor/modules.txt                            |  20 +-
 165 files changed, 35 insertions(+), 21748 deletions(-)
 delete mode 100644 vendor/github.com/go-ini/ini/.editorconfig
 delete mode 100644 vendor/github.com/go-ini/ini/.gitignore
 delete mode 100644 vendor/github.com/go-ini/ini/.golangci.yml
 delete mode 100644 vendor/github.com/go-ini/ini/LICENSE
 delete mode 100644 vendor/github.com/go-ini/ini/Makefile
 delete mode 100644 vendor/github.com/go-ini/ini/README.md
 delete mode 100644 vendor/github.com/go-ini/ini/codecov.yml
 delete mode 100644 vendor/github.com/go-ini/ini/data_source.go
 delete mode 100644 vendor/github.com/go-ini/ini/deprecated.go
 delete mode 100644 vendor/github.com/go-ini/ini/error.go
 delete mode 100644 vendor/github.com/go-ini/ini/file.go
 delete mode 100644 vendor/github.com/go-ini/ini/helper.go
 delete mode 100644 vendor/github.com/go-ini/ini/ini.go
 delete mode 100644 vendor/github.com/go-ini/ini/key.go
 delete mode 100644 vendor/github.com/go-ini/ini/parser.go
 delete mode 100644 vendor/github.com/go-ini/ini/section.go
 delete mode 100644 vendor/github.com/go-ini/ini/struct.go
 delete mode 100644 vendor/github.com/minio/minio-go/.gitignore
 delete mode 100644 vendor/github.com/minio/minio-go/.travis.yml
 delete mode 100644 vendor/github.com/minio/minio-go/CONTRIBUTING.md
 delete mode 100644 vendor/github.com/minio/minio-go/LICENSE
 delete mode 100644 vendor/github.com/minio/minio-go/MAINTAINERS.md
 delete mode 100644 vendor/github.com/minio/minio-go/Makefile
 delete mode 100644 vendor/github.com/minio/minio-go/NOTICE
 delete mode 100644 vendor/github.com/minio/minio-go/README.md
 delete mode 100644 vendor/github.com/minio/minio-go/README_zh_CN.md
 delete mode 100644 vendor/github.com/minio/minio-go/api-compose-object.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-datatypes.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-error-response.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-get-lifecycle.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-get-object-acl.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-get-object-context.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-get-object-file.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-get-object.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-get-options.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-get-policy.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-list.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-notification.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-presigned.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-put-bucket.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-put-object-common.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-put-object-context.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-put-object-copy.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-put-object-file-context.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-put-object-file.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-put-object-multipart.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-put-object-streaming.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-put-object.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-remove.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-s3-datatypes.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-select.go
 delete mode 100644 vendor/github.com/minio/minio-go/api-stat.go
 delete mode 100644 vendor/github.com/minio/minio-go/api.go
 delete mode 100644 vendor/github.com/minio/minio-go/appveyor.yml
 delete mode 100644 vendor/github.com/minio/minio-go/bucket-cache.go
 delete mode 100644 vendor/github.com/minio/minio-go/bucket-notification.go
 delete mode 100644 vendor/github.com/minio/minio-go/constants.go
 delete mode 100644 vendor/github.com/minio/minio-go/core.go
 delete mode 100644 vendor/github.com/minio/minio-go/hook-reader.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/credentials/chain.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/credentials/credentials.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/credentials/doc.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/credentials/static.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/credentials/sts_client_grants.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/credentials/sts_web_identity.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
 delete mode 100644 vendor/github.com/minio/minio-go/pkg/set/stringset.go
 delete mode 100644 vendor/github.com/minio/minio-go/post-policy.go
 delete mode 100644 vendor/github.com/minio/minio-go/retry-continous.go
 delete mode 100644 vendor/github.com/minio/minio-go/retry.go
 delete mode 100644 vendor/github.com/minio/minio-go/s3-endpoints.go
 delete mode 100644 vendor/github.com/minio/minio-go/s3-error.go
 delete mode 100644 vendor/github.com/minio/minio-go/transport.go
 delete mode 100644 vendor/github.com/minio/minio-go/utils.go
 delete mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE
 delete mode 100644 vendor/github.com/mitchellh/go-homedir/README.md
 delete mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go
 delete mode 100644 vendor/golang.org/x/crypto/argon2/argon2.go
 delete mode 100644 vendor/golang.org/x/crypto/argon2/blake2b.go
 delete mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.go
 delete mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.s
 delete mode 100644 vendor/golang.org/x/crypto/argon2/blamka_generic.go
 delete mode 100644 vendor/golang.org/x/crypto/argon2/blamka_ref.go
 delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go
 delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go
 delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
 delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
 delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go
 delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go
 delete mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go
 delete mode 100644 vendor/golang.org/x/crypto/blake2b/register.go
 delete mode 100644 vendor/golang.org/x/net/publicsuffix/data/children
 delete mode 100644 vendor/golang.org/x/net/publicsuffix/data/nodes
 delete mode 100644 vendor/golang.org/x/net/publicsuffix/data/text
 delete mode 100644 vendor/golang.org/x/net/publicsuffix/list.go
 delete mode 100644 vendor/golang.org/x/net/publicsuffix/table.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
 delete mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_aix.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_loong64.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/endian_big.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/endian_little.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/parse.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
 delete mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go

diff --git a/go.mod b/go.mod
index ec1a07560..25c9304d9 100644
--- a/go.mod
+++ b/go.mod
@@ -6,13 +6,12 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0
 	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1
 	github.com/alecthomas/kingpin v2.2.6+incompatible
-	github.com/aws/aws-sdk-go v1.55.1
+	github.com/aws/aws-sdk-go v1.55.5
 	github.com/docker/docker v27.1.1+incompatible
 	github.com/golang/snappy v0.0.4
 	github.com/google/uuid v1.6.0
 	github.com/klauspost/compress v1.17.8
 	github.com/klauspost/pgzip v1.2.6
-	github.com/minio/minio-go v6.0.14+incompatible
 	github.com/mongodb/mongo-tools v0.0.0-20240723193119-837c2bc263f4
 	github.com/pierrec/lz4 v2.6.1+incompatible
 	github.com/pkg/errors v0.9.1
@@ -33,13 +32,11 @@ require (
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/frankban/quicktest v1.14.6 // indirect
-	github.com/go-ini/ini v1.67.0 // indirect
 	github.com/go-logr/logr v1.4.1 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/jessevdk/go-flags v1.5.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
-	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/term v0.5.0 // indirect
 	github.com/montanaflynn/stats v0.7.1 // indirect
diff --git a/go.sum b/go.sum
index 467a850d6..cefe4c034 100644
--- a/go.sum
+++ b/go.sum
@@ -20,8 +20,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafo
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs=
 github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
-github.com/aws/aws-sdk-go v1.55.1 h1:ZTNPmbRMxaK5RlTJrBullX9r/rF1MPf3yAJOLlwDiT8=
-github.com/aws/aws-sdk-go v1.55.1/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
+github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
 github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
 github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
@@ -44,8 +44,6 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
 github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
-github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
-github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
 github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -89,10 +87,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o=
-github.com/minio/minio-go v6.0.14+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
 github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
 github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index d517a35a4..c3516e018 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -9503,6 +9503,12 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "eu-central-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "eu-north-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "eu-south-2",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-west-1",
 			}: endpoint{},
@@ -32566,6 +32572,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "ca-west-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-central-1",
 			}: endpoint{},
@@ -46412,6 +46421,9 @@ var awsisoPartition = partition{
 			endpointKey{
 				Region: "us-iso-east-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "us-iso-west-1",
+			}: endpoint{},
 		},
 	},
 	"appconfig": service{
@@ -47725,6 +47737,13 @@ var awsisobPartition = partition{
 			}: endpoint{},
 		},
 	},
+	"apigateway": service{
+		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "us-isob-east-1",
+			}: endpoint{},
+		},
+	},
 	"appconfig": service{
 		Endpoints: serviceEndpoints{
 			endpointKey{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
index 4601f883c..992ed0464 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -256,8 +256,17 @@ func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err erro
 		s := a.Expected.(int)
 		result = s == req.HTTPResponse.StatusCode
 	case ErrorWaiterMatch:
-		if aerr, ok := err.(awserr.Error); ok {
-			result = aerr.Code() == a.Expected.(string)
+		switch ex := a.Expected.(type) {
+		case string:
+			if aerr, ok := err.(awserr.Error); ok {
+				result = aerr.Code() == ex
+			}
+		case bool:
+			if ex {
+				result = err != nil
+			} else {
+				result = err == nil
+			}
 		}
 	default:
 		waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
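For context on the vendored waiter change above: ErrorWaiterMatch previously compared only awserr error codes (strings), while the patched match also accepts a boolean expectation. A hedged sketch of an acceptor using the new form (an editor's illustration, not taken from the patch; the variable name is made up, the field and constant names are from aws/request):

    package main

    import "github.com/aws/aws-sdk-go/aws/request"

    // failOnAnyError transitions the waiter to a failure state as soon as a
    // request returns any error at all; Expected: false would instead match
    // only the no-error case.
    var failOnAnyError = request.WaiterAcceptor{
    	State:    request.FailureWaiterState,
    	Matcher:  request.ErrorWaiterMatch,
    	Expected: true,
    }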
--git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index ae3853dba..d15e3c84c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.55.1" +const SDKVersion = "1.55.5" diff --git a/vendor/github.com/go-ini/ini/.editorconfig b/vendor/github.com/go-ini/ini/.editorconfig deleted file mode 100644 index 4a2d9180f..000000000 --- a/vendor/github.com/go-ini/ini/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -# http://editorconfig.org - -root = true - -[*] -charset = utf-8 -end_of_line = lf -insert_final_newline = true -trim_trailing_whitespace = true - -[*_test.go] -trim_trailing_whitespace = false diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore deleted file mode 100644 index 588388bda..000000000 --- a/vendor/github.com/go-ini/ini/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -testdata/conf_out.ini -ini.sublime-project -ini.sublime-workspace -testdata/conf_reflect.ini -.idea -/.vscode -.DS_Store diff --git a/vendor/github.com/go-ini/ini/.golangci.yml b/vendor/github.com/go-ini/ini/.golangci.yml deleted file mode 100644 index 631e36925..000000000 --- a/vendor/github.com/go-ini/ini/.golangci.yml +++ /dev/null @@ -1,27 +0,0 @@ -linters-settings: - staticcheck: - checks: [ - "all", - "-SA1019" # There are valid use cases of strings.Title - ] - nakedret: - max-func-lines: 0 # Disallow any unnamed return statement - -linters: - enable: - - deadcode - - errcheck - - gosimple - - govet - - ineffassign - - staticcheck - - structcheck - - typecheck - - unused - - varcheck - - nakedret - - gofmt - - rowserrcheck - - unconvert - - goimports - - unparam diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE deleted file mode 100644 index d361bbcdf..000000000 --- a/vendor/github.com/go-ini/ini/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. 
- -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright 2014 Unknwon - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile deleted file mode 100644 index f3b0dae2d..000000000 --- a/vendor/github.com/go-ini/ini/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -.PHONY: build test bench vet coverage - -build: vet bench - -test: - go test -v -cover -race - -bench: - go test -v -cover -test.bench=. 
-test.benchmem - -vet: - go vet - -coverage: - go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md deleted file mode 100644 index 30606d970..000000000 --- a/vendor/github.com/go-ini/ini/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# INI - -[![GitHub Workflow Status](https://img.shields.io/github/checks-status/go-ini/ini/main?logo=github&style=for-the-badge)](https://github.com/go-ini/ini/actions?query=branch%3Amain) -[![codecov](https://img.shields.io/codecov/c/github/go-ini/ini/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/go-ini/ini) -[![GoDoc](https://img.shields.io/badge/GoDoc-Reference-blue?style=for-the-badge&logo=go)](https://pkg.go.dev/github.com/go-ini/ini?tab=doc) -[![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg?style=for-the-badge&logo=sourcegraph)](https://sourcegraph.com/github.com/go-ini/ini) - -![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) - -Package ini provides INI file read and write functionality in Go. - -## Features - -- Load from multiple data sources(file, `[]byte`, `io.Reader` and `io.ReadCloser`) with overwrites. -- Read with recursion values. -- Read with parent-child sections. -- Read with auto-increment key names. -- Read with multiple-line values. -- Read with tons of helper methods. -- Read and convert values to Go types. -- Read and **WRITE** comments of sections and keys. -- Manipulate sections, keys and comments with ease. -- Keep sections and keys in order as you parse and save. - -## Installation - -The minimum requirement of Go is **1.13**. - -```sh -$ go get gopkg.in/ini.v1 -``` - -Please add `-u` flag to update in the future. - -## Getting Help - -- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started) -- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) -- 中国大陆镜像:https://ini.unknwon.cn - -## License - -This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/go-ini/ini/codecov.yml b/vendor/github.com/go-ini/ini/codecov.yml deleted file mode 100644 index e02ec84bc..000000000 --- a/vendor/github.com/go-ini/ini/codecov.yml +++ /dev/null @@ -1,16 +0,0 @@ -coverage: - range: "60...95" - status: - project: - default: - threshold: 1% - informational: true - patch: - defualt: - only_pulls: true - informational: true - -comment: - layout: 'diff' - -github_checks: false diff --git a/vendor/github.com/go-ini/ini/data_source.go b/vendor/github.com/go-ini/ini/data_source.go deleted file mode 100644 index c3a541f1d..000000000 --- a/vendor/github.com/go-ini/ini/data_source.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package ini - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" -) - -var ( - _ dataSource = (*sourceFile)(nil) - _ dataSource = (*sourceData)(nil) - _ dataSource = (*sourceReadCloser)(nil) -) - -// dataSource is an interface that returns object which can be read and closed. -type dataSource interface { - ReadCloser() (io.ReadCloser, error) -} - -// sourceFile represents an object that contains content on the local file system. -type sourceFile struct { - name string -} - -func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { - return os.Open(s.name) -} - -// sourceData represents an object that contains content in memory. -type sourceData struct { - data []byte -} - -func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(s.data)), nil -} - -// sourceReadCloser represents an input stream with Close method. -type sourceReadCloser struct { - reader io.ReadCloser -} - -func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { - return s.reader, nil -} - -func parseDataSource(source interface{}) (dataSource, error) { - switch s := source.(type) { - case string: - return sourceFile{s}, nil - case []byte: - return &sourceData{s}, nil - case io.ReadCloser: - return &sourceReadCloser{s}, nil - case io.Reader: - return &sourceReadCloser{ioutil.NopCloser(s)}, nil - default: - return nil, fmt.Errorf("error parsing data source: unknown type %q", s) - } -} diff --git a/vendor/github.com/go-ini/ini/deprecated.go b/vendor/github.com/go-ini/ini/deprecated.go deleted file mode 100644 index 48b8e66d6..000000000 --- a/vendor/github.com/go-ini/ini/deprecated.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -var ( - // Deprecated: Use "DefaultSection" instead. - DEFAULT_SECTION = DefaultSection - // Deprecated: AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. - AllCapsUnderscore = SnackCase -) diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go deleted file mode 100644 index f66bc94b8..000000000 --- a/vendor/github.com/go-ini/ini/error.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "fmt" -) - -// ErrDelimiterNotFound indicates the error type of no delimiter is found which there should be one. 
-type ErrDelimiterNotFound struct { - Line string -} - -// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound. -func IsErrDelimiterNotFound(err error) bool { - _, ok := err.(ErrDelimiterNotFound) - return ok -} - -func (err ErrDelimiterNotFound) Error() string { - return fmt.Sprintf("key-value delimiter not found: %s", err.Line) -} - -// ErrEmptyKeyName indicates the error type of no key name is found which there should be one. -type ErrEmptyKeyName struct { - Line string -} - -// IsErrEmptyKeyName returns true if the given error is an instance of ErrEmptyKeyName. -func IsErrEmptyKeyName(err error) bool { - _, ok := err.(ErrEmptyKeyName) - return ok -} - -func (err ErrEmptyKeyName) Error() string { - return fmt.Sprintf("empty key name: %s", err.Line) -} diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go deleted file mode 100644 index f8b22408b..000000000 --- a/vendor/github.com/go-ini/ini/file.go +++ /dev/null @@ -1,541 +0,0 @@ -// Copyright 2017 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "sync" -) - -// File represents a combination of one or more INI files in memory. -type File struct { - options LoadOptions - dataSources []dataSource - - // Should make things safe, but sometimes doesn't matter. - BlockMode bool - lock sync.RWMutex - - // To keep data in order. - sectionList []string - // To keep track of the index of a section with same name. - // This meta list is only used with non-unique section names are allowed. - sectionIndexes []int - - // Actual data is stored here. - sections map[string][]*Section - - NameMapper - ValueMapper -} - -// newFile initializes File object with given data sources. -func newFile(dataSources []dataSource, opts LoadOptions) *File { - if len(opts.KeyValueDelimiters) == 0 { - opts.KeyValueDelimiters = "=:" - } - if len(opts.KeyValueDelimiterOnWrite) == 0 { - opts.KeyValueDelimiterOnWrite = "=" - } - if len(opts.ChildSectionDelimiter) == 0 { - opts.ChildSectionDelimiter = "." - } - - return &File{ - BlockMode: true, - dataSources: dataSources, - sections: make(map[string][]*Section), - options: opts, - } -} - -// Empty returns an empty file object. -func Empty(opts ...LoadOptions) *File { - var opt LoadOptions - if len(opts) > 0 { - opt = opts[0] - } - - // Ignore error here, we are sure our data is good. - f, _ := LoadSources(opt, []byte("")) - return f -} - -// NewSection creates a new section. 
-func (f *File) NewSection(name string) (*Section, error) { - if len(name) == 0 { - return nil, errors.New("empty section name") - } - - if (f.options.Insensitive || f.options.InsensitiveSections) && name != DefaultSection { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if !f.options.AllowNonUniqueSections && inSlice(name, f.sectionList) { - return f.sections[name][0], nil - } - - f.sectionList = append(f.sectionList, name) - - // NOTE: Append to indexes must happen before appending to sections, - // otherwise index will have off-by-one problem. - f.sectionIndexes = append(f.sectionIndexes, len(f.sections[name])) - - sec := newSection(f, name) - f.sections[name] = append(f.sections[name], sec) - - return sec, nil -} - -// NewRawSection creates a new section with an unparseable body. -func (f *File) NewRawSection(name, body string) (*Section, error) { - section, err := f.NewSection(name) - if err != nil { - return nil, err - } - - section.isRawSection = true - section.rawBody = body - return section, nil -} - -// NewSections creates a list of sections. -func (f *File) NewSections(names ...string) (err error) { - for _, name := range names { - if _, err = f.NewSection(name); err != nil { - return err - } - } - return nil -} - -// GetSection returns section by given name. -func (f *File) GetSection(name string) (*Section, error) { - secs, err := f.SectionsByName(name) - if err != nil { - return nil, err - } - - return secs[0], err -} - -// HasSection returns true if the file contains a section with given name. -func (f *File) HasSection(name string) bool { - section, _ := f.GetSection(name) - return section != nil -} - -// SectionsByName returns all sections with given name. -func (f *File) SectionsByName(name string) ([]*Section, error) { - if len(name) == 0 { - name = DefaultSection - } - if f.options.Insensitive || f.options.InsensitiveSections { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - secs := f.sections[name] - if len(secs) == 0 { - return nil, fmt.Errorf("section %q does not exist", name) - } - - return secs, nil -} - -// Section assumes named section exists and returns a zero-value when not. -func (f *File) Section(name string) *Section { - sec, err := f.GetSection(name) - if err != nil { - if name == "" { - name = DefaultSection - } - sec, _ = f.NewSection(name) - return sec - } - return sec -} - -// SectionWithIndex assumes named section exists and returns a new section when not. -func (f *File) SectionWithIndex(name string, index int) *Section { - secs, err := f.SectionsByName(name) - if err != nil || len(secs) <= index { - // NOTE: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. - newSec, _ := f.NewSection(name) - return newSec - } - - return secs[index] -} - -// Sections returns a list of Section stored in the current instance. -func (f *File) Sections() []*Section { - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sections := make([]*Section, len(f.sectionList)) - for i, name := range f.sectionList { - sections[i] = f.sections[name][f.sectionIndexes[i]] - } - return sections -} - -// ChildSections returns a list of child sections of given section name. -func (f *File) ChildSections(name string) []*Section { - return f.Section(name).ChildSections() -} - -// SectionStrings returns list of section names. 
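// An illustrative sketch of the section accessors above: Section()
// never fails (it auto-creates missing sections), while GetSection()
// reports them as errors.
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	f := ini.Empty()
	f.Section("server").Key("addr").SetValue("127.0.0.1")

	if _, err := f.GetSection("client"); err != nil {
		fmt.Println(err) // section "client" does not exist
	}
	fmt.Println(f.HasSection("server"))                   // true
	fmt.Println(f.Section("server").Key("addr").String()) // 127.0.0.1
}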
-func (f *File) SectionStrings() []string { - list := make([]string, len(f.sectionList)) - copy(list, f.sectionList) - return list -} - -// DeleteSection deletes a section or all sections with given name. -func (f *File) DeleteSection(name string) { - secs, err := f.SectionsByName(name) - if err != nil { - return - } - - for i := 0; i < len(secs); i++ { - // For non-unique sections, it is always needed to remove the first one so - // in the next iteration, the subsequent section continue having index 0. - // Ignoring the error as index 0 never returns an error. - _ = f.DeleteSectionWithIndex(name, 0) - } -} - -// DeleteSectionWithIndex deletes a section with given name and index. -func (f *File) DeleteSectionWithIndex(name string, index int) error { - if !f.options.AllowNonUniqueSections && index != 0 { - return fmt.Errorf("delete section with non-zero index is only allowed when non-unique sections is enabled") - } - - if len(name) == 0 { - name = DefaultSection - } - if f.options.Insensitive || f.options.InsensitiveSections { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - // Count occurrences of the sections - occurrences := 0 - - sectionListCopy := make([]string, len(f.sectionList)) - copy(sectionListCopy, f.sectionList) - - for i, s := range sectionListCopy { - if s != name { - continue - } - - if occurrences == index { - if len(f.sections[name]) <= 1 { - delete(f.sections, name) // The last one in the map - } else { - f.sections[name] = append(f.sections[name][:index], f.sections[name][index+1:]...) - } - - // Fix section lists - f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) - f.sectionIndexes = append(f.sectionIndexes[:i], f.sectionIndexes[i+1:]...) - - } else if occurrences > index { - // Fix the indices of all following sections with this name. - f.sectionIndexes[i-1]-- - } - - occurrences++ - } - - return nil -} - -func (f *File) reload(s dataSource) error { - r, err := s.ReadCloser() - if err != nil { - return err - } - defer r.Close() - - return f.parse(r) -} - -// Reload reloads and parses all data sources. -func (f *File) Reload() (err error) { - for _, s := range f.dataSources { - if err = f.reload(s); err != nil { - // In loose mode, we create an empty default section for nonexistent files. - if os.IsNotExist(err) && f.options.Loose { - _ = f.parse(bytes.NewBuffer(nil)) - continue - } - return err - } - if f.options.ShortCircuit { - return nil - } - } - return nil -} - -// Append appends one or more data sources and reloads automatically. -func (f *File) Append(source interface{}, others ...interface{}) error { - ds, err := parseDataSource(source) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - for _, s := range others { - ds, err = parseDataSource(s) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - } - return f.Reload() -} - -func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { - equalSign := DefaultFormatLeft + f.options.KeyValueDelimiterOnWrite + DefaultFormatRight - - if PrettyFormat || PrettyEqual { - equalSign = fmt.Sprintf(" %s ", f.options.KeyValueDelimiterOnWrite) - } - - // Use buffer to make sure target is safe until finish encoding. 
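// A sketch of Append above: it registers another data source and
// re-runs Reload over all sources, so later sources win for duplicate
// keys.
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	f, err := ini.Load([]byte("mode = dev\n"))
	if err != nil {
		fmt.Println(err)
		return
	}
	if err := f.Append([]byte("mode = prod\n")); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(f.Section("").Key("mode").String()) // prod
}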
- buf := bytes.NewBuffer(nil) - lastSectionIdx := len(f.sectionList) - 1 - for i, sname := range f.sectionList { - sec := f.SectionWithIndex(sname, f.sectionIndexes[i]) - if len(sec.Comment) > 0 { - // Support multiline comments - lines := strings.Split(sec.Comment, LineBreak) - for i := range lines { - if lines[i][0] != '#' && lines[i][0] != ';' { - lines[i] = "; " + lines[i] - } else { - lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) - } - - if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { - return nil, err - } - } - } - - if i > 0 || DefaultHeader || (i == 0 && strings.ToUpper(sec.name) != DefaultSection) { - if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return nil, err - } - } else { - // Write nothing if default section is empty - if len(sec.keyList) == 0 { - continue - } - } - - isLastSection := i == lastSectionIdx - if sec.isRawSection { - if _, err := buf.WriteString(sec.rawBody); err != nil { - return nil, err - } - - if PrettySection && !isLastSection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - continue - } - - // Count and generate alignment length and buffer spaces using the - // longest key. Keys may be modified if they contain certain characters so - // we need to take that into account in our calculation. - alignLength := 0 - if PrettyFormat { - for _, kname := range sec.keyList { - keyLength := len(kname) - // First case will surround key by ` and second by """ - if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { - keyLength += 2 - } else if strings.Contains(kname, "`") { - keyLength += 6 - } - - if keyLength > alignLength { - alignLength = keyLength - } - } - } - alignSpaces := bytes.Repeat([]byte(" "), alignLength) - - KeyList: - for _, kname := range sec.keyList { - key := sec.Key(kname) - if len(key.Comment) > 0 { - if len(indent) > 0 && sname != DefaultSection { - buf.WriteString(indent) - } - - // Support multiline comments - lines := strings.Split(key.Comment, LineBreak) - for i := range lines { - if lines[i][0] != '#' && lines[i][0] != ';' { - lines[i] = "; " + strings.TrimSpace(lines[i]) - } else { - lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) - } - - if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { - return nil, err - } - } - } - - if len(indent) > 0 && sname != DefaultSection { - buf.WriteString(indent) - } - - switch { - case key.isAutoIncrement: - kname = "-" - case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): - kname = "`" + kname + "`" - case strings.Contains(kname, "`"): - kname = `"""` + kname + `"""` - } - - writeKeyValue := func(val string) (bool, error) { - if _, err := buf.WriteString(kname); err != nil { - return false, err - } - - if key.isBooleanType { - buf.WriteString(LineBreak) - return true, nil - } - - // Write out alignment spaces before "=" sign - if PrettyFormat { - buf.Write(alignSpaces[:alignLength-len(kname)]) - } - - // In case key value contains "\n", "`", "\"", "#" or ";" - if strings.ContainsAny(val, "\n`") { - val = `"""` + val + `"""` - } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { - val = "`" + val + "`" - } else if len(strings.TrimSpace(val)) != len(val) { - val = `"` + val + `"` - } - if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { - return false, err - } - return false, nil - } - - shadows := key.ValueWithShadows() - 
if len(shadows) == 0 { - if _, err := writeKeyValue(""); err != nil { - return nil, err - } - } - - for _, val := range shadows { - exitLoop, err := writeKeyValue(val) - if err != nil { - return nil, err - } else if exitLoop { - continue KeyList - } - } - - for _, val := range key.nestedValues { - if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { - return nil, err - } - } - } - - if PrettySection && !isLastSection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - } - - return buf, nil -} - -// WriteToIndent writes content into io.Writer with given indention. -// If PrettyFormat has been set to be true, -// it will align "=" sign with spaces under each section. -func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { - buf, err := f.writeToBuffer(indent) - if err != nil { - return 0, err - } - return buf.WriteTo(w) -} - -// WriteTo writes file content into io.Writer. -func (f *File) WriteTo(w io.Writer) (int64, error) { - return f.WriteToIndent(w, "") -} - -// SaveToIndent writes content to file system with given value indention. -func (f *File) SaveToIndent(filename, indent string) error { - // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename after done. - buf, err := f.writeToBuffer(indent) - if err != nil { - return err - } - - return ioutil.WriteFile(filename, buf.Bytes(), 0666) -} - -// SaveTo writes content to file system. -func (f *File) SaveTo(filename string) error { - return f.SaveToIndent(filename, "") -} diff --git a/vendor/github.com/go-ini/ini/helper.go b/vendor/github.com/go-ini/ini/helper.go deleted file mode 100644 index f9d80a682..000000000 --- a/vendor/github.com/go-ini/ini/helper.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2019 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -func inSlice(str string, s []string) bool { - for _, v := range s { - if str == v { - return true - } - } - return false -} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go deleted file mode 100644 index 99e7f8651..000000000 --- a/vendor/github.com/go-ini/ini/ini.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package ini provides INI file read and write functionality in Go. 
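// A sketch of the write path above: WriteTo serializes the in-memory
// File, with "=" alignment governed by the package-level PrettyFormat
// flag; SaveTo does the same into a file.
package main

import (
	"fmt"
	"os"

	"github.com/go-ini/ini"
)

func main() {
	f := ini.Empty()
	sec, _ := f.NewSection("paths")
	_, _ = sec.NewKey("home", "/home/user")
	_, _ = sec.NewKey("data", "/var/lib/app")

	if _, err := f.WriteTo(os.Stdout); err != nil {
		fmt.Println(err)
	}
	// Output:
	// [paths]
	// home = /home/user
	// data = /var/lib/app
}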
-package ini - -import ( - "os" - "regexp" - "runtime" - "strings" -) - -const ( - // Maximum allowed depth when recursively substituing variable names. - depthValues = 99 -) - -var ( - // DefaultSection is the name of default section. You can use this var or the string literal. - // In most of cases, an empty string is all you need to access the section. - DefaultSection = "DEFAULT" - - // LineBreak is the delimiter to determine or compose a new line. - // This variable will be changed to "\r\n" automatically on Windows at package init time. - LineBreak = "\n" - - // Variable regexp pattern: %(variable)s - varPattern = regexp.MustCompile(`%\(([^)]+)\)s`) - - // DefaultHeader explicitly writes default section header. - DefaultHeader = false - - // PrettySection indicates whether to put a line between sections. - PrettySection = true - // PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output - // or reduce all possible spaces for compact format. - PrettyFormat = true - // PrettyEqual places spaces around "=" sign even when PrettyFormat is false. - PrettyEqual = false - // DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled. - DefaultFormatLeft = "" - // DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled. - DefaultFormatRight = "" -) - -var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") - -func init() { - if runtime.GOOS == "windows" && !inTest { - LineBreak = "\r\n" - } -} - -// LoadOptions contains all customized options used for load data source(s). -type LoadOptions struct { - // Loose indicates whether the parser should ignore nonexistent files or return error. - Loose bool - // Insensitive indicates whether the parser forces all section and key names to lowercase. - Insensitive bool - // InsensitiveSections indicates whether the parser forces all section to lowercase. - InsensitiveSections bool - // InsensitiveKeys indicates whether the parser forces all key names to lowercase. - InsensitiveKeys bool - // IgnoreContinuation indicates whether to ignore continuation lines while parsing. - IgnoreContinuation bool - // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. - IgnoreInlineComment bool - // SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs. - SkipUnrecognizableLines bool - // ShortCircuit indicates whether to ignore other configuration sources after loaded the first available configuration source. - ShortCircuit bool - // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. - // This type of keys are mostly used in my.cnf. - AllowBooleanKeys bool - // AllowShadows indicates whether to keep track of keys with same name under same section. - AllowShadows bool - // AllowNestedValues indicates whether to allow AWS-like nested values. - // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values - AllowNestedValues bool - // AllowPythonMultilineValues indicates whether to allow Python-like multi-line values. - // Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure - // Relevant quote: Values can also span multiple lines, as long as they are indented deeper - // than the first line of the value. 
- AllowPythonMultilineValues bool - // SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value. - // Docs: https://docs.python.org/2/library/configparser.html - // Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names. - // In the latter case, they need to be preceded by a whitespace character to be recognized as a comment. - SpaceBeforeInlineComment bool - // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format - // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" - UnescapeValueDoubleQuotes bool - // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format - // when value is NOT surrounded by any quotes. - // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all. - UnescapeValueCommentSymbols bool - // UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise - // conform to key/value pairs. Specify the names of those blocks here. - UnparseableSections []string - // KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:". - KeyValueDelimiters string - // KeyValueDelimiterOnWrite is the delimiter that are used to separate key and value output. By default, it is "=". - KeyValueDelimiterOnWrite string - // ChildSectionDelimiter is the delimiter that is used to separate child sections. By default, it is ".". - ChildSectionDelimiter string - // PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes). - PreserveSurroundedQuote bool - // DebugFunc is called to collect debug information (currently only useful to debug parsing Python-style multiline values). - DebugFunc DebugFunc - // ReaderBufferSize is the buffer size of the reader in bytes. - ReaderBufferSize int - // AllowNonUniqueSections indicates whether to allow sections with the same name multiple times. - AllowNonUniqueSections bool - // AllowDuplicateShadowValues indicates whether values for shadowed keys should be deduplicated. - AllowDuplicateShadowValues bool -} - -// DebugFunc is the type of function called to log parse events. -type DebugFunc func(message string) - -// LoadSources allows caller to apply customized options for loading from data source(s). -func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { - sources := make([]dataSource, len(others)+1) - sources[0], err = parseDataSource(source) - if err != nil { - return nil, err - } - for i := range others { - sources[i+1], err = parseDataSource(others[i]) - if err != nil { - return nil, err - } - } - f := newFile(sources, opts) - if err = f.Reload(); err != nil { - return nil, err - } - return f, nil -} - -// Load loads and parses from INI data sources. -// Arguments can be mixed of file name with string type, or raw data in []byte. -// It will return error if list contains nonexistent files. -func Load(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{}, source, others...) -} - -// LooseLoad has exactly same functionality as Load function -// except it ignores nonexistent files instead of returning error. -func LooseLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Loose: true}, source, others...) 
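// An illustrative sketch combining a few of the LoadOptions above:
// case-insensitive lookups plus my.cnf-style boolean keys.
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	src := []byte("[Server]\nskip-grant-tables\nPort = 3306\n")
	f, err := ini.LoadSources(ini.LoadOptions{
		Insensitive:      true, // section and key names are lowercased
		AllowBooleanKeys: true, // value-less keys become boolean keys
	}, src)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(f.Section("server").Key("port").String())                     // 3306
	fmt.Println(f.Section("server").Key("skip-grant-tables").MustBool(false)) // true
}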
-} - -// InsensitiveLoad has exactly same functionality as Load function -// except it forces all section and key names to be lowercased. -func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Insensitive: true}, source, others...) -} - -// ShadowLoad has exactly same functionality as Load function -// except it allows have shadow keys. -func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{AllowShadows: true}, source, others...) -} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go deleted file mode 100644 index a19d9f38e..000000000 --- a/vendor/github.com/go-ini/ini/key.go +++ /dev/null @@ -1,837 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" - "time" -) - -// Key represents a key under a section. -type Key struct { - s *Section - Comment string - name string - value string - isAutoIncrement bool - isBooleanType bool - - isShadow bool - shadows []*Key - - nestedValues []string -} - -// newKey simply return a key object with given values. -func newKey(s *Section, name, val string) *Key { - return &Key{ - s: s, - name: name, - value: val, - } -} - -func (k *Key) addShadow(val string) error { - if k.isShadow { - return errors.New("cannot add shadow to another shadow key") - } else if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add shadow to auto-increment or boolean key") - } - - if !k.s.f.options.AllowDuplicateShadowValues { - // Deduplicate shadows based on their values. - if k.value == val { - return nil - } - for i := range k.shadows { - if k.shadows[i].value == val { - return nil - } - } - } - - shadow := newKey(k.s, k.name, val) - shadow.isShadow = true - k.shadows = append(k.shadows, shadow) - return nil -} - -// AddShadow adds a new shadow key to itself. -func (k *Key) AddShadow(val string) error { - if !k.s.f.options.AllowShadows { - return errors.New("shadow key is not allowed") - } - return k.addShadow(val) -} - -func (k *Key) addNestedValue(val string) error { - if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add nested value to auto-increment or boolean key") - } - - k.nestedValues = append(k.nestedValues, val) - return nil -} - -// AddNestedValue adds a nested value to the key. -func (k *Key) AddNestedValue(val string) error { - if !k.s.f.options.AllowNestedValues { - return errors.New("nested value is not allowed") - } - return k.addNestedValue(val) -} - -// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv -type ValueMapper func(string) string - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// ValueWithShadows returns raw values of key and its shadows if any. 
Shadow -// keys with empty values are ignored from the returned list. -func (k *Key) ValueWithShadows() []string { - if len(k.shadows) == 0 { - if k.value == "" { - return []string{} - } - return []string{k.value} - } - - vals := make([]string, 0, len(k.shadows)+1) - if k.value != "" { - vals = append(vals, k.value) - } - for _, s := range k.shadows { - if s.value != "" { - vals = append(vals, s.value) - } - } - return vals -} - -// NestedValues returns nested values stored in the key. -// It is possible returned value is nil if no nested values stored in the key. -func (k *Key) NestedValues() []string { - return k.nestedValues -} - -// transformValue takes a raw value and transforms to its final string. -func (k *Key) transformValue(val string) string { - if k.s.f.ValueMapper != nil { - val = k.s.f.ValueMapper(val) - } - - // Fail-fast if no indicate char found for recursive value - if !strings.Contains(val, "%") { - return val - } - for i := 0; i < depthValues; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := vr[2 : len(vr)-2] - - // Search in the same section. - // If not found or found the key itself, then search again in default section. - nk, err := k.s.GetKey(noption) - if err != nil || k == nk { - nk, _ = k.s.f.Section("").GetKey(noption) - if nk == nil { - // Stop when no results found in the default section, - // and returns the value as-is. - break - } - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// String returns string representation of value. -func (k *Key) String() string { - return k.transformValue(k.value) -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - v, err := strconv.ParseInt(k.String(), 0, 64) - return int(v), err -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 0, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 0, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 0, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. 
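// A sketch of shadow keys, enabled by ShadowLoad above: repeated keys
// accumulate instead of overwriting, and ValueWithShadows returns all
// non-empty values in order.
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	src := []byte("[mysqld]\nreplicate-do-db = db1\nreplicate-do-db = db2\n")
	f, err := ini.ShadowLoad(src)
	if err != nil {
		fmt.Println(err)
		return
	}
	key := f.Section("mysqld").Key("replicate-do-db")
	fmt.Println(key.ValueWithShadows()) // [db1 db2]
}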
-func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. -func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - k.value = defaultVal - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatBool(defaultVal[0]) - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(int64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].String() - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].Format(format) - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
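// A sketch of the Must* accessors above: on a parse error they return
// the supplied default and also write it back into the key, so later
// reads see the same value.
package main

import (
	"fmt"
	"time"

	"github.com/go-ini/ini"
)

func main() {
	f, _ := ini.Load([]byte("timeout = oops\n"))
	key := f.Section("").Key("timeout")

	fmt.Println(key.MustDuration(5 * time.Second)) // 5s (fallback)
	fmt.Println(key.Value())                       // 5s (default was written back)
}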
-func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. 
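// A sketch of the In/Range helpers above: both fall back to a default
// instead of returning an error.
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	f, _ := ini.Load([]byte("level = warn\nworkers = 99\n"))
	sec := f.Section("")

	// Falls back to "info" when the value is not among the candidates.
	fmt.Println(sec.Key("level").In("info", []string{"debug", "info", "warn"})) // warn
	// Falls back to 4 when the value is outside [1, 16].
	fmt.Println(sec.Key("workers").RangeInt(4, 1, 16)) // 4
}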
-func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string divided by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - runes := []rune(str) - vals := make([]string, 0, 2) - var buf bytes.Buffer - escape := false - idx := 0 - for { - if escape { - escape = false - if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { - buf.WriteRune('\\') - } - buf.WriteRune(runes[idx]) - } else { - if runes[idx] == '\\' { - escape = true - } else if strings.HasPrefix(string(runes[idx:]), delim) { - idx += len(delim) - 1 - vals = append(vals, strings.TrimSpace(buf.String())) - buf.Reset() - } else { - buf.WriteRune(runes[idx]) - } - } - idx++ - if idx == len(runes) { - break - } - } - - if buf.Len() > 0 { - vals = append(vals, strings.TrimSpace(buf.String())) - } - - return vals -} - -// StringsWithShadows returns list of string divided by given delimiter. -// Shadows will also be appended if any. -func (k *Key) StringsWithShadows(delim string) []string { - vals := k.ValueWithShadows() - results := make([]string, 0, len(vals)*2) - for i := range vals { - if len(vals) == 0 { - continue - } - - results = append(results, strings.Split(vals[i], delim)...) - } - - for i := range results { - results[i] = k.transformValue(strings.TrimSpace(results[i])) - } - return results -} - -// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Float64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), true, false) - return vals -} - -// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Ints(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), true, false) - return vals -} - -// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Int64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), true, false) - return vals -} - -// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), true, false) - return vals -} - -// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), true, false) - return vals -} - -// Bools returns list of bool divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Bools(delim string) []bool { - vals, _ := k.parseBools(k.Strings(delim), true, false) - return vals -} - -// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). 
-func (k *Key) TimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then -// it will not be included to result list. -func (k *Key) ValidFloat64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), false, false) - return vals -} - -// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will -// not be included to result list. -func (k *Key) ValidInts(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), false, false) - return vals -} - -// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, -// then it will not be included to result list. -func (k *Key) ValidInt64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), false, false) - return vals -} - -// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, -// then it will not be included to result list. -func (k *Key) ValidUints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), false, false) - return vals -} - -// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. -func (k *Key) ValidUint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), false, false) - return vals -} - -// ValidBools returns list of bool divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. -func (k *Key) ValidBools(delim string) []bool { - vals, _ := k.parseBools(k.Strings(delim), false, false) - return vals -} - -// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) - return vals -} - -// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimes(delim string) []time.Time { - return k.ValidTimesFormat(time.RFC3339, delim) -} - -// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictFloat64s(delim string) ([]float64, error) { - return k.parseFloat64s(k.Strings(delim), false, true) -} - -// StrictInts returns list of int divided by given delimiter or error on first invalid input. -func (k *Key) StrictInts(delim string) ([]int, error) { - return k.parseInts(k.Strings(delim), false, true) -} - -// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictInt64s(delim string) ([]int64, error) { - return k.parseInt64s(k.Strings(delim), false, true) -} - -// StrictUints returns list of uint divided by given delimiter or error on first invalid input. 
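// A sketch of the three list-parsing families above, with a comma
// delimiter: Ints zero-fills invalid items, ValidInts drops them, and
// StrictInts fails on the first one.
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	f, _ := ini.Load([]byte("ports = 80,abc,443\n"))
	key := f.Section("").Key("ports")

	fmt.Println(key.Ints(","))      // [80 0 443]
	fmt.Println(key.ValidInts(",")) // [80 443]
	if _, err := key.StrictInts(","); err != nil {
		fmt.Println("strict:", err)
	}
}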
-func (k *Key) StrictUints(delim string) ([]uint, error) { - return k.parseUints(k.Strings(delim), false, true) -} - -// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictUint64s(delim string) ([]uint64, error) { - return k.parseUint64s(k.Strings(delim), false, true) -} - -// StrictBools returns list of bool divided by given delimiter or error on first invalid input. -func (k *Key) StrictBools(delim string) ([]bool, error) { - return k.parseBools(k.Strings(delim), false, true) -} - -// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { - return k.parseTimesFormat(format, k.Strings(delim), false, true) -} - -// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimes(delim string) ([]time.Time, error) { - return k.StrictTimesFormat(time.RFC3339, delim) -} - -// parseBools transforms strings to bools. -func (k *Key) parseBools(strs []string, addInvalid, returnOnInvalid bool) ([]bool, error) { - vals := make([]bool, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := parseBool(str) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(bool)) - } - } - return vals, err -} - -// parseFloat64s transforms strings to float64s. -func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { - vals := make([]float64, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseFloat(str, 64) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(float64)) - } - } - return vals, err -} - -// parseInts transforms strings to ints. -func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { - vals := make([]int, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseInt(str, 0, 64) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, int(val.(int64))) - } - } - return vals, err -} - -// parseInt64s transforms strings to int64s. -func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { - vals := make([]int64, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseInt(str, 0, 64) - return val, err - } - - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(int64)) - } - } - return vals, err -} - -// parseUints transforms strings to uints. 
-func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { - vals := make([]uint, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseUint(str, 0, 64) - return val, err - } - - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, uint(val.(uint64))) - } - } - return vals, err -} - -// parseUint64s transforms strings to uint64s. -func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { - vals := make([]uint64, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := strconv.ParseUint(str, 0, 64) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(uint64)) - } - } - return vals, err -} - -type Parser func(str string) (interface{}, error) - -// parseTimesFormat transforms strings to times in given format. -func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { - vals := make([]time.Time, 0, len(strs)) - parser := func(str string) (interface{}, error) { - val, err := time.Parse(format, str) - return val, err - } - rawVals, err := k.doParse(strs, addInvalid, returnOnInvalid, parser) - if err == nil { - for _, val := range rawVals { - vals = append(vals, val.(time.Time)) - } - } - return vals, err -} - -// doParse transforms strings to different types -func (k *Key) doParse(strs []string, addInvalid, returnOnInvalid bool, parser Parser) ([]interface{}, error) { - vals := make([]interface{}, 0, len(strs)) - for _, str := range strs { - val, err := parser(str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// SetValue changes key value. -func (k *Key) SetValue(v string) { - if k.s.f.BlockMode { - k.s.f.lock.Lock() - defer k.s.f.lock.Unlock() - } - - k.value = v - k.s.keysHash[k.name] = v -} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go deleted file mode 100644 index 44fc526c2..000000000 --- a/vendor/github.com/go-ini/ini/parser.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2015 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package ini - -import ( - "bufio" - "bytes" - "fmt" - "io" - "regexp" - "strconv" - "strings" - "unicode" -) - -const minReaderBufferSize = 4096 - -var pythonMultiline = regexp.MustCompile(`^([\t\f ]+)(.*)`) - -type parserOptions struct { - IgnoreContinuation bool - IgnoreInlineComment bool - AllowPythonMultilineValues bool - SpaceBeforeInlineComment bool - UnescapeValueDoubleQuotes bool - UnescapeValueCommentSymbols bool - PreserveSurroundedQuote bool - DebugFunc DebugFunc - ReaderBufferSize int -} - -type parser struct { - buf *bufio.Reader - options parserOptions - - isEOF bool - count int - comment *bytes.Buffer -} - -func (p *parser) debug(format string, args ...interface{}) { - if p.options.DebugFunc != nil { - p.options.DebugFunc(fmt.Sprintf(format, args...)) - } -} - -func newParser(r io.Reader, opts parserOptions) *parser { - size := opts.ReaderBufferSize - if size < minReaderBufferSize { - size = minReaderBufferSize - } - - return &parser{ - buf: bufio.NewReaderSize(r, size), - options: opts, - count: 1, - comment: &bytes.Buffer{}, - } -} - -// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. -// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding -func (p *parser) BOM() error { - mask, err := p.buf.Peek(2) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 2 { - return nil - } - - switch { - case mask[0] == 254 && mask[1] == 255: - fallthrough - case mask[0] == 255 && mask[1] == 254: - _, err = p.buf.Read(mask) - if err != nil { - return err - } - case mask[0] == 239 && mask[1] == 187: - mask, err := p.buf.Peek(3) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 3 { - return nil - } - if mask[2] == 191 { - _, err = p.buf.Read(mask) - if err != nil { - return err - } - } - } - return nil -} - -func (p *parser) readUntil(delim byte) ([]byte, error) { - data, err := p.buf.ReadBytes(delim) - if err != nil { - if err == io.EOF { - p.isEOF = true - } else { - return nil, err - } - } - return data, nil -} - -func cleanComment(in []byte) ([]byte, bool) { - i := bytes.IndexAny(in, "#;") - if i == -1 { - return nil, false - } - return in[i:], true -} - -func readKeyName(delimiters string, in []byte) (string, int, error) { - line := string(in) - - // Check if key name surrounded by quotes. 
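// A sketch of the quoted key names handled here: wrapping a name in
// backticks lets it contain the key-value delimiter itself.
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	f, err := ini.Load([]byte("`a=b` = value\n"))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(f.Section("").Key("a=b").String()) // value
}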
- var keyQuote string - if line[0] == '"' { - if len(line) > 6 && line[0:3] == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - - // Get out key name - var endIdx int - if len(keyQuote) > 0 { - startIdx := len(keyQuote) - // FIXME: fail case -> """"""name"""=value - pos := strings.Index(line[startIdx:], keyQuote) - if pos == -1 { - return "", -1, fmt.Errorf("missing closing key quote: %s", line) - } - pos += startIdx - - // Find key-value delimiter - i := strings.IndexAny(line[pos+startIdx:], delimiters) - if i < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - endIdx = pos + i - return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil - } - - endIdx = strings.IndexAny(line, delimiters) - if endIdx < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - if endIdx == 0 { - return "", -1, ErrEmptyKeyName{line} - } - - return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil -} - -func (p *parser) readMultilines(line, val, valQuote string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := string(data) - - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - - comment, has := cleanComment([]byte(next[pos:])) - if has { - p.comment.Write(bytes.TrimSpace(comment)) - } - break - } - val += next - if p.isEOF { - return "", fmt.Errorf("missing closing key quote from %q to %q", line, next) - } - } - return val, nil -} - -func (p *parser) readContinuationLines(val string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := strings.TrimSpace(string(data)) - - if len(next) == 0 { - break - } - val += next - if val[len(val)-1] != '\\' { - break - } - val = val[:len(val)-1] - } - return val, nil -} - -// hasSurroundedQuote check if and only if the first and last characters -// are quotes \" or \'. -// It returns false if any other parts also contain same kind of quotes. 
-func hasSurroundedQuote(in string, quote byte) bool { - return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && - strings.IndexByte(in[1:], quote) == len(in)-2 -} - -func (p *parser) readValue(in []byte, bufferSize int) (string, error) { - - line := strings.TrimLeftFunc(string(in), unicode.IsSpace) - if len(line) == 0 { - if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' { - return p.readPythonMultilines(line, bufferSize) - } - return "", nil - } - - var valQuote string - if len(line) > 3 && line[0:3] == `"""` { - valQuote = `"""` - } else if line[0] == '`' { - valQuote = "`" - } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' { - valQuote = `"` - } - - if len(valQuote) > 0 { - startIdx := len(valQuote) - pos := strings.LastIndex(line[startIdx:], valQuote) - // Check for multi-line value - if pos == -1 { - return p.readMultilines(line, line[startIdx:], valQuote) - } - - if p.options.UnescapeValueDoubleQuotes && valQuote == `"` { - return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil - } - return line[startIdx : pos+startIdx], nil - } - - lastChar := line[len(line)-1] - // Won't be able to reach here if value only contains whitespace - line = strings.TrimSpace(line) - trimmedLastChar := line[len(line)-1] - - // Check continuation lines when desired - if !p.options.IgnoreContinuation && trimmedLastChar == '\\' { - return p.readContinuationLines(line[:len(line)-1]) - } - - // Check if ignore inline comment - if !p.options.IgnoreInlineComment { - var i int - if p.options.SpaceBeforeInlineComment { - i = strings.Index(line, " #") - if i == -1 { - i = strings.Index(line, " ;") - } - - } else { - i = strings.IndexAny(line, "#;") - } - - if i > -1 { - p.comment.WriteString(line[i:]) - line = strings.TrimSpace(line[:i]) - } - - } - - // Trim single and double quotes - if (hasSurroundedQuote(line, '\'') || - hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { - line = line[1 : len(line)-1] - } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { - line = strings.ReplaceAll(line, `\;`, ";") - line = strings.ReplaceAll(line, `\#`, "#") - } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { - return p.readPythonMultilines(line, bufferSize) - } - - return line, nil -} - -func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) { - parserBufferPeekResult, _ := p.buf.Peek(bufferSize) - peekBuffer := bytes.NewBuffer(parserBufferPeekResult) - - for { - peekData, peekErr := peekBuffer.ReadBytes('\n') - if peekErr != nil && peekErr != io.EOF { - p.debug("readPythonMultilines: failed to peek with error: %v", peekErr) - return "", peekErr - } - - p.debug("readPythonMultilines: parsing %q", string(peekData)) - - peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) - p.debug("readPythonMultilines: matched %d parts", len(peekMatches)) - for n, v := range peekMatches { - p.debug(" %d: %q", n, v) - } - - // Return if not a Python multiline value. - if len(peekMatches) != 3 { - p.debug("readPythonMultilines: end of value, got: %q", line) - return line, nil - } - - // Advance the parser reader (buffer) in-sync with the peek buffer. - _, err := p.buf.Discard(len(peekData)) - if err != nil { - p.debug("readPythonMultilines: failed to skip to the end, returning error") - return "", err - } - - line += "\n" + peekMatches[0] - } -} - -// parse parses data through an io.Reader. 
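// A sketch of the Python-style multiline handling implemented by
// readPythonMultilines above: with the option enabled, lines indented
// deeper than the first line are folded into the value.
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	src := []byte("desc = first line\n  second line\n  third line\n")
	f, err := ini.LoadSources(ini.LoadOptions{AllowPythonMultilineValues: true}, src)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(f.Section("").Key("desc").String())
	// Output:
	// first line
	//   second line
	//   third line
}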
-func (f *File) parse(reader io.Reader) (err error) { - p := newParser(reader, parserOptions{ - IgnoreContinuation: f.options.IgnoreContinuation, - IgnoreInlineComment: f.options.IgnoreInlineComment, - AllowPythonMultilineValues: f.options.AllowPythonMultilineValues, - SpaceBeforeInlineComment: f.options.SpaceBeforeInlineComment, - UnescapeValueDoubleQuotes: f.options.UnescapeValueDoubleQuotes, - UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols, - PreserveSurroundedQuote: f.options.PreserveSurroundedQuote, - DebugFunc: f.options.DebugFunc, - ReaderBufferSize: f.options.ReaderBufferSize, - }) - if err = p.BOM(); err != nil { - return fmt.Errorf("BOM: %v", err) - } - - // Ignore error because default section name is never empty string. - name := DefaultSection - if f.options.Insensitive || f.options.InsensitiveSections { - name = strings.ToLower(DefaultSection) - } - section, _ := f.NewSection(name) - - // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key - var isLastValueEmpty bool - var lastRegularKey *Key - - var line []byte - var inUnparseableSection bool - - // NOTE: Iterate and increase `currentPeekSize` until - // the size of the parser buffer is found. - // TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`. - parserBufferSize := 0 - // NOTE: Peek 4kb at a time. - currentPeekSize := minReaderBufferSize - - if f.options.AllowPythonMultilineValues { - for { - peekBytes, _ := p.buf.Peek(currentPeekSize) - peekBytesLength := len(peekBytes) - - if parserBufferSize >= peekBytesLength { - break - } - - currentPeekSize *= 2 - parserBufferSize = peekBytesLength - } - } - - for !p.isEOF { - line, err = p.readUntil('\n') - if err != nil { - return err - } - - if f.options.AllowNestedValues && - isLastValueEmpty && len(line) > 0 { - if line[0] == ' ' || line[0] == '\t' { - err = lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) - if err != nil { - return err - } - continue - } - } - - line = bytes.TrimLeftFunc(line, unicode.IsSpace) - if len(line) == 0 { - continue - } - - // Comments - if line[0] == '#' || line[0] == ';' { - // Note: we do not care ending line break, - // it is needed for adding second line, - // so just clean it once at the end when set to value. 
- p.comment.Write(line) - continue - } - - // Section - if line[0] == '[' { - // Read to the next ']' (TODO: support quoted strings) - closeIdx := bytes.LastIndexByte(line, ']') - if closeIdx == -1 { - return fmt.Errorf("unclosed section: %s", line) - } - - name := string(line[1:closeIdx]) - section, err = f.NewSection(name) - if err != nil { - return err - } - - comment, has := cleanComment(line[closeIdx+1:]) - if has { - p.comment.Write(comment) - } - - section.Comment = strings.TrimSpace(p.comment.String()) - - // Reset auto-counter and comments - p.comment.Reset() - p.count = 1 - // Nested values can't span sections - isLastValueEmpty = false - - inUnparseableSection = false - for i := range f.options.UnparseableSections { - if f.options.UnparseableSections[i] == name || - ((f.options.Insensitive || f.options.InsensitiveSections) && strings.EqualFold(f.options.UnparseableSections[i], name)) { - inUnparseableSection = true - continue - } - } - continue - } - - if inUnparseableSection { - section.isRawSection = true - section.rawBody += string(line) - continue - } - - kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line) - if err != nil { - switch { - // Treat as boolean key when desired, and whole line is key name. - case IsErrDelimiterNotFound(err): - switch { - case f.options.AllowBooleanKeys: - kname, err := p.readValue(line, parserBufferSize) - if err != nil { - return err - } - key, err := section.NewBooleanKey(kname) - if err != nil { - return err - } - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - continue - - case f.options.SkipUnrecognizableLines: - continue - } - case IsErrEmptyKeyName(err) && f.options.SkipUnrecognizableLines: - continue - } - return err - } - - // Auto increment. - isAutoIncr := false - if kname == "-" { - isAutoIncr = true - kname = "#" + strconv.Itoa(p.count) - p.count++ - } - - value, err := p.readValue(line[offset:], parserBufferSize) - if err != nil { - return err - } - isLastValueEmpty = len(value) == 0 - - key, err := section.NewKey(kname, value) - if err != nil { - return err - } - key.isAutoIncrement = isAutoIncr - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - lastRegularKey = key - } - return nil -} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go deleted file mode 100644 index a3615d820..000000000 --- a/vendor/github.com/go-ini/ini/section.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "errors" - "fmt" - "strings" -) - -// Section represents a config section. 
-type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string - - isRawSection bool - rawBody string -} - -func newSection(f *File, name string) *Section { - return &Section{ - f: f, - name: name, - keys: make(map[string]*Key), - keyList: make([]string, 0, 10), - keysHash: make(map[string]string), - } -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// Body returns rawBody of Section if the section was marked as unparseable. -// It still follows the other rules of the INI format surrounding leading/trailing whitespace. -func (s *Section) Body() string { - return strings.TrimSpace(s.rawBody) -} - -// SetBody updates body content only if section is raw. -func (s *Section) SetBody(body string) { - if !s.isRawSection { - return - } - s.rawBody = body -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive || s.f.options.InsensitiveKeys { - name = strings.ToLower(name) - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - if s.f.options.AllowShadows { - if err := s.keys[name].addShadow(val); err != nil { - return nil, err - } - } else { - s.keys[name].value = val - s.keysHash[name] = val - } - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = newKey(s, name, val) - s.keysHash[name] = val - return s.keys[name], nil -} - -// NewBooleanKey creates a new boolean type key to given section. -func (s *Section) NewBooleanKey(name string) (*Key, error) { - key, err := s.NewKey(name, "true") - if err != nil { - return nil, err - } - - key.isBooleanType = true - return key, nil -} - -// GetKey returns key in section by given name. -func (s *Section) GetKey(name string) (*Key, error) { - if s.f.BlockMode { - s.f.lock.RLock() - } - if s.f.options.Insensitive || s.f.options.InsensitiveKeys { - name = strings.ToLower(name) - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } - break - } - return nil, fmt.Errorf("error when getting key of section %q: key %q not exists", s.name, name) - } - return key, nil -} - -// HasKey returns true if section contains a key with given name. -func (s *Section) HasKey(name string) bool { - key, _ := s.GetKey(name) - return key != nil -} - -// Deprecated: Use "HasKey" instead. -func (s *Section) Haskey(name string) bool { - return s.HasKey(name) -} - -// HasValue returns true if section contains given raw value. -func (s *Section) HasValue(value string) bool { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - for _, k := range s.keys { - if value == k.value { - return true - } - } - return false -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. 
- key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. -func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// ParentKeys returns list of keys of parent section. -func (s *Section) ParentKeys() []*Key { - var parentKeys []*Key - sname := s.name - for { - if i := strings.LastIndex(sname, s.f.options.ChildSectionDelimiter); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - parentKeys = append(parentKeys, sec.Keys()...) - } else { - break - } - - } - return parentKeys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := make(map[string]string, len(s.keysHash)) - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - delete(s.keysHash, name) - return - } - } -} - -// ChildSections returns a list of child sections of current section. -// For example, "[parent.child1]" and "[parent.child12]" are child sections -// of section "[parent]". -func (s *Section) ChildSections() []*Section { - prefix := s.name + s.f.options.ChildSectionDelimiter - children := make([]*Section, 0, 3) - for _, name := range s.f.sectionList { - if strings.HasPrefix(name, prefix) { - children = append(children, s.f.sections[name]...) - } - } - return children -} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go deleted file mode 100644 index a486b2fe0..000000000 --- a/vendor/github.com/go-ini/ini/struct.go +++ /dev/null @@ -1,747 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "strings" - "time" - "unicode" -) - -// NameMapper represents a ini tag name mapper. -type NameMapper func(string) string - -// Built-in name getters. -var ( - // SnackCase converts to format SNACK_CASE. - SnackCase NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - } - newstr = append(newstr, unicode.ToUpper(chr)) - } - return string(newstr) - } - // TitleUnderscore converts to format title_underscore. 
- TitleUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - chr -= 'A' - 'a' - } - newstr = append(newstr, chr) - } - return string(newstr) - } -) - -func (s *Section) parseFieldName(raw, actual string) string { - if len(actual) > 0 { - return actual - } - if s.f.NameMapper != nil { - return s.f.NameMapper(raw) - } - return raw -} - -func parseDelim(actual string) string { - if len(actual) > 0 { - return actual - } - return "," -} - -var reflectTime = reflect.TypeOf(time.Now()).Kind() - -// setSliceWithProperType sets proper values to slice based on its type. -func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - var strs []string - if allowShadow { - strs = key.StringsWithShadows(delim) - } else { - strs = key.Strings(delim) - } - - numVals := len(strs) - if numVals == 0 { - return nil - } - - var vals interface{} - var err error - - sliceOf := field.Type().Elem().Kind() - switch sliceOf { - case reflect.String: - vals = strs - case reflect.Int: - vals, err = key.parseInts(strs, true, false) - case reflect.Int64: - vals, err = key.parseInt64s(strs, true, false) - case reflect.Uint: - vals, err = key.parseUints(strs, true, false) - case reflect.Uint64: - vals, err = key.parseUint64s(strs, true, false) - case reflect.Float64: - vals, err = key.parseFloat64s(strs, true, false) - case reflect.Bool: - vals, err = key.parseBools(strs, true, false) - case reflectTime: - vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - if err != nil && isStrict { - return err - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflect.String: - slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) - case reflect.Int: - slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) - case reflect.Int64: - slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) - case reflect.Uint: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) - case reflect.Uint64: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) - case reflect.Float64: - slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) - case reflect.Bool: - slice.Index(i).Set(reflect.ValueOf(vals.([]bool)[i])) - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) - } - } - field.Set(slice) - return nil -} - -func wrapStrictError(err error, isStrict bool) error { - if isStrict { - return err - } - return nil -} - -// setWithProperType sets proper value to field based on its type, -// but it does not return error for failing parsing, -// because we want to use default value that is already assigned to struct. 
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - vt := t - isPtr := t.Kind() == reflect.Ptr - if isPtr { - vt = t.Elem() - } - switch vt.Kind() { - case reflect.String: - stringVal := key.String() - if isPtr { - field.Set(reflect.ValueOf(&stringVal)) - } else if len(stringVal) > 0 { - field.SetString(key.String()) - } - case reflect.Bool: - boolVal, err := key.Bool() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - field.Set(reflect.ValueOf(&boolVal)) - } else { - field.SetBool(boolVal) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - // ParseDuration will not return err for `0`, so check the type name - if vt.Name() == "Duration" { - durationVal, err := key.Duration() - if err != nil { - if intVal, err := key.Int64(); err == nil { - field.SetInt(intVal) - return nil - } - return wrapStrictError(err, isStrict) - } - if isPtr { - field.Set(reflect.ValueOf(&durationVal)) - } else if int64(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - } - return nil - } - - intVal, err := key.Int64() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - pv := reflect.New(t.Elem()) - pv.Elem().SetInt(intVal) - field.Set(pv) - } else { - field.SetInt(intVal) - } - // byte is an alias for uint8, so supporting uint8 breaks support for byte - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && uint64(durationVal) > 0 { - if isPtr { - field.Set(reflect.ValueOf(&durationVal)) - } else { - field.Set(reflect.ValueOf(durationVal)) - } - return nil - } - - uintVal, err := key.Uint64() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - pv := reflect.New(t.Elem()) - pv.Elem().SetUint(uintVal) - field.Set(pv) - } else { - field.SetUint(uintVal) - } - - case reflect.Float32, reflect.Float64: - floatVal, err := key.Float64() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - pv := reflect.New(t.Elem()) - pv.Elem().SetFloat(floatVal) - field.Set(pv) - } else { - field.SetFloat(floatVal) - } - case reflectTime: - timeVal, err := key.Time() - if err != nil { - return wrapStrictError(err, isStrict) - } - if isPtr { - field.Set(reflect.ValueOf(&timeVal)) - } else { - field.Set(reflect.ValueOf(timeVal)) - } - case reflect.Slice: - return setSliceWithProperType(key, field, delim, allowShadow, isStrict) - default: - return fmt.Errorf("unsupported type %q", t) - } - return nil -} - -func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool, allowNonUnique bool, extends bool) { - opts := strings.SplitN(tag, ",", 5) - rawName = opts[0] - for _, opt := range opts[1:] { - omitEmpty = omitEmpty || (opt == "omitempty") - allowShadow = allowShadow || (opt == "allowshadow") - allowNonUnique = allowNonUnique || (opt == "nonunique") - extends = extends || (opt == "extends") - } - return rawName, omitEmpty, allowShadow, allowNonUnique, extends -} - -// mapToField maps the given value to the matching field of the given section. -// The sectionIndex is the index (if non unique sections are enabled) to which the value should be added. 
-func (s *Section) mapToField(val reflect.Value, isStrict bool, sectionIndex int, sectionName string) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - rawName, _, allowShadow, allowNonUnique, extends := parseTagOptions(tag) - fieldName := s.parseFieldName(tpField.Name, rawName) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - isStruct := tpField.Type.Kind() == reflect.Struct - isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct - isAnonymousPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous - if isAnonymousPtr { - field.Set(reflect.New(tpField.Type.Elem())) - } - - if extends && (isAnonymousPtr || (isStruct && tpField.Anonymous)) { - if isStructPtr && field.IsNil() { - field.Set(reflect.New(tpField.Type.Elem())) - } - fieldSection := s - if rawName != "" { - sectionName = s.name + s.f.options.ChildSectionDelimiter + rawName - if secs, err := s.f.SectionsByName(sectionName); err == nil && sectionIndex < len(secs) { - fieldSection = secs[sectionIndex] - } - } - if err := fieldSection.mapToField(field, isStrict, sectionIndex, sectionName); err != nil { - return fmt.Errorf("map to field %q: %v", fieldName, err) - } - } else if isAnonymousPtr || isStruct || isStructPtr { - if secs, err := s.f.SectionsByName(fieldName); err == nil { - if len(secs) <= sectionIndex { - return fmt.Errorf("there are not enough sections (%d <= %d) for the field %q", len(secs), sectionIndex, fieldName) - } - // Only set the field to non-nil struct value if we have a section for it. - // Otherwise, we end up with a non-nil struct ptr even though there is no data. - if isStructPtr && field.IsNil() { - field.Set(reflect.New(tpField.Type.Elem())) - } - if err = secs[sectionIndex].mapToField(field, isStrict, sectionIndex, fieldName); err != nil { - return fmt.Errorf("map to field %q: %v", fieldName, err) - } - continue - } - } - - // Map non-unique sections - if allowNonUnique && tpField.Type.Kind() == reflect.Slice { - newField, err := s.mapToSlice(fieldName, field, isStrict) - if err != nil { - return fmt.Errorf("map to slice %q: %v", fieldName, err) - } - - field.Set(newField) - continue - } - - if key, err := s.GetKey(fieldName); err == nil { - delim := parseDelim(tpField.Tag.Get("delim")) - if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { - return fmt.Errorf("set field %q: %v", fieldName, err) - } - } - } - return nil -} - -// mapToSlice maps all sections with the same name and returns the new value. -// The type of the Value must be a slice. -func (s *Section) mapToSlice(secName string, val reflect.Value, isStrict bool) (reflect.Value, error) { - secs, err := s.f.SectionsByName(secName) - if err != nil { - return reflect.Value{}, err - } - - typ := val.Type().Elem() - for i, sec := range secs { - elem := reflect.New(typ) - if err = sec.mapToField(elem, isStrict, i, sec.name); err != nil { - return reflect.Value{}, fmt.Errorf("map to field from section %q: %v", secName, err) - } - - val = reflect.Append(val, elem.Elem()) - } - return val, nil -} - -// mapTo maps a section to object v. 
-func (s *Section) mapTo(v interface{}, isStrict bool) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("not a pointer to a struct") - } - - if typ.Kind() == reflect.Slice { - newField, err := s.mapToSlice(s.name, val, isStrict) - if err != nil { - return err - } - - val.Set(newField) - return nil - } - - return s.mapToField(val, isStrict, 0, s.name) -} - -// MapTo maps section to given struct. -func (s *Section) MapTo(v interface{}) error { - return s.mapTo(v, false) -} - -// StrictMapTo maps section to given struct in strict mode, -// which returns all possible error including value parsing error. -func (s *Section) StrictMapTo(v interface{}) error { - return s.mapTo(v, true) -} - -// MapTo maps file to given struct. -func (f *File) MapTo(v interface{}) error { - return f.Section("").MapTo(v) -} - -// StrictMapTo maps file to given struct in strict mode, -// which returns all possible error including value parsing error. -func (f *File) StrictMapTo(v interface{}) error { - return f.Section("").StrictMapTo(v) -} - -// MapToWithMapper maps data sources to given struct with name mapper. -func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.MapTo(v) -} - -// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, -// which returns all possible error including value parsing error. -func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.StrictMapTo(v) -} - -// MapTo maps data sources to given struct. -func MapTo(v, source interface{}, others ...interface{}) error { - return MapToWithMapper(v, nil, source, others...) -} - -// StrictMapTo maps data sources to given struct in strict mode, -// which returns all possible error including value parsing error. -func StrictMapTo(v, source interface{}, others ...interface{}) error { - return StrictMapToWithMapper(v, nil, source, others...) -} - -// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. 
-func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { - slice := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - sliceOf := field.Type().Elem().Kind() - - if allowShadow { - var keyWithShadows *Key - for i := 0; i < field.Len(); i++ { - var val string - switch sliceOf { - case reflect.String: - val = slice.Index(i).String() - case reflect.Int, reflect.Int64: - val = fmt.Sprint(slice.Index(i).Int()) - case reflect.Uint, reflect.Uint64: - val = fmt.Sprint(slice.Index(i).Uint()) - case reflect.Float64: - val = fmt.Sprint(slice.Index(i).Float()) - case reflect.Bool: - val = fmt.Sprint(slice.Index(i).Bool()) - case reflectTime: - val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - - if i == 0 { - keyWithShadows = newKey(key.s, key.name, val) - } else { - _ = keyWithShadows.AddShadow(val) - } - } - *key = *keyWithShadows - return nil - } - - var buf bytes.Buffer - for i := 0; i < field.Len(); i++ { - switch sliceOf { - case reflect.String: - buf.WriteString(slice.Index(i).String()) - case reflect.Int, reflect.Int64: - buf.WriteString(fmt.Sprint(slice.Index(i).Int())) - case reflect.Uint, reflect.Uint64: - buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) - case reflect.Float64: - buf.WriteString(fmt.Sprint(slice.Index(i).Float())) - case reflect.Bool: - buf.WriteString(fmt.Sprint(slice.Index(i).Bool())) - case reflectTime: - buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-len(delim)]) - return nil -} - -// reflectWithProperType does the opposite thing as setWithProperType. -func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { - switch t.Kind() { - case reflect.String: - key.SetValue(field.String()) - case reflect.Bool: - key.SetValue(fmt.Sprint(field.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - key.SetValue(fmt.Sprint(field.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - key.SetValue(fmt.Sprint(field.Uint())) - case reflect.Float32, reflect.Float64: - key.SetValue(fmt.Sprint(field.Float())) - case reflectTime: - key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) - case reflect.Slice: - return reflectSliceWithProperType(key, field, delim, allowShadow) - case reflect.Ptr: - if !field.IsNil() { - return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow) - } - default: - return fmt.Errorf("unsupported type %q", t) - } - return nil -} - -// CR: copied from encoding/json/encode.go with modifications of time.Time support. -// TODO: add more test coverage. 
-func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflectTime: - t, ok := v.Interface().(time.Time) - return ok && t.IsZero() - } - return false -} - -// StructReflector is the interface implemented by struct types that can extract themselves into INI objects. -type StructReflector interface { - ReflectINIStruct(*File) error -} - -func (s *Section) reflectFrom(val reflect.Value) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - if !val.Field(i).CanInterface() { - continue - } - - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - rawName, omitEmpty, allowShadow, allowNonUnique, extends := parseTagOptions(tag) - if omitEmpty && isEmptyValue(field) { - continue - } - - if r, ok := field.Interface().(StructReflector); ok { - return r.ReflectINIStruct(s.f) - } - - fieldName := s.parseFieldName(tpField.Name, rawName) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - if extends && tpField.Anonymous && (tpField.Type.Kind() == reflect.Ptr || tpField.Type.Kind() == reflect.Struct) { - if err := s.reflectFrom(field); err != nil { - return fmt.Errorf("reflect from field %q: %v", fieldName, err) - } - continue - } - - if (tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct) || - (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { - // Note: The only error here is section doesn't exist. - sec, err := s.f.GetSection(fieldName) - if err != nil { - // Note: fieldName can never be empty here, ignore error. - sec, _ = s.f.NewSection(fieldName) - } - - // Add comment from comment tag - if len(sec.Comment) == 0 { - sec.Comment = tpField.Tag.Get("comment") - } - - if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("reflect from field %q: %v", fieldName, err) - } - continue - } - - if allowNonUnique && tpField.Type.Kind() == reflect.Slice { - slice := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - sliceOf := field.Type().Elem().Kind() - - for i := 0; i < field.Len(); i++ { - if sliceOf != reflect.Struct && sliceOf != reflect.Ptr { - return fmt.Errorf("field %q is not a slice of pointer or struct", fieldName) - } - - sec, err := s.f.NewSection(fieldName) - if err != nil { - return err - } - - // Add comment from comment tag - if len(sec.Comment) == 0 { - sec.Comment = tpField.Tag.Get("comment") - } - - if err := sec.reflectFrom(slice.Index(i)); err != nil { - return fmt.Errorf("reflect from field %q: %v", fieldName, err) - } - } - continue - } - - // Note: Same reason as section. 
-		key, err := s.GetKey(fieldName)
-		if err != nil {
-			key, _ = s.NewKey(fieldName, "")
-		}
-
-		// Add comment from comment tag
-		if len(key.Comment) == 0 {
-			key.Comment = tpField.Tag.Get("comment")
-		}
-
-		delim := parseDelim(tpField.Tag.Get("delim"))
-		if err = reflectWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
-			return fmt.Errorf("reflect field %q: %v", fieldName, err)
-		}
-
-	}
-	return nil
-}
-
-// ReflectFrom reflects section from given struct. It overwrites existing ones.
-func (s *Section) ReflectFrom(v interface{}) error {
-	typ := reflect.TypeOf(v)
-	val := reflect.ValueOf(v)
-
-	if s.name != DefaultSection && s.f.options.AllowNonUniqueSections &&
-		(typ.Kind() == reflect.Slice || typ.Kind() == reflect.Ptr) {
-		// Clear sections to make sure none exists before adding the new ones
-		s.f.DeleteSection(s.name)
-
-		if typ.Kind() == reflect.Ptr {
-			sec, err := s.f.NewSection(s.name)
-			if err != nil {
-				return err
-			}
-			return sec.reflectFrom(val.Elem())
-		}
-
-		slice := val.Slice(0, val.Len())
-		sliceOf := val.Type().Elem().Kind()
-		if sliceOf != reflect.Ptr {
-			return fmt.Errorf("not a slice of pointers")
-		}
-
-		for i := 0; i < slice.Len(); i++ {
-			sec, err := s.f.NewSection(s.name)
-			if err != nil {
-				return err
-			}
-
-			err = sec.reflectFrom(slice.Index(i))
-			if err != nil {
-				return fmt.Errorf("reflect from %dth field: %v", i, err)
-			}
-		}
-
-		return nil
-	}
-
-	if typ.Kind() == reflect.Ptr {
-		val = val.Elem()
-	} else {
-		return errors.New("not a pointer to a struct")
-	}
-
-	return s.reflectFrom(val)
-}
-
-// ReflectFrom reflects file from given struct.
-func (f *File) ReflectFrom(v interface{}) error {
-	return f.Section("").ReflectFrom(v)
-}
-
-// ReflectFromWithMapper reflects data sources from given struct with name mapper.
-func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
-	cfg.NameMapper = mapper
-	return cfg.ReflectFrom(v)
-}
-
-// ReflectFrom reflects data sources from given struct.
-func ReflectFrom(cfg *File, v interface{}) error {
-	return ReflectFromWithMapper(cfg, v, nil)
-}
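(Editor's note: to make the removed mapping machinery above concrete, here is a minimal round-trip sketch against the public API of this vendored library. The `Config` type, its tags, and the values are invented for illustration; the import path is the vendored copy being deleted here. `MapTo` drives `mapToField`/`setWithProperType`, and `ReflectFrom` drives `reflectWithProperType` from the code above.)

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

// Config is a hypothetical example type; the `ini` and `delim` tags are the
// hooks read by parseTagOptions/parseDelim in the removed code above.
type Config struct {
	Name  string   `ini:"name"`
	Port  int      `ini:"port"`
	Hosts []string `ini:"hosts" delim:","`
}

func main() {
	cfg, err := ini.Load([]byte("name = pbm\nport = 27017\nhosts = a,b,c\n"))
	if err != nil {
		log.Fatalln(err)
	}

	// MapTo walks the struct fields and fills them from the default section.
	var c Config
	if err := cfg.Section("").MapTo(&c); err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("%+v\n", c) // {Name:pbm Port:27017 Hosts:[a b c]}

	// ReflectFrom goes the other way, writing struct values back into keys.
	c.Port = 27018
	if err := cfg.Section("").ReflectFrom(&c); err != nil {
		log.Fatalln(err)
	}
}
```
diff --git a/vendor/github.com/minio/minio-go/.gitignore b/vendor/github.com/minio/minio-go/.gitignore
deleted file mode 100644
index fa967abd7..000000000
--- a/vendor/github.com/minio/minio-go/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-*~
-*.test
-validator
diff --git a/vendor/github.com/minio/minio-go/.travis.yml b/vendor/github.com/minio/minio-go/.travis.yml
deleted file mode 100644
index 7ed7df14e..000000000
--- a/vendor/github.com/minio/minio-go/.travis.yml
+++ /dev/null
@@ -1,28 +0,0 @@
-sudo: false
-language: go
-
-os:
-- linux
-
-env:
-- ARCH=x86_64
-- ARCH=i686
-
-go:
-- 1.11.x
-- tip
-
-matrix:
-  fast_finish: true
-  allow_failures:
-  - go: tip
-
-addons:
-  apt:
-    packages:
-    - devscripts
-
-script:
-- diff -au <(gofmt -d .) <(printf "")
-- diff -au <(licensecheck --check '.go$' --recursive --lines 0 * | grep -v -w 'Apache (v2.0)') <(printf "")
-- make
diff --git a/vendor/github.com/minio/minio-go/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/CONTRIBUTING.md
deleted file mode 100644
index 8b1ee86c6..000000000
--- a/vendor/github.com/minio/minio-go/CONTRIBUTING.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-### Developer Guidelines
-
-``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
-
-* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.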
- - Fork it - - Create your feature branch (git checkout -b my-new-feature) - - Commit your changes (git commit -am 'Add some feature') - - Push to the branch (git push origin my-new-feature) - - Create new Pull Request - -* When you're ready to create a pull request, be sure to: - - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request. - - Run `go fmt` - - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request. - - Make sure `go test -race ./...` and `go build` completes. - NOTE: go test runs functional tests and requires you to have a AWS S3 account. Set them as environment variables - ``ACCESS_KEY`` and ``SECRET_KEY``. To run shorter version of the tests please use ``go test -short -race ./...`` - -* Read [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project - - `minio-go` project is strictly conformant with Golang style - - if you happen to observe offending code, please feel free to send a pull request diff --git a/vendor/github.com/minio/minio-go/LICENSE b/vendor/github.com/minio/minio-go/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/minio/minio-go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
-
-   "Contribution" shall mean any work of authorship, including
-   the original version of the Work and any modifications or additions
-   to that Work or Derivative Works thereof, that is intentionally
-   submitted to Licensor for inclusion in the Work by the copyright owner
-   or by an individual or Legal Entity authorized to submit on behalf of
-   the copyright owner. For the purposes of this definition, "submitted"
-   means any form of electronic, verbal, or written communication sent
-   to the Licensor or its representatives, including but not limited to
-   communication on electronic mailing lists, source code control systems,
-   and issue tracking systems that are managed by, or on behalf of, the
-   Licensor for the purpose of discussing and improving the Work, but
-   excluding communication that is conspicuously marked or otherwise
-   designated in writing by the copyright owner as "Not a Contribution."
-
-   "Contributor" shall mean Licensor and any individual or Legal Entity
-   on behalf of whom a Contribution has been received by Licensor and
-   subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   copyright license to reproduce, prepare Derivative Works of,
-   publicly display, publicly perform, sublicense, and distribute the
-   Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-   this License, each Contributor hereby grants to You a perpetual,
-   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-   (except as stated in this section) patent license to make, have made,
-   use, offer to sell, sell, import, and otherwise transfer the Work,
-   where such license applies only to those patent claims licensable
-   by such Contributor that are necessarily infringed by their
-   Contribution(s) alone or by combination of their Contribution(s)
-   with the Work to which such Contribution(s) was submitted. If You
-   institute patent litigation against any entity (including a
-   cross-claim or counterclaim in a lawsuit) alleging that the Work
-   or a Contribution incorporated within the Work constitutes direct
-   or contributory patent infringement, then any patent licenses
-   granted to You under this License for that Work shall terminate
-   as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-   Work or Derivative Works thereof in any medium, with or without
-   modifications, and in Source or Object form, provided that You
-   meet the following conditions:
-
-   (a) You must give any other recipients of the Work or
-       Derivative Works a copy of this License; and
-
-   (b) You must cause any modified files to carry prominent notices
-       stating that You changed the files; and
-
-   (c) You must retain, in the Source form of any Derivative Works
-       that You distribute, all copyright, patent, trademark, and
-       attribution notices from the Source form of the Work,
-       excluding those notices that do not pertain to any part of
-       the Derivative Works; and
-
-   (d) If the Work includes a "NOTICE" text file as part of its
-       distribution, then any Derivative Works that You distribute must
-       include a readable copy of the attribution notices contained
-       within such NOTICE file, excluding those notices that do not
-       pertain to any part of the Derivative Works, in at least one
-       of the following places: within a NOTICE text file distributed
-       as part of the Derivative Works; within the Source form or
-       documentation, if provided along with the Derivative Works; or,
-       within a display generated by the Derivative Works, if and
-       wherever such third-party notices normally appear. The contents
-       of the NOTICE file are for informational purposes only and
-       do not modify the License. You may add Your own attribution
-       notices within Derivative Works that You distribute, alongside
-       or as an addendum to the NOTICE text from the Work, provided
-       that such additional attribution notices cannot be construed
-       as modifying the License.
-
-   You may add Your own copyright statement to Your modifications and
-   may provide additional or different license terms and conditions
-   for use, reproduction, or distribution of Your modifications, or
-   for any such Derivative Works as a whole, provided Your use,
-   reproduction, and distribution of the Work otherwise complies with
-   the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-   any Contribution intentionally submitted for inclusion in the Work
-   by You to the Licensor shall be under the terms and conditions of
-   this License, without any additional terms or conditions.
-   Notwithstanding the above, nothing herein shall supersede or modify
-   the terms of any separate license agreement you may have executed
-   with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-   names, trademarks, service marks, or product names of the Licensor,
-   except as required for reasonable and customary use in describing the
-   origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-   agreed to in writing, Licensor provides the Work (and each
-   Contributor provides its Contributions) on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-   implied, including, without limitation, any warranties or conditions
-   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-   PARTICULAR PURPOSE. You are solely responsible for determining the
-   appropriateness of using or redistributing the Work and assume any
-   risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-   whether in tort (including negligence), contract, or otherwise,
-   unless required by applicable law (such as deliberate and grossly
-   negligent acts) or agreed to in writing, shall any Contributor be
-   liable to You for damages, including any direct, indirect, special,
-   incidental, or consequential damages of any character arising as a
-   result of this License or out of the use or inability to use the
-   Work (including but not limited to damages for loss of goodwill,
-   work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses), even if such Contributor
-   has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-   the Work or Derivative Works thereof, You may choose to offer,
-   and charge a fee for, acceptance of support, warranty, indemnity,
-   or other liability obligations and/or rights consistent with this
-   License. However, in accepting such obligations, You may act only
-   on Your own behalf and on Your sole responsibility, not on behalf
-   of any other Contributor, and only if You agree to indemnify,
-   defend, and hold each Contributor harmless for any liability
-   incurred by, or claims asserted against, such Contributor by reason
-   of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-   To apply the Apache License to your work, attach the following
-   boilerplate notice, with the fields enclosed by brackets "[]"
-   replaced with your own identifying information. (Don't include
-   the brackets!) The text should be enclosed in the appropriate
-   comment syntax for the file format. We also recommend that a
-   file or class name and description of purpose be included on the
-   same "printed page" as the copyright notice for easier
-   identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/github.com/minio/minio-go/MAINTAINERS.md b/vendor/github.com/minio/minio-go/MAINTAINERS.md
deleted file mode 100644
index 17973078e..000000000
--- a/vendor/github.com/minio/minio-go/MAINTAINERS.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# For maintainers only
-
-## Responsibilities
-
-Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
-
-### Making new releases
-Tag and sign your release commit. Note that this step requires access to Minio's trusted private key.
-```sh
-$ export GNUPGHOME=/media/${USER}/minio/trusted
-$ git tag -s 4.0.0
-$ git push
-$ git push --tags
-```
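(Editor's note: once Minio's public key is imported, the signature on such a tag can be checked with stock git; `4.0.0` is the example version from the block above.)

```sh
$ git tag -v 4.0.0
```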
-
-### Update version
-Once the release has been made, update the `libraryVersion` constant in `api.go` to the next release version.
-
-```sh
-$ grep libraryVersion api.go
-    libraryVersion = "4.0.1"
-```
-
-Commit your changes
-```
-$ git commit -a -m "Update version for next release" --author "Minio Trusted "
-```
-
-### Announce
-Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `trusted@minio.io` account. Release notes require two sections, `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release, and Changelog contains a list of all commits since the last release.
-
-To generate the `changelog`
-```sh
-$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' ..
-```
diff --git a/vendor/github.com/minio/minio-go/Makefile b/vendor/github.com/minio/minio-go/Makefile
deleted file mode 100644
index bad81ffaf..000000000
--- a/vendor/github.com/minio/minio-go/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
-all: checks
-
-checks:
-	@go get -t ./...
-	@go vet ./...
-	@SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
-	@go get github.com/dustin/go-humanize/...
-	@go get github.com/sirupsen/logrus/...
-	@SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go
-	@mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done
-	@go get -u github.com/a8m/mark/...
-	@go get -u github.com/minio/cli/...
-	@go get -u golang.org/x/tools/cmd/goimports
-	@go get -u github.com/gernest/wow/...
-	@go build docs/validator.go && ./validator -m docs/API.md -t docs/checker.go.tpl
diff --git a/vendor/github.com/minio/minio-go/NOTICE b/vendor/github.com/minio/minio-go/NOTICE
deleted file mode 100644
index c521791c5..000000000
--- a/vendor/github.com/minio/minio-go/NOTICE
+++ /dev/null
@@ -1,2 +0,0 @@
-minio-go
-Copyright 2015-2017 Minio, Inc.
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md
deleted file mode 100644
index ad9d5e60b..000000000
--- a/vendor/github.com/minio/minio-go/README.md
+++ /dev/null
@@ -1,239 +0,0 @@
-# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
-
-The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
-
-This quickstart guide shows you how to install the Minio client SDK and connect to Minio, and walks you through a simple file-uploader example. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
-
-This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
-
-## Download from Github
-```sh
-go get -u github.com/minio/minio-go
-```
-
-## Initialize Minio Client
-The Minio client requires the following four parameters to connect to an Amazon S3 compatible object storage service.
-
-| Parameter | Description |
-| :--- | :--- |
-| endpoint | URL to object storage service. |
-| accessKeyID | Access key is the user ID that uniquely identifies your account. |
-| secretAccessKey | Secret key is the password to your account. |
-| secure | Set this value to 'true' to enable secure (HTTPS) access. |
-
-
-```go
-package main
-
-import (
-	"github.com/minio/minio-go"
-	"log"
-)
-
-func main() {
-	endpoint := "play.minio.io:9000"
-	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
-	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
-	useSSL := true
-
-	// Initialize minio client object.
-	minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	log.Printf("%#v\n", minioClient) // minioClient is now set up
-}
-```
-
-## Quick Start Example - File Uploader
-This example program connects to an object storage server, creates a bucket, and uploads a file to the bucket.
-
-We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
-
-### FileUploader.go
-```go
-package main
-
-import (
-	"github.com/minio/minio-go"
-	"log"
-)
-
-func main() {
-	endpoint := "play.minio.io:9000"
-	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
-	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
-	useSSL := true
-
-	// Initialize minio client object.
-	minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	// Make a new bucket called mymusic.
-	bucketName := "mymusic"
-	location := "us-east-1"
-
-	err = minioClient.MakeBucket(bucketName, location)
-	if err != nil {
-		// Check to see if we already own this bucket (which happens if you run this twice)
-		exists, err := minioClient.BucketExists(bucketName)
-		if err == nil && exists {
-			log.Printf("We already own %s\n", bucketName)
-		} else {
-			log.Fatalln(err)
-		}
-	} else {
-		log.Printf("Successfully created %s\n", bucketName)
-	}
-
-	// Upload the zip file
-	objectName := "golden-oldies.zip"
-	filePath := "/tmp/golden-oldies.zip"
-	contentType := "application/zip"
-
-	// Upload the zip file with FPutObject
-	n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType: contentType})
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
-}
-```
-
-### Run FileUploader
-```sh
-go run file-uploader.go
-2016/08/13 17:03:28 Successfully created mymusic
-2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
-
-mc ls play/mymusic/
-[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
-```
-
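(Editor's note: the quickstart above covers only the upload path. As a sketch of the reverse direction, assuming the same public play.minio.io credentials, the `mymusic` bucket created above, and an arbitrary local target path; `FGetObject` and `minio.GetObjectOptions` are from the API reference listed below.)

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Same public play.minio.io credentials as the uploader above.
	minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F",
		"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Download the object uploaded by FileUploader back to a local file.
	err = minioClient.FGetObject("mymusic", "golden-oldies.zip",
		"/tmp/golden-oldies-copy.zip", minio.GetObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Successfully downloaded golden-oldies.zip")
}
```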
-
-## API Reference
-The full API Reference is available here.
-
-* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
-
-### API Reference : Bucket Operations
-* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
-* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
-* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
-* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
-* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
-* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
-* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
-
-### API Reference : Bucket policy Operations
-* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
-* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
-
-### API Reference : Bucket notification Operations
-* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
-* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
-* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
-* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
-
-### API Reference : File Object Operations
-* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
-* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
-* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext)
-* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext)
-
-### API Reference : Object Operations
-* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
-* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
-* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext)
-* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext)
-* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
-* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
-* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
-* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
-* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
-* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
-* [`SelectObjectContent`](https://docs.minio.io/docs/golang-client-api-reference#SelectObjectContent)
-
-### API Reference : Presigned Operations
-* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
-* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
-* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject)
-* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
-
-### API Reference : Client custom settings
-* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
-* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
-* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
-* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
-
-## Full Examples
-
-### Full Examples : Bucket Operations
-* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
-* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
-* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
-* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
-* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
-* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
-* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
-
-### Full Examples : Bucket policy Operations
-* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
-* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
-* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
-
-### Full Examples : Bucket lifecycle Operations
-* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go)
-* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go)
-
-### Full Examples : Bucket notification Operations
-* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
-* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
-* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
-* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
-
-### Full Examples : File Object Operations
-* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
-* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
-* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
-* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
-
-### Full Examples : Object Operations
-* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
-* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
-* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
-* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
-* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
-* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
-* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
-* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
-* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
-
-### Full Examples : Encrypted Object Operations
-* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
-* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
-* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
-
-### Full Examples : Presigned Operations
-* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
-* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
-* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
-* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
-
-## Explore Further
-* [Complete Documentation](https://docs.minio.io)
-* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
-* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)
-
-## Contribute
-[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
-
-[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
-[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)
-
-## License
-This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](./LICENSE) and [NOTICE](./NOTICE) for more information.
diff --git a/vendor/github.com/minio/minio-go/README_zh_CN.md b/vendor/github.com/minio/minio-go/README_zh_CN.md
deleted file mode 100644
index a5acf199e..000000000
--- a/vendor/github.com/minio/minio-go/README_zh_CN.md
+++ /dev/null
@@ -1,245 +0,0 @@
-# 适用于与Amazon S3兼容云存储的Minio Go SDK [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
-
-Minio Go Client SDK提供了简单的API来访问任何与Amazon S3兼容的对象存储服务。
-
-**支持的云存储:**
-
-- AWS Signature Version 4
-  - Amazon S3
-  - Minio
-
-- AWS Signature Version 2
-  - Google Cloud Storage (兼容模式)
-  - Openstack Swift + Swift3 middleware
-  - Ceph Object Gateway
-  - Riak CS
-
-本文我们将学习如何安装Minio client SDK,连接到Minio,并提供一个文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference)。
-
-本文假设你已经有 [Go开发环境](https://docs.minio.io/docs/how-to-install-golang)。
-
-## 从Github下载
-```sh
-go get -u github.com/minio/minio-go
-```
-
-## 初始化Minio Client
-Minio client需要以下4个参数来连接与Amazon S3兼容的对象存储。
-
-| 参数 | 描述 |
-| :--- | :--- |
-| endpoint | 对象存储服务的URL |
-| accessKeyID | Access key是唯一标识你的账户的用户ID。 |
-| secretAccessKey | Secret key是你账户的密码。 |
-| secure | true代表使用HTTPS |
-
-```go
-package main
-
-import (
-	"github.com/minio/minio-go"
-	"log"
-)
-
-func main() {
-	endpoint := "play.minio.io:9000"
-	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
-	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
-	useSSL := true
-
-	// 初始化 minio client对象。
-	minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
-	if err != nil {
-		log.Fatalln(err)
-	}
-
-	log.Printf("%#v\n", minioClient) // minioClient初始化成功
-}
-```
-
-## 示例-文件上传
-本示例连接到一个对象存储服务,创建一个存储桶并上传一个文件到存储桶中。 - -我们在本示例中使用运行在 [https://play.minio.io:9000](https://play.minio.io:9000) 上的Minio服务,你可以用这个服务来开发和测试。示例中的访问凭据是公开的。 - -### FileUploader.go -```go -package main - -import ( - "github.com/minio/minio-go" - "log" -) - -func main() { - endpoint := "play.minio.io:9000" - accessKeyID := "Q3AM3UQ867SPQQA43P2F" - secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" - useSSL := true - - // 初使化minio client对象。 - minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) - if err != nil { - log.Fatalln(err) - } - - // 创建一个叫mymusic的存储桶。 - bucketName := "mymusic" - location := "us-east-1" - - err = minioClient.MakeBucket(bucketName, location) - if err != nil { - // 检查存储桶是否已经存在。 - exists, err := minioClient.BucketExists(bucketName) - if err == nil && exists { - log.Printf("We already own %s\n", bucketName) - } else { - log.Fatalln(err) - } - } - log.Printf("Successfully created %s\n", bucketName) - - // 上传一个zip文件。 - objectName := "golden-oldies.zip" - filePath := "/tmp/golden-oldies.zip" - contentType := "application/zip" - - // 使用FPutObject上传一个zip文件。 - n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType}) - if err != nil { - log.Fatalln(err) - } - - log.Printf("Successfully uploaded %s of size %d\n", objectName, n) -} -``` - -### 运行FileUploader -```sh -go run file-uploader.go -2016/08/13 17:03:28 Successfully created mymusic -2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413 - -mc ls play/mymusic/ -[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip -``` - -## API文档 -完整的API文档在这里。 -* [完整API文档](https://docs.minio.io/docs/golang-client-api-reference) - -### API文档 : 操作存储桶 -* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket) -* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets) -* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists) -* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket) -* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects) -* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2) -* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads) - -### API文档 : 存储桶策略 -* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy) -* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy) - -### API文档 : 存储桶通知 -* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification) -* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification) -* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification) -* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension) - -### API文档 : 操作文件对象 -* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) -* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext) -* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext) - -### API文档 : 操作对象 -* 
[`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject) -* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject) -* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext) -* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext) -* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming) -* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject) -* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject) -* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject) -* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects) -* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload) - -### API文档: 操作加密对象 -* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject) -* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject) - -### API文档 : Presigned操作 -* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject) -* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject) -* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject) -* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy) - -### API文档 : 客户端自定义设置 -* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo) -* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport) -* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn) -* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff) - -## 完整示例 - -### 完整示例 : 操作存储桶 -* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) -* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) -* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) -* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) -* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) -* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) -* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) - -### 完整示例 : 存储桶策略 -* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) -* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) -* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) - -### 完整示例 : 存储桶通知 -* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) -* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) -* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) -* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio扩展) - -### 完整示例 : 操作文件对象 -* 
[fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) -* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) -* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go) -* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go) - -### 完整示例 : 操作对象 -* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) -* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) -* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go) -* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go) -* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) -* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) -* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) -* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) -* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go) - -### 完整示例 : 操作加密对象 -* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go) -* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go) -* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go) - -### 完整示例 : Presigned操作 -* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go) -* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go) -* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go) -* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) - -## 了解更多 -* [完整文档](https://docs.minio.io) -* [Minio Go Client SDK API文档](https://docs.minio.io/docs/golang-client-api-reference) -* [Go 音乐播放器完整示例](https://docs.minio.io/docs/go-music-player-app) - -## 贡献 -[贡献指南](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md) - -[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) -[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go) - diff --git a/vendor/github.com/minio/minio-go/api-compose-object.go b/vendor/github.com/minio/minio-go/api-compose-object.go deleted file mode 100644 index 3ac36c502..000000000 --- a/vendor/github.com/minio/minio-go/api-compose-object.go +++ /dev/null @@ -1,565 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017, 2018 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
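
For orientation before the Go sources that follow, here is a minimal round-trip sketch of the file-object helpers listed in the README sections above, written against the legacy pre-v7 `github.com/minio/minio-go` API vendored here. The endpoint, credentials, bucket, and local paths are placeholders, and the `FPutObjectWithContext` signature is assumed from the reference list rather than shown in this diff:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	// Hypothetical endpoint and credentials; replace with real values.
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Bound both transfers with a single deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// Upload a local file, then download it back.
	n, err := client.FPutObjectWithContext(ctx, "mybucket", "report.zip",
		"/tmp/report.zip", minio.PutObjectOptions{ContentType: "application/zip"})
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("uploaded %d bytes", n)

	if err := client.FGetObjectWithContext(ctx, "mybucket", "report.zip",
		"/tmp/report-copy.zip", minio.GetObjectOptions{}); err != nil {
		log.Fatalln(err)
	}
}
```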
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/minio/minio-go/pkg/encrypt" - "github.com/minio/minio-go/pkg/s3utils" -) - -// DestinationInfo - type with information about the object to be -// created via server-side copy requests, using the Compose API. -type DestinationInfo struct { - bucket, object string - encryption encrypt.ServerSide - - // if no user-metadata is provided, it is copied from source - // (when there is only once source object in the compose - // request) - userMetadata map[string]string -} - -// NewDestinationInfo - creates a compose-object/copy-source -// destination info object. -// -// `encSSEC` is the key info for server-side-encryption with customer -// provided key. If it is nil, no encryption is performed. -// -// `userMeta` is the user-metadata key-value pairs to be set on the -// destination. The keys are automatically prefixed with `x-amz-meta-` -// if needed. If nil is passed, and if only a single source (of any -// size) is provided in the ComposeObject call, then metadata from the -// source is copied to the destination. -func NewDestinationInfo(bucket, object string, sse encrypt.ServerSide, userMeta map[string]string) (d DestinationInfo, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucket); err != nil { - return d, err - } - if err = s3utils.CheckValidObjectName(object); err != nil { - return d, err - } - - // Process custom-metadata to remove a `x-amz-meta-` prefix if - // present and validate that keys are distinct (after this - // prefix removal). - m := make(map[string]string) - for k, v := range userMeta { - if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") { - k = k[len("x-amz-meta-"):] - } - if _, ok := m[k]; ok { - return d, ErrInvalidArgument(fmt.Sprintf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k)) - } - m[k] = v - } - - return DestinationInfo{ - bucket: bucket, - object: object, - encryption: sse, - userMetadata: m, - }, nil -} - -// getUserMetaHeadersMap - construct appropriate key-value pairs to send -// as headers from metadata map to pass into copy-object request. For -// single part copy-object (i.e. non-multipart object), enable the -// withCopyDirectiveHeader to set the `x-amz-metadata-directive` to -// `REPLACE`, so that metadata headers from the source are not copied -// over. -func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) map[string]string { - if len(d.userMetadata) == 0 { - return nil - } - r := make(map[string]string) - if withCopyDirectiveHeader { - r["x-amz-metadata-directive"] = "REPLACE" - } - for k, v := range d.userMetadata { - if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) { - r[k] = v - } else { - r["x-amz-meta-"+k] = v - } - } - return r -} - -// SourceInfo - represents a source object to be copied, using -// server-side copying APIs. -type SourceInfo struct { - bucket, object string - start, end int64 - encryption encrypt.ServerSide - // Headers to send with the upload-part-copy request involving - // this source object. - Headers http.Header -} - -// NewSourceInfo - create a compose-object/copy-object source info -// object. -// -// `decryptSSEC` is the decryption key using server-side-encryption -// with customer provided key. It may be nil if the source is not -// encrypted. 
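
Taken together, `NewSourceInfo`, `SetRange`, `NewDestinationInfo`, and `ComposeObject` from this file are typically driven as in the hedged sketch below. Bucket and object names are hypothetical; the source objects must already exist, and every source except the last must be at least 5 MiB for the multipart copy path:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Three unencrypted source objects to concatenate server-side.
	srcs := []minio.SourceInfo{
		minio.NewSourceInfo("srcbucket", "part-1", nil),
		minio.NewSourceInfo("srcbucket", "part-2", nil),
		minio.NewSourceInfo("srcbucket", "part-3", nil),
	}

	// Optionally restrict a source to a byte range; only the last
	// source may end up smaller than the minimum part size.
	if err := srcs[2].SetRange(0, 1<<20-1); err != nil { // first 1 MiB only
		log.Fatalln(err)
	}

	// Destination with replacement user metadata (keys are
	// auto-prefixed with x-amz-meta- as needed).
	dst, err := minio.NewDestinationInfo("dstbucket", "joined", nil,
		map[string]string{"origin": "compose-example"})
	if err != nil {
		log.Fatalln(err)
	}

	// Server-side concatenation; no object data flows through the client.
	if err := client.ComposeObject(dst, srcs); err != nil {
		log.Fatalln(err)
	}
}
```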
-func NewSourceInfo(bucket, object string, sse encrypt.ServerSide) SourceInfo { - r := SourceInfo{ - bucket: bucket, - object: object, - start: -1, // range is unspecified by default - encryption: sse, - Headers: make(http.Header), - } - - // Set the source header - r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object)) - return r -} - -// SetRange - Set the start and end offset of the source object to be -// copied. If this method is not called, the whole source object is -// copied. -func (s *SourceInfo) SetRange(start, end int64) error { - if start > end || start < 0 { - return ErrInvalidArgument("start must be non-negative, and start must be at most end.") - } - // Note that 0 <= start <= end - s.start, s.end = start, end - return nil -} - -// SetMatchETagCond - Set ETag match condition. The object is copied -// only if the etag of the source matches the value given here. -func (s *SourceInfo) SetMatchETagCond(etag string) error { - if etag == "" { - return ErrInvalidArgument("ETag cannot be empty.") - } - s.Headers.Set("x-amz-copy-source-if-match", etag) - return nil -} - -// SetMatchETagExceptCond - Set the ETag match exception -// condition. The object is copied only if the etag of the source is -// not the value given here. -func (s *SourceInfo) SetMatchETagExceptCond(etag string) error { - if etag == "" { - return ErrInvalidArgument("ETag cannot be empty.") - } - s.Headers.Set("x-amz-copy-source-if-none-match", etag) - return nil -} - -// SetModifiedSinceCond - Set the modified since condition. -func (s *SourceInfo) SetModifiedSinceCond(modTime time.Time) error { - if modTime.IsZero() { - return ErrInvalidArgument("Input time cannot be 0.") - } - s.Headers.Set("x-amz-copy-source-if-modified-since", modTime.Format(http.TimeFormat)) - return nil -} - -// SetUnmodifiedSinceCond - Set the unmodified since condition. -func (s *SourceInfo) SetUnmodifiedSinceCond(modTime time.Time) error { - if modTime.IsZero() { - return ErrInvalidArgument("Input time cannot be 0.") - } - s.Headers.Set("x-amz-copy-source-if-unmodified-since", modTime.Format(http.TimeFormat)) - return nil -} - -// Helper to fetch size and etag of an object using a StatObject call. -func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[string]string, err error) { - // Get object info - need size and etag here. Also, decryption - // headers are added to the stat request if given. - var objInfo ObjectInfo - opts := StatObjectOptions{GetObjectOptions{ServerSideEncryption: encrypt.SSE(s.encryption)}} - objInfo, err = c.statObject(context.Background(), s.bucket, s.object, opts) - if err != nil { - err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)) - } else { - size = objInfo.Size - etag = objInfo.ETag - userMeta = make(map[string]string) - for k, v := range objInfo.Metadata { - if strings.HasPrefix(k, "x-amz-meta-") { - if len(v) > 0 { - userMeta[k] = v[0] - } - } - } - } - return -} - -// Low level implementation of CopyObject API, supports only upto 5GiB worth of copy. -func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, - metadata map[string]string) (ObjectInfo, error) { - - // Build headers. - headers := make(http.Header) - - // Set all the metadata headers. 
- for k, v := range metadata { - headers.Set(k, v) - } - - // Set the source header - headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) - - // Send upload-part-copy request - resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ - bucketName: destBucket, - objectName: destObject, - customHeader: headers, - }) - defer closeResponse(resp) - if err != nil { - return ObjectInfo{}, err - } - - // Check if we got an error response. - if resp.StatusCode != http.StatusOK { - return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject) - } - - cpObjRes := copyObjectResult{} - err = xmlDecoder(resp.Body, &cpObjRes) - if err != nil { - return ObjectInfo{}, err - } - - objInfo := ObjectInfo{ - Key: destObject, - ETag: strings.Trim(cpObjRes.ETag, "\""), - LastModified: cpObjRes.LastModified, - } - return objInfo, nil -} - -func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, - partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) { - - headers := make(http.Header) - - // Set source - headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) - - if startOffset < 0 { - return p, ErrInvalidArgument("startOffset must be non-negative") - } - - if length >= 0 { - headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)) - } - - for k, v := range metadata { - headers.Set(k, v) - } - - queryValues := make(url.Values) - queryValues.Set("partNumber", strconv.Itoa(partID)) - queryValues.Set("uploadId", uploadID) - - resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ - bucketName: destBucket, - objectName: destObject, - customHeader: headers, - queryValues: queryValues, - }) - defer closeResponse(resp) - if err != nil { - return - } - - // Check if we got an error response. - if resp.StatusCode != http.StatusOK { - return p, httpRespToErrorResponse(resp, destBucket, destObject) - } - - // Decode copy-part response on success. - cpObjRes := copyObjectResult{} - err = xmlDecoder(resp.Body, &cpObjRes) - if err != nil { - return p, err - } - p.PartNumber, p.ETag = partID, cpObjRes.ETag - return p, nil -} - -// uploadPartCopy - helper function to create a part in a multipart -// upload via an upload-part-copy request -// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html -func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, - headers http.Header) (p CompletePart, err error) { - - // Build query parameters - urlValues := make(url.Values) - urlValues.Set("partNumber", strconv.Itoa(partNumber)) - urlValues.Set("uploadId", uploadID) - - // Send upload-part-copy request - resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ - bucketName: bucket, - objectName: object, - customHeader: headers, - queryValues: urlValues, - }) - defer closeResponse(resp) - if err != nil { - return p, err - } - - // Check if we got an error response. - if resp.StatusCode != http.StatusOK { - return p, httpRespToErrorResponse(resp, bucket, object) - } - - // Decode copy-part response on success. - cpObjRes := copyObjectResult{} - err = xmlDecoder(resp.Body, &cpObjRes) - if err != nil { - return p, err - } - p.PartNumber, p.ETag = partNumber, cpObjRes.ETag - return p, nil -} - -// ComposeObjectWithProgress - creates an object using server-side copying of -// existing objects. 
It takes a list of source objects (with optional -// offsets) and concatenates them into a new object using only -// server-side copying operations. Optionally takes progress reader hook -// for applications to look at current progress. -func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo, progress io.Reader) error { - if len(srcs) < 1 || len(srcs) > maxPartsCount { - return ErrInvalidArgument("There must be as least one and up to 10000 source objects.") - } - ctx := context.Background() - srcSizes := make([]int64, len(srcs)) - var totalSize, size, totalParts int64 - var srcUserMeta map[string]string - etags := make([]string, len(srcs)) - var err error - for i, src := range srcs { - size, etags[i], srcUserMeta, err = src.getProps(c) - if err != nil { - return err - } - - // Error out if client side encryption is used in this source object when - // more than one source objects are given. - if len(srcs) > 1 && src.Headers.Get("x-amz-meta-x-amz-key") != "" { - return ErrInvalidArgument( - fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object)) - } - - // Check if a segment is specified, and if so, is the - // segment within object bounds? - if src.start != -1 { - // Since range is specified, - // 0 <= src.start <= src.end - // so only invalid case to check is: - if src.end >= size { - return ErrInvalidArgument( - fmt.Sprintf("SourceInfo %d has invalid segment-to-copy [%d, %d] (size is %d)", - i, src.start, src.end, size)) - } - size = src.end - src.start + 1 - } - - // Only the last source may be less than `absMinPartSize` - if size < absMinPartSize && i < len(srcs)-1 { - return ErrInvalidArgument( - fmt.Sprintf("SourceInfo %d is too small (%d) and it is not the last part", i, size)) - } - - // Is data to copy too large? - totalSize += size - if totalSize > maxMultipartPutObjectSize { - return ErrInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize)) - } - - // record source size - srcSizes[i] = size - - // calculate parts needed for current source - totalParts += partsRequired(size) - // Do we need more parts than we are allowed? - if totalParts > maxPartsCount { - return ErrInvalidArgument(fmt.Sprintf( - "Your proposed compose object requires more than %d parts", maxPartsCount)) - } - } - - // Single source object case (i.e. when only one source is - // involved, it is being copied wholly and at most 5GiB in - // size, emptyfiles are also supported). - if (totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize) || (totalSize == 0) { - return c.CopyObjectWithProgress(dst, srcs[0], progress) - } - - // Now, handle multipart-copy cases. - - // 1. Ensure that the object has not been changed while - // we are copying data. - for i, src := range srcs { - if src.Headers.Get("x-amz-copy-source-if-match") == "" { - src.SetMatchETagCond(etags[i]) - } - } - - // 2. Initiate a new multipart upload. - - // Set user-metadata on the destination object. If no - // user-metadata is specified, and there is only one source, - // (only) then metadata from source is copied. - userMeta := dst.getUserMetaHeadersMap(false) - metaMap := userMeta - if len(userMeta) == 0 && len(srcs) == 1 { - metaMap = srcUserMeta - } - metaHeaders := make(map[string]string) - for k, v := range metaMap { - metaHeaders[k] = v - } - - uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{ServerSideEncryption: dst.encryption, UserMetadata: metaHeaders}) - if err != nil { - return err - } - - // 3. 
Perform copy part uploads - objParts := []CompletePart{} - partIndex := 1 - for i, src := range srcs { - h := src.Headers - if src.encryption != nil { - encrypt.SSECopy(src.encryption).Marshal(h) - } - // Add destination encryption headers - if dst.encryption != nil { - dst.encryption.Marshal(h) - } - - // calculate start/end indices of parts after - // splitting. - startIdx, endIdx := calculateEvenSplits(srcSizes[i], src) - for j, start := range startIdx { - end := endIdx[j] - - // Add (or reset) source range header for - // upload part copy request. - h.Set("x-amz-copy-source-range", - fmt.Sprintf("bytes=%d-%d", start, end)) - - // make upload-part-copy request - complPart, err := c.uploadPartCopy(ctx, dst.bucket, - dst.object, uploadID, partIndex, h) - if err != nil { - return err - } - if progress != nil { - io.CopyN(ioutil.Discard, progress, end-start+1) - } - objParts = append(objParts, complPart) - partIndex++ - } - } - - // 4. Make final complete-multipart request. - _, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID, - completeMultipartUpload{Parts: objParts}) - if err != nil { - return err - } - return nil -} - -// ComposeObject - creates an object using server-side copying of -// existing objects. It takes a list of source objects (with optional -// offsets) and concatenates them into a new object using only -// server-side copying operations. -func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { - return c.ComposeObjectWithProgress(dst, srcs, nil) -} - -// partsRequired is maximum parts possible with -// max part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1)) -func partsRequired(size int64) int64 { - maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1) - r := size / int64(maxPartSize) - if size%int64(maxPartSize) > 0 { - r++ - } - return r -} - -// calculateEvenSplits - computes splits for a source and returns -// start and end index slices. Splits happen evenly to be sure that no -// part is less than 5MiB, as that could fail the multipart request if -// it is not the last part. -func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int64) { - if size == 0 { - return - } - - reqParts := partsRequired(size) - startIndex = make([]int64, reqParts) - endIndex = make([]int64, reqParts) - // Compute number of required parts `k`, as: - // - // k = ceiling(size / copyPartSize) - // - // Now, distribute the `size` bytes in the source into - // k parts as evenly as possible: - // - // r parts sized (q+1) bytes, and - // (k - r) parts sized q bytes, where - // - // size = q * k + r (by simple division of size by k, - // so that 0 <= r < k) - // - start := src.start - if start == -1 { - start = 0 - } - quot, rem := size/reqParts, size%reqParts - nextStart := start - for j := int64(0); j < reqParts; j++ { - curPartSize := quot - if j < rem { - curPartSize++ - } - - cStart := nextStart - cEnd := cStart + curPartSize - 1 - nextStart = cEnd + 1 - - startIndex[j], endIndex[j] = cStart, cEnd - } - return -} diff --git a/vendor/github.com/minio/minio-go/api-datatypes.go b/vendor/github.com/minio/minio-go/api-datatypes.go deleted file mode 100644 index 63fc08905..000000000 --- a/vendor/github.com/minio/minio-go/api-datatypes.go +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
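
The even-split arithmetic in `calculateEvenSplits` above distributes `size` bytes over `k` parts as `r` parts of `q+1` bytes and `k-r` parts of `q` bytes, where `size = q*k + r`. A tiny, self-contained re-implementation (illustrative only, not the vendored code itself) makes the behavior concrete:

```go
package main

import "fmt"

// evenSplits mirrors the arithmetic in calculateEvenSplits:
// size = q*k + r gives r parts of q+1 bytes and k-r parts of q bytes,
// so no part is ever more than one byte larger than another.
func evenSplits(size, k int64) []int64 {
	q, r := size/k, size%k
	parts := make([]int64, k)
	for j := int64(0); j < k; j++ {
		parts[j] = q
		if j < r {
			parts[j]++
		}
	}
	return parts
}

func main() {
	// 10 bytes over 3 parts: q=3, r=1 -> [4 3 3].
	fmt.Println(evenSplits(10, 3)) // [4 3 3]
}
```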
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "net/http" - "time" -) - -// BucketInfo container for bucket metadata. -type BucketInfo struct { - // The name of the bucket. - Name string `json:"name"` - // Date the bucket was created. - CreationDate time.Time `json:"creationDate"` -} - -// ObjectInfo container for object metadata. -type ObjectInfo struct { - // An ETag is optionally set to md5sum of an object. In case of multipart objects, - // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of - // each parts concatenated into one string. - ETag string `json:"etag"` - - Key string `json:"name"` // Name of the object - LastModified time.Time `json:"lastModified"` // Date and time the object was last modified. - Size int64 `json:"size"` // Size in bytes of the object. - ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data. - - // Collection of additional metadata on the object. - // eg: x-amz-meta-*, content-encoding etc. - Metadata http.Header `json:"metadata" xml:"-"` - - // Owner name. - Owner struct { - DisplayName string `json:"name"` - ID string `json:"id"` - } `json:"owner"` - - // The class of storage used to store the object. - StorageClass string `json:"storageClass"` - - // Error - Err error `json:"-"` -} - -// ObjectMultipartInfo container for multipart object metadata. -type ObjectMultipartInfo struct { - // Date and time at which the multipart upload was initiated. - Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"` - - Initiator initiator - Owner owner - - // The type of storage to use for the object. Defaults to 'STANDARD'. - StorageClass string - - // Key of the object for which the multipart upload was initiated. - Key string - - // Size in bytes of the object. - Size int64 - - // Upload ID that identifies the multipart upload. - UploadID string `xml:"UploadId"` - - // Error - Err error -} diff --git a/vendor/github.com/minio/minio-go/api-error-response.go b/vendor/github.com/minio/minio-go/api-error-response.go deleted file mode 100644 index 0170b8de8..000000000 --- a/vendor/github.com/minio/minio-go/api-error-response.go +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
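
The `ObjectInfo` container defined above is what `StatObject` returns; a short sketch of reading it, with hypothetical endpoint and object names:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// StatObject fills the ObjectInfo container defined above.
	info, err := client.StatObject("mybucket", "report.zip", minio.StatObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}

	log.Printf("key=%s size=%d etag=%s modified=%s content-type=%s",
		info.Key, info.Size, info.ETag, info.LastModified, info.ContentType)

	// User metadata and other headers live in the http.Header map.
	for k, v := range info.Metadata {
		log.Printf("meta %s: %v", k, v)
	}
}
```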
- */ - -package minio - -import ( - "encoding/xml" - "fmt" - "net/http" -) - -/* **** SAMPLE ERROR RESPONSE **** - - - AccessDenied - Access Denied - bucketName - objectName - F19772218238A85A - GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD - -*/ - -// ErrorResponse - Is the typed error returned by all API operations. -// ErrorResponse struct should be comparable since it is compared inside -// golang http API (https://github.com/golang/go/issues/29768) -type ErrorResponse struct { - XMLName xml.Name `xml:"Error" json:"-"` - Code string - Message string - BucketName string - Key string - RequestID string `xml:"RequestId"` - HostID string `xml:"HostId"` - - // Region where the bucket is located. This header is returned - // only in HEAD bucket and ListObjects response. - Region string - - // Underlying HTTP status code for the returned error - StatusCode int `xml:"-" json:"-"` -} - -// ToErrorResponse - Returns parsed ErrorResponse struct from body and -// http headers. -// -// For example: -// -// import s3 "github.com/minio/minio-go" -// ... -// ... -// reader, stat, err := s3.GetObject(...) -// if err != nil { -// resp := s3.ToErrorResponse(err) -// } -// ... -func ToErrorResponse(err error) ErrorResponse { - switch err := err.(type) { - case ErrorResponse: - return err - default: - return ErrorResponse{} - } -} - -// Error - Returns S3 error string. -func (e ErrorResponse) Error() string { - if e.Message == "" { - msg, ok := s3ErrorResponseMap[e.Code] - if !ok { - msg = fmt.Sprintf("Error response code %s.", e.Code) - } - return msg - } - return e.Message -} - -// Common string for errors to report issue location in unexpected -// cases. -const ( - reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues." -) - -// httpRespToErrorResponse returns a new encoded ErrorResponse -// structure as error. -func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { - if resp == nil { - msg := "Response is empty. " + reportIssue - return ErrInvalidArgument(msg) - } - - errResp := ErrorResponse{ - StatusCode: resp.StatusCode, - } - - err := xmlDecoder(resp.Body, &errResp) - // Xml decoding failed with no body, fall back to HTTP headers. - if err != nil { - switch resp.StatusCode { - case http.StatusNotFound: - if objectName == "" { - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "NoSuchBucket", - Message: "The specified bucket does not exist.", - BucketName: bucketName, - } - } else { - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "NoSuchKey", - Message: "The specified key does not exist.", - BucketName: bucketName, - Key: objectName, - } - } - case http.StatusForbidden: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "AccessDenied", - Message: "Access Denied.", - BucketName: bucketName, - Key: objectName, - } - case http.StatusConflict: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "Conflict", - Message: "Bucket not empty.", - BucketName: bucketName, - } - case http.StatusPreconditionFailed: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: "PreconditionFailed", - Message: s3ErrorResponseMap["PreconditionFailed"], - BucketName: bucketName, - Key: objectName, - } - default: - errResp = ErrorResponse{ - StatusCode: resp.StatusCode, - Code: resp.Status, - Message: resp.Status, - BucketName: bucketName, - } - } - } - - // Save hostID, requestID and region information - // from headers if not available through error XML. 
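
The `ToErrorResponse` usage hinted at in the doc comment above expands to a pattern like this hedged sketch (bucket and key names are placeholders):

```go
package main

import (
	"log"
	"net/http"

	"github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	_, err = client.StatObject("mybucket", "missing-key", minio.StatObjectOptions{})
	if err != nil {
		// ToErrorResponse recovers the typed S3 error; the zero value
		// comes back when err is not an ErrorResponse.
		resp := minio.ToErrorResponse(err)
		switch {
		case resp.Code == "NoSuchKey":
			log.Printf("object absent (HTTP %d): %s", resp.StatusCode, resp.Message)
		case resp.StatusCode == http.StatusForbidden:
			log.Printf("access denied in region %q", resp.Region)
		default:
			log.Fatalln(err)
		}
	}
}
```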
- if errResp.RequestID == "" { - errResp.RequestID = resp.Header.Get("x-amz-request-id") - } - if errResp.HostID == "" { - errResp.HostID = resp.Header.Get("x-amz-id-2") - } - if errResp.Region == "" { - errResp.Region = resp.Header.Get("x-amz-bucket-region") - } - if errResp.Code == "InvalidRegion" && errResp.Region != "" { - errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) - } - - return errResp -} - -// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. -func ErrTransferAccelerationBucket(bucketName string) error { - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "InvalidArgument", - Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.", - BucketName: bucketName, - } -} - -// ErrEntityTooLarge - Input size is larger than supported maximum. -func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { - msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "EntityTooLarge", - Message: msg, - BucketName: bucketName, - Key: objectName, - } -} - -// ErrEntityTooSmall - Input size is smaller than supported minimum. -func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error { - msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "EntityTooSmall", - Message: msg, - BucketName: bucketName, - Key: objectName, - } -} - -// ErrUnexpectedEOF - Unexpected end of file reached. -func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { - msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "UnexpectedEOF", - Message: msg, - BucketName: bucketName, - Key: objectName, - } -} - -// ErrInvalidBucketName - Invalid bucket name response. -func ErrInvalidBucketName(message string) error { - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "InvalidBucketName", - Message: message, - RequestID: "minio", - } -} - -// ErrInvalidObjectName - Invalid object name response. -func ErrInvalidObjectName(message string) error { - return ErrorResponse{ - StatusCode: http.StatusNotFound, - Code: "NoSuchKey", - Message: message, - RequestID: "minio", - } -} - -// ErrInvalidObjectPrefix - Invalid object prefix response is -// similar to object name response. -var ErrInvalidObjectPrefix = ErrInvalidObjectName - -// ErrInvalidArgument - Invalid argument response. -func ErrInvalidArgument(message string) error { - return ErrorResponse{ - StatusCode: http.StatusBadRequest, - Code: "InvalidArgument", - Message: message, - RequestID: "minio", - } -} - -// ErrNoSuchBucketPolicy - No Such Bucket Policy response -// The specified bucket does not have a bucket policy. 
-func ErrNoSuchBucketPolicy(message string) error { - return ErrorResponse{ - StatusCode: http.StatusNotFound, - Code: "NoSuchBucketPolicy", - Message: message, - RequestID: "minio", - } -} - -// ErrAPINotSupported - API not supported response -// The specified API call is not supported -func ErrAPINotSupported(message string) error { - return ErrorResponse{ - StatusCode: http.StatusNotImplemented, - Code: "APINotSupported", - Message: message, - RequestID: "minio", - } -} diff --git a/vendor/github.com/minio/minio-go/api-get-lifecycle.go b/vendor/github.com/minio/minio-go/api-get-lifecycle.go deleted file mode 100644 index 8097bfc02..000000000 --- a/vendor/github.com/minio/minio-go/api-get-lifecycle.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io/ioutil" - "net/http" - "net/url" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// GetBucketLifecycle - get bucket lifecycle. -func (c Client) GetBucketLifecycle(bucketName string) (string, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - bucketLifecycle, err := c.getBucketLifecycle(bucketName) - if err != nil { - errResponse := ToErrorResponse(err) - if errResponse.Code == "NoSuchLifecycleConfiguration" { - return "", nil - } - return "", err - } - return bucketLifecycle, nil -} - -// Request server for current bucket lifecycle. -func (c Client) getBucketLifecycle(bucketName string) (string, error) { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - - // Execute GET on bucket to get lifecycle. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - }) - - defer closeResponse(resp) - if err != nil { - return "", err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return "", httpRespToErrorResponse(resp, bucketName, "") - } - } - - bucketLifecycleBuf, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - - lifecycle := string(bucketLifecycleBuf) - return lifecycle, err -} diff --git a/vendor/github.com/minio/minio-go/api-get-object-acl.go b/vendor/github.com/minio/minio-go/api-get-object-acl.go deleted file mode 100644 index af5544da3..000000000 --- a/vendor/github.com/minio/minio-go/api-get-object-acl.go +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2018 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
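
`GetBucketLifecycle` above deliberately maps "NoSuchLifecycleConfiguration" to an empty string with a nil error, so callers distinguish "no configuration" from a real failure; a minimal sketch with a hypothetical bucket:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Returns the raw lifecycle XML; the empty string (with nil error)
	// means no lifecycle configuration is set on the bucket.
	lc, err := client.GetBucketLifecycle("mybucket")
	if err != nil {
		log.Fatalln(err)
	}
	if lc == "" {
		log.Println("no lifecycle configuration")
		return
	}
	log.Println(lc)
}
```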
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "net/http" - "net/url" -) - -type accessControlPolicy struct { - Owner struct { - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName"` - } `xml:"Owner"` - AccessControlList struct { - Grant []struct { - Grantee struct { - ID string `xml:"ID"` - DisplayName string `xml:"DisplayName"` - URI string `xml:"URI"` - } `xml:"Grantee"` - Permission string `xml:"Permission"` - } `xml:"Grant"` - } `xml:"AccessControlList"` -} - -//GetObjectACL get object ACLs -func (c Client) GetObjectACL(bucketName, objectName string) (*ObjectInfo, error) { - - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: url.Values{ - "acl": []string{""}, - }, - }) - if err != nil { - return nil, err - } - defer closeResponse(resp) - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, objectName) - } - - res := &accessControlPolicy{} - - if err := xmlDecoder(resp.Body, res); err != nil { - return nil, err - } - - objInfo, err := c.statObject(context.Background(), bucketName, objectName, StatObjectOptions{}) - if err != nil { - return nil, err - } - - cannedACL := getCannedACL(res) - if cannedACL != "" { - objInfo.Metadata.Add("X-Amz-Acl", cannedACL) - return &objInfo, nil - } - - grantACL := getAmzGrantACL(res) - for k, v := range grantACL { - objInfo.Metadata[k] = v - } - - return &objInfo, nil -} - -func getCannedACL(aCPolicy *accessControlPolicy) string { - grants := aCPolicy.AccessControlList.Grant - - switch { - case len(grants) == 1: - if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" { - return "private" - } - case len(grants) == 2: - for _, g := range grants { - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { - return "authenticated-read" - } - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { - return "public-read" - } - if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID { - return "bucket-owner-read" - } - } - case len(grants) == 3: - for _, g := range grants { - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { - return "public-read-write" - } - } - } - return "" -} - -func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string { - grants := aCPolicy.AccessControlList.Grant - res := map[string][]string{} - - for _, g := range grants { - switch { - case g.Permission == "READ": - res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID) - case g.Permission == "WRITE": - res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID) - case g.Permission == "READ_ACP": - res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID) - case g.Permission == "WRITE_ACP": - res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID) - case g.Permission == "FULL_CONTROL": - res["X-Amz-Grant-Full-Control"] = 
append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID) - } - } - return res -} diff --git a/vendor/github.com/minio/minio-go/api-get-object-context.go b/vendor/github.com/minio/minio-go/api-get-object-context.go deleted file mode 100644 index f8dfac7d6..000000000 --- a/vendor/github.com/minio/minio-go/api-get-object-context.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import "context" - -// GetObjectWithContext - returns an seekable, readable object. -// The options can be used to specify the GET request further. -func (c Client) GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { - return c.getObjectWithContext(ctx, bucketName, objectName, opts) -} diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go deleted file mode 100644 index a852220a2..000000000 --- a/vendor/github.com/minio/minio-go/api-get-object-file.go +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "os" - "path/filepath" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// FGetObjectWithContext - download contents of an object to a local file. -// The options can be used to specify the GET request further. -func (c Client) FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { - return c.fGetObjectWithContext(ctx, bucketName, objectName, filePath, opts) -} - -// FGetObject - download contents of an object to a local file. -func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error { - return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) -} - -// fGetObjectWithContext - fgetObject wrapper function with context -func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - // Verify if destination already exists. 
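
`GetObjectACL` above folds the ACL grants into the object's `Metadata`: either a single canned `X-Amz-Acl` value or a set of `X-Amz-Grant-*` entries, per the mapping just shown. A hedged usage sketch with placeholder names:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	info, err := client.GetObjectACL("mybucket", "report.zip")
	if err != nil {
		log.Fatalln(err)
	}

	// A recognized canned ACL takes precedence; otherwise inspect
	// the individual grant headers populated by getAmzGrantACL.
	if acl := info.Metadata.Get("X-Amz-Acl"); acl != "" {
		log.Printf("canned ACL: %s", acl)
		return
	}
	for _, h := range []string{"X-Amz-Grant-Read", "X-Amz-Grant-Write", "X-Amz-Grant-Full-Control"} {
		if v, ok := info.Metadata[h]; ok {
			log.Printf("%s: %v", h, v)
		}
	}
}
```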
- st, err := os.Stat(filePath) - if err == nil { - // If the destination exists and is a directory. - if st.IsDir() { - return ErrInvalidArgument("fileName is a directory.") - } - } - - // Proceed if file does not exist. return for all other errors. - if err != nil { - if !os.IsNotExist(err) { - return err - } - } - - // Extract top level directory. - objectDir, _ := filepath.Split(filePath) - if objectDir != "" { - // Create any missing top level directories. - if err := os.MkdirAll(objectDir, 0700); err != nil { - return err - } - } - - // Gather md5sum. - objectStat, err := c.StatObject(bucketName, objectName, StatObjectOptions{opts}) - if err != nil { - return err - } - - // Write to a temporary file "fileName.part.minio" before saving. - filePartPath := filePath + objectStat.ETag + ".part.minio" - - // If exists, open in append mode. If not create it as a part file. - filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) - if err != nil { - return err - } - - // Issue Stat to get the current offset. - st, err = filePart.Stat() - if err != nil { - return err - } - - // Initialize get object request headers to set the - // appropriate range offsets to read from. - if st.Size() > 0 { - opts.SetRange(st.Size(), 0) - } - - // Seek to current position for incoming reader. - objectReader, objectStat, err := c.getObject(ctx, bucketName, objectName, opts) - if err != nil { - return err - } - - // Write to the part file. - if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil { - return err - } - - // Close the file before rename, this is specifically needed for Windows users. - if err = filePart.Close(); err != nil { - return err - } - - // Safely completed. Now commit by renaming to actual filename. - if err = os.Rename(filePartPath, filePath); err != nil { - return err - } - - // Return. - return nil -} diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go deleted file mode 100644 index 0bf556ec6..000000000 --- a/vendor/github.com/minio/minio-go/api-get-object.go +++ /dev/null @@ -1,659 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "strings" - "sync" - "time" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// GetObject - returns an seekable, readable object. -func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) { - return c.getObjectWithContext(context.Background(), bucketName, objectName, opts) -} - -// GetObject wrapper function that accepts a request context -func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { - // Input validation. 
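
The resume logic above (stat the `.part.minio` file, then `SetRange(st.Size(), 0)`) relies on `GetObjectOptions.SetRange` with `end == 0` meaning "from offset to end of object". The same ranged read can be done directly, as in this sketch with a hypothetical offset and object:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Mirror the resume trick: skip bytes we already hold locally and
	// stream only the remainder. SetRange(offset, 0) reads from offset
	// to the end of the object.
	const alreadyHave = 4096 // hypothetical bytes already on disk
	opts := minio.GetObjectOptions{}
	if err := opts.SetRange(alreadyHave, 0); err != nil {
		log.Fatalln(err)
	}

	obj, err := client.GetObject("mybucket", "report.zip", opts)
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()

	if _, err := io.Copy(os.Stdout, obj); err != nil {
		log.Fatalln(err)
	}
}
```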
- if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - - var httpReader io.ReadCloser - var objectInfo ObjectInfo - var err error - - // Create request channel. - reqCh := make(chan getRequest) - // Create response channel. - resCh := make(chan getResponse) - // Create done channel. - doneCh := make(chan struct{}) - - // This routine feeds partial object data as and when the caller reads. - go func() { - defer close(reqCh) - defer close(resCh) - - // Used to verify if etag of object has changed since last read. - var etag string - - // Loop through the incoming control messages and read data. - for { - select { - // When the done channel is closed exit our routine. - case <-doneCh: - // Close the http response body before returning. - // This ends the connection with the server. - if httpReader != nil { - httpReader.Close() - } - return - - // Gather incoming request. - case req := <-reqCh: - // If this is the first request we may not need to do a getObject request yet. - if req.isFirstReq { - // First request is a Read/ReadAt. - if req.isReadOp { - // Differentiate between wanting the whole object and just a range. - if req.isReadAt { - // If this is a ReadAt request only get the specified range. - // Range is set with respect to the offset and length of the buffer requested. - // Do not set objectInfo from the first readAt request because it will not get - // the whole object. - opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) - } else if req.Offset > 0 { - opts.SetRange(req.Offset, 0) - } - httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts) - if err != nil { - resCh <- getResponse{Error: err} - return - } - etag = objectInfo.ETag - // Read at least firstReq.Buffer bytes, if not we have - // reached our EOF. - size, err := io.ReadFull(httpReader, req.Buffer) - if size > 0 && err == io.ErrUnexpectedEOF { - // If an EOF happens after reading some but not - // all the bytes ReadFull returns ErrUnexpectedEOF - err = io.EOF - } - // Send back the first response. - resCh <- getResponse{ - objectInfo: objectInfo, - Size: int(size), - Error: err, - didRead: true, - } - } else { - // First request is a Stat or Seek call. - // Only need to run a StatObject until an actual Read or ReadAt request comes through. - - // Remove range header if already set, for stat Operations to get original file size. - delete(opts.headers, "Range") - objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts}) - if err != nil { - resCh <- getResponse{ - Error: err, - } - // Exit the go-routine. - return - } - etag = objectInfo.ETag - // Send back the first response. - resCh <- getResponse{ - objectInfo: objectInfo, - } - } - } else if req.settingObjectInfo { // Request is just to get objectInfo. - // Remove range header if already set, for stat Operations to get original file size. - delete(opts.headers, "Range") - if etag != "" { - opts.SetMatchETag(etag) - } - objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts}) - if err != nil { - resCh <- getResponse{ - Error: err, - } - // Exit the goroutine. - return - } - // Send back the objectInfo. - resCh <- getResponse{ - objectInfo: objectInfo, - } - } else { - // Offset changes fetch the new object at an Offset. 
- // Because the httpReader may not be set by the first - // request if it was a stat or seek it must be checked - // if the object has been read or not to only initialize - // new ones when they haven't been already. - // All readAt requests are new requests. - if req.DidOffsetChange || !req.beenRead { - if etag != "" { - opts.SetMatchETag(etag) - } - if httpReader != nil { - // Close previously opened http reader. - httpReader.Close() - } - // If this request is a readAt only get the specified range. - if req.isReadAt { - // Range is set with respect to the offset and length of the buffer requested. - opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) - } else if req.Offset > 0 { // Range is set with respect to the offset. - opts.SetRange(req.Offset, 0) - } - httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts) - if err != nil { - resCh <- getResponse{ - Error: err, - } - return - } - } - - // Read at least req.Buffer bytes, if not we have - // reached our EOF. - size, err := io.ReadFull(httpReader, req.Buffer) - if err == io.ErrUnexpectedEOF { - // If an EOF happens after reading some but not - // all the bytes ReadFull returns ErrUnexpectedEOF - err = io.EOF - } - // Reply back how much was read. - resCh <- getResponse{ - Size: int(size), - Error: err, - didRead: true, - objectInfo: objectInfo, - } - } - } - } - }() - - // Create a newObject through the information sent back by reqCh. - return newObject(reqCh, resCh, doneCh), nil -} - -// get request message container to communicate with internal -// go-routine. -type getRequest struct { - Buffer []byte - Offset int64 // readAt offset. - DidOffsetChange bool // Tracks the offset changes for Seek requests. - beenRead bool // Determines if this is the first time an object is being read. - isReadAt bool // Determines if this request is a request to a specific range - isReadOp bool // Determines if this request is a Read or Read/At request. - isFirstReq bool // Determines if this request is the first time an object is being accessed. - settingObjectInfo bool // Determines if this request is to set the objectInfo of an object. -} - -// get response message container to reply back for the request. -type getResponse struct { - Size int - Error error - didRead bool // Lets subsequent calls know whether or not httpReader has been initiated. - objectInfo ObjectInfo // Used for the first request. -} - -// Object represents an open object. It implements -// Reader, ReaderAt, Seeker, Closer for a HTTP stream. -type Object struct { - // Mutex. - mutex *sync.Mutex - - // User allocated and defined. - reqCh chan<- getRequest - resCh <-chan getResponse - doneCh chan<- struct{} - currOffset int64 - objectInfo ObjectInfo - - // Ask lower level to initiate data fetching based on currOffset - seekData bool - - // Keeps track of closed call. - isClosed bool - - // Keeps track of if this is the first call. - isStarted bool - - // Previous error saved for future calls. - prevErr error - - // Keeps track of if this object has been read yet. - beenRead bool - - // Keeps track of if objectInfo has been set yet. - objectInfoSet bool -} - -// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object. -// Returns back the size of the buffer read, if anything was read, as well -// as any error encountered. For all first requests sent on the object -// it is also responsible for sending back the objectInfo. 
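The goroutine above implements lazy fetching: GetObject returns a *Object immediately, and no HTTP request is issued until the first Read, ReadAt, Stat, or Seek arrives on reqCh. A usage sketch under the same assumptions as before (placeholder endpoint, credentials, and names; pre-v7 minio.New constructor):

package main

import (
	"io"
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true) // placeholders
	if err != nil {
		log.Fatal(err)
	}

	// Returns at once; the GET is deferred until the first request
	// reaches the internal goroutine through reqCh.
	obj, err := client.GetObject("mybucket", "logs/app.log", minio.GetObjectOptions{})
	if err != nil {
		log.Fatal(err)
	}
	defer obj.Close()

	// A first Stat is served by a statObject call, not a full GET.
	info, err := obj.Stat()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("size=%d etag=%s", info.Size, info.ETag)

	// Streaming copy; each Read is answered by the same long-lived goroutine.
	if _, err := io.Copy(os.Stdout, obj); err != nil {
		log.Fatal(err)
	}
}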
-func (o *Object) doGetRequest(request getRequest) (getResponse, error) { - o.reqCh <- request - response := <-o.resCh - - // Return any error to the top level. - if response.Error != nil { - return response, response.Error - } - - // This was the first request. - if !o.isStarted { - // The object has been operated on. - o.isStarted = true - } - // Set the objectInfo if the request was not readAt - // and it hasn't been set before. - if !o.objectInfoSet && !request.isReadAt { - o.objectInfo = response.objectInfo - o.objectInfoSet = true - } - // Set beenRead only if it has not been set before. - if !o.beenRead { - o.beenRead = response.didRead - } - // Data are ready on the wire, no need to reinitiate connection in lower level - o.seekData = false - - return response, nil -} - -// setOffset - handles the setting of offsets for -// Read/ReadAt/Seek requests. -func (o *Object) setOffset(bytesRead int64) error { - // Update the currentOffset. - o.currOffset += bytesRead - - if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size { - return io.EOF - } - return nil -} - -// Read reads up to len(b) bytes into b. It returns the number of -// bytes read (0 <= n <= len(b)) and any error encountered. Returns -// io.EOF upon end of file. -func (o *Object) Read(b []byte) (n int, err error) { - if o == nil { - return 0, ErrInvalidArgument("Object is nil") - } - - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // prevErr is previous error saved from previous operation. - if o.prevErr != nil || o.isClosed { - return 0, o.prevErr - } - // Create a new request. - readReq := getRequest{ - isReadOp: true, - beenRead: o.beenRead, - Buffer: b, - } - - // Alert that this is the first request. - if !o.isStarted { - readReq.isFirstReq = true - } - - // Ask to establish a new data fetch routine based on seekData flag - readReq.DidOffsetChange = o.seekData - readReq.Offset = o.currOffset - - // Send and receive from the first request. - response, err := o.doGetRequest(readReq) - if err != nil && err != io.EOF { - // Save the error for future calls. - o.prevErr = err - return response.Size, err - } - - // Bytes read. - bytesRead := int64(response.Size) - - // Set the new offset. - oerr := o.setOffset(bytesRead) - if oerr != nil { - // Save the error for future calls. - o.prevErr = oerr - return response.Size, oerr - } - - // Return the response. - return response.Size, err -} - -// Stat returns the ObjectInfo structure describing Object. -func (o *Object) Stat() (ObjectInfo, error) { - if o == nil { - return ObjectInfo{}, ErrInvalidArgument("Object is nil") - } - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { - return ObjectInfo{}, o.prevErr - } - - // This is the first request. - if !o.isStarted || !o.objectInfoSet { - // Send the request and get the response. - _, err := o.doGetRequest(getRequest{ - isFirstReq: !o.isStarted, - settingObjectInfo: !o.objectInfoSet, - }) - if err != nil { - o.prevErr = err - return ObjectInfo{}, err - } - } - - return o.objectInfo, nil -} - -// ReadAt reads len(b) bytes from the File starting at byte offset -// off. It returns the number of bytes read and the error, if any. -// ReadAt always returns a non-nil error when n < len(b). At end of -// file, that error is io.EOF. -func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { - if o == nil { - return 0, ErrInvalidArgument("Object is nil") - } - - // Locking. 
- o.mutex.Lock() - defer o.mutex.Unlock() - - // prevErr is error which was saved in previous operation. - if o.prevErr != nil || o.isClosed { - return 0, o.prevErr - } - - // Can only compare offsets to size when size has been set. - if o.objectInfoSet { - // If offset is negative than we return io.EOF. - // If offset is greater than or equal to object size we return io.EOF. - if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 { - return 0, io.EOF - } - } - - // Create the new readAt request. - readAtReq := getRequest{ - isReadOp: true, - isReadAt: true, - DidOffsetChange: true, // Offset always changes. - beenRead: o.beenRead, // Set if this is the first request to try and read. - Offset: offset, // Set the offset. - Buffer: b, - } - - // Alert that this is the first request. - if !o.isStarted { - readAtReq.isFirstReq = true - } - - // Send and receive from the first request. - response, err := o.doGetRequest(readAtReq) - if err != nil && err != io.EOF { - // Save the error. - o.prevErr = err - return response.Size, err - } - // Bytes read. - bytesRead := int64(response.Size) - // There is no valid objectInfo yet - // to compare against for EOF. - if !o.objectInfoSet { - // Update the currentOffset. - o.currOffset += bytesRead - } else { - // If this was not the first request update - // the offsets and compare against objectInfo - // for EOF. - oerr := o.setOffset(bytesRead) - if oerr != nil { - o.prevErr = oerr - return response.Size, oerr - } - } - return response.Size, err -} - -// Seek sets the offset for the next Read or Write to offset, -// interpreted according to whence: 0 means relative to the -// origin of the file, 1 means relative to the current offset, -// and 2 means relative to the end. -// Seek returns the new offset and an error, if any. -// -// Seeking to a negative offset is an error. Seeking to any positive -// offset is legal, subsequent io operations succeed until the -// underlying object is not closed. -func (o *Object) Seek(offset int64, whence int) (n int64, err error) { - if o == nil { - return 0, ErrInvalidArgument("Object is nil") - } - - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - if o.prevErr != nil { - // At EOF seeking is legal allow only io.EOF, for any other errors we return. - if o.prevErr != io.EOF { - return 0, o.prevErr - } - } - - // Negative offset is valid for whence of '2'. - if offset < 0 && whence != 2 { - return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence)) - } - - // This is the first request. So before anything else - // get the ObjectInfo. - if !o.isStarted || !o.objectInfoSet { - // Create the new Seek request. - seekReq := getRequest{ - isReadOp: false, - Offset: offset, - isFirstReq: true, - } - // Send and receive from the seek request. - _, err := o.doGetRequest(seekReq) - if err != nil { - // Save the error. - o.prevErr = err - return 0, err - } - } - - // Switch through whence. 
- switch whence { - default: - return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) - case 0: - if o.objectInfo.Size > -1 && offset > o.objectInfo.Size { - return 0, io.EOF - } - o.currOffset = offset - case 1: - if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size { - return 0, io.EOF - } - o.currOffset += offset - case 2: - // If we don't know the object size return an error for io.SeekEnd - if o.objectInfo.Size < 0 { - return 0, ErrInvalidArgument("Whence END is not supported when the object size is unknown") - } - // Seeking to positive offset is valid for whence '2', but - // since we are backing a Reader we have reached 'EOF' if - // offset is positive. - if offset > 0 { - return 0, io.EOF - } - // Seeking to negative position not allowed for whence. - if o.objectInfo.Size+offset < 0 { - return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) - } - o.currOffset = o.objectInfo.Size + offset - } - // Reset the saved error since we successfully seeked, let the Read - // and ReadAt decide. - if o.prevErr == io.EOF { - o.prevErr = nil - } - - // Ask lower level to fetch again from source - o.seekData = true - - // Return the effective offset. - return o.currOffset, nil -} - -// Close - The behavior of Close after the first call returns error -// for subsequent Close() calls. -func (o *Object) Close() (err error) { - if o == nil { - return ErrInvalidArgument("Object is nil") - } - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // if already closed return an error. - if o.isClosed { - return o.prevErr - } - - // Close successfully. - close(o.doneCh) - - // Save for future operations. - errMsg := "Object is already closed. Bad file descriptor." - o.prevErr = errors.New(errMsg) - // Save here that we closed done channel successfully. - o.isClosed = true - return nil -} - -// newObject instantiates a new *minio.Object* -// ObjectInfo will be set by setObjectInfo -func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object { - return &Object{ - mutex: &sync.Mutex{}, - reqCh: reqCh, - resCh: resCh, - doneCh: doneCh, - } -} - -// getObject - retrieve object from Object Storage. -// -// Additionally this function also takes range arguments to download the specified -// range bytes of an object. Setting offset and length = 0 will download the full object. -// -// For more information about the HTTP Range header. -// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. -func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) { - // Validate input arguments. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, ObjectInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, ObjectInfo{}, err - } - - // Execute GET on objectName. - resp, err := c.executeMethod(ctx, "GET", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - customHeader: opts.Header(), - contentSHA256Hex: emptySHA256Hex, - }) - if err != nil { - return nil, ObjectInfo{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - // Trim off the odd double quotes from ETag in the beginning and end. 
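Seek above never touches the network itself: it validates the offset against the cached ObjectInfo, clears a prior io.EOF, and sets seekData so that the next Read reopens the HTTP stream at the new offset, while ReadAt always issues a ranged GET sized to the buffer. A sketch combining the two (placeholder names as before; the object is assumed to be at least 16 bytes long for the tail read):

package main

import (
	"io"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true) // placeholders
	if err != nil {
		log.Fatal(err)
	}

	obj, err := client.GetObject("mybucket", "data.bin", minio.GetObjectOptions{})
	if err != nil {
		log.Fatal(err)
	}
	defer obj.Close()

	// ReadAt fetches exactly len(buf) bytes at the offset via a ranged GET.
	buf := make([]byte, 16)
	if _, err := obj.ReadAt(buf, 1024); err != nil && err != io.EOF {
		log.Fatal(err)
	}

	// Seek relative to the end needs a known object size; the next Read
	// then reopens the stream at the new offset (seekData is set).
	if _, err := obj.Seek(-16, io.SeekEnd); err != nil {
		log.Fatal(err)
	}
	tail := make([]byte, 16)
	if _, err := io.ReadFull(obj, tail); err != nil {
		log.Fatal(err)
	}
	log.Printf("last 16 bytes: %x", tail)
}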
- md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") - md5sum = strings.TrimSuffix(md5sum, "\"") - - // Parse the date. - date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) - if err != nil { - msg := "Last-Modified time format not recognized. " + reportIssue - return nil, ObjectInfo{}, ErrorResponse{ - Code: "InternalError", - Message: msg, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - Region: resp.Header.Get("x-amz-bucket-region"), - } - } - - // Get content-type. - contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) - if contentType == "" { - contentType = "application/octet-stream" - } - - objectStat := ObjectInfo{ - ETag: md5sum, - Key: objectName, - Size: resp.ContentLength, - LastModified: date, - ContentType: contentType, - // Extract only the relevant header keys describing the object. - // following function filters out a list of standard set of keys - // which are not part of object metadata. - Metadata: extractObjMetadata(resp.Header), - } - - // do not close body here, caller will close - return resp.Body, objectStat, nil -} diff --git a/vendor/github.com/minio/minio-go/api-get-options.go b/vendor/github.com/minio/minio-go/api-get-options.go deleted file mode 100644 index dbf062d61..000000000 --- a/vendor/github.com/minio/minio-go/api-get-options.go +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "fmt" - "net/http" - "time" - - "github.com/minio/minio-go/pkg/encrypt" -) - -// GetObjectOptions are used to specify additional headers or options -// during GET requests. -type GetObjectOptions struct { - headers map[string]string - ServerSideEncryption encrypt.ServerSide -} - -// StatObjectOptions are used to specify additional headers or options -// during GET info/stat requests. -type StatObjectOptions struct { - GetObjectOptions -} - -// Header returns the http.Header representation of the GET options. -func (o GetObjectOptions) Header() http.Header { - headers := make(http.Header, len(o.headers)) - for k, v := range o.headers { - headers.Set(k, v) - } - if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { - o.ServerSideEncryption.Marshal(headers) - } - return headers -} - -// Set adds a key value pair to the options. The -// key-value pair will be part of the HTTP GET request -// headers. -func (o *GetObjectOptions) Set(key, value string) { - if o.headers == nil { - o.headers = make(map[string]string) - } - o.headers[http.CanonicalHeaderKey(key)] = value -} - -// SetMatchETag - set match etag. -func (o *GetObjectOptions) SetMatchETag(etag string) error { - if etag == "" { - return ErrInvalidArgument("ETag cannot be empty.") - } - o.Set("If-Match", "\""+etag+"\"") - return nil -} - -// SetMatchETagExcept - set match etag except. 
-func (o *GetObjectOptions) SetMatchETagExcept(etag string) error { - if etag == "" { - return ErrInvalidArgument("ETag cannot be empty.") - } - o.Set("If-None-Match", "\""+etag+"\"") - return nil -} - -// SetUnmodified - set unmodified time since. -func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error { - if modTime.IsZero() { - return ErrInvalidArgument("Modified since cannot be empty.") - } - o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat)) - return nil -} - -// SetModified - set modified time since. -func (o *GetObjectOptions) SetModified(modTime time.Time) error { - if modTime.IsZero() { - return ErrInvalidArgument("Modified since cannot be empty.") - } - o.Set("If-Modified-Since", modTime.Format(http.TimeFormat)) - return nil -} - -// SetRange - set the start and end offset of the object to be read. -// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference. -func (o *GetObjectOptions) SetRange(start, end int64) error { - switch { - case start == 0 && end < 0: - // Read last '-end' bytes. `bytes=-N`. - o.Set("Range", fmt.Sprintf("bytes=%d", end)) - case 0 < start && end == 0: - // Read everything starting from offset - // 'start'. `bytes=N-`. - o.Set("Range", fmt.Sprintf("bytes=%d-", start)) - case 0 <= start && start <= end: - // Read everything starting at 'start' till the - // 'end'. `bytes=N-M` - o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) - default: - // All other cases such as - // bytes=-3- - // bytes=5-3 - // bytes=-2-4 - // bytes=-3-0 - // bytes=-3--2 - // are invalid. - return ErrInvalidArgument( - fmt.Sprintf( - "Invalid range specified: start=%d end=%d", - start, end)) - } - return nil -} diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go deleted file mode 100644 index 12d4c590e..000000000 --- a/vendor/github.com/minio/minio-go/api-get-policy.go +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io/ioutil" - "net/http" - "net/url" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// GetBucketPolicy - get bucket policy at a given path. -func (c Client) GetBucketPolicy(bucketName string) (string, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - bucketPolicy, err := c.getBucketPolicy(bucketName) - if err != nil { - errResponse := ToErrorResponse(err) - if errResponse.Code == "NoSuchBucketPolicy" { - return "", nil - } - return "", err - } - return bucketPolicy, nil -} - -// Request server for current bucket policy. -func (c Client) getBucketPolicy(bucketName string) (string, error) { - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("policy", "") - - // Execute GET on bucket to list objects. 
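SetRange above maps its (start, end) pair onto the three valid RFC 7233 forms: bytes=N-M for a closed range, bytes=N- for everything from an offset, and bytes=-N for the last N bytes. This standalone sketch shows the resulting Range header for each case (no server involved):

package main

import (
	"fmt"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	var opts minio.GetObjectOptions

	// Closed range: first 100 bytes.
	if err := opts.SetRange(0, 99); err != nil {
		log.Fatal(err)
	}
	fmt.Println(opts.Header().Get("Range")) // "bytes=0-99"

	// Open-ended range: everything from offset 500.
	opts = minio.GetObjectOptions{}
	if err := opts.SetRange(500, 0); err != nil {
		log.Fatal(err)
	}
	fmt.Println(opts.Header().Get("Range")) // "bytes=500-"

	// Suffix range: the last 100 bytes.
	opts = minio.GetObjectOptions{}
	if err := opts.SetRange(0, -100); err != nil {
		log.Fatal(err)
	}
	fmt.Println(opts.Header().Get("Range")) // "bytes=-100"
}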
- resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - - defer closeResponse(resp) - if err != nil { - return "", err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return "", httpRespToErrorResponse(resp, bucketName, "") - } - } - - bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", err - } - - policy := string(bucketPolicyBuf) - return policy, err -} diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go deleted file mode 100644 index 2f1350a34..000000000 --- a/vendor/github.com/minio/minio-go/api-list.go +++ /dev/null @@ -1,715 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// ListBuckets list all buckets owned by this authenticated user. -// -// This call requires explicit authentication, no anonymous requests are -// allowed for listing buckets. -// -// api := client.New(....) -// for message := range api.ListBuckets() { -// fmt.Println(message) -// } -// -func (c Client) ListBuckets() ([]BucketInfo, error) { - // Execute GET on service. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Hex: emptySHA256Hex}) - defer closeResponse(resp) - if err != nil { - return nil, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, "", "") - } - } - listAllMyBucketsResult := listAllMyBucketsResult{} - err = xmlDecoder(resp.Body, &listAllMyBucketsResult) - if err != nil { - return nil, err - } - return listAllMyBucketsResult.Buckets.Bucket, nil -} - -/// Bucket Read Operations. - -// ListObjectsV2 lists all objects matching the objectPrefix from -// the specified bucket. If recursion is enabled it would list -// all subdirectories and all its contents. -// -// Your input parameters are just bucketName, objectPrefix, recursive -// and a done channel for pro-actively closing the internal go -// routine. If you enable recursive as 'true' this function will -// return back all the objects in a given bucket name and object -// prefix. -// -// api := client.New(....) -// // Create a done channel. -// doneCh := make(chan struct{}) -// defer close(doneCh) -// // Recursively list all objects in 'mytestbucket' -// recursive := true -// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) { -// fmt.Println(message) -// } -// -func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { - // Allocate new list objects channel. 
- objectStatCh := make(chan ObjectInfo, 1) - // Default listing is delimited at "/" - delimiter := "/" - if recursive { - // If recursive we do not delimit. - delimiter = "" - } - - // Return object owner information by default - fetchOwner := true - - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(objectStatCh) - objectStatCh <- ObjectInfo{ - Err: err, - } - return objectStatCh - } - - // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - defer close(objectStatCh) - objectStatCh <- ObjectInfo{ - Err: err, - } - return objectStatCh - } - - // Initiate list objects goroutine here. - go func(objectStatCh chan<- ObjectInfo) { - defer close(objectStatCh) - // Save continuationToken for next request. - var continuationToken string - for { - // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000, "") - if err != nil { - objectStatCh <- ObjectInfo{ - Err: err, - } - return - } - - // If contents are available loop through and send over channel. - for _, object := range result.Contents { - select { - // Send object content. - case objectStatCh <- object: - // If receives done from the caller, return here. - case <-doneCh: - return - } - } - - // Send all common prefixes if any. - // NOTE: prefixes are only present if the request is delimited. - for _, obj := range result.CommonPrefixes { - select { - // Send object prefixes. - case objectStatCh <- ObjectInfo{ - Key: obj.Prefix, - Size: 0, - }: - // If receives done from the caller, return here. - case <-doneCh: - return - } - } - - // If continuation token present, save it for next request. - if result.NextContinuationToken != "" { - continuationToken = result.NextContinuationToken - } - - // Listing ends result is not truncated, return right here. - if !result.IsTruncated { - return - } - } - }(objectStatCh) - return objectStatCh -} - -// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket. -// -// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. -// request parameters :- -// --------- -// ?continuation-token - Used to continue iterating over a set of objects -// ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?max-keys - Sets the maximum number of keys returned in the response body. -// ?start-after - Specifies the key to start after when listing objects in a bucket. -func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) { - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ListBucketV2Result{}, err - } - // Validate object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - return ListBucketV2Result{}, err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - - // Always set list-type in ListObjects V2 - urlValues.Set("list-type", "2") - - // Set object prefix, prefix value to be set to empty is okay. - urlValues.Set("prefix", objectPrefix) - - // Set delimiter, delimiter value to be set to empty is okay. 
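ListObjectsV2 hides the pagination: the goroutine above keeps issuing 1000-key listObjectsV2Query calls, carrying the continuation token forward and feeding results into the channel until the listing is no longer truncated or the caller closes doneCh. Usage sketch (placeholder endpoint, credentials, and bucket, as before):

package main

import (
	"fmt"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true) // placeholders
	if err != nil {
		log.Fatal(err)
	}

	// Closing doneCh tears down the internal listing goroutine early.
	doneCh := make(chan struct{})
	defer close(doneCh)

	// recursive=false keeps the "/" delimiter, so common prefixes come back
	// as zero-size entries; continuation tokens are handled internally.
	for obj := range client.ListObjectsV2("mybucket", "backups/", false, doneCh) {
		if obj.Err != nil {
			log.Fatal(obj.Err)
		}
		fmt.Println(obj.Key, obj.Size)
	}
}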
- urlValues.Set("delimiter", delimiter) - - // Set continuation token - if continuationToken != "" { - urlValues.Set("continuation-token", continuationToken) - } - - // Fetch owner when listing - if fetchOwner { - urlValues.Set("fetch-owner", "true") - } - - // maxkeys should default to 1000 or less. - if maxkeys == 0 || maxkeys > 1000 { - maxkeys = 1000 - } - // Set max keys. - urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) - - // Set start-after - if startAfter != "" { - urlValues.Set("start-after", startAfter) - } - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return ListBucketV2Result{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "") - } - } - - // Decode listBuckets XML. - listBucketResult := ListBucketV2Result{} - if err = xmlDecoder(resp.Body, &listBucketResult); err != nil { - return listBucketResult, err - } - - // This is an additional verification check to make - // sure proper responses are received. - if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" { - return listBucketResult, errors.New("Truncated response should have continuation token set") - } - - // Success. - return listBucketResult, nil -} - -// ListObjects - (List Objects) - List some objects or all recursively. -// -// ListObjects lists all objects matching the objectPrefix from -// the specified bucket. If recursion is enabled it would list -// all subdirectories and all its contents. -// -// Your input parameters are just bucketName, objectPrefix, recursive -// and a done channel for pro-actively closing the internal go -// routine. If you enable recursive as 'true' this function will -// return back all the objects in a given bucket name and object -// prefix. -// -// api := client.New(....) -// // Create a done channel. -// doneCh := make(chan struct{}) -// defer close(doneCh) -// // Recurively list all objects in 'mytestbucket' -// recursive := true -// for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) { -// fmt.Println(message) -// } -// -func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { - // Allocate new list objects channel. - objectStatCh := make(chan ObjectInfo, 1) - // Default listing is delimited at "/" - delimiter := "/" - if recursive { - // If recursive we do not delimit. - delimiter = "" - } - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(objectStatCh) - objectStatCh <- ObjectInfo{ - Err: err, - } - return objectStatCh - } - // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - defer close(objectStatCh) - objectStatCh <- ObjectInfo{ - Err: err, - } - return objectStatCh - } - - // Initiate list objects goroutine here. - go func(objectStatCh chan<- ObjectInfo) { - defer close(objectStatCh) - // Save marker for next request. - var marker string - for { - // Get list of objects a maximum of 1000 per request. - result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000) - if err != nil { - objectStatCh <- ObjectInfo{ - Err: err, - } - return - } - - // If contents are available loop through and send over channel. 
- for _, object := range result.Contents { - // Save the marker. - marker = object.Key - select { - // Send object content. - case objectStatCh <- object: - // If receives done from the caller, return here. - case <-doneCh: - return - } - } - - // Send all common prefixes if any. - // NOTE: prefixes are only present if the request is delimited. - for _, obj := range result.CommonPrefixes { - object := ObjectInfo{} - object.Key = obj.Prefix - object.Size = 0 - select { - // Send object prefixes. - case objectStatCh <- object: - // If receives done from the caller, return here. - case <-doneCh: - return - } - } - - // If next marker present, save it for next request. - if result.NextMarker != "" { - marker = result.NextMarker - } - - // Listing ends result is not truncated, return right here. - if !result.IsTruncated { - return - } - } - }(objectStatCh) - return objectStatCh -} - -// listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. -// -// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. -// request parameters :- -// --------- -// ?marker - Specifies the key to start with when listing objects in a bucket. -// ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?max-keys - Sets the maximum number of keys returned in the response body. -func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) { - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ListBucketResult{}, err - } - // Validate object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - return ListBucketResult{}, err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - - // Set object prefix, prefix value to be set to empty is okay. - urlValues.Set("prefix", objectPrefix) - - // Set delimiter, delimiter value to be set to empty is okay. - urlValues.Set("delimiter", delimiter) - - // Set object marker. - if objectMarker != "" { - urlValues.Set("marker", objectMarker) - } - - // maxkeys should default to 1000 or less. - if maxkeys == 0 || maxkeys > 1000 { - maxkeys = 1000 - } - // Set max keys. - urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return ListBucketResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "") - } - } - // Decode listBuckets XML. - listBucketResult := ListBucketResult{} - err = xmlDecoder(resp.Body, &listBucketResult) - if err != nil { - return listBucketResult, err - } - return listBucketResult, nil -} - -// ListIncompleteUploads - List incompletely uploaded multipart objects. -// -// ListIncompleteUploads lists all incompleted objects matching the -// objectPrefix from the specified bucket. If recursion is enabled -// it would list all subdirectories and all its contents. -// -// Your input parameters are just bucketName, objectPrefix, recursive -// and a done channel to pro-actively close the internal go routine. 
-// If you enable recursive as 'true' this function will return back all -// the multipart objects in a given bucket name. -// -// api := client.New(....) -// // Create a done channel. -// doneCh := make(chan struct{}) -// defer close(doneCh) -// // Recurively list all objects in 'mytestbucket' -// recursive := true -// for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive) { -// fmt.Println(message) -// } -// -func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo { - // Turn on size aggregation of individual parts. - isAggregateSize := true - return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh) -} - -// listIncompleteUploads lists all incomplete uploads. -func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo { - // Allocate channel for multipart uploads. - objectMultipartStatCh := make(chan ObjectMultipartInfo, 1) - // Delimiter is set to "/" by default. - delimiter := "/" - if recursive { - // If recursive do not delimit. - delimiter = "" - } - // Validate bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - defer close(objectMultipartStatCh) - objectMultipartStatCh <- ObjectMultipartInfo{ - Err: err, - } - return objectMultipartStatCh - } - // Validate incoming object prefix. - if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { - defer close(objectMultipartStatCh) - objectMultipartStatCh <- ObjectMultipartInfo{ - Err: err, - } - return objectMultipartStatCh - } - go func(objectMultipartStatCh chan<- ObjectMultipartInfo) { - defer close(objectMultipartStatCh) - // object and upload ID marker for future requests. - var objectMarker string - var uploadIDMarker string - for { - // list all multipart uploads. - result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000) - if err != nil { - objectMultipartStatCh <- ObjectMultipartInfo{ - Err: err, - } - return - } - // Save objectMarker and uploadIDMarker for next request. - objectMarker = result.NextKeyMarker - uploadIDMarker = result.NextUploadIDMarker - // Send all multipart uploads. - for _, obj := range result.Uploads { - // Calculate total size of the uploaded parts if 'aggregateSize' is enabled. - if aggregateSize { - // Get total multipart size. - obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID) - if err != nil { - objectMultipartStatCh <- ObjectMultipartInfo{ - Err: err, - } - continue - } - } - select { - // Send individual uploads here. - case objectMultipartStatCh <- obj: - // If done channel return here. - case <-doneCh: - return - } - } - // Send all common prefixes if any. - // NOTE: prefixes are only present if the request is delimited. - for _, obj := range result.CommonPrefixes { - object := ObjectMultipartInfo{} - object.Key = obj.Prefix - object.Size = 0 - select { - // Send delimited prefixes here. - case objectMultipartStatCh <- object: - // If done channel return here. - case <-doneCh: - return - } - } - // Listing ends if result not truncated, return right here. - if !result.IsTruncated { - return - } - } - }(objectMultipartStatCh) - // return. - return objectMultipartStatCh -} - -// listMultipartUploads - (List Multipart Uploads). -// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket. 
-// -// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket. -// request parameters. :- -// --------- -// ?key-marker - Specifies the multipart upload after which listing should begin. -// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin. -// ?delimiter - A delimiter is a character you use to group keys. -// ?prefix - Limits the response to keys that begin with the specified prefix. -// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. -func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { - // Get resources properly escaped and lined up before using them in http request. - urlValues := make(url.Values) - // Set uploads. - urlValues.Set("uploads", "") - // Set object key marker. - if keyMarker != "" { - urlValues.Set("key-marker", keyMarker) - } - // Set upload id marker. - if uploadIDMarker != "" { - urlValues.Set("upload-id-marker", uploadIDMarker) - } - - // Set object prefix, prefix value to be set to empty is okay. - urlValues.Set("prefix", prefix) - - // Set delimiter, delimiter value to be set to empty is okay. - urlValues.Set("delimiter", delimiter) - - // maxUploads should be 1000 or less. - if maxUploads == 0 || maxUploads > 1000 { - maxUploads = 1000 - } - // Set max-uploads. - urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) - - // Execute GET on bucketName to list multipart uploads. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return ListMultipartUploadsResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "") - } - } - // Decode response body. - listMultipartUploadsResult := ListMultipartUploadsResult{} - err = xmlDecoder(resp.Body, &listMultipartUploadsResult) - if err != nil { - return listMultipartUploadsResult, err - } - return listMultipartUploadsResult, nil -} - -// listObjectParts list all object parts recursively. -func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { - // Part number marker for the next batch of request. - var nextPartNumberMarker int - partsInfo = make(map[int]ObjectPart) - for { - // Get list of uploaded parts a maximum of 1000 per request. - listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000) - if err != nil { - return nil, err - } - // Append to parts info. - for _, part := range listObjPartsResult.ObjectParts { - // Trim off the odd double quotes from ETag in the beginning and end. - part.ETag = strings.TrimPrefix(part.ETag, "\"") - part.ETag = strings.TrimSuffix(part.ETag, "\"") - partsInfo[part.PartNumber] = part - } - // Keep part number marker, for the next iteration. - nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker - // Listing ends result is not truncated, return right here. - if !listObjPartsResult.IsTruncated { - break - } - } - - // Return all the parts. - return partsInfo, nil -} - -// findUploadIDs lists all incomplete uploads and find the uploadIDs of the matching object name. 
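Because ListIncompleteUploads turns on size aggregation, each upload it reports carries the summed size of its already-uploaded parts, at the cost of one extra list-parts round trip per upload. Usage sketch (placeholders as before):

package main

import (
	"fmt"
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true) // placeholders
	if err != nil {
		log.Fatal(err)
	}

	doneCh := make(chan struct{})
	defer close(doneCh)

	// Size is the aggregate of the parts uploaded so far, computed by the
	// extra listObjectParts call per upload (the aggregateSize flag above).
	for upload := range client.ListIncompleteUploads("mybucket", "big/", true, doneCh) {
		if upload.Err != nil {
			log.Fatal(upload.Err)
		}
		fmt.Println(upload.Key, upload.UploadID, upload.Size)
	}
}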
-func (c Client) findUploadIDs(bucketName, objectName string) ([]string, error) { - var uploadIDs []string - // Make list incomplete uploads recursive. - isRecursive := true - // Turn off size aggregation of individual parts, in this request. - isAggregateSize := false - // Create done channel to cleanup the routine. - doneCh := make(chan struct{}) - defer close(doneCh) - // List all incomplete uploads. - for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) { - if mpUpload.Err != nil { - return nil, mpUpload.Err - } - if objectName == mpUpload.Key { - uploadIDs = append(uploadIDs, mpUpload.UploadID) - } - } - // Return the latest upload id. - return uploadIDs, nil -} - -// getTotalMultipartSize - calculate total uploaded size for the a given multipart object. -func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) { - // Iterate over all parts and aggregate the size. - partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID) - if err != nil { - return 0, err - } - for _, partInfo := range partsInfo { - size += partInfo.Size - } - return size, nil -} - -// listObjectPartsQuery (List Parts query) -// - lists some or all (up to 1000) parts that have been uploaded -// for a specific multipart upload -// -// You can use the request parameters as selection criteria to return -// a subset of the uploads in a bucket, request parameters :- -// --------- -// ?part-number-marker - Specifies the part after which listing should -// begin. -// ?max-parts - Maximum parts to be listed per request. -func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { - // Get resources properly escaped and lined up before using them in http request. - urlValues := make(url.Values) - // Set part number marker. - urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker)) - // Set upload id. - urlValues.Set("uploadId", uploadID) - - // maxParts should be 1000 or less. - if maxParts == 0 || maxParts > 1000 { - maxParts = 1000 - } - // Set max parts. - urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) - - // Execute GET on objectName to get list of parts. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return ListObjectPartsResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - // Decode list object parts XML. - listObjectPartsResult := ListObjectPartsResult{} - err = xmlDecoder(resp.Body, &listObjectPartsResult) - if err != nil { - return listObjectPartsResult, err - } - return listObjectPartsResult, nil -} diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go deleted file mode 100644 index 1c01e362b..000000000 --- a/vendor/github.com/minio/minio-go/api-notification.go +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bufio" - "context" - "encoding/json" - "io" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// GetBucketNotification - get bucket notification at a given path. -func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return BucketNotification{}, err - } - notification, err := c.getBucketNotification(bucketName) - if err != nil { - return BucketNotification{}, err - } - return notification, nil -} - -// Request server for notification rules. -func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) { - urlValues := make(url.Values) - urlValues.Set("notification", "") - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - - defer closeResponse(resp) - if err != nil { - return BucketNotification{}, err - } - return processBucketNotificationResponse(bucketName, resp) - -} - -// processes the GetNotification http response from the server. -func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) { - if resp.StatusCode != http.StatusOK { - errResponse := httpRespToErrorResponse(resp, bucketName, "") - return BucketNotification{}, errResponse - } - var bucketNotification BucketNotification - err := xmlDecoder(resp.Body, &bucketNotification) - if err != nil { - return BucketNotification{}, err - } - return bucketNotification, nil -} - -// Indentity represents the user id, this is a compliance field. -type identity struct { - PrincipalID string `json:"principalId"` -} - -// Notification event bucket metadata. -type bucketMeta struct { - Name string `json:"name"` - OwnerIdentity identity `json:"ownerIdentity"` - ARN string `json:"arn"` -} - -// Notification event object metadata. -type objectMeta struct { - Key string `json:"key"` - Size int64 `json:"size,omitempty"` - ETag string `json:"eTag,omitempty"` - VersionID string `json:"versionId,omitempty"` - Sequencer string `json:"sequencer"` -} - -// Notification event server specific metadata. -type eventMeta struct { - SchemaVersion string `json:"s3SchemaVersion"` - ConfigurationID string `json:"configurationId"` - Bucket bucketMeta `json:"bucket"` - Object objectMeta `json:"object"` -} - -// sourceInfo represents information on the client that -// triggered the event notification. -type sourceInfo struct { - Host string `json:"host"` - Port string `json:"port"` - UserAgent string `json:"userAgent"` -} - -// NotificationEvent represents an Amazon an S3 bucket notification event. 
-type NotificationEvent struct { - EventVersion string `json:"eventVersion"` - EventSource string `json:"eventSource"` - AwsRegion string `json:"awsRegion"` - EventTime string `json:"eventTime"` - EventName string `json:"eventName"` - UserIdentity identity `json:"userIdentity"` - RequestParameters map[string]string `json:"requestParameters"` - ResponseElements map[string]string `json:"responseElements"` - S3 eventMeta `json:"s3"` - Source sourceInfo `json:"source"` -} - -// NotificationInfo - represents the collection of notification events, additionally -// also reports errors if any while listening on bucket notifications. -type NotificationInfo struct { - Records []NotificationEvent - Err error -} - -// ListenBucketNotification - listen on bucket notifications. -func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo { - notificationInfoCh := make(chan NotificationInfo, 1) - // Only success, start a routine to start reading line by line. - go func(notificationInfoCh chan<- NotificationInfo) { - defer close(notificationInfoCh) - - // Validate the bucket name. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - notificationInfoCh <- NotificationInfo{ - Err: err, - } - return - } - - // Check ARN partition to verify if listening bucket is supported - if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) { - notificationInfoCh <- NotificationInfo{ - Err: ErrAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"), - } - return - } - - // Continuously run and listen on bucket notification. - // Create a done channel to control 'ListObjects' go routine. - retryDoneCh := make(chan struct{}, 1) - - // Indicate to our routine to exit cleanly upon return. - defer close(retryDoneCh) - - // Wait on the jitter retry loop. - for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) { - urlValues := make(url.Values) - urlValues.Set("prefix", prefix) - urlValues.Set("suffix", suffix) - urlValues["events"] = events - - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - if err != nil { - notificationInfoCh <- NotificationInfo{ - Err: err, - } - return - } - - // Validate http response, upon error return quickly. - if resp.StatusCode != http.StatusOK { - errResponse := httpRespToErrorResponse(resp, bucketName, "") - notificationInfoCh <- NotificationInfo{ - Err: errResponse, - } - return - } - - // Initialize a new bufio scanner, to read line by line. - bio := bufio.NewScanner(resp.Body) - - // Close the response body. - defer resp.Body.Close() - - // Unmarshal each line, returns marshalled values. - for bio.Scan() { - var notificationInfo NotificationInfo - if err = json.Unmarshal(bio.Bytes(), ¬ificationInfo); err != nil { - continue - } - // Send notificationInfo - select { - case notificationInfoCh <- notificationInfo: - case <-doneCh: - return - } - } - // Look for any underlying errors. - if err = bio.Err(); err != nil { - // For an unexpected connection drop from server, we close the body - // and re-connect. - if err == io.ErrUnexpectedEOF { - resp.Body.Close() - } - } - } - }(notificationInfoCh) - - // Returns the notification info channel, for caller to start reading from. 
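ListenBucketNotification above long-polls the server and decodes one JSON NotificationInfo per line, reconnecting with jittered retries between one and thirty seconds; it refuses AWS and Google endpoints outright. Usage sketch (placeholder endpoint and names; the event names are the standard s3:* identifiers):

package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("minio.example.com", "ACCESS-KEY", "SECRET-KEY", true) // placeholders
	if err != nil {
		log.Fatal(err)
	}

	doneCh := make(chan struct{})
	defer close(doneCh)

	// Only minio-style endpoints support this; against AWS or GCS the
	// channel yields a single ErrAPINotSupported and closes.
	events := []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:*"}
	for info := range client.ListenBucketNotification("mybucket", "photos/", ".jpg", events, doneCh) {
		if info.Err != nil {
			log.Fatal(info.Err)
		}
		for _, rec := range info.Records {
			log.Printf("%s on %s", rec.EventName, rec.S3.Object.Key)
		}
	}
}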
- return notificationInfoCh -} diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go deleted file mode 100644 index a2c060786..000000000 --- a/vendor/github.com/minio/minio-go/api-presigned.go +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "errors" - "net/http" - "net/url" - "time" - - "github.com/minio/minio-go/pkg/s3signer" - "github.com/minio/minio-go/pkg/s3utils" -) - -// presignURL - Returns a presigned URL for an input 'method'. -// Expires maximum is 7days - ie. 604800 and minimum is 1. -func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { - // Input validation. - if method == "" { - return nil, ErrInvalidArgument("method cannot be empty.") - } - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - if err = isValidExpiry(expires); err != nil { - return nil, err - } - - // Convert expires into seconds. - expireSeconds := int64(expires / time.Second) - reqMetadata := requestMetadata{ - presignURL: true, - bucketName: bucketName, - objectName: objectName, - expires: expireSeconds, - queryValues: reqParams, - } - - // Instantiate a new request. - // Since expires is set newRequest will presign the request. - var req *http.Request - if req, err = c.newRequest(method, reqMetadata); err != nil { - return nil, err - } - return req.URL, nil -} - -// PresignedGetObject - Returns a presigned URL to access an object -// data without credentials. URL can have a maximum expiry of -// upto 7days or a minimum of 1sec. Additionally you can override -// a set of response headers using the query parameters. -func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - return c.presignURL("GET", bucketName, objectName, expires, reqParams) -} - -// PresignedHeadObject - Returns a presigned URL to access object -// metadata without credentials. URL can have a maximum expiry of -// upto 7days or a minimum of 1sec. Additionally you can override -// a set of response headers using the query parameters. -func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - return c.presignURL("HEAD", bucketName, objectName, expires, reqParams) -} - -// PresignedPutObject - Returns a presigned URL to upload an object -// without credentials. URL can have a maximum expiry of upto 7days -// or a minimum of 1sec. 
-func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) { - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - return c.presignURL("PUT", bucketName, objectName, expires, nil) -} - -// Presign - returns a presigned URL for any http method of your choice -// along with custom request params. URL can have a maximum expiry of -// upto 7days or a minimum of 1sec. -func (c Client) Presign(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { - return c.presignURL(method, bucketName, objectName, expires, reqParams) -} - -// PresignedPostPolicy - Returns POST urlString, form data to upload an object. -func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) { - // Validate input arguments. - if p.expiration.IsZero() { - return nil, nil, errors.New("Expiration time must be specified") - } - if _, ok := p.formData["key"]; !ok { - return nil, nil, errors.New("object key must be specified") - } - if _, ok := p.formData["bucket"]; !ok { - return nil, nil, errors.New("bucket name must be specified") - } - - bucketName := p.formData["bucket"] - // Fetch the bucket location. - location, err := c.getBucketLocation(bucketName) - if err != nil { - return nil, nil, err - } - - isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName) - - u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil) - if err != nil { - return nil, nil, err - } - - // Get credentials from the configured credentials provider. - credValues, err := c.credsProvider.Get() - if err != nil { - return nil, nil, err - } - - var ( - signerType = credValues.SignerType - sessionToken = credValues.SessionToken - accessKeyID = credValues.AccessKeyID - secretAccessKey = credValues.SecretAccessKey - ) - - if signerType.IsAnonymous() { - return nil, nil, ErrInvalidArgument("Presigned operations are not supported for anonymous credentials") - } - - // Keep time. - t := time.Now().UTC() - // For signature version '2' handle here. - if signerType.IsV2() { - policyBase64 := p.base64() - p.formData["policy"] = policyBase64 - // For Google endpoint set this value to be 'GoogleAccessId'. - if s3utils.IsGoogleEndpoint(*c.endpointURL) { - p.formData["GoogleAccessId"] = accessKeyID - } else { - // For all other endpoints set this value to be 'AWSAccessKeyId'. - p.formData["AWSAccessKeyId"] = accessKeyID - } - // Sign the policy. - p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, secretAccessKey) - return u, p.formData, nil - } - - // Add date policy. - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-date", - value: t.Format(iso8601DateFormat), - }); err != nil { - return nil, nil, err - } - - // Add algorithm policy. - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-algorithm", - value: signV4Algorithm, - }); err != nil { - return nil, nil, err - } - - // Add a credential policy. 
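The presign path builds a signed URL locally without contacting the server, so it is cheap; the expiry is validated against the 1 second to 7 days (604800 s) window noted above, and reqParams lets a presigned GET override response headers. Sketch (placeholders as before):

package main

import (
	"fmt"
	"log"
	"net/url"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("s3.example.com", "ACCESS-KEY", "SECRET-KEY", true) // placeholders
	if err != nil {
		log.Fatal(err)
	}

	// Override response headers through query parameters on the signed GET.
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", `attachment; filename="report.pdf"`)

	getURL, err := client.PresignedGetObject("mybucket", "report.pdf", 24*time.Hour, reqParams)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("GET:", getURL)

	putURL, err := client.PresignedPutObject("mybucket", "upload.bin", time.Hour)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("PUT:", putURL)
}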
- credential := s3signer.GetCredential(accessKeyID, location, t) - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-credential", - value: credential, - }); err != nil { - return nil, nil, err - } - - if sessionToken != "" { - if err = p.addNewPolicy(policyCondition{ - matchType: "eq", - condition: "$x-amz-security-token", - value: sessionToken, - }); err != nil { - return nil, nil, err - } - } - - // Get base64 encoded policy. - policyBase64 := p.base64() - - // Fill in the form data. - p.formData["policy"] = policyBase64 - p.formData["x-amz-algorithm"] = signV4Algorithm - p.formData["x-amz-credential"] = credential - p.formData["x-amz-date"] = t.Format(iso8601DateFormat) - if sessionToken != "" { - p.formData["x-amz-security-token"] = sessionToken - } - p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location) - return u, p.formData, nil -} diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go deleted file mode 100644 index 33dc0cf3d..000000000 --- a/vendor/github.com/minio/minio-go/api-put-bucket.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/minio/minio-go/pkg/s3utils" -) - -/// Bucket operations - -// MakeBucket creates a new bucket with bucketName. -// -// Location is an optional argument, by default all buckets are -// created in US Standard Region. -// -// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html -// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations -func (c Client) MakeBucket(bucketName string, location string) (err error) { - defer func() { - // Save the location into cache on a successful makeBucket response. - if err == nil { - c.bucketLocCache.Set(bucketName, location) - } - }() - - // Validate the input arguments. - if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil { - return err - } - - // If location is empty, treat is a default region 'us-east-1'. - if location == "" { - location = "us-east-1" - // For custom region clients, default - // to custom region instead not 'us-east-1'. - if c.region != "" { - location = c.region - } - } - // PUT bucket request metadata. - reqMetadata := requestMetadata{ - bucketName: bucketName, - bucketLocation: location, - } - - // If location is not 'us-east-1' create bucket location config. 
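For reference, the presign helpers being dropped here were typically driven as below. A minimal, self-contained sketch assuming this pre-v7 minio-go API; the endpoint, credentials, bucket, and object names are hypothetical:

package main

import (
	"fmt"
	"log"
	"net/url"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	// NewV4 is the v4-signature constructor of this minio-go generation.
	c, err := minio.NewV4("s3.amazonaws.com", "ACCESS_KEY", "SECRET_KEY", true)
	if err != nil {
		log.Fatal(err)
	}

	// Optional response-header overrides travel as query parameters.
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", `attachment; filename="backup.tar"`)

	// Expiry must fall between 1 second and 7 days (604800 s), as validated above.
	u, err := c.PresignedGetObject("pbm-backups", "backup.tar", 24*time.Hour, reqParams)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u)
}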
- if location != "us-east-1" && location != "" { - createBucketConfig := createBucketConfiguration{} - createBucketConfig.Location = location - var createBucketConfigBytes []byte - createBucketConfigBytes, err = xml.Marshal(createBucketConfig) - if err != nil { - return err - } - reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes) - reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes) - reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes) - reqMetadata.contentLength = int64(len(createBucketConfigBytes)) - } - - // Execute PUT to create a new bucket. - resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - - // Success. - return nil -} - -// SetBucketPolicy set the access permissions on an existing bucket. -func (c Client) SetBucketPolicy(bucketName, policy string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // If policy is empty then delete the bucket policy. - if policy == "" { - return c.removeBucketPolicy(bucketName) - } - - // Save the updated policies. - return c.putBucketPolicy(bucketName, policy) -} - -// Saves a new bucket policy. -func (c Client) putBucketPolicy(bucketName, policy string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("policy", "") - - // Content-length is mandatory for put policy request - policyReader := strings.NewReader(policy) - b, err := ioutil.ReadAll(policyReader) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: policyReader, - contentLength: int64(len(b)), - } - - // Execute PUT to upload a new bucket policy. - resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// Removes all policies on a bucket. -func (c Client) removeBucketPolicy(bucketName string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("policy", "") - - // Execute DELETE on objectName. - resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - return nil -} - -// SetBucketLifecycle set the lifecycle on an existing bucket. -func (c Client) SetBucketLifecycle(bucketName, lifecycle string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // If lifecycle is empty then delete it. - if lifecycle == "" { - return c.removeBucketLifecycle(bucketName) - } - - // Save the updated lifecycle. - return c.putBucketLifecycle(bucketName, lifecycle) -} - -// Saves a new bucket lifecycle. 
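Likewise, a hedged sketch of how the bucket helpers above were typically called; the bucket name, region, and read-only policy JSON are made up, and c is assumed to be a configured *minio.Client:

func setupBucket(c *minio.Client) error {
	// An empty location defaults to us-east-1 (or the client's own region).
	if err := c.MakeBucket("pbm-backups", "eu-west-1"); err != nil {
		return err
	}
	// SetBucketPolicy takes a raw JSON policy document; an empty string
	// instead deletes the current policy (removeBucketPolicy above).
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow",` +
		`"Principal":{"AWS":["*"]},"Action":["s3:GetObject"],` +
		`"Resource":["arn:aws:s3:::pbm-backups/*"]}]}`
	return c.SetBucketPolicy("pbm-backups", policy)
}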
-func (c Client) putBucketLifecycle(bucketName, lifecycle string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - - // Content-length is mandatory for put lifecycle request - lifecycleReader := strings.NewReader(lifecycle) - b, err := ioutil.ReadAll(lifecycleReader) - if err != nil { - return err - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: lifecycleReader, - contentLength: int64(len(b)), - contentMD5Base64: sumMD5Base64(b), - } - - // Execute PUT to upload a new bucket lifecycle. - resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// Remove lifecycle from a bucket. -func (c Client) removeBucketLifecycle(bucketName string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("lifecycle", "") - - // Execute DELETE on objectName. - resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - return nil -} - -// SetBucketNotification saves a new bucket notification. -func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - - // Get resources properly escaped and lined up before - // using them in http request. - urlValues := make(url.Values) - urlValues.Set("notification", "") - - notifBytes, err := xml.Marshal(bucketNotification) - if err != nil { - return err - } - - notifBuffer := bytes.NewReader(notifBytes) - reqMetadata := requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: notifBuffer, - contentLength: int64(len(notifBytes)), - contentMD5Base64: sumMD5Base64(notifBytes), - contentSHA256Hex: sum256Hex(notifBytes), - } - - // Execute PUT to upload a new bucket notification. - resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") - } - } - return nil -} - -// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config -func (c Client) RemoveAllBucketNotification(bucketName string) error { - return c.SetBucketNotification(bucketName, BucketNotification{}) -} diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go deleted file mode 100644 index c16c3c69a..000000000 --- a/vendor/github.com/minio/minio-go/api-put-object-common.go +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "math" - "os" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// Verify if reader is *minio.Object -func isObject(reader io.Reader) (ok bool) { - _, ok = reader.(*Object) - return -} - -// Verify if reader is a generic ReaderAt -func isReadAt(reader io.Reader) (ok bool) { - _, ok = reader.(io.ReaderAt) - if ok { - var v *os.File - v, ok = reader.(*os.File) - if ok { - // Stdin, Stdout and Stderr all have *os.File type - // which happen to also be io.ReaderAt compatible - // we need to add special conditions for them to - // be ignored by this function. - for _, f := range []string{ - "/dev/stdin", - "/dev/stdout", - "/dev/stderr", - } { - if f == v.Name() { - ok = false - break - } - } - } - } - return -} - -// optimalPartInfo - calculate the optimal part info for a given -// object size. -// -// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible -// object storage it will have the following parameters as constants. -// -// maxPartsCount - 10000 -// minPartSize - 64MiB -// maxMultipartPutObjectSize - 5TiB -// -func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) { - // object size is '-1' set it to 5TiB. - if objectSize == -1 { - objectSize = maxMultipartPutObjectSize - } - // object size is larger than supported maximum. - if objectSize > maxMultipartPutObjectSize { - err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "") - return - } - // Use floats for part size for all calculations to avoid - // overflows during float64 to int64 conversions. - partSizeFlt := math.Ceil(float64(objectSize / maxPartsCount)) - partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize - // Total parts count. - totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt)) - // Part size. - partSize = int64(partSizeFlt) - // Last part size. - lastPartSize = objectSize - int64(totalPartsCount-1)*partSize - return totalPartsCount, partSize, lastPartSize, nil -} - -// getUploadID - fetch upload id if already present for an object name -// or initiate a new request to fetch a new upload id. -func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return "", err - } - - // Initiate multipart upload for an object. 
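To make the part-size arithmetic in optimalPartInfo above concrete, here is a standalone re-derivation using the constants its comment documents (10000 parts maximum, 64 MiB minimum part size, 5 TiB ceiling). partPlan is a hypothetical name and assumes import "math":

// partPlan mirrors optimalPartInfo for a known object size. Note the
// integer division before the float conversion, exactly as in the original.
func partPlan(objectSize int64) (parts int, partSize, lastPart int64) {
	const (
		maxPartsCount = 10000
		minPartSize   = 64 << 20 // 64 MiB
	)
	f := math.Ceil(float64(objectSize / maxPartsCount))
	f = math.Ceil(f/minPartSize) * minPartSize
	parts = int(math.Ceil(float64(objectSize) / f))
	partSize = int64(f)
	lastPart = objectSize - int64(parts-1)*partSize
	return
}

// The unknown-size case plans for the 5 TiB maximum:
// partPlan(5 << 40) => 9103 parts of 576 MiB each, with a 128 MiB final part.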
- initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts) - if err != nil { - return "", err - } - return initMultipartUploadResult.UploadID, nil -} diff --git a/vendor/github.com/minio/minio-go/api-put-object-context.go b/vendor/github.com/minio/minio-go/api-put-object-context.go deleted file mode 100644 index ff4663e2f..000000000 --- a/vendor/github.com/minio/minio-go/api-put-object-context.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" -) - -// PutObjectWithContext - Identical to PutObject call, but accepts context to facilitate request cancellation. -func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, - opts PutObjectOptions) (n int64, err error) { - err = opts.validate() - if err != nil { - return 0, err - } - return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts) -} diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go deleted file mode 100644 index 21322ef6a..000000000 --- a/vendor/github.com/minio/minio-go/api-put-object-copy.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017, 2018 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "io/ioutil" - "net/http" - - "github.com/minio/minio-go/pkg/encrypt" -) - -// CopyObject - copy a source object into a new object -func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error { - return c.CopyObjectWithProgress(dst, src, nil) -} - -// CopyObjectWithProgress - copy a source object into a new object, optionally takes -// progress bar input to notify current progress. -func (c Client) CopyObjectWithProgress(dst DestinationInfo, src SourceInfo, progress io.Reader) error { - header := make(http.Header) - for k, v := range src.Headers { - header[k] = v - } - - var err error - var size int64 - // If progress bar is specified, size should be requested as well initiate a StatObject request. 
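A sketch of driving the server-side copy above. NewSourceInfo and NewDestinationInfo were this generation's constructors for SourceInfo and DestinationInfo; treat the exact signatures as an assumption, and the bucket and object names as placeholders:

func archiveBackup(c *minio.Client) error {
	// nil encryption on both ends; CopyObject performs the copy server-side
	// (the source travels as a copy-source header) rather than moving bytes
	// through the client.
	src := minio.NewSourceInfo("pbm-backups", "2024-05-27.tar", nil)
	dst, err := minio.NewDestinationInfo("pbm-archive", "2024-05-27.tar", nil, nil)
	if err != nil {
		return err
	}
	return c.CopyObject(dst, src)
}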
- if progress != nil { - size, _, _, err = src.getProps(c) - if err != nil { - return err - } - } - - if src.encryption != nil { - encrypt.SSECopy(src.encryption).Marshal(header) - } - - if dst.encryption != nil { - dst.encryption.Marshal(header) - } - for k, v := range dst.getUserMetaHeadersMap(true) { - header.Set(k, v) - } - - resp, err := c.executeMethod(context.Background(), "PUT", requestMetadata{ - bucketName: dst.bucket, - objectName: dst.object, - customHeader: header, - }) - if err != nil { - return err - } - defer closeResponse(resp) - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, dst.bucket, dst.object) - } - - // Update the progress properly after successful copy. - if progress != nil { - io.CopyN(ioutil.Discard, progress, size) - } - - return nil -} diff --git a/vendor/github.com/minio/minio-go/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/api-put-object-file-context.go deleted file mode 100644 index 140a9c069..000000000 --- a/vendor/github.com/minio/minio-go/api-put-object-file-context.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "mime" - "os" - "path/filepath" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// FPutObjectWithContext - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. -func (c Client) FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Open the referenced file. - fileReader, err := os.Open(filePath) - // If any error fail quickly here. - if err != nil { - return 0, err - } - defer fileReader.Close() - - // Save the file stat. - fileStat, err := fileReader.Stat() - if err != nil { - return 0, err - } - - // Save the file size. - fileSize := fileStat.Size() - - // Set contentType based on filepath extension if not given or default - // value of "application/octet-stream" if the extension has no associated type. - if opts.ContentType == "" { - if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" { - opts.ContentType = "application/octet-stream" - } - } - return c.PutObjectWithContext(ctx, bucketName, objectName, fileReader, fileSize, opts) -} diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go deleted file mode 100644 index 7c8e05117..000000000 --- a/vendor/github.com/minio/minio-go/api-put-object-file.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" -) - -// FPutObject - Create an object in a bucket, with contents from file at filePath -func (c Client) FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) { - return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) -} diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go deleted file mode 100644 index db92520e8..000000000 --- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go +++ /dev/null @@ -1,372 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "runtime/debug" - "sort" - "strconv" - "strings" - - "github.com/minio/minio-go/pkg/encrypt" - "github.com/minio/minio-go/pkg/s3utils" -) - -func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, - opts PutObjectOptions) (n int64, err error) { - n, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) - if err != nil { - errResp := ToErrorResponse(err) - // Verify if multipart functionality is not available, if not - // fall back to single PutObject operation. - if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { - // Verify if size of reader is greater than '5GiB'. - if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Fall back to uploading as single PutObject operation. - return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) - } - } - return n, err -} - -func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Total data read and written to server. should be equal to - // 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. 
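Usage sketch for FPutObject above; the path and bucket are hypothetical. With ContentType left empty, the extension lookup in FPutObjectWithContext applies, falling back to application/octet-stream for unknown extensions:

func uploadDump(c *minio.Client) error {
	n, err := c.FPutObject("pbm-backups", "dump.tar.gz", "/tmp/dump.tar.gz",
		minio.PutObjectOptions{}) // ContentType inferred from ".gz" where the platform knows it
	if err != nil {
		return err
	}
	log.Printf("uploaded %d bytes", n)
	return nil
}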
- var complMultipartUpload completeMultipartUpload - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, _, err := optimalPartInfo(-1) - if err != nil { - return 0, err - } - - // Initiate a new multipart upload. - uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return 0, err - } - - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Part number always starts with '1'. - partNumber := 1 - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Create a buffer. - buf := make([]byte, partSize) - defer debug.FreeOSMemory() - - for partNumber <= totalPartsCount { - // Choose hash algorithms to be calculated by hashCopyN, - // avoid sha256 with non-v4 signature request or - // HTTPS connection. - hashAlgos, hashSums := c.hashMaterials() - - length, rErr := io.ReadFull(reader, buf) - if rErr == io.EOF { - break - } - if rErr != nil && rErr != io.ErrUnexpectedEOF { - return 0, rErr - } - - // Calculates hash sums while copying partSize bytes into cw. - for k, v := range hashAlgos { - v.Write(buf[:length]) - hashSums[k] = v.Sum(nil) - } - - // Update progress reader appropriately to the latest offset - // as we read from the source. - rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) - - // Checksums.. - var ( - md5Base64 string - sha256Hex string - ) - if hashSums["md5"] != nil { - md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) - } - if hashSums["sha256"] != nil { - sha256Hex = hex.EncodeToString(hashSums["sha256"]) - } - - // Proceed to upload the part. - var objPart ObjectPart - objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, - md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption) - if err != nil { - return totalUploadedSize, err - } - - // Save successfully uploaded part metadata. - partsInfo[partNumber] = objPart - - // Save successfully uploaded size. - totalUploadedSize += int64(length) - - // Increment part number. - partNumber++ - - // For unknown size, Read EOF we break away. - // We do not have to upload till totalPartsCount. - if rErr == io.EOF { - break - } - } - - // Loop over total uploaded parts to save them in - // Parts array before completing the multipart request. - for i := 1; i < partNumber; i++ { - part, ok := partsInfo[i] - if !ok { - return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) - } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { - return totalUploadedSize, err - } - - // Return final size. - return totalUploadedSize, nil -} - -// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. -func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return initiateMultipartUploadResult{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return initiateMultipartUploadResult{}, err - } - - // Initialize url queries. 
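Aside: the read loop above leans on io.ReadFull's contract, which is worth spelling out: io.EOF means zero bytes were read, i.e. the stream ended exactly on a part boundary, while io.ErrUnexpectedEOF signals a short final part. A tiny standard-library illustration, assumed to sit inside a main with fmt, io, and strings imported:

buf := make([]byte, 8)
r := strings.NewReader("hello") // 5 bytes, shorter than the buffer

n, err := io.ReadFull(r, buf)
fmt.Println(n, err) // 5 unexpected EOF -> upload buf[:n] as the final part

n, err = io.ReadFull(r, buf)
fmt.Println(n, err) // 0 EOF -> nothing left; the loop above breaks here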
- urlValues := make(url.Values) - urlValues.Set("uploads", "") - - // Set ContentType header. - customHeader := opts.Header() - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: customHeader, - } - - // Execute POST on an objectName to initiate multipart upload. - resp, err := c.executeMethod(ctx, "POST", reqMetadata) - defer closeResponse(resp) - if err != nil { - return initiateMultipartUploadResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - // Decode xml for new multipart upload. - initiateMultipartUploadResult := initiateMultipartUploadResult{} - err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) - if err != nil { - return initiateMultipartUploadResult, err - } - return initiateMultipartUploadResult, nil -} - -// uploadPart - Uploads a part in a multipart upload. -func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, - partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectPart{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectPart{}, err - } - if size > maxPartSize { - return ObjectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName) - } - if size <= -1 { - return ObjectPart{}, ErrEntityTooSmall(size, bucketName, objectName) - } - if partNumber <= 0 { - return ObjectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.") - } - if uploadID == "" { - return ObjectPart{}, ErrInvalidArgument("UploadID cannot be empty.") - } - - // Get resources properly escaped and lined up before using them in http request. - urlValues := make(url.Values) - // Set part number. - urlValues.Set("partNumber", strconv.Itoa(partNumber)) - // Set upload id. - urlValues.Set("uploadId", uploadID) - - // Set encryption headers, if any. - customHeader := make(http.Header) - // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html - // Server-side encryption is supported by the S3 Multipart Upload actions. - // Unless you are using a customer-provided encryption key, you don't need - // to specify the encryption parameters in each UploadPart request. - if sse != nil && sse.Type() == encrypt.SSEC { - sse.Marshal(customHeader) - } - - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Base64: md5Base64, - contentSHA256Hex: sha256Hex, - } - - // Execute PUT on each part. - resp, err := c.executeMethod(ctx, "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return ObjectPart{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - // Once successfully uploaded, return completed part. - objPart := ObjectPart{} - objPart.Size = size - objPart.PartNumber = partNumber - // Trim off the odd double quotes from ETag in the beginning and end. 
- objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") - objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"") - return objPart, nil -} - -// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. -func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, - complete completeMultipartUpload) (completeMultipartUploadResult, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return completeMultipartUploadResult{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return completeMultipartUploadResult{}, err - } - - // Initialize url queries. - urlValues := make(url.Values) - urlValues.Set("uploadId", uploadID) - // Marshal complete multipart body. - completeMultipartUploadBytes, err := xml.Marshal(complete) - if err != nil { - return completeMultipartUploadResult{}, err - } - - // Instantiate all the complete multipart buffer. - completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentBody: completeMultipartUploadBuffer, - contentLength: int64(len(completeMultipartUploadBytes)), - contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), - } - - // Execute POST to complete multipart upload for an objectName. - resp, err := c.executeMethod(ctx, "POST", reqMetadata) - defer closeResponse(resp) - if err != nil { - return completeMultipartUploadResult{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - // Read resp.Body into a []bytes to parse for Error response inside the body - var b []byte - b, err = ioutil.ReadAll(resp.Body) - if err != nil { - return completeMultipartUploadResult{}, err - } - // Decode completed multipart upload response on success. - completeMultipartUploadResult := completeMultipartUploadResult{} - err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult) - if err != nil { - // xml parsing failure due to presence an ill-formed xml fragment - return completeMultipartUploadResult, err - } else if completeMultipartUploadResult.Bucket == "" { - // xml's Decode method ignores well-formed xml that don't apply to the type of value supplied. - // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values - // of the members. - - // Decode completed multipart upload response on failure - completeMultipartUploadErr := ErrorResponse{} - err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr) - if err != nil { - // xml parsing failure due to presence an ill-formed xml fragment - return completeMultipartUploadResult, err - } - return completeMultipartUploadResult, completeMultipartUploadErr - } - return completeMultipartUploadResult, nil -} diff --git a/vendor/github.com/minio/minio-go/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/api-put-object-streaming.go deleted file mode 100644 index 211d1c23c..000000000 --- a/vendor/github.com/minio/minio-go/api-put-object-streaming.go +++ /dev/null @@ -1,417 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "fmt" - "io" - "net/http" - "sort" - "strings" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// putObjectMultipartStream - upload a large object using -// multipart upload and streaming signature for signing payload. -// Comprehensive put object operation involving multipart uploads. -// -// Following code handles these types of readers. -// -// - *minio.Object -// - Any reader which has a method 'ReadAt()' -// -func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string, - reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { - - if !isObject(reader) && isReadAt(reader) { - // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader. - n, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts) - } else { - n, err = c.putObjectMultipartStreamNoChecksum(ctx, bucketName, objectName, reader, size, opts) - } - if err != nil { - errResp := ToErrorResponse(err) - // Verify if multipart functionality is not available, if not - // fall back to single PutObject operation. - if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { - // Verify if size of reader is greater than '5GiB'. - if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) - } - // Fall back to uploading as single PutObject operation. - return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) - } - } - return n, err -} - -// uploadedPartRes - the response received from a part upload. -type uploadedPartRes struct { - Error error // Any error encountered while uploading the part. - PartNum int // Number of the part uploaded. - Size int64 // Size of the part uploaded. - Part *ObjectPart -} - -type uploadPartReq struct { - PartNum int // Number of the part uploaded. - Part *ObjectPart // Size of the part uploaded. -} - -// putObjectMultipartFromReadAt - Uploads files bigger than 64MiB. -// Supports all readers which implements io.ReaderAt interface -// (ReadAt method). -// -// NOTE: This function is meant to be used for all readers which -// implement io.ReaderAt which allows us for resuming multipart -// uploads but reading at an offset, which would avoid re-read the -// data which was already uploaded. Internally this function uses -// temporary files for staging all the data, these temporary files are -// cleaned automatically when the caller i.e http client closes the -// stream after uploading all the contents successfully. -func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string, - reader io.ReaderAt, size int64, opts PutObjectOptions) (n int64, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Calculate the optimal parts info for a given size. 
- totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) - if err != nil { - return 0, err - } - - // Initiate a new multipart upload. - uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return 0, err - } - - // Aborts the multipart upload in progress, if the - // function returns any error, since we do not resume - // we should purge the parts which have been uploaded - // to relinquish storage space. - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Total data read and written to server. should be equal to 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Declare a channel that sends the next part number to be uploaded. - // Buffered to 10000 because thats the maximum number of parts allowed - // by S3. - uploadPartsCh := make(chan uploadPartReq, 10000) - - // Declare a channel that sends back the response of a part upload. - // Buffered to 10000 because thats the maximum number of parts allowed - // by S3. - uploadedPartsCh := make(chan uploadedPartRes, 10000) - - // Used for readability, lastPartNumber is always totalPartsCount. - lastPartNumber := totalPartsCount - - // Send each part number to the channel to be processed. - for p := 1; p <= totalPartsCount; p++ { - uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil} - } - close(uploadPartsCh) - // Receive each part number from the channel allowing three parallel uploads. - for w := 1; w <= opts.getNumThreads(); w++ { - go func(partSize int64) { - // Each worker will draw from the part channel and upload in parallel. - for uploadReq := range uploadPartsCh { - - // If partNumber was not uploaded we calculate the missing - // part offset and size. For all other part numbers we - // calculate offset based on multiples of partSize. - readOffset := int64(uploadReq.PartNum-1) * partSize - - // As a special case if partNumber is lastPartNumber, we - // calculate the offset based on the last part size. - if uploadReq.PartNum == lastPartNumber { - readOffset = (size - lastPartSize) - partSize = lastPartSize - } - - // Get a section reader on a particular offset. - sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) - - // Proceed to upload the part. - var objPart ObjectPart - objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, - sectionReader, uploadReq.PartNum, - "", "", partSize, opts.ServerSideEncryption) - if err != nil { - uploadedPartsCh <- uploadedPartRes{ - Size: 0, - Error: err, - } - // Exit the goroutine. - return - } - - // Save successfully uploaded part metadata. - uploadReq.Part = &objPart - - // Send successful part info through the channel. - uploadedPartsCh <- uploadedPartRes{ - Size: objPart.Size, - PartNum: uploadReq.PartNum, - Part: uploadReq.Part, - Error: nil, - } - } - }(partSize) - } - - // Gather the responses as they occur and update any - // progress bar. - for u := 1; u <= totalPartsCount; u++ { - uploadRes := <-uploadedPartsCh - if uploadRes.Error != nil { - return totalUploadedSize, uploadRes.Error - } - // Retrieve each uploaded part and store it to be completed. - // part, ok := partsInfo[uploadRes.PartNum] - part := uploadRes.Part - if part == nil { - return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum)) - } - // Update the totalUploadedSize. 
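The fan-out used above, reduced to its skeleton under stated assumptions: totalParts, numWorkers, and uploadOnePart are placeholders, and both channels are buffered to the part count, as in the original, so workers never block on send:

func uploadAll(totalParts, numWorkers int, uploadOnePart func(int) error) error {
	jobs := make(chan int, totalParts) // part numbers to upload
	results := make(chan error, totalParts)

	for p := 1; p <= totalParts; p++ {
		jobs <- p
	}
	close(jobs)

	for w := 0; w < numWorkers; w++ {
		go func() {
			for part := range jobs {
				results <- uploadOnePart(part)
			}
		}()
	}

	// Drain one result per part; the first failure wins, as in the gather loop above.
	for i := 0; i < totalParts; i++ {
		if err := <-results; err != nil {
			return err
		}
	}
	return nil
}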
- totalUploadedSize += uploadRes.Size - // Store the parts to be completed in order. - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - // Verify if we uploaded all the data. - if totalUploadedSize != size { - return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) - if err != nil { - return totalUploadedSize, err - } - - // Return final size. - return totalUploadedSize, nil -} - -func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketName, objectName string, - reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) - if err != nil { - return 0, err - } - // Initiates a new multipart request - uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return 0, err - } - - // Aborts the multipart upload if the function returns - // any error, since we do not resume we should purge - // the parts which have been uploaded to relinquish - // storage space. - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Total data read and written to server. should be equal to 'size' at the end of the call. - var totalUploadedSize int64 - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Part number always starts with '1'. - var partNumber int - for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { - // Update progress reader appropriately to the latest offset - // as we read from the source. - hookReader := newHook(reader, opts.Progress) - - // Proceed to upload the part. - if partNumber == totalPartsCount { - partSize = lastPartSize - } - var objPart ObjectPart - objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, - io.LimitReader(hookReader, partSize), - partNumber, "", "", partSize, opts.ServerSideEncryption) - if err != nil { - return totalUploadedSize, err - } - - // Save successfully uploaded part metadata. - partsInfo[partNumber] = objPart - - // Save successfully uploaded size. - totalUploadedSize += partSize - } - - // Verify if we uploaded all the data. - if size > 0 { - if totalUploadedSize != size { - return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) - } - } - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Loop over total uploaded parts to save them in - // Parts array before completing the multipart request. - for i := 1; i < partNumber; i++ { - part, ok := partsInfo[i] - if !ok { - return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) - } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - // Sort all completed parts. 
- sort.Sort(completedParts(complMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) - if err != nil { - return totalUploadedSize, err - } - - // Return final size. - return totalUploadedSize, nil -} - -// putObjectNoChecksum special function used Google Cloud Storage. This special function -// is used for Google Cloud Storage since Google's multipart API is not S3 compatible. -func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Size -1 is only supported on Google Cloud Storage, we error - // out in all other situations. - if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) { - return 0, ErrEntityTooSmall(size, bucketName, objectName) - } - if size > 0 { - if isReadAt(reader) && !isObject(reader) { - seeker, _ := reader.(io.Seeker) - offset, err := seeker.Seek(0, io.SeekCurrent) - if err != nil { - return 0, ErrInvalidArgument(err.Error()) - } - reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) - } - } - - // Update progress reader appropriately to the latest offset as we - // read from the source. - readSeeker := newHook(reader, opts.Progress) - - // This function does not calculate sha256 and md5sum for payload. - // Execute put object. - st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, "", "", size, opts) - if err != nil { - return 0, err - } - if st.Size != size { - return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) - } - return size, nil -} - -// putObjectDo - executes the put object http operation. -// NOTE: You must have WRITE permissions on a bucket to add an object to it. -func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (ObjectInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectInfo{}, err - } - // Set headers. - customHeader := opts.Header() - - // Populate request metadata. - reqMetadata := requestMetadata{ - bucketName: bucketName, - objectName: objectName, - customHeader: customHeader, - contentBody: reader, - contentLength: size, - contentMD5Base64: md5Base64, - contentSHA256Hex: sha256Hex, - } - - // Execute PUT an objectName. - resp, err := c.executeMethod(ctx, "PUT", reqMetadata) - defer closeResponse(resp) - if err != nil { - return ObjectInfo{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - var objInfo ObjectInfo - // Trim off the odd double quotes from ETag in the beginning and end. - objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") - objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"") - // A success here means data was written to server successfully. - objInfo.Size = size - - // Return here. 
- return objInfo, nil -} diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go deleted file mode 100644 index 0330cd99d..000000000 --- a/vendor/github.com/minio/minio-go/api-put-object.go +++ /dev/null @@ -1,267 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "runtime/debug" - "sort" - - "github.com/minio/minio-go/pkg/encrypt" - "github.com/minio/minio-go/pkg/s3utils" - "golang.org/x/net/http/httpguts" -) - -// PutObjectOptions represents options specified by user for PutObject call -type PutObjectOptions struct { - UserMetadata map[string]string - Progress io.Reader - ContentType string - ContentEncoding string - ContentDisposition string - ContentLanguage string - CacheControl string - ServerSideEncryption encrypt.ServerSide - NumThreads uint - StorageClass string - WebsiteRedirectLocation string -} - -// getNumThreads - gets the number of threads to be used in the multipart -// put object operation -func (opts PutObjectOptions) getNumThreads() (numThreads int) { - if opts.NumThreads > 0 { - numThreads = int(opts.NumThreads) - } else { - numThreads = totalWorkers - } - return -} - -// Header - constructs the headers from metadata entered by user in -// PutObjectOptions struct -func (opts PutObjectOptions) Header() (header http.Header) { - header = make(http.Header) - - if opts.ContentType != "" { - header["Content-Type"] = []string{opts.ContentType} - } else { - header["Content-Type"] = []string{"application/octet-stream"} - } - if opts.ContentEncoding != "" { - header["Content-Encoding"] = []string{opts.ContentEncoding} - } - if opts.ContentDisposition != "" { - header["Content-Disposition"] = []string{opts.ContentDisposition} - } - if opts.ContentLanguage != "" { - header["Content-Language"] = []string{opts.ContentLanguage} - } - if opts.CacheControl != "" { - header["Cache-Control"] = []string{opts.CacheControl} - } - if opts.ServerSideEncryption != nil { - opts.ServerSideEncryption.Marshal(header) - } - if opts.StorageClass != "" { - header[amzStorageClass] = []string{opts.StorageClass} - } - if opts.WebsiteRedirectLocation != "" { - header[amzWebsiteRedirectLocation] = []string{opts.WebsiteRedirectLocation} - } - for k, v := range opts.UserMetadata { - if !isAmzHeader(k) && !isStandardHeader(k) && !isStorageClassHeader(k) { - header["X-Amz-Meta-"+k] = []string{v} - } else { - header[k] = []string{v} - } - } - return -} - -// validate() checks if the UserMetadata map has standard headers or and raises an error if so. 
-func (opts PutObjectOptions) validate() (err error) { - for k, v := range opts.UserMetadata { - if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) { - return ErrInvalidArgument(k + " unsupported user defined metadata name") - } - if !httpguts.ValidHeaderFieldValue(v) { - return ErrInvalidArgument(v + " unsupported user defined metadata value") - } - } - return nil -} - -// completedParts is a collection of parts sortable by their part numbers. -// used for sorting the uploaded parts before completing the multipart request. -type completedParts []CompletePart - -func (a completedParts) Len() int { return len(a) } -func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } - -// PutObject creates an object in a bucket. -// -// You must have WRITE permissions on a bucket to create an object. -// -// - For size smaller than 64MiB PutObject automatically does a -// single atomic Put operation. -// - For size larger than 64MiB PutObject automatically does a -// multipart Put operation. -// - For size input as -1 PutObject does a multipart Put operation -// until input stream reaches EOF. Maximum object size that can -// be uploaded through this operation will be 5TiB. -func (c Client) PutObject(bucketName, objectName string, reader io.Reader, objectSize int64, - opts PutObjectOptions) (n int64, err error) { - return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, objectSize, opts) -} - -func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { - // Check for largest object size allowed. - if size > int64(maxMultipartPutObjectSize) { - return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) - } - - // NOTE: Streaming signature is not supported by GCS. - if s3utils.IsGoogleEndpoint(*c.endpointURL) { - // Do not compute MD5 for Google Cloud Storage. - return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) - } - - if c.overrideSignerType.IsV2() { - if size >= 0 && size < minPartSize { - return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) - } - return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) - } - if size < 0 { - return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) - } - - if size < minPartSize { - return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) - } - // For all sizes greater than 64MiB do multipart. - return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) -} - -func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { - // Input validation. - if err = s3utils.CheckValidBucketName(bucketName); err != nil { - return 0, err - } - if err = s3utils.CheckValidObjectName(objectName); err != nil { - return 0, err - } - - // Total data read and written to server. should be equal to - // 'size' at the end of the call. - var totalUploadedSize int64 - - // Complete multipart upload. - var complMultipartUpload completeMultipartUpload - - // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, _, err := optimalPartInfo(-1) - if err != nil { - return 0, err - } - // Initiate a new multipart upload. 
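In caller terms, the dispatch in putObjectCommon above looks like this hedged sketch; the reader, object name, and metadata key are hypothetical:

func streamOplog(c *minio.Client, reader io.Reader) error {
	// size -1 takes the stream-until-EOF multipart path (5 TiB ceiling);
	// a known size under 64 MiB would take the single-PUT path instead.
	_, err := c.PutObject("pbm-backups", "oplog.bson", reader, -1,
		minio.PutObjectOptions{
			ContentType:  "application/octet-stream",
			UserMetadata: map[string]string{"Pbm-Opid": "424242"}, // sent as X-Amz-Meta-* (see Header above)
		})
	return err
}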
- uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) - if err != nil { - return 0, err - } - - defer func() { - if err != nil { - c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) - } - }() - - // Part number always starts with '1'. - partNumber := 1 - - // Initialize parts uploaded map. - partsInfo := make(map[int]ObjectPart) - - // Create a buffer. - buf := make([]byte, partSize) - defer debug.FreeOSMemory() - - for partNumber <= totalPartsCount { - length, rErr := io.ReadFull(reader, buf) - if rErr == io.EOF && partNumber > 1 { - break - } - if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { - return 0, rErr - } - // Update progress reader appropriately to the latest offset - // as we read from the source. - rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) - - // Proceed to upload the part. - var objPart ObjectPart - objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, - "", "", int64(length), opts.ServerSideEncryption) - if err != nil { - return totalUploadedSize, err - } - - // Save successfully uploaded part metadata. - partsInfo[partNumber] = objPart - - // Save successfully uploaded size. - totalUploadedSize += int64(length) - - // Increment part number. - partNumber++ - - // For unknown size, Read EOF we break away. - // We do not have to upload till totalPartsCount. - if rErr == io.EOF { - break - } - } - - // Loop over total uploaded parts to save them in - // Parts array before completing the multipart request. - for i := 1; i < partNumber; i++ { - part, ok := partsInfo[i] - if !ok { - return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) - } - complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ - ETag: part.ETag, - PartNumber: part.PartNumber, - }) - } - - // Sort all completed parts. - sort.Sort(completedParts(complMultipartUpload.Parts)) - if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { - return totalUploadedSize, err - } - - // Return final size. - return totalUploadedSize, nil -} diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go deleted file mode 100644 index f33df4dfc..000000000 --- a/vendor/github.com/minio/minio-go/api-remove.go +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/xml" - "io" - "net/http" - "net/url" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// RemoveBucket deletes the bucket name. -// -// All objects (including all object versions and delete markers). -// in the bucket must be deleted before successfully attempting this request. -func (c Client) RemoveBucket(bucketName string) error { - // Input validation. 
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-	// Execute DELETE on bucket.
-	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
-		bucketName:       bucketName,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp != nil {
-		if resp.StatusCode != http.StatusNoContent {
-			return httpRespToErrorResponse(resp, bucketName, "")
-		}
-	}
-
-	// Remove the location from cache on a successful delete.
-	c.bucketLocCache.Delete(bucketName)
-
-	return nil
-}
-
-// RemoveObject removes an object from a bucket.
-func (c Client) RemoveObject(bucketName, objectName string) error {
-	// Input validation.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		return err
-	}
-	if err := s3utils.CheckValidObjectName(objectName); err != nil {
-		return err
-	}
-	// Execute DELETE on objectName.
-	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
-		bucketName:       bucketName,
-		objectName:       objectName,
-		contentSHA256Hex: emptySHA256Hex,
-	})
-	defer closeResponse(resp)
-	if err != nil {
-		return err
-	}
-	if resp != nil {
-		// if some unexpected error happened and max retry is reached, we want to let client know
-		if resp.StatusCode != http.StatusNoContent {
-			return httpRespToErrorResponse(resp, bucketName, objectName)
-		}
-	}
-
-	// DeleteObject always responds with http '204' even for
-	// objects which do not exist. So no need to handle them
-	// specifically.
-	return nil
-}
-
-// RemoveObjectError - container of Multi Delete S3 API error
-type RemoveObjectError struct {
-	ObjectName string
-	Err        error
-}
-
-// generateRemoveMultiObjectsRequest - generate the XML request for the multi-object delete request
-func generateRemoveMultiObjectsRequest(objects []string) []byte {
-	rmObjects := []deleteObject{}
-	for _, obj := range objects {
-		rmObjects = append(rmObjects, deleteObject{Key: obj})
-	}
-	xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true})
-	return xmlBytes
-}
-
-// processRemoveMultiObjectsResponse - parse the multi-object delete web service response
-// and return the success/failure result status for each object
-func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) {
-	// Parse multi delete XML response
-	rmResult := &deleteMultiObjectsResult{}
-	err := xmlDecoder(body, rmResult)
-	if err != nil {
-		errorCh <- RemoveObjectError{ObjectName: "", Err: err}
-		return
-	}
-
-	// Report each deletion that returned an error.
-	for _, obj := range rmResult.UnDeletedObjects {
-		errorCh <- RemoveObjectError{
-			ObjectName: obj.Key,
-			Err: ErrorResponse{
-				Code:    obj.Code,
-				Message: obj.Message,
-			},
-		}
-	}
-}
-
-// RemoveObjectsWithContext - Identical to RemoveObjects call, but accepts context to facilitate request cancellation.
-func (c Client) RemoveObjectsWithContext(ctx context.Context, bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
-	errorCh := make(chan RemoveObjectError, 1)
-
-	// Validate if bucket name is valid.
-	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
-		defer close(errorCh)
-		errorCh <- RemoveObjectError{
-			Err: err,
-		}
-		return errorCh
-	}
-	// Validate objects channel to be properly allocated.
- if objectsCh == nil { - defer close(errorCh) - errorCh <- RemoveObjectError{ - Err: ErrInvalidArgument("Objects channel cannot be nil"), - } - return errorCh - } - - // Generate and call MultiDelete S3 requests based on entries received from objectsCh - go func(errorCh chan<- RemoveObjectError) { - maxEntries := 1000 - finish := false - urlValues := make(url.Values) - urlValues.Set("delete", "") - - // Close error channel when Multi delete finishes. - defer close(errorCh) - - // Loop over entries by 1000 and call MultiDelete requests - for { - if finish { - break - } - count := 0 - var batch []string - - // Try to gather 1000 entries - for object := range objectsCh { - batch = append(batch, object) - if count++; count >= maxEntries { - break - } - } - if count == 0 { - // Multi Objects Delete API doesn't accept empty object list, quit immediately - break - } - if count < maxEntries { - // We didn't have 1000 entries, so this is the last batch - finish = true - } - - // Generate remove multi objects XML request - removeBytes := generateRemoveMultiObjectsRequest(batch) - // Execute GET on bucket to list objects. - resp, err := c.executeMethod(ctx, "POST", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - contentBody: bytes.NewReader(removeBytes), - contentLength: int64(len(removeBytes)), - contentMD5Base64: sumMD5Base64(removeBytes), - contentSHA256Hex: sum256Hex(removeBytes), - }) - if resp != nil { - if resp.StatusCode != http.StatusOK { - e := httpRespToErrorResponse(resp, bucketName, "") - errorCh <- RemoveObjectError{ObjectName: "", Err: e} - } - } - if err != nil { - for _, b := range batch { - errorCh <- RemoveObjectError{ObjectName: b, Err: err} - } - continue - } - - // Process multiobjects remove xml response - processRemoveMultiObjectsResponse(resp.Body, batch, errorCh) - - closeResponse(resp) - } - }(errorCh) - return errorCh -} - -// RemoveObjects removes multiple objects from a bucket. -// The list of objects to remove are received from objectsCh. -// Remove failures are sent back via error channel. -func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError { - return c.RemoveObjectsWithContext(context.Background(), bucketName, objectsCh) -} - -// RemoveIncompleteUpload aborts an partially uploaded object. -func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - // Find multipart upload ids of the object to be aborted. - uploadIDs, err := c.findUploadIDs(bucketName, objectName) - if err != nil { - return err - } - - for _, uploadID := range uploadIDs { - // abort incomplete multipart upload, based on the upload id passed. - err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID) - if err != nil { - return err - } - } - - return nil -} - -// abortMultipartUpload aborts a multipart upload for the given -// uploadID, all previously uploaded parts are deleted. -func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return err - } - - // Initialize url queries. 
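
// A usage sketch for the channel-based removal API above; names are
// placeholders and the client is assumed to be built elsewhere. Failures
// come back on the returned channel, batched 1000 keys per request.

package example

import (
	"fmt"

	minio "github.com/minio/minio-go"
)

func removeAll(c *minio.Client, bucket string, keys []string) {
	objectsCh := make(chan string)
	go func() {
		defer close(objectsCh)
		for _, k := range keys {
			objectsCh <- k
		}
	}()

	// No messages on the channel means every object was deleted.
	for rErr := range c.RemoveObjects(bucket, objectsCh) {
		fmt.Printf("failed to remove %q: %v\n", rErr.ObjectName, rErr.Err)
	}
}

// The request body built by generateRemoveMultiObjectsRequest is plain XML;
// a sketch with local stand-ins for the unexported types (the real ones are
// defined further down in api-s3-datatypes.go) that prints the wire shape:
//
//	type delObject struct{ Key string }
//	type delMulti struct {
//		XMLName xml.Name    `xml:"Delete"`
//		Quiet   bool
//		Objects []delObject `xml:"Object"`
//	}
//
//	out, _ := xml.MarshalIndent(delMulti{
//		Quiet:   true,
//		Objects: []delObject{{Key: "a.txt"}, {Key: "b.txt"}},
//	}, "", "  ")
//	// <Delete>
//	//   <Quiet>true</Quiet>
//	//   <Object>
//	//     <Key>a.txt</Key>
//	//   </Object>
//	//   <Object>
//	//     <Key>b.txt</Key>
//	//   </Object>
//	// </Delete>
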
- urlValues := make(url.Values) - urlValues.Set("uploadId", uploadID) - - // Execute DELETE on multipart upload. - resp, err := c.executeMethod(ctx, "DELETE", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - return err - } - if resp != nil { - if resp.StatusCode != http.StatusNoContent { - // Abort has no response body, handle it for any errors. - var errorResponse ErrorResponse - switch resp.StatusCode { - case http.StatusNotFound: - // This is needed specifically for abort and it cannot - // be converged into default case. - errorResponse = ErrorResponse{ - Code: "NoSuchUpload", - Message: "The specified multipart upload does not exist.", - BucketName: bucketName, - Key: objectName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - Region: resp.Header.Get("x-amz-bucket-region"), - } - default: - return httpRespToErrorResponse(resp, bucketName, objectName) - } - return errorResponse - } - } - return nil -} diff --git a/vendor/github.com/minio/minio-go/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/api-s3-datatypes.go deleted file mode 100644 index 8d8880c05..000000000 --- a/vendor/github.com/minio/minio-go/api-s3-datatypes.go +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "encoding/xml" - "time" -) - -// listAllMyBucketsResult container for listBuckets response. -type listAllMyBucketsResult struct { - // Container for one or more buckets. - Buckets struct { - Bucket []BucketInfo - } - Owner owner -} - -// owner container for bucket owner information. -type owner struct { - DisplayName string - ID string -} - -// CommonPrefix container for prefix response. -type CommonPrefix struct { - Prefix string -} - -// ListBucketV2Result container for listObjects response version 2. -type ListBucketV2Result struct { - // A response can contain CommonPrefixes only if you have - // specified a delimiter. - CommonPrefixes []CommonPrefix - // Metadata about each object returned. - Contents []ObjectInfo - Delimiter string - - // Encoding type used to encode object keys in the response. - EncodingType string - - // A flag that indicates whether or not ListObjects returned all of the results - // that satisfied the search criteria. - IsTruncated bool - MaxKeys int64 - Name string - - // Hold the token that will be sent in the next request to fetch the next group of keys - NextContinuationToken string - - ContinuationToken string - Prefix string - - // FetchOwner and StartAfter are currently not used - FetchOwner string - StartAfter string -} - -// ListBucketResult container for listObjects response. -type ListBucketResult struct { - // A response can contain CommonPrefixes only if you have - // specified a delimiter. 
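
// A companion sketch for the abort path: the public RemoveIncompleteUpload
// wrapper shown earlier resolves every upload ID for the key and aborts each
// one through this helper. Bucket and key names are placeholders.

package example

import (
	"log"

	minio "github.com/minio/minio-go"
)

func abortStale(c *minio.Client) {
	if err := c.RemoveIncompleteUpload("my-bucket", "big/backup.tar"); err != nil {
		log.Printf("abort failed: %v", err)
	}
}
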
-	CommonPrefixes []CommonPrefix
-	// Metadata about each object returned.
-	Contents  []ObjectInfo
-	Delimiter string
-
-	// Encoding type used to encode object keys in the response.
-	EncodingType string
-
-	// A flag that indicates whether or not ListObjects returned all of the results
-	// that satisfied the search criteria.
-	IsTruncated bool
-	Marker      string
-	MaxKeys     int64
-	Name        string
-
-	// When response is truncated (the IsTruncated element value in
-	// the response is true), you can use the key name in this field
-	// as marker in the subsequent request to get next set of objects.
-	// Object storage lists objects in alphabetical order. Note: This
-	// element is returned only if you have the delimiter request
-	// parameter specified. If the response does not include the NextMarker
-	// and it is truncated, you can use the value of the last Key in
-	// the response as the marker in the subsequent request to get the
-	// next set of object keys.
-	NextMarker string
-	Prefix     string
-}
-
-// ListMultipartUploadsResult container for ListMultipartUploads response
-type ListMultipartUploadsResult struct {
-	Bucket             string
-	KeyMarker          string
-	UploadIDMarker     string `xml:"UploadIdMarker"`
-	NextKeyMarker      string
-	NextUploadIDMarker string `xml:"NextUploadIdMarker"`
-	EncodingType       string
-	MaxUploads         int64
-	IsTruncated        bool
-	Uploads            []ObjectMultipartInfo `xml:"Upload"`
-	Prefix             string
-	Delimiter          string
-	// A response can contain CommonPrefixes only if you specify a delimiter.
-	CommonPrefixes []CommonPrefix
-}
-
-// initiator container for who initiated multipart upload.
-type initiator struct {
-	ID          string
-	DisplayName string
-}
-
-// copyObjectResult container for copy object response.
-type copyObjectResult struct {
-	ETag         string
-	LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
-}
-
-// ObjectPart container for particular part of an object.
-type ObjectPart struct {
-	// Part number identifies the part.
-	PartNumber int
-
-	// Date and time the part was uploaded.
-	LastModified time.Time
-
-	// Entity tag returned when the part was uploaded, usually md5sum
-	// of the part.
-	ETag string
-
-	// Size of the uploaded part data.
-	Size int64
-}
-
-// ListObjectPartsResult container for ListObjectParts response.
-type ListObjectPartsResult struct {
-	Bucket   string
-	Key      string
-	UploadID string `xml:"UploadId"`
-
-	Initiator initiator
-	Owner     owner
-
-	StorageClass         string
-	PartNumberMarker     int
-	NextPartNumberMarker int
-	MaxParts             int
-
-	// Indicates whether the returned list of parts is truncated.
-	IsTruncated bool
-	ObjectParts []ObjectPart `xml:"Part"`
-
-	EncodingType string
-}
-
-// initiateMultipartUploadResult container for InitiateMultiPartUpload
-// response.
-type initiateMultipartUploadResult struct {
-	Bucket   string
-	Key      string
-	UploadID string `xml:"UploadId"`
-}
-
-// completeMultipartUploadResult container for completed multipart
-// upload response.
-type completeMultipartUploadResult struct {
-	Location string
-	Bucket   string
-	Key      string
-	ETag     string
-}
-
-// CompletePart sub container lists individual part numbers and their
-// md5sum, part of completeMultipartUpload.
-type CompletePart struct {
-	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
-
-	// Part number identifies the part.
-	PartNumber int
-	ETag       string
-}
-
-// completeMultipartUpload container for completing multipart upload.
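
// The NextMarker note above encodes an easy-to-miss pagination rule; a
// sketch of the documented fallback over the exported result type (prefer
// NextMarker, else the last key of a truncated page):

package example

import minio "github.com/minio/minio-go"

func nextMarker(res minio.ListBucketResult) string {
	if !res.IsTruncated {
		return "" // final page, nothing more to fetch
	}
	if res.NextMarker != "" {
		return res.NextMarker
	}
	if n := len(res.Contents); n > 0 {
		return res.Contents[n-1].Key
	}
	return ""
}
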
-type completeMultipartUpload struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"` - Parts []CompletePart `xml:"Part"` -} - -// createBucketConfiguration container for bucket configuration. -type createBucketConfiguration struct { - XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"` - Location string `xml:"LocationConstraint"` -} - -// deleteObject container for Delete element in MultiObjects Delete XML request -type deleteObject struct { - Key string - VersionID string `xml:"VersionId,omitempty"` -} - -// deletedObject container for Deleted element in MultiObjects Delete XML response -type deletedObject struct { - Key string - VersionID string `xml:"VersionId,omitempty"` - // These fields are ignored. - DeleteMarker bool - DeleteMarkerVersionID string -} - -// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response -type nonDeletedObject struct { - Key string - Code string - Message string -} - -// deletedMultiObjects container for MultiObjects Delete XML request -type deleteMultiObjects struct { - XMLName xml.Name `xml:"Delete"` - Quiet bool - Objects []deleteObject `xml:"Object"` -} - -// deletedMultiObjectsResult container for MultiObjects Delete XML response -type deleteMultiObjectsResult struct { - XMLName xml.Name `xml:"DeleteResult"` - DeletedObjects []deletedObject `xml:"Deleted"` - UnDeletedObjects []nonDeletedObject `xml:"Error"` -} diff --git a/vendor/github.com/minio/minio-go/api-select.go b/vendor/github.com/minio/minio-go/api-select.go deleted file mode 100644 index 10e1d47d6..000000000 --- a/vendor/github.com/minio/minio-go/api-select.go +++ /dev/null @@ -1,532 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * (C) 2018 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "bytes" - "context" - "encoding/binary" - "encoding/xml" - "errors" - "fmt" - "hash" - "hash/crc32" - "io" - "net/http" - "net/url" - "strings" - - "github.com/minio/minio-go/pkg/encrypt" - "github.com/minio/minio-go/pkg/s3utils" -) - -// CSVFileHeaderInfo - is the parameter for whether to utilize headers. -type CSVFileHeaderInfo string - -// Constants for file header info. -const ( - CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" - CSVFileHeaderInfoIgnore = "IGNORE" - CSVFileHeaderInfoUse = "USE" -) - -// SelectCompressionType - is the parameter for what type of compression is -// present -type SelectCompressionType string - -// Constants for compression types under select API. -const ( - SelectCompressionNONE SelectCompressionType = "NONE" - SelectCompressionGZIP = "GZIP" - SelectCompressionBZIP = "BZIP2" -) - -// CSVQuoteFields - is the parameter for how CSV fields are quoted. -type CSVQuoteFields string - -// Constants for csv quote styles. 
-const ( - CSVQuoteFieldsAlways CSVQuoteFields = "Always" - CSVQuoteFieldsAsNeeded = "AsNeeded" -) - -// QueryExpressionType - is of what syntax the expression is, this should only -// be SQL -type QueryExpressionType string - -// Constants for expression type. -const ( - QueryExpressionTypeSQL QueryExpressionType = "SQL" -) - -// JSONType determines json input serialization type. -type JSONType string - -// Constants for JSONTypes. -const ( - JSONDocumentType JSONType = "DOCUMENT" - JSONLinesType = "LINES" -) - -// ParquetInputOptions parquet input specific options -type ParquetInputOptions struct{} - -// CSVInputOptions csv input specific options -type CSVInputOptions struct { - FileHeaderInfo CSVFileHeaderInfo - RecordDelimiter string - FieldDelimiter string - QuoteCharacter string - QuoteEscapeCharacter string - Comments string -} - -// CSVOutputOptions csv output specific options -type CSVOutputOptions struct { - QuoteFields CSVQuoteFields - RecordDelimiter string - FieldDelimiter string - QuoteCharacter string - QuoteEscapeCharacter string -} - -// JSONInputOptions json input specific options -type JSONInputOptions struct { - Type JSONType -} - -// JSONOutputOptions - json output specific options -type JSONOutputOptions struct { - RecordDelimiter string -} - -// SelectObjectInputSerialization - input serialization parameters -type SelectObjectInputSerialization struct { - CompressionType SelectCompressionType - Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` - CSV *CSVInputOptions `xml:"CSV,omitempty"` - JSON *JSONInputOptions `xml:"JSON,omitempty"` -} - -// SelectObjectOutputSerialization - output serialization parameters. -type SelectObjectOutputSerialization struct { - CSV *CSVOutputOptions `xml:"CSV,omitempty"` - JSON *JSONOutputOptions `xml:"JSON,omitempty"` -} - -// SelectObjectOptions - represents the input select body -type SelectObjectOptions struct { - XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"` - ServerSideEncryption encrypt.ServerSide `xml:"-"` - Expression string - ExpressionType QueryExpressionType - InputSerialization SelectObjectInputSerialization - OutputSerialization SelectObjectOutputSerialization - RequestProgress struct { - Enabled bool - } -} - -// Header returns the http.Header representation of the SelectObject options. -func (o SelectObjectOptions) Header() http.Header { - headers := make(http.Header) - if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { - o.ServerSideEncryption.Marshal(headers) - } - return headers -} - -// SelectObjectType - is the parameter which defines what type of object the -// operation is being performed on. -type SelectObjectType string - -// Constants for input data types. -const ( - SelectObjectTypeCSV SelectObjectType = "CSV" - SelectObjectTypeJSON = "JSON" - SelectObjectTypeParquet = "Parquet" -) - -// preludeInfo is used for keeping track of necessary information from the -// prelude. -type preludeInfo struct { - totalLen uint32 - headerLen uint32 -} - -// SelectResults is used for the streaming responses from the server. -type SelectResults struct { - pipeReader *io.PipeReader - resp *http.Response - stats *StatsMessage - progress *ProgressMessage -} - -// ProgressMessage is a struct for progress xml message. -type ProgressMessage struct { - XMLName xml.Name `xml:"Progress" json:"-"` - StatsMessage -} - -// StatsMessage is a struct for stat xml message. 
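
// Putting the serialization types above together: a sketch of a request
// that reads a gzipped CSV with a header row and emits matching rows as CSV.
// The SQL expression and delimiters are placeholders.

package example

import minio "github.com/minio/minio-go"

func csvSelectOpts() minio.SelectObjectOptions {
	return minio.SelectObjectOptions{
		Expression:     `SELECT s.name FROM S3Object s WHERE s.age > 30`,
		ExpressionType: minio.QueryExpressionTypeSQL,
		InputSerialization: minio.SelectObjectInputSerialization{
			CompressionType: minio.SelectCompressionGZIP,
			CSV: &minio.CSVInputOptions{
				FileHeaderInfo:  minio.CSVFileHeaderInfoUse,
				RecordDelimiter: "\n",
				FieldDelimiter:  ",",
			},
		},
		OutputSerialization: minio.SelectObjectOutputSerialization{
			CSV: &minio.CSVOutputOptions{
				RecordDelimiter: "\n",
				FieldDelimiter:  ",",
			},
		},
	}
}
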
-type StatsMessage struct { - XMLName xml.Name `xml:"Stats" json:"-"` - BytesScanned int64 - BytesProcessed int64 - BytesReturned int64 -} - -// messageType represents the type of message. -type messageType string - -const ( - errorMsg messageType = "error" - commonMsg = "event" -) - -// eventType represents the type of event. -type eventType string - -// list of event-types returned by Select API. -const ( - endEvent eventType = "End" - recordsEvent = "Records" - progressEvent = "Progress" - statsEvent = "Stats" -) - -// contentType represents content type of event. -type contentType string - -const ( - xmlContent contentType = "text/xml" -) - -// SelectObjectContent is a implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API. -func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return nil, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return nil, err - } - - selectReqBytes, err := xml.Marshal(opts) - if err != nil { - return nil, err - } - - urlValues := make(url.Values) - urlValues.Set("select", "") - urlValues.Set("select-type", "2") - - // Execute POST on bucket/object. - resp, err := c.executeMethod(ctx, "POST", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, - customHeader: opts.Header(), - contentMD5Base64: sumMD5Base64(selectReqBytes), - contentSHA256Hex: sum256Hex(selectReqBytes), - contentBody: bytes.NewReader(selectReqBytes), - contentLength: int64(len(selectReqBytes)), - }) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp, bucketName, "") - } - - pipeReader, pipeWriter := io.Pipe() - streamer := &SelectResults{ - resp: resp, - stats: &StatsMessage{}, - progress: &ProgressMessage{}, - pipeReader: pipeReader, - } - streamer.start(pipeWriter) - return streamer, nil -} - -// Close - closes the underlying response body and the stream reader. -func (s *SelectResults) Close() error { - defer closeResponse(s.resp) - return s.pipeReader.Close() -} - -// Read - is a reader compatible implementation for SelectObjectContent records. -func (s *SelectResults) Read(b []byte) (n int, err error) { - return s.pipeReader.Read(b) -} - -// Stats - information about a request's stats when processing is complete. -func (s *SelectResults) Stats() *StatsMessage { - return s.stats -} - -// Progress - information about the progress of a request. -func (s *SelectResults) Progress() *ProgressMessage { - return s.progress -} - -// start is the main function that decodes the large byte array into -// several events that are sent through the eventstream. -func (s *SelectResults) start(pipeWriter *io.PipeWriter) { - go func() { - for { - var prelude preludeInfo - var headers = make(http.Header) - var err error - - // Create CRC code - crc := crc32.New(crc32.IEEETable) - crcReader := io.TeeReader(s.resp.Body, crc) - - // Extract the prelude(12 bytes) into a struct to extract relevant information. 
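
// The matching call path, assuming the option builder above and placeholder
// names: records stream through the Read side, and Stats is populated once
// the End event arrives.

package example

import (
	"context"
	"io"
	"log"
	"os"

	minio "github.com/minio/minio-go"
)

func runSelect(c *minio.Client) {
	res, err := c.SelectObjectContent(context.Background(),
		"my-bucket", "people.csv.gz", csvSelectOpts())
	if err != nil {
		log.Fatal(err)
	}
	defer res.Close()

	// Selected records are copied out as they stream in.
	if _, err := io.Copy(os.Stdout, res); err != nil {
		log.Fatal(err)
	}
	s := res.Stats()
	log.Printf("scanned=%d processed=%d returned=%d",
		s.BytesScanned, s.BytesProcessed, s.BytesReturned)
}
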
- prelude, err = processPrelude(crcReader, crc) - if err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - - // Extract the headers(variable bytes) into a struct to extract relevant information - if prelude.headerLen > 0 { - if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - } - - // Get the actual payload length so that the appropriate amount of - // bytes can be read or parsed. - payloadLen := prelude.PayloadLen() - - m := messageType(headers.Get("message-type")) - - switch m { - case errorMsg: - pipeWriter.CloseWithError(errors.New("Error Type of " + headers.Get("error-type") + " " + headers.Get("error-message"))) - closeResponse(s.resp) - return - case commonMsg: - // Get content-type of the payload. - c := contentType(headers.Get("content-type")) - - // Get event type of the payload. - e := eventType(headers.Get("event-type")) - - // Handle all supported events. - switch e { - case endEvent: - pipeWriter.Close() - closeResponse(s.resp) - return - case recordsEvent: - if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - case progressEvent: - switch c { - case xmlContent: - if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - default: - pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent)) - closeResponse(s.resp) - return - } - case statsEvent: - switch c { - case xmlContent: - if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - default: - pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent)) - closeResponse(s.resp) - return - } - } - } - - // Ensures that the full message's CRC is correct and - // that the message is not corrupted - if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil { - pipeWriter.CloseWithError(err) - closeResponse(s.resp) - return - } - - } - }() -} - -// PayloadLen is a function that calculates the length of the payload. -func (p preludeInfo) PayloadLen() int64 { - return int64(p.totalLen - p.headerLen - 16) -} - -// processPrelude is the function that reads the 12 bytes of the prelude and -// ensures the CRC is correct while also extracting relevant information into -// the struct, -func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) { - var err error - var pInfo = preludeInfo{} - - // reads total length of the message (first 4 bytes) - pInfo.totalLen, err = extractUint32(prelude) - if err != nil { - return pInfo, err - } - - // reads total header length of the message (2nd 4 bytes) - pInfo.headerLen, err = extractUint32(prelude) - if err != nil { - return pInfo, err - } - - // checks that the CRC is correct (3rd 4 bytes) - preCRC := crc.Sum32() - if err := checkCRC(prelude, preCRC); err != nil { - return pInfo, err - } - - return pInfo, nil -} - -// extracts the relevant information from the Headers. 
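
// Each eventstream header parsed below is length-prefixed: a 1-byte name
// length, the name, a 1-byte value type (7, i.e. string), a 2-byte value
// length, then the value. A self-contained sketch that encodes one header
// and walks it back with the same steps:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// encodeHeader writes one header in the layout that extractHeader reads.
func encodeHeader(w *bytes.Buffer, name, value string) {
	w.WriteByte(byte(len(name)))
	w.WriteString(name)
	w.WriteByte(7) // value type 7 = string
	var l [2]byte
	binary.BigEndian.PutUint16(l[:], uint16(len(value)))
	w.Write(l[:])
	w.WriteString(value)
}

func main() {
	var buf bytes.Buffer
	encodeHeader(&buf, ":event-type", "Records")

	nameLen, _ := buf.ReadByte()
	name := make([]byte, nameLen)
	io.ReadFull(&buf, name)
	buf.ReadByte() // skip the value type byte
	var vl [2]byte
	io.ReadFull(&buf, vl[:])
	value := make([]byte, binary.BigEndian.Uint16(vl[:]))
	io.ReadFull(&buf, value)

	fmt.Printf("%s = %s\n", bytes.TrimPrefix(name, []byte(":")), value)
	// Prints: event-type = Records
}
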
-func extractHeader(body io.Reader, myHeaders http.Header) error {
-	for {
-		// extracts the first part of the header (the header name).
-		headerTypeName, err := extractHeaderType(body)
-		if err != nil {
-			// Since end of file, we have read all of our headers
-			if err == io.EOF {
-				break
-			}
-			return err
-		}
-
-		// reads the header value type byte (always 7, i.e. string) and ignores it.
-		extractUint8(body)
-
-		headerValueName, err := extractHeaderValue(body)
-		if err != nil {
-			return err
-		}
-
-		myHeaders.Set(headerTypeName, headerValueName)
-
-	}
-	return nil
-}
-
-// extractHeaderType extracts the first half of the header message, the header type.
-func extractHeaderType(body io.Reader) (string, error) {
-	// extracts a 1 byte integer (the header name length)
-	headerNameLen, err := extractUint8(body)
-	if err != nil {
-		return "", err
-	}
-	// extracts the string with the appropriate number of bytes
-	headerName, err := extractString(body, int(headerNameLen))
-	if err != nil {
-		return "", err
-	}
-	return strings.TrimPrefix(headerName, ":"), nil
-}
-
-// extractHeaderValue extracts the second half of the header message, the
-// header value.
-func extractHeaderValue(body io.Reader) (string, error) {
-	bodyLen, err := extractUint16(body)
-	if err != nil {
-		return "", err
-	}
-	bodyName, err := extractString(body, int(bodyLen))
-	if err != nil {
-		return "", err
-	}
-	return bodyName, nil
-}
-
-// extracts a string of the given number of bytes from the reader.
-func extractString(source io.Reader, lenBytes int) (string, error) {
-	myVal := make([]byte, lenBytes)
-	_, err := source.Read(myVal)
-	if err != nil {
-		return "", err
-	}
-	return string(myVal), nil
-}
-
-// extractUint32 extracts a 4 byte integer from the byte array.
-func extractUint32(r io.Reader) (uint32, error) {
-	buf := make([]byte, 4)
-	_, err := io.ReadFull(r, buf)
-	if err != nil {
-		return 0, err
-	}
-	return binary.BigEndian.Uint32(buf), nil
-}
-
-// extractUint16 extracts a 2 byte integer from the byte array.
-func extractUint16(r io.Reader) (uint16, error) {
-	buf := make([]byte, 2)
-	_, err := io.ReadFull(r, buf)
-	if err != nil {
-		return 0, err
-	}
-	return binary.BigEndian.Uint16(buf), nil
-}
-
-// extractUint8 extracts a 1 byte integer from the byte array.
-func extractUint8(r io.Reader) (uint8, error) {
-	buf := make([]byte, 1)
-	_, err := io.ReadFull(r, buf)
-	if err != nil {
-		return 0, err
-	}
-	return buf[0], nil
-}
-
-// checkCRC ensures that the CRC matches with the one from the reader.
-func checkCRC(r io.Reader, expect uint32) error {
-	msgCRC, err := extractUint32(r)
-	if err != nil {
-		return err
-	}
-
-	if msgCRC != expect {
-		return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect)
-
-	}
-	return nil
-}
diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go
deleted file mode 100644
index 91e9d3964..000000000
--- a/vendor/github.com/minio/minio-go/api-stat.go
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "net/http" - "strconv" - "strings" - "time" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// BucketExists verify if bucket exists and you have permission to access it. -func (c Client) BucketExists(bucketName string) (bool, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return false, err - } - - // Execute HEAD on bucketName. - resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{ - bucketName: bucketName, - contentSHA256Hex: emptySHA256Hex, - }) - defer closeResponse(resp) - if err != nil { - if ToErrorResponse(err).Code == "NoSuchBucket" { - return false, nil - } - return false, err - } - if resp != nil { - resperr := httpRespToErrorResponse(resp, bucketName, "") - if ToErrorResponse(resperr).Code == "NoSuchBucket" { - return false, nil - } - if resp.StatusCode != http.StatusOK { - return false, httpRespToErrorResponse(resp, bucketName, "") - } - } - return true, nil -} - -// List of header keys to be filtered, usually -// from all S3 API http responses. -var defaultFilterKeys = []string{ - "Connection", - "Transfer-Encoding", - "Accept-Ranges", - "Date", - "Server", - "Vary", - "x-amz-bucket-region", - "x-amz-request-id", - "x-amz-id-2", - "Content-Security-Policy", - "X-Xss-Protection", - - // Add new headers to be ignored. -} - -// Extract only necessary metadata header key/values by -// filtering them out with a list of custom header keys. -func extractObjMetadata(header http.Header) http.Header { - filterKeys := append([]string{ - "ETag", - "Content-Length", - "Last-Modified", - "Content-Type", - }, defaultFilterKeys...) - return filterHeader(header, filterKeys) -} - -// StatObject verifies if object exists and you have permission to access. -func (c Client) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectInfo{}, err - } - return c.statObject(context.Background(), bucketName, objectName, opts) -} - -// Lower level API for statObject supporting pre-conditions and range headers. -func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - // Input validation. - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return ObjectInfo{}, err - } - if err := s3utils.CheckValidObjectName(objectName); err != nil { - return ObjectInfo{}, err - } - - // Execute HEAD on objectName. - resp, err := c.executeMethod(ctx, "HEAD", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - contentSHA256Hex: emptySHA256Hex, - customHeader: opts.Header(), - }) - defer closeResponse(resp) - if err != nil { - return ObjectInfo{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - // Trim off the odd double quotes from ETag in the beginning and end. 
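
// A short usage sketch for the Stat path handled below (bucket and object
// names are placeholders); the returned ObjectInfo carries the parsed size,
// ETag, and the filtered metadata headers.

package example

import (
	"log"

	minio "github.com/minio/minio-go"
)

func statIt(c *minio.Client) {
	info, err := c.StatObject("my-bucket", "backup.tar", minio.StatObjectOptions{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("key=%s size=%d etag=%s type=%s modified=%s",
		info.Key, info.Size, info.ETag, info.ContentType, info.LastModified)
}
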
-	md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
-	md5sum = strings.TrimSuffix(md5sum, "\"")
-
-	// Parse content length if it exists.
-	var size int64 = -1
-	contentLengthStr := resp.Header.Get("Content-Length")
-	if contentLengthStr != "" {
-		size, err = strconv.ParseInt(contentLengthStr, 10, 64)
-		if err != nil {
-			// Content-Length is not valid
-			return ObjectInfo{}, ErrorResponse{
-				Code:       "InternalError",
-				Message:    "Content-Length is invalid. " + reportIssue,
-				BucketName: bucketName,
-				Key:        objectName,
-				RequestID:  resp.Header.Get("x-amz-request-id"),
-				HostID:     resp.Header.Get("x-amz-id-2"),
-				Region:     resp.Header.Get("x-amz-bucket-region"),
-			}
-		}
-	}
-
-	// Parse Last-Modified as http time format.
-	date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
-	if err != nil {
-		return ObjectInfo{}, ErrorResponse{
-			Code:       "InternalError",
-			Message:    "Last-Modified time format is invalid. " + reportIssue,
-			BucketName: bucketName,
-			Key:        objectName,
-			RequestID:  resp.Header.Get("x-amz-request-id"),
-			HostID:     resp.Header.Get("x-amz-id-2"),
-			Region:     resp.Header.Get("x-amz-bucket-region"),
-		}
-	}
-
-	// Fetch content type if any present.
-	contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
-	if contentType == "" {
-		contentType = "application/octet-stream"
-	}
-
-	// Save object metadata info.
-	return ObjectInfo{
-		ETag:         md5sum,
-		Key:          objectName,
-		Size:         size,
-		LastModified: date,
-		ContentType:  contentType,
-		// Extract only the relevant header keys describing the object.
-		// The following function filters out a standard set of keys
-		// which are not part of object metadata.
-		Metadata: extractObjMetadata(resp.Header),
-	}, nil
-}
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
deleted file mode 100644
index f1c54909f..000000000
--- a/vendor/github.com/minio/minio-go/api.go
+++ /dev/null
@@ -1,898 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2018 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"bytes"
-	"context"
-	"crypto/md5"
-	"crypto/sha256"
-	"errors"
-	"fmt"
-	"hash"
-	"io"
-	"io/ioutil"
-	"math/rand"
-	"net"
-	"net/http"
-	"net/http/cookiejar"
-	"net/http/httputil"
-	"net/url"
-	"os"
-	"runtime"
-	"strings"
-	"sync"
-	"time"
-
-	"golang.org/x/net/publicsuffix"
-
-	"github.com/minio/minio-go/pkg/credentials"
-	"github.com/minio/minio-go/pkg/s3signer"
-	"github.com/minio/minio-go/pkg/s3utils"
-)
-
-// Client implements Amazon S3 compatible methods.
-type Client struct {
-	/// Standard options.
-
-	// Parsed endpoint url provided by the user.
-	endpointURL *url.URL
-
-	// Holds various credential providers.
-	credsProvider *credentials.Credentials
-
-	// Custom signerType value overrides all credentials.
-	overrideSignerType credentials.SignatureType
-
-	// User supplied.
- appInfo struct { - appName string - appVersion string - } - - // Indicate whether we are using https or not - secure bool - - // Needs allocation. - httpClient *http.Client - bucketLocCache *bucketLocationCache - - // Advanced functionality. - isTraceEnabled bool - traceOutput io.Writer - - // S3 specific accelerated endpoint. - s3AccelerateEndpoint string - - // Region endpoint - region string - - // Random seed. - random *rand.Rand - - // lookup indicates type of url lookup supported by server. If not specified, - // default to Auto. - lookup BucketLookupType -} - -// Options for New method -type Options struct { - Creds *credentials.Credentials - Secure bool - Region string - BucketLookup BucketLookupType - // Add future fields here -} - -// Global constants. -const ( - libraryName = "minio-go" - libraryVersion = "v6.0.14" -) - -// User Agent should always following the below style. -// Please open an issue to discuss any new changes here. -// -// Minio (OS; ARCH) LIB/VER APP/VER -const ( - libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") " - libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion -) - -// BucketLookupType is type of url lookup supported by server. -type BucketLookupType int - -// Different types of url lookup supported by the server.Initialized to BucketLookupAuto -const ( - BucketLookupAuto BucketLookupType = iota - BucketLookupDNS - BucketLookupPath -) - -// NewV2 - instantiate minio client with Amazon S3 signature version -// '2' compatibility. -func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { - creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "") - clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) - if err != nil { - return nil, err - } - clnt.overrideSignerType = credentials.SignatureV2 - return clnt, nil -} - -// NewV4 - instantiate minio client with Amazon S3 signature version -// '4' compatibility. -func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { - creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") - clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) - if err != nil { - return nil, err - } - clnt.overrideSignerType = credentials.SignatureV4 - return clnt, nil -} - -// New - instantiate minio client, adds automatic verification of signature. -func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { - creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") - clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) - if err != nil { - return nil, err - } - // Google cloud storage should be set to signature V2, force it if not. - if s3utils.IsGoogleEndpoint(*clnt.endpointURL) { - clnt.overrideSignerType = credentials.SignatureV2 - } - // If Amazon S3 set to signature v4. - if s3utils.IsAmazonEndpoint(*clnt.endpointURL) { - clnt.overrideSignerType = credentials.SignatureV4 - } - return clnt, nil -} - -// NewWithCredentials - instantiate minio client with credentials provider -// for retrieving credentials from various credentials provider such as -// IAM, File, Env etc. -func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) { - return privateNew(endpoint, creds, secure, region, BucketLookupAuto) -} - -// NewWithRegion - instantiate minio client, with region configured. 
Unlike New(), -// NewWithRegion avoids bucket-location lookup operations and it is slightly faster. -// Use this function when if your application deals with single region. -func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) { - creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") - return privateNew(endpoint, creds, secure, region, BucketLookupAuto) -} - -// NewWithOptions - instantiate minio client with options -func NewWithOptions(endpoint string, opts *Options) (*Client, error) { - return privateNew(endpoint, opts.Creds, opts.Secure, opts.Region, opts.BucketLookup) -} - -// lockedRandSource provides protected rand source, implements rand.Source interface. -type lockedRandSource struct { - lk sync.Mutex - src rand.Source -} - -// Int63 returns a non-negative pseudo-random 63-bit integer as an int64. -func (r *lockedRandSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -// Seed uses the provided seed value to initialize the generator to a -// deterministic state. -func (r *lockedRandSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// Redirect requests by re signing the request. -func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error { - if len(via) >= 5 { - return errors.New("stopped after 5 redirects") - } - if len(via) == 0 { - return nil - } - lastRequest := via[len(via)-1] - var reAuth bool - for attr, val := range lastRequest.Header { - // if hosts do not match do not copy Authorization header - if attr == "Authorization" && req.Host != lastRequest.Host { - reAuth = true - continue - } - if _, ok := req.Header[attr]; !ok { - req.Header[attr] = val - } - } - - *c.endpointURL = *req.URL - - value, err := c.credsProvider.Get() - if err != nil { - return err - } - var ( - signerType = value.SignerType - accessKeyID = value.AccessKeyID - secretAccessKey = value.SecretAccessKey - sessionToken = value.SessionToken - region = c.region - ) - - // Custom signer set then override the behavior. - if c.overrideSignerType != credentials.SignatureDefault { - signerType = c.overrideSignerType - } - - // If signerType returned by credentials helper is anonymous, - // then do not sign regardless of signerType override. - if value.SignerType == credentials.SignatureAnonymous { - signerType = credentials.SignatureAnonymous - } - - if reAuth { - // Check if there is no region override, if not get it from the URL if possible. - if region == "" { - region = s3utils.GetRegionFromURL(*c.endpointURL) - } - switch { - case signerType.IsV2(): - return errors.New("signature V2 cannot support redirection") - case signerType.IsV4(): - req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region)) - } - } - return nil -} - -func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string, lookup BucketLookupType) (*Client, error) { - // construct endpoint. - endpointURL, err := getEndpointURL(endpoint, secure) - if err != nil { - return nil, err - } - - // Initialize cookies to preserve server sent cookies if any and replay - // them upon each request. - jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) - if err != nil { - return nil, err - } - - // instantiate new Client. - clnt := new(Client) - - // Save the credentials. 
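
// Constructor usage at a glance, a hedged sketch with placeholder endpoints
// and keys: New picks the signature version from the endpoint, while
// NewWithOptions makes credentials, region, and lookup style explicit.

package example

import (
	"log"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/credentials"
)

func buildClients() {
	// Auto-detected signature and bucket lookup.
	c1, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatal(err)
	}
	_ = c1

	// Everything explicit: static v4 creds, fixed region, path-style URLs.
	c2, err := minio.NewWithOptions("s3.us-east-2.amazonaws.com", &minio.Options{
		Creds:        credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure:       true,
		Region:       "us-east-2",
		BucketLookup: minio.BucketLookupPath,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = c2
}
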
- clnt.credsProvider = creds - - // Remember whether we are using https or not - clnt.secure = secure - - // Save endpoint URL, user agent for future uses. - clnt.endpointURL = endpointURL - - // Instantiate http client and bucket location cache. - clnt.httpClient = &http.Client{ - Jar: jar, - Transport: DefaultTransport, - CheckRedirect: clnt.redirectHeaders, - } - - // Sets custom region, if region is empty bucket location cache is used automatically. - if region == "" { - region = s3utils.GetRegionFromURL(*clnt.endpointURL) - } - clnt.region = region - - // Instantiate bucket location cache. - clnt.bucketLocCache = newBucketLocationCache() - - // Introduce a new locked random seed. - clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) - - // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined - // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. - clnt.lookup = lookup - // Return. - return clnt, nil -} - -// SetAppInfo - add application details to user agent. -func (c *Client) SetAppInfo(appName string, appVersion string) { - // if app name and version not set, we do not set a new user agent. - if appName != "" && appVersion != "" { - c.appInfo = struct { - appName string - appVersion string - }{} - c.appInfo.appName = appName - c.appInfo.appVersion = appVersion - } -} - -// SetCustomTransport - set new custom transport. -func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) { - // Set this to override default transport - // ``http.DefaultTransport``. - // - // This transport is usually needed for debugging OR to add your - // own custom TLS certificates on the client transport, for custom - // CA's and certs which are not part of standard certificate - // authority follow this example :- - // - // tr := &http.Transport{ - // TLSClientConfig: &tls.Config{RootCAs: pool}, - // DisableCompression: true, - // } - // api.SetCustomTransport(tr) - // - if c.httpClient != nil { - c.httpClient.Transport = customHTTPTransport - } -} - -// TraceOn - enable HTTP tracing. -func (c *Client) TraceOn(outputStream io.Writer) { - // if outputStream is nil then default to os.Stdout. - if outputStream == nil { - outputStream = os.Stdout - } - // Sets a new output stream. - c.traceOutput = outputStream - - // Enable tracing. - c.isTraceEnabled = true -} - -// TraceOff - disable HTTP tracing. -func (c *Client) TraceOff() { - // Disable tracing. - c.isTraceEnabled = false -} - -// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your -// requests. This feature is only specific to S3 for all other endpoints this -// function does nothing. To read further details on s3 transfer acceleration -// please vist - -// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html -func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { - if s3utils.IsAmazonEndpoint(*c.endpointURL) { - c.s3AccelerateEndpoint = accelerateEndpoint - } -} - -// Hash materials provides relevant initialized hash algo writers -// based on the expected signature type. -// -// - For signature v4 request if the connection is insecure compute only sha256. -// - For signature v4 request if the connection is secure compute only md5. -// - For anonymous request compute md5. 
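
// A small sketch combining the debugging hooks defined below: SetAppInfo
// extends the default User-Agent, and TraceOn dumps each request/response
// pair (with the signature redacted) to the given writer.

package example

import (
	"os"

	minio "github.com/minio/minio-go"
)

func withTracing(c *minio.Client) {
	// Appended to the default "Minio (OS; ARCH) minio-go/vX" user agent.
	c.SetAppInfo("pbm-agent", "1.0.0")

	// Dump redacted HTTP traffic to stderr while debugging.
	c.TraceOn(os.Stderr)
	defer c.TraceOff()

	// ... issue requests here ...
}
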
-func (c *Client) hashMaterials() (hashAlgos map[string]hash.Hash, hashSums map[string][]byte) { - hashSums = make(map[string][]byte) - hashAlgos = make(map[string]hash.Hash) - if c.overrideSignerType.IsV4() { - if c.secure { - hashAlgos["md5"] = md5.New() - } else { - hashAlgos["sha256"] = sha256.New() - } - } else { - if c.overrideSignerType.IsAnonymous() { - hashAlgos["md5"] = md5.New() - } - } - return hashAlgos, hashSums -} - -// requestMetadata - is container for all the values to make a request. -type requestMetadata struct { - // If set newRequest presigns the URL. - presignURL bool - - // User supplied. - bucketName string - objectName string - queryValues url.Values - customHeader http.Header - expires int64 - - // Generated by our internal code. - bucketLocation string - contentBody io.Reader - contentLength int64 - contentMD5Base64 string // carries base64 encoded md5sum - contentSHA256Hex string // carries hex encoded sha256sum -} - -// dumpHTTP - dump HTTP request and response. -func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { - // Starts http dump. - _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") - if err != nil { - return err - } - - // Filter out Signature field from Authorization header. - origAuth := req.Header.Get("Authorization") - if origAuth != "" { - req.Header.Set("Authorization", redactSignature(origAuth)) - } - - // Only display request header. - reqTrace, err := httputil.DumpRequestOut(req, false) - if err != nil { - return err - } - - // Write request to trace output. - _, err = fmt.Fprint(c.traceOutput, string(reqTrace)) - if err != nil { - return err - } - - // Only display response header. - var respTrace []byte - - // For errors we make sure to dump response body as well. - if resp.StatusCode != http.StatusOK && - resp.StatusCode != http.StatusPartialContent && - resp.StatusCode != http.StatusNoContent { - respTrace, err = httputil.DumpResponse(resp, true) - if err != nil { - return err - } - } else { - respTrace, err = httputil.DumpResponse(resp, false) - if err != nil { - return err - } - } - - // Write response to trace output. - _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) - if err != nil { - return err - } - - // Ends the http dump. - _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------") - if err != nil { - return err - } - - // Returns success. - return nil -} - -// do - execute http request. -func (c Client) do(req *http.Request) (*http.Response, error) { - resp, err := c.httpClient.Do(req) - if err != nil { - // Handle this specifically for now until future Golang versions fix this issue properly. - if urlErr, ok := err.(*url.Error); ok { - if strings.Contains(urlErr.Err.Error(), "EOF") { - return nil, &url.Error{ - Op: urlErr.Op, - URL: urlErr.URL, - Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."), - } - } - } - return nil, err - } - - // Response cannot be non-nil, report error if thats the case. - if resp == nil { - msg := "Response is empty. " + reportIssue - return nil, ErrInvalidArgument(msg) - } - - // If trace is enabled, dump http request and response. - if c.isTraceEnabled { - err = c.dumpHTTP(req, resp) - if err != nil { - return nil, err - } - } - - return resp, nil -} - -// List of success status. 
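
// executeMethod below retries failed requests with backoff, but only when
// the body can be rewound. A toy sketch of that control flow under invented
// names (the real delays come from newRetryTimer, which is not shown in
// this hunk); assumes maxRetries >= 1.

package example

import (
	"errors"
	"io"
	"math/rand"
	"time"
)

func doWithRetry(body io.ReadSeeker, attempt func(io.Reader) error, maxRetries int) error {
	var err error
	for i := 0; i < maxRetries; i++ {
		if _, err = body.Seek(0, io.SeekStart); err != nil {
			return err // cannot rewind, so do not retry
		}
		if err = attempt(body); err == nil {
			return nil
		}
		// Jittered backoff: base * 2^i plus up to half of that again.
		base := time.Duration(1<<uint(i)) * 100 * time.Millisecond
		time.Sleep(base + time.Duration(rand.Int63n(int64(base/2)+1)))
	}
	return errors.New("retries exhausted: " + err.Error())
}
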
-var successStatus = []int{ - http.StatusOK, - http.StatusNoContent, - http.StatusPartialContent, -} - -// executeMethod - instantiates a given method, and retries the -// request upon any error up to maxRetries attempts in a binomially -// delayed manner using a standard back off algorithm. -func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { - var isRetryable bool // Indicates if request can be retried. - var bodySeeker io.Seeker // Extracted seeker from io.Reader. - var reqRetry = MaxRetry // Indicates how many times we can retry the request - - if metadata.contentBody != nil { - // Check if body is seekable then it is retryable. - bodySeeker, isRetryable = metadata.contentBody.(io.Seeker) - switch bodySeeker { - case os.Stdin, os.Stdout, os.Stderr: - isRetryable = false - } - // Retry only when reader is seekable - if !isRetryable { - reqRetry = 1 - } - - // Figure out if the body can be closed - if yes - // we will definitely close it upon the function - // return. - bodyCloser, ok := metadata.contentBody.(io.Closer) - if ok { - defer bodyCloser.Close() - } - } - - // Create a done channel to control 'newRetryTimer' go routine. - doneCh := make(chan struct{}, 1) - - // Indicate to our routine to exit cleanly upon return. - defer close(doneCh) - - // Blank indentifier is kept here on purpose since 'range' without - // blank identifiers is only supported since go1.4 - // https://golang.org/doc/go1.4#forrange. - for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { - // Retry executes the following function body if request has an - // error until maxRetries have been exhausted, retry attempts are - // performed after waiting for a given period of time in a - // binomial fashion. - if isRetryable { - // Seek back to beginning for each attempt. - if _, err = bodySeeker.Seek(0, 0); err != nil { - // If seek failed, no need to retry. - return nil, err - } - } - - // Instantiate a new request. - var req *http.Request - req, err = c.newRequest(method, metadata) - if err != nil { - errResponse := ToErrorResponse(err) - if isS3CodeRetryable(errResponse.Code) { - continue // Retry. - } - return nil, err - } - - // Add context to request - req = req.WithContext(ctx) - - // Initiate the request. - res, err = c.do(req) - if err != nil { - // For supported http requests errors verify. - if isHTTPReqErrorRetryable(err) { - continue // Retry. - } - // For other errors, return here no need to retry. - return nil, err - } - - // For any known successful http status, return quickly. - for _, httpStatus := range successStatus { - if httpStatus == res.StatusCode { - return res, nil - } - } - - // Read the body to be saved later. - errBodyBytes, err := ioutil.ReadAll(res.Body) - // res.Body should be closed - closeResponse(res) - if err != nil { - return nil, err - } - - // Save the body. - errBodySeeker := bytes.NewReader(errBodyBytes) - res.Body = ioutil.NopCloser(errBodySeeker) - - // For errors verify if its retryable otherwise fail quickly. - errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) - - // Save the body back again. - errBodySeeker.Seek(0, 0) // Seek back to starting point. - res.Body = ioutil.NopCloser(errBodySeeker) - - // Bucket region if set in error response and the error - // code dictates invalid region, we can retry the request - // with the new region. 
- // - // Additionally we should only retry if bucketLocation and custom - // region is empty. - if metadata.bucketLocation == "" && c.region == "" { - if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" { - if metadata.bucketName != "" && errResponse.Region != "" { - // Gather Cached location only if bucketName is present. - if _, cachedLocationError := c.bucketLocCache.Get(metadata.bucketName); cachedLocationError != false { - c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) - continue // Retry. - } - } - } - } - - // Verify if error response code is retryable. - if isS3CodeRetryable(errResponse.Code) { - continue // Retry. - } - - // Verify if http status code is retryable. - if isHTTPStatusRetryable(res.StatusCode) { - continue // Retry. - } - - // For all other cases break out of the retry loop. - break - } - return res, err -} - -// newRequest - instantiate a new HTTP request for a given method. -func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) { - // If no method is supplied default to 'POST'. - if method == "" { - method = "POST" - } - - location := metadata.bucketLocation - if location == "" { - if metadata.bucketName != "" { - // Gather location only if bucketName is present. - location, err = c.getBucketLocation(metadata.bucketName) - if err != nil { - if ToErrorResponse(err).Code != "AccessDenied" { - return nil, err - } - } - // Upon AccessDenied error on fetching bucket location, default - // to possible locations based on endpoint URL. This can usually - // happen when GetBucketLocation() is disabled using IAM policies. - } - if location == "" { - location = getDefaultLocation(*c.endpointURL, c.region) - } - } - - // Look if target url supports virtual host. - isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) - - // Construct a new target URL. - targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, isVirtualHost, metadata.queryValues) - if err != nil { - return nil, err - } - - // Initialize a new HTTP request for the method. - req, err = http.NewRequest(method, targetURL.String(), nil) - if err != nil { - return nil, err - } - - // Get credentials from the configured credentials provider. - value, err := c.credsProvider.Get() - if err != nil { - return nil, err - } - - var ( - signerType = value.SignerType - accessKeyID = value.AccessKeyID - secretAccessKey = value.SecretAccessKey - sessionToken = value.SessionToken - ) - - // Custom signer set then override the behavior. - if c.overrideSignerType != credentials.SignatureDefault { - signerType = c.overrideSignerType - } - - // If signerType returned by credentials helper is anonymous, - // then do not sign regardless of signerType override. - if value.SignerType == credentials.SignatureAnonymous { - signerType = credentials.SignatureAnonymous - } - - // Generate presign url if needed, return right here. - if metadata.expires != 0 && metadata.presignURL { - if signerType.IsAnonymous() { - return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") - } - if signerType.IsV2() { - // Presign URL with signature v2. - req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) - } else if signerType.IsV4() { - // Presign URL with signature v4. 
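
// The presign branch above backs the public presigned-URL helpers; a hedged
// usage sketch (v6 exposes PresignedGetObject and friends in another file of
// this package, so treat the exact name as an assumption here). Note that
// anonymous credentials cannot presign, per the check above.

package example

import (
	"log"
	"net/url"
	"time"

	minio "github.com/minio/minio-go"
)

func shareLink(c *minio.Client) {
	u, err := c.PresignedGetObject("my-bucket", "backup.tar", 15*time.Minute, url.Values{})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("share this:", u.String())
}
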
- req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) - } - return req, nil - } - - // Set 'User-Agent' header for the request. - c.setUserAgent(req) - - // Set all headers. - for k, v := range metadata.customHeader { - req.Header.Set(k, v[0]) - } - - // Go net/http notoriously closes the request body. - // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors. - // This can cause underlying *os.File seekers to fail, avoid that - // by making sure to wrap the closer as a nop. - if metadata.contentLength == 0 { - req.Body = nil - } else { - req.Body = ioutil.NopCloser(metadata.contentBody) - } - - // Set incoming content-length. - req.ContentLength = metadata.contentLength - if req.ContentLength <= -1 { - // For unknown content length, we upload using transfer-encoding: chunked. - req.TransferEncoding = []string{"chunked"} - } - - // set md5Sum for content protection. - if len(metadata.contentMD5Base64) > 0 { - req.Header.Set("Content-Md5", metadata.contentMD5Base64) - } - - // For anonymous requests just return. - if signerType.IsAnonymous() { - return req, nil - } - - switch { - case signerType.IsV2(): - // Add signature version '2' authorization header. - req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) - case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure: - // Streaming signature is used by default for a PUT object request. Additionally we also - // look if the initialized client is secure, if yes then we don't need to perform - // streaming signature. - req = s3signer.StreamingSignV4(req, accessKeyID, - secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) - default: - // Set sha256 sum for signature calculation only with signature version '4'. - shaHeader := unsignedPayload - if metadata.contentSHA256Hex != "" { - shaHeader = metadata.contentSHA256Hex - } - req.Header.Set("X-Amz-Content-Sha256", shaHeader) - - // Add signature version '4' authorization header. - req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) - } - - // Return request. - return req, nil -} - -// set User agent. -func (c Client) setUserAgent(req *http.Request) { - req.Header.Set("User-Agent", libraryUserAgent) - if c.appInfo.appName != "" && c.appInfo.appVersion != "" { - req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) - } -} - -// makeTargetURL make a new target url. -func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { - host := c.endpointURL.Host - // For Amazon S3 endpoint, try to fetch location based endpoint. - if s3utils.IsAmazonEndpoint(*c.endpointURL) { - if c.s3AccelerateEndpoint != "" && bucketName != "" { - // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html - // Disable transfer acceleration for non-compliant bucket names. - if strings.Contains(bucketName, ".") { - return nil, ErrTransferAccelerationBucket(bucketName) - } - // If transfer acceleration is requested set new host. - // For more details about enabling transfer acceleration read here. - // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html - host = c.s3AccelerateEndpoint - } else { - // Do not change the host if the endpoint URL is a FIPS S3 endpoint. 
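The presign branch of newRequest is what backs the library's public Presigned* helpers. A sketch producing a time-limited download link through the public API (the play.minio.io endpoint and keys are the sample credentials from config.json.sample later in this patch; my-bucket/report.csv is an assumed object):

```go
package main

import (
	"fmt"
	"log"
	"net/url"
	"time"

	minio "github.com/minio/minio-go"
)

func main() {
	// Public play server sample credentials; replace with your own.
	client, err := minio.New("play.minio.io:9000",
		"Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Optional response overrides, e.g. force a download prompt.
	reqParams := make(url.Values)
	reqParams.Set("response-content-disposition", `attachment; filename="report.csv"`)

	// The URL is signed locally; no request is sent to the server here.
	presignedURL, err := client.PresignedGetObject("my-bucket", "report.csv", 15*time.Minute, reqParams)
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(presignedURL)
}
```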
- if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) { - // Fetch new host based on the bucket location. - host = getS3Endpoint(bucketLocation) - } - } - } - - // Save scheme. - scheme := c.endpointURL.Scheme - - // Strip port 80 and 443 so we won't send these ports in Host header. - // The reason is that browsers and curl automatically remove :80 and :443 - // with the generated presigned urls, then a signature mismatch error. - if h, p, err := net.SplitHostPort(host); err == nil { - if scheme == "http" && p == "80" || scheme == "https" && p == "443" { - host = h - } - } - - urlStr := scheme + "://" + host + "/" - // Make URL only if bucketName is available, otherwise use the - // endpoint URL. - if bucketName != "" { - // If endpoint supports virtual host style use that always. - // Currently only S3 and Google Cloud Storage would support - // virtual host style. - if isVirtualHostStyle { - urlStr = scheme + "://" + bucketName + "." + host + "/" - if objectName != "" { - urlStr = urlStr + s3utils.EncodePath(objectName) - } - } else { - // If not fall back to using path style. - urlStr = urlStr + bucketName + "/" - if objectName != "" { - urlStr = urlStr + s3utils.EncodePath(objectName) - } - } - } - - // If there are any query values, add them to the end. - if len(queryValues) > 0 { - urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) - } - - return url.Parse(urlStr) -} - -// returns true if virtual hosted style requests are to be used. -func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool { - if bucketName == "" { - return false - } - - if c.lookup == BucketLookupDNS { - return true - } - if c.lookup == BucketLookupPath { - return false - } - - // default to virtual only for Amazon/Google storage. In all other cases use - // path style requests - return s3utils.IsVirtualHostSupported(url, bucketName) -} diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml deleted file mode 100644 index 48ea6e77d..000000000 --- a/vendor/github.com/minio/minio-go/appveyor.yml +++ /dev/null @@ -1,39 +0,0 @@ -# version format -version: "{build}" - -# Operating system (build VM template) -os: Windows Server 2012 R2 - -clone_folder: c:\gopath\src\github.com\minio\minio-go - -# environment variables -environment: - GOPATH: c:\gopath - GO15VENDOREXPERIMENT: 1 - -# scripts that run after cloning repository -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - go version - - go env - - go get -u golang.org/x/lint/golint - - go get -u github.com/remyoudompheng/go-misc/deadcode - - go get -u github.com/gordonklaus/ineffassign - - go get -u golang.org/x/crypto/argon2 - - go get -t ./... - -# to run your custom scripts instead of automatic MSBuild -build_script: - - go vet ./... - - gofmt -s -l . - - golint -set_exit_status github.com/minio/minio-go... - - deadcode - - ineffassign . - - go test -short -v - - go test -short -race -v - -# to disable automatic tests -test: off - -# to disable deployment -deploy: off diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go deleted file mode 100644 index cac7ad792..000000000 --- a/vendor/github.com/minio/minio-go/bucket-cache.go +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
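makeTargetURL and isVirtualHostStyleRequest decide between the two S3 URL shapes. A toy version showing just where the bucket lands, host versus path, and omitting the EncodePath escaping and port stripping the real code performs:

```go
package main

import "fmt"

// targetURL mirrors the two shapes built by makeTargetURL: virtual-host
// style puts the bucket in the Host, path style puts it in the path.
func targetURL(scheme, host, bucket, object string, virtualHost bool) string {
	if bucket == "" {
		return scheme + "://" + host + "/"
	}
	if virtualHost {
		return scheme + "://" + bucket + "." + host + "/" + object
	}
	return scheme + "://" + host + "/" + bucket + "/" + object
}

func main() {
	fmt.Println(targetURL("https", "s3.amazonaws.com", "my-bucket", "a/b.txt", true))
	// https://my-bucket.s3.amazonaws.com/a/b.txt
	fmt.Println(targetURL("https", "minio.example.net:9000", "my-bucket", "a/b.txt", false))
	// https://minio.example.net:9000/my-bucket/a/b.txt
}
```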
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "net/http" - "net/url" - "path" - "sync" - - "github.com/minio/minio-go/pkg/credentials" - "github.com/minio/minio-go/pkg/s3signer" - "github.com/minio/minio-go/pkg/s3utils" -) - -// bucketLocationCache - Provides simple mechanism to hold bucket -// locations in memory. -type bucketLocationCache struct { - // mutex is used for handling the concurrent - // read/write requests for cache. - sync.RWMutex - - // items holds the cached bucket locations. - items map[string]string -} - -// newBucketLocationCache - Provides a new bucket location cache to be -// used internally with the client object. -func newBucketLocationCache() *bucketLocationCache { - return &bucketLocationCache{ - items: make(map[string]string), - } -} - -// Get - Returns a value of a given key if it exists. -func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { - r.RLock() - defer r.RUnlock() - location, ok = r.items[bucketName] - return -} - -// Set - Will persist a value into cache. -func (r *bucketLocationCache) Set(bucketName string, location string) { - r.Lock() - defer r.Unlock() - r.items[bucketName] = location -} - -// Delete - Deletes a bucket name from cache. -func (r *bucketLocationCache) Delete(bucketName string) { - r.Lock() - defer r.Unlock() - delete(r.items, bucketName) -} - -// GetBucketLocation - get location for the bucket name from location cache, if not -// fetch freshly by making a new request. -func (c Client) GetBucketLocation(bucketName string) (string, error) { - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - return c.getBucketLocation(bucketName) -} - -// getBucketLocation - Get location for the bucketName from location map cache, if not -// fetch freshly by making a new request. -func (c Client) getBucketLocation(bucketName string) (string, error) { - if err := s3utils.CheckValidBucketName(bucketName); err != nil { - return "", err - } - - // Region set then no need to fetch bucket location. - if c.region != "" { - return c.region, nil - } - - if location, ok := c.bucketLocCache.Get(bucketName); ok { - return location, nil - } - - // Initialize a new request. - req, err := c.getBucketLocationRequest(bucketName) - if err != nil { - return "", err - } - - // Initiate the request. - resp, err := c.do(req) - defer closeResponse(resp) - if err != nil { - return "", err - } - location, err := processBucketLocationResponse(resp, bucketName) - if err != nil { - return "", err - } - c.bucketLocCache.Set(bucketName, location) - return location, nil -} - -// processes the getBucketLocation http response from the server. -func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) { - if resp != nil { - if resp.StatusCode != http.StatusOK { - err = httpRespToErrorResponse(resp, bucketName, "") - errResp := ToErrorResponse(err) - // For access denied error, it could be an anonymous - // request. Move forward and let the top level callers - // succeed if possible based on their policy. 
- if errResp.Code == "AccessDenied" { - return "us-east-1", nil - } - return "", err - } - } - - // Extract location. - var locationConstraint string - err = xmlDecoder(resp.Body, &locationConstraint) - if err != nil { - return "", err - } - - location := locationConstraint - // Location is empty will be 'us-east-1'. - if location == "" { - location = "us-east-1" - } - - // Location can be 'EU' convert it to meaningful 'eu-west-1'. - if location == "EU" { - location = "eu-west-1" - } - - // Save the location into cache. - - // Return. - return location, nil -} - -// getBucketLocationRequest - Wrapper creates a new getBucketLocation request. -func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) { - // Set location query. - urlValues := make(url.Values) - urlValues.Set("location", "") - - // Set get bucket location always as path style. - targetURL := c.endpointURL - targetURL.Path = path.Join(bucketName, "") + "/" - targetURL.RawQuery = urlValues.Encode() - - // Get a new HTTP request for the method. - req, err := http.NewRequest("GET", targetURL.String(), nil) - if err != nil { - return nil, err - } - - // Set UserAgent for the request. - c.setUserAgent(req) - - // Get credentials from the configured credentials provider. - value, err := c.credsProvider.Get() - if err != nil { - return nil, err - } - - var ( - signerType = value.SignerType - accessKeyID = value.AccessKeyID - secretAccessKey = value.SecretAccessKey - sessionToken = value.SessionToken - ) - - // Custom signer set then override the behavior. - if c.overrideSignerType != credentials.SignatureDefault { - signerType = c.overrideSignerType - } - - // If signerType returned by credentials helper is anonymous, - // then do not sign regardless of signerType override. - if value.SignerType == credentials.SignatureAnonymous { - signerType = credentials.SignatureAnonymous - } - - if signerType.IsAnonymous() { - return req, nil - } - - if signerType.IsV2() { - // Get Bucket Location calls should be always path style - isVirtualHost := false - req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) - return req, nil - } - - // Set sha256 sum for signature calculation only with signature version '4'. - contentSha256 := emptySHA256Hex - if c.secure { - contentSha256 = unsignedPayload - } - - req.Header.Set("X-Amz-Content-Sha256", contentSha256) - req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") - return req, nil -} diff --git a/vendor/github.com/minio/minio-go/bucket-notification.go b/vendor/github.com/minio/minio-go/bucket-notification.go deleted file mode 100644 index ea303dd9d..000000000 --- a/vendor/github.com/minio/minio-go/bucket-notification.go +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
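bucketLocationCache is a plain RWMutex-guarded map: shared locks for reads, an exclusive lock for writes, so concurrent requests can resolve a bucket's region without repeated GetBucketLocation round trips. The same pattern reduced to a runnable sketch:

```go
package main

import (
	"fmt"
	"sync"
)

// locationCache is a standalone copy of the RWMutex-guarded map pattern
// used by bucketLocationCache: many concurrent readers, exclusive writers.
type locationCache struct {
	mu    sync.RWMutex
	items map[string]string
}

func newLocationCache() *locationCache {
	return &locationCache{items: make(map[string]string)}
}

func (c *locationCache) Get(bucket string) (string, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	loc, ok := c.items[bucket]
	return loc, ok
}

func (c *locationCache) Set(bucket, location string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[bucket] = location
}

func main() {
	cache := newLocationCache()
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			cache.Set(fmt.Sprintf("bucket-%d", n), "us-east-1")
		}(i)
	}
	wg.Wait()
	if loc, ok := cache.Get("bucket-2"); ok {
		fmt.Println("bucket-2 ->", loc) // bucket-2 -> us-east-1
	}
}
```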
- */ - -package minio - -import ( - "encoding/xml" - - "github.com/minio/minio-go/pkg/set" -) - -// NotificationEventType is a S3 notification event associated to the bucket notification configuration -type NotificationEventType string - -// The role of all event types are described in : -// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations -const ( - ObjectCreatedAll NotificationEventType = "s3:ObjectCreated:*" - ObjectCreatedPut = "s3:ObjectCreated:Put" - ObjectCreatedPost = "s3:ObjectCreated:Post" - ObjectCreatedCopy = "s3:ObjectCreated:Copy" - ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" - ObjectAccessedGet = "s3:ObjectAccessed:Get" - ObjectAccessedHead = "s3:ObjectAccessed:Head" - ObjectAccessedAll = "s3:ObjectAccessed:*" - ObjectRemovedAll = "s3:ObjectRemoved:*" - ObjectRemovedDelete = "s3:ObjectRemoved:Delete" - ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" - ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" -) - -// FilterRule - child of S3Key, a tag in the notification xml which -// carries suffix/prefix filters -type FilterRule struct { - Name string `xml:"Name"` - Value string `xml:"Value"` -} - -// S3Key - child of Filter, a tag in the notification xml which -// carries suffix/prefix filters -type S3Key struct { - FilterRules []FilterRule `xml:"FilterRule,omitempty"` -} - -// Filter - a tag in the notification xml structure which carries -// suffix/prefix filters -type Filter struct { - S3Key S3Key `xml:"S3Key,omitempty"` -} - -// Arn - holds ARN information that will be sent to the web service, -// ARN desciption can be found in http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html -type Arn struct { - Partition string - Service string - Region string - AccountID string - Resource string -} - -// NewArn creates new ARN based on the given partition, service, region, account id and resource -func NewArn(partition, service, region, accountID, resource string) Arn { - return Arn{Partition: partition, - Service: service, - Region: region, - AccountID: accountID, - Resource: resource} -} - -// Return the string format of the ARN -func (arn Arn) String() string { - return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource -} - -// NotificationConfig - represents one single notification configuration -// such as topic, queue or lambda configuration. -type NotificationConfig struct { - ID string `xml:"Id,omitempty"` - Arn Arn `xml:"-"` - Events []NotificationEventType `xml:"Event"` - Filter *Filter `xml:"Filter,omitempty"` -} - -// NewNotificationConfig creates one notification config and sets the given ARN -func NewNotificationConfig(arn Arn) NotificationConfig { - return NotificationConfig{Arn: arn, Filter: &Filter{}} -} - -// AddEvents adds one event to the current notification config -func (t *NotificationConfig) AddEvents(events ...NotificationEventType) { - t.Events = append(t.Events, events...) 
-} - -// AddFilterSuffix sets the suffix configuration to the current notification config -func (t *NotificationConfig) AddFilterSuffix(suffix string) { - if t.Filter == nil { - t.Filter = &Filter{} - } - newFilterRule := FilterRule{Name: "suffix", Value: suffix} - // Replace any suffix rule if existing and add to the list otherwise - for index := range t.Filter.S3Key.FilterRules { - if t.Filter.S3Key.FilterRules[index].Name == "suffix" { - t.Filter.S3Key.FilterRules[index] = newFilterRule - return - } - } - t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) -} - -// AddFilterPrefix sets the prefix configuration to the current notification config -func (t *NotificationConfig) AddFilterPrefix(prefix string) { - if t.Filter == nil { - t.Filter = &Filter{} - } - newFilterRule := FilterRule{Name: "prefix", Value: prefix} - // Replace any prefix rule if existing and add to the list otherwise - for index := range t.Filter.S3Key.FilterRules { - if t.Filter.S3Key.FilterRules[index].Name == "prefix" { - t.Filter.S3Key.FilterRules[index] = newFilterRule - return - } - } - t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) -} - -// TopicConfig carries one single topic notification configuration -type TopicConfig struct { - NotificationConfig - Topic string `xml:"Topic"` -} - -// QueueConfig carries one single queue notification configuration -type QueueConfig struct { - NotificationConfig - Queue string `xml:"Queue"` -} - -// LambdaConfig carries one single cloudfunction notification configuration -type LambdaConfig struct { - NotificationConfig - Lambda string `xml:"CloudFunction"` -} - -// BucketNotification - the struct that represents the whole XML to be sent to the web service -type BucketNotification struct { - XMLName xml.Name `xml:"NotificationConfiguration"` - LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"` - TopicConfigs []TopicConfig `xml:"TopicConfiguration"` - QueueConfigs []QueueConfig `xml:"QueueConfiguration"` -} - -// AddTopic adds a given topic config to the general bucket notification config -func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) bool { - newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()} - for _, n := range b.TopicConfigs { - // If new config matches existing one - if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { - - existingConfig := set.NewStringSet() - for _, v := range n.Events { - existingConfig.Add(string(v)) - } - - newConfig := set.NewStringSet() - for _, v := range topicConfig.Events { - newConfig.Add(string(v)) - } - - if !newConfig.Intersection(existingConfig).IsEmpty() { - return false - } - } - } - b.TopicConfigs = append(b.TopicConfigs, newTopicConfig) - return true -} - -// AddQueue adds a given queue config to the general bucket notification config -func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) bool { - newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()} - for _, n := range b.QueueConfigs { - if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { - - existingConfig := set.NewStringSet() - for _, v := range n.Events { - existingConfig.Add(string(v)) - } - - newConfig := set.NewStringSet() - for _, v := range queueConfig.Events { - newConfig.Add(string(v)) - } - - if !newConfig.Intersection(existingConfig).IsEmpty() { - return false - } - } - } - b.QueueConfigs = append(b.QueueConfigs, 
newQueueConfig) - return true -} - -// AddLambda adds a given lambda config to the general bucket notification config -func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) bool { - newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()} - for _, n := range b.LambdaConfigs { - if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter { - - existingConfig := set.NewStringSet() - for _, v := range n.Events { - existingConfig.Add(string(v)) - } - - newConfig := set.NewStringSet() - for _, v := range lambdaConfig.Events { - newConfig.Add(string(v)) - } - - if !newConfig.Intersection(existingConfig).IsEmpty() { - return false - } - } - } - b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig) - return true -} - -// RemoveTopicByArn removes all topic configurations that match the exact specified ARN -func (b *BucketNotification) RemoveTopicByArn(arn Arn) { - var topics []TopicConfig - for _, topic := range b.TopicConfigs { - if topic.Topic != arn.String() { - topics = append(topics, topic) - } - } - b.TopicConfigs = topics -} - -// RemoveQueueByArn removes all queue configurations that match the exact specified ARN -func (b *BucketNotification) RemoveQueueByArn(arn Arn) { - var queues []QueueConfig - for _, queue := range b.QueueConfigs { - if queue.Queue != arn.String() { - queues = append(queues, queue) - } - } - b.QueueConfigs = queues -} - -// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN -func (b *BucketNotification) RemoveLambdaByArn(arn Arn) { - var lambdas []LambdaConfig - for _, lambda := range b.LambdaConfigs { - if lambda.Lambda != arn.String() { - lambdas = append(lambdas, lambda) - } - } - b.LambdaConfigs = lambdas -} diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go deleted file mode 100644 index 737742318..000000000 --- a/vendor/github.com/minio/minio-go/constants.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -/// Multipart upload defaults. - -// absMinPartSize - absolute minimum part size (5 MiB) below which -// a part in a multipart upload may not be uploaded. -const absMinPartSize = 1024 * 1024 * 5 - -// minPartSize - minimum part size 64MiB per object after which -// putObject behaves internally as multipart. -const minPartSize = 1024 * 1024 * 64 - -// maxPartsCount - maximum number of parts for a single multipart session. -const maxPartsCount = 10000 - -// maxPartSize - maximum part size 5GiB for a single multipart upload -// operation. -const maxPartSize = 1024 * 1024 * 1024 * 5 - -// maxSinglePutObjectSize - maximum size 5GiB of object per PUT -// operation. -const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 - -// maxMultipartPutObjectSize - maximum size 5TiB of object for -// Multipart operation. 
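Putting the notification types together: build an ARN, attach events and filters, and add the result to a BucketNotification. The queue ARN, account ID, and bucket name are assumptions for illustration, and SetBucketNotification is the client call that uploads the assembled XML in this library version:

```go
package main

import (
	"log"

	minio "github.com/minio/minio-go"
)

func main() {
	client, err := minio.New("play.minio.io:9000",
		"Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatalln(err)
	}

	// ARN of an assumed, pre-existing SQS queue target.
	queueArn := minio.NewArn("aws", "sqs", "us-east-1", "123456789012", "uploads")

	cfg := minio.NewNotificationConfig(queueArn)
	cfg.AddEvents(minio.ObjectCreatedAll)
	cfg.AddFilterPrefix("photos/")
	cfg.AddFilterSuffix(".jpg")

	var bn minio.BucketNotification
	if !bn.AddQueue(cfg) {
		log.Fatalln("queue config overlaps an existing one")
	}

	// Push the whole notification document to the bucket.
	if err := client.SetBucketNotification("my-bucket", bn); err != nil {
		log.Fatalln(err)
	}
}
```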
-const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 - -// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when -// we don't want to sign the request payload -const unsignedPayload = "UNSIGNED-PAYLOAD" - -// Total number of parallel workers used for multipart operation. -const totalWorkers = 4 - -// Signature related constants. -const ( - signV4Algorithm = "AWS4-HMAC-SHA256" - iso8601DateFormat = "20060102T150405Z" -) - -// Storage class header constant. -const amzStorageClass = "X-Amz-Storage-Class" - -// Website redirect location header constant -const amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location" diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go deleted file mode 100644 index 4d51363f0..000000000 --- a/vendor/github.com/minio/minio-go/core.go +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "context" - "io" - "strings" - - "github.com/minio/minio-go/pkg/encrypt" -) - -// Core - Inherits Client and adds new methods to expose the low level S3 APIs. -type Core struct { - *Client -} - -// NewCore - Returns new initialized a Core client, this CoreClient should be -// only used under special conditions such as need to access lower primitives -// and being able to use them to write your own wrappers. -func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) { - var s3Client Core - client, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure) - if err != nil { - return nil, err - } - s3Client.Client = client - return &s3Client, nil -} - -// ListObjects - List all the objects at a prefix, optionally with marker and delimiter -// you can further filter the results. -func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { - return c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys) -} - -// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses -// continuationToken instead of marker to support iteration over the results. -func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) { - return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys, startAfter) -} - -// CopyObject - copies an object from source object to destination object on server side. -func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) { - return c.copyObjectDo(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata) -} - -// CopyObjectPart - creates a part in a multipart upload by copying (a -// part of) an existing object. 
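The constants above pin down the multipart envelope: with at most 10000 parts, a 5 TiB object needs parts of roughly 525 MiB, while smaller objects clamp up to the 64 MiB floor this version uses. A back-of-the-envelope sketch; the library's real part-size logic also rounds and caps, so treat this as approximate:

```go
package main

import "fmt"

const (
	minPartSize   = 64 * 1024 * 1024 // 64 MiB floor in this library version
	maxPartsCount = 10000            // hard cap on parts per upload
)

// pickPartSize returns the smallest part size that fits objectSize into
// maxPartsCount parts without dropping below minPartSize.
func pickPartSize(objectSize int64) int64 {
	partSize := (objectSize + maxPartsCount - 1) / maxPartsCount // ceiling division
	if partSize < minPartSize {
		partSize = minPartSize
	}
	return partSize
}

func main() {
	const tib = int64(1) << 40
	fmt.Println(pickPartSize(5 * tib))   // 549755814 (~524 MiB per part)
	fmt.Println(pickPartSize(100 << 20)) // 67108864 (clamped to 64 MiB)
}
```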
-func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string, - partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) { - - return c.copyObjectPartDo(context.Background(), srcBucket, srcObject, destBucket, destObject, uploadID, - partID, startOffset, length, metadata) -} - -// PutObject - Upload object. Uploads using single PUT call. -func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string, sse encrypt.ServerSide) (ObjectInfo, error) { - opts := PutObjectOptions{} - m := make(map[string]string) - for k, v := range metadata { - if strings.ToLower(k) == "content-encoding" { - opts.ContentEncoding = v - } else if strings.ToLower(k) == "content-disposition" { - opts.ContentDisposition = v - } else if strings.ToLower(k) == "content-language" { - opts.ContentLanguage = v - } else if strings.ToLower(k) == "content-type" { - opts.ContentType = v - } else if strings.ToLower(k) == "cache-control" { - opts.CacheControl = v - } else if strings.ToLower(k) == strings.ToLower(amzWebsiteRedirectLocation) { - opts.WebsiteRedirectLocation = v - } else { - m[k] = metadata[k] - } - } - opts.UserMetadata = m - opts.ServerSideEncryption = sse - return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts) -} - -// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID. -func (c Core) NewMultipartUpload(bucket, object string, opts PutObjectOptions) (uploadID string, err error) { - result, err := c.initiateMultipartUpload(context.Background(), bucket, object, opts) - return result.UploadID, err -} - -// ListMultipartUploads - List incomplete uploads. -func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) { - return c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads) -} - -// PutObjectPart - Upload an object part. -func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) { - return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse) -} - -// ListObjectParts - List uploaded parts of an incomplete upload.x -func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) { - return c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts) -} - -// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object. -func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) (string, error) { - res, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{ - Parts: parts, - }) - return res.ETag, err -} - -// AbortMultipartUpload - Abort an incomplete upload. -func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error { - return c.abortMultipartUpload(context.Background(), bucket, object, uploadID) -} - -// GetBucketPolicy - fetches bucket access policy for a given bucket. -func (c Core) GetBucketPolicy(bucket string) (string, error) { - return c.getBucketPolicy(bucket) -} - -// PutBucketPolicy - applies a new bucket access policy for a given bucket. 
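Core exposes the raw multipart lifecycle that the high-level PutObject drives internally. A sketch of the initiate, upload-part, complete sequence using the signatures shown above (play sample credentials again; my-bucket is assumed to exist, and error handling is trimmed to aborting the upload):

```go
package main

import (
	"bytes"
	"log"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	core, err := minio.NewCore("play.minio.io:9000",
		"Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatalln(err)
	}

	uploadID, err := core.NewMultipartUpload("my-bucket", "big.bin", minio.PutObjectOptions{})
	if err != nil {
		log.Fatalln(err)
	}

	// Upload a single part; real callers loop over chunks of at least 5 MiB.
	data := bytes.Repeat([]byte("x"), 5*1024*1024)
	var sse encrypt.ServerSide // nil: no server-side encryption
	part, err := core.PutObjectPart("my-bucket", "big.bin", uploadID, 1,
		bytes.NewReader(data), int64(len(data)), "", "", sse)
	if err != nil {
		core.AbortMultipartUpload("my-bucket", "big.bin", uploadID)
		log.Fatalln(err)
	}

	etag, err := core.CompleteMultipartUpload("my-bucket", "big.bin", uploadID,
		[]minio.CompletePart{{PartNumber: 1, ETag: part.ETag}})
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("committed, etag:", etag)
}
```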
-func (c Core) PutBucketPolicy(bucket, bucketPolicy string) error { - return c.putBucketPolicy(bucket, bucketPolicy) -} - -// GetObject is a lower level API implemented to support reading -// partial objects and also downloading objects with special conditions -// matching etag, modtime etc. -func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) { - return c.getObject(context.Background(), bucketName, objectName, opts) -} - -// StatObject is a lower level API implemented to support special -// conditions matching etag, modtime on a request. -func (c Core) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { - return c.statObject(context.Background(), bucketName, objectName, opts) -} diff --git a/vendor/github.com/minio/minio-go/hook-reader.go b/vendor/github.com/minio/minio-go/hook-reader.go deleted file mode 100644 index 8f32291d4..000000000 --- a/vendor/github.com/minio/minio-go/hook-reader.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import "io" - -// hookReader hooks additional reader in the source stream. It is -// useful for making progress bars. Second reader is appropriately -// notified about the exact number of bytes read from the primary -// source on each Read operation. -type hookReader struct { - source io.Reader - hook io.Reader -} - -// Seek implements io.Seeker. Seeks source first, and if necessary -// seeks hook if Seek method is appropriately found. -func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) { - // Verify for source has embedded Seeker, use it. - sourceSeeker, ok := hr.source.(io.Seeker) - if ok { - return sourceSeeker.Seek(offset, whence) - } - // Verify if hook has embedded Seeker, use it. - hookSeeker, ok := hr.hook.(io.Seeker) - if ok { - return hookSeeker.Seek(offset, whence) - } - return n, nil -} - -// Read implements io.Reader. Always reads from the source, the return -// value 'n' number of bytes are reported through the hook. Returns -// error for all non io.EOF conditions. -func (hr *hookReader) Read(b []byte) (n int, err error) { - n, err = hr.source.Read(b) - if err != nil && err != io.EOF { - return n, err - } - // Progress the hook with the total read bytes from the source. - if _, herr := hr.hook.Read(b[:n]); herr != nil { - if herr != io.EOF { - return n, herr - } - } - return n, err -} - -// newHook returns a io.ReadSeeker which implements hookReader that -// reports the data read from the source to the hook. 
-func newHook(source, hook io.Reader) io.Reader { - if hook == nil { - return source - } - return &hookReader{source, hook} -} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go deleted file mode 100644 index e29826f48..000000000 --- a/vendor/github.com/minio/minio-go/pkg/credentials/chain.go +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -// A Chain will search for a provider which returns credentials -// and cache that provider until Retrieve is called again. -// -// The Chain provides a way of chaining multiple providers together -// which will pick the first available using priority order of the -// Providers in the list. -// -// If none of the Providers retrieve valid credentials Value, ChainProvider's -// Retrieve() will return the no credentials value. -// -// If a Provider is found which returns valid credentials Value ChainProvider -// will cache that Provider for all calls to IsExpired(), until Retrieve is -// called again after IsExpired() is true. -// -// creds := credentials.NewChainCredentials( -// []credentials.Provider{ -// &credentials.EnvAWSS3{}, -// &credentials.EnvMinio{}, -// }) -// -// // Usage of ChainCredentials. -// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1") -// if err != nil { -// log.Fatalln(err) -// } -// -type Chain struct { - Providers []Provider - curr Provider -} - -// NewChainCredentials returns a pointer to a new Credentials object -// wrapping a chain of providers. -func NewChainCredentials(providers []Provider) *Credentials { - return New(&Chain{ - Providers: append([]Provider{}, providers...), - }) -} - -// Retrieve returns the credentials value, returns no credentials(anonymous) -// if no credentials provider returned any value. -// -// If a provider is found with credentials, it will be cached and any calls -// to IsExpired() will return the expired state of the cached provider. -func (c *Chain) Retrieve() (Value, error) { - for _, p := range c.Providers { - creds, _ := p.Retrieve() - // Always prioritize non-anonymous providers, if any. - if creds.AccessKeyID == "" && creds.SecretAccessKey == "" { - continue - } - c.curr = p - return creds, nil - } - // At this point we have exhausted all the providers and - // are left without any credentials return anonymous. - return Value{ - SignerType: SignatureAnonymous, - }, nil -} - -// IsExpired will returned the expired state of the currently cached provider -// if there is one. If there is no current provider, true will be returned. 
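hookReader is how progress reporting hooks into transfers: the hook is shown exactly the bytes read from the source but cannot corrupt them. The same idea as a self-contained sketch, with countingHook standing in for a progress bar:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// countingHook implements io.Reader but only counts what it is shown;
// the wrapper calls hook.Read with the bytes already read from the source.
type countingHook struct{ total int64 }

func (h *countingHook) Read(p []byte) (int, error) {
	h.total += int64(len(p))
	return len(p), nil
}

// hooked mirrors hookReader.Read: read from the source, then report the
// same bytes to the hook for progress accounting.
type hooked struct {
	source io.Reader
	hook   io.Reader
}

func (hr *hooked) Read(b []byte) (int, error) {
	n, err := hr.source.Read(b)
	if err != nil && err != io.EOF {
		return n, err
	}
	if _, herr := hr.hook.Read(b[:n]); herr != nil && herr != io.EOF {
		return n, herr
	}
	return n, err
}

func main() {
	progress := &countingHook{}
	r := &hooked{source: strings.NewReader("some object payload"), hook: progress}
	if _, err := io.Copy(ioutil.Discard, r); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println("bytes seen by hook:", progress.total) // 19
}
```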
-func (c *Chain) IsExpired() bool { - if c.curr != nil { - return c.curr.IsExpired() - } - - return true -} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample deleted file mode 100644 index 130746f4b..000000000 --- a/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample +++ /dev/null @@ -1,17 +0,0 @@ -{ - "version": "8", - "hosts": { - "play": { - "url": "https://play.minio.io:9000", - "accessKey": "Q3AM3UQ867SPQQA43P2F", - "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", - "api": "S3v2" - }, - "s3": { - "url": "https://s3.amazonaws.com", - "accessKey": "accessKey", - "secretKey": "secret", - "api": "S3v4" - } - } -} \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go deleted file mode 100644 index 4bfdad413..000000000 --- a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "sync" - "time" -) - -// A Value is the AWS credentials value for individual credential fields. -type Value struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Signature Type. - SignerType SignatureType -} - -// A Provider is the interface for any component which will provide credentials -// Value. A provider is required to manage its own Expired state, and what to -// be expired means. -type Provider interface { - // Retrieve returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve() (Value, error) - - // IsExpired returns if the credentials are no longer valid, and need - // to be retrieved. - IsExpired() bool -} - -// A Expiry provides shared expiration logic to be used by credentials -// providers to implement expiry functionality. -// -// The best method to use this struct is as an anonymous field within the -// provider's struct. -// -// Example: -// type IAMCredentialProvider struct { -// Expiry -// ... -// } -type Expiry struct { - // The date/time when to expire on - expiration time.Time - - // If set will be used by IsExpired to determine the current time. - // Defaults to time.Now if CurrentTime is not set. - CurrentTime func() time.Time -} - -// SetExpiration sets the expiration IsExpired will check when called. -// -// If window is greater than 0 the expiration time will be reduced by the -// window value. -// -// Using a window is helpful to trigger credentials to expire sooner than -// the expiration time given to ensure no requests are made with expired -// tokens. 
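Chain resolution in practice: the first provider returning non-anonymous keys wins, and the chain degrades to anonymous credentials instead of erroring when none do. A runnable variant of the usage sketched in the comment above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Order encodes priority: AWS-style env vars first, then Minio-style.
	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvAWS{},   // AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
		&credentials.EnvMinio{}, // MINIO_ACCESS_KEY / MINIO_SECRET_KEY
	})

	value, err := creds.Get()
	if err != nil {
		log.Fatalln(err)
	}
	// With neither pair of variables set, the chain returns anonymous
	// credentials rather than an error.
	fmt.Println("non-anonymous:", value.AccessKeyID != "")
}
```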
-func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - e.expiration = expiration - if window > 0 { - e.expiration = e.expiration.Add(-window) - } -} - -// IsExpired returns if the credentials are expired. -func (e *Expiry) IsExpired() bool { - if e.CurrentTime == nil { - e.CurrentTime = time.Now - } - return e.expiration.Before(e.CurrentTime()) -} - -// Credentials - A container for synchronous safe retrieval of credentials Value. -// Credentials will cache the credentials value until they expire. Once the value -// expires the next Get will attempt to retrieve valid credentials. -// -// Credentials is safe to use across multiple goroutines and will manage the -// synchronous state so the Providers do not need to implement their own -// synchronization. -// -// The first Credentials.Get() will always call Provider.Retrieve() to get the -// first instance of the credentials Value. All calls to Get() after that -// will return the cached credentials Value until IsExpired() returns true. -type Credentials struct { - sync.Mutex - - creds Value - forceRefresh bool - provider Provider -} - -// New returns a pointer to a new Credentials with the provider set. -func New(provider Provider) *Credentials { - return &Credentials{ - provider: provider, - forceRefresh: true, - } -} - -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - c.Lock() - defer c.Unlock() - - if c.isExpired() { - creds, err := c.provider.Retrieve() - if err != nil { - return Value{}, err - } - c.creds = creds - c.forceRefresh = false - } - - return c.creds, nil -} - -// Expire expires the credentials and forces them to be retrieved on the -// next call to Get(). -// -// This will override the Provider's expired state, and force Credentials -// to call the Provider's Retrieve(). -func (c *Credentials) Expire() { - c.Lock() - defer c.Unlock() - - c.forceRefresh = true -} - -// IsExpired returns if the credentials are no longer valid, and need -// to be refreshed. -// -// If the Credentials were forced to be expired with Expire() this will -// reflect that override. -func (c *Credentials) IsExpired() bool { - c.Lock() - defer c.Unlock() - - return c.isExpired() -} - -// isExpired helper method wrapping the definition of expired credentials. 
-func (c *Credentials) isExpired() bool { - return c.forceRefresh || c.provider.IsExpired() -} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample deleted file mode 100644 index 7fc91d9d2..000000000 --- a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample +++ /dev/null @@ -1,12 +0,0 @@ -[default] -aws_access_key_id = accessKey -aws_secret_access_key = secret -aws_session_token = token - -[no_token] -aws_access_key_id = accessKey -aws_secret_access_key = secret - -[with_colon] -aws_access_key_id: accessKey -aws_secret_access_key: secret diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go deleted file mode 100644 index c48784ba8..000000000 --- a/vendor/github.com/minio/minio-go/pkg/credentials/doc.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package credentials provides credential retrieval and management -// for S3 compatible object storage. -// -// By default the Credentials.Get() will cache the successful result of a -// Provider's Retrieve() until Provider.IsExpired() returns true. At which -// point Credentials will call Provider's Retrieve() to get new credential Value. -// -// The Provider is responsible for determining when credentials have expired. -// It is also important to note that Credentials will always call Retrieve the -// first time Credentials.Get() is called. -// -// Example of using the environment variable credentials. -// -// creds := NewFromEnv() -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } -// -// Example of forcing credentials to expire and be refreshed on the next Get(). -// This may be helpful to proactively expire credentials and refresh them sooner -// than they would naturally expire on their own. -// -// creds := NewFromIAM("") -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. -// -// -// Custom Provider -// -// Each Provider built into this package also provides a helper method to generate -// a Credentials pointer setup with the provider. To use a custom Provider just -// create a type which satisfies the Provider interface and pass it to the -// NewCredentials method. 
-// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() -// -package credentials diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go deleted file mode 100644 index f9b2cc33a..000000000 --- a/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import "os" - -// A EnvAWS retrieves credentials from the environment variables of the -// running process. EnvAWSironment credentials never expire. -// -// EnvAWSironment variables used: -// -// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY. -// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY. -// * Secret Token: AWS_SESSION_TOKEN. -type EnvAWS struct { - retrieved bool -} - -// NewEnvAWS returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvAWS() *Credentials { - return New(&EnvAWS{}) -} - -// Retrieve retrieves the keys from the environment. -func (e *EnvAWS) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("AWS_ACCESS_KEY_ID") - if id == "" { - id = os.Getenv("AWS_ACCESS_KEY") - } - - secret := os.Getenv("AWS_SECRET_ACCESS_KEY") - if secret == "" { - secret = os.Getenv("AWS_SECRET_KEY") - } - - signerType := SignatureV4 - if id == "" || secret == "" { - signerType = SignatureAnonymous - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: os.Getenv("AWS_SESSION_TOKEN"), - SignerType: signerType, - }, nil -} - -// IsExpired returns if the credentials have been retrieved. -func (e *EnvAWS) IsExpired() bool { - return !e.retrieved -} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go deleted file mode 100644 index d72e77185..000000000 --- a/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
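Tying the credentials pieces together: a custom Provider embeds Expiry so the caching Credentials wrapper knows when to call Retrieve again, and the window argument triggers that refresh early. A sketch with a hypothetical static-token provider (the keys and the one-hour lifetime are made up):

```go
package main

import (
	"fmt"
	"time"

	"github.com/minio/minio-go/pkg/credentials"
)

// staticTokenProvider embeds Expiry so IsExpired comes for free and the
// Credentials cache refreshes it on schedule, as doc.go recommends.
type staticTokenProvider struct {
	credentials.Expiry
}

func (p *staticTokenProvider) Retrieve() (credentials.Value, error) {
	// Pretend these came from a token service; expire in one hour, with a
	// ten-minute window so the refresh happens early.
	p.SetExpiration(time.Now().Add(1*time.Hour), 10*time.Minute)
	return credentials.Value{
		AccessKeyID:     "ACCESS",
		SecretAccessKey: "SECRET",
		SignerType:      credentials.SignatureV4,
	}, nil
}

func main() {
	creds := credentials.New(&staticTokenProvider{})
	v, err := creds.Get() // the first Get always calls Retrieve
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(v.AccessKeyID, "expired:", creds.IsExpired()) // ACCESS expired: false
}
```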
- */ - -package credentials - -import "os" - -// A EnvMinio retrieves credentials from the environment variables of the -// running process. EnvMinioironment credentials never expire. -// -// EnvMinioironment variables used: -// -// * Access Key ID: MINIO_ACCESS_KEY. -// * Secret Access Key: MINIO_SECRET_KEY. -type EnvMinio struct { - retrieved bool -} - -// NewEnvMinio returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvMinio() *Credentials { - return New(&EnvMinio{}) -} - -// Retrieve retrieves the keys from the environment. -func (e *EnvMinio) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("MINIO_ACCESS_KEY") - secret := os.Getenv("MINIO_SECRET_KEY") - - signerType := SignatureV4 - if id == "" || secret == "" { - signerType = SignatureAnonymous - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SignerType: signerType, - }, nil -} - -// IsExpired returns if the credentials have been retrieved. -func (e *EnvMinio) IsExpired() bool { - return !e.retrieved -} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go deleted file mode 100644 index 5ad68303a..000000000 --- a/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "os" - "path/filepath" - - "github.com/go-ini/ini" - homedir "github.com/mitchellh/go-homedir" -) - -// A FileAWSCredentials retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. -// -// Profile ini file example: $HOME/.aws/credentials -type FileAWSCredentials struct { - // Path to the shared credentials file. - // - // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.aws/credentials" - // Windows: "%USERPROFILE%\.aws\credentials" - filename string - - // AWS Profile to extract credentials from the shared credentials file. If empty - // will default to environment variable "AWS_PROFILE" or "default" if - // environment variable is also not set. - profile string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewFileAWSCredentials returns a pointer to a new Credentials object -// wrapping the Profile file provider. -func NewFileAWSCredentials(filename string, profile string) *Credentials { - return New(&FileAWSCredentials{ - filename: filename, - profile: profile, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// users home directory. 
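Typical wiring for the env providers: construct the credentials object and hand it to NewWithCredentials. The Setenv calls below only make the example self-contained; real deployments set these variables in the environment (play sample values again):

```go
package main

import (
	"log"
	"os"

	minio "github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Demo values only; normally set in the shell, not in code.
	os.Setenv("MINIO_ACCESS_KEY", "Q3AM3UQ867SPQQA43P2F")
	os.Setenv("MINIO_SECRET_KEY", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG")

	creds := credentials.NewEnvMinio()
	client, err := minio.NewWithCredentials("play.minio.io:9000", creds, true, "us-east-1")
	if err != nil {
		log.Fatalln(err)
	}

	// With empty env vars the provider degrades to anonymous requests,
	// which still work against public buckets.
	buckets, err := client.ListBuckets()
	if err != nil {
		log.Fatalln(err)
	}
	for _, b := range buckets {
		log.Println(b.Name)
	}
}
```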
-func (p *FileAWSCredentials) Retrieve() (Value, error) { - if p.filename == "" { - p.filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") - if p.filename == "" { - homeDir, err := homedir.Dir() - if err != nil { - return Value{}, err - } - p.filename = filepath.Join(homeDir, ".aws", "credentials") - } - } - if p.profile == "" { - p.profile = os.Getenv("AWS_PROFILE") - if p.profile == "" { - p.profile = "default" - } - } - - p.retrieved = false - - iniProfile, err := loadProfile(p.filename, p.profile) - if err != nil { - return Value{}, err - } - - // Default to empty string if not found. - id := iniProfile.Key("aws_access_key_id") - // Default to empty string if not found. - secret := iniProfile.Key("aws_secret_access_key") - // Default to empty string if not found. - token := iniProfile.Key("aws_session_token") - - p.retrieved = true - return Value{ - AccessKeyID: id.String(), - SecretAccessKey: secret.String(), - SessionToken: token.String(), - SignerType: SignatureV4, - }, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *FileAWSCredentials) IsExpired() bool { - return !p.retrieved -} - -// loadProfiles loads from the file pointed to by shared credentials filename for profile. -// The credentials retrieved from the profile will be returned or error. Error will be -// returned if it fails to read from the file, or the data is invalid. -func loadProfile(filename, profile string) (*ini.Section, error) { - config, err := ini.Load(filename) - if err != nil { - return nil, err - } - iniProfile, err := config.GetSection(profile) - if err != nil { - return nil, err - } - return iniProfile, nil -} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go deleted file mode 100644 index 6a6827e37..000000000 --- a/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - homedir "github.com/mitchellh/go-homedir" -) - -// A FileMinioClient retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. -// -// Configuration file example: $HOME/.mc/config.json -type FileMinioClient struct { - // Path to the shared credentials file. - // - // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.mc/config.json" - // Windows: "%USERALIAS%\mc\config.json" - filename string - - // Minio Alias to extract credentials from the shared credentials file. If empty - // will default to environment variable "MINIO_ALIAS" or "default" if - // environment variable is also not set. 
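FileAWSCredentials usage mirrors the AWS CLI conventions: an empty filename falls back to AWS_SHARED_CREDENTIALS_FILE and then ~/.aws/credentials. A sketch reading the no_token profile from the credentials.sample shown earlier:

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	// Empty filename: resolve via AWS_SHARED_CREDENTIALS_FILE, then the
	// home-directory default; "no_token" is a profile from the sample file.
	creds := credentials.NewFileAWSCredentials("", "no_token")

	value, err := creds.Get()
	if err != nil {
		log.Fatalln(err) // e.g. file or profile missing
	}
	fmt.Println("access key:", value.AccessKeyID)
}
```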
- alias string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewFileMinioClient returns a pointer to a new Credentials object -// wrapping the Alias file provider. -func NewFileMinioClient(filename string, alias string) *Credentials { - return New(&FileMinioClient{ - filename: filename, - alias: alias, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// users home directory. -func (p *FileMinioClient) Retrieve() (Value, error) { - if p.filename == "" { - if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok { - p.filename = value - } else { - homeDir, err := homedir.Dir() - if err != nil { - return Value{}, err - } - p.filename = filepath.Join(homeDir, ".mc", "config.json") - if runtime.GOOS == "windows" { - p.filename = filepath.Join(homeDir, "mc", "config.json") - } - } - } - - if p.alias == "" { - p.alias = os.Getenv("MINIO_ALIAS") - if p.alias == "" { - p.alias = "s3" - } - } - - p.retrieved = false - - hostCfg, err := loadAlias(p.filename, p.alias) - if err != nil { - return Value{}, err - } - - p.retrieved = true - return Value{ - AccessKeyID: hostCfg.AccessKey, - SecretAccessKey: hostCfg.SecretKey, - SignerType: parseSignatureType(hostCfg.API), - }, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *FileMinioClient) IsExpired() bool { - return !p.retrieved -} - -// hostConfig configuration of a host. -type hostConfig struct { - URL string `json:"url"` - AccessKey string `json:"accessKey"` - SecretKey string `json:"secretKey"` - API string `json:"api"` -} - -// config config version. -type config struct { - Version string `json:"version"` - Hosts map[string]hostConfig `json:"hosts"` -} - -// loadAliass loads from the file pointed to by shared credentials filename for alias. -// The credentials retrieved from the alias will be returned or error. Error will be -// returned if it fails to read from the file. -func loadAlias(filename, alias string) (hostConfig, error) { - cfg := &config{} - configBytes, err := ioutil.ReadFile(filename) - if err != nil { - return hostConfig{}, err - } - if err = json.Unmarshal(configBytes, cfg); err != nil { - return hostConfig{}, err - } - return cfg.Hosts[alias], nil -} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go deleted file mode 100644 index 05b2a8bb4..000000000 --- a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "bufio" - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "os" - "path" - "time" -) - -// DefaultExpiryWindow - Default expiry window. -// ExpiryWindow will allow the credentials to trigger refreshing -// prior to the credentials actually expiring. 
This is beneficial -// so race conditions with expiring credentials do not cause -// request to fail unexpectedly due to ExpiredTokenException exceptions. -const DefaultExpiryWindow = time.Second * 10 // 10 secs - -// A IAM retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -type IAM struct { - Expiry - - // Required http Client to use when connecting to IAM metadata service. - Client *http.Client - - // Custom endpoint to fetch IAM role credentials. - endpoint string -} - -// IAM Roles for Amazon EC2 -// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html -const ( - defaultIAMRoleEndpoint = "http://169.254.169.254" - defaultECSRoleEndpoint = "http://169.254.170.2" - defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials" -) - -// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html -func getEndpoint(endpoint string) (string, bool) { - if endpoint != "" { - return endpoint, os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") != "" - } - if ecsURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); ecsURI != "" { - return fmt.Sprintf("%s%s", defaultECSRoleEndpoint, ecsURI), true - } - return defaultIAMRoleEndpoint, false -} - -// NewIAM returns a pointer to a new Credentials object wrapping the IAM. -func NewIAM(endpoint string) *Credentials { - p := &IAM{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - endpoint: endpoint, - } - return New(p) -} - -// Retrieve retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired -func (m *IAM) Retrieve() (Value, error) { - endpoint, isEcsTask := getEndpoint(m.endpoint) - var roleCreds ec2RoleCredRespBody - var err error - if isEcsTask { - roleCreds, err = getEcsTaskCredentials(m.Client, endpoint) - } else { - roleCreds, err = getCredentials(m.Client, endpoint) - } - if err != nil { - return Value{}, err - } - // Expiry window is set to 10secs. - m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow) - - return Value{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - SignerType: SignatureV4, - }, nil -} - -// A ec2RoleCredRespBody provides the shape for unmarshaling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string - - // Unused params. - LastUpdated time.Time - Type string -} - -// Get the final IAM role URL where the request will -// be sent to fetch the rolling access credentials. -// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html -func getIAMRoleURL(endpoint string) (*url.URL, error) { - u, err := url.Parse(endpoint) - if err != nil { - return nil, err - } - u.Path = defaultIAMSecurityCredsPath - return u, nil -} - -// listRoleNames lists of credential role names associated -// with the current EC2 service. If there are no credentials, -// or there is an error making or receiving the request. 
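
Before the metadata-listing helpers below, a sketch of how the IAM provider is driven end to end. Illustrative only: the empty endpoint makes getEndpoint pick the EC2 metadata address, or the ECS endpoint when AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is set, and Get is the Credentials wrapper accessor from elsewhere in this package.

package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/credentials"
)

func main() {
	creds := credentials.NewIAM("") // "" = use the default role endpoints

	// Get triggers Retrieve; the Expiry embedded in IAM then answers
	// IsExpired using the 10s DefaultExpiryWindow defined above.
	v, err := creds.Get()
	if err != nil {
		fmt.Println("no role credentials:", err)
		return
	}
	fmt.Println("temporary key:", v.AccessKeyID)
}
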
-// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html -func listRoleNames(client *http.Client, u *url.URL) ([]string, error) { - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return nil, err - } - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return nil, errors.New(resp.Status) - } - - credsList := []string{} - s := bufio.NewScanner(resp.Body) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, err - } - - return credsList, nil -} - -func getEcsTaskCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { - req, err := http.NewRequest("GET", endpoint, nil) - if err != nil { - return ec2RoleCredRespBody{}, err - } - - resp, err := client.Do(req) - if err != nil { - return ec2RoleCredRespBody{}, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return ec2RoleCredRespBody{}, errors.New(resp.Status) - } - - respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, err - } - - return respCreds, nil -} - -// getCredentials - obtains the credentials from the IAM role name associated with -// the current EC2 service. -// -// If the credentials cannot be found, or there is an error -// reading the response an error will be returned. -func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - u, err := getIAMRoleURL(endpoint) - if err != nil { - return ec2RoleCredRespBody{}, err - } - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - roleNames, err := listRoleNames(client, u) - if err != nil { - return ec2RoleCredRespBody{}, err - } - - if len(roleNames) == 0 { - return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service") - } - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - // - An instance profile can contain only one IAM role. This limit cannot be increased. - roleName := roleNames[0] - - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html - // The following command retrieves the security credentials for an - // IAM role named `s3access`. - // - // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access - // - u.Path = path.Join(u.Path, roleName) - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return ec2RoleCredRespBody{}, err - } - - resp, err := client.Do(req) - if err != nil { - return ec2RoleCredRespBody{}, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return ec2RoleCredRespBody{}, errors.New(resp.Status) - } - - respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, err - } - - if respCreds.Code != "Success" { - // If an error code was returned something failed requesting the role. 
- return ec2RoleCredRespBody{}, errors.New(respCreds.Message) - } - - return respCreds, nil -} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go deleted file mode 100644 index 1b768e8c3..000000000 --- a/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import "strings" - -// SignatureType is type of Authorization requested for a given HTTP request. -type SignatureType int - -// Different types of supported signatures - default is SignatureV4 or SignatureDefault. -const ( - // SignatureDefault is always set to v4. - SignatureDefault SignatureType = iota - SignatureV4 - SignatureV2 - SignatureV4Streaming - SignatureAnonymous // Anonymous signature signifies, no signature. -) - -// IsV2 - is signature SignatureV2? -func (s SignatureType) IsV2() bool { - return s == SignatureV2 -} - -// IsV4 - is signature SignatureV4? -func (s SignatureType) IsV4() bool { - return s == SignatureV4 || s == SignatureDefault -} - -// IsStreamingV4 - is signature SignatureV4Streaming? -func (s SignatureType) IsStreamingV4() bool { - return s == SignatureV4Streaming -} - -// IsAnonymous - is signature empty? -func (s SignatureType) IsAnonymous() bool { - return s == SignatureAnonymous -} - -// Stringer humanized version of signature type, -// strings returned here are case insensitive. -func (s SignatureType) String() string { - if s.IsV2() { - return "S3v2" - } else if s.IsV4() { - return "S3v4" - } else if s.IsStreamingV4() { - return "S3v4Streaming" - } - return "Anonymous" -} - -func parseSignatureType(str string) SignatureType { - if strings.EqualFold(str, "S3v4") { - return SignatureV4 - } else if strings.EqualFold(str, "S3v2") { - return SignatureV2 - } else if strings.EqualFold(str, "S3v4Streaming") { - return SignatureV4Streaming - } - return SignatureAnonymous -} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/pkg/credentials/static.go deleted file mode 100644 index 8b0ba711c..000000000 --- a/vendor/github.com/minio/minio-go/pkg/credentials/static.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -// A Static is a set of credentials which are set programmatically, -// and will never expire. -type Static struct { - Value -} - -// NewStaticV2 returns a pointer to a new Credentials object -// wrapping a static credentials value provider, signature is -// set to v2. If access and secret are not specified then -// regardless of signature type set it Value will return -// as anonymous. -func NewStaticV2(id, secret, token string) *Credentials { - return NewStatic(id, secret, token, SignatureV2) -} - -// NewStaticV4 is similar to NewStaticV2 with similar considerations. -func NewStaticV4(id, secret, token string) *Credentials { - return NewStatic(id, secret, token, SignatureV4) -} - -// NewStatic returns a pointer to a new Credentials object -// wrapping a static credentials value provider. -func NewStatic(id, secret, token string, signerType SignatureType) *Credentials { - return New(&Static{ - Value: Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - SignerType: signerType, - }, - }) -} - -// Retrieve returns the static credentials. -func (s *Static) Retrieve() (Value, error) { - if s.AccessKeyID == "" || s.SecretAccessKey == "" { - // Anonymous is not an error - return Value{SignerType: SignatureAnonymous}, nil - } - return s.Value, nil -} - -// IsExpired returns if the credentials are expired. -// -// For Static, the credentials never expired. -func (s *Static) IsExpired() bool { - return false -} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/pkg/credentials/sts_client_grants.go deleted file mode 100644 index f0a4e8d2c..000000000 --- a/vendor/github.com/minio/minio-go/pkg/credentials/sts_client_grants.go +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package credentials - -import ( - "encoding/xml" - "errors" - "fmt" - "net/http" - "net/url" - "time" -) - -// AssumedRoleUser - The identifiers for the temporary security credentials that -// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser -type AssumedRoleUser struct { - Arn string - AssumedRoleID string `xml:"AssumeRoleId"` -} - -// AssumeRoleWithClientGrantsResponse contains the result of successful AssumeRoleWithClientGrants request. 
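
Before the response type below, one note on the Static provider just removed above: it is the only provider here that does no I/O at all, which makes its anonymous-credentials behavior easy to miss. A two-line sketch (key material is a placeholder):

package main

import "github.com/minio/minio-go/pkg/credentials"

func main() {
	// Fixed keys, SigV4 signing.
	fixed := credentials.NewStaticV4("ACCESSKEY", "SECRETKEY", "")

	// Empty id/secret do not error: Retrieve returns an anonymous
	// Value (SignatureAnonymous), so requests go out unsigned.
	anon := credentials.NewStaticV4("", "", "")

	_, _ = fixed, anon
}
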
-type AssumeRoleWithClientGrantsResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"` - Result ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"` - ResponseMetadata struct { - RequestID string `xml:"RequestId,omitempty"` - } `xml:"ResponseMetadata,omitempty"` -} - -// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants -// request, including temporary credentials that can be used to make Minio API requests. -type ClientGrantsResult struct { - AssumedRoleUser AssumedRoleUser `xml:",omitempty"` - Audience string `xml:",omitempty"` - Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - } `xml:",omitempty"` - PackedPolicySize int `xml:",omitempty"` - Provider string `xml:",omitempty"` - SubjectFromClientGrantsToken string `xml:",omitempty"` -} - -// ClientGrantsToken - client grants token with expiry. -type ClientGrantsToken struct { - token string - expiry int -} - -// Token - access token returned after authenticating client grants. -func (c *ClientGrantsToken) Token() string { - return c.token -} - -// Expiry - expiry for the access token returned after authenticating -// client grants. -func (c *ClientGrantsToken) Expiry() string { - return fmt.Sprintf("%d", c.expiry) -} - -// A STSClientGrants retrieves credentials from Minio service, and keeps track if -// those credentials are expired. -type STSClientGrants struct { - Expiry - - // Required http Client to use when connecting to Minio STS service. - Client *http.Client - - // Minio endpoint to fetch STS credentials. - stsEndpoint string - - // getClientGrantsTokenExpiry function to retrieve tokens - // from IDP This function should return two values one is - // accessToken which is a self contained access token (JWT) - // and second return value is the expiry associated with - // this token. This is a customer provided function and - // is mandatory. - getClientGrantsTokenExpiry func() (*ClientGrantsToken, error) -} - -// NewSTSClientGrants returns a pointer to a new -// Credentials object wrapping the STSClientGrants. 
-func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) { - if stsEndpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } - if getClientGrantsTokenExpiry == nil { - return nil, errors.New("Client grants access token and expiry retrieval function should be defined") - } - return New(&STSClientGrants{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - stsEndpoint: stsEndpoint, - getClientGrantsTokenExpiry: getClientGrantsTokenExpiry, - }), nil -} - -func getClientGrantsCredentials(clnt *http.Client, endpoint string, - getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (AssumeRoleWithClientGrantsResponse, error) { - - accessToken, err := getClientGrantsTokenExpiry() - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - - v := url.Values{} - v.Set("Action", "AssumeRoleWithClientGrants") - v.Set("Token", accessToken.Token()) - v.Set("DurationSeconds", accessToken.Expiry()) - v.Set("Version", "2011-06-15") - - u, err := url.Parse(endpoint) - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - u.RawQuery = v.Encode() - - req, err := http.NewRequest("POST", u.String(), nil) - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - resp, err := clnt.Do(req) - if err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return AssumeRoleWithClientGrantsResponse{}, errors.New(resp.Status) - } - - a := AssumeRoleWithClientGrantsResponse{} - if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { - return AssumeRoleWithClientGrantsResponse{}, err - } - return a, nil -} - -// Retrieve retrieves credentials from the Minio service. -// Error will be returned if the request fails. -func (m *STSClientGrants) Retrieve() (Value, error) { - a, err := getClientGrantsCredentials(m.Client, m.stsEndpoint, m.getClientGrantsTokenExpiry) - if err != nil { - return Value{}, err - } - - // Expiry window is set to 10secs. - m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) - - return Value{ - AccessKeyID: a.Result.Credentials.AccessKey, - SecretAccessKey: a.Result.Credentials.SecretKey, - SessionToken: a.Result.Credentials.SessionToken, - SignerType: SignatureV4, - }, nil -} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/pkg/credentials/sts_web_identity.go deleted file mode 100644 index d924b16c7..000000000 --- a/vendor/github.com/minio/minio-go/pkg/credentials/sts_web_identity.go +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2019 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package credentials - -import ( - "encoding/xml" - "errors" - "fmt" - "net/http" - "net/url" - "time" -) - -// AssumeRoleWithWebIdentityResponse contains the result of successful AssumeRoleWithWebIdentity request. -type AssumeRoleWithWebIdentityResponse struct { - XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"` - Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"` - ResponseMetadata struct { - RequestID string `xml:"RequestId,omitempty"` - } `xml:"ResponseMetadata,omitempty"` -} - -// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity -// request, including temporary credentials that can be used to make Minio API requests. -type WebIdentityResult struct { - AssumedRoleUser AssumedRoleUser `xml:",omitempty"` - Audience string `xml:",omitempty"` - Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - } `xml:",omitempty"` - PackedPolicySize int `xml:",omitempty"` - Provider string `xml:",omitempty"` - SubjectFromWebIdentityToken string `xml:",omitempty"` -} - -// WebIdentityToken - web identity token with expiry. -type WebIdentityToken struct { - token string - expiry int -} - -// Token - access token returned after authenticating web identity. -func (c *WebIdentityToken) Token() string { - return c.token -} - -// Expiry - expiry for the access token returned after authenticating -// web identity. -func (c *WebIdentityToken) Expiry() string { - return fmt.Sprintf("%d", c.expiry) -} - -// A STSWebIdentity retrieves credentials from Minio service, and keeps track if -// those credentials are expired. -type STSWebIdentity struct { - Expiry - - // Required http Client to use when connecting to Minio STS service. - Client *http.Client - - // Minio endpoint to fetch STS credentials. - stsEndpoint string - - // getWebIDTokenExpiry function which returns ID tokens - // from IDP. This function should return two values one - // is ID token which is a self contained ID token (JWT) - // and second return value is the expiry associated with - // this token. - // This is a customer provided function and is mandatory. - getWebIDTokenExpiry func() (*WebIdentityToken, error) -} - -// NewSTSWebIdentity returns a pointer to a new -// Credentials object wrapping the STSWebIdentity. 
-func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) { - if stsEndpoint == "" { - return nil, errors.New("STS endpoint cannot be empty") - } - if getWebIDTokenExpiry == nil { - return nil, errors.New("Web ID token and expiry retrieval function should be defined") - } - return New(&STSWebIdentity{ - Client: &http.Client{ - Transport: http.DefaultTransport, - }, - stsEndpoint: stsEndpoint, - getWebIDTokenExpiry: getWebIDTokenExpiry, - }), nil -} - -func getWebIdentityCredentials(clnt *http.Client, endpoint string, - getWebIDTokenExpiry func() (*WebIdentityToken, error)) (AssumeRoleWithWebIdentityResponse, error) { - idToken, err := getWebIDTokenExpiry() - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - v := url.Values{} - v.Set("Action", "AssumeRoleWithWebIdentity") - v.Set("WebIdentityToken", idToken.Token()) - v.Set("DurationSeconds", idToken.Expiry()) - v.Set("Version", "2011-06-15") - - u, err := url.Parse(endpoint) - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - u.RawQuery = v.Encode() - - req, err := http.NewRequest("POST", u.String(), nil) - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - resp, err := clnt.Do(req) - if err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return AssumeRoleWithWebIdentityResponse{}, errors.New(resp.Status) - } - - a := AssumeRoleWithWebIdentityResponse{} - if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { - return AssumeRoleWithWebIdentityResponse{}, err - } - - return a, nil -} - -// Retrieve retrieves credentials from the Minio service. -// Error will be returned if the request fails. -func (m *STSWebIdentity) Retrieve() (Value, error) { - a, err := getWebIdentityCredentials(m.Client, m.stsEndpoint, m.getWebIDTokenExpiry) - if err != nil { - return Value{}, err - } - - // Expiry window is set to 10secs. - m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) - - return Value{ - AccessKeyID: a.Result.Credentials.AccessKey, - SecretAccessKey: a.Result.Credentials.SecretKey, - SessionToken: a.Result.Credentials.SessionToken, - SignerType: SignatureV4, - }, nil -} diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go deleted file mode 100644 index 2d3c70f00..000000000 --- a/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2018 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package encrypt - -import ( - "crypto/md5" - "encoding/base64" - "encoding/json" - "errors" - "net/http" - - "golang.org/x/crypto/argon2" -) - -const ( - // sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS. 
- sseGenericHeader = "X-Amz-Server-Side-Encryption" - - // sseKmsKeyID is the AWS SSE-KMS key id. - sseKmsKeyID = sseGenericHeader + "-Aws-Kms-Key-Id" - // sseEncryptionContext is the AWS SSE-KMS Encryption Context data. - sseEncryptionContext = sseGenericHeader + "-Encryption-Context" - - // sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key. - sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm" - // sseCustomerKey is the AWS SSE-C encryption key HTTP header key. - sseCustomerKey = sseGenericHeader + "-Customer-Key" - // sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key. - sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5" - - // sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API. - sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm" - // sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API. - sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key" - // sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API. - sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5" -) - -// PBKDF creates a SSE-C key from the provided password and salt. -// PBKDF is a password-based key derivation function -// which can be used to derive a high-entropy cryptographic -// key from a low-entropy password and a salt. -type PBKDF func(password, salt []byte) ServerSide - -// DefaultPBKDF is the default PBKDF. It uses Argon2id with the -// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads). -var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide { - sse := ssec{} - copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32)) - return sse -} - -// Type is the server-side-encryption method. It represents one of -// the following encryption methods: -// - SSE-C: server-side-encryption with customer provided keys -// - KMS: server-side-encryption with managed keys -// - S3: server-side-encryption using S3 storage encryption -type Type string - -const ( - // SSEC represents server-side-encryption with customer provided keys - SSEC Type = "SSE-C" - // KMS represents server-side-encryption with managed keys - KMS Type = "KMS" - // S3 represents server-side-encryption using S3 storage encryption - S3 Type = "S3" -) - -// ServerSide is a form of S3 server-side-encryption. -type ServerSide interface { - // Type returns the server-side-encryption method. - Type() Type - - // Marshal adds encryption headers to the provided HTTP headers. - // It marks an HTTP request as server-side-encryption request - // and inserts the required data into the headers. - Marshal(h http.Header) -} - -// NewSSE returns a server-side-encryption using S3 storage encryption. -// Using SSE-S3 the server will encrypt the object with server-managed keys. -func NewSSE() ServerSide { return s3{} } - -// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context. -func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) { - if context == nil { - return kms{key: keyID, hasContext: false}, nil - } - serializedContext, err := json.Marshal(context) - if err != nil { - return nil, err - } - return kms{key: keyID, context: serializedContext, hasContext: true}, nil -} - -// NewSSEC returns a new server-side-encryption using SSE-C and the provided key. -// The key must be 32 bytes long. 
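
The PBKDF above and the SSE-C constructor below compose as follows; a minimal sketch, assuming the vendored encrypt package (password and salt are made up; salting with the bucket/object path is one common convention):

package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	// Argon2id derives the 32-byte SSE-C key from a password and salt.
	sse := encrypt.DefaultPBKDF([]byte("my-password"), []byte("bucket/object"))

	// Marshal stamps the three SSE-C headers onto a request.
	h := http.Header{}
	sse.Marshal(h)
	fmt.Println(h.Get("X-Amz-Server-Side-Encryption-Customer-Algorithm")) // AES256
}
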
-func NewSSEC(key []byte) (ServerSide, error) { - if len(key) != 32 { - return nil, errors.New("encrypt: SSE-C key must be 256 bit long") - } - sse := ssec{} - copy(sse[:], key) - return sse, nil -} - -// SSE transforms a SSE-C copy encryption into a SSE-C encryption. -// It is the inverse of SSECopy(...). -// -// If the provided sse is no SSE-C copy encryption SSE returns -// sse unmodified. -func SSE(sse ServerSide) ServerSide { - if sse == nil || sse.Type() != SSEC { - return sse - } - if sse, ok := sse.(ssecCopy); ok { - return ssec(sse) - } - return sse -} - -// SSECopy transforms a SSE-C encryption into a SSE-C copy -// encryption. This is required for SSE-C key rotation or a SSE-C -// copy where the source and the destination should be encrypted. -// -// If the provided sse is no SSE-C encryption SSECopy returns -// sse unmodified. -func SSECopy(sse ServerSide) ServerSide { - if sse == nil || sse.Type() != SSEC { - return sse - } - if sse, ok := sse.(ssec); ok { - return ssecCopy(sse) - } - return sse -} - -type ssec [32]byte - -func (s ssec) Type() Type { return SSEC } - -func (s ssec) Marshal(h http.Header) { - keyMD5 := md5.Sum(s[:]) - h.Set(sseCustomerAlgorithm, "AES256") - h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:])) - h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) -} - -type ssecCopy [32]byte - -func (s ssecCopy) Type() Type { return SSEC } - -func (s ssecCopy) Marshal(h http.Header) { - keyMD5 := md5.Sum(s[:]) - h.Set(sseCopyCustomerAlgorithm, "AES256") - h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:])) - h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) -} - -type s3 struct{} - -func (s s3) Type() Type { return S3 } - -func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") } - -type kms struct { - key string - context []byte - hasContext bool -} - -func (s kms) Type() Type { return KMS } - -func (s kms) Marshal(h http.Header) { - h.Set(sseGenericHeader, "aws:kms") - h.Set(sseKmsKeyID, s.key) - if s.hasContext { - h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context)) - } -} diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go deleted file mode 100644 index 156a6d63a..000000000 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package s3signer - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "net/http" - "strconv" - "strings" - "time" -) - -// Reference for constants used below - -// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming -const ( - streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" - streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" - emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - payloadChunkSize = 64 * 1024 - chunkSigConstLen = 17 // ";chunk-signature=" - signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" - crlfLen = 2 // CRLF -) - -// Request headers to be ignored while calculating seed signature for -// a request. -var ignoredStreamingHeaders = map[string]bool{ - "Authorization": true, - "User-Agent": true, - "Content-Type": true, -} - -// getSignedChunkLength - calculates the length of chunk metadata -func getSignedChunkLength(chunkDataSize int64) int64 { - return int64(len(fmt.Sprintf("%x", chunkDataSize))) + - chunkSigConstLen + - signatureStrLen + - crlfLen + - chunkDataSize + - crlfLen -} - -// getStreamLength - calculates the length of the overall stream (data + metadata) -func getStreamLength(dataLen, chunkSize int64) int64 { - if dataLen <= 0 { - return 0 - } - - chunksCount := int64(dataLen / chunkSize) - remainingBytes := int64(dataLen % chunkSize) - streamLen := int64(0) - streamLen += chunksCount * getSignedChunkLength(chunkSize) - if remainingBytes > 0 { - streamLen += getSignedChunkLength(remainingBytes) - } - streamLen += getSignedChunkLength(0) - return streamLen -} - -// buildChunkStringToSign - returns the string to sign given chunk data -// and previous signature. -func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { - stringToSignParts := []string{ - streamingPayloadHdr, - t.Format(iso8601DateFormat), - getScope(region, t), - previousSig, - emptySHA256, - hex.EncodeToString(sum256(chunkData)), - } - - return strings.Join(stringToSignParts, "\n") -} - -// prepareStreamingRequest - prepares a request with appropriate -// headers before computing the seed signature. -func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { - // Set x-amz-content-sha256 header. - req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) - if sessionToken != "" { - req.Header.Set("X-Amz-Security-Token", sessionToken) - } - - req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) - // Set content length with streaming signature for each chunk included. - req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) - req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) -} - -// buildChunkHeader - returns the chunk header. -// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n -func buildChunkHeader(chunkLen int64, signature string) []byte { - return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n") -} - -// buildChunkSignature - returns chunk signature for a given chunk and previous signature. 
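
To make the length bookkeeping above concrete before the signing helper below, here is a worked sketch that mirrors getSignedChunkLength and getStreamLength for a 100 KiB upload. This is standalone arithmetic, not library code.

package main

import "fmt"

// Mirrors the vendored helper: hex(size) + ";chunk-signature=" (17 bytes)
// + 64-byte signature + CRLF + payload + CRLF.
func signedChunkLen(n int64) int64 {
	return int64(len(fmt.Sprintf("%x", n))) + 17 + 64 + 2 + n + 2
}

func main() {
	data := int64(100 * 1024) // one full 64 KiB chunk + a 36 KiB remainder
	total := signedChunkLen(64*1024) + signedChunkLen(data-64*1024) +
		signedChunkLen(0) // zero-length terminating chunk
	fmt.Println(total) // the value sent as Content-Length
}
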
-func buildChunkSignature(chunkData []byte, reqTime time.Time, region, - previousSignature, secretAccessKey string) string { - - chunkStringToSign := buildChunkStringToSign(reqTime, region, - previousSignature, chunkData) - signingKey := getSigningKey(secretAccessKey, region, reqTime) - return getSignature(signingKey, chunkStringToSign) -} - -// getSeedSignature - returns the seed signature for a given request. -func (s *StreamingReader) setSeedSignature(req *http.Request) { - // Get canonical request - canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders) - - // Get string to sign from canonical request. - stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest) - - signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime) - - // Calculate signature. - s.seedSignature = getSignature(signingKey, stringToSign) -} - -// StreamingReader implements chunked upload signature as a reader on -// top of req.Body's ReaderCloser chunk header;data;... repeat -type StreamingReader struct { - accessKeyID string - secretAccessKey string - sessionToken string - region string - prevSignature string - seedSignature string - contentLen int64 // Content-Length from req header - baseReadCloser io.ReadCloser // underlying io.Reader - bytesRead int64 // bytes read from underlying io.Reader - buf bytes.Buffer // holds signed chunk - chunkBuf []byte // holds raw data read from req Body - chunkBufLen int // no. of bytes read so far into chunkBuf - done bool // done reading the underlying reader to EOF - reqTime time.Time - chunkNum int - totalChunks int - lastChunkSize int -} - -// signChunk - signs a chunk read from s.baseReader of chunkLen size. -func (s *StreamingReader) signChunk(chunkLen int) { - // Compute chunk signature for next header - signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime, - s.region, s.prevSignature, s.secretAccessKey) - - // For next chunk signature computation - s.prevSignature = signature - - // Write chunk header into streaming buffer - chunkHdr := buildChunkHeader(int64(chunkLen), signature) - s.buf.Write(chunkHdr) - - // Write chunk data into streaming buffer - s.buf.Write(s.chunkBuf[:chunkLen]) - - // Write the chunk trailer. - s.buf.Write([]byte("\r\n")) - - // Reset chunkBufLen for next chunk read. - s.chunkBufLen = 0 - s.chunkNum++ -} - -// setStreamingAuthHeader - builds and sets authorization header value -// for streaming signature. -func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { - credential := GetCredential(s.accessKeyID, s.region, s.reqTime) - authParts := []string{ - signV4Algorithm + " Credential=" + credential, - "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders), - "Signature=" + s.seedSignature, - } - - // Set authorization header. - auth := strings.Join(authParts, ",") - req.Header.Set("Authorization", auth) -} - -// StreamingSignV4 - provides chunked upload signatureV4 support by -// implementing io.Reader. -func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken, - region string, dataLen int64, reqTime time.Time) *http.Request { - - // Set headers needed for streaming signature. 
- prepareStreamingRequest(req, sessionToken, dataLen, reqTime) - - if req.Body == nil { - req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) - } - - stReader := &StreamingReader{ - baseReadCloser: req.Body, - accessKeyID: accessKeyID, - secretAccessKey: secretAccessKey, - sessionToken: sessionToken, - region: region, - reqTime: reqTime, - chunkBuf: make([]byte, payloadChunkSize), - contentLen: dataLen, - chunkNum: 1, - totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, - lastChunkSize: int(dataLen % payloadChunkSize), - } - - // Add the request headers required for chunk upload signing. - - // Compute the seed signature. - stReader.setSeedSignature(req) - - // Set the authorization header with the seed signature. - stReader.setStreamingAuthHeader(req) - - // Set seed signature as prevSignature for subsequent - // streaming signing process. - stReader.prevSignature = stReader.seedSignature - req.Body = stReader - - return req -} - -// Read - this method performs chunk upload signature providing a -// io.Reader interface. -func (s *StreamingReader) Read(buf []byte) (int, error) { - switch { - // After the last chunk is read from underlying reader, we - // never re-fill s.buf. - case s.done: - - // s.buf will be (re-)filled with next chunk when has lesser - // bytes than asked for. - case s.buf.Len() < len(buf): - s.chunkBufLen = 0 - for { - n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) - // Usually we validate `err` first, but in this case - // we are validating n > 0 for the following reasons. - // - // 1. n > 0, err is one of io.EOF, nil (near end of stream) - // A Reader returning a non-zero number of bytes at the end - // of the input stream may return either err == EOF or err == nil - // - // 2. n == 0, err is io.EOF (actual end of stream) - // - // Callers should always process the n > 0 bytes returned - // before considering the error err. - if n1 > 0 { - s.chunkBufLen += n1 - s.bytesRead += int64(n1) - - if s.chunkBufLen == payloadChunkSize || - (s.chunkNum == s.totalChunks-1 && - s.chunkBufLen == s.lastChunkSize) { - // Sign the chunk and write it to s.buf. - s.signChunk(s.chunkBufLen) - break - } - } - if err != nil { - if err == io.EOF { - // No more data left in baseReader - last chunk. - // Done reading the last chunk from baseReader. - s.done = true - - // bytes read from baseReader different than - // content length provided. - if s.bytesRead != s.contentLen { - return 0, io.ErrUnexpectedEOF - } - - // Sign the chunk and write it to s.buf. - s.signChunk(0) - break - } - return 0, err - } - - } - } - return s.buf.Read(buf) -} - -// Close - this method makes underlying io.ReadCloser's Close method available. -func (s *StreamingReader) Close() error { - return s.baseReadCloser.Close() -} diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go deleted file mode 100644 index b4070938e..000000000 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go +++ /dev/null @@ -1,316 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3signer - -import ( - "bytes" - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "fmt" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// Signature and API related constants. -const ( - signV2Algorithm = "AWS" -) - -// Encode input URL path to URL encoded path. -func encodeURL2Path(req *http.Request, virtualHost bool) (path string) { - if virtualHost { - reqHost := getHostAddr(req) - dotPos := strings.Index(reqHost, ".") - if dotPos > -1 { - bucketName := reqHost[:dotPos] - path = "/" + bucketName - path += req.URL.Path - path = s3utils.EncodePath(path) - return - } - } - path = s3utils.EncodePath(req.URL.Path) - return -} - -// PreSignV2 - presign the request in following style. -// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. -func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request { - // Presign is not needed for anonymous credentials. - if accessKeyID == "" || secretAccessKey == "" { - return &req - } - - d := time.Now().UTC() - // Find epoch expires when the request will expire. - epochExpires := d.Unix() + expires - - // Add expires header if not present. - if expiresStr := req.Header.Get("Expires"); expiresStr == "" { - req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10)) - } - - // Get presigned string to sign. - stringToSign := preStringToSignV2(req, virtualHost) - hm := hmac.New(sha1.New, []byte(secretAccessKey)) - hm.Write([]byte(stringToSign)) - - // Calculate signature. - signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) - - query := req.URL.Query() - // Handle specially for Google Cloud Storage. - if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") { - query.Set("GoogleAccessId", accessKeyID) - } else { - query.Set("AWSAccessKeyId", accessKeyID) - } - - // Fill in Expires for presigned query. - query.Set("Expires", strconv.FormatInt(epochExpires, 10)) - - // Encode query and save. - req.URL.RawQuery = s3utils.QueryEncode(query) - - // Save signature finally. - req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature) - - // Return. - return &req -} - -// PostPresignSignatureV2 - presigned signature for PostPolicy -// request. -func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { - hm := hmac.New(sha1.New, []byte(secretAccessKey)) - hm.Write([]byte(policyBase64)) - signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) - return signature -} - -// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature; -// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ); -// -// StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Date + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; -// -// CanonicalizedResource = [ "/" + Bucket ] + -// + -// [ subresource, if present. 
For example "?acl", "?location", "?logging", or "?torrent"]; -// -// CanonicalizedProtocolHeaders = - -// SignV2 sign the request before Do() (AWS Signature Version 2). -func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request { - // Signature calculation is not needed for anonymous credentials. - if accessKeyID == "" || secretAccessKey == "" { - return &req - } - - // Initial time. - d := time.Now().UTC() - - // Add date if not present. - if date := req.Header.Get("Date"); date == "" { - req.Header.Set("Date", d.Format(http.TimeFormat)) - } - - // Calculate HMAC for secretAccessKey. - stringToSign := stringToSignV2(req, virtualHost) - hm := hmac.New(sha1.New, []byte(secretAccessKey)) - hm.Write([]byte(stringToSign)) - - // Prepare auth header. - authHeader := new(bytes.Buffer) - authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID)) - encoder := base64.NewEncoder(base64.StdEncoding, authHeader) - encoder.Write(hm.Sum(nil)) - encoder.Close() - - // Set Authorization header. - req.Header.Set("Authorization", authHeader.String()) - - return &req -} - -// From the Amazon docs: -// -// StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Expires + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; -func preStringToSignV2(req http.Request, virtualHost bool) string { - buf := new(bytes.Buffer) - // Write standard headers. - writePreSignV2Headers(buf, req) - // Write canonicalized protocol headers if any. - writeCanonicalizedHeaders(buf, req) - // Write canonicalized Query resources if any. - writeCanonicalizedResource(buf, req, virtualHost) - return buf.String() -} - -// writePreSignV2Headers - write preSign v2 required headers. -func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { - buf.WriteString(req.Method + "\n") - buf.WriteString(req.Header.Get("Content-Md5") + "\n") - buf.WriteString(req.Header.Get("Content-Type") + "\n") - buf.WriteString(req.Header.Get("Expires") + "\n") -} - -// From the Amazon docs: -// -// StringToSign = HTTP-Verb + "\n" + -// Content-Md5 + "\n" + -// Content-Type + "\n" + -// Date + "\n" + -// CanonicalizedProtocolHeaders + -// CanonicalizedResource; -func stringToSignV2(req http.Request, virtualHost bool) string { - buf := new(bytes.Buffer) - // Write standard headers. - writeSignV2Headers(buf, req) - // Write canonicalized protocol headers if any. - writeCanonicalizedHeaders(buf, req) - // Write canonicalized Query resources if any. - writeCanonicalizedResource(buf, req, virtualHost) - return buf.String() -} - -// writeSignV2Headers - write signV2 required headers. -func writeSignV2Headers(buf *bytes.Buffer, req http.Request) { - buf.WriteString(req.Method + "\n") - buf.WriteString(req.Header.Get("Content-Md5") + "\n") - buf.WriteString(req.Header.Get("Content-Type") + "\n") - buf.WriteString(req.Header.Get("Date") + "\n") -} - -// writeCanonicalizedHeaders - write canonicalized headers. 
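
Before the header canonicalization below, a concrete instance of the V2 StringToSign layout quoted above (all values are made up). SignV2 then base64-encodes HMAC-SHA1(secretAccessKey, stringToSign) into the "AWS accessKey:signature" Authorization header:

package main

import "fmt"

func main() {
	stringToSign := "GET\n" + // HTTP-Verb
		"\n" + // Content-Md5 (empty)
		"\n" + // Content-Type (empty)
		"Tue, 28 May 2024 05:18:37 GMT\n" + // Date
		"x-amz-meta-note:example\n" + // CanonicalizedProtocolHeaders
		"/mybucket/myobject" // CanonicalizedResource
	fmt.Println(stringToSign)
}
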
-func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { - var protoHeaders []string - vals := make(map[string][]string) - for k, vv := range req.Header { - // All the AMZ headers should be lowercase - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-amz") { - protoHeaders = append(protoHeaders, lk) - vals[lk] = vv - } - } - sort.Strings(protoHeaders) - for _, k := range protoHeaders { - buf.WriteString(k) - buf.WriteByte(':') - for idx, v := range vals[k] { - if idx > 0 { - buf.WriteByte(',') - } - if strings.Contains(v, "\n") { - // TODO: "Unfold" long headers that - // span multiple lines (as allowed by - // RFC 2616, section 4.2) by replacing - // the folding white-space (including - // new-line) by a single space. - buf.WriteString(v) - } else { - buf.WriteString(v) - } - } - buf.WriteByte('\n') - } -} - -// AWS S3 Signature V2 calculation rule is give here: -// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign - -// Whitelist resource list that will be used in query string for signature-V2 calculation. -// The list should be alphabetically sorted -var resourceList = []string{ - "acl", - "delete", - "lifecycle", - "location", - "logging", - "notification", - "partNumber", - "policy", - "requestPayment", - "response-cache-control", - "response-content-disposition", - "response-content-encoding", - "response-content-language", - "response-content-type", - "response-expires", - "torrent", - "uploadId", - "uploads", - "versionId", - "versioning", - "versions", - "website", -} - -// From the Amazon docs: -// -// CanonicalizedResource = [ "/" + Bucket ] + -// + -// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; -func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) { - // Save request URL. - requestURL := req.URL - // Get encoded URL path. - buf.WriteString(encodeURL2Path(&req, virtualHost)) - if requestURL.RawQuery != "" { - var n int - vals, _ := url.ParseQuery(requestURL.RawQuery) - // Verify if any sub resource queries are present, if yes - // canonicallize them. - for _, resource := range resourceList { - if vv, ok := vals[resource]; ok && len(vv) > 0 { - n++ - // First element - switch n { - case 1: - buf.WriteByte('?') - // The rest - default: - buf.WriteByte('&') - } - buf.WriteString(resource) - // Request parameters - if len(vv[0]) > 0 { - buf.WriteByte('=') - buf.WriteString(vv[0]) - } - } - } - } -} diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go deleted file mode 100644 index daf02fedf..000000000 --- a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go +++ /dev/null @@ -1,315 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package s3signer - -import ( - "bytes" - "encoding/hex" - "net/http" - "sort" - "strconv" - "strings" - "time" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// Signature and API related constants. -const ( - signV4Algorithm = "AWS4-HMAC-SHA256" - iso8601DateFormat = "20060102T150405Z" - yyyymmdd = "20060102" -) - -/// -/// Excerpts from @lsegal - -/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. -/// -/// User-Agent: -/// -/// This is ignored from signing because signing this causes -/// problems with generating pre-signed URLs (that are executed -/// by other agents) or when customers pass requests through -/// proxies, which may modify the user-agent. -/// -/// Content-Length: -/// -/// This is ignored from signing because generating a pre-signed -/// URL should not provide a content-length constraint, -/// specifically when vending a S3 pre-signed PUT URL. The -/// corollary to this is that when sending regular requests -/// (non-pre-signed), the signature contains a checksum of the -/// body, which implicitly validates the payload length (since -/// changing the number of bytes would change the checksum) -/// and therefore this header is not valuable in the signature. -/// -/// Content-Type: -/// -/// Signing this header causes quite a number of problems in -/// browser environments, where browsers like to modify and -/// normalize the content-type header in different ways. There is -/// more information on this in https://goo.gl/2E9gyy. Avoiding -/// this field simplifies logic and reduces the possibility of -/// future bugs. -/// -/// Authorization: -/// -/// Is skipped for obvious reasons -/// -var v4IgnoredHeaders = map[string]bool{ - "Authorization": true, - "Content-Type": true, - "Content-Length": true, - "User-Agent": true, -} - -// getSigningKey hmac seed to calculate final signature. -func getSigningKey(secret, loc string, t time.Time) []byte { - date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) - location := sumHMAC(date, []byte(loc)) - service := sumHMAC(location, []byte("s3")) - signingKey := sumHMAC(service, []byte("aws4_request")) - return signingKey -} - -// getSignature final signature in hexadecimal form. -func getSignature(signingKey []byte, stringToSign string) string { - return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) -} - -// getScope generate a string of a specific date, an AWS region, and a -// service. -func getScope(location string, t time.Time) string { - scope := strings.Join([]string{ - t.Format(yyyymmdd), - location, - "s3", - "aws4_request", - }, "/") - return scope -} - -// GetCredential generate a credential string. -func GetCredential(accessKeyID, location string, t time.Time) string { - scope := getScope(location, t) - return accessKeyID + "/" + scope -} - -// getHashedPayload get the hexadecimal value of the SHA256 hash of -// the request payload. -func getHashedPayload(req http.Request) string { - hashedPayload := req.Header.Get("X-Amz-Content-Sha256") - if hashedPayload == "" { - // Presign does not have a payload, use S3 recommended value. - hashedPayload = unsignedPayload - } - return hashedPayload -} - -// getCanonicalHeaders generate a list of request headers for -// signature. 
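
The getSigningKey chain above is nothing more than nested HMAC-SHA256. Before getCanonicalHeaders below, a self-contained sketch of the same derivation using only the standard library (secret, date, and region are placeholders):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func hmacSHA256(key, msg []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(msg)
	return h.Sum(nil)
}

func main() {
	secret, date, region := "SECRETKEY", "20240527", "us-east-1"
	kDate := hmacSHA256([]byte("AWS4"+secret), []byte(date))
	kRegion := hmacSHA256(kDate, []byte(region))
	kService := hmacSHA256(kRegion, []byte("s3"))
	kSigning := hmacSHA256(kService, []byte("aws4_request"))
	fmt.Println(hex.EncodeToString(kSigning))
}
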
-func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
-	var headers []string
-	vals := make(map[string][]string)
-	for k, vv := range req.Header {
-		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
-			continue // ignored header
-		}
-		headers = append(headers, strings.ToLower(k))
-		vals[strings.ToLower(k)] = vv
-	}
-	headers = append(headers, "host")
-	sort.Strings(headers)
-
-	var buf bytes.Buffer
-	// Save all the headers in canonical form <header>:<value> newline
-	// separated for each header.
-	for _, k := range headers {
-		buf.WriteString(k)
-		buf.WriteByte(':')
-		switch {
-		case k == "host":
-			buf.WriteString(getHostAddr(&req))
-			fallthrough
-		default:
-			for idx, v := range vals[k] {
-				if idx > 0 {
-					buf.WriteByte(',')
-				}
-				buf.WriteString(v)
-			}
-			buf.WriteByte('\n')
-		}
-	}
-	return buf.String()
-}
-
-// getSignedHeaders generate all signed request headers.
-// i.e. lexically sorted, semicolon-separated list of lowercase
-// request header names.
-func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
-	var headers []string
-	for k := range req.Header {
-		if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
-			continue // Ignored header found continue.
-		}
-		headers = append(headers, strings.ToLower(k))
-	}
-	headers = append(headers, "host")
-	sort.Strings(headers)
-	return strings.Join(headers, ";")
-}
-
-// getCanonicalRequest generate a canonical request of style.
-//
-// canonicalRequest =
-//	<HTTPMethod>\n
-//	<CanonicalURI>\n
-//	<CanonicalQueryString>\n
-//	<CanonicalHeaders>\n
-//	<SignedHeaders>\n
-//	<HashedPayload>
-func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool) string {
-	req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
-	canonicalRequest := strings.Join([]string{
-		req.Method,
-		s3utils.EncodePath(req.URL.Path),
-		req.URL.RawQuery,
-		getCanonicalHeaders(req, ignoredHeaders),
-		getSignedHeaders(req, ignoredHeaders),
-		getHashedPayload(req),
-	}, "\n")
-	return canonicalRequest
-}
-
-// getStringToSignV4 a string based on selected query values.
-func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
-	stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
-	stringToSign = stringToSign + getScope(location, t) + "\n"
-	stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
-	return stringToSign
-}
-
-// PreSignV4 presign the request, in accordance with
-// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
-func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
-	// Presign is not needed for anonymous credentials.
-	if accessKeyID == "" || secretAccessKey == "" {
-		return &req
-	}
-
-	// Initial time.
-	t := time.Now().UTC()
-
-	// Get credential string.
-	credential := GetCredential(accessKeyID, location, t)
-
-	// Get all signed headers.
-	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
-
-	// Set URL query.
-	query := req.URL.Query()
-	query.Set("X-Amz-Algorithm", signV4Algorithm)
-	query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
-	query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
-	query.Set("X-Amz-SignedHeaders", signedHeaders)
-	query.Set("X-Amz-Credential", credential)
-	// Set session token if available.
-	if sessionToken != "" {
-		query.Set("X-Amz-Security-Token", sessionToken)
-	}
-	req.URL.RawQuery = query.Encode()
-
-	// Get canonical request.
-	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
-
-	// Get string to sign from canonical request.
-	stringToSign := getStringToSignV4(t, location, canonicalRequest)
-
-	// Get hmac signing key.
-	signingKey := getSigningKey(secretAccessKey, location, t)
-
-	// Calculate signature.
-	signature := getSignature(signingKey, stringToSign)
-
-	// Add signature header to RawQuery.
-	req.URL.RawQuery += "&X-Amz-Signature=" + signature
-
-	return &req
-}
-
-// PostPresignSignatureV4 - presigned signature for PostPolicy
-// requests.
-func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
-	// Get signing key.
-	signingkey := getSigningKey(secretAccessKey, location, t)
-	// Calculate signature.
-	signature := getSignature(signingkey, policyBase64)
-	return signature
-}
-
-// SignV4 sign the request before Do(), in accordance with
-// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
-func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
-	// Signature calculation is not needed for anonymous credentials.
-	if accessKeyID == "" || secretAccessKey == "" {
-		return &req
-	}
-
-	// Initial time.
-	t := time.Now().UTC()
-
-	// Set x-amz-date.
-	req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
-
-	// Set session token if available.
-	if sessionToken != "" {
-		req.Header.Set("X-Amz-Security-Token", sessionToken)
-	}
-
-	// Get canonical request.
-	canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
-
-	// Get string to sign from canonical request.
-	stringToSign := getStringToSignV4(t, location, canonicalRequest)
-
-	// Get hmac signing key.
-	signingKey := getSigningKey(secretAccessKey, location, t)
-
-	// Get credential string.
-	credential := GetCredential(accessKeyID, location, t)
-
-	// Get all signed headers.
-	signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
-
-	// Calculate signature.
-	signature := getSignature(signingKey, stringToSign)
-
-	// If regular request, construct the final authorization header.
-	parts := []string{
-		signV4Algorithm + " Credential=" + credential,
-		"SignedHeaders=" + signedHeaders,
-		"Signature=" + signature,
-	}
-
-	// Set authorization header.
-	auth := strings.Join(parts, ", ")
-	req.Header.Set("Authorization", auth)
-
-	return &req
-}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
deleted file mode 100644
index 33b175208..000000000
--- a/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package s3signer
-
-import (
-	"crypto/hmac"
-	"crypto/sha256"
-	"net/http"
-)
-
-// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
-// the request payload is not signed.
-const unsignedPayload = "UNSIGNED-PAYLOAD"
-
-// sum256 calculate sha256 sum for an input byte array.
-func sum256(data []byte) []byte {
-	hash := sha256.New()
-	hash.Write(data)
-	return hash.Sum(nil)
-}
-
-// sumHMAC calculate hmac between two input byte arrays.
-func sumHMAC(key []byte, data []byte) []byte {
-	hash := hmac.New(sha256.New, key)
-	hash.Write(data)
-	return hash.Sum(nil)
-}
-
-// getHostAddr returns host header if available, otherwise returns host from URL
-func getHostAddr(req *http.Request) string {
-	if req.Host != "" {
-		return req.Host
-	}
-	return req.URL.Host
-}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
deleted file mode 100644
index adceb7f2a..000000000
--- a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package s3utils
-
-import (
-	"bytes"
-	"encoding/hex"
-	"errors"
-	"net"
-	"net/url"
-	"regexp"
-	"sort"
-	"strings"
-	"unicode/utf8"
-)
-
-// Sentinel URL is the default url value which is invalid.
-var sentinelURL = url.URL{}
-
-// IsValidDomain validates if input string is a valid domain name.
-func IsValidDomain(host string) bool {
-	// See RFC 1035, RFC 3696.
-	host = strings.TrimSpace(host)
-	if len(host) == 0 || len(host) > 255 {
-		return false
-	}
-	// host cannot start or end with "-"
-	if host[len(host)-1:] == "-" || host[:1] == "-" {
-		return false
-	}
-	// host cannot start or end with "_"
-	if host[len(host)-1:] == "_" || host[:1] == "_" {
-		return false
-	}
-	// host cannot start or end with a "."
-	if host[len(host)-1:] == "." || host[:1] == "." {
-		return false
-	}
-	// All non alphanumeric characters are invalid.
-	if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
-		return false
-	}
-	// No need to regexp match, since the list is non-exhaustive.
-	// We let it valid and fail later.
-	return true
-}
-
-// IsValidIP parses input string for ip address validity.
-func IsValidIP(ip string) bool {
-	return net.ParseIP(ip) != nil
-}
-
-// IsVirtualHostSupported - verifies if bucketName can be part of
-// virtual host. Currently only Amazon S3 and Google Cloud Storage
-// would support this.
-func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
-	if endpointURL == sentinelURL {
-		return false
-	}
-	// bucketName can be valid but '.' in the hostname will fail SSL
-	// certificate validation. So do not use host-style for such buckets.
-	if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
-		return false
-	}
-	// Return true for all other cases
-	return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
-}
-
-// Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
-
-// amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style.
-var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?)\.amazonaws\.com$`)
-
-// amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack.
-var amazonS3HostDualStack = regexp.MustCompile(`^s3\.dualstack\.(.*?)\.amazonaws\.com$`)
-
-// amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style.
-var amazonS3HostDot = regexp.MustCompile(`^s3\.(.*?)\.amazonaws\.com$`)
-
-// amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host.
-var amazonS3ChinaHost = regexp.MustCompile(`^(s3\.cn-.*?)\.amazonaws\.com\.cn$`)
-
-// GetRegionFromURL - returns a region from url host.
-func GetRegionFromURL(endpointURL url.URL) string {
-	if endpointURL == sentinelURL {
-		return ""
-	}
-	if endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" {
-		return "us-gov-west-1"
-	}
-	parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
-	if len(parts) > 1 {
-		return parts[1]
-	}
-	parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
-	if len(parts) > 1 {
-		return parts[1]
-	}
-	parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
-	if len(parts) > 1 {
-		return parts[1]
-	}
-	parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
-	if len(parts) > 1 {
-		return parts[1]
-	}
-	return ""
-}
-
-// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
-func IsAmazonEndpoint(endpointURL url.URL) bool {
-	if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" {
-		return true
-	}
-	return GetRegionFromURL(endpointURL) != ""
-}
-
-// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
-func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
-	if endpointURL == sentinelURL {
-		return false
-	}
-	return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
-		IsAmazonFIPSGovCloudEndpoint(endpointURL))
-}
-
-// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint.
-// See https://aws.amazon.com/compliance/fips.
-func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" || - endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com" -} - -// IsAmazonFIPSUSEastWestEndpoint - Match if it is exactly Amazon S3 FIPS US East/West endpoint. -// See https://aws.amazon.com/compliance/fips. -func IsAmazonFIPSUSEastWestEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - switch endpointURL.Host { - case "s3-fips.us-east-2.amazonaws.com": - case "s3-fips.dualstack.us-west-1.amazonaws.com": - case "s3-fips.dualstack.us-west-2.amazonaws.com": - case "s3-fips.dualstack.us-east-2.amazonaws.com": - case "s3-fips.dualstack.us-east-1.amazonaws.com": - case "s3-fips.us-west-1.amazonaws.com": - case "s3-fips.us-west-2.amazonaws.com": - case "s3-fips.us-east-1.amazonaws.com": - default: - return false - } - return true -} - -// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint. -// See https://aws.amazon.com/compliance/fips. -func IsAmazonFIPSEndpoint(endpointURL url.URL) bool { - return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL) -} - -// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint. -func IsGoogleEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - return endpointURL.Host == "storage.googleapis.com" -} - -// Expects ascii encoded strings - from output of urlEncodePath -func percentEncodeSlash(s string) string { - return strings.Replace(s, "/", "%2F", -1) -} - -// QueryEncode - encodes query values in their URL encoded form. In -// addition to the percent encoding performed by urlEncodePath() used -// here, it also percent encodes '/' (forward slash) -func QueryEncode(v url.Values) string { - if v == nil { - return "" - } - var buf bytes.Buffer - keys := make([]string, 0, len(v)) - for k := range v { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - vs := v[k] - prefix := percentEncodeSlash(EncodePath(k)) + "=" - for _, v := range vs { - if buf.Len() > 0 { - buf.WriteByte('&') - } - buf.WriteString(prefix) - buf.WriteString(percentEncodeSlash(EncodePath(v))) - } - } - return buf.String() -} - -// if object matches reserved string, no need to encode them -var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") - -// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences -// -// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 -// non english characters cannot be parsed due to the nature in which url.Encode() is written -// -// This function on the other hand is a direct replacement for url.Encode() technique to support -// pretty much every UTF-8 character. 
-func EncodePath(pathName string) string { - if reservedObjectNames.MatchString(pathName) { - return pathName - } - var encodedPathname string - for _, s := range pathName { - if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - continue - } - switch s { - case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - continue - default: - len := utf8.RuneLen(s) - if len < 0 { - // if utf8 cannot convert return the same string as is - return pathName - } - u := make([]byte, len) - utf8.EncodeRune(u, s) - for _, r := range u { - hex := hex.EncodeToString([]byte{r}) - encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) - } - } - } - return encodedPathname -} - -// We support '.' with bucket names but we fallback to using path -// style requests instead for such buckets. -var ( - validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`) - validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) - ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) -) - -// Common checker for both stricter and basic validation. -func checkBucketNameCommon(bucketName string, strict bool) (err error) { - if strings.TrimSpace(bucketName) == "" { - return errors.New("Bucket name cannot be empty") - } - if len(bucketName) < 3 { - return errors.New("Bucket name cannot be smaller than 3 characters") - } - if len(bucketName) > 63 { - return errors.New("Bucket name cannot be greater than 63 characters") - } - if ipAddress.MatchString(bucketName) { - return errors.New("Bucket name cannot be an ip address") - } - if strings.Contains(bucketName, "..") { - return errors.New("Bucket name contains invalid characters") - } - if strict { - if !validBucketNameStrict.MatchString(bucketName) { - err = errors.New("Bucket name contains invalid characters") - } - return err - } - if !validBucketName.MatchString(bucketName) { - err = errors.New("Bucket name contains invalid characters") - } - return err -} - -// CheckValidBucketName - checks if we have a valid input bucket name. -func CheckValidBucketName(bucketName string) (err error) { - return checkBucketNameCommon(bucketName, false) -} - -// CheckValidBucketNameStrict - checks if we have a valid input bucket name. -// This is a stricter version. -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html -func CheckValidBucketNameStrict(bucketName string) (err error) { - return checkBucketNameCommon(bucketName, true) -} - -// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix. -// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html -func CheckValidObjectNamePrefix(objectName string) error { - if len(objectName) > 1024 { - return errors.New("Object name cannot be greater than 1024 characters") - } - if !utf8.ValidString(objectName) { - return errors.New("Object name with non UTF-8 strings are not supported") - } - return nil -} - -// CheckValidObjectName - checks if we have a valid input object name. 
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html -func CheckValidObjectName(objectName string) error { - if strings.TrimSpace(objectName) == "" { - return errors.New("Object name cannot be empty") - } - return CheckValidObjectNamePrefix(objectName) -} diff --git a/vendor/github.com/minio/minio-go/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/pkg/set/stringset.go deleted file mode 100644 index efd02629b..000000000 --- a/vendor/github.com/minio/minio-go/pkg/set/stringset.go +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package set - -import ( - "encoding/json" - "fmt" - "sort" -) - -// StringSet - uses map as set of strings. -type StringSet map[string]struct{} - -// ToSlice - returns StringSet as string slice. -func (set StringSet) ToSlice() []string { - keys := make([]string, 0, len(set)) - for k := range set { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// IsEmpty - returns whether the set is empty or not. -func (set StringSet) IsEmpty() bool { - return len(set) == 0 -} - -// Add - adds string to the set. -func (set StringSet) Add(s string) { - set[s] = struct{}{} -} - -// Remove - removes string in the set. It does nothing if string does not exist in the set. -func (set StringSet) Remove(s string) { - delete(set, s) -} - -// Contains - checks if string is in the set. -func (set StringSet) Contains(s string) bool { - _, ok := set[s] - return ok -} - -// FuncMatch - returns new set containing each value who passes match function. -// A 'matchFn' should accept element in a set as first argument and -// 'matchString' as second argument. The function can do any logic to -// compare both the arguments and should return true to accept element in -// a set to include in output set else the element is ignored. -func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet { - nset := NewStringSet() - for k := range set { - if matchFn(k, matchString) { - nset.Add(k) - } - } - return nset -} - -// ApplyFunc - returns new set containing each value processed by 'applyFn'. -// A 'applyFn' should accept element in a set as a argument and return -// a processed string. The function can do any logic to return a processed -// string. -func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet { - nset := NewStringSet() - for k := range set { - nset.Add(applyFn(k)) - } - return nset -} - -// Equals - checks whether given set is equal to current set or not. -func (set StringSet) Equals(sset StringSet) bool { - // If length of set is not equal to length of given set, the - // set is not equal to given set. - if len(set) != len(sset) { - return false - } - - // As both sets are equal in length, check each elements are equal. 
- for k := range set { - if _, ok := sset[k]; !ok { - return false - } - } - - return true -} - -// Intersection - returns the intersection with given set as new set. -func (set StringSet) Intersection(sset StringSet) StringSet { - nset := NewStringSet() - for k := range set { - if _, ok := sset[k]; ok { - nset.Add(k) - } - } - - return nset -} - -// Difference - returns the difference with given set as new set. -func (set StringSet) Difference(sset StringSet) StringSet { - nset := NewStringSet() - for k := range set { - if _, ok := sset[k]; !ok { - nset.Add(k) - } - } - - return nset -} - -// Union - returns the union with given set as new set. -func (set StringSet) Union(sset StringSet) StringSet { - nset := NewStringSet() - for k := range set { - nset.Add(k) - } - - for k := range sset { - nset.Add(k) - } - - return nset -} - -// MarshalJSON - converts to JSON data. -func (set StringSet) MarshalJSON() ([]byte, error) { - return json.Marshal(set.ToSlice()) -} - -// UnmarshalJSON - parses JSON data and creates new set with it. -// If 'data' contains JSON string array, the set contains each string. -// If 'data' contains JSON string, the set contains the string as one element. -// If 'data' contains Other JSON types, JSON parse error is returned. -func (set *StringSet) UnmarshalJSON(data []byte) error { - sl := []string{} - var err error - if err = json.Unmarshal(data, &sl); err == nil { - *set = make(StringSet) - for _, s := range sl { - set.Add(s) - } - } else { - var s string - if err = json.Unmarshal(data, &s); err == nil { - *set = make(StringSet) - set.Add(s) - } - } - - return err -} - -// String - returns printable string of the set. -func (set StringSet) String() string { - return fmt.Sprintf("%s", set.ToSlice()) -} - -// NewStringSet - creates new string set. -func NewStringSet() StringSet { - return make(StringSet) -} - -// CreateStringSet - creates new string set with given string values. -func CreateStringSet(sl ...string) StringSet { - set := make(StringSet) - for _, k := range sl { - set.Add(k) - } - return set -} - -// CopyStringSet - returns copy of given set. -func CopyStringSet(set StringSet) StringSet { - nset := NewStringSet() - for k, v := range set { - nset[k] = v - } - return nset -} diff --git a/vendor/github.com/minio/minio-go/post-policy.go b/vendor/github.com/minio/minio-go/post-policy.go deleted file mode 100644 index c285fdefd..000000000 --- a/vendor/github.com/minio/minio-go/post-policy.go +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "encoding/base64" - "fmt" - "strings" - "time" -) - -// expirationDateFormat date format for expiration key in json policy. 
-const expirationDateFormat = "2006-01-02T15:04:05.999Z" - -// policyCondition explanation: -// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html -// -// Example: -// -// policyCondition { -// matchType: "$eq", -// key: "$Content-Type", -// value: "image/png", -// } -// -type policyCondition struct { - matchType string - condition string - value string -} - -// PostPolicy - Provides strict static type conversion and validation -// for Amazon S3's POST policy JSON string. -type PostPolicy struct { - // Expiration date and time of the POST policy. - expiration time.Time - // Collection of different policy conditions. - conditions []policyCondition - // ContentLengthRange minimum and maximum allowable size for the - // uploaded content. - contentLengthRange struct { - min int64 - max int64 - } - - // Post form data. - formData map[string]string -} - -// NewPostPolicy - Instantiate new post policy. -func NewPostPolicy() *PostPolicy { - p := &PostPolicy{} - p.conditions = make([]policyCondition, 0) - p.formData = make(map[string]string) - return p -} - -// SetExpires - Sets expiration time for the new policy. -func (p *PostPolicy) SetExpires(t time.Time) error { - if t.IsZero() { - return ErrInvalidArgument("No expiry time set.") - } - p.expiration = t - return nil -} - -// SetKey - Sets an object name for the policy based upload. -func (p *PostPolicy) SetKey(key string) error { - if strings.TrimSpace(key) == "" || key == "" { - return ErrInvalidArgument("Object name is empty.") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$key", - value: key, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["key"] = key - return nil -} - -// SetKeyStartsWith - Sets an object name that an policy based upload -// can start with. -func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { - if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { - return ErrInvalidArgument("Object prefix is empty.") - } - policyCond := policyCondition{ - matchType: "starts-with", - condition: "$key", - value: keyStartsWith, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["key"] = keyStartsWith - return nil -} - -// SetBucket - Sets bucket at which objects will be uploaded to. -func (p *PostPolicy) SetBucket(bucketName string) error { - if strings.TrimSpace(bucketName) == "" || bucketName == "" { - return ErrInvalidArgument("Bucket name is empty.") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$bucket", - value: bucketName, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["bucket"] = bucketName - return nil -} - -// SetContentType - Sets content-type of the object for this policy -// based upload. -func (p *PostPolicy) SetContentType(contentType string) error { - if strings.TrimSpace(contentType) == "" || contentType == "" { - return ErrInvalidArgument("No content type specified.") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$Content-Type", - value: contentType, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["Content-Type"] = contentType - return nil -} - -// SetContentLengthRange - Set new min and max content length -// condition for all incoming uploads. 
-func (p *PostPolicy) SetContentLengthRange(min, max int64) error { - if min > max { - return ErrInvalidArgument("Minimum limit is larger than maximum limit.") - } - if min < 0 { - return ErrInvalidArgument("Minimum limit cannot be negative.") - } - if max < 0 { - return ErrInvalidArgument("Maximum limit cannot be negative.") - } - p.contentLengthRange.min = min - p.contentLengthRange.max = max - return nil -} - -// SetSuccessStatusAction - Sets the status success code of the object for this policy -// based upload. -func (p *PostPolicy) SetSuccessStatusAction(status string) error { - if strings.TrimSpace(status) == "" || status == "" { - return ErrInvalidArgument("Status is empty") - } - policyCond := policyCondition{ - matchType: "eq", - condition: "$success_action_status", - value: status, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData["success_action_status"] = status - return nil -} - -// SetUserMetadata - Set user metadata as a key/value couple. -// Can be retrieved through a HEAD request or an event. -func (p *PostPolicy) SetUserMetadata(key string, value string) error { - if strings.TrimSpace(key) == "" || key == "" { - return ErrInvalidArgument("Key is empty") - } - if strings.TrimSpace(value) == "" || value == "" { - return ErrInvalidArgument("Value is empty") - } - headerName := fmt.Sprintf("x-amz-meta-%s", key) - policyCond := policyCondition{ - matchType: "eq", - condition: fmt.Sprintf("$%s", headerName), - value: value, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData[headerName] = value - return nil -} - -// SetUserData - Set user data as a key/value couple. -// Can be retrieved through a HEAD request or an event. -func (p *PostPolicy) SetUserData(key string, value string) error { - if key == "" { - return ErrInvalidArgument("Key is empty") - } - if value == "" { - return ErrInvalidArgument("Value is empty") - } - headerName := fmt.Sprintf("x-amz-%s", key) - policyCond := policyCondition{ - matchType: "eq", - condition: fmt.Sprintf("$%s", headerName), - value: value, - } - if err := p.addNewPolicy(policyCond); err != nil { - return err - } - p.formData[headerName] = value - return nil -} - -// addNewPolicy - internal helper to validate adding new policies. -func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { - if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { - return ErrInvalidArgument("Policy fields are empty.") - } - p.conditions = append(p.conditions, policyCond) - return nil -} - -// Stringer interface for printing policy in json formatted string. -func (p PostPolicy) String() string { - return string(p.marshalJSON()) -} - -// marshalJSON - Provides Marshalled JSON in bytes. 
-func (p PostPolicy) marshalJSON() []byte {
-	expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
-	var conditionsStr string
-	conditions := []string{}
-	for _, po := range p.conditions {
-		conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
-	}
-	if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
-		conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
-			p.contentLengthRange.min, p.contentLengthRange.max))
-	}
-	if len(conditions) > 0 {
-		conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
-	}
-	retStr := "{"
-	retStr = retStr + expirationStr + ","
-	retStr = retStr + conditionsStr
-	retStr = retStr + "}"
-	return []byte(retStr)
-}
-
-// base64 - Produces base64 of PostPolicy's Marshalled json.
-func (p PostPolicy) base64() string {
-	return base64.StdEncoding.EncodeToString(p.marshalJSON())
-}
diff --git a/vendor/github.com/minio/minio-go/retry-continous.go b/vendor/github.com/minio/minio-go/retry-continous.go
deleted file mode 100644
index f31dfa6f2..000000000
--- a/vendor/github.com/minio/minio-go/retry-continous.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import "time"
-
-// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
-func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
-	attemptCh := make(chan int)
-
-	// normalize jitter to the range [0, 1.0]
-	if jitter < NoJitter {
-		jitter = NoJitter
-	}
-	if jitter > MaxJitter {
-		jitter = MaxJitter
-	}
-
-	// computes the exponential backoff duration according to
-	// https://www.awsarchitectureblog.com/2015/03/backoff.html
-	exponentialBackoffWait := func(attempt int) time.Duration {
-		// 1<<uint(attempt) below could overflow, so cap the attempt count.
-		maxAttempt := 30
-		if attempt > maxAttempt {
-			attempt = maxAttempt
-		}
-		//sleep = random_between(0, min(cap, base * 2 ** attempt))
-		sleep := unit * time.Duration(1<<uint(attempt))
-		if sleep > cap {
-			sleep = cap
-		}
-		if jitter != NoJitter {
-			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
-		}
-		return sleep
-	}
-
-	go func() {
-		defer close(attemptCh)
-		var nextBackoff int
-		for {
-			select {
-			// Attempts starts.
-			case attemptCh <- nextBackoff:
-				nextBackoff++
-			case <-doneCh:
-				// Stop the routine.
-				return
-			}
-			time.Sleep(exponentialBackoffWait(nextBackoff))
-		}
-	}()
-	return attemptCh
-}
diff --git a/vendor/github.com/minio/minio-go/retry.go b/vendor/github.com/minio/minio-go/retry.go
deleted file mode 100644
index 445167b6a..000000000
--- a/vendor/github.com/minio/minio-go/retry.go
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * Copyright 2015-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
-	"net"
-	"net/http"
-	"net/url"
-	"strings"
-	"time"
-)
-
-// MaxRetry is the maximum number of retries before stopping.
-var MaxRetry = 10
-
-// MaxJitter will randomize over the full exponential backoff time
const MaxJitter = 1.0
-
-// NoJitter disables the use of jitter for randomizing the exponential backoff time
-const NoJitter = 0.0
-
-// DefaultRetryUnit - default unit multiplicative per retry.
-// defaults to 1 second.
-const DefaultRetryUnit = time.Second
-
-// DefaultRetryCap - Each retry attempt never waits longer than
-// this maximum time duration.
-const DefaultRetryCap = time.Second * 30
-
-// newRetryTimer creates a timer with exponentially increasing
-// delays until the maximum retry attempts are reached.
-func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
-	attemptCh := make(chan int)
-
-	// computes the exponential backoff duration according to
-	// https://www.awsarchitectureblog.com/2015/03/backoff.html
-	exponentialBackoffWait := func(attempt int) time.Duration {
-		// normalize jitter to the range [0, 1.0]
-		if jitter < NoJitter {
-			jitter = NoJitter
-		}
-		if jitter > MaxJitter {
-			jitter = MaxJitter
-		}
-
-		//sleep = random_between(0, min(cap, base * 2 ** attempt))
-		sleep := unit * time.Duration(1<<uint(attempt))
-		if sleep > cap {
-			sleep = cap
-		}
-		if jitter != NoJitter {
-			sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
-		}
-		return sleep
-	}
-
-	go func() {
-		defer close(attemptCh)
-		for i := 0; i < maxRetry; i++ {
-			select {
-			// Attempts start from 1.
-			case attemptCh <- i + 1:
-			case <-doneCh:
-				// Stop the routine.
-				return
-			}
-			time.Sleep(exponentialBackoffWait(i))
-		}
-	}()
-	return attemptCh
-}
-
-// isHTTPReqErrorRetryable - is http requests error retryable, such
-// as i/o timeout, connection broken etc..
-func isHTTPReqErrorRetryable(err error) bool {
-	if err == nil {
-		return false
-	}
-	switch e := err.(type) {
-	case *url.Error:
-		switch e.Err.(type) {
-		case *net.DNSError, *net.OpError, net.UnknownNetworkError:
-			return true
-		}
-		if strings.Contains(err.Error(), "Connection closed by foreign host") {
-			return true
-		} else if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
-			// If error is - tlsHandshakeTimeoutError, retry.
-			return true
-		} else if strings.Contains(err.Error(), "i/o timeout") {
-			// If error is - tcp timeoutError, retry.
-			return true
-		} else if strings.Contains(err.Error(), "connection timed out") {
-			// If err is a net.Dial timeout, retry.
-			return true
-		} else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") {
-			// If error is transport connection broken, retry.
-			return true
-		}
-	}
-	return false
-}
-
-// List of AWS S3 error codes which are retryable.
-var retryableS3Codes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "InternalError": {}, - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "SlowDown": {}, - // Add more AWS S3 codes here. -} - -// isS3CodeRetryable - is s3 error code retryable. -func isS3CodeRetryable(s3Code string) (ok bool) { - _, ok = retryableS3Codes[s3Code] - return ok -} - -// List of HTTP status codes which are retryable. -var retryableHTTPStatusCodes = map[int]struct{}{ - 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet - http.StatusInternalServerError: {}, - http.StatusBadGateway: {}, - http.StatusServiceUnavailable: {}, - // Add more HTTP status codes here. -} - -// isHTTPStatusRetryable - is HTTP error code retryable. -func isHTTPStatusRetryable(httpStatusCode int) (ok bool) { - _, ok = retryableHTTPStatusCodes[httpStatusCode] - return ok -} diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go deleted file mode 100644 index 0eccd2407..000000000 --- a/vendor/github.com/minio/minio-go/s3-endpoints.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -// awsS3EndpointMap Amazon S3 endpoint map. -var awsS3EndpointMap = map[string]string{ - "us-east-1": "s3.dualstack.us-east-1.amazonaws.com", - "us-east-2": "s3.dualstack.us-east-2.amazonaws.com", - "us-west-2": "s3.dualstack.us-west-2.amazonaws.com", - "us-west-1": "s3.dualstack.us-west-1.amazonaws.com", - "ca-central-1": "s3.dualstack.ca-central-1.amazonaws.com", - "eu-west-1": "s3.dualstack.eu-west-1.amazonaws.com", - "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", - "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", - "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", - "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", - "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", - "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", - "ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com", - "ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com", - "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", - "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", - "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", - "us-gov-east-1": "s3.dualstack.us-gov-east-1.amazonaws.com", - "cn-north-1": "s3.cn-north-1.amazonaws.com.cn", - "cn-northwest-1": "s3.cn-northwest-1.amazonaws.com.cn", -} - -// getS3Endpoint get Amazon S3 endpoint based on the bucket location. -func getS3Endpoint(bucketLocation string) (s3Endpoint string) { - s3Endpoint, ok := awsS3EndpointMap[bucketLocation] - if !ok { - // Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint. 
- s3Endpoint = "s3.dualstack.us-east-1.amazonaws.com" - } - return s3Endpoint -} diff --git a/vendor/github.com/minio/minio-go/s3-error.go b/vendor/github.com/minio/minio-go/s3-error.go deleted file mode 100644 index 3b11776c2..000000000 --- a/vendor/github.com/minio/minio-go/s3-error.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -// Non exhaustive list of AWS S3 standard error responses - -// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html -var s3ErrorResponseMap = map[string]string{ - "AccessDenied": "Access Denied.", - "BadDigest": "The Content-Md5 you specified did not match what we received.", - "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", - "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", - "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", - "InternalError": "We encountered an internal error, please try again.", - "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", - "InvalidBucketName": "The specified bucket is not valid.", - "InvalidDigest": "The Content-Md5 you specified is not valid.", - "InvalidRange": "The requested range is not satisfiable", - "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", - "MissingContentLength": "You must provide the Content-Length HTTP header.", - "MissingContentMD5": "Missing required header for this request: Content-Md5.", - "MissingRequestBodyError": "Request body is empty.", - "NoSuchBucket": "The specified bucket does not exist.", - "NoSuchBucketPolicy": "The bucket policy does not exist", - "NoSuchKey": "The specified key does not exist.", - "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", - "NotImplemented": "A header you provided implies functionality that is not implemented", - "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", - "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", - "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.", - "MethodNotAllowed": "The specified method is not allowed against this resource.", - "InvalidPart": "One or more of the specified parts could not be found.", - "InvalidPartOrder": "The list of parts was not in ascending order. 
The parts list must be specified in order by part number.", - "InvalidObjectState": "The operation is not valid for the current state of the object.", - "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", - "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", - "BucketNotEmpty": "The bucket you tried to delete is not empty", - "AllAccessDisabled": "All access to this bucket has been disabled.", - "MalformedPolicy": "Policy has invalid resource.", - "MissingFields": "Missing fields in request.", - "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", - "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", - "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", - "InvalidDuration": "Duration provided in the request is invalid.", - "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", - // Add new API errors here. -} diff --git a/vendor/github.com/minio/minio-go/transport.go b/vendor/github.com/minio/minio-go/transport.go deleted file mode 100644 index 88700cfe7..000000000 --- a/vendor/github.com/minio/minio-go/transport.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build go1.7 go1.8 - -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2017-2018 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "net" - "net/http" - "time" -) - -// DefaultTransport - this default transport is similar to -// http.DefaultTransport but with additional param DisableCompression -// is set to true to avoid decompressing content with 'gzip' encoding. -var DefaultTransport http.RoundTripper = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - MaxIdleConnsPerHost: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - // Set this value so that the underlying transport round-tripper - // doesn't try to auto decode the body of objects with - // content-encoding set to `gzip`. - // - // Refer: - // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 - DisableCompression: true, -} diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go deleted file mode 100644 index 8483f3834..000000000 --- a/vendor/github.com/minio/minio-go/utils.go +++ /dev/null @@ -1,272 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage - * Copyright 2015-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "crypto/md5" - "crypto/sha256" - "encoding/base64" - "encoding/hex" - "encoding/xml" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "regexp" - "strings" - "time" - - "github.com/minio/minio-go/pkg/s3utils" -) - -// xmlDecoder provide decoded value in xml. -func xmlDecoder(body io.Reader, v interface{}) error { - d := xml.NewDecoder(body) - return d.Decode(v) -} - -// sum256 calculate sha256sum for an input byte array, returns hex encoded. -func sum256Hex(data []byte) string { - hash := sha256.New() - hash.Write(data) - return hex.EncodeToString(hash.Sum(nil)) -} - -// sumMD5Base64 calculate md5sum for an input byte array, returns base64 encoded. -func sumMD5Base64(data []byte) string { - hash := md5.New() - hash.Write(data) - return base64.StdEncoding.EncodeToString(hash.Sum(nil)) -} - -// getEndpointURL - construct a new endpoint. -func getEndpointURL(endpoint string, secure bool) (*url.URL, error) { - if strings.Contains(endpoint, ":") { - host, _, err := net.SplitHostPort(endpoint) - if err != nil { - return nil, err - } - if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { - msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." - return nil, ErrInvalidArgument(msg) - } - } else { - if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) { - msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." - return nil, ErrInvalidArgument(msg) - } - } - // If secure is false, use 'http' scheme. - scheme := "https" - if !secure { - scheme = "http" - } - - // Construct a secured endpoint URL. - endpointURLStr := scheme + "://" + endpoint - endpointURL, err := url.Parse(endpointURLStr) - if err != nil { - return nil, err - } - - // Validate incoming endpoint URL. - if err := isValidEndpointURL(*endpointURL); err != nil { - return nil, err - } - return endpointURL, nil -} - -// closeResponse close non nil response with any response Body. -// convenient wrapper to drain any remaining data on response body. -// -// Subsequently this allows golang http RoundTripper -// to re-use the same connection for future requests. -func closeResponse(resp *http.Response) { - // Callers should close resp.Body when done reading from it. - // If resp.Body is not closed, the Client's underlying RoundTripper - // (typically Transport) may not be able to re-use a persistent TCP - // connection to the server for a subsequent "keep-alive" request. - if resp != nil && resp.Body != nil { - // Drain any remaining Body and then close the connection. - // Without this closing connection would disallow re-using - // the same connection for future uses. - // - http://stackoverflow.com/a/17961593/4465767 - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - } -} - -var ( - // Hex encoded string of nil sha256sum bytes. - emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" - - // Sentinel URL is the default url value which is invalid. - sentinelURL = url.URL{} -) - -// Verify if input endpoint URL is valid. 
-func isValidEndpointURL(endpointURL url.URL) error {
-	if endpointURL == sentinelURL {
-		return ErrInvalidArgument("Endpoint url cannot be empty.")
-	}
-	if endpointURL.Path != "/" && endpointURL.Path != "" {
-		return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
-	}
-	if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") {
-		if !s3utils.IsAmazonEndpoint(endpointURL) {
-			return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
-		}
-	}
-	if strings.Contains(endpointURL.Host, ".googleapis.com") {
-		if !s3utils.IsGoogleEndpoint(endpointURL) {
-			return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
-		}
-	}
-	return nil
-}
-
-// Verify if input expires value is valid.
-func isValidExpiry(expires time.Duration) error {
-	expireSeconds := int64(expires / time.Second)
-	if expireSeconds < 1 {
-		return ErrInvalidArgument("Expires cannot be lesser than 1 second.")
-	}
-	if expireSeconds > 604800 {
-		return ErrInvalidArgument("Expires cannot be greater than 7 days.")
-	}
-	return nil
-}
-
-// make a copy of http.Header
-func cloneHeader(h http.Header) http.Header {
-	h2 := make(http.Header, len(h))
-	for k, vv := range h {
-		vv2 := make([]string, len(vv))
-		copy(vv2, vv)
-		h2[k] = vv2
-	}
-	return h2
-}
-
-// Filter relevant response headers from
-// the HEAD, GET http response. The function takes
-// a list of headers which are filtered out and
-// returned as a new http header.
-func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.Header) {
-	filteredHeader = cloneHeader(header)
-	for _, key := range filterKeys {
-		filteredHeader.Del(key)
-	}
-	return filteredHeader
-}
-
-// regCred matches credential string in HTTP header
-var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
-
-// regSign matches signature string in HTTP header
-var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
-
-// Redact out signature value from authorization string.
-func redactSignature(origAuth string) string {
-	if !strings.HasPrefix(origAuth, signV4Algorithm) {
-		// Set a temporary redacted auth
-		return "AWS **REDACTED**:**REDACTED**"
-	}
-
-	/// Signature V4 authorization header.
-
-	// Strip out accessKeyID from:
-	// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
-	newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
-
-	// Strip out 256-bit signature from: Signature=<256-bit signature>
-	return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
-}
-
-// Get default location returns the location based on the input
-// URL `u`, if region override is provided then all location
-// defaults to regionOverride.
-//
-// If no other cases match then the location is set to `us-east-1`
-// as a last resort.
-func getDefaultLocation(u url.URL, regionOverride string) (location string) {
-	if regionOverride != "" {
-		return regionOverride
-	}
-	region := s3utils.GetRegionFromURL(u)
-	if region == "" {
-		region = "us-east-1"
-	}
-	return region
-}
-
-var supportedHeaders = []string{
-	"content-type",
-	"cache-control",
-	"content-encoding",
-	"content-disposition",
-	"content-language",
-	"x-amz-website-redirect-location",
-	"expires",
-	// Add more supported headers here.
-} - -// isStorageClassHeader returns true if the header is a supported storage class header -func isStorageClassHeader(headerKey string) bool { - return strings.ToLower(amzStorageClass) == strings.ToLower(headerKey) -} - -// isStandardHeader returns true if header is a supported header and not a custom header -func isStandardHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - for _, header := range supportedHeaders { - if strings.ToLower(header) == key { - return true - } - } - return false -} - -// sseHeaders is list of server side encryption headers -var sseHeaders = []string{ - "x-amz-server-side-encryption", - "x-amz-server-side-encryption-aws-kms-key-id", - "x-amz-server-side-encryption-context", - "x-amz-server-side-encryption-customer-algorithm", - "x-amz-server-side-encryption-customer-key", - "x-amz-server-side-encryption-customer-key-MD5", -} - -// isSSEHeader returns true if header is a server side encryption header. -func isSSEHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - for _, h := range sseHeaders { - if strings.ToLower(h) == key { - return true - } - } - return false -} - -// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header. -func isAmzHeader(headerKey string) bool { - key := strings.ToLower(headerKey) - - return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) -} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE deleted file mode 100644 index f9c841a51..000000000 --- a/vendor/github.com/mitchellh/go-homedir/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md deleted file mode 100644 index d70706d5b..000000000 --- a/vendor/github.com/mitchellh/go-homedir/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# go-homedir - -This is a Go library for detecting the user's home directory without -the use of cgo, so the library can be used in cross-compilation environments. - -Usage is incredibly simple, just call `homedir.Dir()` to get the home directory -for a user, and `homedir.Expand()` to expand the `~` in a path to the home -directory. - -**Why not just use `os/user`?** The built-in `os/user` package requires -cgo on Darwin systems. 
This means that any Go code that uses that package -cannot cross compile. But 99% of the time the use for `os/user` is just to -retrieve the home directory, which we can do for the current user without -cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go deleted file mode 100644 index 25378537e..000000000 --- a/vendor/github.com/mitchellh/go-homedir/homedir.go +++ /dev/null @@ -1,167 +0,0 @@ -package homedir - -import ( - "bytes" - "errors" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" -) - -// DisableCache will disable caching of the home directory. Caching is enabled -// by default. -var DisableCache bool - -var homedirCache string -var cacheLock sync.RWMutex - -// Dir returns the home directory for the executing user. -// -// This uses an OS-specific method for discovering the home directory. -// An error is returned if a home directory cannot be detected. -func Dir() (string, error) { - if !DisableCache { - cacheLock.RLock() - cached := homedirCache - cacheLock.RUnlock() - if cached != "" { - return cached, nil - } - } - - cacheLock.Lock() - defer cacheLock.Unlock() - - var result string - var err error - if runtime.GOOS == "windows" { - result, err = dirWindows() - } else { - // Unix-like system, so just assume Unix - result, err = dirUnix() - } - - if err != nil { - return "", err - } - homedirCache = result - return result, nil -} - -// Expand expands the path to include the home directory if the path -// is prefixed with `~`. If it isn't prefixed with `~`, the path is -// returned as-is. -func Expand(path string) (string, error) { - if len(path) == 0 { - return path, nil - } - - if path[0] != '~' { - return path, nil - } - - if len(path) > 1 && path[1] != '/' && path[1] != '\\' { - return "", errors.New("cannot expand user-specific home dir") - } - - dir, err := Dir() - if err != nil { - return "", err - } - - return filepath.Join(dir, path[1:]), nil -} - -// Reset clears the cache, forcing the next call to Dir to re-detect -// the home directory. This generally never has to be called, but can be -// useful in tests if you're modifying the home directory via the HOME -// env var or something. -func Reset() { - cacheLock.Lock() - defer cacheLock.Unlock() - homedirCache = "" -} - -func dirUnix() (string, error) { - homeEnv := "HOME" - if runtime.GOOS == "plan9" { - // On plan9, env vars are lowercase. - homeEnv = "home" - } - - // First prefer the HOME environmental variable - if home := os.Getenv(homeEnv); home != "" { - return home, nil - } - - var stdout bytes.Buffer - - // If that fails, try OS specific commands - if runtime.GOOS == "darwin" { - cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) - cmd.Stdout = &stdout - if err := cmd.Run(); err == nil { - result := strings.TrimSpace(stdout.String()) - if result != "" { - return result, nil - } - } - } else { - cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - // If the error is ErrNotFound, we ignore it. Otherwise, return it. 
- if err != exec.ErrNotFound { - return "", err - } - } else { - if passwd := strings.TrimSpace(stdout.String()); passwd != "" { - // username:password:uid:gid:gecos:home:shell - passwdParts := strings.SplitN(passwd, ":", 7) - if len(passwdParts) > 5 { - return passwdParts[5], nil - } - } - } - } - - // If all else fails, try the shell - stdout.Reset() - cmd := exec.Command("sh", "-c", "cd && pwd") - cmd.Stdout = &stdout - if err := cmd.Run(); err != nil { - return "", err - } - - result := strings.TrimSpace(stdout.String()) - if result == "" { - return "", errors.New("blank output when reading home directory") - } - - return result, nil -} - -func dirWindows() (string, error) { - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); home != "" { - return home, nil - } - - // Prefer standard environment variable USERPROFILE - if home := os.Getenv("USERPROFILE"); home != "" { - return home, nil - } - - drive := os.Getenv("HOMEDRIVE") - path := os.Getenv("HOMEPATH") - home := drive + path - if drive == "" || path == "" { - return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") - } - - return home, nil -} diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go deleted file mode 100644 index 29f0a2de4..000000000 --- a/vendor/golang.org/x/crypto/argon2/argon2.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package argon2 implements the key derivation function Argon2. -// Argon2 was selected as the winner of the Password Hashing Competition and can -// be used to derive cryptographic keys from passwords. -// -// For a detailed specification of Argon2 see [1]. -// -// If you aren't sure which function you need, use Argon2id (IDKey) and -// the parameter recommendations for your scenario. -// -// # Argon2i -// -// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. -// It uses data-independent memory access, which is preferred for password -// hashing and password-based key derivation. Argon2i requires more passes over -// memory than Argon2id to protect from trade-off attacks. The recommended -// parameters (taken from [2]) for non-interactive operations are time=3 and to -// use the maximum available memory. -// -// # Argon2id -// -// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining -// Argon2i and Argon2d. It uses data-independent memory access for the first -// half of the first iteration over the memory and data-dependent memory access -// for the rest. Argon2id is side-channel resistant and provides better brute- -// force cost savings due to time-memory tradeoffs than Argon2i. The recommended -// parameters for non-interactive operations (taken from [2]) are time=1 and to -// use the maximum available memory. -// -// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf -// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3 -package argon2 - -import ( - "encoding/binary" - "sync" - - "golang.org/x/crypto/blake2b" -) - -// The Argon2 version implemented by this package. -const Version = 0x13 - -const ( - argon2d = iota - argon2i - argon2id -) - -// Key derives a key from the password, salt, and cost parameters using Argon2i -// returning a byte slice of length keyLen that can be used as cryptographic -// key. 
The CPU cost and parallelism degree must be greater than zero. -// -// For example, you can get a derived key for e.g. AES-256 (which needs a -// 32-byte key) by doing: -// -// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) -// -// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. -// If using that amount of memory (32 MB) is not possible in some contexts then -// the time parameter can be increased to compensate. -// -// The time parameter specifies the number of passes over the memory and the -// memory parameter specifies the size of the memory in KiB. For example -// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be -// adjusted to the number of available CPUs. The cost parameters should be -// increased as memory latency and CPU parallelism increases. Remember to get a -// good random salt. -func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { - return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) -} - -// IDKey derives a key from the password, salt, and cost parameters using -// Argon2id returning a byte slice of length keyLen that can be used as -// cryptographic key. The CPU cost and parallelism degree must be greater than -// zero. -// -// For example, you can get a derived key for e.g. AES-256 (which needs a -// 32-byte key) by doing: -// -// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) -// -// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. -// If using that amount of memory (64 MB) is not possible in some contexts then -// the time parameter can be increased to compensate. -// -// The time parameter specifies the number of passes over the memory and the -// memory parameter specifies the size of the memory in KiB. For example -// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be -// adjusted to the numbers of available CPUs. The cost parameters should be -// increased as memory latency and CPU parallelism increases. Remember to get a -// good random salt. 
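//
// A slightly fuller sketch of the call above, with salt generation
// included (the 16-byte salt length and the use of crypto/rand here are
// illustrative assumptions, not part of the package documentation):
//
//	salt := make([]byte, 16)
//	if _, err := rand.Read(salt); err != nil { // crypto/rand
//		panic(err)
//	}
//	key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32)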
-func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { - return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) -} - -func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { - if time < 1 { - panic("argon2: number of rounds too small") - } - if threads < 1 { - panic("argon2: parallelism degree too low") - } - h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) - - memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) - if memory < 2*syncPoints*uint32(threads) { - memory = 2 * syncPoints * uint32(threads) - } - B := initBlocks(&h0, memory, uint32(threads)) - processBlocks(B, time, memory, uint32(threads), mode) - return extractKey(B, memory, uint32(threads), keyLen) -} - -const ( - blockLength = 128 - syncPoints = 4 -) - -type block [blockLength]uint64 - -func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { - var ( - h0 [blake2b.Size + 8]byte - params [24]byte - tmp [4]byte - ) - - b2, _ := blake2b.New512(nil) - binary.LittleEndian.PutUint32(params[0:4], threads) - binary.LittleEndian.PutUint32(params[4:8], keyLen) - binary.LittleEndian.PutUint32(params[8:12], memory) - binary.LittleEndian.PutUint32(params[12:16], time) - binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) - binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) - b2.Write(params[:]) - binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) - b2.Write(tmp[:]) - b2.Write(password) - binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) - b2.Write(tmp[:]) - b2.Write(salt) - binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) - b2.Write(tmp[:]) - b2.Write(key) - binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) - b2.Write(tmp[:]) - b2.Write(data) - b2.Sum(h0[:0]) - return h0 -} - -func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block { - var block0 [1024]byte - B := make([]block, memory) - for lane := uint32(0); lane < threads; lane++ { - j := lane * (memory / threads) - binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) - - binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) - blake2bHash(block0[:], h0[:]) - for i := range B[j+0] { - B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) - } - - binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) - blake2bHash(block0[:], h0[:]) - for i := range B[j+1] { - B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) - } - } - return B -} - -func processBlocks(B []block, time, memory, threads uint32, mode int) { - lanes := memory / threads - segments := lanes / syncPoints - - processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { - var addresses, in, zero block - if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { - in[0] = uint64(n) - in[1] = uint64(lane) - in[2] = uint64(slice) - in[3] = uint64(memory) - in[4] = uint64(time) - in[5] = uint64(mode) - } - - index := uint32(0) - if n == 0 && slice == 0 { - index = 2 // we have already generated the first two blocks - if mode == argon2i || mode == argon2id { - in[6]++ - processBlock(&addresses, &in, &zero) - processBlock(&addresses, &addresses, &zero) - } - } - - offset := lane*lanes + slice*segments + index - var random uint64 - for index < segments { - prev := offset - 1 - if index == 0 && slice == 0 { - prev += lanes // last block in lane - } - if mode == argon2i || (mode == 
argon2id && n == 0 && slice < syncPoints/2) { - if index%blockLength == 0 { - in[6]++ - processBlock(&addresses, &in, &zero) - processBlock(&addresses, &addresses, &zero) - } - random = addresses[index%blockLength] - } else { - random = B[prev][0] - } - newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index) - processBlockXOR(&B[offset], &B[prev], &B[newOffset]) - index, offset = index+1, offset+1 - } - wg.Done() - } - - for n := uint32(0); n < time; n++ { - for slice := uint32(0); slice < syncPoints; slice++ { - var wg sync.WaitGroup - for lane := uint32(0); lane < threads; lane++ { - wg.Add(1) - go processSegment(n, slice, lane, &wg) - } - wg.Wait() - } - } - -} - -func extractKey(B []block, memory, threads, keyLen uint32) []byte { - lanes := memory / threads - for lane := uint32(0); lane < threads-1; lane++ { - for i, v := range B[(lane*lanes)+lanes-1] { - B[memory-1][i] ^= v - } - } - - var block [1024]byte - for i, v := range B[memory-1] { - binary.LittleEndian.PutUint64(block[i*8:], v) - } - key := make([]byte, keyLen) - blake2bHash(key, block[:]) - return key -} - -func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { - refLane := uint32(rand>>32) % threads - if n == 0 && slice == 0 { - refLane = lane - } - m, s := 3*segments, ((slice+1)%syncPoints)*segments - if lane == refLane { - m += index - } - if n == 0 { - m, s = slice*segments, 0 - if slice == 0 || lane == refLane { - m += index - } - } - if index == 0 || lane == refLane { - m-- - } - return phi(rand, uint64(m), uint64(s), refLane, lanes) -} - -func phi(rand, m, s uint64, lane, lanes uint32) uint32 { - p := rand & 0xFFFFFFFF - p = (p * p) >> 32 - p = (p * m) >> 32 - return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) -} diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go deleted file mode 100644 index 10f46948d..000000000 --- a/vendor/golang.org/x/crypto/argon2/blake2b.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package argon2 - -import ( - "encoding/binary" - "hash" - - "golang.org/x/crypto/blake2b" -) - -// blake2bHash computes an arbitrary long hash value of in -// and writes the hash to out. -func blake2bHash(out []byte, in []byte) { - var b2 hash.Hash - if n := len(out); n < blake2b.Size { - b2, _ = blake2b.New(n, nil) - } else { - b2, _ = blake2b.New512(nil) - } - - var buffer [blake2b.Size]byte - binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) - b2.Write(buffer[:4]) - b2.Write(in) - - if len(out) <= blake2b.Size { - b2.Sum(out[:0]) - return - } - - outLen := len(out) - b2.Sum(buffer[:0]) - b2.Reset() - copy(out, buffer[:32]) - out = out[32:] - for len(out) > blake2b.Size { - b2.Write(buffer[:]) - b2.Sum(buffer[:0]) - copy(out, buffer[:32]) - out = out[32:] - b2.Reset() - } - - if outLen%blake2b.Size > 0 { // outLen > 64 - r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 - b2, _ = blake2b.New(outLen-32*r, nil) - } - b2.Write(buffer[:]) - b2.Sum(out[:0]) -} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go deleted file mode 100644 index 063e7784f..000000000 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 && gc && !purego - -package argon2 - -import "golang.org/x/sys/cpu" - -func init() { - useSSE4 = cpu.X86.HasSSE41 -} - -//go:noescape -func mixBlocksSSE2(out, a, b, c *block) - -//go:noescape -func xorBlocksSSE2(out, a, b, c *block) - -//go:noescape -func blamkaSSE4(b *block) - -func processBlockSSE(out, in1, in2 *block, xor bool) { - var t block - mixBlocksSSE2(&t, in1, in2, &t) - if useSSE4 { - blamkaSSE4(&t) - } else { - for i := 0; i < blockLength; i += 16 { - blamkaGeneric( - &t[i+0], &t[i+1], &t[i+2], &t[i+3], - &t[i+4], &t[i+5], &t[i+6], &t[i+7], - &t[i+8], &t[i+9], &t[i+10], &t[i+11], - &t[i+12], &t[i+13], &t[i+14], &t[i+15], - ) - } - for i := 0; i < blockLength/8; i += 2 { - blamkaGeneric( - &t[i], &t[i+1], &t[16+i], &t[16+i+1], - &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], - &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], - &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], - ) - } - } - if xor { - xorBlocksSSE2(out, in1, in2, &t) - } else { - mixBlocksSSE2(out, in1, in2, &t) - } -} - -func processBlock(out, in1, in2 *block) { - processBlockSSE(out, in1, in2, false) -} - -func processBlockXOR(out, in1, in2 *block) { - processBlockSSE(out, in1, in2, true) -} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s deleted file mode 100644 index 6713accac..000000000 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 && gc && !purego - -#include "textflag.h" - -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO t1, v7; \ - MOVO v2, t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFD $0xB1, v6, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - PSHUFB c40, v2; \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFB c48, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - MOVO v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v7, v7; \ - MOVO v5, t0; \ 
- PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - PSHUFB c40, v3; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFB c48, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - MOVO v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG_0(block, off) \ - MOVOU 8*(off+0)(block), X0; \ - MOVOU 8*(off+2)(block), X1; \ - MOVOU 8*(off+4)(block), X2; \ - MOVOU 8*(off+6)(block), X3; \ - MOVOU 8*(off+8)(block), X4; \ - MOVOU 8*(off+10)(block), X5; \ - MOVOU 8*(off+12)(block), X6; \ - MOVOU 8*(off+14)(block), X7 - -#define STORE_MSG_0(block, off) \ - MOVOU X0, 8*(off+0)(block); \ - MOVOU X1, 8*(off+2)(block); \ - MOVOU X2, 8*(off+4)(block); \ - MOVOU X3, 8*(off+6)(block); \ - MOVOU X4, 8*(off+8)(block); \ - MOVOU X5, 8*(off+10)(block); \ - MOVOU X6, 8*(off+12)(block); \ - MOVOU X7, 8*(off+14)(block) - -#define LOAD_MSG_1(block, off) \ - MOVOU 8*off+0*8(block), X0; \ - MOVOU 8*off+16*8(block), X1; \ - MOVOU 8*off+32*8(block), X2; \ - MOVOU 8*off+48*8(block), X3; \ - MOVOU 8*off+64*8(block), X4; \ - MOVOU 8*off+80*8(block), X5; \ - MOVOU 8*off+96*8(block), X6; \ - MOVOU 8*off+112*8(block), X7 - -#define STORE_MSG_1(block, off) \ - MOVOU X0, 8*off+0*8(block); \ - MOVOU X1, 8*off+16*8(block); \ - MOVOU X2, 8*off+32*8(block); \ - MOVOU X3, 8*off+48*8(block); \ - MOVOU X4, 8*off+64*8(block); \ - MOVOU X5, 8*off+80*8(block); \ - MOVOU X6, 8*off+96*8(block); \ - MOVOU X7, 8*off+112*8(block) - -#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ - LOAD_MSG_0(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_0(block, off) - -#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ - LOAD_MSG_1(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_1(block, off) - -// func blamkaSSE4(b *block) -TEXT ·blamkaSSE4(SB), 4, $0-8 - MOVQ b+0(FP), AX - - MOVOU ·c40<>(SB), X10 - MOVOU ·c48<>(SB), X11 - - BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) - - BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) - RET - -// func mixBlocksSSE2(out, a, b, c *block) -TEXT ·mixBlocksSSE2(SB), 4, $0-32 - MOVQ out+0(FP), DX - MOVQ a+8(FP), AX - MOVQ b+16(FP), BX - MOVQ c+24(FP), CX - MOVQ $128, DI - -loop: - MOVOU 0(AX), X0 - MOVOU 0(BX), X1 - MOVOU 0(CX), X2 - PXOR X1, X0 - PXOR X2, X0 - MOVOU X0, 0(DX) - ADDQ $16, AX - ADDQ $16, BX - ADDQ $16, CX - ADDQ $16, DX - SUBQ $2, DI - JA loop - RET - -// func xorBlocksSSE2(out, a, b, c *block) -TEXT ·xorBlocksSSE2(SB), 4, $0-32 - MOVQ out+0(FP), 
DX - MOVQ a+8(FP), AX - MOVQ b+16(FP), BX - MOVQ c+24(FP), CX - MOVQ $128, DI - -loop: - MOVOU 0(AX), X0 - MOVOU 0(BX), X1 - MOVOU 0(CX), X2 - MOVOU 0(DX), X3 - PXOR X1, X0 - PXOR X2, X0 - PXOR X3, X0 - MOVOU X0, 0(DX) - ADDQ $16, AX - ADDQ $16, BX - ADDQ $16, CX - ADDQ $16, DX - SUBQ $2, DI - JA loop - RET diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go deleted file mode 100644 index a481b2243..000000000 --- a/vendor/golang.org/x/crypto/argon2/blamka_generic.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package argon2 - -var useSSE4 bool - -func processBlockGeneric(out, in1, in2 *block, xor bool) { - var t block - for i := range t { - t[i] = in1[i] ^ in2[i] - } - for i := 0; i < blockLength; i += 16 { - blamkaGeneric( - &t[i+0], &t[i+1], &t[i+2], &t[i+3], - &t[i+4], &t[i+5], &t[i+6], &t[i+7], - &t[i+8], &t[i+9], &t[i+10], &t[i+11], - &t[i+12], &t[i+13], &t[i+14], &t[i+15], - ) - } - for i := 0; i < blockLength/8; i += 2 { - blamkaGeneric( - &t[i], &t[i+1], &t[16+i], &t[16+i+1], - &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], - &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], - &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], - ) - } - if xor { - for i := range t { - out[i] ^= in1[i] ^ in2[i] ^ t[i] - } - } else { - for i := range t { - out[i] = in1[i] ^ in2[i] ^ t[i] - } - } -} - -func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { - v00, v01, v02, v03 := *t00, *t01, *t02, *t03 - v04, v05, v06, v07 := *t04, *t05, *t06, *t07 - v08, v09, v10, v11 := *t08, *t09, *t10, *t11 - v12, v13, v14, v15 := *t12, *t13, *t14, *t15 - - v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) - v12 ^= v00 - v12 = v12>>32 | v12<<32 - v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) - v04 ^= v08 - v04 = v04>>24 | v04<<40 - - v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) - v12 ^= v00 - v12 = v12>>16 | v12<<48 - v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) - v04 ^= v08 - v04 = v04>>63 | v04<<1 - - v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) - v13 ^= v01 - v13 = v13>>32 | v13<<32 - v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) - v05 ^= v09 - v05 = v05>>24 | v05<<40 - - v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) - v13 ^= v01 - v13 = v13>>16 | v13<<48 - v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) - v05 ^= v09 - v05 = v05>>63 | v05<<1 - - v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) - v14 ^= v02 - v14 = v14>>32 | v14<<32 - v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) - v06 ^= v10 - v06 = v06>>24 | v06<<40 - - v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) - v14 ^= v02 - v14 = v14>>16 | v14<<48 - v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) - v06 ^= v10 - v06 = v06>>63 | v06<<1 - - v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) - v15 ^= v03 - v15 = v15>>32 | v15<<32 - v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) - v07 ^= v11 - v07 = v07>>24 | v07<<40 - - v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) - v15 ^= v03 - v15 = v15>>16 | v15<<48 - v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) - v07 ^= v11 - v07 = v07>>63 | v07<<1 - - v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) - v15 ^= v00 - v15 = v15>>32 | v15<<32 - v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) - v05 ^= v10 - v05 = v05>>24 | 
v05<<40 - - v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) - v15 ^= v00 - v15 = v15>>16 | v15<<48 - v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) - v05 ^= v10 - v05 = v05>>63 | v05<<1 - - v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) - v12 ^= v01 - v12 = v12>>32 | v12<<32 - v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) - v06 ^= v11 - v06 = v06>>24 | v06<<40 - - v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) - v12 ^= v01 - v12 = v12>>16 | v12<<48 - v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) - v06 ^= v11 - v06 = v06>>63 | v06<<1 - - v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) - v13 ^= v02 - v13 = v13>>32 | v13<<32 - v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) - v07 ^= v08 - v07 = v07>>24 | v07<<40 - - v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) - v13 ^= v02 - v13 = v13>>16 | v13<<48 - v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) - v07 ^= v08 - v07 = v07>>63 | v07<<1 - - v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) - v14 ^= v03 - v14 = v14>>32 | v14<<32 - v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) - v04 ^= v09 - v04 = v04>>24 | v04<<40 - - v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) - v14 ^= v03 - v14 = v14>>16 | v14<<48 - v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) - v04 ^= v09 - v04 = v04>>63 | v04<<1 - - *t00, *t01, *t02, *t03 = v00, v01, v02, v03 - *t04, *t05, *t06, *t07 = v04, v05, v06, v07 - *t08, *t09, *t10, *t11 = v08, v09, v10, v11 - *t12, *t13, *t14, *t15 = v12, v13, v14, v15 -} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go deleted file mode 100644 index 16d58c650..000000000 --- a/vendor/golang.org/x/crypto/argon2/blamka_ref.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || purego || !gc - -package argon2 - -func processBlock(out, in1, in2 *block) { - processBlockGeneric(out, in1, in2, false) -} - -func processBlockXOR(out, in1, in2 *block) { - processBlockGeneric(out, in1, in2, true) -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go deleted file mode 100644 index d2e98d429..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 -// and the extendable output function (XOF) BLAKE2Xb. -// -// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and -// produces digests of any size between 1 and 64 bytes. -// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf -// and for BLAKE2Xb see https://blake2.net/blake2x.pdf -// -// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). -// If you need a secret-key MAC (message authentication code), use the New512 -// function with a non-nil key. -// -// BLAKE2X is a construction to compute hash values larger than 64 bytes. It -// can produce hash values between 0 and 4 GiB. -package blake2b - -import ( - "encoding/binary" - "errors" - "hash" -) - -const ( - // The blocksize of BLAKE2b in bytes. - BlockSize = 128 - // The hash size of BLAKE2b-512 in bytes. 
- Size = 64 - // The hash size of BLAKE2b-384 in bytes. - Size384 = 48 - // The hash size of BLAKE2b-256 in bytes. - Size256 = 32 -) - -var ( - useAVX2 bool - useAVX bool - useSSE4 bool -) - -var ( - errKeySize = errors.New("blake2b: invalid key size") - errHashSize = errors.New("blake2b: invalid hash size") -) - -var iv = [8]uint64{ - 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, - 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, -} - -// Sum512 returns the BLAKE2b-512 checksum of the data. -func Sum512(data []byte) [Size]byte { - var sum [Size]byte - checkSum(&sum, Size, data) - return sum -} - -// Sum384 returns the BLAKE2b-384 checksum of the data. -func Sum384(data []byte) [Size384]byte { - var sum [Size]byte - var sum384 [Size384]byte - checkSum(&sum, Size384, data) - copy(sum384[:], sum[:Size384]) - return sum384 -} - -// Sum256 returns the BLAKE2b-256 checksum of the data. -func Sum256(data []byte) [Size256]byte { - var sum [Size]byte - var sum256 [Size256]byte - checkSum(&sum, Size256, data) - copy(sum256[:], sum[:Size256]) - return sum256 -} - -// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil -// key turns the hash into a MAC. The key must be between zero and 64 bytes long. -func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } - -// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil -// key turns the hash into a MAC. The key must be between zero and 64 bytes long. -func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } - -// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil -// key turns the hash into a MAC. The key must be between zero and 64 bytes long. -func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } - -// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. -// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. -// The hash size can be a value between 1 and 64 but it is highly recommended to use -// values equal or greater than: -// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). -// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). -// When the key is nil, the returned hash.Hash implements BinaryMarshaler -// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. 
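//
// A brief usage sketch (the concrete key and sizes are illustrative
// assumptions):
//
//	// Unkeyed: a plain 32-byte hash, equivalent to Sum256.
//	h, _ := blake2b.New(32, nil)
//	h.Write([]byte("data"))
//	sum := h.Sum(nil)
//
//	// Keyed: turns BLAKE2b into a MAC; the key may be up to 64 bytes.
//	mac, err := blake2b.New(32, []byte("a 16+ byte secret key"))
//	if err != nil {
//		panic(err)
//	}
//	mac.Write([]byte("data"))
//	tag := mac.Sum(nil)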
-func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } - -func newDigest(hashSize int, key []byte) (*digest, error) { - if hashSize < 1 || hashSize > Size { - return nil, errHashSize - } - if len(key) > Size { - return nil, errKeySize - } - d := &digest{ - size: hashSize, - keyLen: len(key), - } - copy(d.key[:], key) - d.Reset() - return d, nil -} - -func checkSum(sum *[Size]byte, hashSize int, data []byte) { - h := iv - h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) - var c [2]uint64 - - if length := len(data); length > BlockSize { - n := length &^ (BlockSize - 1) - if length == n { - n -= BlockSize - } - hashBlocks(&h, &c, 0, data[:n]) - data = data[n:] - } - - var block [BlockSize]byte - offset := copy(block[:], data) - remaining := uint64(BlockSize - offset) - if c[0] < remaining { - c[1]-- - } - c[0] -= remaining - - hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) - - for i, v := range h[:(hashSize+7)/8] { - binary.LittleEndian.PutUint64(sum[8*i:], v) - } -} - -type digest struct { - h [8]uint64 - c [2]uint64 - size int - block [BlockSize]byte - offset int - - key [BlockSize]byte - keyLen int -} - -const ( - magic = "b2b" - marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 -) - -func (d *digest) MarshalBinary() ([]byte, error) { - if d.keyLen != 0 { - return nil, errors.New("crypto/blake2b: cannot marshal MACs") - } - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - for i := 0; i < 8; i++ { - b = appendUint64(b, d.h[i]) - } - b = appendUint64(b, d.c[0]) - b = appendUint64(b, d.c[1]) - // Maximum value for size is 64 - b = append(b, byte(d.size)) - b = append(b, d.block[:]...) - b = append(b, byte(d.offset)) - return b, nil -} - -func (d *digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("crypto/blake2b: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("crypto/blake2b: invalid hash state size") - } - b = b[len(magic):] - for i := 0; i < 8; i++ { - b, d.h[i] = consumeUint64(b) - } - b, d.c[0] = consumeUint64(b) - b, d.c[1] = consumeUint64(b) - d.size = int(b[0]) - b = b[1:] - copy(d.block[:], b[:BlockSize]) - b = b[BlockSize:] - d.offset = int(b[0]) - return nil -} - -func (d *digest) BlockSize() int { return BlockSize } - -func (d *digest) Size() int { return d.size } - -func (d *digest) Reset() { - d.h = iv - d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) - d.offset, d.c[0], d.c[1] = 0, 0, 0 - if d.keyLen > 0 { - d.block = d.key - d.offset = BlockSize - } -} - -func (d *digest) Write(p []byte) (n int, err error) { - n = len(p) - - if d.offset > 0 { - remaining := BlockSize - d.offset - if n <= remaining { - d.offset += copy(d.block[d.offset:], p) - return - } - copy(d.block[d.offset:], p[:remaining]) - hashBlocks(&d.h, &d.c, 0, d.block[:]) - d.offset = 0 - p = p[remaining:] - } - - if length := len(p); length > BlockSize { - nn := length &^ (BlockSize - 1) - if length == nn { - nn -= BlockSize - } - hashBlocks(&d.h, &d.c, 0, p[:nn]) - p = p[nn:] - } - - if len(p) > 0 { - d.offset += copy(d.block[:], p) - } - - return -} - -func (d *digest) Sum(sum []byte) []byte { - var hash [Size]byte - d.finalize(&hash) - return append(sum, hash[:d.size]...) 
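	// Note: finalize below operates on copies of d.h and d.c, so Sum does
	// not change the underlying state and callers may keep writing after
	// reading an intermediate sum, per the hash.Hash contract.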
-} - -func (d *digest) finalize(hash *[Size]byte) { - var block [BlockSize]byte - copy(block[:], d.block[:d.offset]) - remaining := uint64(BlockSize - d.offset) - - c := d.c - if c[0] < remaining { - c[1]-- - } - c[0] -= remaining - - h := d.h - hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) - - for i, v := range h { - binary.LittleEndian.PutUint64(hash[8*i:], v) - } -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.BigEndian.PutUint64(a[:], x) - return append(b, a[:]...) -} - -func appendUint32(b []byte, x uint32) []byte { - var a [4]byte - binary.BigEndian.PutUint32(a[:], x) - return append(b, a[:]...) -} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := binary.BigEndian.Uint64(b) - return b[8:], x -} - -func consumeUint32(b []byte) ([]byte, uint32) { - x := binary.BigEndian.Uint32(b) - return b[4:], x -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go deleted file mode 100644 index 199c21d27..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 && gc && !purego - -package blake2b - -import "golang.org/x/sys/cpu" - -func init() { - useAVX2 = cpu.X86.HasAVX2 - useAVX = cpu.X86.HasAVX - useSSE4 = cpu.X86.HasSSE41 -} - -//go:noescape -func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -//go:noescape -func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -//go:noescape -func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - switch { - case useAVX2: - hashBlocksAVX2(h, c, flag, blocks) - case useAVX: - hashBlocksAVX(h, c, flag, blocks) - case useSSE4: - hashBlocksSSE4(h, c, flag, blocks) - default: - hashBlocksGeneric(h, c, flag, blocks) - } -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s deleted file mode 100644 index 9ae8206c2..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ /dev/null @@ -1,744 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build amd64 && gc && !purego - -#include "textflag.h" - -DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 - -#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 -#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 -#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e -#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 -#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 - -#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ - VPADDQ m0, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m1, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y1_Y1; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y3_Y3; \ - VPADDQ m2, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m3, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y3_Y3; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y1_Y1 - -#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E -#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 -#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E -#define 
VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 -#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E - -#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n -#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n -#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n -#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n -#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n - -#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 -#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 -#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 -#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 -#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 - -#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 - -#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 -#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 - -// load msg: Y12 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y12, Y12 - -// load msg: Y13 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ - VMOVQ_SI_X13(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X13(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y13, Y13 - -// load msg: Y14 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ - VMOVQ_SI_X14(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X14(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y14, Y14 - -// load msg: Y15 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ - VMOVQ_SI_X15(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X15(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X11(6*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ - LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ - LOAD_MSG_AVX2_Y15(9, 11, 13, 15) - -#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ - LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ - LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ - VMOVQ_SI_X11(11*8); \ - VPSHUFD $0x4E, 0*8(SI), X14; \ - VPINSRQ_1_SI_X11(5*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(12, 2, 7, 3) - -#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ - VMOVQ_SI_X11(5*8); \ - VMOVDQU 11*8(SI), X12; \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y12, Y12; 
\ - VMOVQ_SI_X13(8*8); \ - VMOVQ_SI_X11(2*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X11(13*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ - LOAD_MSG_AVX2_Y15(14, 6, 1, 4) - -#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ - LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ - LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ - LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ - VMOVQ_SI_X15(6*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X15(10*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ - LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X13(7*8); \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ - LOAD_MSG_AVX2_Y15(1, 12, 8, 13) - -#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ - LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ - LOAD_MSG_AVX2_Y15(13, 5, 14, 9) - -#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ - LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ - LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ - VMOVQ_SI_X14_0; \ - VPSHUFD $0x4E, 8*8(SI), X11; \ - VPINSRQ_1_SI_X14(6*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(7, 3, 2, 11) - -#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ - LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ - LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ - LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ - VMOVQ_SI_X15_0; \ - VMOVQ_SI_X11(6*8); \ - VPINSRQ_1_SI_X15(4*8); \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ - VMOVQ_SI_X12(6*8); \ - VMOVQ_SI_X11(11*8); \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ - VMOVQ_SI_X11(1*8); \ - VMOVDQU 12*8(SI), X14; \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - VMOVQ_SI_X15(2*8); \ - VMOVDQU 4*8(SI), X11; \ - VPINSRQ_1_SI_X15(7*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ - LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ - VMOVQ_SI_X13(2*8); \ - VPSHUFD $0x4E, 5*8(SI), X11; \ - VPINSRQ_1_SI_X13(4*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ - VMOVQ_SI_X15(11*8); \ - VMOVQ_SI_X11(12*8); \ - VPINSRQ_1_SI_X15(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y15, Y15 - -// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, DX - ADDQ $31, DX - ANDQ $~31, DX - - MOVQ CX, 16(DX) - XORQ CX, CX - MOVQ CX, 24(DX) - - VMOVDQU ·AVX2_c40<>(SB), Y4 - VMOVDQU ·AVX2_c48<>(SB), Y5 - - VMOVDQU 0(AX), Y8 - VMOVDQU 32(AX), Y9 - VMOVDQU ·AVX2_iv0<>(SB), Y6 - VMOVDQU ·AVX2_iv1<>(SB), Y7 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - MOVQ R9, 8(DX) - -loop: - ADDQ $128, R8 - MOVQ R8, 0(DX) - CMPQ R8, $128 - JGE noinc - INCQ R9 - MOVQ R9, 8(DX) - -noinc: - VMOVDQA Y8, Y0 - VMOVDQA Y9, Y1 - VMOVDQA Y6, Y2 - VPXOR 0(DX), Y7, Y3 - - LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() - VMOVDQA Y12, 32(DX) - VMOVDQA Y13, 64(DX) - VMOVDQA Y14, 96(DX) - VMOVDQA Y15, 128(DX) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - 
LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() - VMOVDQA Y12, 160(DX) - VMOVDQA Y13, 192(DX) - VMOVDQA Y14, 224(DX) - VMOVDQA Y15, 256(DX) - - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - - ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) - ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) - - VPXOR Y0, Y8, Y8 - VPXOR Y1, Y9, Y9 - VPXOR Y2, Y8, Y8 - VPXOR Y3, Y9, Y9 - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - - VMOVDQU Y8, 0(AX) - VMOVDQU Y9, 32(AX) - VZEROUPPER - - RET - -#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA -#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB -#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF -#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD -#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE - -#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF -#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF - -#define SHUFFLE_AVX() \ - VMOVDQA X6, X13; \ - VMOVDQA X2, X14; \ - VMOVDQA X4, X6; \ - VPUNPCKLQDQ_X13_X13_X15; \ - VMOVDQA X5, X4; \ - VMOVDQA X6, X5; \ - VPUNPCKHQDQ_X15_X7_X6; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X13_X7; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VPUNPCKHQDQ_X15_X2_X2; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X3_X3; \ - -#define SHUFFLE_AVX_INV() \ - VMOVDQA X2, X13; \ - VMOVDQA X4, X14; \ - VPUNPCKLQDQ_X2_X2_X15; \ - VMOVDQA X5, X4; \ - VPUNPCKHQDQ_X15_X3_X2; \ - VMOVDQA X14, X5; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VMOVDQA X6, X14; \ - VPUNPCKHQDQ_X15_X13_X3; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X6_X6; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X7_X7; \ - -#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ - VPADDQ m0, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m1, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFD $-79, v6, v6; \ - VPSHUFD $-79, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - 
VPXOR v5, v3, v3; \ - VPSHUFB c40, v2, v2; \ - VPSHUFB c40, v3, v3; \ - VPADDQ m2, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m3, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFB c48, v6, v6; \ - VPSHUFB c48, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPADDQ v2, v2, t0; \ - VPSRLQ $63, v2, v2; \ - VPXOR t0, v2, v2; \ - VPADDQ v3, v3, t0; \ - VPSRLQ $63, v3, v3; \ - VPXOR t0, v3, v3 - -// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) -// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 -#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X13(i2*8); \ - VMOVQ_SI_X14(i4*8); \ - VMOVQ_SI_X15(i6*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X13(i3*8); \ - VPINSRQ_1_SI_X14(i5*8); \ - VPINSRQ_1_SI_X15(i7*8) - -// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) -#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(1*8); \ - VMOVQ_SI_X15(5*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X13(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(7*8) - -// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) -#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ - VPSHUFD $0x4E, 0*8(SI), X12; \ - VMOVQ_SI_X13(11*8); \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(7*8); \ - VPINSRQ_1_SI_X13(5*8); \ - VPINSRQ_1_SI_X14(2*8); \ - VPINSRQ_1_SI_X15(3*8) - -// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) -#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ - VMOVDQU 11*8(SI), X12; \ - VMOVQ_SI_X13(5*8); \ - VMOVQ_SI_X14(8*8); \ - VMOVQ_SI_X15(2*8); \ - VPINSRQ_1_SI_X13(15*8); \ - VPINSRQ_1_SI_X14_0; \ - VPINSRQ_1_SI_X15(13*8) - -// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) -#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(6*8); \ - VMOVQ_SI_X15_0; \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(15*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(8*8) - -// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) -#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ - VMOVQ_SI_X12(9*8); \ - VMOVQ_SI_X13(2*8); \ - VMOVQ_SI_X14_0; \ - VMOVQ_SI_X15(4*8); \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VPINSRQ_1_SI_X15(15*8) - -// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) -#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(11*8); \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X13(8*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(3*8) - -// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) -#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ - MOVQ 0*8(SI), X12; \ - VPSHUFD $0x4E, 8*8(SI), X13; \ - MOVQ 7*8(SI), X14; \ - MOVQ 2*8(SI), X15; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(11*8) - -// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) -#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ - MOVQ 6*8(SI), X12; \ - MOVQ 11*8(SI), X13; \ - MOVQ 15*8(SI), X14; \ - MOVQ 3*8(SI), X15; \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X14(9*8); \ - VPINSRQ_1_SI_X15(8*8) - -// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) -#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ - MOVQ 5*8(SI), X12; \ - MOVQ 8*8(SI), X13; \ - MOVQ 0*8(SI), X14; \ - MOVQ 6*8(SI), X15; \ - 
VPINSRQ_1_SI_X12(15*8); \ - VPINSRQ_1_SI_X13(2*8); \ - VPINSRQ_1_SI_X14(4*8); \ - VPINSRQ_1_SI_X15(10*8) - -// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) -#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ - VMOVDQU 12*8(SI), X12; \ - MOVQ 1*8(SI), X13; \ - MOVQ 2*8(SI), X14; \ - VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VMOVDQU 4*8(SI), X15 - -// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) -#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ - MOVQ 15*8(SI), X12; \ - MOVQ 3*8(SI), X13; \ - MOVQ 11*8(SI), X14; \ - MOVQ 12*8(SI), X15; \ - VPINSRQ_1_SI_X12(9*8); \ - VPINSRQ_1_SI_X13(13*8); \ - VPINSRQ_1_SI_X14(14*8); \ - VPINSRQ_1_SI_X15_0 - -// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, R10 - ADDQ $15, R10 - ANDQ $~15, R10 - - VMOVDQU ·AVX_c40<>(SB), X0 - VMOVDQU ·AVX_c48<>(SB), X1 - VMOVDQA X0, X8 - VMOVDQA X1, X9 - - VMOVDQU ·AVX_iv3<>(SB), X0 - VMOVDQA X0, 0(R10) - XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) - - VMOVDQU 0(AX), X10 - VMOVDQU 16(AX), X11 - VMOVDQU 32(AX), X2 - VMOVDQU 48(AX), X3 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - -loop: - ADDQ $128, R8 - CMPQ R8, $128 - JGE noinc - INCQ R9 - -noinc: - VMOVQ_R8_X15 - VPINSRQ_1_R9_X15 - - VMOVDQA X10, X0 - VMOVDQA X11, X1 - VMOVDQU ·AVX_iv0<>(SB), X4 - VMOVDQU ·AVX_iv1<>(SB), X5 - VMOVDQU ·AVX_iv2<>(SB), X6 - - VPXOR X15, X6, X6 - VMOVDQA 0(R10), X7 - - LOAD_MSG_AVX_0_2_4_6_1_3_5_7() - VMOVDQA X12, 16(R10) - VMOVDQA X13, 32(R10) - VMOVDQA X14, 48(R10) - VMOVDQA X15, 64(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) - VMOVDQA X12, 80(R10) - VMOVDQA X13, 96(R10) - VMOVDQA X14, 112(R10) - VMOVDQA X15, 128(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) - VMOVDQA X12, 144(R10) - VMOVDQA X13, 160(R10) - VMOVDQA X14, 176(R10) - VMOVDQA X15, 192(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_1_0_11_5_12_2_7_3() - VMOVDQA X12, 208(R10) - VMOVDQA X13, 224(R10) - VMOVDQA X14, 240(R10) - VMOVDQA X15, 256(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_11_12_5_15_8_0_2_13() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_2_5_4_15_6_10_0_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_9_5_2_10_0_7_4_15() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_2_6_0_8_12_10_11_3() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(4, 7, 
15, 1, 13, 5, 14, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_0_6_9_8_7_3_2_11() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_5_15_8_2_0_4_6_10() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_6_14_11_0_15_9_3_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_12_13_1_10_2_7_4_5() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_15_9_3_13_11_14_12_0() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - - VMOVDQU 32(AX), X14 - VMOVDQU 48(AX), X15 - VPXOR X0, X10, X10 - VPXOR X1, X11, X11 - VPXOR X2, X14, X14 - VPXOR X3, X15, X15 - VPXOR X4, X10, X10 - VPXOR X5, X11, X11 - VPXOR X6, X14, X2 - VPXOR X7, X15, X3 - VMOVDQU X2, 32(AX) - VMOVDQU X3, 48(AX) - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - VMOVDQU X10, 0(AX) - VMOVDQU X11, 16(AX) - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - VZEROUPPER - - RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s deleted file mode 100644 index adfac00c1..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build amd64 && gc && !purego - -#include "textflag.h" - -DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO t1, v7; \ - MOVO v2, t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ - PADDQ m0, v0; \ - PADDQ m1, v1; \ - PADDQ v2, v0; \ - PADDQ v3, v1; \ - PXOR v0, v6; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v6, v6; \ - PSHUFD $0xB1, v7, v7; \ - PADDQ v6, v4; \ - PADDQ v7, v5; \ - PXOR v4, v2; \ - PXOR v5, v3; \ - PSHUFB c40, v2; \ - PSHUFB c40, v3; \ - PADDQ m2, v0; \ - PADDQ m3, v1; \ - PADDQ v2, v0; \ - PADDQ v3, v1; \ - PXOR v0, v6; \ - PXOR v1, v7; \ - PSHUFB c48, v6; \ - PSHUFB c48, v7; \ - PADDQ v6, v4; \ - PADDQ v7, v5; \ - PXOR v4, v2; \ - PXOR v5, v3; \ - MOVOU v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVOU v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ - MOVQ i0*8(src), m0; \ - PINSRQ $1, i1*8(src), m0; \ - MOVQ i2*8(src), m1; \ - PINSRQ $1, i3*8(src), m1; \ - MOVQ i4*8(src), m2; \ - PINSRQ $1, i5*8(src), m2; \ - MOVQ i6*8(src), m3; \ - PINSRQ $1, i7*8(src), m3 - -// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, R10 - ADDQ $15, R10 - ANDQ $~15, R10 - - MOVOU ·iv3<>(SB), X0 - MOVO X0, 0(R10) - XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) - - MOVOU ·c40<>(SB), X13 - MOVOU ·c48<>(SB), X14 - - MOVOU 0(AX), X12 - MOVOU 16(AX), X15 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - -loop: - ADDQ $128, R8 - CMPQ R8, $128 - JGE noinc - INCQ R9 - -noinc: - MOVQ R8, X8 - PINSRQ $1, R9, X8 - - MOVO X12, X0 - MOVO X15, X1 - MOVOU 32(AX), X2 - MOVOU 48(AX), X3 - MOVOU ·iv0<>(SB), X4 - MOVOU ·iv1<>(SB), X5 - MOVOU ·iv2<>(SB), X6 - - PXOR X8, X6 - MOVO 0(R10), X7 - - LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) - MOVO X8, 16(R10) - MOVO X9, 32(R10) - MOVO X10, 48(R10) - MOVO X11, 64(R10) - HALF_ROUND(X0, X1, 
X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) - MOVO X8, 80(R10) - MOVO X9, 96(R10) - MOVO X10, 112(R10) - MOVO X11, 128(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) - MOVO X8, 144(R10) - MOVO X9, 160(R10) - MOVO X10, 176(R10) - MOVO X11, 192(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) - MOVO X8, 208(R10) - MOVO X9, 224(R10) - MOVO X10, 240(R10) - MOVO X11, 256(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, 
X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - MOVOU 32(AX), X10 - MOVOU 48(AX), X11 - PXOR X0, X12 - PXOR X1, X15 - PXOR X2, X10 - PXOR X3, X11 - PXOR X4, X12 - PXOR X5, X15 - PXOR X6, X10 - PXOR X7, X11 - MOVOU X10, 32(AX) - MOVOU X11, 48(AX) - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVOU X12, 0(AX) - MOVOU X15, 16(AX) - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - - RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go deleted file mode 100644 index 3168a8aa3..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blake2b - -import ( - "encoding/binary" - "math/bits" -) - -// the precomputed values for BLAKE2b -// there are 12 16-byte arrays - one for each round -// the entries are calculated from the sigma constants. 
-var precomputed = [12][16]byte{ - {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, - {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, - {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, - {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, - {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, - {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, - {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, - {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, - {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, - {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, - {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first - {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second -} - -func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - var m [16]uint64 - c0, c1 := c[0], c[1] - - for i := 0; i < len(blocks); { - c0 += BlockSize - if c0 < BlockSize { - c1++ - } - - v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] - v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] - v12 ^= c0 - v13 ^= c1 - v14 ^= flag - - for j := range m { - m[j] = binary.LittleEndian.Uint64(blocks[i:]) - i += 8 - } - - for j := range precomputed { - s := &(precomputed[j]) - - v0 += m[s[0]] - v0 += v4 - v12 ^= v0 - v12 = bits.RotateLeft64(v12, -32) - v8 += v12 - v4 ^= v8 - v4 = bits.RotateLeft64(v4, -24) - v1 += m[s[1]] - v1 += v5 - v13 ^= v1 - v13 = bits.RotateLeft64(v13, -32) - v9 += v13 - v5 ^= v9 - v5 = bits.RotateLeft64(v5, -24) - v2 += m[s[2]] - v2 += v6 - v14 ^= v2 - v14 = bits.RotateLeft64(v14, -32) - v10 += v14 - v6 ^= v10 - v6 = bits.RotateLeft64(v6, -24) - v3 += m[s[3]] - v3 += v7 - v15 ^= v3 - v15 = bits.RotateLeft64(v15, -32) - v11 += v15 - v7 ^= v11 - v7 = bits.RotateLeft64(v7, -24) - - v0 += m[s[4]] - v0 += v4 - v12 ^= v0 - v12 = bits.RotateLeft64(v12, -16) - v8 += v12 - v4 ^= v8 - v4 = bits.RotateLeft64(v4, -63) - v1 += m[s[5]] - v1 += v5 - v13 ^= v1 - v13 = bits.RotateLeft64(v13, -16) - v9 += v13 - v5 ^= v9 - v5 = bits.RotateLeft64(v5, -63) - v2 += m[s[6]] - v2 += v6 - v14 ^= v2 - v14 = bits.RotateLeft64(v14, -16) - v10 += v14 - v6 ^= v10 - v6 = bits.RotateLeft64(v6, -63) - v3 += m[s[7]] - v3 += v7 - v15 ^= v3 - v15 = bits.RotateLeft64(v15, -16) - v11 += v15 - v7 ^= v11 - v7 = bits.RotateLeft64(v7, -63) - - v0 += m[s[8]] - v0 += v5 - v15 ^= v0 - v15 = bits.RotateLeft64(v15, -32) - v10 += v15 - v5 ^= v10 - v5 = bits.RotateLeft64(v5, -24) - v1 += m[s[9]] - v1 += v6 - v12 ^= v1 - v12 = bits.RotateLeft64(v12, -32) - v11 += v12 - v6 ^= v11 - v6 = bits.RotateLeft64(v6, -24) - v2 += m[s[10]] - v2 += v7 - v13 ^= v2 - v13 = bits.RotateLeft64(v13, -32) - v8 += v13 - v7 ^= v8 - v7 = bits.RotateLeft64(v7, -24) - v3 += m[s[11]] - v3 += v4 - v14 ^= v3 - v14 = bits.RotateLeft64(v14, -32) - v9 += v14 - v4 ^= v9 - v4 = bits.RotateLeft64(v4, -24) - - v0 += m[s[12]] - v0 += v5 - v15 ^= v0 - v15 = bits.RotateLeft64(v15, -16) - v10 += v15 - v5 ^= v10 - v5 = bits.RotateLeft64(v5, -63) - v1 += m[s[13]] - v1 += v6 - v12 ^= v1 - v12 = bits.RotateLeft64(v12, -16) - v11 += v12 - v6 ^= v11 - v6 = bits.RotateLeft64(v6, -63) - v2 += m[s[14]] - v2 += v7 - v13 ^= v2 - v13 = bits.RotateLeft64(v13, -16) - v8 += v13 - v7 ^= v8 - v7 = bits.RotateLeft64(v7, -63) - v3 += m[s[15]] - v3 += v4 - v14 ^= v3 - v14 = bits.RotateLeft64(v14, -16) - v9 += v14 - v4 ^= v9 - v4 = bits.RotateLeft64(v4, -63) - - } - - h[0] ^= v0 ^ v8 - h[1] ^= v1 ^ v9 - h[2] ^= v2 ^ 
v10 - h[3] ^= v3 ^ v11 - h[4] ^= v4 ^ v12 - h[5] ^= v5 ^ v13 - h[6] ^= v6 ^ v14 - h[7] ^= v7 ^ v15 - } - c[0], c[1] = c0, c1 -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go deleted file mode 100644 index 6e28668cd..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || purego || !gc - -package blake2b - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - hashBlocksGeneric(h, c, flag, blocks) -} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go deleted file mode 100644 index 52c414db0..000000000 --- a/vendor/golang.org/x/crypto/blake2b/blake2x.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blake2b - -import ( - "encoding/binary" - "errors" - "io" -) - -// XOF defines the interface to hash functions that -// support arbitrary-length output. -type XOF interface { - // Write absorbs more data into the hash's state. It panics if called - // after Read. - io.Writer - - // Read reads more output from the hash. It returns io.EOF if the limit - // has been reached. - io.Reader - - // Clone returns a copy of the XOF in its current state. - Clone() XOF - - // Reset resets the XOF to its initial state. - Reset() -} - -// OutputLengthUnknown can be used as the size argument to NewXOF to indicate -// the length of the output is not known in advance. -const OutputLengthUnknown = 0 - -// magicUnknownOutputLength is a magic value for the output size that indicates -// an unknown number of output bytes. -const magicUnknownOutputLength = (1 << 32) - 1 - -// maxOutputLength is the absolute maximum number of bytes to produce when the -// number of output bytes is unknown. -const maxOutputLength = (1 << 32) * 64 - -// NewXOF creates a new variable-output-length hash. The hash either produce a -// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes -// (size == OutputLengthUnknown). In the latter case, an absolute limit of -// 256GiB applies. -// -// A non-nil key turns the hash into a MAC. The key must between -// zero and 32 bytes long. -func NewXOF(size uint32, key []byte) (XOF, error) { - if len(key) > Size { - return nil, errKeySize - } - if size == magicUnknownOutputLength { - // 2^32-1 indicates an unknown number of bytes and thus isn't a - // valid length. 
- return nil, errors.New("blake2b: XOF length too large") - } - if size == OutputLengthUnknown { - size = magicUnknownOutputLength - } - x := &xof{ - d: digest{ - size: Size, - keyLen: len(key), - }, - length: size, - } - copy(x.d.key[:], key) - x.Reset() - return x, nil -} - -type xof struct { - d digest - length uint32 - remaining uint64 - cfg, root, block [Size]byte - offset int - nodeOffset uint32 - readMode bool -} - -func (x *xof) Write(p []byte) (n int, err error) { - if x.readMode { - panic("blake2b: write to XOF after read") - } - return x.d.Write(p) -} - -func (x *xof) Clone() XOF { - clone := *x - return &clone -} - -func (x *xof) Reset() { - x.cfg[0] = byte(Size) - binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length - binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length - x.cfg[17] = byte(Size) // inner hash size - - x.d.Reset() - x.d.h[1] ^= uint64(x.length) << 32 - - x.remaining = uint64(x.length) - if x.remaining == magicUnknownOutputLength { - x.remaining = maxOutputLength - } - x.offset, x.nodeOffset = 0, 0 - x.readMode = false -} - -func (x *xof) Read(p []byte) (n int, err error) { - if !x.readMode { - x.d.finalize(&x.root) - x.readMode = true - } - - if x.remaining == 0 { - return 0, io.EOF - } - - n = len(p) - if uint64(n) > x.remaining { - n = int(x.remaining) - p = p[:n] - } - - if x.offset > 0 { - blockRemaining := Size - x.offset - if n < blockRemaining { - x.offset += copy(p, x.block[x.offset:]) - x.remaining -= uint64(n) - return - } - copy(p, x.block[x.offset:]) - p = p[blockRemaining:] - x.offset = 0 - x.remaining -= uint64(blockRemaining) - } - - for len(p) >= Size { - binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) - x.nodeOffset++ - - x.d.initConfig(&x.cfg) - x.d.Write(x.root[:]) - x.d.finalize(&x.block) - - copy(p, x.block[:]) - p = p[Size:] - x.remaining -= uint64(Size) - } - - if todo := len(p); todo > 0 { - if x.remaining < uint64(Size) { - x.cfg[0] = byte(x.remaining) - } - binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) - x.nodeOffset++ - - x.d.initConfig(&x.cfg) - x.d.Write(x.root[:]) - x.d.finalize(&x.block) - - x.offset = copy(p, x.block[:todo]) - x.remaining -= uint64(todo) - } - return -} - -func (d *digest) initConfig(cfg *[Size]byte) { - d.offset, d.c[0], d.c[1] = 0, 0, 0 - for i := range d.h { - d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) - } -} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go deleted file mode 100644 index 54e446e1d..000000000 --- a/vendor/golang.org/x/crypto/blake2b/register.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-
-package blake2b
-
-import (
-	"crypto"
-	"hash"
-)
-
-func init() {
-	newHash256 := func() hash.Hash {
-		h, _ := New256(nil)
-		return h
-	}
-	newHash384 := func() hash.Hash {
-		h, _ := New384(nil)
-		return h
-	}
-
-	newHash512 := func() hash.Hash {
-		h, _ := New512(nil)
-		return h
-	}
-
-	crypto.RegisterHash(crypto.BLAKE2b_256, newHash256)
-	crypto.RegisterHash(crypto.BLAKE2b_384, newHash384)
-	crypto.RegisterHash(crypto.BLAKE2b_512, newHash512)
-}
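The register.go deletion above removes the init that wires BLAKE2b into the standard library's crypto registry. For reference, that registration is what lets callers obtain the hash through the crypto package. A minimal sketch of such usage, assuming the old register.go is still in effect; this snippet is illustrative and not part of the patch:

package main

import (
	"crypto"
	"fmt"

	_ "golang.org/x/crypto/blake2b" // with register.go, this import's init calls crypto.RegisterHash
)

func main() {
	// crypto.Hash.New panics if no constructor was registered for the hash.
	h := crypto.BLAKE2b_256.New()
	h.Write([]byte("hello"))
	fmt.Printf("%x\n", h.Sum(nil))
}

Once register.go is gone, callers would construct the hash directly with blake2b.New256(nil) instead of going through the crypto registry.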
diff --git a/vendor/golang.org/x/net/publicsuffix/data/children b/vendor/golang.org/x/net/publicsuffix/data/children
deleted file mode 100644
index 08261bffd196fd6942b4cebb5ff06e0ffe53808d..0000000000000000000000000000000000000000
Binary files a/vendor/golang.org/x/net/publicsuffix/data/children and /dev/null differ
diff --git a/vendor/golang.org/x/net/publicsuffix/data/nodes b/vendor/golang.org/x/net/publicsuffix/data/nodes
deleted file mode 100644
index 1dae6ede8f292889cb4252aa473312fea1bac46a..0000000000000000000000000000000000000000
Binary files a/vendor/golang.org/x/net/publicsuffix/data/nodes and /dev/null differ
nodesBitsTextLength)) - icannNode = u&(1<>= nodesBitsICANN - u = children.get(u & (1<>= childrenBitsLo - hi = u & (1<>= childrenBitsHi - switch u & (1<>= childrenBitsNodeType - wildcard = u&(1<>= nodesBitsTextLength - offset := x & (1<>start) & ((1 << (end - start + 1)) - 1) -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s deleted file mode 100644 index 22cc99844..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc - -#include "textflag.h" - -// func getisar0() uint64 -TEXT ·getisar0(SB),NOSPLIT,$0-8 - // get Instruction Set Attributes 0 into x0 - // mrs x0, ID_AA64ISAR0_EL1 = d5380600 - WORD $0xd5380600 - MOVD R0, ret+0(FP) - RET - -// func getisar1() uint64 -TEXT ·getisar1(SB),NOSPLIT,$0-8 - // get Instruction Set Attributes 1 into x0 - // mrs x0, ID_AA64ISAR1_EL1 = d5380620 - WORD $0xd5380620 - MOVD R0, ret+0(FP) - RET - -// func getpfr0() uint64 -TEXT ·getpfr0(SB),NOSPLIT,$0-8 - // get Processor Feature Register 0 into x0 - // mrs x0, ID_AA64PFR0_EL1 = d5380400 - WORD $0xd5380400 - MOVD R0, ret+0(FP) - RET - -// func getzfr0() uint64 -TEXT ·getzfr0(SB),NOSPLIT,$0-8 - // get SVE Feature Register 0 into x0 - // mrs x0, ID_AA64ZFR0_EL1 = d5380480 - WORD $0xd5380480 - MOVD R0, ret+0(FP) - RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go deleted file mode 100644 index 6ac6e1efb..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc - -package cpu - -func getisar0() uint64 -func getisar1() uint64 -func getpfr0() uint64 -func getzfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go deleted file mode 100644 index c8ae6ddc1..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc - -package cpu - -// haveAsmFunctions reports whether the other functions in this file can -// be safely called. -func haveAsmFunctions() bool { return true } - -// The following feature detection functions are defined in cpu_s390x.s. -// They are likely to be expensive to call so the results should be cached. -func stfle() facilityList -func kmQuery() queryResult -func kmcQuery() queryResult -func kmctrQuery() queryResult -func kmaQuery() queryResult -func kimdQuery() queryResult -func klmdQuery() queryResult diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go deleted file mode 100644 index 910728fb1..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (386 || amd64 || amd64p32) && gc - -package cpu - -// cpuid is implemented in cpu_x86.s for gc compiler -// and in cpu_gccgo.c for gccgo. 
-func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
-
-// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
-// and in cpu_gccgo.c for gccgo.
-func xgetbv() (eax, edx uint32)
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
deleted file mode 100644
index 7f1946780..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gccgo
-
-package cpu
-
-func getisar0() uint64 { return 0 }
-func getisar1() uint64 { return 0 }
-func getpfr0() uint64 { return 0 }
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
deleted file mode 100644
index 9526d2ce3..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gccgo
-
-package cpu
-
-// haveAsmFunctions reports whether the other functions in this file can
-// be safely called.
-func haveAsmFunctions() bool { return false }
-
-// TODO(mundaym): the following feature detection functions are currently
-// stubs. See https://golang.org/cl/162887 for how to fix this.
-// They are likely to be expensive to call so the results should be cached.
-func stfle() facilityList { panic("not implemented for gccgo") }
-func kmQuery() queryResult { panic("not implemented for gccgo") }
-func kmcQuery() queryResult { panic("not implemented for gccgo") }
-func kmctrQuery() queryResult { panic("not implemented for gccgo") }
-func kmaQuery() queryResult { panic("not implemented for gccgo") }
-func kimdQuery() queryResult { panic("not implemented for gccgo") }
-func klmdQuery() queryResult { panic("not implemented for gccgo") }
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
deleted file mode 100644
index 3f73a05dc..000000000
--- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (386 || amd64 || amd64p32) && gccgo
-
-#include <cpuid.h>
-#include <stdint.h>
-#include <x86intrin.h>
-
-// Need to wrap __get_cpuid_count because it's declared as static.
-int
-gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
-                   uint32_t *eax, uint32_t *ebx,
-                   uint32_t *ecx, uint32_t *edx)
-{
-	return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
-}
-
-#pragma GCC diagnostic ignored "-Wunknown-pragmas"
-#pragma GCC push_options
-#pragma GCC target("xsave")
-#pragma clang attribute push (__attribute__((target("xsave"))), apply_to=function)
-
-// xgetbv reads the contents of an XCR (Extended Control Register)
-// specified in the ECX register into registers EDX:EAX.
-// Currently, the only supported value for XCR is 0.
-void -gccgoXgetbv(uint32_t *eax, uint32_t *edx) -{ - uint64_t v = _xgetbv(0); - *eax = v & 0xffffffff; - *edx = v >> 32; -} - -#pragma clang attribute pop -#pragma GCC pop_options diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go deleted file mode 100644 index 99c60fe9f..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (386 || amd64 || amd64p32) && gccgo - -package cpu - -//extern gccgoGetCpuidCount -func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32) - -func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) { - var a, b, c, d uint32 - gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d) - return a, b, c, d -} - -//extern gccgoXgetbv -func gccgoXgetbv(eax, edx *uint32) - -func xgetbv() (eax, edx uint32) { - var a, d uint32 - gccgoXgetbv(&a, &d) - return a, d -} - -// gccgo doesn't build on Darwin, per: -// https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/gcc.rb#L76 -func darwinSupportsAVX512() bool { - return false -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go deleted file mode 100644 index 743eb5435..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !386 && !amd64 && !amd64p32 && !arm64 - -package cpu - -func archInit() { - if err := readHWCAP(); err != nil { - return - } - doinit() - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go deleted file mode 100644 index 2057006dc..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cpu - -func doinit() { - ARM.HasSWP = isSet(hwCap, hwcap_SWP) - ARM.HasHALF = isSet(hwCap, hwcap_HALF) - ARM.HasTHUMB = isSet(hwCap, hwcap_THUMB) - ARM.Has26BIT = isSet(hwCap, hwcap_26BIT) - ARM.HasFASTMUL = isSet(hwCap, hwcap_FAST_MULT) - ARM.HasFPA = isSet(hwCap, hwcap_FPA) - ARM.HasVFP = isSet(hwCap, hwcap_VFP) - ARM.HasEDSP = isSet(hwCap, hwcap_EDSP) - ARM.HasJAVA = isSet(hwCap, hwcap_JAVA) - ARM.HasIWMMXT = isSet(hwCap, hwcap_IWMMXT) - ARM.HasCRUNCH = isSet(hwCap, hwcap_CRUNCH) - ARM.HasTHUMBEE = isSet(hwCap, hwcap_THUMBEE) - ARM.HasNEON = isSet(hwCap, hwcap_NEON) - ARM.HasVFPv3 = isSet(hwCap, hwcap_VFPv3) - ARM.HasVFPv3D16 = isSet(hwCap, hwcap_VFPv3D16) - ARM.HasTLS = isSet(hwCap, hwcap_TLS) - ARM.HasVFPv4 = isSet(hwCap, hwcap_VFPv4) - ARM.HasIDIVA = isSet(hwCap, hwcap_IDIVA) - ARM.HasIDIVT = isSet(hwCap, hwcap_IDIVT) - ARM.HasVFPD32 = isSet(hwCap, hwcap_VFPD32) - ARM.HasLPAE = isSet(hwCap, hwcap_LPAE) - ARM.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) - ARM.HasAES = isSet(hwCap2, hwcap2_AES) - ARM.HasPMULL = isSet(hwCap2, hwcap2_PMULL) - ARM.HasSHA1 = isSet(hwCap2, hwcap2_SHA1) - ARM.HasSHA2 = isSet(hwCap2, hwcap2_SHA2) - ARM.HasCRC32 = isSet(hwCap2, hwcap2_CRC32) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go deleted file mode 100644 index 3d386d0fc..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "strings" - "syscall" -) - -// HWCAP/HWCAP2 bits. These are exposed by Linux. -const ( - hwcap_FP = 1 << 0 - hwcap_ASIMD = 1 << 1 - hwcap_EVTSTRM = 1 << 2 - hwcap_AES = 1 << 3 - hwcap_PMULL = 1 << 4 - hwcap_SHA1 = 1 << 5 - hwcap_SHA2 = 1 << 6 - hwcap_CRC32 = 1 << 7 - hwcap_ATOMICS = 1 << 8 - hwcap_FPHP = 1 << 9 - hwcap_ASIMDHP = 1 << 10 - hwcap_CPUID = 1 << 11 - hwcap_ASIMDRDM = 1 << 12 - hwcap_JSCVT = 1 << 13 - hwcap_FCMA = 1 << 14 - hwcap_LRCPC = 1 << 15 - hwcap_DCPOP = 1 << 16 - hwcap_SHA3 = 1 << 17 - hwcap_SM3 = 1 << 18 - hwcap_SM4 = 1 << 19 - hwcap_ASIMDDP = 1 << 20 - hwcap_SHA512 = 1 << 21 - hwcap_SVE = 1 << 22 - hwcap_ASIMDFHM = 1 << 23 - - hwcap2_SVE2 = 1 << 1 -) - -// linuxKernelCanEmulateCPUID reports whether we're running -// on Linux 4.11+. Ideally we'd like to ask the question about -// whether the current kernel contains -// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=77c97b4ee21290f5f083173d957843b615abbff2 -// but the version number will have to do. -func linuxKernelCanEmulateCPUID() bool { - var un syscall.Utsname - syscall.Uname(&un) - var sb strings.Builder - for _, b := range un.Release[:] { - if b == 0 { - break - } - sb.WriteByte(byte(b)) - } - major, minor, _, ok := parseRelease(sb.String()) - return ok && (major > 4 || major == 4 && minor >= 11) -} - -func doinit() { - if err := readHWCAP(); err != nil { - // We failed to read /proc/self/auxv. This can happen if the binary has - // been given extra capabilities(7) with /bin/setcap. - // - // When this happens, we have two options. If the Linux kernel is new - // enough (4.11+), we can read the arm64 registers directly which'll - // trap into the kernel and then return back to userspace. 
- // - // But on older kernels, such as Linux 4.4.180 as used on many Synology - // devices, calling readARM64Registers (specifically getisar0) will - // cause a SIGILL and we'll die. So for older kernels, parse /proc/cpuinfo - // instead. - // - // See golang/go#57336. - if linuxKernelCanEmulateCPUID() { - readARM64Registers() - } else { - readLinuxProcCPUInfo() - } - return - } - - // HWCAP feature bits - ARM64.HasFP = isSet(hwCap, hwcap_FP) - ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) - ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) - ARM64.HasAES = isSet(hwCap, hwcap_AES) - ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) - ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) - ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) - ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) - ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) - ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) - ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) - ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) - ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) - ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) - ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) - ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) - ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) - ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) - ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) - ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) - ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) - ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) - ARM64.HasSVE = isSet(hwCap, hwcap_SVE) - ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) - - // HWCAP2 feature bits - ARM64.HasSVE2 = isSet(hwCap2, hwcap2_SVE2) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go deleted file mode 100644 index 4686c1d54..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && (mips64 || mips64le) - -package cpu - -// HWCAP bits. These are exposed by the Linux kernel 5.4. -const ( - // CPU features - hwcap_MIPS_MSA = 1 << 1 -) - -func doinit() { - // HWCAP feature bits - MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go deleted file mode 100644 index cd63e7335..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x - -package cpu - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go deleted file mode 100644 index 197188e67..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && (ppc64 || ppc64le) - -package cpu - -// HWCAP/HWCAP2 bits. These are exposed by the kernel. 
-const ( - // ISA Level - _PPC_FEATURE2_ARCH_2_07 = 0x80000000 - _PPC_FEATURE2_ARCH_3_00 = 0x00800000 - - // CPU features - _PPC_FEATURE2_DARN = 0x00200000 - _PPC_FEATURE2_SCV = 0x00100000 -) - -func doinit() { - // HWCAP2 feature bits - PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) - PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) - PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) - PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) -} - -func isSet(hwc uint, value uint) bool { - return hwc&value != 0 -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go deleted file mode 100644 index 1517ac61d..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -const ( - // bit mask values from /usr/include/bits/hwcap.h - hwcap_ZARCH = 2 - hwcap_STFLE = 4 - hwcap_MSA = 8 - hwcap_LDISP = 16 - hwcap_EIMM = 32 - hwcap_DFP = 64 - hwcap_ETF3EH = 256 - hwcap_VX = 2048 - hwcap_VXE = 8192 -) - -func initS390Xbase() { - // test HWCAP bit vector - has := func(featureMask uint) bool { - return hwCap&featureMask == featureMask - } - - // mandatory - S390X.HasZARCH = has(hwcap_ZARCH) - - // optional - S390X.HasSTFLE = has(hwcap_STFLE) - S390X.HasLDISP = has(hwcap_LDISP) - S390X.HasEIMM = has(hwcap_EIMM) - S390X.HasETF3EH = has(hwcap_ETF3EH) - S390X.HasDFP = has(hwcap_DFP) - S390X.HasMSA = has(hwcap_MSA) - S390X.HasVX = has(hwcap_VX) - if S390X.HasVX { - S390X.HasVXE = has(hwcap_VXE) - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go deleted file mode 100644 index 558635850..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_loong64.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build loong64 - -package cpu - -const cacheLineSize = 64 - -func initOptions() { -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go deleted file mode 100644 index fedb00cc4..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build mips64 || mips64le - -package cpu - -const cacheLineSize = 32 - -func initOptions() { - options = []option{ - {Name: "msa", Feature: &MIPS64X.HasMSA}, - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go deleted file mode 100644 index ffb4ec7eb..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build mips || mipsle - -package cpu - -const cacheLineSize = 32 - -func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go deleted file mode 100644 index ebfb3fc8e..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "syscall" - "unsafe" -) - -// Minimal copy of functionality from x/sys/unix so the cpu package can call -// sysctl without depending on x/sys/unix. - -const ( - _CTL_QUERY = -2 - - _SYSCTL_VERS_1 = 0x1000000 -) - -var _zero uintptr - -func sysctl(mib []int32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6( - syscall.SYS___SYSCTL, - uintptr(_p0), - uintptr(len(mib)), - uintptr(unsafe.Pointer(old)), - uintptr(unsafe.Pointer(oldlen)), - uintptr(unsafe.Pointer(new)), - uintptr(newlen)) - if errno != 0 { - return errno - } - return nil -} - -type sysctlNode struct { - Flags uint32 - Num int32 - Name [32]int8 - Ver uint32 - __rsvd uint32 - Un [16]byte - _sysctl_size [8]byte - _sysctl_func [8]byte - _sysctl_parent [8]byte - _sysctl_desc [8]byte -} - -func sysctlNodes(mib []int32) ([]sysctlNode, error) { - var olen uintptr - - // Get a list of all sysctl nodes below the given MIB by performing - // a sysctl for the given MIB with CTL_QUERY appended. - mib = append(mib, _CTL_QUERY) - qnode := sysctlNode{Flags: _SYSCTL_VERS_1} - qp := (*byte)(unsafe.Pointer(&qnode)) - sz := unsafe.Sizeof(qnode) - if err := sysctl(mib, nil, &olen, qp, sz); err != nil { - return nil, err - } - - // Now that we know the size, get the actual nodes. - nodes := make([]sysctlNode, olen/sz) - np := (*byte)(unsafe.Pointer(&nodes[0])) - if err := sysctl(mib, np, &olen, qp, sz); err != nil { - return nil, err - } - - return nodes, nil -} - -func nametomib(name string) ([]int32, error) { - // Split name into components. - var parts []string - last := 0 - for i := 0; i < len(name); i++ { - if name[i] == '.' { - parts = append(parts, name[last:i]) - last = i + 1 - } - } - parts = append(parts, name[last:]) - - mib := []int32{} - // Discover the nodes and construct the MIB OID. 
- for partno, part := range parts { - nodes, err := sysctlNodes(mib) - if err != nil { - return nil, err - } - for _, node := range nodes { - n := make([]byte, 0) - for i := range node.Name { - if node.Name[i] != 0 { - n = append(n, byte(node.Name[i])) - } - } - if string(n) == part { - mib = append(mib, int32(node.Num)) - break - } - } - if len(mib) != partno+1 { - return nil, err - } - } - - return mib, nil -} - -// aarch64SysctlCPUID is struct aarch64_sysctl_cpu_id from NetBSD's -type aarch64SysctlCPUID struct { - midr uint64 /* Main ID Register */ - revidr uint64 /* Revision ID Register */ - mpidr uint64 /* Multiprocessor Affinity Register */ - aa64dfr0 uint64 /* A64 Debug Feature Register 0 */ - aa64dfr1 uint64 /* A64 Debug Feature Register 1 */ - aa64isar0 uint64 /* A64 Instruction Set Attribute Register 0 */ - aa64isar1 uint64 /* A64 Instruction Set Attribute Register 1 */ - aa64mmfr0 uint64 /* A64 Memory Model Feature Register 0 */ - aa64mmfr1 uint64 /* A64 Memory Model Feature Register 1 */ - aa64mmfr2 uint64 /* A64 Memory Model Feature Register 2 */ - aa64pfr0 uint64 /* A64 Processor Feature Register 0 */ - aa64pfr1 uint64 /* A64 Processor Feature Register 1 */ - aa64zfr0 uint64 /* A64 SVE Feature ID Register 0 */ - mvfr0 uint32 /* Media and VFP Feature Register 0 */ - mvfr1 uint32 /* Media and VFP Feature Register 1 */ - mvfr2 uint32 /* Media and VFP Feature Register 2 */ - pad uint32 - clidr uint64 /* Cache Level ID Register */ - ctr uint64 /* Cache Type Register */ -} - -func sysctlCPUID(name string) (*aarch64SysctlCPUID, error) { - mib, err := nametomib(name) - if err != nil { - return nil, err - } - - out := aarch64SysctlCPUID{} - n := unsafe.Sizeof(out) - _, _, errno := syscall.Syscall6( - syscall.SYS___SYSCTL, - uintptr(unsafe.Pointer(&mib[0])), - uintptr(len(mib)), - uintptr(unsafe.Pointer(&out)), - uintptr(unsafe.Pointer(&n)), - uintptr(0), - uintptr(0)) - if errno != 0 { - return nil, errno - } - return &out, nil -} - -func doinit() { - cpuid, err := sysctlCPUID("machdep.cpu0.cpu_id") - if err != nil { - setMinimalFeatures() - return - } - parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0) - - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go deleted file mode 100644 index 85b64d5cc..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import ( - "syscall" - "unsafe" -) - -// Minimal copy of functionality from x/sys/unix so the cpu package can call -// sysctl without depending on x/sys/unix. - -const ( - // From OpenBSD's sys/sysctl.h. - _CTL_MACHDEP = 7 - - // From OpenBSD's machine/cpu.h. 
- _CPU_ID_AA64ISAR0 = 2 - _CPU_ID_AA64ISAR1 = 3 -) - -// Implemented in the runtime package (runtime/sys_openbsd3.go) -func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err syscall.Errno) - -//go:linkname syscall_syscall6 syscall.syscall6 - -func sysctl(mib []uint32, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - _, _, errno := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if errno != 0 { - return errno - } - return nil -} - -var libc_sysctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_sysctl sysctl "libc.so" - -func sysctlUint64(mib []uint32) (uint64, bool) { - var out uint64 - nout := unsafe.Sizeof(out) - if err := sysctl(mib, (*byte)(unsafe.Pointer(&out)), &nout, nil, 0); err != nil { - return 0, false - } - return out, true -} - -func doinit() { - setMinimalFeatures() - - // Get ID_AA64ISAR0 and ID_AA64ISAR1 from sysctl. - isar0, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR0}) - if !ok { - return - } - isar1, ok := sysctlUint64([]uint32{_CTL_MACHDEP, _CPU_ID_AA64ISAR1}) - if !ok { - return - } - parseARM64SystemRegisters(isar0, isar1, 0) - - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s deleted file mode 100644 index 054ba05d6..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_openbsd_arm64.s +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 - JMP libc_sysctl(SB) - -GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 -DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go deleted file mode 100644 index e9ecf2a45..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !linux && arm - -package cpu - -func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go deleted file mode 100644 index 5341e7f88..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !linux && !netbsd && !openbsd && arm64 - -package cpu - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go deleted file mode 100644 index 5f8f2419a..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !linux && (mips64 || mips64le) - -package cpu - -func archInit() { - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go deleted file mode 100644 index 89608fba2..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !aix && !linux && (ppc64 || ppc64le) - -package cpu - -func archInit() { - PPC64.IsPOWER8 = true - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go deleted file mode 100644 index 5ab87808f..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !linux && riscv64 - -package cpu - -func archInit() { - Initialized = true -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go deleted file mode 100644 index c14f12b14..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build ppc64 || ppc64le - -package cpu - -const cacheLineSize = 128 - -func initOptions() { - options = []option{ - {Name: "darn", Feature: &PPC64.HasDARN}, - {Name: "scv", Feature: &PPC64.HasSCV}, - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go deleted file mode 100644 index 7f0c79c00..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build riscv64 - -package cpu - -const cacheLineSize = 64 - -func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_s390x.go deleted file mode 100644 index 5881b8833..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_s390x.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cpu - -const cacheLineSize = 256 - -func initOptions() { - options = []option{ - {Name: "zarch", Feature: &S390X.HasZARCH, Required: true}, - {Name: "stfle", Feature: &S390X.HasSTFLE, Required: true}, - {Name: "ldisp", Feature: &S390X.HasLDISP, Required: true}, - {Name: "eimm", Feature: &S390X.HasEIMM, Required: true}, - {Name: "dfp", Feature: &S390X.HasDFP}, - {Name: "etf3eh", Feature: &S390X.HasETF3EH}, - {Name: "msa", Feature: &S390X.HasMSA}, - {Name: "aes", Feature: &S390X.HasAES}, - {Name: "aescbc", Feature: &S390X.HasAESCBC}, - {Name: "aesctr", Feature: &S390X.HasAESCTR}, - {Name: "aesgcm", Feature: &S390X.HasAESGCM}, - {Name: "ghash", Feature: &S390X.HasGHASH}, - {Name: "sha1", Feature: &S390X.HasSHA1}, - {Name: "sha256", Feature: &S390X.HasSHA256}, - {Name: "sha3", Feature: &S390X.HasSHA3}, - {Name: "sha512", Feature: &S390X.HasSHA512}, - {Name: "vx", Feature: &S390X.HasVX}, - {Name: "vxe", Feature: &S390X.HasVXE}, - } -} - -// bitIsSet reports whether the bit at index is set. The bit index -// is in big endian order, so bit index 0 is the leftmost bit. -func bitIsSet(bits []uint64, index uint) bool { - return bits[index/64]&((1<<63)>>(index%64)) != 0 -} - -// facility is a bit index for the named facility. -type facility uint8 - -const ( - // mandatory facilities - zarch facility = 1 // z architecture mode is active - stflef facility = 7 // store-facility-list-extended - ldisp facility = 18 // long-displacement - eimm facility = 21 // extended-immediate - - // miscellaneous facilities - dfp facility = 42 // decimal-floating-point - etf3eh facility = 30 // extended-translation 3 enhancement - - // cryptography facilities - msa facility = 17 // message-security-assist - msa3 facility = 76 // message-security-assist extension 3 - msa4 facility = 77 // message-security-assist extension 4 - msa5 facility = 57 // message-security-assist extension 5 - msa8 facility = 146 // message-security-assist extension 8 - msa9 facility = 155 // message-security-assist extension 9 - - // vector facilities - vx facility = 129 // vector facility - vxe facility = 135 // vector-enhancements 1 - vxe2 facility = 148 // vector-enhancements 2 -) - -// facilityList contains the result of an STFLE call. -// Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type facilityList struct { - bits [4]uint64 -} - -// Has reports whether the given facilities are present. -func (s *facilityList) Has(fs ...facility) bool { - if len(fs) == 0 { - panic("no facility bits provided") - } - for _, f := range fs { - if !bitIsSet(s.bits[:], uint(f)) { - return false - } - } - return true -} - -// function is the code for the named cryptographic function. -type function uint8 - -const ( - // KM{,A,C,CTR} function codes - aes128 function = 18 // AES-128 - aes192 function = 19 // AES-192 - aes256 function = 20 // AES-256 - - // K{I,L}MD function codes - sha1 function = 1 // SHA-1 - sha256 function = 2 // SHA-256 - sha512 function = 3 // SHA-512 - sha3_224 function = 32 // SHA3-224 - sha3_256 function = 33 // SHA3-256 - sha3_384 function = 34 // SHA3-384 - sha3_512 function = 35 // SHA3-512 - shake128 function = 36 // SHAKE-128 - shake256 function = 37 // SHAKE-256 - - // KLMD function codes - ghash function = 65 // GHASH -) - -// queryResult contains the result of a Query function -// call. Bits are numbered in big endian order so the -// leftmost bit (the MSB) is at index 0. -type queryResult struct { - bits [2]uint64 -} - -// Has reports whether the given functions are present. 
-func (q *queryResult) Has(fns ...function) bool { - if len(fns) == 0 { - panic("no function codes provided") - } - for _, f := range fns { - if !bitIsSet(q.bits[:], uint(f)) { - return false - } - } - return true -} - -func doinit() { - initS390Xbase() - - // We need implementations of stfle, km and so on - // to detect cryptographic features. - if !haveAsmFunctions() { - return - } - - // optional cryptographic functions - if S390X.HasMSA { - aes := []function{aes128, aes192, aes256} - - // cipher message - km, kmc := kmQuery(), kmcQuery() - S390X.HasAES = km.Has(aes...) - S390X.HasAESCBC = kmc.Has(aes...) - if S390X.HasSTFLE { - facilities := stfle() - if facilities.Has(msa4) { - kmctr := kmctrQuery() - S390X.HasAESCTR = kmctr.Has(aes...) - } - if facilities.Has(msa8) { - kma := kmaQuery() - S390X.HasAESGCM = kma.Has(aes...) - } - } - - // compute message digest - kimd := kimdQuery() // intermediate (no padding) - klmd := klmdQuery() // last (padding) - S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1) - S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256) - S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512) - S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist - sha3 := []function{ - sha3_224, sha3_256, sha3_384, sha3_512, - shake128, shake256, - } - S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...) - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s deleted file mode 100644 index 1fb4b7013..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_s390x.s +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc - -#include "textflag.h" - -// func stfle() facilityList -TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32 - MOVD $ret+0(FP), R1 - MOVD $3, R0 // last doubleword index to store - XC $32, (R1), (R1) // clear 4 doublewords (32 bytes) - WORD $0xb2b01000 // store facility list extended (STFLE) - RET - -// func kmQuery() queryResult -TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KM-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92E0024 // cipher message (KM) - RET - -// func kmcQuery() queryResult -TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMC-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92F0024 // cipher message with chaining (KMC) - RET - -// func kmctrQuery() queryResult -TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMCTR-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB92D4024 // cipher message with counter (KMCTR) - RET - -// func kmaQuery() queryResult -TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KMA-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xb9296024 // cipher message with authentication (KMA) - RET - -// func kimdQuery() queryResult -TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KIMD-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB93E0024 // compute intermediate message digest (KIMD) - RET - -// func klmdQuery() queryResult -TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16 - MOVD $0, R0 // set function code to 0 (KLMD-Query) - MOVD $ret+0(FP), R1 // address of 16-byte return value - WORD $0xB93F0024 // compute last message digest (KLMD) - RET 
diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go deleted file mode 100644 index 384787ea3..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_wasm.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build wasm - -package cpu - -// We're compiling the cpu package for an unknown (software-abstracted) CPU. -// Make CacheLinePad an empty struct and hope that the usual struct alignment -// rules are good enough. - -const cacheLineSize = 0 - -func initOptions() {} - -func archInit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go deleted file mode 100644 index c29f5e4c5..000000000 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build 386 || amd64 || amd64p32 - -package cpu - -import "runtime" - -const cacheLineSize = 64 - -func initOptions() { - options = []option{ - {Name: "adx", Feature: &X86.HasADX}, - {Name: "aes", Feature: &X86.HasAES}, - {Name: "avx", Feature: &X86.HasAVX}, - {Name: "avx2", Feature: &X86.HasAVX2}, - {Name: "avx512", Feature: &X86.HasAVX512}, - {Name: "avx512f", Feature: &X86.HasAVX512F}, - {Name: "avx512cd", Feature: &X86.HasAVX512CD}, - {Name: "avx512er", Feature: &X86.HasAVX512ER}, - {Name: "avx512pf", Feature: &X86.HasAVX512PF}, - {Name: "avx512vl", Feature: &X86.HasAVX512VL}, - {Name: "avx512bw", Feature: &X86.HasAVX512BW}, - {Name: "avx512dq", Feature: &X86.HasAVX512DQ}, - {Name: "avx512ifma", Feature: &X86.HasAVX512IFMA}, - {Name: "avx512vbmi", Feature: &X86.HasAVX512VBMI}, - {Name: "avx512vnniw", Feature: &X86.HasAVX5124VNNIW}, - {Name: "avx5124fmaps", Feature: &X86.HasAVX5124FMAPS}, - {Name: "avx512vpopcntdq", Feature: &X86.HasAVX512VPOPCNTDQ}, - {Name: "avx512vpclmulqdq", Feature: &X86.HasAVX512VPCLMULQDQ}, - {Name: "avx512vnni", Feature: &X86.HasAVX512VNNI}, - {Name: "avx512gfni", Feature: &X86.HasAVX512GFNI}, - {Name: "avx512vaes", Feature: &X86.HasAVX512VAES}, - {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, - {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, - {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, - {Name: "amxtile", Feature: &X86.HasAMXTile}, - {Name: "amxint8", Feature: &X86.HasAMXInt8}, - {Name: "amxbf16", Feature: &X86.HasAMXBF16}, - {Name: "bmi1", Feature: &X86.HasBMI1}, - {Name: "bmi2", Feature: &X86.HasBMI2}, - {Name: "cx16", Feature: &X86.HasCX16}, - {Name: "erms", Feature: &X86.HasERMS}, - {Name: "fma", Feature: &X86.HasFMA}, - {Name: "osxsave", Feature: &X86.HasOSXSAVE}, - {Name: "pclmulqdq", Feature: &X86.HasPCLMULQDQ}, - {Name: "popcnt", Feature: &X86.HasPOPCNT}, - {Name: "rdrand", Feature: &X86.HasRDRAND}, - {Name: "rdseed", Feature: &X86.HasRDSEED}, - {Name: "sse3", Feature: &X86.HasSSE3}, - {Name: "sse41", Feature: &X86.HasSSE41}, - {Name: "sse42", Feature: &X86.HasSSE42}, - {Name: "ssse3", Feature: &X86.HasSSSE3}, - - // These capabilities should always be enabled on amd64: - {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, - } -} - -func archInit() { - - Initialized = true - - maxID, _, _, _ := cpuid(0, 0) - - if maxID < 1 { - return - } - - _, _, ecx1, edx1 := cpuid(1, 0) - X86.HasSSE2 = isSet(26, edx1) - - X86.HasSSE3 = isSet(0, ecx1) - X86.HasPCLMULQDQ = 
isSet(1, ecx1) - X86.HasSSSE3 = isSet(9, ecx1) - X86.HasFMA = isSet(12, ecx1) - X86.HasCX16 = isSet(13, ecx1) - X86.HasSSE41 = isSet(19, ecx1) - X86.HasSSE42 = isSet(20, ecx1) - X86.HasPOPCNT = isSet(23, ecx1) - X86.HasAES = isSet(25, ecx1) - X86.HasOSXSAVE = isSet(27, ecx1) - X86.HasRDRAND = isSet(30, ecx1) - - var osSupportsAVX, osSupportsAVX512 bool - // For XGETBV, OSXSAVE bit is required and sufficient. - if X86.HasOSXSAVE { - eax, _ := xgetbv() - // Check if XMM and YMM registers have OS support. - osSupportsAVX = isSet(1, eax) && isSet(2, eax) - - if runtime.GOOS == "darwin" { - // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers. - // Since users can't rely on mask register contents, let's not advertise AVX-512 support. - // See issue 49233. - osSupportsAVX512 = false - } else { - // Check if OPMASK and ZMM registers have OS support. - osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax) - } - } - - X86.HasAVX = isSet(28, ecx1) && osSupportsAVX - - if maxID < 7 { - return - } - - _, ebx7, ecx7, edx7 := cpuid(7, 0) - X86.HasBMI1 = isSet(3, ebx7) - X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX - X86.HasBMI2 = isSet(8, ebx7) - X86.HasERMS = isSet(9, ebx7) - X86.HasRDSEED = isSet(18, ebx7) - X86.HasADX = isSet(19, ebx7) - - X86.HasAVX512 = isSet(16, ebx7) && osSupportsAVX512 // Because avx-512 foundation is the core required extension - if X86.HasAVX512 { - X86.HasAVX512F = true - X86.HasAVX512CD = isSet(28, ebx7) - X86.HasAVX512ER = isSet(27, ebx7) - X86.HasAVX512PF = isSet(26, ebx7) - X86.HasAVX512VL = isSet(31, ebx7) - X86.HasAVX512BW = isSet(30, ebx7) - X86.HasAVX512DQ = isSet(17, ebx7) - X86.HasAVX512IFMA = isSet(21, ebx7) - X86.HasAVX512VBMI = isSet(1, ecx7) - X86.HasAVX5124VNNIW = isSet(2, edx7) - X86.HasAVX5124FMAPS = isSet(3, edx7) - X86.HasAVX512VPOPCNTDQ = isSet(14, ecx7) - X86.HasAVX512VPCLMULQDQ = isSet(10, ecx7) - X86.HasAVX512VNNI = isSet(11, ecx7) - X86.HasAVX512GFNI = isSet(8, ecx7) - X86.HasAVX512VAES = isSet(9, ecx7) - X86.HasAVX512VBMI2 = isSet(6, ecx7) - X86.HasAVX512BITALG = isSet(12, ecx7) - - eax71, _, _, _ := cpuid(7, 1) - X86.HasAVX512BF16 = isSet(5, eax71) - } - - X86.HasAMXTile = isSet(24, edx7) - X86.HasAMXInt8 = isSet(25, edx7) - X86.HasAMXBF16 = isSet(22, edx7) -} - -func isSet(bitpos uint, value uint32) bool { - return value&(1<> 63)) -) - -// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 -// These are initialized in cpu_$GOARCH.go -// and should not be changed after they are initialized. -var hwCap uint -var hwCap2 uint - -func readHWCAP() error { - // For Go 1.21+, get auxv from the Go runtime. - if a := getAuxv(); len(a) > 0 { - for len(a) >= 2 { - tag, val := a[0], uint(a[1]) - a = a[2:] - switch tag { - case _AT_HWCAP: - hwCap = val - case _AT_HWCAP2: - hwCap2 = val - } - } - return nil - } - - buf, err := os.ReadFile(procAuxv) - if err != nil { - // e.g. on android /proc/self/auxv is not accessible, so silently - // ignore the error and leave Initialized = false. On some - // architectures (e.g. arm64) doinit() implements a fallback - // readout and will set Initialized = true again. 
- return err - } - bo := hostByteOrder() - for len(buf) >= 2*(uintSize/8) { - var tag, val uint - switch uintSize { - case 32: - tag = uint(bo.Uint32(buf[0:])) - val = uint(bo.Uint32(buf[4:])) - buf = buf[8:] - case 64: - tag = uint(bo.Uint64(buf[0:])) - val = uint(bo.Uint64(buf[8:])) - buf = buf[16:] - } - switch tag { - case _AT_HWCAP: - hwCap = val - case _AT_HWCAP2: - hwCap2 = val - } - } - return nil -} diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go deleted file mode 100644 index 762b63d68..000000000 --- a/vendor/golang.org/x/sys/cpu/parse.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cpu - -import "strconv" - -// parseRelease parses a dot-separated version number. It follows the semver -// syntax, but allows the minor and patch versions to be elided. -// -// This is a copy of the Go runtime's parseRelease from -// https://golang.org/cl/209597. -func parseRelease(rel string) (major, minor, patch int, ok bool) { - // Strip anything after a dash or plus. - for i := 0; i < len(rel); i++ { - if rel[i] == '-' || rel[i] == '+' { - rel = rel[:i] - break - } - } - - next := func() (int, bool) { - for i := 0; i < len(rel); i++ { - if rel[i] == '.' { - ver, err := strconv.Atoi(rel[:i]) - rel = rel[i+1:] - return ver, err == nil - } - } - ver, err := strconv.Atoi(rel) - rel = "" - return ver, err == nil - } - if major, ok = next(); !ok || rel == "" { - return - } - if minor, ok = next(); !ok || rel == "" { - return - } - patch, ok = next() - return -} diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go deleted file mode 100644 index 4cd64c704..000000000 --- a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && arm64 - -package cpu - -import ( - "errors" - "io" - "os" - "strings" -) - -func readLinuxProcCPUInfo() error { - f, err := os.Open("/proc/cpuinfo") - if err != nil { - return err - } - defer f.Close() - - var buf [1 << 10]byte // enough for first CPU - n, err := io.ReadFull(f, buf[:]) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - in := string(buf[:n]) - const features = "\nFeatures : " - i := strings.Index(in, features) - if i == -1 { - return errors.New("no CPU features found") - } - in = in[i+len(features):] - if i := strings.Index(in, "\n"); i != -1 { - in = in[:i] - } - m := map[string]*bool{} - - initOptions() // need it early here; it's harmless to call twice - for _, o := range options { - m[o.Name] = o.Feature - } - // The EVTSTRM field has alias "evstrm" in Go, but Linux calls it "evtstrm". - m["evtstrm"] = &ARM64.HasEVTSTRM - - for _, f := range strings.Fields(in) { - if p, ok := m[f]; ok { - *p = true - } - } - return nil -} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/vendor/golang.org/x/sys/cpu/runtime_auxv.go deleted file mode 100644 index 5f92ac9a2..000000000 --- a/vendor/golang.org/x/sys/cpu/runtime_auxv.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package cpu - -// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init) -// on platforms that use auxv. -var getAuxvFn func() []uintptr - -func getAuxv() []uintptr { - if getAuxvFn == nil { - return nil - } - return getAuxvFn() -} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go deleted file mode 100644 index 4c9788ea8..000000000 --- a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.21 - -package cpu - -import ( - _ "unsafe" // for linkname -) - -//go:linkname runtime_getAuxv runtime.getAuxv -func runtime_getAuxv() []uintptr - -func init() { - getAuxvFn = runtime_getAuxv -} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go deleted file mode 100644 index 1b9ccb091..000000000 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Recreate a getsystemcfg syscall handler instead of -// using the one provided by x/sys/unix to avoid having -// the dependency between them. (See golang.org/issue/32102) -// Moreover, this file will be used during the building of -// gccgo's libgo and thus must not used a CGo method. - -//go:build aix && gccgo - -package cpu - -import ( - "syscall" -) - -//extern getsystemcfg -func gccgoGetsystemcfg(label uint32) (r uint64) - -func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { - r1 = uintptr(gccgoGetsystemcfg(uint32(label))) - e1 = syscall.GetErrno() - return -} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go deleted file mode 100644 index e8b6cdbe9..000000000 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Minimal copy of x/sys/unix so the cpu package can make a -// system call on AIX without depending on x/sys/unix. -// (See golang.org/issue/32102) - -//go:build aix && ppc64 && gc - -package cpu - -import ( - "syscall" - "unsafe" -) - -//go:cgo_import_dynamic libc_getsystemcfg getsystemcfg "libc.a/shr_64.o" - -//go:linkname libc_getsystemcfg libc_getsystemcfg - -type syscallFunc uintptr - -var libc_getsystemcfg syscallFunc - -type errno = syscall.Errno - -// Implemented in runtime/syscall_aix.go. 
-func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno)
-func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err errno)
-
-func callgetsystemcfg(label int) (r1 uintptr, e1 errno) {
-	r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_getsystemcfg)), 1, uintptr(label), 0, 0, 0, 0, 0)
-	return
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 66eae1977..05eb64e85 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -55,7 +55,7 @@ github.com/alecthomas/template/parse
 # github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
 ## explicit; go 1.15
 github.com/alecthomas/units
-# github.com/aws/aws-sdk-go v1.55.1
+# github.com/aws/aws-sdk-go v1.55.5
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn
@@ -151,9 +151,6 @@ github.com/docker/go-units
 github.com/felixge/httpsnoop
 # github.com/frankban/quicktest v1.14.6
 ## explicit; go 1.13
-# github.com/go-ini/ini v1.67.0
-## explicit
-github.com/go-ini/ini
 # github.com/go-logr/logr v1.4.1
 ## explicit; go 1.18
 github.com/go-logr/logr
@@ -191,17 +188,6 @@ github.com/klauspost/compress/zstd/internal/xxhash
 # github.com/klauspost/pgzip v1.2.6
 ## explicit
 github.com/klauspost/pgzip
-# github.com/minio/minio-go v6.0.14+incompatible
-## explicit
-github.com/minio/minio-go
-github.com/minio/minio-go/pkg/credentials
-github.com/minio/minio-go/pkg/encrypt
-github.com/minio/minio-go/pkg/s3signer
-github.com/minio/minio-go/pkg/s3utils
-github.com/minio/minio-go/pkg/set
-# github.com/mitchellh/go-homedir v1.1.0
-## explicit
-github.com/mitchellh/go-homedir
 # github.com/moby/docker-image-spec v1.3.1
 ## explicit; go 1.18
 github.com/moby/docker-image-spec/specs-go/v1
@@ -338,8 +324,6 @@ go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
 # golang.org/x/crypto v0.25.0
 ## explicit; go 1.20
-golang.org/x/crypto/argon2
-golang.org/x/crypto/blake2b
 golang.org/x/crypto/ocsp
 golang.org/x/crypto/pbkdf2
 golang.org/x/crypto/scrypt
@@ -357,14 +341,12 @@ golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
 golang.org/x/net/idna
-golang.org/x/net/publicsuffix
 # golang.org/x/sync v0.7.0
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
 golang.org/x/sync/singleflight
 # golang.org/x/sys v0.22.0
 ## explicit; go 1.18
-golang.org/x/sys/cpu
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows

From 7dd239aa33afe9f47d38df0eee4a861d426aae06 Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Mon, 19 Aug 2024 15:54:57 +0200
Subject: [PATCH 192/203] [PBM-1239] keep storage.s3.provider for compatibility

---
 pbm/storage/s3/s3.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go
index f3c2aab6e..d188692bb 100644
--- a/pbm/storage/s3/s3.go
+++ b/pbm/storage/s3/s3.go
@@ -41,6 +41,7 @@ const (
 
 //nolint:lll
 type Config struct {
+	Provider    string `bson:"provider,omitempty" json:"provider,omitempty" yaml:"provider,omitempty"`
 	Region      string `bson:"region" json:"region" yaml:"region"`
 	EndpointURL string `bson:"endpointUrl,omitempty" json:"endpointUrl" yaml:"endpointUrl,omitempty"`
 	ForcePathStyle *bool `bson:"forcePathStyle,omitempty" json:"forcePathStyle,omitempty" yaml:"forcePathStyle,omitempty"`

From 2ba2ac8a6d0c6574b3da30bb215e07136f1a8640 Mon Sep 17 00:00:00 2001
From: Dmytro Zghoba
Date: Tue, 20 Aug 2024 18:17:36 +0200
Subject: [PATCH 193/203] format yaml file

---
 e2e-tests/docker/conf/fs-disttxn-4x.yaml | 7 +++----
 e2e-tests/docker/conf/fs-disttxn-50.yaml | 7 +++----
e2e-tests/docker/conf/fs.yaml | 7 +++---- e2e-tests/docker/conf/minio.yaml | 18 +++++++++--------- 4 files changed, 18 insertions(+), 21 deletions(-) diff --git a/e2e-tests/docker/conf/fs-disttxn-4x.yaml b/e2e-tests/docker/conf/fs-disttxn-4x.yaml index cd33c7ec5..1a03e1063 100644 --- a/e2e-tests/docker/conf/fs-disttxn-4x.yaml +++ b/e2e-tests/docker/conf/fs-disttxn-4x.yaml @@ -1,5 +1,4 @@ storage: - type: filesystem - filesystem: - path: /opt/backups/pbm_disttxn_4x - + type: filesystem + filesystem: + path: /opt/backups/pbm_disttxn_4x diff --git a/e2e-tests/docker/conf/fs-disttxn-50.yaml b/e2e-tests/docker/conf/fs-disttxn-50.yaml index 7932ad142..b0b73ca60 100644 --- a/e2e-tests/docker/conf/fs-disttxn-50.yaml +++ b/e2e-tests/docker/conf/fs-disttxn-50.yaml @@ -1,5 +1,4 @@ storage: - type: filesystem - filesystem: - path: /opt/backups/pbm_disttxn_50 - + type: filesystem + filesystem: + path: /opt/backups/pbm_disttxn_50 diff --git a/e2e-tests/docker/conf/fs.yaml b/e2e-tests/docker/conf/fs.yaml index cbe6bbd91..6d1865d31 100644 --- a/e2e-tests/docker/conf/fs.yaml +++ b/e2e-tests/docker/conf/fs.yaml @@ -1,5 +1,4 @@ storage: - type: filesystem - filesystem: - path: /opt/backups/pbm - + type: filesystem + filesystem: + path: /opt/backups/pbm diff --git a/e2e-tests/docker/conf/minio.yaml b/e2e-tests/docker/conf/minio.yaml index d750e076c..28ad5f133 100644 --- a/e2e-tests/docker/conf/minio.yaml +++ b/e2e-tests/docker/conf/minio.yaml @@ -1,10 +1,10 @@ storage: - type: s3 - s3: - endpointUrl: http://minio:9000 - bucket: bcp - prefix: pbme2etest - credentials: - access-key-id: "minio1234" - secret-access-key: "minio1234" - \ No newline at end of file + type: s3 + s3: + endpointUrl: http://minio:9000 + bucket: bcp + prefix: pbme2etest + credentials: + access-key-id: "minio1234" + secret-access-key: "minio1234" + From 0ec61e10759c1b29a58ed5933245b40adcb40584 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 20 Aug 2024 18:18:52 +0200 Subject: [PATCH 194/203] update minio/server and minio/mc versions for e2e-tests --- e2e-tests/docker/docker-compose-remapping.yaml | 4 ++-- e2e-tests/docker/docker-compose-rs.yaml | 6 +++--- e2e-tests/docker/docker-compose-single.yaml | 4 ++-- e2e-tests/docker/docker-compose.yaml | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/e2e-tests/docker/docker-compose-remapping.yaml b/e2e-tests/docker/docker-compose-remapping.yaml index 886458e2d..ab6101633 100644 --- a/e2e-tests/docker/docker-compose-remapping.yaml +++ b/e2e-tests/docker/docker-compose-remapping.yaml @@ -110,7 +110,7 @@ services: - NET_ADMIN minio: - image: minio/minio:RELEASE.2022-08-08T18-34-09Z + image: minio/minio:RELEASE.2024-08-17T01-24-54Z hostname: minio # ports: # - "9000:9000" @@ -121,7 +121,7 @@ services: - "MINIO_SECRET_KEY=minio1234" command: server /backups createbucket: - image: minio/mc + image: minio/mc:RELEASE.2024-08-17T11-33-50Z depends_on: - minio entrypoint: > diff --git a/e2e-tests/docker/docker-compose-rs.yaml b/e2e-tests/docker/docker-compose-rs.yaml index 2701aee29..5f9120fa9 100644 --- a/e2e-tests/docker/docker-compose-rs.yaml +++ b/e2e-tests/docker/docker-compose-rs.yaml @@ -157,10 +157,10 @@ services: - data-rs103:/data/db minio: - image: minio/minio:RELEASE.2022-08-08T18-34-09Z + image: minio/minio:RELEASE.2024-08-17T01-24-54Z hostname: minio ports: - - "9001:9000" + - "9000:9000" volumes: - backups:/backups environment: @@ -168,7 +168,7 @@ services: - "MINIO_SECRET_KEY=minio1234" command: server /backups createbucket: - image: minio/mc + image: 
minio/mc:RELEASE.2024-08-17T11-33-50Z depends_on: - minio entrypoint: > diff --git a/e2e-tests/docker/docker-compose-single.yaml b/e2e-tests/docker/docker-compose-single.yaml index 62f25d5c2..b6530e817 100644 --- a/e2e-tests/docker/docker-compose-single.yaml +++ b/e2e-tests/docker/docker-compose-single.yaml @@ -66,7 +66,7 @@ services: - NET_ADMIN minio: - image: minio/minio:RELEASE.2022-08-08T18-34-09Z + image: minio/minio:RELEASE.2024-08-17T01-24-54Z hostname: minio # ports: # - "9000:9000" @@ -77,7 +77,7 @@ services: - "MINIO_SECRET_KEY=minio1234" command: server /backups createbucket: - image: minio/mc + image: minio/mc:RELEASE.2024-08-17T11-33-50Z depends_on: - minio entrypoint: > diff --git a/e2e-tests/docker/docker-compose.yaml b/e2e-tests/docker/docker-compose.yaml index 90a728455..474f45bca 100644 --- a/e2e-tests/docker/docker-compose.yaml +++ b/e2e-tests/docker/docker-compose.yaml @@ -406,7 +406,7 @@ services: - rs203 minio: - image: minio/minio:RELEASE.2022-08-08T18-34-09Z + image: minio/minio:RELEASE.2024-08-17T01-24-54Z hostname: minio # ports: # - "9000:9000" @@ -417,7 +417,7 @@ services: - "MINIO_SECRET_KEY=minio1234" command: server /backups createbucket: - image: minio/mc + image: minio/mc:RELEASE.2024-08-17T11-33-50Z depends_on: - minio entrypoint: > From 51620cf82944f4e5897879dd3a708636c61951e7 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 20 Aug 2024 18:19:11 +0200 Subject: [PATCH 195/203] fix case inconsistency --- e2e-tests/docker/pbm.dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e-tests/docker/pbm.dockerfile b/e2e-tests/docker/pbm.dockerfile index 4583659ee..31364539e 100644 --- a/e2e-tests/docker/pbm.dockerfile +++ b/e2e-tests/docker/pbm.dockerfile @@ -1,9 +1,9 @@ ARG MONGODB_VERSION=5.0 ARG MONGODB_IMAGE=percona/percona-server-mongodb -FROM ${MONGODB_IMAGE}:${MONGODB_VERSION}-multi as mongo_image +FROM ${MONGODB_IMAGE}:${MONGODB_VERSION}-multi AS mongo_image -FROM oraclelinux:8 as base-build +FROM oraclelinux:8 AS base-build WORKDIR /build RUN mkdir -p /data/db From 28c39ff2df8425df23dcbd82b48973de2fe4d485 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 20 Aug 2024 18:19:39 +0200 Subject: [PATCH 196/203] [PBM-1239] use ListObjectsV2 api --- pbm/storage/s3/s3.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pbm/storage/s3/s3.go b/pbm/storage/s3/s3.go index d188692bb..4ce28b5e6 100644 --- a/pbm/storage/s3/s3.go +++ b/pbm/storage/s3/s3.go @@ -400,7 +400,7 @@ func (s *S3) List(prefix, suffix string) ([]storage.FileInfo, error) { prfx += "/" } - lparams := &s3.ListObjectsInput{ + lparams := &s3.ListObjectsV2Input{ Bucket: aws.String(s.opts.Bucket), } @@ -409,8 +409,8 @@ func (s *S3) List(prefix, suffix string) ([]storage.FileInfo, error) { } var files []storage.FileInfo - err := s.s3s.ListObjectsPages(lparams, - func(page *s3.ListObjectsOutput, lastPage bool) bool { + err := s.s3s.ListObjectsV2Pages(lparams, + func(page *s3.ListObjectsV2Output, lastPage bool) bool { for _, o := range page.Contents { f := aws.StringValue(o.Key) f = strings.TrimPrefix(f, aws.StringValue(lparams.Prefix)) From 5434a28401175f946bb977e4895837ceabdd2c9e Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Tue, 20 Aug 2024 18:20:48 +0200 Subject: [PATCH 197/203] [PBM-1239] use storage.Storage interface in tests allows to perform checks for different storage types --- e2e-tests/docker/conf/minio.yaml | 1 + .../tests/sharded/test_backup_cancellation.go | 69 ++++++------------- .../pkg/tests/sharded/test_delete_backup.go 
| 54 +++------------ 3 files changed, 29 insertions(+), 95 deletions(-) diff --git a/e2e-tests/docker/conf/minio.yaml b/e2e-tests/docker/conf/minio.yaml index 28ad5f133..517fcee57 100644 --- a/e2e-tests/docker/conf/minio.yaml +++ b/e2e-tests/docker/conf/minio.yaml @@ -2,6 +2,7 @@ storage: type: s3 s3: endpointUrl: http://minio:9000 + region: us-east-1 bucket: bcp prefix: pbme2etest credentials: diff --git a/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go b/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go index 1adabecc1..03dfc9903 100644 --- a/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go +++ b/e2e-tests/pkg/tests/sharded/test_backup_cancellation.go @@ -1,24 +1,18 @@ package sharded import ( + "bytes" "context" "log" - "net/url" "os" "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - awsS3 "github.com/aws/aws-sdk-go/service/s3" - - "gopkg.in/yaml.v2" - "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/errors" - "github.com/percona/percona-backup-mongodb/pbm/storage/s3" + "github.com/percona/percona-backup-mongodb/pbm/storage" + "github.com/percona/percona-backup-mongodb/pbm/util" ) func (c *Cluster) BackupCancellation(storage string) { @@ -55,62 +49,39 @@ func (c *Cluster) BackupCancellation(storage string) { func checkNoBackupFiles(backupName, conf string) { log.Println("check no artifacts left for backup", backupName) - buf, err := os.ReadFile(conf) - if err != nil { - log.Fatalln("Error: unable to read config file:", err) - } - var cfg config.Config - err = yaml.UnmarshalStrict(buf, &cfg) + files, err := listAllFiles(conf) if err != nil { - log.Fatalln("Error: unmarshal yaml:", err) + log.Fatalln("ERROR: list files:", err) } - stg := cfg.Storage - - endopintURL := awsurl - if stg.S3.EndpointURL != "" { - eu, err := url.Parse(stg.S3.EndpointURL) - if err != nil { - log.Fatalln("Error: parse EndpointURL:", err) + for _, file := range files { + if strings.Contains(file.Name, backupName) { + log.Fatalln("ERROR: failed to delete lefover", file.Name) } - endopintURL = eu.Host } +} - ss, err := newS3Client(endopintURL, stg.S3.Region, &stg.S3.Credentials) +func listAllFiles(confFilepath string) ([]storage.FileInfo, error) { + buf, err := os.ReadFile(confFilepath) if err != nil { - log.Fatalf("create S3 client: %v", err) + return nil, errors.Wrap(err, "read config file") } - res, err := ss.ListObjectsV2(&awsS3.ListObjectsV2Input{ - Bucket: &stg.S3.Bucket, - Prefix: &stg.S3.Prefix, - }) + cfg, err := config.Parse(bytes.NewBuffer(buf)) if err != nil { - log.Fatalf("list files on S3: %v", err) + return nil, errors.Wrap(err, "parse config") } - for _, object := range res.Contents { - s := object.String() - if strings.Contains(s, backupName) { - log.Fatalln("Error: failed to delete lefover", object.Key) - } + stg, err := util.StorageFromConfig(&cfg.Storage, nil) + if err != nil { + return nil, errors.Wrap(err, "storage from config") } -} -func newS3Client(uri, region string, creds *s3.Credentials) (*awsS3.S3, error) { - sess, err := session.NewSession(&aws.Config{ - Region: ®ion, - Endpoint: &uri, - Credentials: credentials.NewStaticCredentials( - creds.AccessKeyID, - creds.SecretAccessKey, - creds.SessionToken, - ), - }) + files, err := stg.List("", "") if err != nil { - return nil, errors.Wrap(err, "create AWS session") + return nil, errors.Wrap(err, "list files") } - return 
awsS3.New(sess), nil + return files, nil } diff --git a/e2e-tests/pkg/tests/sharded/test_delete_backup.go b/e2e-tests/pkg/tests/sharded/test_delete_backup.go index 217f35096..78ff7994e 100644 --- a/e2e-tests/pkg/tests/sharded/test_delete_backup.go +++ b/e2e-tests/pkg/tests/sharded/test_delete_backup.go @@ -4,15 +4,10 @@ import ( "context" "fmt" "log" - "net/url" "os" "strings" "time" - awsS3 "github.com/aws/aws-sdk-go/service/s3" - "gopkg.in/yaml.v2" - - "github.com/percona/percona-backup-mongodb/pbm/config" "github.com/percona/percona-backup-mongodb/pbm/ctrl" "github.com/percona/percona-backup-mongodb/pbm/defs" "github.com/percona/percona-backup-mongodb/pbm/lock" @@ -165,66 +160,33 @@ func (c *Cluster) BackupDelete(storage string) { checkData() } -const awsurl = "s3.amazonaws.com" - // checkArtefacts checks if all backups artifacts removed // except for the shouldStay func checkArtefacts(conf string, shouldStay map[string]struct{}) { log.Println("check all artifacts deleted excepts backup's", shouldStay) - buf, err := os.ReadFile(conf) - if err != nil { - log.Fatalln("ERROR: unable to read config file:", err) - } - var cfg config.Config - err = yaml.UnmarshalStrict(buf, &cfg) + files, err := listAllFiles(conf) if err != nil { - log.Fatalln("ERROR: unmarshal yaml:", err) + log.Fatalln("ERROR: list files:", err) } - stg := cfg.Storage - - if stg.Type == "azure" || stg.Type == "filesystem" { - return - } - - endopintURL := awsurl - if stg.S3.EndpointURL != "" { - eu, err := url.Parse(stg.S3.EndpointURL) - if err != nil { - log.Fatalln("ERROR: parse EndpointURL:", err) + for _, file := range files { + if strings.Contains(file.Name, defs.StorInitFile) { + continue } - endopintURL = eu.Host - } - - ss, err := newS3Client(endopintURL, stg.S3.Region, &stg.S3.Credentials) - if err != nil { - log.Fatalf("create S3 client: %v", err) - } - - res, err := ss.ListObjectsV2(&awsS3.ListObjectsV2Input{ - Bucket: &stg.S3.Bucket, - Prefix: &stg.S3.Prefix, - }) - if err != nil { - log.Fatalf("list files on S3: %v", err) - } - - for _, object := range res.Contents { - objectKey := *object.Key - if strings.Contains(objectKey, defs.StorInitFile) || strings.Contains(objectKey, "/pbmPitr/") { + if strings.Contains(file.Name, defs.PITRfsPrefix) { continue } var ok bool for b := range shouldStay { - if strings.Contains(objectKey, b) { + if strings.Contains(file.Name, b) { ok = true break } } if !ok { - log.Fatalln("ERROR: failed to delete lefover", objectKey) + log.Fatalln("ERROR: failed to delete lefover", file.Name) } } } From 69376948ed35e73dffabdea5268ab46947d108b7 Mon Sep 17 00:00:00 2001 From: Dmytro Zghoba Date: Thu, 22 Aug 2024 12:14:10 +0200 Subject: [PATCH 198/203] [PBM-1378] add Percona Squad notice (#998) Co-authored-by: Sandra Romanchenko <53295797+sandraromanchenko@users.noreply.github.com> --- cmd/pbm-agent/main.go | 2 ++ cmd/pbm-agent/squad.go | 25 +++++++++++++++++++++++++ 2 files changed, 27 insertions(+) create mode 100644 cmd/pbm-agent/squad.go diff --git a/cmd/pbm-agent/main.go b/cmd/pbm-agent/main.go index 65758ffb0..648060098 100644 --- a/cmd/pbm-agent/main.go +++ b/cmd/pbm-agent/main.go @@ -72,6 +72,8 @@ func main() { hidecreds() + fmt.Print(perconaSquadNotice) + err = runAgent(url, *dumpConns) stdlog.Println("Exit:", err) if err != nil { diff --git a/cmd/pbm-agent/squad.go b/cmd/pbm-agent/squad.go new file mode 100644 index 000000000..3a217f2e9 --- /dev/null +++ b/cmd/pbm-agent/squad.go @@ -0,0 +1,25 @@ +package main + +const perconaSquadNotice = ` + % _____ + %%% | __ \ + ###%%%%%%%%%%%%* | 
|__) |__ _ __ ___ ___ _ __ __ _ + ### ##%% %%%% | ___/ _ \ '__/ __/ _ \| '_ \ / _` + "`" + ` | + #### ##% %%%% | | | __/ | | (_| (_) | | | | (_| | + ### #### %%% |_| \___|_| \___\___/|_| |_|\__,_| + ,((### ### %%% _____ _ + (((( (### #### %%%% / ____| | | + ((( ((# ###### | (___ __ _ _ _ __ _ __| | + (((( (((# #### \___ \ / _` + "`" + ` | | | |/ _` + "`" + ` |/ _` + "`" + ` | + /(( ,((( *### ____) | (_| | |_| | (_| | (_| | + //// ((( #### |_____/ \__, |\__,_|\__,_|\__,_| + /// (((( #### | | + /////////////(((((((((((((((((######## |_| Join @ percona.com/squad + +** Join Percona Squad! ** +Participate in monthly SWAG raffles, get early access to new product features, +invite-only ”ask me anything” sessions with database performance experts. + +Interested? Fill in the form at squad.percona.com/mongodb + +` From b15d4cc9a98363236e7de1729b04c3ec77418200 Mon Sep 17 00:00:00 2001 From: radoslawszulgo Date: Thu, 22 Aug 2024 12:14:43 +0200 Subject: [PATCH 199/203] Update README.md with Join Percona Squad (#993) --- README.md | 39 +++++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index d40e9fadb..7acc08a2f 100644 --- a/README.md +++ b/README.md @@ -9,14 +9,14 @@ consistent backups of MongoDB sharded clusters and replica sets. Percona Backup For more information about PBM components and how to use it, see [Percona Backup for MongoDB documentation](https://docs.percona.com/percona-backup-mongodb/) -Percona Backup for MongoDB includes the following **features**: +Percona Backup for MongoDB includes the following **Features**: - Backup and restore for both classic non-sharded replica sets and sharded clusters - Point-in-Time recovery - Simple command-line management utility - Replica set and sharded cluster consistency through oplog capture - Distributed transaction consistency with MongoDB 4.2+ -- Simple, integrated-with-MongoDB authentication +- Simple, integrated with MongoDB authentication - No need to install a coordination service on a separate server - Use of any S3-compatible storage - Support of locally-mounted remote filesystem backup servers. @@ -28,7 +28,7 @@ Percona Backup for MongoDB consists of the following components: - **pbm-agent** is a process running on every mongod node within the cluster or a replica set that performs backup and restore operations. - **pbm** CLI is a command-line utility that instructs pbm-agents to perform an operation. - **PBM Control collections** are special collections in MongoDB that store the configuration data and backup states -- Remote backup storage as either s3-compatible or filesystem type storage +- Remote backup storage as either s3-compatible or filesystem-type storage ![Architecture](pbm-architecture.png) @@ -45,12 +45,12 @@ Find the installation instructions in the [official documentation](https://docs. Alternatively, you can [run Percona Backup for MongoDB as a Docker container](https://hub.docker.com/r/percona/percona-backup-mongodb). ## API -This repository contains source code to build binaries. It is not a library and it is not intended to be used directly by calling exposed functions, types, etc. -Please, use `pbm` CLI as the publicly available API. See [PBM commands](https://docs.percona.com/percona-backup-mongodb/reference/pbm-commands.html) for reference. +This repository contains source code for building binaries. It is not a library and is not intended to be used directly by calling exposed functions, types, etc. 
+Please use the `pbm` CLI as the publicly available API. For reference, see [PBM commands](https://docs.percona.com/percona-backup-mongodb/reference/pbm-commands.html). ## Submit Bug Report / Feature Request -If you find a bug in Percona Backup for MongoDB, you can submit a report to the project's [JIRA issue tracker](https://jira.percona.com/projects/PBM). +If you find a bug in Percona Backup for MongoDB, submit a report to the project's [JIRA issue tracker](https://jira.percona.com/projects/PBM). As a general rule of thumb, please try to create bug reports that are: @@ -80,13 +80,32 @@ When submitting a bug report or a feature, please attach the following informati ## Licensing -Percona is dedicated to **keeping open source open**. Wherever possible, we strive to include permissive licensing for both our software and documentation. For this project, we are using the Apache License 2.0 license. +Percona is dedicated to **keeping open source open**. We strive to include permissive licensing for our software and documentation wherever possible. For this project, we are using the Apache License 2.0 license. ## How to get involved -We encourage contributions and are always looking for new members that are as dedicated to serving the community as we are. - -The [Contributing Guide](https://github.com/percona/percona-backup-mongodb/blob/main/CONTRIBUTING.md) contains the guidelines how you can contribute. +We encourage contributions and are always looking for new members who are as dedicated to serving the community as we are. + +The [Contributing Guide](https://github.com/percona/percona-backup-mongodb/blob/main/CONTRIBUTING.md) contains the guidelines for contributing. + +## Join Percona Squad! +Participate in monthly SWAG raffles, get early access to new product features, and invite-only ”ask me anything” sessions with database performance experts. Interested? Fill in the form at [squad.percona.com/mongodb](https://squad.percona.com/mongodb) +``` + % _____ + %%% | __ \ + ###%%%%%%%%%%%%* | |__) |__ _ __ ___ ___ _ __ __ _ + ### ##%% %%%% | ___/ _ \ '__/ __/ _ \| '_ \ / _` | + #### ##% %%%% | | | __/ | | (_| (_) | | | | (_| | + ### #### %%% |_| \___|_| \___\___/|_| |_|\__,_| + ,((### ### %%% _____ _ + (((( (### #### %%%% / ____| | | + ((( ((# ###### | (___ __ _ _ _ __ _ __| | + (((( (((# #### \___ \ / _` | | | |/ _` |/ _` | + /(( ,((( *### ____) | (_| | |_| | (_| | (_| | + //// ((( #### |_____/ \__, |\__,_|\__,_|\__,_| + /// (((( #### | | + /////////////(((((((((((((((((######## |_| +``` ## Contact From a60faa8ae2b65c3855652608725a9e9ce9380a76 Mon Sep 17 00:00:00 2001 From: Sandra Romanchenko <53295797+sandraromanchenko@users.noreply.github.com> Date: Thu, 22 Aug 2024 13:15:16 +0300 Subject: [PATCH 200/203] PBM-1368. 
From a60faa8ae2b65c3855652608725a9e9ce9380a76 Mon Sep 17 00:00:00 2001
From: Sandra Romanchenko <53295797+sandraromanchenko@users.noreply.github.com>
Date: Thu, 22 Aug 2024 13:15:16 +0300
Subject: [PATCH 200/203] PBM-1368. PBM version bump (#996)

---
 pbm/version/version.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pbm/version/version.go b/pbm/version/version.go
index f1cbeb5d8..cf602c8c0 100644
--- a/pbm/version/version.go
+++ b/pbm/version/version.go
@@ -16,7 +16,7 @@ import (
 )
 
 // current PBM version
-const version = "2.5.0"
+const version = "2.6.0"
 
 var (
 	platform string

From c0e6f3a2275151245ae7dc7e1adff3deab2bd93b Mon Sep 17 00:00:00 2001
From: Boris Ilijic
Date: Tue, 27 Aug 2024 16:03:27 +0200
Subject: [PATCH 201/203] PBM-1057: Forbid running Backup & PITR on non-suitable agents (#1001)

* Add ListSteadyAgents for fetching agents which are ready for Backup and PITR
* Use only steady agents for backup nomination
* Extract and expand backup candidates rules
* Add PITR candidates rules logic
* Extract MaxReplicationLagTimeSec to defs package
* Expand agent's stat with replication lag
* Add replication lag guard for Backup and PITR candidates
---
 cmd/pbm-agent/agent.go  |  7 +++++++
 cmd/pbm-agent/backup.go | 26 ++++++++++++++++----------
 cmd/pbm-agent/pitr.go   |  8 ++++----
 pbm/defs/defs.go        |  2 ++
 pbm/topo/agent.go       | 28 ++++++++++++++++++++++++++++
 pbm/topo/topo.go        |  4 +---
 6 files changed, 58 insertions(+), 17 deletions(-)

diff --git a/cmd/pbm-agent/agent.go b/cmd/pbm-agent/agent.go
index 4f0544714..bbddc64f8 100644
--- a/cmd/pbm-agent/agent.go
+++ b/cmd/pbm-agent/agent.go
@@ -330,6 +330,13 @@ func (a *Agent) HbStatus(ctx context.Context) {
 	} else {
 		hb.State = n.State
 		hb.StateStr = n.StateStr
+
+		rLag, err := topo.ReplicationLag(ctx, a.nodeConn, a.brief.Me)
+		if err != nil {
+			l.Error("get replication lag: %v", err)
+			hb.Err += fmt.Sprintf("get replication lag: %v", err)
+		}
+		hb.ReplicationLag = rLag
 	}
 }
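For context on the heartbeat change above: conceptually, `topo.ReplicationLag` boils down to comparing the node's last applied optime against the primary's, as reported in the replica-set status. A minimal, runnable sketch of that idea (the `member` type and its fields are illustrative assumptions for this note, not PBM's actual `topo` types):

```go
package main

import "fmt"

// member is a trimmed-down view of one replica-set member, in the spirit
// of replSetGetStatus output; the type and fields are assumptions made
// for this sketch only.
type member struct {
	Name      string
	IsPrimary bool
	OptimeTS  uint32 // seconds part of the member's last applied optime
}

// replicationLag returns how many seconds `self` trails the primary --
// the kind of value that ends up in hb.ReplicationLag above.
func replicationLag(members []member, self string) (int, error) {
	var primaryTS, selfTS uint32
	for _, m := range members {
		if m.IsPrimary {
			primaryTS = m.OptimeTS
		}
		if m.Name == self {
			selfTS = m.OptimeTS
		}
	}
	if primaryTS == 0 || selfTS == 0 {
		return 0, fmt.Errorf("primary or node %q not found", self)
	}
	if selfTS >= primaryTS {
		return 0, nil // caught up, or self is the primary
	}
	return int(primaryTS - selfTS), nil
}

func main() {
	rs := []member{
		{Name: "rs0-a:27017", IsPrimary: true, OptimeTS: 1724800000},
		{Name: "rs0-b:27017", OptimeTS: 1724799990},
	}
	lag, _ := replicationLag(rs, "rs0-b:27017")
	fmt.Println("lag:", lag, "sec") // lag: 10 sec
}
```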
diff --git a/cmd/pbm-agent/backup.go b/cmd/pbm-agent/backup.go
index 05d3ced83..e685ad466 100644
--- a/cmd/pbm-agent/backup.go
+++ b/cmd/pbm-agent/backup.go
@@ -165,22 +165,15 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID,
 		}
 	}
 
-	agents, err := topo.ListAgentStatuses(ctx, a.leadConn)
+	agents, err := topo.ListSteadyAgents(ctx, a.leadConn)
 	if err != nil {
 		l.Error("get agents list: %v", err)
 		return
 	}
 
-	validCandidates := make([]topo.AgentStat, 0, len(agents))
-	for _, s := range agents {
-		if version.FeatureSupport(s.MongoVersion()).BackupType(cmd.Type) != nil {
-			continue
-		}
-
-		validCandidates = append(validCandidates, s)
-	}
+	candidates := a.getValidCandidates(agents, cmd.Type)
 
-	nodes := prio.CalcNodesPriority(c, cfg.Backup.Priority, validCandidates)
+	nodes := prio.CalcNodesPriority(c, cfg.Backup.Priority, candidates)
 
 	shards, err := topo.ClusterMembers(ctx, a.leadConn.MongoClient())
 	if err != nil {
@@ -256,6 +249,19 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID,
 	}
 }
 
+// getValidCandidates filters out all agents that are not suitable for the backup.
+func (a *Agent) getValidCandidates(agents []topo.AgentStat, backupType defs.BackupType) []topo.AgentStat {
+	validCandidates := []topo.AgentStat{}
+	for _, agent := range agents {
+		if version.FeatureSupport(agent.MongoVersion()).BackupType(backupType) != nil {
+			continue
+		}
+		validCandidates = append(validCandidates, agent)
+	}
+
+	return validCandidates
+}
+
 const renominationFrame = 5 * time.Second
 
 func (a *Agent) nominateRS(ctx context.Context, bcp, rs string, nodes [][]string) error {
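The rewritten `Backup` flow is now a two-stage selection: `topo.ListSteadyAgents` pre-filters unhealthy or lagging agents cluster-wide, `getValidCandidates` then drops agents whose server version cannot run the requested backup type, and only the remainder is fed to `prio.CalcNodesPriority`. A toy, runnable illustration of that filter-then-rank shape (the `agentStat` type and the `supports` rule are stand-ins, not PBM's real `version`/`prio` API):

```go
package main

import (
	"fmt"
	"sort"
)

type agentStat struct {
	Node     string
	Ver      string  // MongoDB server version, e.g. "7.0"
	Priority float64 // from the backup.priority configuration
}

// supports is a stand-in for version.FeatureSupport(...).BackupType(...);
// lexical comparison is good enough for these single-digit majors only.
func supports(ver, backupType string) bool {
	return !(backupType == "incremental" && ver < "5.0")
}

func main() {
	agents := []agentStat{
		{Node: "a:27017", Ver: "4.4", Priority: 1.0},
		{Node: "b:27017", Ver: "7.0", Priority: 2.5},
		{Node: "c:27017", Ver: "7.0", Priority: 1.0},
	}

	// Stage 1: keep only agents able to run the requested backup type.
	var candidates []agentStat
	for _, a := range agents {
		if supports(a.Ver, "incremental") {
			candidates = append(candidates, a)
		}
	}

	// Stage 2: rank by configured priority, highest first.
	sort.Slice(candidates, func(i, j int) bool {
		return candidates[i].Priority > candidates[j].Priority
	})
	fmt.Println(candidates) // [{b:27017 7.0 2.5} {c:27017 7.0 1}]
}
```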
diff --git a/cmd/pbm-agent/pitr.go b/cmd/pbm-agent/pitr.go
index 991f7f120..482133d91 100644
--- a/cmd/pbm-agent/pitr.go
+++ b/cmd/pbm-agent/pitr.go
@@ -412,13 +412,13 @@ func (a *Agent) leadNomination(
 		return
 	}
 
-	agents, err := topo.ListAgentStatuses(ctx, a.leadConn)
+	candidates, err := topo.ListSteadyAgents(ctx, a.leadConn)
 	if err != nil {
 		l.Error("get agents list: %v", err)
 		return
 	}
 
-	nodes := prio.CalcNodesPriority(nil, cfgPrio, agents)
+	nodes := prio.CalcNodesPriority(nil, cfgPrio, candidates)
 
 	shards, err := topo.ClusterMembers(ctx, a.leadConn.MongoClient())
 	if err != nil {
@@ -433,7 +433,7 @@ func (a *Agent) leadNomination(
 		return
 	}
 
-	err = a.reconcileReadyStatus(ctx, agents)
+	err = a.reconcileReadyStatus(ctx, candidates)
 	if err != nil {
 		l.Error("reconciling ready status: %v", err)
 		return
@@ -645,7 +645,7 @@ func (a *Agent) reconcileReadyStatus(ctx context.Context, agents []topo.AgentSta
 		if err := oplog.SetClusterStatus(ctx, a.leadConn, oplog.StatusUnset); err != nil {
 			l.Error("error while cleaning cluster status: %v", err)
 		}
-		return errors.New("timeout while roconciling ready status")
+		return errors.New("timeout while reconciling ready status")
 	}
 }
diff --git a/pbm/defs/defs.go b/pbm/defs/defs.go
index 346925943..ebb09f487 100644
--- a/pbm/defs/defs.go
+++ b/pbm/defs/defs.go
@@ -136,6 +136,8 @@ const (
 
 const StaleFrameSec uint32 = 30
 
+const MaxReplicationLagTimeSec = 21
+
 const (
 	// MetadataFileSuffix is a suffix for the metadata file on a storage
 	MetadataFileSuffix = ".pbm.json"
diff --git a/pbm/topo/agent.go b/pbm/topo/agent.go
index 3e616a8b1..287d68d5d 100644
--- a/pbm/topo/agent.go
+++ b/pbm/topo/agent.go
@@ -45,6 +45,9 @@ type AgentStat struct {
 	// DelaySecs is the node configured replication delay (lag).
 	DelaySecs int32 `bson:"delay"`
 
+	// Replication lag for mongod.
+	ReplicationLag int `bson:"repl_lag"`
+
 	// AgentVer has the PBM Agent version (looks like `v2.3.4`)
 	AgentVer string `bson:"v"`
 
@@ -192,6 +195,31 @@ func ListAgentStatuses(ctx context.Context, m connect.Client) ([]AgentStat, erro
 	return ListAgents(ctx, m)
 }
 
+// ListSteadyAgents returns agents which are in steady state for backup or PITR.
+func ListSteadyAgents(ctx context.Context, m connect.Client) ([]AgentStat, error) {
+	agents, err := ListAgentStatuses(ctx, m)
+	if err != nil {
+		return nil, errors.Wrap(err, "listing agents")
+	}
+	steadyAgents := []AgentStat{}
+	for _, a := range agents {
+		if a.State != defs.NodeStatePrimary &&
+			a.State != defs.NodeStateSecondary {
+			continue
+		}
+		if a.Arbiter || a.DelaySecs > 0 {
+			continue
+		}
+		if a.ReplicationLag >= defs.MaxReplicationLagTimeSec {
+			continue
+		}
+
+		steadyAgents = append(steadyAgents, a)
+	}
+
+	return steadyAgents, nil
+}
+
 func ListAgents(ctx context.Context, m connect.Client) ([]AgentStat, error) {
 	cur, err := m.AgentsStatusCollection().Find(ctx, bson.D{})
 	if err != nil {
diff --git a/pbm/topo/topo.go b/pbm/topo/topo.go
index 2da4ae5b4..79b37ed37 100644
--- a/pbm/topo/topo.go
+++ b/pbm/topo/topo.go
@@ -148,8 +148,6 @@ func collectTopoCheckErrors(
 	return nil
 }
 
-const maxReplicationLagTimeSec = 21
-
 // NodeSuits checks if node can perform backup
 func NodeSuits(ctx context.Context, m *mongo.Client, inf *NodeInfo) (bool, error) {
 	status, err := GetNodeStatus(ctx, m, inf.Me)
@@ -165,7 +163,7 @@ func NodeSuits(ctx context.Context, m *mongo.Client, inf *NodeInfo) (bool, error
 		return false, errors.Wrap(err, "get node replication lag")
 	}
 
-	return replLag < maxReplicationLagTimeSec && status.Health == defs.NodeHealthUp &&
+	return replLag < defs.MaxReplicationLagTimeSec && status.Health == defs.NodeHealthUp &&
 		(status.State == defs.NodeStatePrimary || status.State == defs.NodeStateSecondary), nil
 }
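Taken together, `ListSteadyAgents` admits a node only if it is a primary or secondary, is not an arbiter, has no configured replication delay, and trails the primary by less than `defs.MaxReplicationLagTimeSec` (21 seconds). The same rules restated as a standalone predicate, handy as a checklist (fields loosely mirror `topo.AgentStat`; the numeric states are the standard mongod replica-set states):

```go
package main

import "fmt"

const maxReplLagSec = 21 // mirrors defs.MaxReplicationLagTimeSec

type stat struct {
	State     int // 1 = PRIMARY, 2 = SECONDARY, 7 = ARBITER (mongod states)
	Arbiter   bool
	DelaySecs int32
	ReplLag   int
}

func isSteady(s stat) bool {
	dataBearing := s.State == 1 || s.State == 2
	return dataBearing &&
		!s.Arbiter && // arbiters hold no data to back up
		s.DelaySecs == 0 && // delayed members are intentionally behind
		s.ReplLag < maxReplLagSec // too-stale nodes would produce stale backups
}

func main() {
	fmt.Println(isSteady(stat{State: 2}))                // true: healthy secondary
	fmt.Println(isSteady(stat{State: 7, Arbiter: true})) // false: arbiter
	fmt.Println(isSteady(stat{State: 2, ReplLag: 30}))   // false: lagging too far
}
```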
From 42203c90573e0ee1a050d642a9ae0b4db8678376 Mon Sep 17 00:00:00 2001
From: Oleksandr Miroshnychenko
Date: Tue, 21 May 2024 15:03:51 +0300
Subject: [PATCH 202/203] Test Cirrus CI tagging

---
 .cirrus.yml | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 69 insertions(+)
 create mode 100644 .cirrus.yml

diff --git a/.cirrus.yml b/.cirrus.yml
new file mode 100644
index 000000000..a402145ac
--- /dev/null
+++ b/.cirrus.yml
@@ -0,0 +1,69 @@
+script_template: &SCRIPT_TEMPLATE
+  system_info_script: |
+    uname -r
+    uname -i
+    df -Th
+    free -m
+    pwd
+    ls -l ..
+    nproc --all
+    cat /proc/cpuinfo
+  install_dependencies_script: |
+    export DEBIAN_FRONTEND=noninteractive
+    PACKAGES_TO_INSTALL="lz4 unzip"
+    apt update
+    apt -yq --no-install-suggests --no-install-recommends --allow-unauthenticated install $PACKAGES_TO_INSTALL
+    if ! $(aws --version | grep -q 'aws-cli/2'); then
+      find /tmp -maxdepth 1 -name "*aws*" | xargs sudo rm -rf
+
+      until curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "/tmp/awscliv2.zip"; do
+        sleep 1
+        echo try again
+      done
+
+      unzip -o /tmp/awscliv2.zip -d /tmp
+      cd /tmp/aws && sudo ./install
+    fi
+  set_additional_tags_script: |
+    TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
+    echo $TOKEN
+    INSTANCE_ID=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data/instance-id)
+    echo $INSTANCE_ID
+    AZ=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data/placement/availability-zone)
+    echo $AZ
+    REGION=$(echo $AZ | sed 's/[a-z]$//')
+    aws --version
+    sleep 5
+    aws ec2 describe-tags --filters "Name=resource-id,Values=$INSTANCE_ID" --region=$REGION || true
+    aws ec2 describe-tags --filters "Name=resource-id,Values=$INSTANCE_ID" --region=$REGION || true
+    aws ec2 describe-instances --instance-ids $INSTANCE_ID --region=$REGION || true
+    aws sts get-caller-identity || true
+    aws ec2 associate-iam-instance-profile --instance-id $INSTANCE_ID --iam-instance-profile Name=jenkins-ps80-master || true
+    sleep 3600
+
+
+task:
+  name: "Test tags"
+  aws_credentials: ENCRYPTED[3dec7f4eb176b0bde14de840ddf9c356de3a739bd7ae14fcb9751c38afc96dd2f39c25853e6934cfff5c8e433c1d98e5]
+  ec2_instance:
+    image: ami-0cf2b4e024cdb6960 # Replace with your desired AMI ID
+    type: c5.large # Specify the instance type you want to use
+    region: us-west-2 # Define the region where the instance will be launched
+    key_name: jenkins-master # Optionally specify a key pair for SSH access
+    spot: true
+    tags:
+      - key: Name
+        value: test
+      - key: Cirrusci
+        value: test-tag
+      - key: iit-billing-tag
+        value: CirrusCI
+    security_groups:
+      - default # Specify security groups
+    block_device_mappings:
+      - device_name: /dev/sda1
+        ebs:
+          volume_size: 20 # Size of the EBS volume in GB
+  script: |
+    uname -a
+  << : *SCRIPT_TEMPLATE
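The `set_additional_tags_script` above speaks IMDSv2: one PUT to mint a short-lived session token, then GETs that present the token in a header. A rough Go equivalent of those two curl calls, shown only to clarify the handshake (it can succeed only from inside an EC2 instance; the paths and headers are the same ones used in the script):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

const imdsBase = "http://169.254.169.254/latest"

// imdsGet reads one metadata path, presenting the IMDSv2 session token.
func imdsGet(c *http.Client, token, path string) (string, error) {
	req, err := http.NewRequest(http.MethodGet, imdsBase+path, nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("X-aws-ec2-metadata-token", token)
	resp, err := c.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	b, err := io.ReadAll(resp.Body)
	return string(b), err
}

func main() {
	c := &http.Client{Timeout: 5 * time.Second}

	// Step 1: PUT /latest/api/token with a TTL header mints the token.
	req, err := http.NewRequest(http.MethodPut, imdsBase+"/api/token", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "21600")
	resp, err := c.Do(req)
	if err != nil {
		panic(err) // expected off-EC2: the link-local address is unreachable
	}
	tok, _ := io.ReadAll(resp.Body)
	resp.Body.Close()

	// Step 2: GETs carry the token, like the curl -H calls in the script.
	id, _ := imdsGet(c, string(tok), "/meta-data/instance-id")
	az, _ := imdsGet(c, string(tok), "/meta-data/placement/availability-zone")
	fmt.Println(id, az)
}
```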
From d17d2557ae55f18c3922cc28858de6f29ed88327 Mon Sep 17 00:00:00 2001
From: Oleksandr Miroshnychenko
Date: Wed, 28 Aug 2024 09:19:18 +0300
Subject: [PATCH 203/203] PKG-157 pbm: add marketing message to postinstall

---
 .cirrus.yml                       | 69 ------------------------------
 packaging/debian/postinst         | 10 +++++
 packaging/rpm/mongodb-backup.spec | 10 +++++
 3 files changed, 20 insertions(+), 69 deletions(-)
 delete mode 100644 .cirrus.yml

diff --git a/.cirrus.yml b/.cirrus.yml
deleted file mode 100644
index a402145ac..000000000
--- a/.cirrus.yml
+++ /dev/null
@@ -1,69 +0,0 @@
-script_template: &SCRIPT_TEMPLATE
-  system_info_script: |
-    uname -r
-    uname -i
-    df -Th
-    free -m
-    pwd
-    ls -l ..
-    nproc --all
-    cat /proc/cpuinfo
-  install_dependencies_script: |
-    export DEBIAN_FRONTEND=noninteractive
-    PACKAGES_TO_INSTALL="lz4 unzip"
-    apt update
-    apt -yq --no-install-suggests --no-install-recommends --allow-unauthenticated install $PACKAGES_TO_INSTALL
-    if ! $(aws --version | grep -q 'aws-cli/2'); then
-      find /tmp -maxdepth 1 -name "*aws*" | xargs sudo rm -rf
-
-      until curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "/tmp/awscliv2.zip"; do
-        sleep 1
-        echo try again
-      done
-
-      unzip -o /tmp/awscliv2.zip -d /tmp
-      cd /tmp/aws && sudo ./install
-    fi
-  set_additional_tags_script: |
-    TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
-    echo $TOKEN
-    INSTANCE_ID=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data/instance-id)
-    echo $INSTANCE_ID
-    AZ=$(curl -s -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/meta-data/placement/availability-zone)
-    echo $AZ
-    REGION=$(echo $AZ | sed 's/[a-z]$//')
-    aws --version
-    sleep 5
-    aws ec2 describe-tags --filters "Name=resource-id,Values=$INSTANCE_ID" --region=$REGION || true
-    aws ec2 describe-tags --filters "Name=resource-id,Values=$INSTANCE_ID" --region=$REGION || true
-    aws ec2 describe-instances --instance-ids $INSTANCE_ID --region=$REGION || true
-    aws sts get-caller-identity || true
-    aws ec2 associate-iam-instance-profile --instance-id $INSTANCE_ID --iam-instance-profile Name=jenkins-ps80-master || true
-    sleep 3600
-
-
-task:
-  name: "Test tags"
-  aws_credentials: ENCRYPTED[3dec7f4eb176b0bde14de840ddf9c356de3a739bd7ae14fcb9751c38afc96dd2f39c25853e6934cfff5c8e433c1d98e5]
-  ec2_instance:
-    image: ami-0cf2b4e024cdb6960 # Replace with your desired AMI ID
-    type: c5.large # Specify the instance type you want to use
-    region: us-west-2 # Define the region where the instance will be launched
-    key_name: jenkins-master # Optionally specify a key pair for SSH access
-    spot: true
-    tags:
-      - key: Name
-        value: test
-      - key: Cirrusci
-        value: test-tag
-      - key: iit-billing-tag
-        value: CirrusCI
-    security_groups:
-      - default # Specify security groups
-    block_device_mappings:
-      - device_name: /dev/sda1
-        ebs:
-          volume_size: 20 # Size of the EBS volume in GB
-  script: |
-    uname -a
-  << : *SCRIPT_TEMPLATE
diff --git a/packaging/debian/postinst b/packaging/debian/postinst
index b89f8ad41..0598396ba 100644
--- a/packaging/debian/postinst
+++ b/packaging/debian/postinst
@@ -2,4 +2,14 @@
 #DEBHELPER#
 chown mongod:mongod /etc/pbm-storage.conf
 
+cat << EOF
+** Join Percona Squad! **
+
+Participate in monthly SWAG raffles, get early access to new product features,
+and invite-only ”ask me anything” sessions with database performance experts.
+
+Interested? Fill in the form at https://squad.percona.com/mongodb
+
+EOF
+
 exit 0
diff --git a/packaging/rpm/mongodb-backup.spec b/packaging/rpm/mongodb-backup.spec
index 28e7d117a..a8b1cdf38 100644
--- a/packaging/rpm/mongodb-backup.spec
+++ b/packaging/rpm/mongodb-backup.spec
@@ -110,6 +110,16 @@ fi
 fi
 %endif
 
+cat << EOF
+** Join Percona Squad! **
+
+Participate in monthly SWAG raffles, get early access to new product features,
+and invite-only ”ask me anything” sessions with database performance experts.
+
+Interested? Fill in the form at https://squad.percona.com/mongodb
+
+EOF
+
 %postun -n percona-backup-mongodb
 case "$1" in