From 58e6f29ab45a86080631ef33204499d24a0f4194 Mon Sep 17 00:00:00 2001 From: Peng Hui Jiang Date: Sun, 25 Feb 2024 21:03:33 +0800 Subject: [PATCH] hmc support Signed-off-by: Peng Hui Jiang --- go.mod | 8 +- go.sum | 30 +- pkg/sensors/platform/source/hmc.go | 127 +- .../runc/libcontainer/cgroups/file.go | 31 +- .../runc/libcontainer/cgroups/fs2/memory.go | 18 +- .../runc/libcontainer/cgroups/stats.go | 2 + .../runc/libcontainer/configs/config.go | 6 +- .../runc/libcontainer/configs/config_linux.go | 30 +- .../runc/libcontainer/userns/userns_maps.c | 79 ++ .../libcontainer/userns/userns_maps_linux.go | 186 +++ .../runc/libcontainer/utils/utils_unix.go | 64 +- .../zhmcclient/golang-zhmcclient/LICENSE | 176 +++ .../pkg/zhmcclient/adapter.go | 307 +++++ .../pkg/zhmcclient/client.go | 633 +++++++++ .../pkg/zhmcclient/constants.go | 60 + .../golang-zhmcclient/pkg/zhmcclient/cpc.go | 130 ++ .../golang-zhmcclient/pkg/zhmcclient/data.pem | 23 + .../golang-zhmcclient/pkg/zhmcclient/data.txt | 1 + .../pkg/zhmcclient/imageFileName | 1 + .../golang-zhmcclient/pkg/zhmcclient/job.go | 147 ++ .../pkg/zhmcclient/logger.go | 15 + .../golang-zhmcclient/pkg/zhmcclient/lpar.go | 692 ++++++++++ .../pkg/zhmcclient/metrics.go | 79 ++ .../golang-zhmcclient/pkg/zhmcclient/model.go | 1177 +++++++++++++++++ .../golang-zhmcclient/pkg/zhmcclient/nic.go | 198 +++ .../pkg/zhmcclient/sgroup.go | 405 ++++++ .../golang-zhmcclient/pkg/zhmcclient/utils.go | 267 ++++ .../pkg/zhmcclient/vswitch.go | 133 ++ .../golang-zhmcclient/pkg/zhmcclient/zhmc.go | 220 +++ vendor/go.uber.org/multierr/.codecov.yml | 15 + vendor/go.uber.org/multierr/.gitignore | 4 + vendor/go.uber.org/multierr/CHANGELOG.md | 95 ++ vendor/go.uber.org/multierr/LICENSE.txt | 19 + vendor/go.uber.org/multierr/Makefile | 38 + vendor/go.uber.org/multierr/README.md | 43 + vendor/go.uber.org/multierr/error.go | 646 +++++++++ .../go.uber.org/multierr/error_post_go120.go | 48 + .../go.uber.org/multierr/error_pre_go120.go | 79 ++ vendor/go.uber.org/zap/.codecov.yml | 17 + vendor/go.uber.org/zap/.gitignore | 32 + vendor/go.uber.org/zap/.golangci.yml | 77 ++ vendor/go.uber.org/zap/.readme.tmpl | 109 ++ vendor/go.uber.org/zap/CHANGELOG.md | 671 ++++++++++ vendor/go.uber.org/zap/CODE_OF_CONDUCT.md | 75 ++ vendor/go.uber.org/zap/CONTRIBUTING.md | 70 + vendor/go.uber.org/zap/FAQ.md | 164 +++ vendor/go.uber.org/zap/LICENSE.txt | 19 + vendor/go.uber.org/zap/Makefile | 76 ++ vendor/go.uber.org/zap/README.md | 137 ++ vendor/go.uber.org/zap/array.go | 447 +++++++ vendor/go.uber.org/zap/buffer/buffer.go | 146 ++ vendor/go.uber.org/zap/buffer/pool.go | 53 + vendor/go.uber.org/zap/checklicense.sh | 17 + vendor/go.uber.org/zap/config.go | 330 +++++ vendor/go.uber.org/zap/doc.go | 117 ++ vendor/go.uber.org/zap/encoder.go | 79 ++ vendor/go.uber.org/zap/error.go | 82 ++ vendor/go.uber.org/zap/field.go | 613 +++++++++ vendor/go.uber.org/zap/flag.go | 39 + vendor/go.uber.org/zap/glide.yaml | 34 + vendor/go.uber.org/zap/global.go | 169 +++ vendor/go.uber.org/zap/http_handler.go | 140 ++ .../zap/internal/bufferpool/bufferpool.go | 31 + .../go.uber.org/zap/internal/color/color.go | 44 + vendor/go.uber.org/zap/internal/exit/exit.go | 66 + .../go.uber.org/zap/internal/level_enabler.go | 37 + vendor/go.uber.org/zap/internal/pool/pool.go | 58 + .../zap/internal/stacktrace/stack.go | 181 +++ vendor/go.uber.org/zap/level.go | 153 +++ vendor/go.uber.org/zap/logger.go | 432 ++++++ vendor/go.uber.org/zap/options.go | 167 +++ vendor/go.uber.org/zap/sink.go | 180 +++ vendor/go.uber.org/zap/sugar.go | 437 
++++++ vendor/go.uber.org/zap/time.go | 27 + vendor/go.uber.org/zap/writer.go | 98 ++ .../zap/zapcore/buffered_write_syncer.go | 219 +++ vendor/go.uber.org/zap/zapcore/clock.go | 48 + .../zap/zapcore/console_encoder.go | 157 +++ vendor/go.uber.org/zap/zapcore/core.go | 122 ++ vendor/go.uber.org/zap/zapcore/doc.go | 24 + vendor/go.uber.org/zap/zapcore/encoder.go | 451 +++++++ vendor/go.uber.org/zap/zapcore/entry.go | 298 +++++ vendor/go.uber.org/zap/zapcore/error.go | 136 ++ vendor/go.uber.org/zap/zapcore/field.go | 233 ++++ vendor/go.uber.org/zap/zapcore/hook.go | 77 ++ .../go.uber.org/zap/zapcore/increase_level.go | 75 ++ .../go.uber.org/zap/zapcore/json_encoder.go | 583 ++++++++ vendor/go.uber.org/zap/zapcore/lazy_with.go | 54 + vendor/go.uber.org/zap/zapcore/level.go | 229 ++++ .../go.uber.org/zap/zapcore/level_strings.go | 46 + vendor/go.uber.org/zap/zapcore/marshaler.go | 61 + .../go.uber.org/zap/zapcore/memory_encoder.go | 179 +++ .../zap/zapcore/reflected_encoder.go | 41 + vendor/go.uber.org/zap/zapcore/sampler.go | 229 ++++ vendor/go.uber.org/zap/zapcore/tee.go | 96 ++ .../go.uber.org/zap/zapcore/write_syncer.go | 122 ++ vendor/modules.txt | 19 +- 97 files changed, 15304 insertions(+), 51 deletions(-) create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/userns/userns_maps.c create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/userns/userns_maps_linux.go create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/LICENSE create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/adapter.go create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/client.go create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/constants.go create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/cpc.go create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/data.pem create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/data.txt create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/imageFileName create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/job.go create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/logger.go create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/lpar.go create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/metrics.go create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/model.go create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/nic.go create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/sgroup.go create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/utils.go create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/vswitch.go create mode 100644 vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/zhmc.go create mode 100644 vendor/go.uber.org/multierr/.codecov.yml create mode 100644 vendor/go.uber.org/multierr/.gitignore create mode 100644 vendor/go.uber.org/multierr/CHANGELOG.md create mode 100644 vendor/go.uber.org/multierr/LICENSE.txt create mode 100644 vendor/go.uber.org/multierr/Makefile create mode 100644 vendor/go.uber.org/multierr/README.md create mode 100644 vendor/go.uber.org/multierr/error.go create mode 100644 vendor/go.uber.org/multierr/error_post_go120.go create mode 100644 
vendor/go.uber.org/multierr/error_pre_go120.go create mode 100644 vendor/go.uber.org/zap/.codecov.yml create mode 100644 vendor/go.uber.org/zap/.gitignore create mode 100644 vendor/go.uber.org/zap/.golangci.yml create mode 100644 vendor/go.uber.org/zap/.readme.tmpl create mode 100644 vendor/go.uber.org/zap/CHANGELOG.md create mode 100644 vendor/go.uber.org/zap/CODE_OF_CONDUCT.md create mode 100644 vendor/go.uber.org/zap/CONTRIBUTING.md create mode 100644 vendor/go.uber.org/zap/FAQ.md create mode 100644 vendor/go.uber.org/zap/LICENSE.txt create mode 100644 vendor/go.uber.org/zap/Makefile create mode 100644 vendor/go.uber.org/zap/README.md create mode 100644 vendor/go.uber.org/zap/array.go create mode 100644 vendor/go.uber.org/zap/buffer/buffer.go create mode 100644 vendor/go.uber.org/zap/buffer/pool.go create mode 100644 vendor/go.uber.org/zap/checklicense.sh create mode 100644 vendor/go.uber.org/zap/config.go create mode 100644 vendor/go.uber.org/zap/doc.go create mode 100644 vendor/go.uber.org/zap/encoder.go create mode 100644 vendor/go.uber.org/zap/error.go create mode 100644 vendor/go.uber.org/zap/field.go create mode 100644 vendor/go.uber.org/zap/flag.go create mode 100644 vendor/go.uber.org/zap/glide.yaml create mode 100644 vendor/go.uber.org/zap/global.go create mode 100644 vendor/go.uber.org/zap/http_handler.go create mode 100644 vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go create mode 100644 vendor/go.uber.org/zap/internal/color/color.go create mode 100644 vendor/go.uber.org/zap/internal/exit/exit.go create mode 100644 vendor/go.uber.org/zap/internal/level_enabler.go create mode 100644 vendor/go.uber.org/zap/internal/pool/pool.go create mode 100644 vendor/go.uber.org/zap/internal/stacktrace/stack.go create mode 100644 vendor/go.uber.org/zap/level.go create mode 100644 vendor/go.uber.org/zap/logger.go create mode 100644 vendor/go.uber.org/zap/options.go create mode 100644 vendor/go.uber.org/zap/sink.go create mode 100644 vendor/go.uber.org/zap/sugar.go create mode 100644 vendor/go.uber.org/zap/time.go create mode 100644 vendor/go.uber.org/zap/writer.go create mode 100644 vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go create mode 100644 vendor/go.uber.org/zap/zapcore/clock.go create mode 100644 vendor/go.uber.org/zap/zapcore/console_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/core.go create mode 100644 vendor/go.uber.org/zap/zapcore/doc.go create mode 100644 vendor/go.uber.org/zap/zapcore/encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/entry.go create mode 100644 vendor/go.uber.org/zap/zapcore/error.go create mode 100644 vendor/go.uber.org/zap/zapcore/field.go create mode 100644 vendor/go.uber.org/zap/zapcore/hook.go create mode 100644 vendor/go.uber.org/zap/zapcore/increase_level.go create mode 100644 vendor/go.uber.org/zap/zapcore/json_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/lazy_with.go create mode 100644 vendor/go.uber.org/zap/zapcore/level.go create mode 100644 vendor/go.uber.org/zap/zapcore/level_strings.go create mode 100644 vendor/go.uber.org/zap/zapcore/marshaler.go create mode 100644 vendor/go.uber.org/zap/zapcore/memory_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/reflected_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/sampler.go create mode 100644 vendor/go.uber.org/zap/zapcore/tee.go create mode 100644 vendor/go.uber.org/zap/zapcore/write_syncer.go diff --git a/go.mod b/go.mod index 73f7ffbad0..66236f5404 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module 
github.com/sustainable-computing-io/kepler -go 1.20 +go 1.21 require ( github.com/NVIDIA/go-nvml v0.12.0-1 @@ -14,12 +14,13 @@ require ( github.com/klauspost/cpuid/v2 v2.2.6 github.com/onsi/ginkgo/v2 v2.13.2 github.com/onsi/gomega v1.30.0 - github.com/opencontainers/runc v1.1.10 + github.com/opencontainers/runc v1.1.12 github.com/opencontainers/runtime-spec v1.1.0 github.com/prometheus/client_golang v1.17.0 github.com/prometheus/common v0.44.0 github.com/prometheus/prometheus v0.48.1 github.com/sirupsen/logrus v1.9.0 + github.com/zhmcclient/golang-zhmcclient v0.2.1-0.20231123024149-4d04b40c0e93 golang.org/x/sys v0.14.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.28.2 @@ -70,6 +71,9 @@ require ( github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/procfs v0.11.1 // indirect github.com/spf13/pflag v1.0.5 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.26.0 // indirect + github.com/stretchr/testify v1.8.4 // indirect golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.13.0 // indirect diff --git a/go.sum b/go.sum index 44a59b9890..b14d8937e9 100644 --- a/go.sum +++ b/go.sum @@ -5,6 +5,7 @@ github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9 github.com/aquasecurity/libbpfgo v0.4.9-libbpf-1.2.0 h1:pk9L7I6wF1nTfO42+jjXhA8ozRjvtj2ZvHV/i/YC0dE= github.com/aquasecurity/libbpfgo v0.4.9-libbpf-1.2.0/go.mod h1:UD3Mfr+JZ/ASK2VMucI/zAdEhb35LtvYXvAUdrdqE9s= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= @@ -29,6 +30,9 @@ github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= @@ -86,6 +90,7 @@ github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwA github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jszwec/csvutil v1.8.0 
h1:G7vS2LGdpZZDH1HmHeNbxOaJ/ZnJlpwGFvOkTkJzzNk= @@ -96,6 +101,7 @@ github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/4 github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -118,18 +124,24 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs= github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40= -github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M= +github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss= +github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8= github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= @@ -141,7 +153,9 @@ github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/ github.com/prometheus/prometheus v0.48.1 h1:CTszphSNTXkuCG6O0IfpKdHcJkvvnAAE1GbELKS+NFk= github.com/prometheus/prometheus v0.48.1/go.mod 
h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -156,8 +170,17 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zhmcclient/golang-zhmcclient v0.2.1-0.20231123024149-4d04b40c0e93 h1:6aELYRWbQ+mJR4Mx+KFmDJwLhv4D+VsUe7ntu5cb4Xw= +github.com/zhmcclient/golang-zhmcclient v0.2.1-0.20231123024149-4d04b40c0e93/go.mod h1:adYy8E34m1Z0SW4YVtYVj6mSLZkFXLTk0HAap9ViE+Y= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -166,6 +189,7 @@ golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQz golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -218,6 +242,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= diff --git a/pkg/sensors/platform/source/hmc.go b/pkg/sensors/platform/source/hmc.go index 47e30071a1..30794cea7c 100644 --- a/pkg/sensors/platform/source/hmc.go +++ b/pkg/sensors/platform/source/hmc.go @@ -1,12 +1,9 @@ /* -Copyright 2022. - +Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -16,21 +13,127 @@ limitations under the License. package source +import ( + "os" + "strconv" + + "k8s.io/klog/v2" + "github.com/sustainable-computing-io/kepler/pkg/sensors/components/source" + "github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient" +) + +var hmcManager *zhmcclient.ZhmcManager + type PowerHMC struct{} -func (a *PowerHMC) GetName() string { - return "hmc" +func (r *PowerHMC) GetName() string { + return "hmc" +} + +func (r *PowerHMC) GetHMCManager() *zhmcclient.ZhmcManager { + if hmcManager == nil { + endpoint := os.Getenv("HMC_ENDPOINT") + username := os.Getenv("HMC_USERNAME") + password := os.Getenv("HMC_PASSWORD") + cacert := os.Getenv("CA_CERT") + skipCert := os.Getenv("SKIP_CERT") + isSkipCert, _ := strconv.ParseBool(skipCert) + + creds := &zhmcclient.Options{Username: username, Password: password, CaCert: cacert, SkipCert: isSkipCert, Trace: false} + client, err := zhmcclient.NewClient(endpoint, creds, nil) + if err != nil { + klog.V(1).Infof("Error getting client connection %v", err.Message) + } + if client != nil { + zhmcAPI := zhmcclient.NewManagerFromClient(client) + hmcManager, _ := zhmcAPI.(*zhmcclient.ZhmcManager) + return hmcManager + } + } + return hmcManager +} + +func (r *PowerHMC) GetEnergyFromLpar() (uint64, error) { + hmcManager := r.GetHMCManager() + lparURI := "api/logical-partitions/" + os.Getenv("LPAR_ID") + props := &zhmcclient.EnergyRequestPayload{ + Range: "last-day", + Resolution: "fifteen-minutes", + } + energy, _, err := hmcManager.GetEnergyDetailsforLPAR(lparURI, props) + if err != nil { + klog.V(1).Infof("Error getting energy data: %v", err.Message) + } + klog.V(1).Infof("Get energy data successfully") + return energy, err +} + +func (r *PowerHMC) GetLiveEnergyFromLpar() (uint64, error) { + hmcManager := r.GetHMCManager() + lparURI := "/api/logical-partitions/" + os.Getenv("LPAR_ID") + energy, _, err := hmcManager.GetLiveEnergyDetailsforLPAR(lparURI) + if err != nil { + klog.V(1).Infof("Error getting energy data: %v", err.Message) + } else { + klog.V(1).Infof("Get energy data successfully with power: %v", energy) + } + return energy, err +} + +func (r *PowerHMC) IsSystemCollectionSupported() bool { + return true +} + +func (r *PowerHMC) StopPower() { +} + +func (r *PowerHMC) GetEnergyFromDram() (uint64, error) { + return 0, nil +} + +func (r *PowerHMC) GetEnergyFromCore() (uint64, error) { + return 0, nil +} + +func (r *PowerHMC) 
GetEnergyFromUncore() (uint64, error) { + return 0, nil +} + +func (r *PowerHMC) GetEnergyFromPackage() (uint64, error) { + return 0, nil +} + +func (r *PowerHMC) GetNodeComponentsEnergy() map[int]source.NodeComponentsEnergy { + pkgEnergy, _ := r.GetLiveEnergyFromLpar() + pkgEnergy = pkgEnergy * 3 + coreEnergy := uint64(0) + dramEnergy := uint64(0) + uncoreEnergy := uint64(0) + componentsEnergies := make(map[int]source.NodeComponentsEnergy) + componentsEnergies[0] = source.NodeComponentsEnergy{ + Core: coreEnergy, + DRAM: dramEnergy, + Uncore: uncoreEnergy, + Pkg: pkgEnergy, + } + return componentsEnergies } -func (a *PowerHMC) StopPower() { +func (r *PowerHMC) GetPlatformEnergy() map[string]float64 { + pkgEnergy, _ := r.GetLiveEnergyFromLpar() + platformEnergies := make(map[string]float64) + platformEnergies["hmc"] = float64(pkgEnergy) * 3 + return platformEnergies } -func (a *PowerHMC) IsSystemCollectionSupported() bool { - return false +func (r *PowerHMC) IsPlatformCollectionSupported() bool { + return true } // GetEnergyFromHost returns the accumulated energy consumption -func (a *PowerHMC) GetAbsEnergyFromPlatform() (map[string]float64, error) { - power := map[string]float64{} - return power, nil +func (r *PowerHMC) GetAbsEnergyFromPlatform() (map[string]float64, error) { + pkgEnergy, _ := r.GetLiveEnergyFromLpar() + platformEnergies := make(map[string]float64) + platformEnergies["hmc"] = float64(pkgEnergy) * 3 + return platformEnergies, nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go index 48b263a166..f6e1b73bd9 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/file.go @@ -77,16 +77,16 @@ var ( // TestMode is set to true by unit tests that need "fake" cgroupfs. 
TestMode bool - cgroupFd int = -1 - prepOnce sync.Once - prepErr error - resolveFlags uint64 + cgroupRootHandle *os.File + prepOnce sync.Once + prepErr error + resolveFlags uint64 ) func prepareOpenat2() error { prepOnce.Do(func() { fd, err := unix.Openat2(-1, cgroupfsDir, &unix.OpenHow{ - Flags: unix.O_DIRECTORY | unix.O_PATH, + Flags: unix.O_DIRECTORY | unix.O_PATH | unix.O_CLOEXEC, }) if err != nil { prepErr = &os.PathError{Op: "openat2", Path: cgroupfsDir, Err: err} @@ -97,15 +97,16 @@ func prepareOpenat2() error { } return } + file := os.NewFile(uintptr(fd), cgroupfsDir) + var st unix.Statfs_t - if err = unix.Fstatfs(fd, &st); err != nil { + if err := unix.Fstatfs(int(file.Fd()), &st); err != nil { prepErr = &os.PathError{Op: "statfs", Path: cgroupfsDir, Err: err} logrus.Warnf("falling back to securejoin: %s", prepErr) return } - cgroupFd = fd - + cgroupRootHandle = file resolveFlags = unix.RESOLVE_BENEATH | unix.RESOLVE_NO_MAGICLINKS if st.Type == unix.CGROUP2_SUPER_MAGIC { // cgroupv2 has a single mountpoint and no "cpu,cpuacct" symlinks @@ -132,7 +133,7 @@ func openFile(dir, file string, flags int) (*os.File, error) { return openFallback(path, flags, mode) } - fd, err := unix.Openat2(cgroupFd, relPath, + fd, err := unix.Openat2(int(cgroupRootHandle.Fd()), relPath, &unix.OpenHow{ Resolve: resolveFlags, Flags: uint64(flags) | unix.O_CLOEXEC, @@ -140,20 +141,20 @@ func openFile(dir, file string, flags int) (*os.File, error) { }) if err != nil { err = &os.PathError{Op: "openat2", Path: path, Err: err} - // Check if cgroupFd is still opened to cgroupfsDir + // Check if cgroupRootHandle is still opened to cgroupfsDir // (happens when this package is incorrectly used // across the chroot/pivot_root/mntns boundary, or // when /sys/fs/cgroup is remounted). // // TODO: if such usage will ever be common, amend this - // to reopen cgroupFd and retry openat2. - fdStr := strconv.Itoa(cgroupFd) + // to reopen cgroupRootHandle and retry openat2. + fdStr := strconv.Itoa(int(cgroupRootHandle.Fd())) fdDest, _ := os.Readlink("/proc/self/fd/" + fdStr) if fdDest != cgroupfsDir { - // Wrap the error so it is clear that cgroupFd + // Wrap the error so it is clear that cgroupRootHandle // is opened to an unexpected/wrong directory. 
- err = fmt.Errorf("cgroupFd %s unexpectedly opened to %s != %s: %w", - fdStr, fdDest, cgroupfsDir, err) + err = fmt.Errorf("cgroupRootHandle %d unexpectedly opened to %s != %s: %w", + cgroupRootHandle.Fd(), fdDest, cgroupfsDir, err) } return nil, err } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go index 9cca98c4c0..01fe7d8e12 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs2/memory.go @@ -100,7 +100,7 @@ func statMemory(dirPath string, stats *cgroups.Stats) error { memoryUsage, err := getMemoryDataV2(dirPath, "") if err != nil { if errors.Is(err, unix.ENOENT) && dirPath == UnifiedMountpoint { - // The root cgroup does not have memory.{current,max} + // The root cgroup does not have memory.{current,max,peak} // so emulate those using data from /proc/meminfo and // /sys/fs/cgroup/memory.stat return rootStatsFromMeminfo(stats) @@ -108,10 +108,12 @@ func statMemory(dirPath string, stats *cgroups.Stats) error { return err } stats.MemoryStats.Usage = memoryUsage - swapUsage, err := getMemoryDataV2(dirPath, "swap") + swapOnlyUsage, err := getMemoryDataV2(dirPath, "swap") if err != nil { return err } + stats.MemoryStats.SwapOnlyUsage = swapOnlyUsage + swapUsage := swapOnlyUsage // As cgroup v1 reports SwapUsage values as mem+swap combined, // while in cgroup v2 swap values do not include memory, // report combined mem+swap for v1 compatibility. @@ -119,6 +121,9 @@ func statMemory(dirPath string, stats *cgroups.Stats) error { if swapUsage.Limit != math.MaxUint64 { swapUsage.Limit += memoryUsage.Limit } + // The `MaxUsage` of mem+swap cannot simply combine mem with + // swap. So set it to 0 for v1 compatibility. 
+ swapUsage.MaxUsage = 0 stats.MemoryStats.SwapUsage = swapUsage return nil @@ -133,6 +138,7 @@ func getMemoryDataV2(path, name string) (cgroups.MemoryData, error) { } usage := moduleName + ".current" limit := moduleName + ".max" + maxUsage := moduleName + ".peak" value, err := fscommon.GetCgroupParamUint(path, usage) if err != nil { @@ -152,6 +158,14 @@ func getMemoryDataV2(path, name string) (cgroups.MemoryData, error) { } memoryData.Limit = value + // `memory.peak` since kernel 5.19 + // `memory.swap.peak` since kernel 6.5 + value, err = fscommon.GetCgroupParamUint(path, maxUsage) + if err != nil && !os.IsNotExist(err) { + return cgroups.MemoryData{}, err + } + memoryData.MaxUsage = value + return memoryData, nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go index 40a81dd5a8..0d8371b05f 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go @@ -78,6 +78,8 @@ type MemoryStats struct { Usage MemoryData `json:"usage,omitempty"` // usage of memory + swap SwapUsage MemoryData `json:"swap_usage,omitempty"` + // usage of swap only + SwapOnlyUsage MemoryData `json:"swap_only_usage,omitempty"` // usage of kernel memory KernelUsage MemoryData `json:"kernel_usage,omitempty"` // usage of kernel TCP memory diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go index c1b4a0041c..6ebf5ec7b6 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go @@ -21,9 +21,9 @@ type Rlimit struct { // IDMap represents UID/GID Mappings for User Namespaces. type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` + ContainerID int64 `json:"container_id"` + HostID int64 `json:"host_id"` + Size int64 `json:"size"` } // Seccomp represents syscall restrictions diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go index 8c02848b70..51fe940748 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go @@ -1,6 +1,10 @@ package configs -import "errors" +import ( + "errors" + "fmt" + "math" +) var ( errNoUIDMap = errors.New("User namespaces enabled, but no uid mappings found.") @@ -16,11 +20,18 @@ func (c Config) HostUID(containerId int) (int, error) { if c.UidMappings == nil { return -1, errNoUIDMap } - id, found := c.hostIDFromMapping(containerId, c.UidMappings) + id, found := c.hostIDFromMapping(int64(containerId), c.UidMappings) if !found { return -1, errNoUserMap } - return id, nil + // If we are a 32-bit binary running on a 64-bit system, it's possible + // the mapped user is too large to store in an int, which means we + // cannot do the mapping. We can't just return an int64, because + // os.Setuid() takes an int. + if id > math.MaxInt { + return -1, fmt.Errorf("mapping for uid %d (host id %d) is larger than native integer size (%d)", containerId, id, math.MaxInt) + } + return int(id), nil } // Return unchanged id. 
return containerId, nil @@ -39,11 +50,18 @@ func (c Config) HostGID(containerId int) (int, error) { if c.GidMappings == nil { return -1, errNoGIDMap } - id, found := c.hostIDFromMapping(containerId, c.GidMappings) + id, found := c.hostIDFromMapping(int64(containerId), c.GidMappings) if !found { return -1, errNoGroupMap } - return id, nil + // If we are a 32-bit binary running on a 64-bit system, it's possible + // the mapped user is too large to store in an int, which means we + // cannot do the mapping. We can't just return an int64, because + // os.Setgid() takes an int. + if id > math.MaxInt { + return -1, fmt.Errorf("mapping for gid %d (host id %d) is larger than native integer size (%d)", containerId, id, math.MaxInt) + } + return int(id), nil } // Return unchanged id. return containerId, nil @@ -57,7 +75,7 @@ func (c Config) HostRootGID() (int, error) { // Utility function that gets a host ID for a container ID from user namespace map // if that ID is present in the map. -func (c Config) hostIDFromMapping(containerID int, uMap []IDMap) (int, bool) { +func (c Config) hostIDFromMapping(containerID int64, uMap []IDMap) (int64, bool) { for _, m := range uMap { if (containerID >= m.ContainerID) && (containerID <= (m.ContainerID + m.Size - 1)) { hostID := m.HostID + (containerID - m.ContainerID) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_maps.c b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_maps.c new file mode 100644 index 0000000000..84f2c6188c --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_maps.c @@ -0,0 +1,79 @@ +#define _GNU_SOURCE +#include <fcntl.h> +#include <sched.h> +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> + +/* + * All of the code here is run inside an async-signal-safe context, so we need + * to be careful to not call any functions that could cause issues. In theory, + * since we are a Go program, there are fewer restrictions in practice, it's + * better to be safe than sorry. + * + * The only exception is exit, which we need to call to make sure we don't + * return into runc. + */ + +void bail(int pipefd, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + vdprintf(pipefd, fmt, args); + va_end(args); + + exit(1); +} + +int spawn_userns_cat(char *userns_path, char *path, int outfd, int errfd) +{ + char buffer[4096] = { 0 }; + + pid_t child = fork(); + if (child != 0) + return child; + /* in child */ + + /* Join the target userns. */ + int nsfd = open(userns_path, O_RDONLY); + if (nsfd < 0) + bail(errfd, "open userns path %s failed: %m", userns_path); + + int err = setns(nsfd, CLONE_NEWUSER); + if (err < 0) + bail(errfd, "setns %s failed: %m", userns_path); + + close(nsfd); + + /* Pipe the requested file contents. 
*/ + int fd = open(path, O_RDONLY); + if (fd < 0) + bail(errfd, "open %s in userns %s failed: %m", path, userns_path); + + int nread, ntotal = 0; + while ((nread = read(fd, buffer, sizeof(buffer))) != 0) { + if (nread < 0) + bail(errfd, "read bytes from %s failed (after %d total bytes read): %m", path, ntotal); + ntotal += nread; + + int nwritten = 0; + while (nwritten < nread) { + int n = write(outfd, buffer, nread - nwritten); + if (n < 0) + bail(errfd, "write %d bytes from %s failed (after %d bytes written): %m", + nread - nwritten, path, nwritten); + nwritten += n; + } + if (nread != nwritten) + bail(errfd, "mismatch for bytes read and written: %d read != %d written", nread, nwritten); + } + + close(fd); + close(outfd); + close(errfd); + + /* We must exit here, otherwise we would return into a forked runc. */ + exit(0); +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_maps_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_maps_linux.go new file mode 100644 index 0000000000..7a8c2b023b --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/userns/userns_maps_linux.go @@ -0,0 +1,186 @@ +//go:build linux + +package userns + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "unsafe" + + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/sirupsen/logrus" +) + +/* +#include <stdlib.h> +extern int spawn_userns_cat(char *userns_path, char *path, int outfd, int errfd); +*/ +import "C" + +func parseIdmapData(data []byte) (ms []configs.IDMap, err error) { + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + var m configs.IDMap + line := scanner.Text() + if _, err := fmt.Sscanf(line, "%d %d %d", &m.ContainerID, &m.HostID, &m.Size); err != nil { + return nil, fmt.Errorf("parsing id map failed: invalid format in line %q: %w", line, err) + } + ms = append(ms, m) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("parsing id map failed: %w", err) + } + return ms, nil +} + +// Do something equivalent to nsenter --user=<nsPath> cat <path>, but more +// efficiently. Returns the contents of the requested file from within the user +// namespace. +func spawnUserNamespaceCat(nsPath string, path string) ([]byte, error) { + rdr, wtr, err := os.Pipe() + if err != nil { + return nil, fmt.Errorf("create pipe for userns spawn failed: %w", err) + } + defer rdr.Close() + defer wtr.Close() + + errRdr, errWtr, err := os.Pipe() + if err != nil { + return nil, fmt.Errorf("create error pipe for userns spawn failed: %w", err) + } + defer errRdr.Close() + defer errWtr.Close() + + cNsPath := C.CString(nsPath) + defer C.free(unsafe.Pointer(cNsPath)) + cPath := C.CString(path) + defer C.free(unsafe.Pointer(cPath)) + + childPid := C.spawn_userns_cat(cNsPath, cPath, C.int(wtr.Fd()), C.int(errWtr.Fd())) + + if childPid < 0 { + return nil, fmt.Errorf("failed to spawn fork for userns") + } else if childPid == 0 { + // this should never happen + panic("runc executing inside fork child -- unsafe state!") + } + + // We are in the parent -- close the write end of the pipe before reading. + wtr.Close() + output, err := io.ReadAll(rdr) + rdr.Close() + if err != nil { + return nil, fmt.Errorf("reading from userns spawn failed: %w", err) + } + + // Ditto for the error pipe. + errWtr.Close() + errOutput, err := io.ReadAll(errRdr) + errRdr.Close() + if err != nil { + return nil, fmt.Errorf("reading from userns spawn error pipe failed: %w", err) + } + errOutput = bytes.TrimSpace(errOutput) + + // Clean up the child. 
+ child, err := os.FindProcess(int(childPid)) + if err != nil { + return nil, fmt.Errorf("could not find userns spawn process: %w", err) + } + state, err := child.Wait() + if err != nil { + return nil, fmt.Errorf("failed to wait for userns spawn process: %w", err) + } + if !state.Success() { + errStr := string(errOutput) + if errStr == "" { + errStr = fmt.Sprintf("unknown error (status code %d)", state.ExitCode()) + } + return nil, fmt.Errorf("userns spawn: %s", errStr) + } else if len(errOutput) > 0 { + // We can just ignore weird output in the error pipe if the process + // didn't bail(), but for completeness output for debugging. + logrus.Debugf("userns spawn succeeded but unexpected error message found: %s", string(errOutput)) + } + // The subprocess succeeded, return whatever it wrote to the pipe. + return output, nil +} + +func GetUserNamespaceMappings(nsPath string) (uidMap, gidMap []configs.IDMap, err error) { + var ( + pid int + extra rune + tryFastPath bool + ) + + // nsPath is usually of the form /proc//ns/user, which means that we + // already have a pid that is part of the user namespace and thus we can + // just use the pid to read from /proc//*id_map. + // + // Note that Sscanf doesn't consume the whole input, so we check for any + // trailing data with %c. That way, we can be sure the pattern matched + // /proc/$pid/ns/user _exactly_ iff n === 1. + if n, _ := fmt.Sscanf(nsPath, "/proc/%d/ns/user%c", &pid, &extra); n == 1 { + tryFastPath = pid > 0 + } + + for _, mapType := range []struct { + name string + idMap *[]configs.IDMap + }{ + {"uid_map", &uidMap}, + {"gid_map", &gidMap}, + } { + var mapData []byte + + if tryFastPath { + path := fmt.Sprintf("/proc/%d/%s", pid, mapType.name) + data, err := os.ReadFile(path) + if err != nil { + // Do not error out here -- we need to try the slow path if the + // fast path failed. + logrus.Debugf("failed to use fast path to read %s from userns %s (error: %s), falling back to slow userns-join path", mapType.name, nsPath, err) + } else { + mapData = data + } + } else { + logrus.Debugf("cannot use fast path to read %s from userns %s, falling back to slow userns-join path", mapType.name, nsPath) + } + + if mapData == nil { + // We have to actually join the namespace if we cannot take the + // fast path. The path is resolved with respect to the child + // process, so just use /proc/self. + data, err := spawnUserNamespaceCat(nsPath, "/proc/self/"+mapType.name) + if err != nil { + return nil, nil, err + } + mapData = data + } + idMap, err := parseIdmapData(mapData) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse %s of userns %s: %w", mapType.name, nsPath, err) + } + *mapType.idMap = idMap + } + + return uidMap, gidMap, nil +} + +// IsSameMapping returns whether or not the two id mappings are the same. Note +// that if the order of the mappings is different, or a mapping has been split, +// the mappings will be considered different. 
+func IsSameMapping(a, b []configs.IDMap) bool { + if len(a) != len(b) { + return false + } + for idx := range a { + if a[idx] != b[idx] { + return false + } + } + return true +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go index 220d0b4393..bf3237a291 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/utils/utils_unix.go @@ -7,6 +7,7 @@ import ( "fmt" "os" "strconv" + _ "unsafe" // for go:linkname "golang.org/x/sys/unix" ) @@ -23,9 +24,11 @@ func EnsureProcHandle(fh *os.File) error { return nil } -// CloseExecFrom applies O_CLOEXEC to all file descriptors currently open for -// the process (except for those below the given fd value). -func CloseExecFrom(minFd int) error { +type fdFunc func(fd int) + +// fdRangeFrom calls the passed fdFunc for each file descriptor that is open in +// the current process. +func fdRangeFrom(minFd int, fn fdFunc) error { fdDir, err := os.Open("/proc/self/fd") if err != nil { return err @@ -50,15 +53,60 @@ func CloseExecFrom(minFd int) error { if fd < minFd { continue } - // Intentionally ignore errors from unix.CloseOnExec -- the cases where - // this might fail are basically file descriptors that have already - // been closed (including and especially the one that was created when - // os.ReadDir did the "opendir" syscall). - unix.CloseOnExec(fd) + // Ignore the file descriptor we used for readdir, as it will be closed + // when we return. + if uintptr(fd) == fdDir.Fd() { + continue + } + // Run the closure. + fn(fd) } return nil } +// CloseExecFrom sets the O_CLOEXEC flag on all file descriptors greater or +// equal to minFd in the current process. +func CloseExecFrom(minFd int) error { + return fdRangeFrom(minFd, unix.CloseOnExec) +} + +//go:linkname runtime_IsPollDescriptor internal/poll.IsPollDescriptor + +// In order to make sure we do not close the internal epoll descriptors the Go +// runtime uses, we need to ensure that we skip descriptors that match +// "internal/poll".IsPollDescriptor. Yes, this is a Go runtime internal thing, +// unfortunately there's no other way to be sure we're only keeping the file +// descriptors the Go runtime needs. Hopefully nothing blows up doing this... +func runtime_IsPollDescriptor(fd uintptr) bool //nolint:revive + +// UnsafeCloseFrom closes all file descriptors greater or equal to minFd in the +// current process, except for those critical to Go's runtime (such as the +// netpoll management descriptors). +// +// NOTE: That this function is incredibly dangerous to use in most Go code, as +// closing file descriptors from underneath *os.File handles can lead to very +// bad behaviour (the closed file descriptor can be re-used and then any +// *os.File operations would apply to the wrong file). This function is only +// intended to be called from the last stage of runc init. +func UnsafeCloseFrom(minFd int) error { + // We must not close some file descriptors. + return fdRangeFrom(minFd, func(fd int) { + if runtime_IsPollDescriptor(uintptr(fd)) { + // These are the Go runtimes internal netpoll file descriptors. + // These file descriptors are operated on deep in the Go scheduler, + // and closing those files from underneath Go can result in panics. + // There is no issue with keeping them because they are not + // executable and are not useful to an attacker anyway. Also we + // don't have any choice. 
+ return + } + // There's nothing we can do about errors from close(2), and the + // only likely error to be seen is EBADF which indicates the fd was + // already closed (in which case, we got what we wanted). + _ = unix.Close(fd) + }) +} + // NewSockPair returns a new unix socket pair func NewSockPair(name string) (parent *os.File, child *os.File, err error) { fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0) diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/LICENSE b/vendor/github.com/zhmcclient/golang-zhmcclient/LICENSE new file mode 100644 index 0000000000..68c771a099 --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
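Usage note for the new platform source added in pkg/sensors/platform/source/hmc.go earlier in this patch: the PowerHMC collector is configured entirely through environment variables (HMC_ENDPOINT, HMC_USERNAME, HMC_PASSWORD, CA_CERT or SKIP_CERT, and LPAR_ID). The sketch below is illustrative only and is not part of the patch itself; the endpoint, credentials, and LPAR identifier are placeholder values, and it assumes a reachable HMC, since GetPlatformEnergy ultimately issues a live metrics request for the configured LPAR.

package main

import (
	"fmt"
	"os"

	"github.com/sustainable-computing-io/kepler/pkg/sensors/platform/source"
)

func main() {
	// Connection settings read by PowerHMC.GetHMCManager(); all values here are placeholders.
	os.Setenv("HMC_ENDPOINT", "https://hmc.example.com:6794")
	os.Setenv("HMC_USERNAME", "hmc-user")
	os.Setenv("HMC_PASSWORD", "hmc-password")
	os.Setenv("SKIP_CERT", "true")           // or set CA_CERT to a PEM bundle instead
	os.Setenv("LPAR_ID", "<lpar-object-id>") // the logical partition whose power is queried

	hmc := &source.PowerHMC{}
	if hmc.IsPlatformCollectionSupported() {
		// Per this patch, GetPlatformEnergy reports the live LPAR power reading
		// scaled by 3 under the "hmc" key.
		fmt.Println(hmc.GetPlatformEnergy())
	}
}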
+ diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/adapter.go b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/adapter.go new file mode 100644 index 0000000000..d97ba3a17d --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/adapter.go @@ -0,0 +1,307 @@ +/* + * ============================================================================= + * IBM Confidential + * © Copyright IBM Corp. 2020, 2021 + * + * The source code for this program is not published or otherwise divested of + * its trade secrets, irrespective of what has been deposited with the + * U.S. Copyright Office. + * ============================================================================= + */ + +package zhmcclient + +import ( + "encoding/json" + "fmt" + "net/http" + "path" + + "go.uber.org/zap" +) + +// AdapterAPI defines an interface for issuing Adapter requests to ZHMC +// +//go:generate counterfeiter -o fakes/adapter.go --fake-name AdapterAPI . AdapterAPI +type AdapterAPI interface { + ListAdapters(cpcURI string, query map[string]string) ([]Adapter, int, *HmcError) + GetAdapterProperties(adapterURI string) (*AdapterProperties, int, *HmcError) + GetNetworkAdapterPortProperties(networkAdapterPortURI string) (*NetworkAdapterPort, int, *HmcError) + GetStorageAdapterPortProperties(storageAdapterPortURI string) (*StorageAdapterPort, int, *HmcError) + CreateHipersocket(cpcURI string, adaptor *HipersocketPayload) (string, int, *HmcError) + DeleteHipersocket(adapterURI string) (int, *HmcError) +} + +type AdapterManager struct { + client ClientAPI +} + +func NewAdapterManager(client ClientAPI) *AdapterManager { + return &AdapterManager{ + client: client, + } +} + +/** +* GET /api/cpcs/{cpc-id}/adapters +* @cpcURI the ID of the CPC +* @query the fields can be queried include: +* name, +* adapter-id, +* adapter-family, +* type, +* status +* @return adapter array +* Return: 200 and Adapters array +* or: 400, 404, 409 + */ +func (m *AdapterManager) ListAdapters(cpcURI string, query map[string]string) ([]Adapter, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, cpcURI, "/adapters") + requestUrl = BuildUrlFromQuery(requestUrl, query) + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error on listing adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + logger.Info(fmt.Sprintf("Response: listing adapters, request url: %v, method: %v, status: %v", requestUrl, http.MethodGet, status)) + + if status == http.StatusOK { + adapters := &AdaptersArray{} + err := json.Unmarshal(responseBody, adapters) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + return adapters.ADAPTERS, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error listing adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), 
+ zap.Error(fmt.Errorf("%v", errorResponseBody))) + + return nil, status, errorResponseBody +} + +/** +* GET /api/adapters/{adapter-id} +* GET /api/adapters/{adapter-id}/network-ports/{network-port-id} +* @adapterURI the adapter ID, network-port-id for which properties need to be fetched +* @return adapter properties +* Return: 200 and Adapters properties +* or: 400, 404, 409 + */ +func (m *AdapterManager) GetAdapterProperties(adapterURI string) (*AdapterProperties, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, adapterURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error on getting adapter properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + + if status == http.StatusOK { + adapterProps := &AdapterProperties{} + err := json.Unmarshal(responseBody, adapterProps) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, adapters: %v", requestUrl, http.MethodGet, status, adapterProps)) + return adapterProps, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error getting adapter properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return nil, status, errorResponseBody +} + +/** +* GET /api/adapters/{adapter-id}/storage-ports/{storage-port-id} +* @adapterURI the adapter ID, storage-port-id for which properties need to be fetched +* @return storage port properties +* Return: 200 and StorageAdapterPort +* or: 400, 404, 409 + */ +func (m *AdapterManager) GetStorageAdapterPortProperties(storageAdapterPortURI string) (*StorageAdapterPort, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, storageAdapterPortURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error on getting storage port properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + + if status == http.StatusOK { + portProps := &StorageAdapterPort{} + err := json.Unmarshal(responseBody, portProps) + if err != nil { + logger.Error("error on unmarshalling storage port", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, storage port properties: %v", requestUrl, 
http.MethodGet, status, portProps)) + return portProps, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error getting storage port properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return nil, status, errorResponseBody +} + +/** +* GET /api/adapters/{adapter-id}/network-ports/{network-port-id} +* @adapterURI the adapter ID, network-port-id for which properties need to be fetched +* @return network port properties +* Return: 200 and NetworkAdapterPort +* or: 400, 404, 409 + */ +func (m *AdapterManager) GetNetworkAdapterPortProperties(networkAdapterPortURI string) (*NetworkAdapterPort, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, networkAdapterPortURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error on getting network port properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + + if status == http.StatusOK { + portProps := &NetworkAdapterPort{} + err := json.Unmarshal(responseBody, portProps) + if err != nil { + logger.Error("error on unmarshalling network port", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, network port properties: %v", requestUrl, http.MethodGet, status, portProps)) + return portProps, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error getting network port properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return nil, status, errorResponseBody +} + +/** +* POST /api/cpcs/{cpc-id}/adapters +* @cpcURI the ID of the CPC +* @adaptor the payload includes properties when create Hipersocket +* Return: 201 and body with "object-uri" +* or: 400, 403, 404, 409, 503 + */ +func (m *AdapterManager) CreateHipersocket(cpcURI string, adaptor *HipersocketPayload) (string, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, cpcURI, "/adapters") + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v, Parameter: %v", requestUrl, http.MethodPost, adaptor)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, adaptor, "") + if err != nil { + logger.Error("error creating hipersocket", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return "", status, err + } + + if status == http.StatusCreated { + uriObj := HipersocketCreateResponse{} + err := json.Unmarshal(responseBody, &uriObj) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + 
zap.String("method", http.MethodPost), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return "", status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: hiper socket created, request url: %v, method: %v, status: %v, hipersocket uri: %v", requestUrl, http.MethodPost, status, uriObj.URI)) + + return uriObj.URI, status, nil + } + + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error creating hipersocket", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return "", status, errorResponseBody +} + +/** +* DELETE /api/adapters/{adapter-id} +* @adapterURI the adapter ID to be deleted +* Return: 204 +* or: 400, 403, 404, 409, 503 + */ +func (m *AdapterManager) DeleteHipersocket(adapterURI string) (int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, adapterURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodDelete)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodDelete, requestUrl, nil, "") + if err != nil { + logger.Error("error deleting hipersocket", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodDelete), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return status, err + } + + if status == http.StatusNoContent { + logger.Info(fmt.Sprintf("Response: hipersocket deleted, request url: %v, method: %v, status: %v", requestUrl, http.MethodDelete, status)) + return status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error deleting hipersocket", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodDelete), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return status, errorResponseBody +} diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/client.go b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/client.go new file mode 100644 index 0000000000..b426ac467c --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/client.go @@ -0,0 +1,633 @@ +// Copyright 2021-2023 IBM Corp. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zhmcclient + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "path" + "strings" + + "net/http" + "net/http/httputil" + + "net/url" + + "go.uber.org/zap" +) + +func NewZapLogger() Logger { + zapLogger, _ := zap.NewProduction() + return zapLogger +} + +var logger = NewZapLogger() + +/* + ClientAPI defines an interface for issuing client requests to ZHMC +*/ + +//go:generate counterfeiter -o fakes/client.go --fake-name ClientAPI . 
ClientAPI +type ClientAPI interface { + CloneEndpointURL() *url.URL + TraceOn(outputStream io.Writer) + TraceOff() + SetSkipCertVerify(isSkipCert bool) + Logon() *HmcError + Logoff() *HmcError + GetMetricsContext() *MetricsContextDef + LogonConsole() (string, int, *HmcError) + LogoffConsole(sessionID string) *HmcError + IsLogon(verify bool) bool + ExecuteRequest(requestType string, url *url.URL, requestData interface{}, sessionID string) (responseStatusCode int, responseBodyStream []byte, err *HmcError) + UploadRequest(requestType string, url *url.URL, requestData []byte) (responseStatusCode int, responseBodyStream []byte, err *HmcError) +} + +// HTTP Client interface required for unit tests +type HTTPClient interface { + Do(request *http.Request) (*http.Response, error) +} + +const ( + SESSION_HEADER_NAME = "X-API-Session" + minHMCMetricsSampleInterval = 15 //seconds + metricsContextCreationURI = "/api/services/metrics/context" +) + +type Options struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Trace bool `json:"trace,omitempty"` + CaCert string `json:"ca-cert,omitempty"` + SkipCert bool `json:"skip-cert,omitempty"` +} + +type LogonData struct { + Userid string `json:"userid"` + Password string `json:"password"` +} + +type ChangePasswordData struct { + Userid string `json:"userid"` + Password string `json:"password"` + NewPassword string `json:"new-password"` +} + +// TODO, Use cache and use JobTopic, ObjectTopic to update cache +type Session struct { + MajorVersion int `json:"api-major-version,omitempty"` + MinorVersion int `json:"api-minor-version,omitempty"` + SessionID string `json:"api-session,omitempty"` + JobTopic string `json:"job-notification-topic,omitempty"` + ObjectTopic string `json:"notification-topic,omitempty"` + Expires int `json:"password-expires,omitempty"` + Credential string `json:"session-credential,omitempty"` +} + +type Client struct { + endpointURL *url.URL + httpClient *http.Client + logondata *LogonData + session *Session + metricsContext *MetricsContextDef + isSkipCert bool + isTraceEnabled bool + traceOutput io.Writer +} + +func newClientStruct(endpoint string, opts *Options) (*Client, *HmcError) { + tslConfig, err := SetCertificate(opts, &tls.Config{}) + if err != nil { + return nil, err + } + tslConfig.InsecureSkipVerify = opts.SkipCert + transport := &http.Transport{ + Dial: (&net.Dialer{ + Timeout: DEFAULT_DIAL_TIMEOUT, + }).Dial, + TLSClientConfig: tslConfig, + TLSHandshakeTimeout: DEFAULT_HANDSHAKE_TIMEOUT, + } + + httpclient := &http.Client{ + Timeout: DEFAULT_CONNECT_TIMEOUT, + Transport: transport, + } + + endpointurl, err := GetEndpointURLFromString(endpoint) + if err != nil { + return nil, err + } + + client := &Client{ + endpointURL: endpointurl, + httpClient: httpclient, + logondata: &LogonData{ + Userid: opts.Username, + Password: opts.Password, + }, + } + return client, nil +} + +func NewClient(endpoint string, opts *Options, l Logger) (ClientAPI, *HmcError) { + + if l != nil { + logger = l + } + + client, err := newClientStruct(endpoint, opts) + if err != nil { + return nil, err + } + + err = client.Logon() + if err != nil { + return nil, err + } + + if opts.Trace { + client.TraceOn(nil) + } else { + client.TraceOff() + } + + client.SetSkipCertVerify(opts.SkipCert) + + return client, nil +} + +func GetEndpointURLFromString(endpoint string) (*url.URL, *HmcError) { + logger.Info(fmt.Sprintf("endpoint: %v", endpoint)) + + if !strings.HasPrefix(strings.ToLower(endpoint), "https") { + return nil, 
getHmcErrorFromMsg(ERR_CODE_HMC_INVALID_URL, ERR_MSG_INSECURE_URL) + } + + url, err := url.Parse(endpoint) + if err != nil { + return nil, getHmcErrorFromErr(ERR_CODE_HMC_INVALID_URL, err) + } + return url, nil +} + +func IsExpectedHttpStatus(status int) bool { + for _, httpStatus := range KNOWN_SUCCESS_STATUS { + if httpStatus == status { + return true + } + } + return false +} + +func NeedLogon(status, reason int) bool { + if status == http.StatusUnauthorized { + return true + } + + if status == http.StatusForbidden { + if reason == 4 || reason == 5 { + return true + } + } + return false +} + +/** +* make a copy of the URL as it may be changed. + */ +func (c *Client) CloneEndpointURL() *url.URL { + var url *url.URL + if c.endpointURL == nil { + return url + } + url, _ = url.Parse(c.endpointURL.String()) + return url +} + +func (c *Client) TraceOn(outputStream io.Writer) { + if outputStream == nil { + outputStream = os.Stdout + } + c.traceOutput = outputStream + c.isTraceEnabled = true +} + +func (c *Client) TraceOff() { + c.traceOutput = os.Stdout + c.isTraceEnabled = false +} + +// TODO, check cert when request/logon +func (c *Client) SetSkipCertVerify(isSkipCert bool) { + c.isSkipCert = isSkipCert +} + +func (c *Client) clearSession() { + c.session = nil +} + +func (c *Client) Logon() *HmcError { + c.clearSession() + url := c.CloneEndpointURL() + if url == nil { + return &HmcError{Reason: int(ERR_CODE_HMC_INVALID_URL), Message: ERR_MSG_EMPTY_JOB_URI} + } + url.Path = path.Join(url.Path, "/api/sessions") + + status, responseBody, hmcErr := c.executeMethod(http.MethodPost, url.String(), c.logondata, "") + + if hmcErr != nil { + return hmcErr + } + + if status == http.StatusOK || status == http.StatusCreated { + session := &Session{} + err := json.Unmarshal(responseBody, session) + if err != nil { + return getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + c.session = session + metricsGroupList := []string{ + "logical-partition-usage", + } + if err = c.createMetricsContext(metricsGroupList); err != nil { + return getHmcErrorFromErr(ERR_CODE_HMC_CREATE_METRICS_CTX_FAIL, err) + } + return nil + } + + return GenerateErrorFromResponse(responseBody) +} + +// login and change password, then end session +func ChangePassword(endpoint string, opts *Options, newPassword string) *HmcError { + c, err := newClientStruct(endpoint, opts) + if err != nil { + return err + } + + c.clearSession() + url := c.CloneEndpointURL() + if url == nil { + return &HmcError{Reason: int(ERR_CODE_HMC_INVALID_URL), Message: ERR_MSG_EMPTY_JOB_URI} + } + url.Path = path.Join(url.Path, "/api/sessions") + + changePasswordData := ChangePasswordData{ + Userid: c.logondata.Userid, + Password: c.logondata.Password, + NewPassword: newPassword, + } + + status, responseBody, hmcErr := c.executeMethod(http.MethodPost, url.String(), changePasswordData, "") + + defer c.Logoff() + + if hmcErr != nil { + return hmcErr + } + + if status == http.StatusOK || status == http.StatusCreated { + session := &Session{} + err := json.Unmarshal(responseBody, session) + if err != nil { + return getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + c.session = session + return nil + } + + return GenerateErrorFromResponse(responseBody) +} + +func (c *Client) LogonConsole() (sessionID string, status int, err *HmcError) { + url := c.CloneEndpointURL() + url.Path = path.Join(url.Path, "/api/sessions") + + status, responseBody, err := c.executeMethod(http.MethodPost, url.String(), c.logondata, "") + if err != nil { + return "", status, err + } 
+ if status == http.StatusOK || status == http.StatusCreated { + session := &Session{} + err := json.Unmarshal(responseBody, session) + if err != nil { + return "", status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + return session.SessionID, status, nil + } + + return "", status, GenerateErrorFromResponse(responseBody) +} + +func (c *Client) LogoffConsole(sessionID string) *HmcError { + url := c.CloneEndpointURL() + url.Path = path.Join(url.Path, "/api/sessions/this-session") + + status, responseBody, err := c.executeMethod(http.MethodDelete, url.String(), nil, sessionID) + if err != nil { + return err + } + if status == http.StatusNoContent { + return nil + } + return GenerateErrorFromResponse(responseBody) +} + +func (c *Client) Logoff() *HmcError { + url := c.CloneEndpointURL() + url.Path = path.Join(url.Path, "/api/sessions/this-session") + + status, responseBody, err := c.executeMethod(http.MethodDelete, url.String(), nil, "") + if err != nil { + return err + } + if status == http.StatusNoContent { + c.clearSession() + return nil + } + return GenerateErrorFromResponse(responseBody) +} + +func (c *Client) IsLogon(verify bool) bool { + if verify { + url := c.CloneEndpointURL() + if url == nil { + return false + } + url.Path = path.Join(url.Path, "/api/console") + + status, _, err := c.executeMethod(http.MethodGet, url.String(), nil, "") + if err != nil { + return false + } else if status == http.StatusOK || status == http.StatusBadRequest { + return true + } + return false + } + + if c.session != nil && c.session.SessionID != "" { + return true + } + return false +} + +func (c *Client) setUserAgent(req *http.Request) { + req.Header.Set("User-Agent", libraryUserAgent) +} + +func (c *Client) setRequestHeaders(req *http.Request, bodyType, sessionID string) { + c.setUserAgent(req) + req.Header.Add("Content-Type", bodyType) + req.Header.Add("Accept", "*/*") + if sessionID != "" { + req.Header.Add(SESSION_HEADER_NAME, sessionID) + } else if c.session != nil && c.session.SessionID != "" { + req.Header.Add(SESSION_HEADER_NAME, c.session.SessionID) + } +} + +func SetCertificate(opts *Options, tlsConfig *tls.Config) (*tls.Config, *HmcError) { + if !opts.SkipCert { + //Read root CA bundle in PEM format + cert, err := ioutil.ReadFile(opts.CaCert) + if err != nil { + return nil, getHmcErrorFromErr(ERR_CODE_HMC_READ_RESPONSE_FAIL, err) + } + if err != nil { + return nil, getHmcErrorFromErr(ERR_CODE_HMC_BAD_REQUEST, err) + } + /* + Read certs for PEM CA bundle and append rootCA cert pool of the TLS config for validation + */ + certPool := x509.NewCertPool() + certPool.AppendCertsFromPEM(cert) + tlsConfig.RootCAs = certPool + } + return tlsConfig, nil +} + +func (c *Client) UploadRequest(requestType string, url *url.URL, requestData []byte) (responseStatusCode int, responseBodyStream []byte, err *HmcError) { + retries := DEFAULT_CONNECT_RETRIES + responseStatusCode, responseBodyStream, err = c.executeUpload(requestType, url.String(), requestData) + if NeedLogon(responseStatusCode, GenerateErrorFromResponse(responseBodyStream).Reason) { + c.Logon() + c.executeUpload(requestType, url.String(), requestData) + } + if IsExpectedHttpStatus(responseStatusCode) { + return + } + + for retries > 0 { + if IsExpectedHttpStatus(responseStatusCode) { + break + } else { + logger.Info(fmt.Sprintf("Retry upload... 
%d", retries)) + responseStatusCode, responseBodyStream, err = c.executeUpload(requestType, url.String(), requestData) + retries -= 1 + } + } + return responseStatusCode, responseBodyStream, err +} + +func (c *Client) ExecuteRequest(requestType string, url *url.URL, requestData interface{}, sessionID string) (responseStatusCode int, responseBodyStream []byte, err *HmcError) { + var ( + retries int + ) + + if requestType == http.MethodGet { + retries = DEFAULT_READ_RETRIES + + } else { + retries = DEFAULT_CONNECT_RETRIES + } + + responseStatusCode, responseBodyStream, err = c.executeMethod(requestType, url.String(), requestData, sessionID) + if NeedLogon(responseStatusCode, GenerateErrorFromResponse(responseBodyStream).Reason) { + c.Logon() + responseStatusCode, responseBodyStream, err = c.executeMethod(requestType, url.String(), requestData, sessionID) + } + if IsExpectedHttpStatus(responseStatusCode) { // Known error, don't retry + return responseStatusCode, responseBodyStream, err + } + + for retries > 0 { + responseStatusCode, responseBodyStream, err = c.executeMethod(requestType, url.String(), requestData, sessionID) + if IsExpectedHttpStatus(responseStatusCode) || err == nil { // 2. Known error, don't retry + break + } else { // 3. Retry + retries -= 1 + } + } + return responseStatusCode, responseBodyStream, err +} + +func (c *Client) executeMethod(requestType string, urlStr string, requestData interface{}, sessionID string) (responseStatusCode int, responseBodyStream []byte, hmcErr *HmcError) { + var requestBody []byte + var err error + + if requestData != nil { + requestBody, err = json.Marshal(requestData) + if err != nil { + return -1, nil, getHmcErrorFromErr(ERR_CODE_HMC_MARSHAL_FAIL, err) + } + } + + request, err := http.NewRequest(requestType, urlStr, bytes.NewBuffer(requestBody)) + if err != nil { + return -1, nil, getHmcErrorFromErr(ERR_CODE_HMC_BAD_REQUEST, err) + } + + c.setRequestHeaders(request, APPLICATION_BODY_JSON, sessionID) + + response, err := c.httpClient.Do(request) + + if err != nil { + return -1, nil, getHmcErrorFromErr(ERR_CODE_HMC_EXECUTE_FAIL, err) + } + if response == nil { + return -1, nil, getHmcErrorFromMsg(ERR_CODE_HMC_EMPTY_RESPONSE, ERR_MSG_EMPTY_RESPONSE) + } + + defer response.Body.Close() + + responseBodyStream, err = ioutil.ReadAll(response.Body) + if err != nil { + return -1, nil, getHmcErrorFromErr(ERR_CODE_HMC_READ_RESPONSE_FAIL, err) + } + + if c.isTraceEnabled { + err = c.traceHTTP(request, response) + if err != nil { + return response.StatusCode, nil, getHmcErrorFromErr(ERR_CODE_HMC_TRACE_REQUEST_FAIL, err) + } + } + + return response.StatusCode, responseBodyStream, nil +} + +func (c *Client) executeUpload(requestType string, urlStr string, requestBody []byte) (responseStatusCode int, responseBodyStream []byte, hmcErr *HmcError) { + + request, err := http.NewRequest(requestType, urlStr, bytes.NewReader(requestBody)) + if err != nil { + return -1, nil, getHmcErrorFromErr(ERR_CODE_HMC_BAD_REQUEST, err) + } + + c.setRequestHeaders(request, APPLICATION_BODY_OCTET_STREAM, "") + + response, err := c.httpClient.Do(request) + + if response == nil { + return -1, nil, getHmcErrorFromMsg(ERR_CODE_HMC_EMPTY_RESPONSE, ERR_MSG_EMPTY_RESPONSE) + } + + if err != nil { + return -1, nil, getHmcErrorFromErr(ERR_CODE_HMC_EXECUTE_FAIL, err) + } + + defer response.Body.Close() + + responseBodyStream, err = ioutil.ReadAll(response.Body) + if err != nil { + return -1, nil, getHmcErrorFromErr(ERR_CODE_HMC_READ_RESPONSE_FAIL, err) + } + + if c.isTraceEnabled { + err = 
c.traceHTTP(request, response) + if err != nil { + return response.StatusCode, nil, getHmcErrorFromErr(ERR_CODE_HMC_TRACE_REQUEST_FAIL, err) + } + } + + return response.StatusCode, responseBodyStream, nil +} + +func (c *Client) traceHTTP(req *http.Request, resp *http.Response) error { + _, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------") + if err != nil { + return err + } + + reqTrace, err := httputil.DumpRequestOut(req, false) + if err != nil { + return err + } + + _, err = fmt.Fprint(c.traceOutput, string(reqTrace)) + if err != nil { + return err + } + + var respTrace []byte + if resp.StatusCode != http.StatusOK && + resp.StatusCode != http.StatusPartialContent && + resp.StatusCode != http.StatusNoContent { + respTrace, err = httputil.DumpResponse(resp, true) + if err != nil { + return err + } + } else { + respTrace, err = httputil.DumpResponse(resp, false) + if err != nil { + return err + } + } + + _, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) + if err != nil { + return err + } + + _, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------") + if err != nil { + return err + } + + return nil +} + +// createMetricsContext creates a "Metrics Context" resource in the HMC. +// A metrics context is the definition of a set of metrics that will be collected. +func (c *Client) createMetricsContext(metricsGroups []string) error { + + reqBody := make(map[string]interface{}) + reqBody["anticipated-frequency-seconds"] = minHMCMetricsSampleInterval + reqBody["metric-groups"] = metricsGroups + + requestUrl := c.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, metricsContextCreationURI) + _, mcResp, err := c.ExecuteRequest(http.MethodPost, requestUrl, reqBody, "") + + if err != nil { + return err + } + + c.metricsContext, _ = newMetricsContext(mcResp) + logger.Info("Create 'MetricsContext'") + + return nil +} + +func (c *Client) GetMetricsContext() *MetricsContextDef { + return c.metricsContext +} diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/constants.go b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/constants.go new file mode 100644 index 0000000000..45d4bc00cd --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/constants.go @@ -0,0 +1,60 @@ +// Copyright 2021-2023 IBM Corp. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zhmcclient + +import ( + "net/http" + "runtime" + "time" +) + +// Global constants.
+const ( + libraryName = "zhmcclient" + libraryVersion = "v0.1" + libraryUserAgentPrefix = "ZHMC (" + runtime.GOOS + "; " + runtime.GOARCH + ") " + libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion + + HMC_DEFAULT_PORT = "6794" + + DEFAULT_READ_RETRIES = 0 + DEFAULT_CONNECT_RETRIES = 3 + + DEFAULT_DIAL_TIMEOUT = 10 * time.Second + DEFAULT_HANDSHAKE_TIMEOUT = 10 * time.Second + DEFAULT_CONNECT_TIMEOUT = 90 * time.Second + DEFAULT_READ_TIMEOUT = 3600 * time.Second + DEFAULT_MAX_REDIRECTS = 30 * time.Second + DEFAULT_OPERATION_TIMEOUT = 3600 * time.Second + DEFAULT_STATUS_TIMEOUT = 900 * time.Second + + APPLICATION_BODY_JSON = "application/json" + APPLICATION_BODY_OCTET_STREAM = "application/octet-stream" +) + +// List of success statuses. +var KNOWN_SUCCESS_STATUS = []int{ + http.StatusOK, // 200 + http.StatusCreated, // 201 + http.StatusAccepted, // 202 + http.StatusNoContent, // 204 + http.StatusPartialContent, // 206 + http.StatusBadRequest, // 400 + //http.StatusForbidden, // 403, 403 is usually caused by an expired session header, so we need to handle it separately + http.StatusNotFound, // 404 + http.StatusConflict, // 409 + http.StatusInternalServerError, // 500 + http.StatusServiceUnavailable, // 503 +} diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/cpc.go b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/cpc.go new file mode 100644 index 0000000000..0811aba025 --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/cpc.go @@ -0,0 +1,130 @@ +// Copyright 2021-2023 IBM Corp. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zhmcclient + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "path" + + "go.uber.org/zap" +) + +// CpcAPI defines an interface for issuing CPC requests to ZHMC +// +//go:generate counterfeiter -o fakes/cpc.go --fake-name CpcAPI .
CpcAPI +type CpcAPI interface { + ListCPCs(query map[string]string) ([]CPC, int, *HmcError) + GetCPCProperties(cpcURI string) (*CPCProperties, int, *HmcError) +} + +type CpcManager struct { + client ClientAPI +} + +func NewCpcManager(client ClientAPI) *CpcManager { + return &CpcManager{ + client: client, + } +} + +/** +* GET /api/cpcs +* @query is a key, value pairs, currently, supports 'name=$name_reg_expression' +* Return: 200 and CPCs array +* or: 400 + */ +func (m *CpcManager) ListCPCs(query map[string]string) ([]CPC, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, "/api/cpcs") + requestUrl = BuildUrlFromQuery(requestUrl, query) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error on listing cpc's", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v", requestUrl, http.MethodGet, status)) + if status == http.StatusOK { + cpcs := &CpcsArray{} + err := json.Unmarshal(responseBody, &cpcs) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + return cpcs.CPCS, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error on listing cpc's", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(errors.New(errorResponseBody.Message))) + + return nil, status, errorResponseBody +} + +/** +* GET /api/cpcs/{cpc-id} +* @return cpc properties +* Return: 200 and Cpcs properties +* or: 400, 404, 409 + */ +func (m *CpcManager) GetCPCProperties(cpcURI string) (*CPCProperties, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, cpcURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error on getting cpc properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + + if status == http.StatusOK { + cpcProps := &CPCProperties{} + err := json.Unmarshal(responseBody, cpcProps) + if err != nil { + logger.Error("error on unmarshalling cpcs", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, cpcs: %v", requestUrl, http.MethodGet, status, cpcProps)) + return cpcProps, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error getting cpc properties", + zap.String("request 
url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return nil, status, errorResponseBody +} diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/data.pem b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/data.pem new file mode 100644 index 0000000000..c970856ee1 --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/data.pem @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID5TCCAs2gAwIBAgIBFDANBgkqhkiG9w0BAQsFADBiMQswCQYDVQQGEwJVUzE0 +MDIGA1UEChMrSW50ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3Jh +dGlvbjEdMBsGA1UEAxMUSUJNIEludGVybmFsIFJvb3QgQ0EwHhcNMTYwMjI0MDUw +MDAwWhcNMzUwMTAzMDQ1OTU5WjBiMQswCQYDVQQGEwJVUzE0MDIGA1UEChMrSW50 +ZXJuYXRpb25hbCBCdXNpbmVzcyBNYWNoaW5lcyBDb3Jwb3JhdGlvbjEdMBsGA1UE +AxMUSUJNIEludGVybmFsIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDUKGuk9Tmri43R3SauS7gY9rQ9DXvRwklnbW+3Ts8/Meb4MPPxezdE +cqVJtHVc3kinDpzVMeKJXlB8CABBpxMBSLApmIQywEKoVd0H0w62Yc3rYuhv03iY +y6OozBV0BL6tzZE0UbvtLGuAQXMZ7ehzxqIta85JjfFN86AO2u7xrNF0FYyGH+E0 +Rn6yNhb25VrqxE0OYbSMIGoWdvS11K4SgVDqrJ9OqIk8NHrIJ8Ed24P/YPMeAp3j +U409Gev1zGcuLdRr09WckQ145FZVDbPq42gcl7qYICPhZ4/eDUUjFgxpipfMGkMb +1X+Y3kFDgb4BO8Xrdda2VQo1iDZs8A8bAgMBAAGjgaUwgaIwPwYJYIZIAYb4QgEN +BDIWMEdlbmVyYXRlZCBieSB0aGUgU2VjdXJpdHkgU2VydmVyIGZvciB6L09TIChS +QUNGKTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU ++d4Y5Z4wE2lRp/15hUiMfA5v2OMwHwYDVR0jBBgwFoAU+d4Y5Z4wE2lRp/15hUiM +fA5v2OMwDQYJKoZIhvcNAQELBQADggEBAH87Ms8yFyAb9nXesaKjTHksLi1VKe2i +zESWozYFXnRtOgOW7/0xXcfK+7PW6xwcOqvTk61fqTGxj+iRyZf2e3FNtIB+T/Lg +3SZF9sztPM0jEUELWycC8l6WPTvzQjZZBCsF+cWbU1nxvRNQluzCsTDUEIfThJIF +cLu0WkoQclUrC3d2tM8jclLPssb6/OV8GaJ+4mx4ri7HbGaUAOtA/TXKR6AuhgkR +NPKYhpPU0q/PRlGXdwJP8zXb8+CXMMTnI5Upur7Tc5T3I/x1Gqfz7n1sTRZfsuiQ +J5uua4hz4te3oV2tm7LWcNItHD43zttBTTx/m5icg71JE2gcr2oincw= +-----END CERTIFICATE----- diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/data.txt b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/data.txt new file mode 100644 index 0000000000..0aa6fb5467 --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/data.txt @@ -0,0 +1 @@ +test data \ No newline at end of file diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/imageFileName b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/imageFileName new file mode 100644 index 0000000000..0aa6fb5467 --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/imageFileName @@ -0,0 +1 @@ +test data \ No newline at end of file diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/job.go b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/job.go new file mode 100644 index 0000000000..547ef87352 --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/job.go @@ -0,0 +1,147 @@ +// Copyright 2016-2021 IBM Corp. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package zhmcclient + +import ( + "encoding/json" + "fmt" + "net/http" + "path" + + "go.uber.org/zap" +) + +// JobAPI defines an interface for issuing Job requests to ZHMC +// +//go:generate counterfeiter -o fakes/job.go --fake-name JobAPI . JobAPI +type JobAPI interface { + QueryJob(jobURI string) (*Job, int, *HmcError) + DeleteJob(jobURI string) (int, *HmcError) + CancelJob(jobURI string) (int, *HmcError) +} + +type JobManager struct { + client ClientAPI +} + +func NewJobManager(client ClientAPI) *JobManager { + return &JobManager{ + client: client, + } +} + +/** +* GET /api/jobs/{job-id} +* Return: 200 and job status +* or: 400, 404 + */ +func (m *JobManager) QueryJob(jobURI string) (*Job, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, jobURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error on get on job uri", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + + if status == http.StatusOK { + myjob := Job{} + err := json.Unmarshal(responseBody, &myjob) + if err != nil { + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, job: %v", requestUrl, http.MethodGet, status, &myjob)) + return &myjob, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error on get on job uri", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return nil, status, errorResponseBody +} + +/** +* DELETE /api/jobs/{job-id} +* Return: 204 +* or: 400, 404, 409 + */ +func (m *JobManager) DeleteJob(jobURI string) (int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, jobURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodDelete)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodDelete, requestUrl, nil, "") + if err != nil { + logger.Error("error on delete job", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodDelete), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return status, err + } + + if status == http.StatusNoContent { + logger.Info(fmt.Sprintf("Response: job deleted, request url: %v, method: %v, status: %v", requestUrl, http.MethodDelete, status)) + return status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error on delete job", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodDelete), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return status, errorResponseBody +} + +/** +* POST /api/jobs/{job-id}/operations/cancel +* Return: 204 +* or: 400, 404, 409 + */ +func (m *JobManager) CancelJob(jobURI string) (int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, jobURI, 
"operations/cancel") + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodPost)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, nil, "") + if err != nil { + logger.Error("error on cancel job", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return status, err + } + + if status == http.StatusNoContent { + logger.Info(fmt.Sprintf("Response: job cancelled, request url: %v, method: %v, status: %v", requestUrl, http.MethodPost, status)) + return status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error on cancel job", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return status, errorResponseBody +} diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/logger.go b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/logger.go new file mode 100644 index 0000000000..483c18f90f --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/logger.go @@ -0,0 +1,15 @@ +package zhmcclient + +import ( + "go.uber.org/zap/zapcore" +) + +type Logger interface { + Debug(msg string, fields ...zapcore.Field) + Info(msg string, fields ...zapcore.Field) + Warn(msg string, fields ...zapcore.Field) + Error(msg string, fields ...zapcore.Field) + DPanic(msg string, fields ...zapcore.Field) + Panic(msg string, fields ...zapcore.Field) + Fatal(msg string, fields ...zapcore.Field) +} diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/lpar.go b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/lpar.go new file mode 100644 index 0000000000..213ab72623 --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/lpar.go @@ -0,0 +1,692 @@ +// Copyright 2021-2023 IBM Corp. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zhmcclient + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "path" + + "go.uber.org/zap" +) + +// LparAPI defines an interface for issuing LPAR requests to ZHMC +// +//go:generate counterfeiter -o fakes/lpar.go --fake-name LparAPI . 
LparAPI +type LparAPI interface { + CreateLPAR(cpcURI string, props *LparProperties) (string, int, *HmcError) + ListLPARs(cpcURI string, query map[string]string) ([]LPAR, int, *HmcError) + GetLparProperties(lparURI string) (*LparObjectProperties, int, *HmcError) + UpdateLparProperties(lparURI string, props *LparProperties) (int, *HmcError) + StartLPAR(lparURI string) (string, int, *HmcError) + StopLPAR(lparURI string) (string, int, *HmcError) + DeleteLPAR(lparURI string) (int, *HmcError) + AttachStorageGroupToPartition(storageGroupURI string, request *StorageGroupPayload) (int, *HmcError) + DetachStorageGroupToPartition(storageGroupURI string, request *StorageGroupPayload) (int, *HmcError) + MountIsoImage(lparURI string, isoFile string, insFile string) (int, *HmcError) + UnmountIsoImage(lparURI string) (int, *HmcError) + + ListNics(lparURI string) ([]string, int, *HmcError) + FetchAsciiConsoleURI(lparURI string, request *AsciiConsoleURIPayload) (*AsciiConsoleURIResponse, int, *HmcError) + + GetEnergyDetailsforLPAR(lparURI string, props *EnergyRequestPayload) (uint64, int, *HmcError) + + AttachCryptoToPartition(lparURI string, request *CryptoConfig) (int, *HmcError) +} + +type LparManager struct { + client ClientAPI +} + +type Wattage struct { + Data int `json:"data"` + Timestamp int `json:"timestamp"` +} + +type WattageData struct { + Wattage []Wattage `json:"wattage"` +} + +func NewLparManager(client ClientAPI) *LparManager { + return &LparManager{ + client: client, + } +} + +/** +* GET /api/cpcs/{cpc-id}/partitions +* @cpcURI is the cpc object-uri +* @query is a key, value pairs array, +* currently, supports 'name=$name_reg_expression' +* 'status=PartitionStatus' +* 'type=PartitionType' +* @return lpar array +* Return: 200 and LPARs array +* or: 400, 404, 409 + */ +func (m *LparManager) ListLPARs(cpcURI string, query map[string]string) ([]LPAR, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, cpcURI, "/partitions") + requestUrl = BuildUrlFromQuery(requestUrl, query) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error on getting lpar's", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + + if status == http.StatusOK { + lpars := &LPARsArray{} + err := json.Unmarshal(responseBody, lpars) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: get on lpars successfull, status: %v", status)) + return lpars.LPARS, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error listing lpar's", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return nil, status, errorResponseBody +} + +/** +* GET /api/partitions/{partition-id} +* @lparURI is the object-uri +* Return: 200 and LparObjectProperties +* or: 400, 404, + */ +func (m *LparManager) 
GetLparProperties(lparURI string) (*LparObjectProperties, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, lparURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error on getting lpar properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + + if status == http.StatusOK { + lparProps := LparObjectProperties{} + err := json.Unmarshal(responseBody, &lparProps) + if err != nil { + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, property details, name: %v,description: %v"+ + "uri: %v, partition status: %v, partition type: %v, id: %v, processor mode: %v, ifl processors: %v, memory: %v", + requestUrl, http.MethodGet, status, lparProps.Name, lparProps.Description, lparProps.URI, lparProps.Status, lparProps.Type, + lparProps.ID, lparProps.ProcessorMode, lparProps.IflProcessors, lparProps.MaximumMemory)) + return &lparProps, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error getting lpar properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return nil, status, errorResponseBody +} + +/** +* POST /api/cpcs/{cpc-id}/partitions +* @cpcURI is the cpc object-uri +* @props are LPAR properties' +* @return object-uri string +* Return: 200 and object-uri string +* or: 400, 404, 409 + */ +func (m *LparManager) CreateLPAR(cpcURI string, props *LparProperties) (string, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, cpcURI, "/partitions") + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodPost)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, props, "") + if err != nil { + logger.Error("error on getting lpar's", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return "", status, err + } + + if status == http.StatusCreated { + responseObj := LparProperties{} + err := json.Unmarshal(responseBody, &responseObj) + if err != nil { + return "", status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + if responseObj.URI != "" { + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, lpar uri: %v", requestUrl, http.MethodPost, status, responseObj.URI)) + return responseObj.URI, status, nil + } + logger.Error("error on starting lpar", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status", fmt.Sprint(status)), + zap.Error(errors.New("empty job uri"))) + return "", status, getHmcErrorFromMsg(ERR_CODE_EMPTY_JOB_URI, ERR_MSG_EMPTY_JOB_URI) + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error listing lpar's", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status: ", fmt.Sprint(status)), + 
zap.Error(fmt.Errorf("%v", errorResponseBody))) + return "", status, errorResponseBody +} + +/** +* POST /api/partitions/{partition-id} +* @lparURI is the object-uri +* Return: 204 +* or: 400, 403, 404, 409, 503, + */ +func (m *LparManager) UpdateLparProperties(lparURI string, props *LparProperties) (int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, lparURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodPost)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, props, "") + if err != nil { + logger.Error("error on getting lpar properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return status, err + } + + if status == http.StatusNoContent { + logger.Info(fmt.Sprintf("update lpar properties completed, request url: %v, method: %v, status: %v", requestUrl, http.MethodPost, status)) + return status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error updating lpar properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return status, errorResponseBody +} + +/** +* POST /api/partitions/{partition-id}/operations/start +* @lparURI is the object-uri +* @return job-uri +* Return: 202 and job-uri +* or: 400, 403, 404, 503, + */ +func (m *LparManager) StartLPAR(lparURI string) (string, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, lparURI, "/operations/start") + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodPost)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, nil, "") + if err != nil { + logger.Error("error on starting lpar", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return "", status, err + } + + if status == http.StatusAccepted { + responseObj := StartStopLparResponse{} + err := json.Unmarshal(responseBody, &responseObj) + if err != nil { + return "", status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + if responseObj.URI != "" { + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, lpar uri: %v", requestUrl, http.MethodPost, status, responseObj.URI)) + return responseObj.URI, status, nil + } + logger.Error("error on starting lpar", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status", fmt.Sprint(status)), + zap.Error(errors.New("empty job uri"))) + return "", status, getHmcErrorFromMsg(ERR_CODE_EMPTY_JOB_URI, ERR_MSG_EMPTY_JOB_URI) + } + + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error starting lpar", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return "", status, errorResponseBody +} + +/** +* POST /api/partitions/{partition-id}/operations/stop +* @lparURI is the object-uri +* @return job-uri +* Return: 202 and job-uri +* or: 400, 403, 404, 503, + */ +func (m *LparManager) StopLPAR(lparURI string) (string, 
+
+/**
+* POST /api/partitions/{partition-id}/operations/stop
+* @lparURI is the object-uri
+* @return job-uri
+* Return: 202 and job-uri
+* or: 400, 403, 404, 503
+ */
+func (m *LparManager) StopLPAR(lparURI string) (string, int, *HmcError) {
+	requestUrl := m.client.CloneEndpointURL()
+	requestUrl.Path = path.Join(requestUrl.Path, lparURI, "/operations/stop")
+
+	logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodPost))
+	status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, nil, "")
+	if err != nil {
+		logger.Error("error on stopping lpar",
+			zap.String("request url", fmt.Sprint(requestUrl)),
+			zap.String("method", http.MethodPost),
+			zap.String("status", fmt.Sprint(status)),
+			zap.Error(fmt.Errorf("%v", err)))
+		return "", status, err
+	}
+
+	if status == http.StatusAccepted {
+		responseObj := StartStopLparResponse{}
+		err := json.Unmarshal(responseBody, &responseObj)
+		if err != nil {
+			logger.Error("error on unmarshalling stop lpar response",
+				zap.String("request url", fmt.Sprint(requestUrl)),
+				zap.String("method", http.MethodPost),
+				zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err))))
+			return "", status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)
+		}
+		if responseObj.URI != "" {
+			logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, job uri: %v", requestUrl, http.MethodPost, status, responseObj.URI))
+			return responseObj.URI, status, nil
+		}
+		logger.Error("error on stopping lpar",
+			zap.String("request url", fmt.Sprint(requestUrl)),
+			zap.String("method", http.MethodPost),
+			zap.String("status", fmt.Sprint(status)),
+			zap.Error(errors.New("empty job uri")))
+		return "", status, getHmcErrorFromMsg(ERR_CODE_EMPTY_JOB_URI, ERR_MSG_EMPTY_JOB_URI)
+	}
+	errorResponseBody := GenerateErrorFromResponse(responseBody)
+	logger.Error("error stopping lpar",
+		zap.String("request url", fmt.Sprint(requestUrl)),
+		zap.String("method", http.MethodPost),
+		zap.String("status", fmt.Sprint(status)),
+		zap.Error(fmt.Errorf("%v", errorResponseBody)))
+	return "", status, errorResponseBody
+}
+
+/**
+* DELETE /api/partitions/{partition-id}
+* @lparURI is the object-uri of the lpar to be deleted
+* Return: 204
+* or: 400, 403, 404, 409, 503
+ */
+func (m *LparManager) DeleteLPAR(lparURI string) (int, *HmcError) {
+	requestUrl := m.client.CloneEndpointURL()
+	requestUrl.Path = path.Join(requestUrl.Path, lparURI)
+
+	logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodDelete))
+	status, responseBody, err := m.client.ExecuteRequest(http.MethodDelete, requestUrl, nil, "")
+	if err != nil {
+		logger.Error("error deleting partition",
+			zap.String("request url", fmt.Sprint(requestUrl)),
+			zap.String("method", http.MethodDelete),
+			zap.String("status", fmt.Sprint(status)),
+			zap.Error(fmt.Errorf("%v", err)))
+		return status, err
+	}
+
+	if status == http.StatusNoContent {
+		logger.Info(fmt.Sprintf("Response: partition deleted, request url: %v, method: %v, status: %v", requestUrl, http.MethodDelete, status))
+		return status, nil
+	}
+	errorResponseBody := GenerateErrorFromResponse(responseBody)
+	logger.Error("error deleting partition",
+		zap.String("request url", fmt.Sprint(requestUrl)),
+		zap.String("method", http.MethodDelete),
+		zap.String("status", fmt.Sprint(status)),
+		zap.Error(fmt.Errorf("%v", errorResponseBody)))
+	return status, errorResponseBody
+}
+
+/**
+* POST /api/partitions/{partition-id}/operations/mount-iso-image
+* @lparURI is the object-uri
+* Return: 204
+* or: 400, 403, 404, 409, 503
+ */
+func (m *LparManager) MountIsoImage(lparURI string, isoFile string, insFile string) (int, *HmcError) {
+	pureIsoName := path.Base(isoFile)
+	pureInsName := path.Base(insFile)
+	query := map[string]string{
+ "image-name": pureIsoName, + "ins-file-name": "/" + pureInsName, + } + imageData, byteErr := RetrieveBytes(isoFile) + if byteErr != nil { + logger.Error("error on retrieving iso file", zap.Error(byteErr)) + } + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, lparURI, "/operations/mount-iso-image") + requestUrl = BuildUrlFromQuery(requestUrl, query) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v, Parameters: iso file %v, insfile: %v", requestUrl, http.MethodPost, isoFile, insFile)) + + status, responseBody, err := m.client.UploadRequest(http.MethodPost, requestUrl, imageData) + if err != nil { + logger.Error("error mounting iso image", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return status, err + } + + if status == http.StatusNoContent { + logger.Info(fmt.Sprintf("Response: mounting iso image completed, request url: %v, method: %v, status: %v", requestUrl, http.MethodPost, status)) + return status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error mounting iso image", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return status, errorResponseBody +} + +/** +* POST /api/partitions/{partition-id}/operations/unmount-iso-image +* @lparURI is the object-uri +* Return: 204 +* or: 400, 403, 404, 409, 503 + */ +func (m *LparManager) UnmountIsoImage(lparURI string) (int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, lparURI, "/operations/unmount-iso-image") + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodPost)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, nil, "") + if err != nil { + logger.Error("error unmounting iso image", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return status, err + } + + if status == http.StatusNoContent { + logger.Info(fmt.Sprintf("Response: iso image unmounted. 
request url: %v, method: %v, status: %v", requestUrl, http.MethodPost, status))
+		return status, nil
+	}
+
+	errorResponseBody := GenerateErrorFromResponse(responseBody)
+	logger.Error("error unmounting iso image",
+		zap.String("request url", fmt.Sprint(requestUrl)),
+		zap.String("method", http.MethodPost),
+		zap.String("status", fmt.Sprint(status)),
+		zap.Error(fmt.Errorf("%v", errorResponseBody)))
+	return status, errorResponseBody
+}
+
+/**
+* get_property('nic-uris') from LPAR
+ */
+func (m *LparManager) ListNics(lparURI string) ([]string, int, *HmcError) {
+	props, status, err := m.GetLparProperties(lparURI)
+	if err != nil {
+		logger.Error("error listing nics",
+			zap.String("request url", fmt.Sprint(lparURI)),
+			zap.String("method", http.MethodGet),
+			zap.String("status", fmt.Sprint(status)),
+			zap.Error(fmt.Errorf("%v", err)))
+		return nil, status, err
+	}
+	logger.Info(fmt.Sprintf("request url: %v, method: %v, status: %v, nic uris: %v", lparURI, http.MethodGet, status, props.NicUris))
+	return props.NicUris, status, nil
+}
+
+// AttachStorageGroupToPartition
+
+/**
+* POST /api/partitions/{partition-id}/operations/attach-storage-group
+* Return: 204
+* or: 400, 404, 409
+ */
+func (m *LparManager) AttachStorageGroupToPartition(lparURI string, request *StorageGroupPayload) (int, *HmcError) {
+	requestUrl := m.client.CloneEndpointURL()
+	requestUrl.Path = path.Join(requestUrl.Path, lparURI, "/operations/attach-storage-group")
+
+	logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodPost))
+	logger.Info(fmt.Sprintf("request: %v", request))
+	status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, request, "")
+
+	if err != nil {
+		logger.Error("error on attaching storage group to partition",
+			zap.String("request url", fmt.Sprint(lparURI)),
+			zap.String("method", http.MethodPost),
+			zap.String("status", fmt.Sprint(status)),
+			zap.Error(fmt.Errorf("%v", err)))
+		return status, err
+	}
+
+	if status == http.StatusNoContent {
+		logger.Info(fmt.Sprintf("Response: attach storage group to partition successful, request url: %v, method: %v, status: %v", lparURI, http.MethodPost, status))
+		return status, nil
+	}
+
+	errorResponseBody := GenerateErrorFromResponse(responseBody)
+	logger.Error("error attaching storage group to partition",
+		zap.String("request url", fmt.Sprint(lparURI)),
+		zap.String("method", http.MethodPost),
+		zap.String("status", fmt.Sprint(status)),
+		zap.Error(fmt.Errorf("%v", errorResponseBody)))
+	return status, errorResponseBody
+}
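A short caller sketch for the attach operation above, using the StorageGroupPayload type defined later in model.go; the helper name and URIs are hypothetical and not part of the vendored file:

// Hypothetical caller sketch: attach an existing storage group, identified
// by its object-uri, to a partition.
func attachStorage(lparManager *LparManager, lparURI, storageGroupURI string) error {
	payload := &StorageGroupPayload{StorageGroupURI: storageGroupURI}
	status, hmcErr := lparManager.AttachStorageGroupToPartition(lparURI, payload)
	if hmcErr != nil {
		return fmt.Errorf("attach storage group failed (HTTP %d): %v", status, hmcErr)
	}
	return nil
}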
+
+// DetachStorageGroupToPartition
+/**
+* POST /api/partitions/{partition-id}/operations/detach-storage-group
+* Return: 204
+* or: 400, 404, 409
+ */
+func (m *LparManager) DetachStorageGroupToPartition(lparURI string, request *StorageGroupPayload) (int, *HmcError) {
+	requestUrl := m.client.CloneEndpointURL()
+	requestUrl.Path = path.Join(requestUrl.Path, lparURI, "/operations/detach-storage-group")
+
+	logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodPost))
+	status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, request, "")
+
+	if err != nil {
+		logger.Error("error on detaching storage group from partition",
+			zap.String("request url", fmt.Sprint(lparURI)),
+			zap.String("method", http.MethodPost),
+			zap.String("status", fmt.Sprint(status)),
+			zap.Error(fmt.Errorf("%v", err)))
+		return status, err
+	}
+
+	if status == http.StatusNoContent {
+		logger.Info(fmt.Sprintf("Response: detach storage group from partition successful, request url: %v, method: %v, status: %v", lparURI, http.MethodPost, status))
+		return status, nil
+	}
+
+	errorResponseBody := GenerateErrorFromResponse(responseBody)
+	logger.Error("error detaching storage group from partition",
+		zap.String("request url", fmt.Sprint(lparURI)),
+		zap.String("method", http.MethodPost),
+		zap.String("status", fmt.Sprint(status)),
+		zap.Error(fmt.Errorf("%v", errorResponseBody)))
+	return status, errorResponseBody
+}
+
+// FetchAsciiConsoleURI
+/**
+* POST /api/partitions/{partition-id}/operations/get-ascii-console-websocket-uri
+* Return: 200 and ascii-console-websocket-uri and sessionID for the given lpar
+* or: 400, 404, 409
+ */
+func (m *LparManager) FetchAsciiConsoleURI(lparURI string, request *AsciiConsoleURIPayload) (*AsciiConsoleURIResponse, int, *HmcError) {
+	// Start a new session for each ascii console URI
+	consoleSessionID, status, err := m.client.LogonConsole()
+	if err != nil {
+		return nil, status, err
+	}
+	requestUrl := m.client.CloneEndpointURL()
+	requestUrl.Path = path.Join(requestUrl.Path, lparURI, "/operations/get-ascii-console-websocket-uri")
+
+	logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodPost))
+	status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, request, consoleSessionID)
+
+	if err != nil {
+		logger.Error("error on fetching ascii console uri",
+			zap.String("request url", fmt.Sprint(lparURI)),
+			zap.String("method", http.MethodPost),
+			zap.String("status", fmt.Sprint(status)),
+			zap.Error(fmt.Errorf("%v", err)))
+		return nil, status, err
+	}
+
+	if status == http.StatusOK {
+		responseObj := &AsciiConsoleURIResponse{}
+
+		err := json.Unmarshal(responseBody, &responseObj)
+		if err != nil {
+			logger.Error("error on unmarshalling ascii console uri response",
+				zap.String("request url", fmt.Sprint(requestUrl)),
+				zap.String("method", http.MethodPost),
+				zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err))))
+			return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)
+		}
+		if responseObj.URI != "" {
+			newResponseObj := &AsciiConsoleURIResponse{
+				URI:       path.Join(requestUrl.Host, responseObj.URI),
+				SessionID: consoleSessionID,
+			}
+
+			logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, ascii console object: %v", lparURI, http.MethodPost, status, responseObj))
+			return newResponseObj, status, nil
+		}
+		logger.Error("error on fetching ascii console uri",
+			zap.String("request url", fmt.Sprint(lparURI)),
+			zap.String("method", http.MethodPost),
+			zap.String("status", fmt.Sprint(status)),
+			zap.Error(errors.New("empty ascii console uri")))
+		return responseObj, status, getHmcErrorFromMsg(ERR_CODE_EMPTY_JOB_URI, ERR_MSG_EMPTY_JOB_URI)
+	}
+
+	errorResponseBody := GenerateErrorFromResponse(responseBody)
+	logger.Error("error on fetching ascii console uri",
+		zap.String("request url", fmt.Sprint(lparURI)),
+		zap.String("method", http.MethodPost),
+		zap.String("status", fmt.Sprint(status)),
+		zap.Error(fmt.Errorf("%v", errorResponseBody)))
+	return nil, status, errorResponseBody
+}
+
+// GetEnergyDetailsforLPAR
+/**
+* POST https://{hmc_addr}:{port}/api/logical-partitions/{logical-partition-id}/operations/get-historical-sustainability-data
+* Return: 200
+* or: 400, 404, 409
+* sample response:
+*	{
+*		"wattage": [{
+*			"data": 53,
+*			"timestamp": 1680394193292
+*		}, {
+*			"data": 52,
+*			"timestamp": 1680408593302
+*		}]
+*	}
+ */
+func (m *LparManager) GetEnergyDetailsforLPAR(lparURI string, props *EnergyRequestPayload) (uint64, int, *HmcError) {
+	requestUrl := m.client.CloneEndpointURL()
+
+	requestUrl.Path = path.Join(requestUrl.Path, lparURI, "/operations", "/get-historical-sustainability-data")
+	logger.Info("Request URL: " + requestUrl.Path + " Method: " + http.MethodPost + " props: " + fmt.Sprint(props))
+	status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, props, "")
+
+	if err != nil {
+		logger.Error("error on getting lpar's energy",
+			zap.String("request url", fmt.Sprint(requestUrl)),
+			zap.String("method", http.MethodPost),
+			zap.String("status", fmt.Sprint(status)),
+			zap.Error(fmt.Errorf("%v", err)))
+		return 0, status, err
+	}
+
+	logger.Info("Response : " + string(responseBody))
+
+	if status == http.StatusOK {
+		var wd WattageData
+		err := json.Unmarshal(responseBody, &wd)
+		if err != nil {
+			return 0, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)
+		}
+		if len(wd.Wattage) == 0 {
+			// No samples returned; report zero wattage instead of indexing an empty slice.
+			return 0, status, nil
+		}
+		logger.Info("Response: got lpar energy data successfully, status: " + fmt.Sprint(status))
+		return uint64(wd.Wattage[len(wd.Wattage)-1].Data), status, nil
+	}
+	errorResponseBody := GenerateErrorFromResponse(responseBody)
+	return 0, status, errorResponseBody
+}
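Tying the sample response above to the return value: the function returns the newest wattage sample, so for that sample payload it would yield 52. A thin caller sketch, assuming the EnergyRequestPayload (defined elsewhere in this package) has been filled in by the caller; the helper name is hypothetical and not part of the vendored file:

// Hypothetical caller sketch: read the most recent wattage sample for a partition.
func latestWattage(lparManager *LparManager, lparURI string, req *EnergyRequestPayload) (uint64, error) {
	watts, status, hmcErr := lparManager.GetEnergyDetailsforLPAR(lparURI, req)
	if hmcErr != nil {
		return 0, fmt.Errorf("energy query failed (HTTP %d): %v", status, hmcErr)
	}
	return watts, nil
}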
+
+func (m *LparManager) AttachCryptoToPartition(lparURI string, request *CryptoConfig) (int, *HmcError) {
+	requestUrl := m.client.CloneEndpointURL()
+	requestUrl.Path = path.Join(requestUrl.Path, lparURI, "/operations/increase-crypto-configuration")
+
+	logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodPost))
+	logger.Info(fmt.Sprintf("request: %v", request))
+	status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, request, "")
+
+	if err != nil {
+		logger.Error("error on attaching crypto adapters and domains to partition",
+			zap.String("request url", fmt.Sprint(lparURI)),
+			zap.String("method", http.MethodPost),
+			zap.String("status", fmt.Sprint(status)),
+			zap.Error(fmt.Errorf("%v", err)))
+		return status, err
+	}
+
+	if status == http.StatusNoContent {
+		logger.Info(fmt.Sprintf("Response: attach crypto adapters and domains to partition successful, request url: %v, method: %v, status: %v", lparURI, http.MethodPost, status))
+		return status, nil
+	}
+
+	errorResponseBody := GenerateErrorFromResponse(responseBody)
+	logger.Error("error attaching crypto adapters and domains to partition",
+		zap.String("request url", fmt.Sprint(lparURI)),
+		zap.String("method", http.MethodPost),
+		zap.String("status", fmt.Sprint(status)),
+		zap.Error(fmt.Errorf("%v", errorResponseBody)))
+	return status, errorResponseBody
+}
diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/metrics.go b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/metrics.go
new file mode 100644
index 0000000000..098ca60c79
--- /dev/null
+++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/metrics.go
@@ -0,0 +1,79 @@
+// Copyright 2021-2023 IBM Corp. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package zhmcclient + +import ( + "net/http" + "path" + "fmt" + + "go.uber.org/zap" +) + +// MetricsAPI defines an interface for issuing LPAR requests to ZHMC +type MetricsAPI interface { + GetLiveEnergyDetailsforLPAR(lparURI string) (uint64, int, *HmcError) +} + +type MetricsManager struct { + client ClientAPI +} + +func NewMetricsManager(client ClientAPI) *MetricsManager { + return &MetricsManager{ + client: client, + } +} + +// CollectMetrics collects metrics based on the "metrics context" created before. +// The return value is a list of MetricObject objects, which may +// belong to different metrics context groups or CPCs. +func (m *MetricsManager) CollectMetrics() (metricsObjectList []MetricsObject, err *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, m.client.GetMetricsContext().URI) + _, resp, err:= m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + metricsObjectList = extractMetricObjects(m.client.GetMetricsContext(), string(resp)) + return +} + +func (m *MetricsManager) GetLiveEnergyDetailsforLPAR(lparURI string) (uint64, int, *HmcError) { + metricObjects, err := m.CollectMetrics() + + if err != nil { + logger.Error("error on getting lpar's energy", + zap.String("request url", fmt.Sprint(lparURI)), + zap.String("method", http.MethodPost), + zap.Error(fmt.Errorf("%v", err))) + return 0, 0, err + } + // Find the item with the specified MetricsGroupName and ResourceURI + var powerConsumptionWatts uint64 = 0 + for _, obj := range metricObjects { + if obj.MetricsGroupName == "logical-partition-usage" && obj.ResourceURI == lparURI { + switch value := obj.Metrics["power-consumption-watts"].(type) { + case uint64: + powerConsumptionWatts = value + case int: + powerConsumptionWatts = uint64(value) + default: + // Handle the case where the value has an unsupported type + logger.Info("Unsupported type for power-consumption-watts") + } + break + } + } + logger.Info("powerConsumptionWatts:" + fmt.Sprint(powerConsumptionWatts)) + return powerConsumptionWatts, 0, nil +} diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/model.go b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/model.go new file mode 100644 index 0000000000..1e10ebd119 --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/model.go @@ -0,0 +1,1177 @@ +// Copyright 2021-2023 IBM Corp. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zhmcclient + +////////////////////////////////////////////////// +// Adapter +////////////////////////////////////////////////// + +type AdapterFamily string + +const ( + ADAPTER_FAMILY_HIPERSOCKET AdapterFamily = "hipersockets" + ADAPTER_FAMILY_OSA = "osa" + ADAPTER_FAMILY_FICON = "ficon" + ADAPTER_FAMILY_ROCE = "roce" + ADAPTER_FAMILY_CRYPTO = "crypto" + ADAPTER_FAMILY_ACCELERATOR = "accelerator" +) + +type AdapterType string + +const ( + ADAPTER_TYPE_CRYPTO AdapterType = "crypto" + ADAPTER_TYPE_FCP = "fcp" + ADAPTER_TYPE_HIPERSOCKET = "hipersockets" + ADAPTER_TYPE_OSD = "osd" + ADAPTER_TYPE_OSM = "osm" + ADAPTER_TYPE_ROCE = "roce" + ADAPTER_TYPE_ZEDC = "zedc" + ADAPTER_TYPE_FC = "fc" + ADAPTER_TYPE_NOT_CFG = "not-configured" +) + +type AdapterStatus string + +const ( + ADAPTER_STATUS_ACTIVE AdapterStatus = "active" + ADAPTER_STATUS_NOT_ACTIVE = "not-active" + ADAPTER_STATUS_NOT_DETECTED = "not-detected" + ADAPTER_STATUS_EXCEPTIONS = "exceptions" +) + +type AdapterCardType string + +const ( + ADPATER_CARD_HIPERSOCKETS AdapterCardType = "hipersockets" + ADPATER_CARD_OSA_EXPRESS_4S_1GB = "osa-express-4s-1gb" + ADPATER_CARD_OSA_EXPRESS_4S_10GB = "osa-express-4s-10gb" + ADPATER_CARD_OSA_EXPRESS_4S_1000BASE_T = "osa-express-4s-1000base-t" + ADPATER_CARD_OSA_EXPRESS_5S_1GB = "osa-express-5s-1gb" + ADPATER_CARD_OSA_EXPRESS_5S_10GB = "osa-express-5s-10gb" + ADPATER_CARD_OSA_EXPRESS_5G_1000BASE_T = "osa-express-5s-1000base-t" + ADPATER_CARD_OSA_EXPRESS_6S_1GB = "osa-express-6s-1gb" + ADPATER_CARD_OSA_EXPRESS_6S_10GB = "osa-express-6s-10gb" + ADPATER_CARD_OSA_EXPRESS_6G_1000BASE_T = "osa-express-6s-1000base-t" + ADPATER_CARD_OSA_EXPRESS_7S_25GB = "osa-express-7s-25gb" + ADPATER_CARD_10GBE_ROCE_EXPRESS = "10gbe-roce-express" + ADPATER_CARD_ROCE_EXPRESS_2 = "roce-express-2" + ADAPTER_CARD_ROCE_EXPRESS_2_25GB = "roce-express-2-25gb" + ADAPTER_CARD_CRYPTO_EXPRESS_5S = "crypto-express-5s" + ADAPTER_CARD_CRYPTO_EXPRESS_6S = "crypto-express-6s" + ADAPTER_CARD_FICON_EXPRESS_8 = "ficon-express-8" + ADAPTER_CARD_FICON_EXPRESS_8S = "ficon-express-8s" + ADAPTER_CARD_FICON_EXPRESS_16S = "ficon-express-16s" + ADAPTER_CARD_FICON_EXPRESS_16S_PLUS = "ficon-express-16s-plus" + ADAPTER_CARD_FICON_EXPRESS_16SA = "ficon-express-16sa" + ADAPTER_CARD_FCP_EXPRESS_32S = "fcp-express-32s" + ADAPTER_CARD_ZEDC_EXPRESS = "zedc-express" + ADAPTER_CARD_CLOUD_NETWORK_X5 = "cloud-network-x5" + ADAPTER_CARD_UNKNOWN = "unknown" +) + +type AdapterState string + +const ( + ADAPTER_STATE_ONLINE AdapterState = "online" + ADAPTER_STATE_STAND_BY = "stand-by" + ADAPTER_STATE_RESERVED = "reserved" + ADAPTER_STATE_UNKNOWN = "unknown" +) + +type AdapterTransmissionUnit int + +const ( + ADAPTER_TRANSMISSION_UNIT_8 AdapterTransmissionUnit = 8 + ADAPTER_TRANSMISSION_UNIT_16 = 16 + ADAPTER_TRANSMISSION_UNIT_32 = 32 + ADAPTER_TRANSMISSION_UNIT_56 = 56 +) + +type AdapterConnectionClass string + +const ( + ADAPTER_CONNECTION_STORAGE_SUBSYSTEM AdapterConnectionClass = "storage-subsystem" + ADAPTER_CONNECTION_STORAGE_SWITCH AdapterConnectionClass = "storage-switch" +) + +// Physical adapter channel connection status +type AdapterChannelStatus string + +const ( + ADAPTER_CHANNEL_OPERATING AdapterChannelStatus = "operating" + ADAPTER_CHANNEL_NO_POWER = "no-power" + ADAPTER_CHANNEL_SERVICE = "service" + ADAPTER_CHANNEL_STOPPED = "stopped" + ADAPTER_CHANNEL_NOT_DEFINED = "not-defined" + ADAPTER_CHANNEL_SUSPENDED = "suspended" + ADAPTER_CHANNEL_CHECK_STOPPED = "check-stopped" + ADAPTER_CHANNEL_WRAP_BLOCK = "wrap-block" + 
ADAPTER_CHANNEL_PERMANENT_ERROR = "permanent-error" + ADAPTER_CHANNEL_INITIALIZING = "initializing" + ADAPTER_CHANNEL_LOSS_OF_SIGNAL = "loss-of-signal" + ADAPTER_CHANNEL_LOSS_OF_SYNCHRONIZATION = "loss-of-synchronization" + ADAPTER_CHANNEL_NOT_OPERATIONAL_LINK = "not-operational-link" + ADAPTER_CHANNEL_SEQUENCE_TIME_OUT = "sequence-time-out" + ADAPTER_CHANNEL_SEQUENCE_NOT_PERMITTED = "sequence-not-permitted" + ADAPTER_CHANNEL_TERMINAL_CONDITION = "terminal-condition" + ADAPTER_CHANNEL_OFFLINE_SIGNAL_RECEIVED = "offline-signal-received" + ADAPTER_CHANNEL_FABRIC_LOGIN_SEQUENCE_FAILURE = "fabric-login-sequence-failure" + ADAPTER_CHANNEL_POST_LOGIN_SEQUENCE_FAILURE = "post-login-sequence-failure" + ADAPTER_CHANNEL_STATE_CHANGE_REGISTRATION_FAILURE = "state-change-registration-failure" + ADAPTER_CHANNEL_INVALID_ATTACHMENT_FAILURE = "invalid-attachment-failure" + ADAPTER_CHANNEL_TEST_MODE = "test-mode" + ADAPTER_CHANNEL_BIT_ERROR_THRESHOLD_EXCEEDED = "bit-error-threshold-exceeded" + ADAPTER_CHANNEL_IFCC_THRESHOLD_EXCEEDED = "ifcc-threshold-exceeded" + ADAPTER_CHANNEL_IO_SUPRESSED = "io-supressed" +) + +type Adapter struct { + URI string `json:"object-uri,omitempty"` + Name string `json:"name,omitempty"` + ID string `json:"adapter-id,omitempty"` + Family AdapterFamily `json:"adapter-family,omitempty"` + Type AdapterType `json:"type,omitempty"` + Status AdapterStatus `json:"status,omitempty"` +} + +type AdaptersArray struct { + ADAPTERS []Adapter `json:"adapters"` +} + +type AdapterProperties struct { + URI string `json:"object-uri,omitempty"` + Name string `json:"name,omitempty"` + ID string `json:"adapter-id,omitempty"` + ObjectID string `json:"object-id,omitempty"` + Description string `json:"description,omitempty"` + Family AdapterFamily `json:"adapter-family,omitempty"` + Type AdapterType `json:"type,omitempty"` + Status AdapterStatus `json:"status,omitempty"` + DetectedCardType AdapterCardType `json:"detected-card-type,omitempty"` + CardLocation string `json:"card-location,omitempty"` + PortCount int `json:"port-count,omitempty"` + NetworkAdapterPortURIs []string `json:"network-port-uris,omitempty"` + StoragePortURIs []string `json:"storage-port-uris,omitempty"` + State AdapterState `json:"state,omitempty"` + MAX_TRANSMISSION_UNIT_SIZE AdapterTransmissionUnit `json:"maximum-transmission-unit-size,omitempty"` + PHYSICALCHANNELSTATUS AdapterChannelStatus `json:"physical-channel-status,omitempty"` + CONFIGURED_CAPACITY int `json:"configured-capacity,omitempty"` + USED_CAPACITY int `json:"used-capacity,omitempty"` + ALLOWED_CAPACITY int `json:"allowed-capacity,omitempty"` + MAXIMUM_TOTAL_CAPACITY int `json:"maximum-total-capacity,omitempty"` + CHANNEL_PATH_ID string `json:"channel-path-id,omitempty"` +} + +type NetworkAdapterPort struct { + URI string `json:"element-uri,omitempty"` + Name string `json:"name,omitempty"` + ID string `json:"element-id,omitempty"` + Parent string `json:"parent,omitempty"` + Class string `json:"class,omitempty"` + Description string `json:"description,omitempty"` + Index int `json:"index"` +} + +type StorageAdapterPort struct { + URI string `json:"element-uri,omitempty"` + Name string `json:"name,omitempty"` + ID string `json:"element-id,omitempty"` + Parent string `json:"parent,omitempty"` + Class string `json:"class,omitempty"` + Description string `json:"description,omitempty"` + FabricID string `json:"fabric-id,omitempty"` + ConnectionEndpointURI string `json:"connection-endpoint-uri,omitempty"` + ConnectionEndpointClass AdapterConnectionClass 
`json:"connection-endpoint-class,omitempty"` + Index int `json:"index"` +} + +/** +* Sample +* { +* "name": +* "description": +* "port-description": +* "maximum-transmission-unit-size": +* } +* @return * "object-uri":"/api/adapters/542b9406-d033-11e5-9f39-020000000338" + */ +type HipersocketPayload struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + PortDescription string `json:"port-description,omitempty"` + MaxUnitSize int `json:"maximum-transmission-unit-size,omitempty"` +} + +type HipersocketCreateResponse struct { + URI string `json:"object-uri"` +} + +////////////////////////////////////////////////// +// Virtual Switches +////////////////////////////////////////////////// + +type VirtualSwitchType string + +const ( + VIRTUALSWITCH_TYPE_HIPERSOCKET VirtualSwitchType = "hipersockets" + VIRTUALSWITCH_TYPE_OSD = "osd" +) + +type VirtualSwitchesArray struct { + VIRTUALSWITCHES []VirtualSwitch `json:"virtual-switches"` +} + +type VirtualSwitch struct { + URI string `json:"object-uri,omitempty"` + Name string `json:"name,omitempty"` + Type VirtualSwitchType `json:"type,omitempty"` +} + +type VirtualSwitchProperties struct { + URI string `json:"object-uri,omitempty"` + Name string `json:"name,omitempty"` + Type VirtualSwitchType `json:"type,omitempty"` + ID string `json:"object-id,omitempty"` + Parent string `json:"parent,omitempty"` + Class string `json:"class,omitempty"` // "virtual-switch" + Description string `json:"description,omitempty"` + AdapterURI string `json:"backing-adapter-uri,omitempty"` + Port int `json:"port,omitempty"` + VNicUris []string `json:"connected-vnic-uris,omitempty"` +} + +////////////////////////////////////////////////// +// CPC +////////////////////////////////////////////////// + +type CpcStatus string + +const ( + CPC_STATUS_ACTIVE CpcStatus = "active" + CPC_STATUS_OPERATING = "operating" + CPC_STATUS_NO_COMMUNICATING = "not-communicating" + CPC_STATUS_EXCEPTIONS = "exceptions" + CPC_STATUS_STATUS_CHECK = "status-check" + CPC_STATUS_SERVICE = "service" + CPC_STATUS_NOT_OPERATING = "not-operating" + CPC_STATUS_NO_POWER = "no-power" + CPC_STATUS_SERVICE_REQUIRED = "service-required" + CPC_STATUS_DEGRADED = "degraded" +) + +type CpcImlMode string + +const ( + CPC_IML_MODE_NOT_SET CpcImlMode = "not-set" + CPC_IML_MODE_ESA390 = "esa390" + CPC_IML_MODE_LPAR = "lpar" + CPC_IML_MODE_ESA390_TPF = "esa390-tpf" + CPC_IML_MODE_DPM = "dpm" +) + +type CpcDegradedStatus string + +const ( + CPC_DEGRADED_MEMORY CpcDegradedStatus = "memory" + CPC_DEGRADED_IO = "io" + CPC_DEGRADED_NODE = "node" + CPC_DEGRADED_RING = "ring" + CPC_DEGRADED_CBU = "cbu" + CPC_DEGRADED_MRU = "mru" + CPC_DEGRADED_AMBIENT_TEMP = "ambient-temp" + CPC_DEGRADED_IML = "iml" +) + +type CpcProcessorRunningTime string + +const ( + PROCESSOR_RUNNING_TIME_SYSTEM_DETERMINED CpcProcessorRunningTime = "system-determined" + PROCESSOR_RUNNING_TIME_USER_DETERMINED = "user-determined" +) + +type CpcLanInterfaceType string + +const ( + CPC_LAN_INTERFACE_ETHERNET CpcLanInterfaceType = "ethernet" + CPC_LAN_INTERFACE_TOKEN_RING = "token-ring" + CPC_LAN_INTERFACE_UNKNOWN = "unknown" +) + +type IPv6Type string + +const ( + IPV6_TYPE_LINK_LOCAL IPv6Type = "link-local" + IPV6_TYPE_STATIC = "static" + IPV6_TYPE_AUTO = "auto" +) + +type CPCFeatureName string + +const ( + CPC_FEATURE_DPM_STORAGE_MANAGEMENT CPCFeatureName = "dpm-storage-management" + CPC_FEATURE_DPM_FCP_TAPE_MANAGEMENT = "dpm-fcp-tape-management" + CPC_FEATURE_DPM_SMCD_PARTITION_LINK_MANAGEMENT = 
"dpm-smcd-partition-link-management" +) + +type AutoStartEntryType string + +const ( + AUTO_START_ENTRY_PARTITION AutoStartEntryType = "partition" + AUTO_START_ENTRY_PARTITION_GROUP = "partition-group" +) + +type PowerSave string + +const ( + POWER_SAVE_HIGHT_PERFORMANCE PowerSave = "high-performance" + POWER_SAVE_LOW_POWER = "low-power" + POWER_SAVE_CUSTOM = "custom" + POWER_SAVE_NOT_SUPPORTED = "not-supported" + POWER_SAVE_NOT_AVAILABLE = "not-available" + POWER_SAVE_NOT_ENTITLED = "not-entitled" +) + +type PowerSaveState string + +const ( + POWER_SAVE_STATE_HIGHT_PERFORMANCE PowerSaveState = "high-performance" + POWER_SAVE_STATE_LOW_POWER = "low-power" + POWER_SAVE_STATE_CUSTOM = "custom" + POWER_SAVE_STATE_NOT_SUPPORTED = "not-supported" + POWER_SAVE_STATE_NOT_ENTITLED = "not-entitled" +) + +type PowerAllowed string + +const ( + POWER_ALLOWED PowerAllowed = "allowed" + POWER_ALLOWED_UNKNOWN = "unknown" + POWER_ALLOWED_NOT_SUPPORTED = "not-supported" + POWER_ALLOWED_NOT_ENTITLED = "not-entitled" + POWER_ALLOWED_UNDER_GROUP_CONTROL = "under-group-control" + POWER_ALLOWED_ONCE_A_DAY_EXCEEDED = "once-a-day-exceeded" +) + +type PowerCapState string + +const ( + POWER_CAP_STATE_DISABLED PowerCapState = "disabled" + POWER_CAP_STATE_ENABLED = "enabled" + POWER_CAP_STATE_NOT_CUSTOM = "custom" + POWER_CAP_STATE_NOT_SUPPORTED = "not-supported" + POWER_CAP_STATE_NOT_ENTITLED = "not-entitled" +) + +type Arom string + +const ( + CONCURRENT_ENGINEERING_CHANGES_AROM Arom = "concurrent-engineering-changes-arom" + ENGINEERING_CHANGES_AROM = "cengineering-changes-arom" + AROM_NOT_AVAILABLE = "not-available" +) + +type MclType string + +const ( + MCL_TYPE_RETRIEVED MclType = "retrieved" + MCL_TYPE_ACTIVATED = "activated" + MCL_TYPE_ACCEPTED = "accepted" + MCL_TYPE_INSTALLABLE_CONCURRENT = "installable-concurrent" + MCL_TYPE_REMOVABLE_CONCURRENT = "removable-concurrent" +) + +type ActionType string + +const ( + ACTION_CHANNEL_CONFIG ActionType = "channel-config" + ACTION_TCOUPLING_FACILITY_REACTIVATION = "coupling-facility-reactivation" + ACTION_POWER_ON_RESET_TRACKING = "power-on-reset-tracking" +) + +type ActionActivation string + +const ( + ACTION_ACTIVATION_CURRENT ActionActivation = "current" + ACTION_ACTIVATION_NEXT = "next" +) + +/** +* Sample: +* { +* "dpm-enabled": true, +* "has-unacceptable-status": true, +* "name": "P0LXSMOZ", +* "object-uri": "/api/cpcs/e8753ff5-8ea6-35d9-b047-83c2624ba8da", +* "se-version": "2.13.1" +* "status": "not-operating" +* } + */ +type CPC struct { + URI string `json:"object-uri,omitempty"` + Name string `json:"name,omitempty"` + Status CpcStatus `json:"status,omitempty"` + HasAcceptableStatus bool `json:"has-unacceptable-status,omitempty"` + DpmEnabled bool `json:"dpm-enabled,omitempty"` + SeVersion string `json:"se-version,omitempty"` +} + +type CpcsArray struct { + CPCS []CPC `json:"cpcs"` +} + +type CPCProperties struct { + ObjectURI string `json:"object-uri,omitempty"` + Parent string `json:"parent,omitempty"` + Class string `json:"class,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Status CpcStatus `json:"status,omitempty"` + AcceptableStatus []CpcStatus `json:"acceptable-status,omitempty"` + SeVersion string `json:"se-version,omitempty"` + HasHardwareMessages bool `json:"has-hardware-messages,omitempty"` + ImlMode CpcImlMode `json:"iml-mode,omitempty"` + DpmEnabled bool `json:"dpm-enabled,omitempty"` + AutoStartList []AutoStartEntry `json:"auto-start-list,omitempty"` + IsCpacfEnabled bool 
`json:"is-cpacf-enabled,omitempty"` + NextActivationProfileName string `json:"next-activation-profile-name,omitempty"` + LastUsedActivationProfile string `json:"last-used-activation-profile,omitempty"` + LastUsedIocds string `json:"last-used-iocds,omitempty"` + MachineModel string `json:"machine-model,omitempty"` + MachineType string `json:"machine-type,omitempty"` + MachineSerialNumber string `json:"machine-serial-number,omitempty"` + CpcSerialNumber string `json:"cpc-serial-number,omitempty"` + CpcNodeDescriptor string `json:"cpc-node-descriptor,omitempty"` + IsCbuInstalled bool `json:"is-cbu-installed,omitempty"` + IsCbuEnabled bool `json:"is-cbu-enabled,omitempty"` + IsCbuActivated bool `json:"is-cbu-activated,omitempty"` + IsRealCbuAvailable bool `json:"is-real-cbu-available,omitempty"` + CbuActivationDate int64 `json:"cbu-activation-date,omitempty"` + CbuExpirationDate int64 `json:"cbu-expiration-date,omitempty"` + CbuNumberOfTestsLeft int `json:"cbu-number-of-tests-left,omitempty"` + IsSecureExecutionEnabled bool `json:"is-secure-execution-enabled,omitempty"` + IsGlobalKeyInstalled bool `json:"is-global-key-installed,omitempty"` + IsHostKeyInstalled bool `json:"is-host-key-installed,omitempty"` + GlobalPrimaryKeyHash string `json:"global-primary-key-hash,omitempty"` + GlobalSecondaryKeyHash string `json:"global-secondary-key-hash,omitempty"` + HostPrimaryKeyHash string `json:"host-primary-key-hash,omitempty"` + HostSecondaryKeyHash string `json:"host-secondary-key-hash,omitempty"` + IsServiceRequired bool `json:"is-service-required,omitempty"` + DegradedStatus []CpcDegradedStatus `json:"degraded-status,omitempty"` + ProcessorRunningTimeType CpcProcessorRunningTime `json:"processor-running-time-type,omitempty"` + ProcessorRunningTime int `json:"processor-running-time,omitempty"` + DoesWaitStateEndTimeSlice bool `json:"does-wait-state-end-time-slice,omitempty"` + IsOnOffCodInstalled bool `json:"is-on-off-cod-installed,omitempty"` + IsOnOffCodEnabled bool `json:"is-on-off-cod-enabled,omitempty"` + IsOnOffCodActivated bool `json:"is-on-off-cod-activated,omitempty"` + OnOffCodActivationDate int64 `json:"on-off-cod-activation-date,omitempty"` + SoftwareModelPermanent string `json:"software-model-permanent,omitempty"` + SoftwareModelPermanentPlusBillable string `json:"software-model-permanent-plus-billable,omitempty"` + SoftwareModelPermanentPlusTemporary string `json:"software-model-permanent-plus-temporary,omitempty"` + SoftwareModelPurchased string `json:"software-model-purchased,omitempty"` + MsuPermanent int `json:"msu-permanent,omitempty"` + MsuPermanentPlusBillable int `json:"msu-permanent-plus-billable,omitempty"` + MsuPermanentPlusTemporary int `json:"msu-permanent-plus-temporary,omitempty"` + MsuPurchased int `json:"msu-purchased,omitempty"` + ProcessorCountGeneralPurpose int `json:"processor-count-general-purpose,omitempty"` + ProcessorCountServiceAssist int `json:"processor-count-service-assist,omitempty"` + ProcessorCountAap int `json:"processor-count-aap,omitempty"` + ProcessorCountIfl int `json:"processor-count-ifl,omitempty"` + ProcessorCountIcf int `json:"processor-count-icf,omitempty"` + ProcessorCountIip int `json:"processor-count-iip,omitempty"` + ProcessorCountDefective int `json:"processor-count-defective,omitempty"` + ProcessorCountSpare int `json:"processor-count-spare,omitempty"` + ProcessorCountPending int `json:"processor-count-pending,omitempty"` + ProcessorCountPendingGeneralPurpose int `json:"processor-count-pending-general-purpose,omitempty"` + 
ProcessorCountPendingServiceAssist int `json:"processor-count-pending-service-assist,omitempty"` + ProcessorCountPendingAap int `json:"processor-count-pending-aap,omitempty"` + ProcessorCountPendingIfl int `json:"processor-count-pending-ifl,omitempty"` + ProcessorCountPendingIcf int `json:"processor-count-pending-icf,omitempty"` + ProcessorCountPendingIip int `json:"processor-count-pending-iip,omitempty"` + ProcessorCountPermanentServiceAssist int `json:"processor-count-permanent-service-assist,omitempty"` + ProcessorCountPermanentIfl int `json:"processor-count-permanent-ifl,omitempty"` + ProcessorCountPermanentIcf int `json:"processor-count-permanent-icf,omitempty"` + ProcessorCountPermanentIip int `json:"processor-count-permanent-iip,omitempty"` + ProcessorCountUnassignedServiceAssist int `json:"processor-count-unassigned-service-assist,omitempty"` + ProcessorCountUnassignedIfl int `json:"processor-count-unassigned-ifl,omitempty"` + ProcessorCountUnassignedIcf int `json:"processor-count-unassigned-icf,omitempty"` + ProcessorCountUnassignedIip int `json:"processor-count-unassigned-iip,omitempty"` + HasTemporaryCapacityChangeAllowed bool `json:"has-temporary-capacity-change-allowed,omitempty"` + EcMclDescription EcMcl `json:"ec-mcl-description,omitempty"` + HasAutomaticSeSwitchEnabled bool `json:"has-automatic-se-switch-enabled,omitempty"` + LanInterface1Address string `json:"lan-interface1-address,omitempty"` + LanInterface1Type CpcLanInterfaceType `json:"lan-interface1-type,omitempty"` + LanInterface2Address string `json:"lan-interface2-address,omitempty"` + LanInterface2Type CpcLanInterfaceType `json:"lan-interface2-type,omitempty"` + Network1Ipv4Mask string `json:"network1-ipv4-mask,omitempty"` + Network1Ipv4PriIpaddr string `json:"network1-ipv4-pri-ipaddr,omitempty"` + Network1Ipv4AltIpaddr string `json:"network1-ipv4-alt-ipaddr,omitempty"` + Network1Ipv6Info []Ipv6Info `json:"network1-ipv6-info,omitempty"` + Network2Ipv4Mask string `json:"network2-ipv4-mask,omitempty"` + Network2Ipv4PriIpaddr string `json:"network2-ipv4-pri-ipaddr,omitempty"` + Network2Ipv4AltIpaddr string `json:"network2-ipv4-alt-ipaddr,omitempty"` + Network2Ipv6Info []Ipv6Info `json:"network2-ipv6-info,omitempty"` + HardwareMessages []HardwareMessage `json:"hardware-messages,omitempty"` + StorageTotalInstalled int64 `json:"storage-total-installed,omitempty"` + StorageHardwareSystemArea int64 `json:"storage-hardware-system-area,omitempty"` + StorageCustomer int64 `json:"storage-customer,omitempty"` + StorageCustomerCentral int64 `json:"storage-customer-central,omitempty"` + StorageCustomerExpanded int64 `json:"storage-customer-expanded,omitempty"` + StorageCustomerAvailable int64 `json:"storage-customer-available,omitempty"` + StorageVfmIncrementSize int64 `json:"storage-vfm-increment-size,omitempty"` + StorageVfmTotal int64 `json:"storage-vfm-total,omitempty"` + MaximumHipersockets int `json:"maximum-hipersockets,omitempty"` + MaximumAlternateStorageSites int `json:"maximum-alternate-storage-sites,omitempty"` + AvailableFeaturesList []CPCFeatureInfo `json:"available-features-list,omitempty"` + MaximumPartitions int `json:"maximum-partitions,omitempty"` + ManagementWorldWidePortName string `json:"management-world-wide-port-name,omitempty"` + SnaName string `json:"sna-name,omitempty"` + TargetName string `json:"target-name,omitempty"` + MaximumIsmVchids int `json:"maximum-ism-vchids,omitempty"` + MinimumFidNumber int `json:"minimum-fid-number,omitempty"` + MaximumFidNumber int `json:"maximum-fid-number,omitempty"` + 
CPCPowerRating int `json:"cpc-power-rating,omitempty"` + CPCPowerConsumption int `json:"cpc-power-consumption,omitempty"` + CPCPowerSaving PowerSave `json:"cpc-power-saving,omitempty"` + CPCPowerSavingState PowerSaveState `json:"cpc-power-saving-state,omitempty"` + CPCPowerSaveAllowed PowerAllowed `json:"cpc-power-save-allowed,omitempty"` + CPCPowerCappingState PowerCapState `json:"cpc-power-capping-state,omitempty"` + CPCPowerCapMinimum int `json:"cpc-power-cap-minimum,omitempty"` + CPCPowerCapMaximum int `json:"cpc-power-cap-maximum,omitempty"` + CPCPowerCapCurrent int `json:"cpc-power-cap-current,omitempty"` + CPCPowerCapAllowed PowerAllowed `json:"cpc-power-cap-allowed,omitempty"` + ZCPCPowerRating int `json:"zcpc-power-rating,omitempty"` + ZCPCPowerConsumption int `json:"zcpc-power-consumption,omitempty"` + ZCPCPowerSaving PowerSave `json:"zcpc-power-saving,omitempty"` + ZCPCPowerSavingState PowerSaveState `json:"zcpc-power-saving-state,omitempty"` + ZCPCPowerSaveAllowed PowerAllowed `json:"zcpc-power-save-allowed,omitempty"` + ZCPCPowerCappingState PowerCapState `json:"zcpc-power-capping-state,omitempty"` + ZCPCPowerCapMinimum int `json:"zcpc-power-cap-minimum,omitempty"` + ZCPCPowerCapMaximum int `json:"zcpc-power-cap-maximum,omitempty"` + ZCPCPowerCapCurrent int `json:"zcpc-power-cap-current,omitempty"` + ZCPCPowerCapAllowed PowerAllowed `json:"zcpc-power-cap-allowed,omitempty"` + ZCPCAmbientTemperature float64 `json:"zcpc-ambient-temperature,omitempty"` + ZCPCExhaustTemperature float64 `json:"zcpc-exhaust-temperature,omitempty"` + ZCPCHumidity int `json:"zcpc-humidity,omitempty"` + ZCPCDewPoint float64 `json:"zcpc-dew-point,omitempty"` + ZCPCHeatLoad int `json:"zcpc-heat-load,omitempty"` + ZCPCHeatLoadForcedAir int `json:"zcpc-heat-load-forced-air,omitempty"` + ZCPCHeatLoadWater int `json:"zcpc-heat-load-water,omitempty"` + ZCPCMaximumPotentialPower int `json:"zcpc-maximum-potential-power,omitempty"` + ZCPCMaximumPotentialHeatLoad int `json:"zcpc-maximum-potential-heat-load,omitempty"` + LastEnergyAdviceTime int64 `json:"last-energy-advice-time,omitempty"` + ZCPCMinimumInletAirTemperature float64 `json:"zcpc-minimum-inlet-air-temperature,omitempty"` + ZCPCMaximumInletAirTemperature float64 `json:"zcpc-maximum-inlet-air-temperature,omitempty"` + ZCPCMaximumInletLiquidTemperature float64 `json:"zcpc-maximum-inlet-liquid-temperature,omitempty"` + ZCPCEnvironmentalClass string `json:"zcpc-environmental-class,omitempty"` +} + +type AutoStartEntry struct { + PostStartDelay int `json:"post-start-delay,omitempty"` + Type AutoStartEntryType `json:"type,omitempty"` + PartitionURI string `json:"partition-uri,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + PartitionURIs []string `json:"partition-uris,omitempty"` +} + +type CPCFeatureInfo struct { + Name CPCFeatureName `json:"name,omitempty"` + Description string `json:"description,omitempty"` + State bool `json:"state,omitempty"` +} + +type Mcl struct { + Type MclType `json:"type,omitempty"` + Level string `json:"level,omitempty"` + LastUpdate int64 `json:"last-update,omitempty"` +} + +type Action struct { + Type ActionType `json:"type,omitempty"` + Activation ActionActivation `json:"activation,omitempty"` + Pending bool `json:"pending,omitempty"` +} + +type Ec struct { + Number string `json:"number,omitempty"` + PartNumber string `json:"part-number,omitempty"` + Type string `json:"type,omitempty"` + Description string `json:"description,omitempty"` + Mcl []Mcl `json:"mcl,omitempty"` +} 
+ +type EcMcl struct { + Actions []Action `json:"actions,omitempty"` + Ec []Ec `json:"ec,omitempty"` + LicControlLevel string `json:"lic-control-level,omitempty"` + DriverLevel string `json:"driver-level,omitempty"` + BundleLevel string `json:"bundle-level,omitempty"` + AromInfo Arom `json:"arom-info,omitempty"` +} + +type HardwareMessage struct { + ElementURL string `json:"element-uri,omitempty"` + ElementID string `json:"element-id,omitempty"` + Parent string `json:"parent,omitempty"` + Class string `json:"class,omitempty"` + Timestamp int64 `json:"timestamp,omitempty"` + ServiceSupported bool `json:"service-supported,omitempty"` + Text string `json:"text,omitempty"` +} + +type Ipv6Info struct { + Type IPv6Type `json:"type,omitempty"` + Prefix int `json:"prefix,omitempty"` + PriIpAddress string `json:"pri-ip-address,omitempty"` + AltIpAddress string `json:"alt-ip-address,omitempty"` +} + +////////////////////////////////////////////////// +// JOB +////////////////////////////////////////////////// + +type JobStatus string + +const ( + JOB_STATUS_RUNNING JobStatus = "running" + JOB_STATUS_CANCEL_PENDING = "cancel-pending" + JOB_STATUS_CANCELED = "canceled" + JOB_STATUS_COMPLETE = "complete" +) + +type JobResults struct { + Message string `json:"message,omitempty"` +} + +type Job struct { + URI string `json:"job-uri,omitempty"` + Status JobStatus `json:"status,omitempty"` + JobStatusCode int `json:"job-status-code,omitempty"` + JobReasonCode int `json:"job-reason-code,omitempty"` + JobResults JobResults `json:"job-results,omitempty"` +} + +////////////////////////////////////////////////// +// LPAR +////////////////////////////////////////////////// + +type PartitionType string + +const ( + PARTITION_TYPE_SSC PartitionType = "ssc" + PARTITION_TYPE_LINUX = "linux" + PARTITION_TYPE_ZVM = "zvm" +) + +type PartitionStatus string + +const ( + PARTITION_STATUS_NOT_ACTIVE PartitionStatus = "communications-not-active" + PARTITION_STATUS_STATUS_CHECK = "status-check" + PARTITION_STATUS_STOPPED = "stopped" + PARTITION_STATUS_TERMINATED = "terminated" + PARTITION_STATUS_STARTING = "starting" + PARTITION_STATUS_ACTIVE = "active" + PARTITION_STATUS_STOPPING = "stopping" + PARTITION_STATUS_DEGRADED = "degraded" + PARTITION_STATUS_REV_ERR = "reservation-error" + PARTITION_STATUS_PAUSED = "paused" +) + +type PartitionProcessorMode string + +const ( + PROCESSOR_MODE_DEDICATED PartitionProcessorMode = "dedicated" + PROCESSOR_MODE_SHARED = "shared" +) + +type PartitionBootDevice string + +const ( + BOOT_DEVICE_STORAGE_ADAPTER PartitionBootDevice = "storage-adapter" + BOOT_DEVICE_STORAGE_VOLUME = "storage-volume" + BOOT_DEVICE_NETWORK_ADAPTER = "network-adapter" + BOOT_DEVICE_FTP = "ftp" + BOOT_DEVICE_REMOVABLE_MEDIA = "removable-media" + BOOT_DEVICE_ISO_IMAGE = "iso-image" + BOOT_DEVICE_NONE = "none" // default +) + +type PartionBootRemovableMediaType string + +const ( + BOOT_REMOVABLE_MEDIA_CDROM PartionBootRemovableMediaType = "cdrom" + BOOT_REMOVABLE_MEDIA_USB = "usb" +) + +type SscBootSelection string + +const ( + SSC_BOOT_SELECTION_INSTALLER SscBootSelection = "installer" + SSC_BOOT_SELECTION_APPLIANCE = "appliance" +) + +type LPAR struct { + URI string `json:"object-uri,omitempty"` + Name string `json:"name,omitempty"` + Status PartitionStatus `json:"status,omitempty"` + Type PartitionType `json:"type,omitempty"` +} + +type LPARsArray struct { + LPARS []LPAR `json:"partitions"` +} + +type PartitionFeatureInfo struct { + Name string `json:"name,omitempty"` + Description string 
`json:"description,omitempty"` + State bool `json:"state,omitempty"` +} + +type LparObjectProperties struct { + URI string `json:"object-uri,omitempty"` + CpcURI string `json:"parent,omitempty"` + Class string `json:"class,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Status PartitionStatus `json:"status,omitempty"` + Type PartitionType `json:"type,omitempty"` + ShortName string `json:"short-name,omitempty"` + ID string `json:"partition-id,omitempty"` + AutoGenerateID bool `json:"autogenerate-partition-id,omitempty"` + OsName string `json:"os-name,omitempty"` + OsType string `json:"os-type,omitempty"` + OsVersion string `json:"os-version,omitempty"` + ReserveResources bool `json:"reserve-resources,omitempty"` + DegradedAdapters []string `json:"degraded-adapters,omitempty"` + ProcessorMode PartitionProcessorMode `json:"processor-mode,omitempty"` + CpProcessors int `json:"cp-processors,omitempty"` + IflProcessors int `json:"ifl-processors,omitempty"` + IflAbsoluteProcessorCapping bool `json:"ifl-absolute-processor-capping,omitempty"` + CpAbsoluteProcessorCapping bool `json:"cp-absolute-processor-capping,omitempty"` + IflAbsoluteProcessorCappingValue float64 `json:"ifl-absolute-processor-capping-value,omitempty"` + CpAbsoluteProcessorCappingValue float64 `json:"cp-absolute-processor-capping-value,omitempty"` + IflProcessingWeightCapped bool `json:"ifl-processing-weight-capped,omitempty"` + CpProcessingWeightCapped bool `json:"cp-processing-weight-capped,omitempty"` + MinimumIflProcessingWeight int `json:"minimum-ifl-processing-weight,omitempty"` + MinimumCpProcessingWeight int `json:"minimum-cp-processing-weight,omitempty"` + InitialIflProcessingWeight int `json:"initial-ifl-processing-weight,omitempty"` + InitialCpProcessingWeight int `json:"initial-cp-processing-weight,omitempty"` + CurrentIflProcessingWeight int `json:"current-ifl-processing-weight,omitempty"` + CurrentCpProcessingWeight int `json:"current-cp-processing-weight,omitempty"` + MaximumIflProcessingWeight int `json:"maximum-ifl-processing-weight,omitempty"` + MaximumCpProcessingWeight int `json:"maximum-cp-processing-weight,omitempty"` + ProcessorManagementEnabled bool `json:"processor-management-enabled,omitempty"` + InitialMemory int `json:"initial-memory,omitempty"` + ReservedMemory int `json:"reserved-memory,omitempty"` + MaximumMemory int `json:"maximum-memory,omitempty"` + AutoStart bool `json:"auto-start,omitempty"` + BootDevice PartitionBootDevice `json:"boot-device,omitempty"` + BootNetworkDevice string `json:"boot-network-device,omitempty"` + BootFtpHost string `json:"boot-ftp-host,omitempty"` + BootFtpUsername string `json:"boot-ftp-username,omitempty"` + BootFtpPassword string `json:"boot-ftp-password,omitempty"` + BootFtpInsfile string `json:"boot-ftp-insfile,omitempty"` + BootRemovableMedia string `json:"boot-removable-media,omitempty"` + BootRemovableMediaType PartionBootRemovableMediaType `json:"boot-removable-media-type,omitempty"` + BootTimeout int `json:"boot-timeout,omitempty"` + BootStorageDevice string `json:"boot-storage-device,omitempty"` + BootStorageVolume string `json:"boot-storage-volume,omitempty"` + BootLogicalUnitNumber string `json:"boot-logical-unit-number,omitempty"` + BootWorldWidePortName string `json:"boot-world-wide-port-name,omitempty"` + BootConfigurationSelector int `json:"boot-configuration-selector,omitempty"` + BootRecordLba string `json:"boot-record-lba,omitempty"` + BootLoadParameters string 
`json:"boot-load-parameters,omitempty"` + BootOsSpecificParameters string `json:"boot-os-specific-parameters,omitempty"` + BootIsoImageName string `json:"boot-iso-image-name,omitempty"` + BootIsoInsFile string `json:"boot-iso-ins-file,omitempty"` + AccessGlobalPerformanceData bool `json:"access-global-performance-data,omitempty"` + PermitCrossPartitionCommands bool `json:"permit-cross-partition-commands,omitempty"` + AccessBasicCounterSet bool `json:"access-basic-counter-set,omitempty"` + AccessProblemStateCounterSet bool `json:"access-problem-state-counter-set,omitempty"` + AccessCryptoActivityCounterSet bool `json:"access-crypto-activity-counter-set,omitempty"` + AccessExtendedCounterSet bool `json:"access-extended-counter-set,omitempty"` + AccessCoprocessorGroupSet bool `json:"access-coprocessor-group-set,omitempty"` + AccessBasicSampling bool `json:"access-basic-sampling,omitempty"` + AccessDiagnosticSampling bool `json:"access-diagnostic-sampling,omitempty"` + PermitDesKeyImportFunctions bool `json:"permit-des-key-import-functions,omitempty"` + PermitAesKeyImportFunctions bool `json:"permit-aes-key-import-functions,omitempty"` + ThreadsPerProcessor int `json:"threads-per-processor,omitempty"` + VirtualFunctionUris []string `json:"virtual-function-uris,omitempty"` + NicUris []string `json:"nic-uris,omitempty"` + HbaUris []string `json:"hba-uris,omitempty"` + StorageGroupURIs []string `json:"storage-group-uris,omitempty"` + CryptoConfiguration CryptoConfig `json:"crypto-configuration,omitempty"` + SscHostName string `json:"ssc-host-name,omitempty"` + SscBootSelection SscBootSelection `json:"ssc-boot-selection,omitempty"` + SscIpv4Gateway string `json:"ssc-ipv4-gateway,omitempty"` + SscIpv6Gateway string `json:"ssc-ipv6-gateway,omitempty"` + SscDnsServers []string `json:"ssc-dns-servers,omitempty"` + SscMasterUserid string `json:"ssc-master-userid,omitempty"` + SscMasterPw string `json:"ssc-master-pw,omitempty"` + AvailableFeaturesList []PartitionFeatureInfo `json:"available-features-list,omitempty"` + Secureboot bool `json:"secure-boot,omitempty"` +} + +type LparProperties struct { + URI string `json:"object-uri,omitempty"` + CpcURI string `json:"parent,omitempty"` + Class string `json:"class,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Status PartitionStatus `json:"status,omitempty"` + Type PartitionType `json:"type,omitempty"` + ShortName string `json:"short-name,omitempty"` + ID string `json:"partition-id,omitempty"` + AutoGenerateID bool `json:"autogenerate-partition-id,omitempty"` + OsName string `json:"os-name,omitempty"` + OsType string `json:"os-type,omitempty"` + OsVersion string `json:"os-version,omitempty"` + ReserveResources bool `json:"reserve-resources,omitempty"` + DegradedAdapters []string `json:"degraded-adapters,omitempty"` + ProcessorMode PartitionProcessorMode `json:"processor-mode,omitempty"` + CpProcessors int `json:"cp-processors,omitempty"` + IflProcessors int `json:"ifl-processors,omitempty"` + IflAbsoluteProcessorCapping bool `json:"ifl-absolute-processor-capping,omitempty"` + CpAbsoluteProcessorCapping bool `json:"cp-absolute-processor-capping,omitempty"` + IflAbsoluteProcessorCappingValue float64 `json:"ifl-absolute-processor-capping-value,omitempty"` + CpAbsoluteProcessorCappingValue float64 `json:"cp-absolute-processor-capping-value,omitempty"` + IflProcessingWeightCapped bool `json:"ifl-processing-weight-capped,omitempty"` + CpProcessingWeightCapped bool 
`json:"cp-processing-weight-capped,omitempty"` + MinimumIflProcessingWeight int `json:"minimum-ifl-processing-weight,omitempty"` + MinimumCpProcessingWeight int `json:"minimum-cp-processing-weight,omitempty"` + InitialIflProcessingWeight int `json:"initial-ifl-processing-weight,omitempty"` + InitialCpProcessingWeight int `json:"initial-cp-processing-weight,omitempty"` + CurrentIflProcessingWeight int `json:"current-ifl-processing-weight,omitempty"` + CurrentCpProcessingWeight int `json:"current-cp-processing-weight,omitempty"` + MaximumIflProcessingWeight int `json:"maximum-ifl-processing-weight,omitempty"` + MaximumCpProcessingWeight int `json:"maximum-cp-processing-weight,omitempty"` + ProcessorManagementEnabled bool `json:"processor-management-enabled,omitempty"` + InitialMemory int `json:"initial-memory,omitempty"` + ReservedMemory int `json:"reserved-memory,omitempty"` + MaximumMemory int `json:"maximum-memory,omitempty"` + AutoStart bool `json:"auto-start,omitempty"` + BootDevice PartitionBootDevice `json:"boot-device,omitempty"` + BootNetworkDevice string `json:"boot-network-device,omitempty"` + BootFtpHost string `json:"boot-ftp-host,omitempty"` + BootFtpUsername string `json:"boot-ftp-username,omitempty"` + BootFtpPassword string `json:"boot-ftp-password,omitempty"` + BootFtpInsfile string `json:"boot-ftp-insfile,omitempty"` + BootRemovableMedia string `json:"boot-removable-media,omitempty"` + BootRemovableMediaType PartionBootRemovableMediaType `json:"boot-removable-media-type,omitempty"` + BootTimeout int `json:"boot-timeout,omitempty"` + BootStorageDevice string `json:"boot-storage-device,omitempty"` + BootStorageVolume string `json:"boot-storage-volume,omitempty"` + BootLogicalUnitNumber string `json:"boot-logical-unit-number,omitempty"` + BootWorldWidePortName string `json:"boot-world-wide-port-name,omitempty"` + BootConfigurationSelector int `json:"boot-configuration-selector,omitempty"` + BootRecordLba string `json:"boot-record-lba,omitempty"` + BootLoadParameters string `json:"boot-load-parameters,omitempty"` + BootOsSpecificParameters string `json:"boot-os-specific-parameters,omitempty"` + BootIsoImageName string `json:"boot-iso-image-name,omitempty"` + BootIsoInsFile string `json:"boot-iso-ins-file,omitempty"` + AccessGlobalPerformanceData bool `json:"access-global-performance-data,omitempty"` + PermitCrossPartitionCommands bool `json:"permit-cross-partition-commands,omitempty"` + AccessBasicCounterSet bool `json:"access-basic-counter-set,omitempty"` + AccessProblemStateCounterSet bool `json:"access-problem-state-counter-set,omitempty"` + AccessCryptoActivityCounterSet bool `json:"access-crypto-activity-counter-set,omitempty"` + AccessExtendedCounterSet bool `json:"access-extended-counter-set,omitempty"` + AccessCoprocessorGroupSet bool `json:"access-coprocessor-group-set,omitempty"` + AccessBasicSampling bool `json:"access-basic-sampling,omitempty"` + AccessDiagnosticSampling bool `json:"access-diagnostic-sampling,omitempty"` + PermitDesKeyImportFunctions bool `json:"permit-des-key-import-functions,omitempty"` + PermitAesKeyImportFunctions bool `json:"permit-aes-key-import-functions,omitempty"` + ThreadsPerProcessor int `json:"threads-per-processor,omitempty"` + VirtualFunctionUris []string `json:"virtual-function-uris,omitempty"` + NicUris []string `json:"nic-uris,omitempty"` + HbaUris []string `json:"hba-uris,omitempty"` + StorageGroupURIs []string `json:"storage-group-uris,omitempty"` + CryptoConfiguration CryptoConfig `json:"-"` + SscHostName string 
`json:"ssc-host-name,omitempty"` + SscBootSelection SscBootSelection `json:"ssc-boot-selection,omitempty"` + SscIpv4Gateway string `json:"ssc-ipv4-gateway,omitempty"` + SscIpv6Gateway string `json:"ssc-ipv6-gateway,omitempty"` + SscDnsServers []string `json:"ssc-dns-servers,omitempty"` + SscMasterUserid string `json:"ssc-master-userid,omitempty"` + SscMasterPw string `json:"ssc-master-pw,omitempty"` + AvailableFeaturesList []PartitionFeatureInfo `json:"available-features-list,omitempty"` + Secureboot bool `json:"secure-boot,omitempty"` +} + +type StartStopLparResponse struct { + URI string `json:"job-uri"` + Message string `json:"message"` +} + +////////////////////////////////////////////////// +// NIC +////////////////////////////////////////////////// + +type NicType string + +const ( + NIC_TYPE_ROCE NicType = "roce" + NIC_TYPE_IQD = "iqd" + NIC_TYPE_OSD = "osd" +) + +type SscIpAddressType string + +const ( + SSC_IP_TYPE_IPV4 SscIpAddressType = "ipv4" + SSC_IP_TYPE_IPV6 = "ipv6" + SSC_IP_TYPE_LINKLOCAL = "linklocal" + SSC_IP_TYPE_DHCP = "dhcp" +) + +type VlanType string + +const ( + VLAN_TYPE_ENFORCED VlanType = "enforced" +) + +type NIC struct { + ID string `json:"element-id,omitempty"` + URI string `json:"element-uri,omitempty"` + Parent string `json:"parent,omitempty"` + Class string `json:"class,omitempty"` + /* below are payloads when create a new Nic */ + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + DeviceNumber string `json:"device-number,omitempty"` + NetworkAdapterPortURI string `json:"network-adapter-port-uri,omitempty"` + VirtualSwitchUriType string `json:"virtual-switch-uri-type,omitempty"` + VirtualSwitchURI string `json:"virtual-switch-uri,omitempty"` + Type NicType `json:"type,omitempty"` + SscManagmentNIC bool `json:"ssc-management-nic,omitempty"` + SscIpAddressType SscIpAddressType `json:"ssc-ip-address-type,omitempty"` + SscIpAddress string `json:"ssc-ip-address,omitempty"` + VlanID int `json:"vlan-id,omitempty"` + MacAddress string `json:"mac-address,omitempty"` + SscMaskPrefix string `json:"ssc-mask-prefix,omitempty"` + VlanType VlanType `json:"vlan-type,omitempty"` +} + +type NicCreateResponse struct { + URI string `json:"element-uri"` +} + +////////////////////////////////////////////////// +// Storage Groups +////////////////////////////////////////////////// + +type StorageGroupOperation string + +const ( + STORAGE_GROUP_MODIFY StorageGroupOperation = "modify" + STORAGE_VOLUME_CREATE StorageGroupOperation = "create" +) + +type StorageGroupState string + +const ( + STORAGE_GROUP_COMPLETE StorageGroupState = "complete" + STORAGE_GROUP_PENDING = "pending" + STORAGE_GROUP_INCOMPLETE = "incomplete" + STORAGE_GROUP_PENDING_WITH_MISMATCHES = "pending-with-mismatches" + STORAGE_GROUP_OVERPROVISIONED = "overprovisioned" + STORAGE_GROUP_CHECKING_MIGRATION = "checking-migration" + STORAGE_GROUP_CONFIGURATION_ERROR = "configuration-error" +) + +type WWPNStatus string + +const ( + WWPN_STATUS_VALIDATED WWPNStatus = "validated" + WWPN_STATUS_NOT_VALIDATED = "not-validated" + WWPN_STATUS_UNKNOWN = "unknown" + WWPN_STATUS_INCOMPLETE = "incomplete" +) + +type StorageGroupArray struct { + STORAGEGROUPS []StorageGroup `json:"storage-groups"` +} + +type StorageVolumeArray struct { + STORAGEVOLUMES []StorageVolume `json:"storage-volumes"` +} + +type StorageGroupPayload struct { + StorageGroupURI string `json:"storage-group-uri"` + StorageGroupName string `json:"storage-group-name,omitempty"` + PartitionName string 
`json:"partition-name,omitempty"` +} +type AttachStorageGroupPayload struct { + StorageGroupURI string `json:"storage-group-uri"` +} + +// Storage group class specific properties +type StorageGroupProperties struct { + Class string `json:"class,omitempty"` + Connectivity int `json:"connectivity,omitempty"` + ActiveConnectivity int `json:"active-connectivity,omitempty"` + CpcURI string `json:"cpc-uri,omitempty"` + CandidatePortURIs []string `json:"candidate-adapter-port-uris,omitempty"` + Description string `json:"description,omitempty"` + DirectConnectionCount int `json:"direct-connection-count,omitempty"` + FulfillmentState StorageGroupState `json:"fulfillment-state,omitempty"` + MaxPartitions int `json:"max-partitions,omitempty"` + ActiveMaxPartitions int `json:"active-max-partitions,omitempty"` + Name string `json:"name,omitempty"` + ObjectID string `json:"object-id,omitempty"` + ObjectURI string `json:"object-uri,omitempty"` + Parent string `json:"parent,omitempty"` + Shared bool `json:"shared,omitempty"` + StorageVolumes []StorageVolume `json:"storage-volumes,omitempty"` + StorageVolumesURIs []string `json:"storage-volume-uris,omitempty"` + UnAssignedWWPNs []WWPN `json:"unassigned-world-wide-port-names,omitempty"` + VirtualStorageResourceURIs []string `json:"virtual-storage-resource-uris,omitempty"` + Type string `json:"type,omitempty"` +} + +type CreateStorageGroupProperties struct { + CpcURI string `json:"cpc-uri,omitempty"` + TemplateURI string `json:"template-uri,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Type string `json:"type,omitempty"` + Shared bool `json:"shared"` + Connectivity int `json:"connectivity,omitempty"` + MaxPartitions int `json:"max-partitions,omitempty"` + DirectConnectionCount int `json:"direct-connection-count,omitempty"` + StorageVolumes []StorageVolume `json:"storage-volumes,omitempty"` + EmailToAddress []string `json:"email-to-address,omitempty"` + EmailCcAddress []string `json:"email-cc-address,omitempty"` + EmailInsert string `json:"email-insert,omitempty"` +} + +type WWPN struct { + Name string `json:"world-wide-port-name,omitempty"` + Status WWPNStatus `json:"status,omitempty"` +} + +// Storage group object +type StorageGroup struct { + ObjectID string `json:"object-id,omitempty"` + Parent string `json:"parent,omitempty"` + CpcURI string `json:"cpc-uri,omitempty"` + Description string `json:"description,omitempty"` + FulfillmentState StorageGroupState `json:"fulfillment-state,omitempty"` + Name string `json:"name,omitempty"` + ObjectURI string `json:"object-uri,omitempty"` + Type string `json:"type,omitempty"` + StorageVolumes []StorageVolume `json:"storage-volumes,omitempty"` +} + +type StorageGroupPartitions struct { + GetStorageGroups []LparProperties `json:"partitions,omitempty"` +} + +type StorageGroupModel string + +const ( + STORAGE_GROUP_MODEL_1 StorageGroupModel = "1" + STORAGE_GROUP_MODEL_2 = "2" + STORAGE_GROUP_MODEL_3 = "3" + STORAGE_GROUP_MODEL_9 = "9" + STORAGE_GROUP_MODEL_27 = "27" + STORAGE_GROUP_MODEL_54 = "54" + STORAGE_GROUP_MODEL_EAV = "EAV" +) + +type EckdVolumetype string + +const ( + STORAGE_VOLUME_ECKDTYPE_BASE EckdVolumetype = "base" + STORAGE_VOLUME_ECKDTYPE_ALIAS EckdVolumetype = "alias" +) + +type StorageVolume struct { + Operation StorageGroupOperation `json:"operation,omitempty"` + Class string `json:"class,omitempty"` + Parent string `json:"parent,omitempty"` + URI string `json:"element-uri,omitempty"` + Name string `json:"name,omitempty"` + Description string 
`json:"description,omitempty"` + Size float64 `json:"size,omitempty"` + ActiveSize float64 `json:"active-size,omitempty"` + UUID string `json:"uuid,omitempty"` + Model string `json:"model,omitempty"` + ActiveModel string `json:"acitve-model,omitempty"` + Usage StorageVolumeUsage `json:"usage,omitempty"` + FulfillmentState StorageGroupState `json:"fulfillment-state,omitempty"` + Cylinders int `json:"cylinders,omitempty"` + DeviceNumber string `json:"device-number,omitempty"` + ControlUnitURI string `json:"control-unit-uri,omitempty"` + EckdType string `json:"eckd-type,omitempty"` + UnitAddress string `json:"unit-address,omitempty"` + Paths []VolumePath `json:"paths,omitempty"` + AdapterURI string `json:"adapter-uri,omitempty"` + SerialNumber string `json:"serial-number,omitempty"` + FID string `json:"fid,omitempty"` +} + +type StorageVolumeUsage string + +const ( + BOOT_USAGE StorageVolumeUsage = "boot" + DATA_USAGE StorageVolumeUsage = "data" +) + +type VolumePath struct { + PartitionURI string `json:"partition-uri,omitempty"` + DeviceNumber string `json:"device-number,omitempty"` + TargetWWPN string `json:"target-world-wide-port-name,omitempty"` + LogicalUnitNumber string `json:"logical-unit-number,omitempty"` +} +type StorageGroupCreateResponse struct { + URI []string `json:"element-uris,omitempty"` + ObjectURI string `json:"object-uri,omitempty"` + SvPaths []StorageGroupVolumePath `json:"sv-paths,omitempty"` +} +type StorageGroupVolumePath struct { + URI string `json:"element-uris,omitempty"` + Paths []VolumePath `json:"paths,omitempty"` +} + +// ASCIIConsolePayload +type AsciiConsoleURIPayload struct { + ForceTakeover bool `json:"force-takeover,omitempty"` +} + +type AsciiConsoleURIResponse struct { + URI string `json:"websocket-uri"` + SessionID string `json:"session-id"` +} + +// SSC crypto DomainInfo added as a substructure of CryptoConfig structure +type DomainInfo struct { + DomainIdx int `json:"domain-index"` + AccessMode string `json:"access-mode"` +} + +// SSC crypto-configuration object properties added to Support Crypto Card working +type CryptoConfig struct { + CryptoAdapterUris []string `json:"crypto-adapter-uris,omitempty"` + CryptoDomainConfigurations []DomainInfo `json:"crypto-domain-configurations,omitempty"` +} + +type EnergyRequestPayload struct { + Timescale string `json:"timescale,omitempty"` + Type string `json:"type,omitempty"` + Range string `json:"range,omitempty"` + Resolution string `json:"resolution,omitempty"` +} diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/nic.go b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/nic.go new file mode 100644 index 0000000000..6aa5a4d8c1 --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/nic.go @@ -0,0 +1,198 @@ +// Copyright 2021-2023 IBM Corp. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
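+// Illustrative usage sketch: the snippet below shows how a caller might wire
+// NicManager to a client and create a NIC on a partition. The endpoint,
+// Options value, logger, and the partition/virtual-switch URIs are
+// placeholder assumptions, not values defined by this package.
+//
+//	client, _ := NewClient("https://hmc.example.com:6794", opts, logger) // opts, logger: caller-supplied placeholders
+//	nicManager := NewNicManager(client)
+//	nic := &NIC{
+//		Name:             "nic0",
+//		Type:             NIC_TYPE_OSD,
+//		VirtualSwitchURI: "/api/virtual-switches/<vswitch-id>", // placeholder URI
+//	}
+//	nicURI, status, hmcErr := nicManager.CreateNic("/api/partitions/<partition-id>", nic)
+//	if hmcErr != nil {
+//		// inspect status and the returned *HmcError here
+//	}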
+ +package zhmcclient + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "path" + + "go.uber.org/zap" +) + +// NicAPI defines an interface for issuing NIC requests to ZHMC +// +//go:generate counterfeiter -o fakes/nic.go --fake-name NicAPI . NicAPI +type NicAPI interface { + CreateNic(lparURI string, nic *NIC) (string, int, *HmcError) + DeleteNic(nicURI string) (int, *HmcError) + GetNicProperties(nicURI string) (*NIC, int, *HmcError) + UpdateNicProperties(nicURI string, props *NIC) (int, *HmcError) +} + +type NicManager struct { + client ClientAPI +} + +func NewNicManager(client ClientAPI) *NicManager { + return &NicManager{ + client: client, + } +} + +/** +* POST /api/partitions/{partition-id}/nics +* @ return element-uri +* Return: 201 and element-uri +* or: 400, 403, 404, 409, 503, + */ +func (m *NicManager) CreateNic(lparURI string, nic *NIC) (string, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, lparURI, "nics") + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodPost)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, nic, "") + if err != nil { + logger.Error("error on create nic", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return "", status, err + } + + if status == http.StatusCreated { + uriObj := NicCreateResponse{} + err := json.Unmarshal(responseBody, &uriObj) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return "", status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: nic created, request url: %v, method: %v, status: %v, nic uri: %v", requestUrl, http.MethodPost, status, uriObj.URI)) + return uriObj.URI, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error on create nic", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(errors.New(errorResponseBody.Message))) + return "", status, errorResponseBody + +} + +/** +* DELETE /api/partitions/{partition-id}/nics/{nic-id} +* Return: 204 +* or: 400, 403, 404, 409, 503 + */ +func (m *NicManager) DeleteNic(nicURI string) (int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, nicURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodDelete)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodDelete, requestUrl, nil, "") + if err != nil { + logger.Error("error on delete nic", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodDelete), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return status, err + } + + if status == http.StatusNoContent { + logger.Info(fmt.Sprintf("Response: nic deleted, request url: %v, method: %v, status: %v", requestUrl, http.MethodDelete, status)) + return status, nil + } + + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error on delete nic", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodDelete), + 
zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return status, errorResponseBody +} + +/** +* GET /api/partitions/{partition-id}/nics/{nic-id} +* Return: 200 and NIC +* or: 400, 404, + */ +func (m *NicManager) GetNicProperties(nicURI string) (*NIC, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, nicURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error on get nic properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + + if status == http.StatusOK { + nic := NIC{} + err := json.Unmarshal(responseBody, &nic) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, nic properties: %v", requestUrl, http.MethodGet, status, &nic)) + return &nic, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error on get nic properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return nil, status, errorResponseBody +} + +/** +* POST /api/partitions/{partition-id}/nics/{nic-id} +* Return: 204 +* or: 400, 404, + */ +func (m *NicManager) UpdateNicProperties(nicURI string, props *NIC) (int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, nicURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodPost)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, props, "") + if err != nil { + logger.Error("error on update nic properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return status, err + } + + if status == http.StatusNoContent { + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v", requestUrl, http.MethodPost, status)) + return status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error on update nic properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return status, errorResponseBody +} diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/sgroup.go b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/sgroup.go new file mode 100644 index 0000000000..6f7e664782 --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/sgroup.go @@ -0,0 +1,405 @@ +// Copyright 2021-2023 IBM Corp. All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zhmcclient + +import ( + "encoding/json" + "fmt" + "net/http" + "path" + + "go.uber.org/zap" +) + +// StorageGroupAPI defines an interface for issuing NIC requests to ZHMC +//go:generate counterfeiter -o fakes/sgroup.go --fake-name StorageGroupAPI . StorageGroupAPI + +type StorageGroupAPI interface { + ListStorageGroups(storageGroupURI string, cpcUri string) ([]StorageGroup, int, *HmcError) + GetStorageGroupProperties(storageGroupURI string) (*StorageGroupProperties, int, *HmcError) + ListStorageVolumes(storageGroupURI string) ([]StorageVolume, int, *HmcError) + GetStorageVolumeProperties(storageVolumeURI string) (*StorageVolume, int, *HmcError) + UpdateStorageGroupProperties(storageGroupURI string, updateRequest *StorageGroupProperties) (int, *HmcError) + FulfillStorageGroup(storageGroupURI string, updateRequest *StorageGroupProperties) (int, *HmcError) + CreateStorageGroups(storageGroupURI string, storageGroup *CreateStorageGroupProperties) (*StorageGroupCreateResponse, int, *HmcError) + GetStorageGroupPartitions(storageGroupURI string, query map[string]string) (*StorageGroupPartitions, int, *HmcError) + DeleteStorageGroup(storageGroupURI string) (int, *HmcError) +} + +type StorageGroupManager struct { + client ClientAPI +} + +func NewStorageGroupManager(client ClientAPI) *StorageGroupManager { + return &StorageGroupManager{ + client: client, + } +} + +/** + * GET /api/storage-groups + * @cpcURI the URI of the CPC + * @return storage group array + * Return: 200 and Storage Group array + * or: 400, 404, 409 + */ +func (m *StorageGroupManager) ListStorageGroups(storageGroupURI string, cpcUri string) ([]StorageGroup, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, storageGroupURI) + query := map[string]string{ + "cpc-uri": cpcUri, + } + requestUrl = BuildUrlFromQuery(requestUrl, query) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error on list storage groups", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + + if status == http.StatusOK { + storageGroups := &StorageGroupArray{} + err := json.Unmarshal(responseBody, storageGroups) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, storage groups: %v", requestUrl, http.MethodGet, status, storageGroups.STORAGEGROUPS)) + return 
storageGroups.STORAGEGROUPS, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error on list storage groups", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return nil, status, errorResponseBody +} + +/** + * GET /api/storage-groups/{storage-group-id} + * @cpcURI the ID of the virtual switch + * @return adapter array + * Return: 200 and Storage Group object + * or: 400, 404, 409 + */ +func (m *StorageGroupManager) GetStorageGroupProperties(storageGroupURI string) (*StorageGroupProperties, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, storageGroupURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error on get storage group properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + + if status == http.StatusOK { + storageGroup := &StorageGroupProperties{} + err := json.Unmarshal(responseBody, storageGroup) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, storage group properties: %v", requestUrl, http.MethodGet, status, storageGroup)) + return storageGroup, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error on get storage group properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return nil, status, errorResponseBody +} + +/** + * GET /api/storage-groups/{storage-group-id}/storage-volumes + * @storage-group-id the Object id of the storage group + * @return storage volume array + * Return: 200 and Storage Group array + * or: 400, 404, 409 + */ +func (m *StorageGroupManager) ListStorageVolumes(storageGroupURI string) ([]StorageVolume, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, storageGroupURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error on list storage volumes", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + + if status == http.StatusOK { + storageVolumes := &StorageVolumeArray{} + err := json.Unmarshal(responseBody, storageVolumes) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return nil, status, 
getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, storage volumes: %v", requestUrl, http.MethodGet, status, storageVolumes.STORAGEVOLUMES)) + return storageVolumes.STORAGEVOLUMES, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error on list storage volumes", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return nil, status, errorResponseBody +} + +/** + * GET /api/storage-groups/{storage-group-id}/storage-volumes/{storage-volume-id} + * @return volume object + * Return: 200 and Storage Volume object + * or: 400, 404, 409 + */ +func (m *StorageGroupManager) GetStorageVolumeProperties(storageVolumeURI string) (*StorageVolume, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, storageVolumeURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error on get storage volume properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + + if status == http.StatusOK { + storageVolume := &StorageVolume{} + err := json.Unmarshal(responseBody, storageVolume) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, storage volume properties: %v", requestUrl, http.MethodGet, status, storageVolume)) + return storageVolume, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error on get storage volume properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return nil, status, errorResponseBody +} + +/** + * POST /api/storage-groups/{storage-group-id}/operations/modify + * Return: 200 + * or: 400, 404, 409 + */ +func (m *StorageGroupManager) UpdateStorageGroupProperties(storageGroupURI string, updateRequest *StorageGroupProperties) (int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, storageGroupURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodPost)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, updateRequest, "") + if err != nil { + logger.Error("error on update storage group properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return status, err + } + + if status == http.StatusOK { + storageGroup := &StorageGroup{} + err := json.Unmarshal(responseBody, storageGroup) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url",
fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: update storage group properties completed, request url: %v, method: %v, status: %v", requestUrl, http.MethodPost, status)) + return status, nil + } + logger.Error("error on update storage group properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status: ", fmt.Sprint(status))) + return status, nil +} + +/* +* + - POST /api/storage-groups/{storage-group-id}/operations/accept-mismatched- + storage-volumes + - Return: 200 + - or: 400, 404, 409 +*/ +func (m *StorageGroupManager) FulfillStorageGroup(storageGroupURI string, request *StorageGroupProperties) (int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, storageGroupURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodPost)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, request, "") + if err != nil { + logger.Error("error on fulfill storage group", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return status, err + } + + if status == http.StatusOK { + storageGroup := &StorageGroup{} + err := json.Unmarshal(responseBody, storageGroup) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: fulfill storage group completed, request url: %v, method: %v, status: %v", requestUrl, http.MethodPost, status)) + return status, nil + } + logger.Error("error on fulfill storage group", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodPost), + zap.String("status: ", fmt.Sprint(status))) + return status, nil +} + +// CreateStorageGroup + +/** + * POST/api/storage-groups + * @returns object-uri and the element-uri of each storage volume that was created in the response body + * Return: 201 + * or: 400, 404, 409 + */ +func (m *StorageGroupManager) CreateStorageGroups(storageGroupURI string, storageGroup *CreateStorageGroupProperties) (*StorageGroupCreateResponse, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, storageGroupURI) + + status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, storageGroup, "") + if err != nil { + return nil, status, err + } + + if status == http.StatusCreated { + sgURI := StorageGroupCreateResponse{} + err := json.Unmarshal(responseBody, &sgURI) + if err != nil { + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + var svPaths []StorageGroupVolumePath + for _, i := range sgURI.URI { + sv, statuscode, err := m.GetStorageVolumeProperties(i) + if err != nil { + return nil, statuscode, err + } + svpath := StorageGroupVolumePath{ + URI: sv.URI, + Paths: sv.Paths, + } + svPaths = append(svPaths, svpath) + } + sgURI.SvPaths = svPaths + return &sgURI, status, nil + } + + return nil, status, 
GenerateErrorFromResponse(responseBody) +} + +/** + * GET /api/storage-groups/{storage-group-id}/operations/get-partitions + * @cpcURI the ID of the virtual switch + * @return adapter array + * Return: 200 and Storage Group object + * or: 400, 404, 409 + */ +func (m *StorageGroupManager) GetStorageGroupPartitions(storageGroupURI string, query map[string]string) (*StorageGroupPartitions, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, storageGroupURI, "/operations/get-partitions") + + requestUrl = BuildUrlFromQuery(requestUrl, query) + + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + + if err != nil { + return nil, status, err + } + + if status == http.StatusOK { + storageGroup := StorageGroupPartitions{} + + err := json.Unmarshal(responseBody, &storageGroup) + + if err != nil { + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + return &storageGroup, status, nil + } + + return nil, status, GenerateErrorFromResponse(responseBody) +} + +func (m *StorageGroupManager) DeleteStorageGroup(storageGroupURI string) (int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, storageGroupURI, "/operations/delete") + status, responseBody, err := m.client.ExecuteRequest(http.MethodPost, requestUrl, nil, "") + if err != nil { + return status, err + } + if status == http.StatusNoContent { + return status, nil + } + + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error deleting storage group ", + zap.String("request url", fmt.Sprint(storageGroupURI)), + zap.String("method", http.MethodPost), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", errorResponseBody))) + return status, errorResponseBody + +} diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/utils.go b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/utils.go new file mode 100644 index 0000000000..dbec60f8dd --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/utils.go @@ -0,0 +1,267 @@ +// Copyright 2021-2023 IBM Corp. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zhmcclient + +import ( + "bufio" + "encoding/json" + "fmt" + "net/url" + "os" + "strconv" + "strings" +) + +type HmcErrorCode int + +const ( + ERR_CODE_HMC_INVALID_URL HmcErrorCode = iota + 1000 + ERR_CODE_HMC_BAD_REQUEST + ERR_CODE_HMC_EMPTY_RESPONSE + ERR_CODE_HMC_READ_RESPONSE_FAIL + ERR_CODE_HMC_TRACE_REQUEST_FAIL + ERR_CODE_HMC_CREATE_METRICS_CTX_FAIL + ERR_CODE_HMC_EXECUTE_FAIL + ERR_CODE_HMC_MARSHAL_FAIL + ERR_CODE_HMC_UNMARSHAL_FAIL + ERR_CODE_EMPTY_JOB_URI +) + +const ( + ERR_MSG_INSECURE_URL = "https is used for the client for secure reason." + ERR_MSG_EMPTY_RESPONSE = "http response is empty." + ERR_MSG_EMPTY_JOB_URI = "empty job-uri." 
+) + +type HmcError struct { + Reason int `json:"reason"` + Message string `json:"message"` +} + +// Metric group names, which are required when creating metrics context. +// For details, refer to "IBM Z Hardware Management Console Web Services API". +const ( + ZCpcEnvironmentalsAndPower = "zcpc-environmentals-and-power" + EnvironmentalPowerStatus = "environmental-power-status" +) + +type metricDef struct { + Name string `json:"metric-name"` // metric field name + Type string `json:"metric-type"` // golang type of this metric value +} + +type metricGroupDef struct { + MetricsGroupName string `json:"group-name"` + MetricDefs []metricDef `json:"metric-infos"` +} + +// MetricsContextDef represents a "Metrics Context" resource, which is +// associated with any following metrics collection. +type MetricsContextDef struct { + URI string `json:"metrics-context-uri"` + MetricGroupDefs []metricGroupDef `json:"metric-group-infos"` + MetricGroupDefsByName map[string]metricGroupDef +} + +// MetricsObject represents the metric values of a metrics group +// for a single resource at a single point in time. +type MetricsObject struct { + MetricsGroupName string + TimeStamp string + ResourceURI string + Metrics map[string]interface{} +} + +func (e HmcError) Error() string { + return fmt.Sprintf("HmcError - Reason: %d, %s", e.Reason, e.Message) +} + +func getHmcErrorFromErr(reason HmcErrorCode, err error) *HmcError { + return &HmcError{ + Reason: int(reason), + Message: err.Error(), + } +} + +func getHmcErrorFromMsg(reason HmcErrorCode, err string) *HmcError { + return &HmcError{ + Reason: (int)(reason), + Message: err, + } +} + +func BuildUrlFromQuery(url *url.URL, query map[string]string) *url.URL { + if query != nil { + q := url.Query() + for key, value := range query { + q.Add(key, value) + } + url.RawQuery = q.Encode() + } + return url +} + +func RetrieveBytes(filename string) ([]byte, error) { + file, err := os.Open(filename) + + if err != nil { + return nil, err + } + defer file.Close() + + stats, statsErr := file.Stat() + if statsErr != nil { + return nil, statsErr + } + + var size int64 = stats.Size() + bytes := make([]byte, size) + bufr := bufio.NewReader(file) + _, err = bufr.Read(bytes) + return bytes, err +} + +func GenerateErrorFromResponse(responseBodyStream []byte) *HmcError { + errBody := &HmcError{} + err := json.Unmarshal(responseBodyStream, errBody) + + if err != nil { + return &HmcError{ + Reason: -1, + Message: "Unknown error.", + } + + } + + return errBody +} + +func newMetricsContext(properties []byte) (*MetricsContextDef, error) { + mc := new(MetricsContextDef) + + if err := json.Unmarshal(properties, mc); err != nil { + return nil, err + } + + mc.MetricGroupDefsByName = make(map[string]metricGroupDef) + for _, mgDef := range mc.MetricGroupDefs { + mc.MetricGroupDefsByName[mgDef.MetricsGroupName] = mgDef + } + + return mc, nil +} + +// extract MetricGroup objects from response of a metrics retrieve request +func extractMetricObjects(mc *MetricsContextDef, metricsStr string) []MetricsObject { + var moList []MetricsObject + var mDefs []metricDef + + metricGroupName := "" + resourceURL := "" + timeStamp := "" + + state := 0 + lines := strings.Split(metricsStr, "\n") + + for _, line := range lines { + + switch state { + case 0: + { + // start or just finish processing a metrics group + if line == "" { + // Skip initial(or trailing) empty lines + continue + } else { + // Process the next metrics group + metricGroupName = strings.Trim(line, `"`) + mDefs = 
mc.MetricGroupDefsByName[metricGroupName].MetricDefs + state = 1 + } + } + case 1: + { + if line == "" { + //No(or no more) MetricObject items in this metrics group. + state = 0 + } else { + // There are MetricsObject items + resourceURL = strings.Trim(line, `"`) + state = 2 + } + } + case 2: + { + // Process the timestamp + timeStamp = line + state = 3 + } + case 3: + { + if line != "" { + // Process the metric values in the ValueRow line + mValues := strings.Split(line, `,`) + metrics := make(map[string]interface{}) + for Index, mDef := range mDefs { + metrics[mDef.Name] = metricValueConvert(mValues[Index], mDef.Type) + } + mo := MetricsObject{ + MetricsGroupName: metricGroupName, + TimeStamp: timeStamp, + ResourceURI: resourceURL, + Metrics: metrics, + } + moList = append(moList, mo) + // stay in this state, for more ValueRow lines + } else { + // On the empty line after the last ValueRow line + state = 1 + } + } + } + } + + return moList +} + +// get metric value from a metric value strings +func metricValueConvert(valueStr string, metricType string) interface{} { + if metricType == "bool" { + lowerStr := strings.ToLower(valueStr) + return lowerStr + + } else if metricType == "string" { + return strings.Trim(valueStr, `"`) + } + + switch metricType { + case "boolean-metric": + returnStr, _ := strconv.ParseBool(valueStr) + return returnStr + case "byte-metric", "short-metric", "integer-metric": + returnStr, _ := strconv.Atoi(valueStr) + return returnStr + case "long-metric": + returnStr, _ := strconv.ParseInt(valueStr, 10, 64) + return returnStr + case "string-metric": + return valueStr + case "double-metric": + returnStr, _ := strconv.ParseFloat(valueStr, 64) + return returnStr + } + + return "" +} diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/vswitch.go b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/vswitch.go new file mode 100644 index 0000000000..efa54253c0 --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/vswitch.go @@ -0,0 +1,133 @@ +// Copyright 2021-2023 IBM Corp. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zhmcclient + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "path" + + "go.uber.org/zap" +) + +// VirtualSwitchAPI defines an interface for issuing VirtualSwitch requests to ZHMC +// +//go:generate counterfeiter -o fakes/vswitch.go --fake-name VirtualSwitchAPI . 
VirtualSwitchAPI +type VirtualSwitchAPI interface { + ListVirtualSwitches(cpcURI string, query map[string]string) ([]VirtualSwitch, int, *HmcError) + GetVirtualSwitchProperties(vSwitchURI string) (*VirtualSwitchProperties, int, *HmcError) +} + +type VirtualSwitchManager struct { + client ClientAPI +} + +func NewVirtualSwitchManager(client ClientAPI) *VirtualSwitchManager { + return &VirtualSwitchManager{ + client: client, + } +} + +/** + * GET /api/cpcs/{cpc-id}/virtual-switches + * @cpcURI the URI of the CPC + * @return adapter array + * Return: 200 and VirtualSwitches array + * or: 400, 404, 409 + */ +func (m *VirtualSwitchManager) ListVirtualSwitches(cpcURI string, query map[string]string) ([]VirtualSwitch, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, cpcURI, "virtual-switches") + requestUrl = BuildUrlFromQuery(requestUrl, query) + + logger.Info(fmt.Sprintf("Request URL: %v", requestUrl)) + logger.Info(fmt.Sprintf("Request Method: %v", http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error listing virtual switches", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + + if status == http.StatusOK { + virtualSwitches := &VirtualSwitchesArray{} + err := json.Unmarshal(responseBody, virtualSwitches) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, virtual switches: %v", requestUrl, http.MethodGet, status, virtualSwitches.VIRTUALSWITCHES)) + return virtualSwitches.VIRTUALSWITCHES, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error on listing virtual switches", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(errors.New(errorResponseBody.Message))) + return nil, status, errorResponseBody +} + +/** + * GET /api/virtual-switches/{vswitch-id} + * @cpcURI the ID of the virtual switch + * @return adapter array + * Return: 200 and VirtualSwitchProperties + * or: 400, 404, 409 + */ +func (m *VirtualSwitchManager) GetVirtualSwitchProperties(vSwitchURI string) (*VirtualSwitchProperties, int, *HmcError) { + requestUrl := m.client.CloneEndpointURL() + requestUrl.Path = path.Join(requestUrl.Path, vSwitchURI) + + logger.Info(fmt.Sprintf("Request URL: %v, Method: %v", requestUrl, http.MethodGet)) + status, responseBody, err := m.client.ExecuteRequest(http.MethodGet, requestUrl, nil, "") + if err != nil { + logger.Error("error getting virtual switch properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status", fmt.Sprint(status)), + zap.Error(fmt.Errorf("%v", err))) + return nil, status, err + } + + if status == http.StatusOK { + virtualSwitch := &VirtualSwitchProperties{} + err := json.Unmarshal(responseBody, virtualSwitch) + if err != nil { + logger.Error("error on unmarshalling adapters", + zap.String("request url", 
fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.Error(fmt.Errorf("%v", getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err)))) + return nil, status, getHmcErrorFromErr(ERR_CODE_HMC_UNMARSHAL_FAIL, err) + } + logger.Info(fmt.Sprintf("Response: request url: %v, method: %v, status: %v, virtual switch properties: %v", requestUrl, http.MethodGet, status, virtualSwitch)) + return virtualSwitch, status, nil + } + errorResponseBody := GenerateErrorFromResponse(responseBody) + logger.Error("error on getting switch properties", + zap.String("request url", fmt.Sprint(requestUrl)), + zap.String("method", http.MethodGet), + zap.String("status: ", fmt.Sprint(status)), + zap.Error(errors.New(errorResponseBody.Message))) + return nil, status, errorResponseBody +} diff --git a/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/zhmc.go b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/zhmc.go new file mode 100644 index 0000000000..1b23b1d3a6 --- /dev/null +++ b/vendor/github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient/zhmc.go @@ -0,0 +1,220 @@ +// Copyright 2021-2023 IBM Corp. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zhmcclient + +// ZhmcAPI defines an interface for issuing requests to ZHMC +// +//go:generate counterfeiter -o fakes/zhmc.go --fake-name ZhmcAPI . 
ZhmcAPI +type ZhmcAPI interface { + CpcAPI + LparAPI + NicAPI + AdapterAPI + StorageGroupAPI + VirtualSwitchAPI + JobAPI + MetricsAPI +} + +type ZhmcManager struct { + client ClientAPI + cpcManager CpcAPI + lparManager LparAPI + adapterManager AdapterAPI + storageGroupManager StorageGroupAPI + virtualSwitchManager VirtualSwitchAPI + nicManager NicAPI + jobManager JobAPI + metricsManager MetricsAPI +} + +func NewManagerFromOptions(endpoint string, creds *Options, logger Logger) ZhmcAPI { + client, _ := NewClient(endpoint, creds, logger) + if client != nil { + return NewManagerFromClient(client) + } + return nil +} + +func NewManagerFromClient(client ClientAPI) ZhmcAPI { + return &ZhmcManager{ + client: client, + cpcManager: NewCpcManager(client), + lparManager: NewLparManager(client), + adapterManager: NewAdapterManager(client), + storageGroupManager: NewStorageGroupManager(client), + virtualSwitchManager: NewVirtualSwitchManager(client), + nicManager: NewNicManager(client), + jobManager: NewJobManager(client), + metricsManager: NewMetricsManager(client), + } +} + +// CPC +func (m *ZhmcManager) ListCPCs(query map[string]string) ([]CPC, int, *HmcError) { + return m.cpcManager.ListCPCs(query) +} +func (m *ZhmcManager) GetCPCProperties(cpcURI string) (*CPCProperties, int, *HmcError) { + return m.cpcManager.GetCPCProperties(cpcURI) +} + +// LPAR +func (m *ZhmcManager) ListLPARs(cpcURI string, query map[string]string) ([]LPAR, int, *HmcError) { + return m.lparManager.ListLPARs(cpcURI, query) +} +func (m *ZhmcManager) GetLparProperties(lparURI string) (*LparObjectProperties, int, *HmcError) { + return m.lparManager.GetLparProperties(lparURI) +} +func (m *ZhmcManager) UpdateLparProperties(lparURI string, props *LparProperties) (int, *HmcError) { + return m.lparManager.UpdateLparProperties(lparURI, props) +} +func (m *ZhmcManager) CreateLPAR(cpcURI string, props *LparProperties) (string, int, *HmcError) { + return m.lparManager.CreateLPAR(cpcURI, props) +} +func (m *ZhmcManager) StartLPAR(lparURI string) (string, int, *HmcError) { + return m.lparManager.StartLPAR(lparURI) +} +func (m *ZhmcManager) StopLPAR(lparURI string) (string, int, *HmcError) { + return m.lparManager.StopLPAR(lparURI) +} +func (m *ZhmcManager) DeleteLPAR(lparURI string) (int, *HmcError) { + return m.lparManager.DeleteLPAR(lparURI) +} +func (m *ZhmcManager) MountIsoImage(lparURI string, isoFile string, insFile string) (int, *HmcError) { + return m.lparManager.MountIsoImage(lparURI, isoFile, insFile) +} +func (m *ZhmcManager) UnmountIsoImage(lparURI string) (int, *HmcError) { + return m.lparManager.UnmountIsoImage(lparURI) +} +func (m *ZhmcManager) ListNics(lparURI string) ([]string, int, *HmcError) { + return m.lparManager.ListNics(lparURI) +} + +func (m *ZhmcManager) AttachStorageGroupToPartition(lparURI string, request *StorageGroupPayload) (int, *HmcError) { + return m.lparManager.AttachStorageGroupToPartition(lparURI, request) +} + +func (m *ZhmcManager) DetachStorageGroupToPartition(lparURI string, request *StorageGroupPayload) (int, *HmcError) { + return m.lparManager.DetachStorageGroupToPartition(lparURI, request) +} + +func (m *ZhmcManager) FetchAsciiConsoleURI(lparURI string, request *AsciiConsoleURIPayload) (*AsciiConsoleURIResponse, int, *HmcError) { + return m.lparManager.FetchAsciiConsoleURI(lparURI, request) +} + +func (m *ZhmcManager) GetEnergyDetailsforLPAR(lparURI string, props *EnergyRequestPayload) (uint64, int, *HmcError) { + return m.lparManager.GetEnergyDetailsforLPAR(lparURI, props) +} + +func (m 
*ZhmcManager) AttachCryptoToPartition(lparURI string, request *CryptoConfig) (int, *HmcError) { + return m.lparManager.AttachCryptoToPartition(lparURI, request) +} + +func (m *ZhmcManager) GetLiveEnergyDetailsforLPAR(lparURI string) (uint64, int, *HmcError) { + return m.metricsManager.GetLiveEnergyDetailsforLPAR(lparURI) +} + +// Adapter +func (m *ZhmcManager) ListAdapters(cpcURI string, query map[string]string) ([]Adapter, int, *HmcError) { + return m.adapterManager.ListAdapters(cpcURI, query) +} +func (m *ZhmcManager) GetAdapterProperties(adapterURI string) (*AdapterProperties, int, *HmcError) { + return m.adapterManager.GetAdapterProperties(adapterURI) +} +func (m *ZhmcManager) GetNetworkAdapterPortProperties(adapterURI string) (*NetworkAdapterPort, int, *HmcError) { + return m.adapterManager.GetNetworkAdapterPortProperties(adapterURI) +} +func (m *ZhmcManager) GetStorageAdapterPortProperties(adapterURI string) (*StorageAdapterPort, int, *HmcError) { + return m.adapterManager.GetStorageAdapterPortProperties(adapterURI) +} +func (m *ZhmcManager) CreateHipersocket(cpcURI string, adaptor *HipersocketPayload) (string, int, *HmcError) { + return m.adapterManager.CreateHipersocket(cpcURI, adaptor) +} +func (m *ZhmcManager) DeleteHipersocket(adapterURI string) (int, *HmcError) { + return m.adapterManager.DeleteHipersocket(adapterURI) +} + +// Storage groups + +func (m *ZhmcManager) ListStorageGroups(storageGroupURI string, cpc string) ([]StorageGroup, int, *HmcError) { + return m.storageGroupManager.ListStorageGroups(storageGroupURI, cpc) +} + +func (m *ZhmcManager) GetStorageGroupProperties(storageGroupURI string) (*StorageGroupProperties, int, *HmcError) { + return m.storageGroupManager.GetStorageGroupProperties(storageGroupURI) +} + +func (m *ZhmcManager) ListStorageVolumes(storageGroupURI string) ([]StorageVolume, int, *HmcError) { + return m.storageGroupManager.ListStorageVolumes(storageGroupURI) +} + +func (m *ZhmcManager) GetStorageVolumeProperties(storageGroupURI string) (*StorageVolume, int, *HmcError) { + return m.storageGroupManager.GetStorageVolumeProperties(storageGroupURI) +} + +func (m *ZhmcManager) UpdateStorageGroupProperties(storageGroupURI string, uploadRequest *StorageGroupProperties) (int, *HmcError) { + return m.storageGroupManager.UpdateStorageGroupProperties(storageGroupURI, uploadRequest) +} + +func (m *ZhmcManager) FulfillStorageGroup(storageGroupURI string, updateRequest *StorageGroupProperties) (int, *HmcError) { + return m.storageGroupManager.FulfillStorageGroup(storageGroupURI, updateRequest) +} + +func (m *ZhmcManager) CreateStorageGroups(storageGroupURI string, storageGroup *CreateStorageGroupProperties) (*StorageGroupCreateResponse, int, *HmcError) { + return m.storageGroupManager.CreateStorageGroups(storageGroupURI, storageGroup) +} + +func (m *ZhmcManager) GetStorageGroupPartitions(storageGroupURI string, query map[string]string) (*StorageGroupPartitions, int, *HmcError) { + return m.storageGroupManager.GetStorageGroupPartitions(storageGroupURI, query) +} + +func (m *ZhmcManager) DeleteStorageGroup(storageGroupURI string) (int, *HmcError) { + return m.storageGroupManager.DeleteStorageGroup(storageGroupURI) +} + +// Virtual Switches + +func (m *ZhmcManager) ListVirtualSwitches(cpcURI string, query map[string]string) ([]VirtualSwitch, int, *HmcError) { + return m.virtualSwitchManager.ListVirtualSwitches(cpcURI, query) +} + +func (m *ZhmcManager) GetVirtualSwitchProperties(vSwitchURI string) (*VirtualSwitchProperties, int, *HmcError) { + return 
m.virtualSwitchManager.GetVirtualSwitchProperties(vSwitchURI) +} + +// NIC +func (m *ZhmcManager) CreateNic(lparURI string, nic *NIC) (string, int, *HmcError) { + return m.nicManager.CreateNic(lparURI, nic) +} +func (m *ZhmcManager) DeleteNic(nicURI string) (int, *HmcError) { + return m.nicManager.DeleteNic(nicURI) +} +func (m *ZhmcManager) GetNicProperties(nicURI string) (*NIC, int, *HmcError) { + return m.nicManager.GetNicProperties(nicURI) +} +func (m *ZhmcManager) UpdateNicProperties(nicURI string, props *NIC) (int, *HmcError) { + return m.nicManager.UpdateNicProperties(nicURI, props) +} + +// JOB +func (m *ZhmcManager) QueryJob(jobURI string) (*Job, int, *HmcError) { + return m.jobManager.QueryJob(jobURI) +} +func (m *ZhmcManager) DeleteJob(jobURI string) (int, *HmcError) { + return m.jobManager.DeleteJob(jobURI) +} +func (m *ZhmcManager) CancelJob(jobURI string) (int, *HmcError) { + return m.jobManager.CancelJob(jobURI) +} diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml new file mode 100644 index 0000000000..6d4d1be7b5 --- /dev/null +++ b/vendor/go.uber.org/multierr/.codecov.yml @@ -0,0 +1,15 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore new file mode 100644 index 0000000000..b9a05e3da0 --- /dev/null +++ b/vendor/go.uber.org/multierr/.gitignore @@ -0,0 +1,4 @@ +/vendor +cover.html +cover.out +/bin diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md new file mode 100644 index 0000000000..f8177b978c --- /dev/null +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -0,0 +1,95 @@ +Releases +======== + +v1.11.0 (2023-03-28) +==================== +- `Errors` now supports any error that implements multiple-error + interface. +- Add `Every` function to allow checking if all errors in the chain + satisfies `errors.Is` against the target error. + +v1.10.0 (2023-03-08) +==================== + +- Comply with Go 1.20's multiple-error interface. +- Drop Go 1.18 support. + Per the support policy, only Go 1.19 and 1.20 are supported now. +- Drop all non-test external dependencies. + +v1.9.0 (2022-12-12) +=================== + +- Add `AppendFunc` that allow passsing functions to similar to + `AppendInvoke`. + +- Bump up yaml.v3 dependency to 3.0.1. + +v1.8.0 (2022-02-28) +=================== + +- `Combine`: perform zero allocations when there are no errors. + + +v1.7.0 (2021-05-06) +=================== + +- Add `AppendInvoke` to append into errors from `defer` blocks. + + +v1.6.0 (2020-09-14) +=================== + +- Actually drop library dependency on development-time tooling. + + +v1.5.0 (2020-02-24) +=================== + +- Drop library dependency on development-time tooling. + + +v1.4.0 (2019-11-04) +=================== + +- Add `AppendInto` function to more ergonomically build errors inside a + loop. 
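+  For illustration, a minimal sketch of the loop pattern `AppendInto` enables
+  (`closers` is a placeholder slice assumed to hold `io.Closer` values):
+
+  ```go
+  var err error
+  for _, c := range closers { // closers: caller-defined placeholder
+  	multierr.AppendInto(&err, c.Close())
+  }
+  ```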
+ + +v1.3.0 (2019-10-29) +=================== + +- Switch to Go modules. + + +v1.2.0 (2019-09-26) +=================== + +- Support extracting and matching against wrapped errors with `errors.As` + and `errors.Is`. + + +v1.1.0 (2017-06-30) +=================== + +- Added an `Errors(error) []error` function to extract the underlying list of + errors for a multierr error. + + +v1.0.0 (2017-05-31) +=================== + +No changes since v0.2.0. This release is committing to making no breaking +changes to the current API in the 1.X series. + + +v0.2.0 (2017-04-11) +=================== + +- Repeatedly appending to the same error is now faster due to fewer + allocations. + + +v0.1.0 (2017-31-03) +=================== + +- Initial release diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt new file mode 100644 index 0000000000..413e30f7ce --- /dev/null +++ b/vendor/go.uber.org/multierr/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2017-2021 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile new file mode 100644 index 0000000000..dcb6fe723c --- /dev/null +++ b/vendor/go.uber.org/multierr/Makefile @@ -0,0 +1,38 @@ +# Directory to put `go install`ed binaries in. +export GOBIN ?= $(shell pwd)/bin + +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +.PHONY: build +build: + go build ./... + +.PHONY: test +test: + go test -race ./... + +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) + +.PHONY: golint +golint: + @cd tools && go install golang.org/x/lint/golint + @$(GOBIN)/golint ./... + +.PHONY: staticcheck +staticcheck: + @cd tools && go install honnef.co/go/tools/cmd/staticcheck + @$(GOBIN)/staticcheck ./... + +.PHONY: lint +lint: gofmt golint staticcheck + +.PHONY: cover +cover: + go test -race -coverprofile=cover.out -coverpkg=./... -v ./... 
+ go tool cover -html=cover.out -o cover.html diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md new file mode 100644 index 0000000000..5ab6ac40f4 --- /dev/null +++ b/vendor/go.uber.org/multierr/README.md @@ -0,0 +1,43 @@ +# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +`multierr` allows combining one or more Go `error`s together. + +## Features + +- **Idiomatic**: + multierr follows best practices in Go, and keeps your code idiomatic. + - It keeps the underlying error type hidden, + allowing you to deal in `error` values exclusively. + - It provides APIs to safely append into an error from a `defer` statement. +- **Performant**: + multierr is optimized for performance: + - It avoids allocations where possible. + - It utilizes slice resizing semantics to optimize common cases + like appending into the same error object from a loop. +- **Interoperable**: + multierr interoperates with the Go standard library's error APIs seamlessly: + - The `errors.Is` and `errors.As` functions *just work*. +- **Lightweight**: + multierr comes with virtually no dependencies. + +## Installation + +```bash +go get -u go.uber.org/multierr@latest +``` + +## Status + +Stable: No breaking changes will be made before 2.0. + +------------------------------------------------------------------------------- + +Released under the [MIT License]. + +[MIT License]: LICENSE.txt +[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr +[doc]: https://pkg.go.dev/go.uber.org/multierr +[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg +[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg +[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml +[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go new file mode 100644 index 0000000000..3a828b2dff --- /dev/null +++ b/vendor/go.uber.org/multierr/error.go @@ -0,0 +1,646 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package multierr allows combining one or more errors together. +// +// # Overview +// +// Errors can be combined with the use of the Combine function. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// conn.Close(), +// ) +// +// If only two errors are being combined, the Append function may be used +// instead. 
+// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// The underlying list of errors for a returned error object may be retrieved +// with the Errors function. +// +// errors := multierr.Errors(err) +// if len(errors) > 0 { +// fmt.Println("The following errors occurred:", errors) +// } +// +// # Appending from a loop +// +// You sometimes need to append into an error from a loop. +// +// var err error +// for _, item := range items { +// err = multierr.Append(err, process(item)) +// } +// +// Cases like this may require knowledge of whether an individual instance +// failed. This usually requires introduction of a new variable. +// +// var err error +// for _, item := range items { +// if perr := process(item); perr != nil { +// log.Warn("skipping item", item) +// err = multierr.Append(err, perr) +// } +// } +// +// multierr includes AppendInto to simplify cases like this. +// +// var err error +// for _, item := range items { +// if multierr.AppendInto(&err, process(item)) { +// log.Warn("skipping item", item) +// } +// } +// +// This will append the error into the err variable, and return true if that +// individual error was non-nil. +// +// See [AppendInto] for more information. +// +// # Deferred Functions +// +// Go makes it possible to modify the return value of a function in a defer +// block if the function was using named returns. This makes it possible to +// record resource cleanup failures from deferred blocks. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer func() { +// err = multierr.Append(err, conn.Close()) +// }() +// // ... +// } +// +// multierr provides the Invoker type and AppendInvoke function to make cases +// like the above simpler and obviate the need for a closure. The following is +// roughly equivalent to the example above. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(conn)) +// // ... +// } +// +// See [AppendInvoke] and [Invoker] for more information. +// +// NOTE: If you're modifying an error from inside a defer, you MUST use a named +// return value for that function. +// +// # Advanced Usage +// +// Errors returned by Combine and Append MAY implement the following +// interface. +// +// type errorGroup interface { +// // Returns a slice containing the underlying list of errors. +// // +// // This slice MUST NOT be modified by the caller. +// Errors() []error +// } +// +// Note that if you need access to list of errors behind a multierr error, you +// should prefer using the Errors function. That said, if you need cheap +// read-only access to the underlying errors slice, you can attempt to cast +// the error to this interface. You MUST handle the failure case gracefully +// because errors returned by Combine and Append are not guaranteed to +// implement this interface. +// +// var errors []error +// group, ok := err.(errorGroup) +// if ok { +// errors = group.Errors() +// } else { +// errors = []error{err} +// } +package multierr // import "go.uber.org/multierr" + +import ( + "bytes" + "errors" + "fmt" + "io" + "strings" + "sync" + "sync/atomic" +) + +var ( + // Separator for single-line error messages. 
+ _singlelineSeparator = []byte("; ") + + // Prefix for multi-line messages + _multilinePrefix = []byte("the following errors occurred:") + + // Prefix for the first and following lines of an item in a list of + // multi-line error messages. + // + // For example, if a single item is: + // + // foo + // bar + // + // It will become, + // + // - foo + // bar + _multilineSeparator = []byte("\n - ") + _multilineIndent = []byte(" ") +) + +// _bufferPool is a pool of bytes.Buffers. +var _bufferPool = sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, +} + +type errorGroup interface { + Errors() []error +} + +// Errors returns a slice containing zero or more errors that the supplied +// error is composed of. If the error is nil, a nil slice is returned. +// +// err := multierr.Append(r.Close(), w.Close()) +// errors := multierr.Errors(err) +// +// If the error is not composed of other errors, the returned slice contains +// just the error that was passed in. +// +// Callers of this function are free to modify the returned slice. +func Errors(err error) []error { + return extractErrors(err) +} + +// multiError is an error that holds one or more errors. +// +// An instance of this is guaranteed to be non-empty and flattened. That is, +// none of the errors inside multiError are other multiErrors. +// +// multiError formats to a semi-colon delimited list of error messages with +// %v and with a more readable multi-line format with %+v. +type multiError struct { + copyNeeded atomic.Bool + errors []error +} + +// Errors returns the list of underlying errors. +// +// This slice MUST NOT be modified. +func (merr *multiError) Errors() []error { + if merr == nil { + return nil + } + return merr.errors +} + +func (merr *multiError) Error() string { + if merr == nil { + return "" + } + + buff := _bufferPool.Get().(*bytes.Buffer) + buff.Reset() + + merr.writeSingleline(buff) + + result := buff.String() + _bufferPool.Put(buff) + return result +} + +// Every compares every error in the given err against the given target error +// using [errors.Is], and returns true only if every comparison returned true. +func Every(err error, target error) bool { + for _, e := range extractErrors(err) { + if !errors.Is(e, target) { + return false + } + } + return true +} + +func (merr *multiError) Format(f fmt.State, c rune) { + if c == 'v' && f.Flag('+') { + merr.writeMultiline(f) + } else { + merr.writeSingleline(f) + } +} + +func (merr *multiError) writeSingleline(w io.Writer) { + first := true + for _, item := range merr.errors { + if first { + first = false + } else { + w.Write(_singlelineSeparator) + } + io.WriteString(w, item.Error()) + } +} + +func (merr *multiError) writeMultiline(w io.Writer) { + w.Write(_multilinePrefix) + for _, item := range merr.errors { + w.Write(_multilineSeparator) + writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) + } +} + +// Writes s to the writer with the given prefix added before each line after +// the first. +func writePrefixLine(w io.Writer, prefix []byte, s string) { + first := true + for len(s) > 0 { + if first { + first = false + } else { + w.Write(prefix) + } + + idx := strings.IndexByte(s, '\n') + if idx < 0 { + idx = len(s) - 1 + } + + io.WriteString(w, s[:idx+1]) + s = s[idx+1:] + } +} + +type inspectResult struct { + // Number of top-level non-nil errors + Count int + + // Total number of errors including multiErrors + Capacity int + + // Index of the first non-nil error in the list. Value is meaningless if + // Count is zero. 
+ FirstErrorIdx int + + // Whether the list contains at least one multiError + ContainsMultiError bool +} + +// Inspects the given slice of errors so that we can efficiently allocate +// space for it. +func inspect(errors []error) (res inspectResult) { + first := true + for i, err := range errors { + if err == nil { + continue + } + + res.Count++ + if first { + first = false + res.FirstErrorIdx = i + } + + if merr, ok := err.(*multiError); ok { + res.Capacity += len(merr.errors) + res.ContainsMultiError = true + } else { + res.Capacity++ + } + } + return +} + +// fromSlice converts the given list of errors into a single error. +func fromSlice(errors []error) error { + // Don't pay to inspect small slices. + switch len(errors) { + case 0: + return nil + case 1: + return errors[0] + } + + res := inspect(errors) + switch res.Count { + case 0: + return nil + case 1: + // only one non-nil entry + return errors[res.FirstErrorIdx] + case len(errors): + if !res.ContainsMultiError { + // Error list is flat. Make a copy of it + // Otherwise "errors" escapes to the heap + // unconditionally for all other cases. + // This lets us optimize for the "no errors" case. + out := append(([]error)(nil), errors...) + return &multiError{errors: out} + } + } + + nonNilErrs := make([]error, 0, res.Capacity) + for _, err := range errors[res.FirstErrorIdx:] { + if err == nil { + continue + } + + if nested, ok := err.(*multiError); ok { + nonNilErrs = append(nonNilErrs, nested.errors...) + } else { + nonNilErrs = append(nonNilErrs, err) + } + } + + return &multiError{errors: nonNilErrs} +} + +// Combine combines the passed errors into a single error. +// +// If zero arguments were passed or if all items are nil, a nil error is +// returned. +// +// Combine(nil, nil) // == nil +// +// If only a single error was passed, it is returned as-is. +// +// Combine(err) // == err +// +// Combine skips over nil arguments so this function may be used to combine +// together errors from operations that fail independently of each other. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) +// +// If any of the passed errors is a multierr error, it will be flattened along +// with the other errors. +// +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) +// +// The returned error formats into a readable multi-line error message if +// formatted with %+v. +// +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +func Combine(errors ...error) error { + return fromSlice(errors) +} + +// Append appends the given errors together. Either value may be nil. +// +// This function is a specialization of Combine for the common case where +// there are only two errors. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// The following pattern may also be used to record failure of deferred +// operations without losing information about the original error. +// +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +// +// Note that the variable MUST be a named return to append an error to it from +// the defer statement. See also [AppendInvoke]. 
+func Append(left error, right error) error { + switch { + case left == nil: + return right + case right == nil: + return left + } + + if _, ok := right.(*multiError); !ok { + if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { + // Common case where the error on the left is constantly being + // appended to. + errs := append(l.errors, right) + return &multiError{errors: errs} + } else if !ok { + // Both errors are single errors. + return &multiError{errors: []error{left, right}} + } + } + + // Either right or both, left and right, are multiErrors. Rely on usual + // expensive logic. + errors := [2]error{left, right} + return fromSlice(errors[0:]) +} + +// AppendInto appends an error into the destination of an error pointer and +// returns whether the error being appended was non-nil. +// +// var err error +// multierr.AppendInto(&err, r.Close()) +// multierr.AppendInto(&err, w.Close()) +// +// The above is equivalent to, +// +// err := multierr.Append(r.Close(), w.Close()) +// +// As AppendInto reports whether the provided error was non-nil, it may be +// used to build a multierr error in a loop more ergonomically. For example: +// +// var err error +// for line := range lines { +// var item Item +// if multierr.AppendInto(&err, parse(line, &item)) { +// continue +// } +// items = append(items, item) +// } +// +// Compare this with a version that relies solely on Append: +// +// var err error +// for line := range lines { +// var item Item +// if parseErr := parse(line, &item); parseErr != nil { +// err = multierr.Append(err, parseErr) +// continue +// } +// items = append(items, item) +// } +func AppendInto(into *error, err error) (errored bool) { + if into == nil { + // We panic if 'into' is nil. This is not documented above + // because suggesting that the pointer must be non-nil may + // confuse users into thinking that the error that it points + // to must be non-nil. + panic("misuse of multierr.AppendInto: into pointer must not be nil") + } + + if err == nil { + return false + } + *into = Append(*into, err) + return true +} + +// Invoker is an operation that may fail with an error. Use it with +// AppendInvoke to append the result of calling the function into an error. +// This allows you to conveniently defer capture of failing operations. +// +// See also, [Close] and [Invoke]. +type Invoker interface { + Invoke() error +} + +// Invoke wraps a function which may fail with an error to match the Invoker +// interface. Use it to supply functions matching this signature to +// AppendInvoke. +// +// For example, +// +// func processReader(r io.Reader) (err error) { +// scanner := bufio.NewScanner(r) +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// for scanner.Scan() { +// // ... +// } +// // ... +// } +// +// In this example, the following line will construct the Invoker right away, +// but defer the invocation of scanner.Err() until the function returns. +// +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. +type Invoke func() error + +// Invoke calls the supplied function and returns its result. +func (i Invoke) Invoke() error { return i() } + +// Close builds an Invoker that closes the provided io.Closer. Use it with +// AppendInvoke to close io.Closers and append their results into an error. 
+// +// For example, +// +// func processFile(path string) (err error) { +// f, err := os.Open(path) +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// return processReader(f) +// } +// +// In this example, multierr.Close will construct the Invoker right away, but +// defer the invocation of f.Close until the function returns. +// +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. +func Close(closer io.Closer) Invoker { + return Invoke(closer.Close) +} + +// AppendInvoke appends the result of calling the given Invoker into the +// provided error pointer. Use it with named returns to safely defer +// invocation of fallible operations until a function returns, and capture the +// resulting errors. +// +// func doSomething(...) (err error) { +// // ... +// f, err := openFile(..) +// if err != nil { +// return err +// } +// +// // multierr will call f.Close() when this function returns and +// // if the operation fails, its append its error into the +// // returned error. +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// +// scanner := bufio.NewScanner(f) +// // Similarly, this scheduled scanner.Err to be called and +// // inspected when the function returns and append its error +// // into the returned error. +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// +// // ... +// } +// +// NOTE: If used with a defer, the error variable MUST be a named return. +// +// Without defer, AppendInvoke behaves exactly like AppendInto. +// +// err := // ... +// multierr.AppendInvoke(&err, mutltierr.Invoke(foo)) +// +// // ...is roughly equivalent to... +// +// err := // ... +// multierr.AppendInto(&err, foo()) +// +// The advantage of the indirection introduced by Invoker is to make it easy +// to defer the invocation of a function. Without this indirection, the +// invoked function will be evaluated at the time of the defer block rather +// than when the function returns. +// +// // BAD: This is likely not what the caller intended. This will evaluate +// // foo() right away and append its result into the error when the +// // function returns. +// defer multierr.AppendInto(&err, foo()) +// +// // GOOD: This will defer invocation of foo unutil the function returns. +// defer multierr.AppendInvoke(&err, multierr.Invoke(foo)) +// +// multierr provides a few Invoker implementations out of the box for +// convenience. See [Invoker] for more information. +func AppendInvoke(into *error, invoker Invoker) { + AppendInto(into, invoker.Invoke()) +} + +// AppendFunc is a shorthand for [AppendInvoke]. +// It allows using function or method value directly +// without having to wrap it into an [Invoker] interface. +// +// func doSomething(...) (err error) { +// w, err := startWorker(...) +// if err != nil { +// return err +// } +// +// // multierr will call w.Stop() when this function returns and +// // if the operation fails, it appends its error into the +// // returned error. +// defer multierr.AppendFunc(&err, w.Stop) +// } +func AppendFunc(into *error, fn func() error) { + AppendInvoke(into, Invoke(fn)) +} diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go new file mode 100644 index 0000000000..a173f9c251 --- /dev/null +++ b/vendor/go.uber.org/multierr/error_post_go120.go @@ -0,0 +1,48 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build go1.20 +// +build go1.20 + +package multierr + +// Unwrap returns a list of errors wrapped by this multierr. +func (merr *multiError) Unwrap() []error { + return merr.Errors() +} + +type multipleErrors interface { + Unwrap() []error +} + +func extractErrors(err error) []error { + if err == nil { + return nil + } + + // check if the given err is an Unwrapable error that + // implements multipleErrors interface. + eg, ok := err.(multipleErrors) + if !ok { + return []error{err} + } + + return append(([]error)(nil), eg.Unwrap()...) +} diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go new file mode 100644 index 0000000000..93872a3fcd --- /dev/null +++ b/vendor/go.uber.org/multierr/error_pre_go120.go @@ -0,0 +1,79 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build !go1.20 +// +build !go1.20 + +package multierr + +import "errors" + +// Versions of Go before 1.20 did not support the Unwrap() []error method. +// This provides a similar behavior by implementing the Is(..) and As(..) +// methods. +// See the errors.Join proposal for details: +// https://github.com/golang/go/issues/53435 + +// As attempts to find the first error in the error list that matches the type +// of the value that target points to. 
+// +// This function allows errors.As to traverse the values stored on the +// multierr error. +func (merr *multiError) As(target interface{}) bool { + for _, err := range merr.Errors() { + if errors.As(err, target) { + return true + } + } + return false +} + +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored on the +// multierr error. +func (merr *multiError) Is(target error) bool { + for _, err := range merr.Errors() { + if errors.Is(err, target) { + return true + } + } + return false +} + +func extractErrors(err error) []error { + if err == nil { + return nil + } + + // Note that we're casting to multiError, not errorGroup. Our contract is + // that returned errors MAY implement errorGroup. Errors, however, only + // has special behavior for multierr-specific error objects. + // + // This behavior can be expanded in the future but I think it's prudent to + // start with as little as possible in terms of contract and possibility + // of misuse. + eg, ok := err.(*multiError) + if !ok { + return []error{err} + } + + return append(([]error)(nil), eg.Errors()...) +} diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml new file mode 100644 index 0000000000..8e5ca7d3e2 --- /dev/null +++ b/vendor/go.uber.org/zap/.codecov.yml @@ -0,0 +1,17 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 95% # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure +ignore: + - internal/readme/readme.go + diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore new file mode 100644 index 0000000000..da9d9d00b4 --- /dev/null +++ b/vendor/go.uber.org/zap/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +vendor + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.pprof +*.out +*.log + +/bin +cover.out +cover.html diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml new file mode 100644 index 0000000000..fbc6df7906 --- /dev/null +++ b/vendor/go.uber.org/zap/.golangci.yml @@ -0,0 +1,77 @@ +output: + # Make output more digestible with quickfix in vim/emacs/etc. + sort-results: true + print-issued-lines: false + +linters: + # We'll track the golangci-lint default linters manually + # instead of letting them change without our control. + disable-all: true + enable: + # golangci-lint defaults: + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - unused + + # Our own extras: + - gofmt + - nolintlint # lints nolint directives + - revive + +linters-settings: + govet: + # These govet checks are disabled by default, but they're useful. + enable: + - niliness + - reflectvaluecompare + - sortslice + - unusedwrite + + errcheck: + exclude-functions: + # These methods can not fail. 
+ # They operate on an in-memory buffer. + - (*go.uber.org/zap/buffer.Buffer).Write + - (*go.uber.org/zap/buffer.Buffer).WriteByte + - (*go.uber.org/zap/buffer.Buffer).WriteString + + - (*go.uber.org/zap/zapio.Writer).Close + - (*go.uber.org/zap/zapio.Writer).Sync + - (*go.uber.org/zap/zapio.Writer).Write + # Write to zapio.Writer cannot fail, + # so io.WriteString on it cannot fail. + - io.WriteString(*go.uber.org/zap/zapio.Writer) + + # Writing a plain string to a fmt.State cannot fail. + - io.WriteString(fmt.State) + +issues: + # Print all issues reported by all linters. + max-issues-per-linter: 0 + max-same-issues: 0 + + # Don't ignore some of the issues that golangci-lint considers okay. + # This includes documenting all exported entities. + exclude-use-default: false + + exclude-rules: + # Don't warn on unused parameters. + # Parameter names are useful; replacing them with '_' is undesirable. + - linters: [revive] + text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _' + + # staticcheck already has smarter checks for empty blocks. + # revive's empty-block linter has false positives. + # For example, as of writing this, the following is not allowed. + # for foo() { } + - linters: [revive] + text: 'empty-block: this block is empty, you can remove it' + + # Ignore logger.Sync() errcheck failures in example_test.go + # since those are intended to be uncomplicated examples. + - linters: [errcheck] + path: example_test.go + text: 'Error return value of `logger.Sync` is not checked' diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl new file mode 100644 index 0000000000..92aa65d660 --- /dev/null +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -0,0 +1,109 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. 
It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +{{.BenchmarkAddingFields}} + +Log a message with a logger that already has 10 fields of context: + +{{.BenchmarkAccumulatedContext}} + +Log a static string, without any context or `printf`-style templating: + +{{.BenchmarkWithoutFields}} + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) + +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md new file mode 100644 index 0000000000..11b4659761 --- /dev/null +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -0,0 +1,671 @@ +# Changelog +All notable changes to this project will be documented in this file. + +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## 1.26.0 (14 Sep 2023) +Enhancements: +* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured +context. +* [#1350][]: String encoding is much (~50%) faster now. + +Thanks to @jquirke, @cdvr1993 for their contributions to this release. + +[#1319]: https://github.com/uber-go/zap/pull/1319 +[#1350]: https://github.com/uber-go/zap/pull/1350 + +## 1.25.0 (1 Aug 2023) + +This release contains several improvements including performance, API additions, +and two new experimental packages whose APIs are unstable and may change in the +future. + +Enhancements: +* [#1246][]: Add `zap/exp/zapslog` package for integration with slog. +* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set. +* [#1281][]: Add `zap/exp/expfield` package which contains helper methods +`Str` and `Strs` for constructing String-like zap.Fields. +* [#1310][]: Reduce stack size on `Any`. + +Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions +to this release. + +[#1246]: https://github.com/uber-go/zap/pull/1246 +[#1273]: https://github.com/uber-go/zap/pull/1273 +[#1281]: https://github.com/uber-go/zap/pull/1281 +[#1310]: https://github.com/uber-go/zap/pull/1310 + +## 1.24.0 (30 Nov 2022) + +Enhancements: +* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the + current minimum enabled log level. +* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically. + +Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their +contributions to this release. + +[#1148]: https://github.coml/uber-go/zap/pull/1148 +[#1185]: https://github.coml/uber-go/zap/pull/1185 + +## 1.23.0 (24 Aug 2022) + +Enhancements: +* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a + `LevelEnabler` or `Core`. +* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects + that implement `String() string`. + +[#1147]: https://github.com/uber-go/zap/pull/1147 +[#1155]: https://github.com/uber-go/zap/pull/1155 + +## 1.22.0 (8 Aug 2022) + +Enhancements: +* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log + arrays of objects. With these two constructors, you don't need to implement + `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement + `zapcore.ObjectMarshaler`. 
+* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing + `SugaredLogger` with the provided options applied. +* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level. + These functions provide a string joining behavior similar to `fmt.Println`. +* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the + logger for `Fatal`-level log entries. This defaults to exiting the program. +* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or + `NewDevelopment` to panic if the system was unable to build the logger. +* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for + a statement dynamically. + +Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun +for their contributions to this release. + +[#1071]: https://github.com/uber-go/zap/pull/1071 +[#1079]: https://github.com/uber-go/zap/pull/1079 +[#1080]: https://github.com/uber-go/zap/pull/1080 +[#1088]: https://github.com/uber-go/zap/pull/1088 +[#1108]: https://github.com/uber-go/zap/pull/1108 +[#1118]: https://github.com/uber-go/zap/pull/1118 + +## 1.21.0 (7 Feb 2022) + +Enhancements: +* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string. +* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a + string. + +Bugfixes: +* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset. + +Other changes: +* [#1052][]: Improve encoding performance when the `AddCaller` and + `AddStacktrace` options are used together. + +[#1047]: https://github.com/uber-go/zap/pull/1047 +[#1048]: https://github.com/uber-go/zap/pull/1048 +[#1052]: https://github.com/uber-go/zap/pull/1052 +[#1058]: https://github.com/uber-go/zap/pull/1058 + +Thanks to @aerosol and @Techassi for their contributions to this release. + +## 1.20.0 (4 Jan 2022) + +Enhancements: +* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline + characters between log statements. +* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON + encoding of reflected log fields. + +Bugfixes: +* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON. +* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject` + methods when the methods return. +* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero. + +Other changes: +* [#1028][]: Drop support for Go < 1.15. + +[#554]: https://github.com/uber-go/zap/pull/554 +[#989]: https://github.com/uber-go/zap/pull/989 +[#1011]: https://github.com/uber-go/zap/pull/1011 +[#1017]: https://github.com/uber-go/zap/pull/1017 +[#1028]: https://github.com/uber-go/zap/pull/1028 +[#1033]: https://github.com/uber-go/zap/pull/1033 +[#1039]: https://github.com/uber-go/zap/pull/1039 + +Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release. + +## 1.19.1 (8 Sep 2021) + +Bugfixes: +* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon. +* [#1003][]: JSON: Fix inaccurate precision when encoding float32. + +[#1001]: https://github.com/uber-go/zap/pull/1001 +[#1003]: https://github.com/uber-go/zap/pull/1003 + +## 1.19.0 (9 Aug 2021) + +Enhancements: +* [#975][]: Avoid panicking in Sampler core if the level is out of bounds. +* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields + better. 
+ +[#975]: https://github.com/uber-go/zap/pull/975 +[#984]: https://github.com/uber-go/zap/pull/984 + +Thanks to @lancoLiu and @thockin for their contributions to this release. + +## 1.18.1 (28 Jun 2021) + +Bugfixes: +* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`. + +[#974]: https://github.com/uber-go/zap/pull/974 + +## 1.18.0 (28 Jun 2021) + +Enhancements: +* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers + messages in-memory and flushes them periodically. +* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`. +* [#897][]: Add `zap.WithClock` option to control the source of time via the + new `zapcore.Clock` interface. +* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w` + methods don't match expectations. +* [#943][]: Add support for filtering by level or arbitrary matcher function to + `zaptest/observer`. +* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's + `buffer.Buffer`. + +Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee +for their contributions to this release. + +[#691]: https://github.com/uber-go/zap/pull/691 +[#897]: https://github.com/uber-go/zap/pull/897 +[#943]: https://github.com/uber-go/zap/pull/943 +[#949]: https://github.com/uber-go/zap/pull/949 +[#961]: https://github.com/uber-go/zap/pull/961 +[#971]: https://github.com/uber-go/zap/pull/971 + +## 1.17.0 (25 May 2021) + +Bugfixes: +* [#867][]: Encode `` for nil `error` instead of a panic. +* [#931][], [#936][]: Update minimum version constraints to address + vulnerabilities in dependencies. + +Enhancements: +* [#865][]: Improve alignment of fields of the Logger struct, reducing its + size from 96 to 80 bytes. +* [#881][]: Support `grpclog.LoggerV2` in zapgrpc. +* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler + with the `application/x-www-form-urlencoded` content type. +* [#912][]: Support multi-field encoding with `zap.Inline`. +* [#913][]: Speed up SugaredLogger for calls with a single string. +* [#928][]: Add support for filtering by field name to `zaptest/observer`. + +Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release. + +[#865]: https://github.com/uber-go/zap/pull/865 +[#867]: https://github.com/uber-go/zap/pull/867 +[#881]: https://github.com/uber-go/zap/pull/881 +[#903]: https://github.com/uber-go/zap/pull/903 +[#912]: https://github.com/uber-go/zap/pull/912 +[#913]: https://github.com/uber-go/zap/pull/913 +[#928]: https://github.com/uber-go/zap/pull/928 +[#931]: https://github.com/uber-go/zap/pull/931 +[#936]: https://github.com/uber-go/zap/pull/936 + +## 1.16.0 (1 Sep 2020) + +Bugfixes: +* [#828][]: Fix missing newline in IncreaseLevel error messages. +* [#835][]: Fix panic in JSON encoder when encoding times or durations + without specifying a time or duration encoder. +* [#843][]: Honor CallerSkip when taking stack traces. +* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead. +* [#854][]: Encode `` for nil `Stringer` instead of a panic error log. + +Enhancements: +* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders + for custom layouts. +* [#697][]: Added support for a configurable delimiter in the console encoder. +* [#852][]: Optimize console encoder by pooling the underlying JSON encoder. +* [#844][]: Add ability to include the calling function as part of logs. 
+* [#843][]: Add `StackSkip` for including truncated stacks as a field. +* [#861][]: Add options to customize Fatal behaviour for better testability. + +Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. + +[#629]: https://github.com/uber-go/zap/pull/629 +[#697]: https://github.com/uber-go/zap/pull/697 +[#828]: https://github.com/uber-go/zap/pull/828 +[#835]: https://github.com/uber-go/zap/pull/835 +[#843]: https://github.com/uber-go/zap/pull/843 +[#844]: https://github.com/uber-go/zap/pull/844 +[#852]: https://github.com/uber-go/zap/pull/852 +[#854]: https://github.com/uber-go/zap/pull/854 +[#861]: https://github.com/uber-go/zap/pull/861 +[#862]: https://github.com/uber-go/zap/pull/862 + +## 1.15.0 (23 Apr 2020) + +Bugfixes: +* [#804][]: Fix handling of `Time` values out of `UnixNano` range. +* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`. + +Enhancements: +* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This + allows disabling annotation of log entries with caller information if + previously enabled with `AddCaller`. +* [#813][]: Deprecate `NewSampler` constructor in favor of + `NewSamplerWithOptions` which supports a `SamplerHook` option. This option + adds support for monitoring sampling decisions through a hook. + +Thanks to @danielbprice for their contributions to this release. + +[#804]: https://github.com/uber-go/zap/pull/804 +[#812]: https://github.com/uber-go/zap/pull/812 +[#806]: https://github.com/uber-go/zap/pull/806 +[#813]: https://github.com/uber-go/zap/pull/813 + +## 1.14.1 (14 Mar 2020) + +Bugfixes: +* [#791][]: Fix panic on attempting to build a logger with an invalid Config. +* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's + development-time dependencies. +* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to + be generated for arrays of `time.Time` objects when using string-based time + formats. + +Thanks to @YashishDua for their contributions to this release. + +[#791]: https://github.com/uber-go/zap/pull/791 +[#795]: https://github.com/uber-go/zap/pull/795 +[#799]: https://github.com/uber-go/zap/pull/799 + +## 1.14.0 (20 Feb 2020) + +Enhancements: +* [#771][]: Optimize calls for disabled log levels. +* [#773][]: Add millisecond duration encoder. +* [#775][]: Add option to increase the level of a logger. +* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible. + +Thanks to @caibirdme for their contributions to this release. + +[#771]: https://github.com/uber-go/zap/pull/771 +[#773]: https://github.com/uber-go/zap/pull/773 +[#775]: https://github.com/uber-go/zap/pull/775 +[#786]: https://github.com/uber-go/zap/pull/786 + +## 1.13.0 (13 Nov 2019) + +Enhancements: +* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors + to log pointers to primitives with support for `nil` values. + +Thanks to @jbizzle for their contributions to this release. + +[#758]: https://github.com/uber-go/zap/pull/758 + +## 1.12.0 (29 Oct 2019) + +Enhancements: +* [#751][]: Migrate to Go modules. + +[#751]: https://github.com/uber-go/zap/pull/751 + +## 1.11.0 (21 Oct 2019) + +Enhancements: +* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`. +* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders. + +Thanks to @juicemia, @uhthomas for their contributions to this release. 
+ +[#725]: https://github.com/uber-go/zap/pull/725 +[#736]: https://github.com/uber-go/zap/pull/736 + +## 1.10.0 (29 Apr 2019) + +Bugfixes: +* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a + string. +* [#706][]: Fix incorrect call depth to determine caller in Go 1.12. + +Enhancements: +* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test + loggers. +* [#675][]: Don't panic when encoding a String field. +* [#704][]: Disable HTML escaping for JSON objects encoded using the + reflect-based encoder. + +Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions +to this release. + +[#657]: https://github.com/uber-go/zap/pull/657 +[#706]: https://github.com/uber-go/zap/pull/706 +[#610]: https://github.com/uber-go/zap/pull/610 +[#675]: https://github.com/uber-go/zap/pull/675 +[#704]: https://github.com/uber-go/zap/pull/704 + +## v1.9.1 (06 Aug 2018) + +Bugfixes: + +* [#614][]: MapObjectEncoder should not ignore empty slices. + +[#614]: https://github.com/uber-go/zap/pull/614 + +## v1.9.0 (19 Jul 2018) + +Enhancements: +* [#602][]: Reduce number of allocations when logging with reflection. +* [#572][], [#606][]: Expose a registry for third-party logging sinks. + +Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and +@dimroc for their contributions to this release. + +[#602]: https://github.com/uber-go/zap/pull/602 +[#572]: https://github.com/uber-go/zap/pull/572 +[#606]: https://github.com/uber-go/zap/pull/606 + +## v1.8.0 (13 Apr 2018) + +Enhancements: +* [#508][]: Make log level configurable when redirecting the standard + library's logger. +* [#518][]: Add a logger that writes to a `*testing.TB`. +* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc. + +Bugfixes: +* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`. + +Thanks to @DiSiqueira and @djui for their contributions to this release. + +[#508]: https://github.com/uber-go/zap/pull/508 +[#518]: https://github.com/uber-go/zap/pull/518 +[#577]: https://github.com/uber-go/zap/pull/577 +[#574]: https://github.com/uber-go/zap/pull/574 + +## v1.7.1 (25 Sep 2017) + +Bugfixes: +* [#504][]: Store strings when using AddByteString with the map encoder. + +[#504]: https://github.com/uber-go/zap/pull/504 + +## v1.7.0 (21 Sep 2017) + +Enhancements: + +* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user + to specify the level of the logged messages. + +[#487]: https://github.com/uber-go/zap/pull/487 + +## v1.6.0 (30 Aug 2017) + +Enhancements: + +* [#491][]: Omit zap stack frames from stacktraces. +* [#490][]: Add a `ContextMap` method to observer logs for simpler + field validation in tests. + +[#490]: https://github.com/uber-go/zap/pull/490 +[#491]: https://github.com/uber-go/zap/pull/491 + +## v1.5.0 (22 Jul 2017) + +Enhancements: + +* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`. +* [#465][]: Support user-supplied encoders for logger names. + +Bugfixes: + +* [#477][]: Fix a bug that incorrectly truncated deep stacktraces. + +Thanks to @richard-tunein and @pavius for their contributions to this release. + +[#477]: https://github.com/uber-go/zap/pull/477 +[#465]: https://github.com/uber-go/zap/pull/465 +[#460]: https://github.com/uber-go/zap/pull/460 +[#470]: https://github.com/uber-go/zap/pull/470 + +## v1.4.1 (08 Jun 2017) + +This release fixes two bugs. + +Bugfixes: + +* [#435][]: Support a variety of case conventions when unmarshaling levels. 
+* [#444][]: Fix a panic in the observer. + +[#435]: https://github.com/uber-go/zap/pull/435 +[#444]: https://github.com/uber-go/zap/pull/444 + +## v1.4.0 (12 May 2017) + +This release adds a few small features and is fully backward-compatible. + +Enhancements: + +* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to + override the Unix-style default. +* [#425][]: Preserve time zones when logging times. +* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a + variety of operations a bit simpler. + +[#424]: https://github.com/uber-go/zap/pull/424 +[#425]: https://github.com/uber-go/zap/pull/425 +[#431]: https://github.com/uber-go/zap/pull/431 + +## v1.3.0 (25 Apr 2017) + +This release adds an enhancement to zap's testing helpers as well as the +ability to marshal an AtomicLevel. It is fully backward-compatible. + +Enhancements: + +* [#415][]: Add a substring-filtering helper to zap's observer. This is + particularly useful when testing the `SugaredLogger`. +* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. + +[#415]: https://github.com/uber-go/zap/pull/415 +[#416]: https://github.com/uber-go/zap/pull/416 + +## v1.2.0 (13 Apr 2017) + +This release adds a gRPC compatibility wrapper. It is fully backward-compatible. + +Enhancements: + +* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements + `grpclog.Logger`. + +[#402]: https://github.com/uber-go/zap/pull/402 + +## v1.1.0 (31 Mar 2017) + +This release fixes two bugs and adds some enhancements to zap's testing helpers. +It is fully backward-compatible. + +Bugfixes: + +* [#385][]: Fix caller path trimming on Windows. +* [#396][]: Fix a panic when attempting to use non-existent directories with + zap's configuration struct. + +Enhancements: + +* [#386][]: Add filtering helpers to zaptest's observing logger. + +Thanks to @moitias for contributing to this release. + +[#385]: https://github.com/uber-go/zap/pull/385 +[#396]: https://github.com/uber-go/zap/pull/396 +[#386]: https://github.com/uber-go/zap/pull/386 + +## v1.0.0 (14 Mar 2017) + +This is zap's first stable release. All exported APIs are now final, and no +further breaking changes will be made in the 1.x release series. Anyone using a +semver-aware dependency manager should now pin to `^1`. + +Breaking changes: + +* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without + casting from `[]byte` to `string`. +* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`, + `zap.Logger`, and `zap.SugaredLogger`. +* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to + clash with other testing helpers. + +Bugfixes: + +* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier + for tab-separated console output. +* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to + work with concurrency-safe `WriteSyncer` implementations. +* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux + systems. +* [#373][]: Report the correct caller from zap's standard library + interoperability wrappers. + +Enhancements: + +* [#348][]: Add a registry allowing third-party encodings to work with zap's + built-in `Config`. +* [#327][]: Make the representation of logger callers configurable (like times, + levels, and durations). +* [#376][]: Allow third-party encoders to use their own buffer pools, which + removes the last performance advantage that zap's encoders have over plugins. 
+* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple + `WriteSyncer`s and lock the result. +* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in + Go 1.9). +* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it + easier for particularly punctilious users to unit test their application's + logging. + +Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their +contributions to this release. + +[#366]: https://github.com/uber-go/zap/pull/366 +[#364]: https://github.com/uber-go/zap/pull/364 +[#371]: https://github.com/uber-go/zap/pull/371 +[#362]: https://github.com/uber-go/zap/pull/362 +[#369]: https://github.com/uber-go/zap/pull/369 +[#347]: https://github.com/uber-go/zap/pull/347 +[#373]: https://github.com/uber-go/zap/pull/373 +[#348]: https://github.com/uber-go/zap/pull/348 +[#327]: https://github.com/uber-go/zap/pull/327 +[#376]: https://github.com/uber-go/zap/pull/376 +[#346]: https://github.com/uber-go/zap/pull/346 +[#365]: https://github.com/uber-go/zap/pull/365 +[#372]: https://github.com/uber-go/zap/pull/372 + +## v1.0.0-rc.3 (7 Mar 2017) + +This is the third release candidate for zap's stable release. There are no +breaking changes. + +Bugfixes: + +* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs + rather than `[]uint8`. + +Enhancements: + +* [#307][]: Users can opt into colored output for log levels. +* [#353][]: In addition to hijacking the output of the standard library's + package-global logging functions, users can now construct a zap-backed + `log.Logger` instance. +* [#311][]: Frames from common runtime functions and some of zap's internal + machinery are now omitted from stacktraces. + +Thanks to @ansel1 and @suyash for their contributions to this release. + +[#339]: https://github.com/uber-go/zap/pull/339 +[#307]: https://github.com/uber-go/zap/pull/307 +[#353]: https://github.com/uber-go/zap/pull/353 +[#311]: https://github.com/uber-go/zap/pull/311 + +## v1.0.0-rc.2 (21 Feb 2017) + +This is the second release candidate for zap's stable release. It includes two +breaking changes. + +Breaking changes: + +* [#316][]: Zap's global loggers are now fully concurrency-safe + (previously, users had to ensure that `ReplaceGlobals` was called before the + loggers were in use). However, they must now be accessed via the `L()` and + `S()` functions. Users can update their projects with + + ``` + gofmt -r "zap.L -> zap.L()" -w . + gofmt -r "zap.S -> zap.S()" -w . + ``` +* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid + JSON and YAML struct tags on all config structs. This release fixes the tags + and adds static analysis to prevent similar bugs in the future. + +Bugfixes: + +* [#321][]: Redirecting the standard library's `log` output now + correctly reports the logger's caller. + +Enhancements: + +* [#325][] and [#333][]: Zap now transparently supports non-standard, rich + errors like those produced by `github.com/pkg/errors`. +* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is + now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) -> + zap.NewNop()' -w .`. +* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a + more informative error. + +Thanks to @skipor and @chapsuk for their contributions to this release. 
+ +[#316]: https://github.com/uber-go/zap/pull/316 +[#309]: https://github.com/uber-go/zap/pull/309 +[#317]: https://github.com/uber-go/zap/pull/317 +[#321]: https://github.com/uber-go/zap/pull/321 +[#325]: https://github.com/uber-go/zap/pull/325 +[#333]: https://github.com/uber-go/zap/pull/333 +[#326]: https://github.com/uber-go/zap/pull/326 +[#300]: https://github.com/uber-go/zap/pull/300 + +## v1.0.0-rc.1 (14 Feb 2017) + +This is the first release candidate for zap's stable release. There are multiple +breaking changes and improvements from the pre-release version. Most notably: + +* **Zap's import path is now "go.uber.org/zap"** — all users will + need to update their code. +* User-facing types and functions remain in the `zap` package. Code relevant + largely to extension authors is now in the `zapcore` package. +* The `zapcore.Core` type makes it easy for third-party packages to use zap's + internals but provide a different user-facing API. +* `Logger` is now a concrete type instead of an interface. +* A less verbose (though slower) logging API is included by default. +* Package-global loggers `L` and `S` are included. +* A human-friendly console encoder is included. +* A declarative config struct allows common logger configurations to be managed + as configuration instead of code. +* Sampling is more accurate, and doesn't depend on the standard library's shared + timer heap. + +## v0.1.0-beta.1 (6 Feb 2017) + +This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and +upgrade at their leisure. Since this is the first tagged release, there are no +backward compatibility concerns and all functionality is new. + +Early zap adopters should pin to the 0.1.x minor version until they're ready to +upgrade to the upcoming stable release. diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..e327d9aa5c --- /dev/null +++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, +body size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. Representation of a +project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at oss-conduct@uber.com. The project +team will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is obligated +to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version]. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md new file mode 100644 index 0000000000..ea02f3cae2 --- /dev/null +++ b/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -0,0 +1,70 @@ +# Contributing + +We'd love your help making zap the very best structured logging library in Go! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. In your issue, pull request, and any other +communications, please remember to treat your fellow contributors with +respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. + +## Setup + +[Fork][fork], then clone the repository: + +```bash +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/zap.git +cd zap +git remote add upstream https://github.com/uber-go/zap.git +git fetch upstream +``` + +Make sure that the tests and the linters pass: + +```bash +make test +make lint +``` + +## Making Changes + +Start by creating a new branch for your changes: + +```bash +cd $GOPATH/src/go.uber.org/zap +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +```bash +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. 
+
+At this point, you're waiting on us to review your changes. We _try_ to respond
+to issues and pull requests within a few business days, and we may suggest some
+improvements or alternatives. Once your changes are approved, one of the
+project maintainers will merge them.
+
+We're much more likely to approve your changes if you:
+
+- Add tests for new functionality.
+- Write a [good commit message][commit-message].
+- Maintain backward compatibility.
+
+[fork]: https://github.com/uber-go/zap/fork
+[open-issue]: https://github.com/uber-go/zap/issues/new
+[cla]: https://cla-assistant.io/uber-go/zap
+[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md
new file mode 100644
index 0000000000..b183b20bc1
--- /dev/null
+++ b/vendor/go.uber.org/zap/FAQ.md
@@ -0,0 +1,164 @@
+# Frequently Asked Questions
+
+## Design
+
+### Why spend so much effort on logger performance?
+
+Of course, most applications won't notice the impact of a slow logger: they
+already take tens or hundreds of milliseconds for each operation, so an extra
+millisecond doesn't matter.
+
+On the other hand, why *not* make structured logging fast? The `SugaredLogger`
+isn't any harder to use than other logging packages, and the `Logger` makes
+structured logging possible in performance-sensitive contexts. Across a fleet
+of Go microservices, making each application even slightly more efficient adds
+up quickly.
+
+### Why aren't `Logger` and `SugaredLogger` interfaces?
+
+Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and
+`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points
+out][go-proverbs], "The bigger the interface, the weaker the abstraction."
+Interfaces are also rigid — *any* change requires releasing a new major
+version, since it breaks all third-party implementations.
+
+Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much
+abstraction, and it lets us add methods without introducing breaking changes.
+Your applications should define and depend upon an interface that includes
+just the methods you use.
+
+### Why are some of my logs missing?
+
+Logs are dropped intentionally by zap when sampling is enabled. The production
+configuration (as returned by `NewProductionConfig()`) enables sampling which will
+cause repeated logs within a second to be sampled. See more details on why sampling
+is enabled in [Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs).
+
+### Why sample application logs?
+
+Applications often experience runs of errors, either because of a bug or
+because of a misbehaving user. Logging errors is usually a good idea, but it
+can easily make this bad situation worse: not only is your application coping
+with a flood of errors, it's also spending extra CPU cycles and I/O logging
+those errors. Since writes are typically serialized, logging limits throughput
+when you need it most.
+
+Sampling fixes this problem by dropping repetitive log entries. Under normal
+conditions, your application writes out every entry. When similar entries are
+logged hundreds or thousands of times each second, though, zap begins dropping
+duplicates to preserve throughput.
+
+### Why do the structured logging APIs take a message in addition to fields?
+
+Subjectively, we find it helpful to accompany structured context with a brief
+description.
This isn't critical during development, but it makes debugging +and operating unfamiliar systems much easier. + +More concretely, zap's sampling algorithm uses the message to identify +duplicate entries. In our experience, this is a practical middle ground +between random sampling (which often drops the exact entry that you need while +debugging) and hashing the complete entry (which is prohibitively expensive). + +### Why include package-global loggers? + +Since so many other logging packages include a global logger, many +applications aren't designed to accept loggers as explicit parameters. +Changing function signatures is often a breaking change, so zap includes +global loggers to simplify migration. + +Avoid them where possible. + +### Why include dedicated Panic and Fatal log levels? + +In general, application code should handle errors gracefully instead of using +`panic` or `os.Exit`. However, every rule has exceptions, and it's common to +crash when an error is truly unrecoverable. To avoid losing any information +— especially the reason for the crash — the logger must flush any +buffered entries before the process exits. + +Zap makes this easy by offering `Panic` and `Fatal` logging methods that +automatically flush before exiting. Of course, this doesn't guarantee that +logs will never be lost, but it eliminates a common error. + +See the discussion in uber-go/zap#207 for more details. + +### What's `DPanic`? + +`DPanic` stands for "panic in development." In development, it logs at +`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to +catch errors that are theoretically possible, but shouldn't actually happen, +*without* crashing in production. + +If you've ever written code like this, you need `DPanic`: + +```go +if err != nil { + panic(fmt.Sprintf("shouldn't ever get here: %v", err)) +} +``` + +## Installation + +### What does the error `expects import "go.uber.org/zap"` mean? + +Either zap was installed incorrectly or you're referencing the wrong package +name in your code. + +Zap's source code happens to be hosted on GitHub, but the [import +path][import-path] is `go.uber.org/zap`. This gives us, the project +maintainers, the freedom to move the source code if necessary. However, it +means that you need to take a little care when installing and using the +package. + +If you follow two simple rules, everything should work: install zap with `go +get -u go.uber.org/zap`, and always import it in your code with `import +"go.uber.org/zap"`. Your code shouldn't contain *any* references to +`github.com/uber-go/zap`. + +## Usage + +### Does zap support log rotation? + +Zap doesn't natively support rotating log files, since we prefer to leave this +to an external program like `logrotate`. + +However, it's easy to integrate a log rotation package like +[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`. + +```go +// lumberjack.Logger is already safe for concurrent use, so we don't need to +// lock it. +w := zapcore.AddSync(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, // days +}) +core := zapcore.NewCore( + zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), + w, + zap.InfoLevel, +) +logger := zap.New(core) +``` + +## Extensions + +We'd love to support every logging need within zap itself, but we're only +familiar with a handful of log ingestion systems, flag-parsing packages, and +the like. 
Rather than merging code that we can't effectively debug and +support, we'd rather grow an ecosystem of zap extensions. + +We're aware of the following extensions, but haven't used them ourselves: + +| Package | Integration | +| --- | --- | +| `github.com/tchap/zapext` | Sentry, syslog | +| `github.com/fgrosse/zaptest` | Ginkgo | +| `github.com/blendle/zapdriver` | Stackdriver | +| `github.com/moul/zapgorm` | Gorm | +| `github.com/moul/zapfilter` | Advanced filtering rules | + +[go-proverbs]: https://go-proverbs.github.io/ +[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths +[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2 diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt new file mode 100644 index 0000000000..6652bed45f --- /dev/null +++ b/vendor/go.uber.org/zap/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile new file mode 100644 index 0000000000..eb1cee53bd --- /dev/null +++ b/vendor/go.uber.org/zap/Makefile @@ -0,0 +1,76 @@ +# Directory containing the Makefile. +PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + +export GOBIN ?= $(PROJECT_ROOT)/bin +export PATH := $(GOBIN):$(PATH) + +GOVULNCHECK = $(GOBIN)/govulncheck +BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem + +# Directories containing independent Go modules. +MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test + +# Directories that we want to track coverage for. +COVER_DIRS = . ./exp + +.PHONY: all +all: lint test + +.PHONY: lint +lint: golangci-lint tidy-lint license-lint + +.PHONY: golangci-lint +golangci-lint: + @$(foreach mod,$(MODULE_DIRS), \ + (cd $(mod) && \ + echo "[lint] golangci-lint: $(mod)" && \ + golangci-lint run --path-prefix $(mod)) &&) true + +.PHONY: tidy +tidy: + @$(foreach dir,$(MODULE_DIRS), \ + (cd $(dir) && go mod tidy) &&) true + +.PHONY: tidy-lint +tidy-lint: + @$(foreach mod,$(MODULE_DIRS), \ + (cd $(mod) && \ + echo "[lint] tidy: $(mod)" && \ + go mod tidy && \ + git diff --exit-code -- go.mod go.sum) &&) true + + +.PHONY: license-lint +license-lint: + ./checklicense.sh + +$(GOVULNCHECK): + cd tools && go install golang.org/x/vuln/cmd/govulncheck + +.PHONY: test +test: + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) 
&&) true + +.PHONY: cover +cover: + @$(foreach dir,$(COVER_DIRS), ( \ + cd $(dir) && \ + go test -race -coverprofile=cover.out -coverpkg=./... ./... \ + && go tool cover -html=cover.out -o cover.html) &&) true + +.PHONY: bench +BENCH ?= . +bench: + @$(foreach dir,$(MODULE_DIRS), ( \ + cd $(dir) && \ + go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \ + ) &&) true + +.PHONY: updatereadme +updatereadme: + rm -f README.md + cat .readme.tmpl | go run internal/readme/readme.go > README.md + +.PHONY: vulncheck +vulncheck: $(GOVULNCHECK) + $(GOVULNCHECK) ./... diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md new file mode 100644 index 0000000000..9de08927be --- /dev/null +++ b/vendor/go.uber.org/zap/README.md @@ -0,0 +1,137 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. 
Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 1744 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op +| zerolog | 918 ns/op | -47% | 1 allocs/op +| go-kit | 5590 ns/op | +221% | 57 allocs/op +| slog | 5640 ns/op | +223% | 40 allocs/op +| apex/log | 21184 ns/op | +1115% | 63 allocs/op +| logrus | 24338 ns/op | +1296% | 79 allocs/op +| log15 | 26054 ns/op | +1394% | 74 allocs/op + +Log a message with a logger that already has 10 fields of context: + +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 193 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op +| zerolog | 81 ns/op | -58% | 0 allocs/op +| slog | 322 ns/op | +67% | 0 allocs/op +| go-kit | 5377 ns/op | +2686% | 56 allocs/op +| apex/log | 19518 ns/op | +10013% | 53 allocs/op +| log15 | 19812 ns/op | +10165% | 70 allocs/op +| logrus | 21997 ns/op | +11297% | 68 allocs/op + +Log a static string, without any context or `printf`-style templating: + +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 165 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op +| zerolog | 95 ns/op | -42% | 0 allocs/op +| slog | 296 ns/op | +79% | 0 allocs/op +| go-kit | 415 ns/op | +152% | 9 allocs/op +| standard library | 422 ns/op | +156% | 2 allocs/op +| apex/log | 1601 ns/op | +870% | 5 allocs/op +| logrus | 3017 ns/op | +1728% | 23 allocs/op +| log15 | 3469 ns/op | +2002% | 20 allocs/op + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) + +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go new file mode 100644 index 0000000000..abfccb566d --- /dev/null +++ b/vendor/go.uber.org/zap/array.go @@ -0,0 +1,447 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "time" + + "go.uber.org/zap/zapcore" +) + +// Array constructs a field with the given key and ArrayMarshaler. It provides +// a flexible, but still type-safe and efficient, way to add array-like types +// to the logging context. The struct's MarshalLogArray method is called lazily. +func Array(key string, val zapcore.ArrayMarshaler) Field { + return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} +} + +// Bools constructs a field that carries a slice of bools. +func Bools(key string, bs []bool) Field { + return Array(key, bools(bs)) +} + +// ByteStrings constructs a field that carries a slice of []byte, each of which +// must be UTF-8 encoded text. +func ByteStrings(key string, bss [][]byte) Field { + return Array(key, byteStringsArray(bss)) +} + +// Complex128s constructs a field that carries a slice of complex numbers. +func Complex128s(key string, nums []complex128) Field { + return Array(key, complex128s(nums)) +} + +// Complex64s constructs a field that carries a slice of complex numbers. +func Complex64s(key string, nums []complex64) Field { + return Array(key, complex64s(nums)) +} + +// Durations constructs a field that carries a slice of time.Durations. +func Durations(key string, ds []time.Duration) Field { + return Array(key, durations(ds)) +} + +// Float64s constructs a field that carries a slice of floats. 
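+//
+// A brief usage sketch; the "quantiles" key and the values are illustrative
+// placeholders:
+//
+//	logger.Info("latency summary", zap.Float64s("quantiles", []float64{0.5, 0.95, 0.99}))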
+func Float64s(key string, nums []float64) Field { + return Array(key, float64s(nums)) +} + +// Float32s constructs a field that carries a slice of floats. +func Float32s(key string, nums []float32) Field { + return Array(key, float32s(nums)) +} + +// Ints constructs a field that carries a slice of integers. +func Ints(key string, nums []int) Field { + return Array(key, ints(nums)) +} + +// Int64s constructs a field that carries a slice of integers. +func Int64s(key string, nums []int64) Field { + return Array(key, int64s(nums)) +} + +// Int32s constructs a field that carries a slice of integers. +func Int32s(key string, nums []int32) Field { + return Array(key, int32s(nums)) +} + +// Int16s constructs a field that carries a slice of integers. +func Int16s(key string, nums []int16) Field { + return Array(key, int16s(nums)) +} + +// Int8s constructs a field that carries a slice of integers. +func Int8s(key string, nums []int8) Field { + return Array(key, int8s(nums)) +} + +// Objects constructs a field with the given key, holding a list of the +// provided objects that can be marshaled by Zap. +// +// Note that these objects must implement zapcore.ObjectMarshaler directly. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the Request type, not its pointer (*Request). +// If it's on the pointer, use ObjectValues. +// +// Given an object that implements MarshalLogObject on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Author struct{ ... } +// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var authors []Author = ... +// logger.Info("loading article", zap.Objects("authors", authors)) +// +// Similarly, given a type that implements MarshalLogObject on its pointer +// receiver, you can log a slice of pointers to that object with Objects like +// so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +// +// If instead, you have a slice of values of such an object, use the +// ObjectValues constructor. +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field { + return Array(key, objects[T](values)) +} + +type objects[T zapcore.ObjectMarshaler] []T + +func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + if err := arr.AppendObject(o); err != nil { + return err + } + } + return nil +} + +// ObjectMarshalerPtr is a constraint that specifies that the given type +// implements zapcore.ObjectMarshaler on a pointer receiver. +type ObjectMarshalerPtr[T any] interface { + *T + zapcore.ObjectMarshaler +} + +// ObjectValues constructs a field with the given key, holding a list of the +// provided objects, where pointers to these objects can be marshaled by Zap. +// +// Note that pointers to these objects must implement zapcore.ObjectMarshaler. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the *Request type, not the value (Request). +// If it's on the value, use Objects. +// +// Given an object that implements MarshalLogObject on the pointer receiver, +// you can log a slice of those objects with ObjectValues like so: +// +// type Request struct{ ... 
} +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +// +// If instead, you have a slice of pointers of such an object, use the Objects +// field constructor. +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field { + return Array(key, objectValues[T, P](values)) +} + +type objectValues[T any, P ObjectMarshalerPtr[T]] []T + +func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range os { + // It is necessary for us to explicitly reference the "P" type. + // We cannot simply pass "&os[i]" to AppendObject because its type + // is "*T", which the type system does not consider as + // implementing ObjectMarshaler. + // Only the type "P" satisfies ObjectMarshaler, which we have + // to convert "*T" to explicitly. + var p P = &os[i] + if err := arr.AppendObject(p); err != nil { + return err + } + } + return nil +} + +// Strings constructs a field that carries a slice of strings. +func Strings(key string, ss []string) Field { + return Array(key, stringArray(ss)) +} + +// Stringers constructs a field with the given key, holding a list of the +// output provided by the value's String method +// +// Given an object that implements String on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Request struct{ ... } +// func (a Request) String() string +// +// var requests []Request = ... +// logger.Info("sending requests", zap.Stringers("requests", requests)) +// +// Note that these objects must implement fmt.Stringer directly. +// That is, if you're trying to marshal a []Request, the String method +// must be declared on the Request type, not its pointer (*Request). +func Stringers[T fmt.Stringer](key string, values []T) Field { + return Array(key, stringers[T](values)) +} + +type stringers[T fmt.Stringer] []T + +func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + arr.AppendString(o.String()) + } + return nil +} + +// Times constructs a field that carries a slice of time.Times. +func Times(key string, ts []time.Time) Field { + return Array(key, times(ts)) +} + +// Uints constructs a field that carries a slice of unsigned integers. +func Uints(key string, nums []uint) Field { + return Array(key, uints(nums)) +} + +// Uint64s constructs a field that carries a slice of unsigned integers. +func Uint64s(key string, nums []uint64) Field { + return Array(key, uint64s(nums)) +} + +// Uint32s constructs a field that carries a slice of unsigned integers. +func Uint32s(key string, nums []uint32) Field { + return Array(key, uint32s(nums)) +} + +// Uint16s constructs a field that carries a slice of unsigned integers. +func Uint16s(key string, nums []uint16) Field { + return Array(key, uint16s(nums)) +} + +// Uint8s constructs a field that carries a slice of unsigned integers. +func Uint8s(key string, nums []uint8) Field { + return Array(key, uint8s(nums)) +} + +// Uintptrs constructs a field that carries a slice of pointer addresses. +func Uintptrs(key string, us []uintptr) Field { + return Array(key, uintptrs(us)) +} + +// Errors constructs a field that carries a slice of errors. 
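+//
+// A brief usage sketch; the "causes" key and the errs slice are illustrative
+// placeholders:
+//
+//	errs := []error{io.EOF, io.ErrUnexpectedEOF}
+//	logger.Warn("partial failure", zap.Errors("causes", errs))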
+func Errors(key string, errs []error) Field { + return Array(key, errArray(errs)) +} + +type bools []bool + +func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bs { + arr.AppendBool(bs[i]) + } + return nil +} + +type byteStringsArray [][]byte + +func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bss { + arr.AppendByteString(bss[i]) + } + return nil +} + +type complex128s []complex128 + +func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex128(nums[i]) + } + return nil +} + +type complex64s []complex64 + +func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex64(nums[i]) + } + return nil +} + +type durations []time.Duration + +func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ds { + arr.AppendDuration(ds[i]) + } + return nil +} + +type float64s []float64 + +func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat64(nums[i]) + } + return nil +} + +type float32s []float32 + +func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat32(nums[i]) + } + return nil +} + +type ints []int + +func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt(nums[i]) + } + return nil +} + +type int64s []int64 + +func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt64(nums[i]) + } + return nil +} + +type int32s []int32 + +func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt32(nums[i]) + } + return nil +} + +type int16s []int16 + +func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt16(nums[i]) + } + return nil +} + +type int8s []int8 + +func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt8(nums[i]) + } + return nil +} + +type stringArray []string + +func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ss { + arr.AppendString(ss[i]) + } + return nil +} + +type times []time.Time + +func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ts { + arr.AppendTime(ts[i]) + } + return nil +} + +type uints []uint + +func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint(nums[i]) + } + return nil +} + +type uint64s []uint64 + +func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint64(nums[i]) + } + return nil +} + +type uint32s []uint32 + +func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint32(nums[i]) + } + return nil +} + +type uint16s []uint16 + +func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint16(nums[i]) + } + return nil +} + +type uint8s []uint8 + +func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint8(nums[i]) + } + return nil +} + +type uintptrs []uintptr + +func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUintptr(nums[i]) + } + return nil +} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go 
b/vendor/go.uber.org/zap/buffer/buffer.go
new file mode 100644
index 0000000000..27fb5cd5da
--- /dev/null
+++ b/vendor/go.uber.org/zap/buffer/buffer.go
@@ -0,0 +1,146 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package buffer provides a thin wrapper around a byte slice. Unlike the
+// standard library's bytes.Buffer, it supports a portion of the strconv
+// package's zero-allocation formatters.
+package buffer // import "go.uber.org/zap/buffer"
+
+import (
+	"strconv"
+	"time"
+)
+
+const _size = 1024 // by default, create 1 KiB buffers
+
+// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so
+// the only way to construct one is via a Pool.
+type Buffer struct {
+	bs   []byte
+	pool Pool
+}
+
+// AppendByte writes a single byte to the Buffer.
+func (b *Buffer) AppendByte(v byte) {
+	b.bs = append(b.bs, v)
+}
+
+// AppendBytes writes the given slice of bytes to the Buffer.
+func (b *Buffer) AppendBytes(v []byte) {
+	b.bs = append(b.bs, v...)
+}
+
+// AppendString writes a string to the Buffer.
+func (b *Buffer) AppendString(s string) {
+	b.bs = append(b.bs, s...)
+}
+
+// AppendInt appends an integer to the underlying buffer (assuming base 10).
+func (b *Buffer) AppendInt(i int64) {
+	b.bs = strconv.AppendInt(b.bs, i, 10)
+}
+
+// AppendTime appends the time formatted using the specified layout.
+func (b *Buffer) AppendTime(t time.Time, layout string) {
+	b.bs = t.AppendFormat(b.bs, layout)
+}
+
+// AppendUint appends an unsigned integer to the underlying buffer (assuming
+// base 10).
+func (b *Buffer) AppendUint(i uint64) {
+	b.bs = strconv.AppendUint(b.bs, i, 10)
+}
+
+// AppendBool appends a bool to the underlying buffer.
+func (b *Buffer) AppendBool(v bool) {
+	b.bs = strconv.AppendBool(b.bs, v)
+}
+
+// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN
+// or +/- Inf.
+func (b *Buffer) AppendFloat(f float64, bitSize int) {
+	b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize)
+}
+
+// Len returns the length of the underlying byte slice.
+func (b *Buffer) Len() int {
+	return len(b.bs)
+}
+
+// Cap returns the capacity of the underlying byte slice.
+func (b *Buffer) Cap() int {
+	return cap(b.bs)
+}
+
+// Bytes returns a mutable reference to the underlying byte slice.
+func (b *Buffer) Bytes() []byte {
+	return b.bs
+}
+
+// String returns a string copy of the underlying byte slice.
+func (b *Buffer) String() string { + return string(b.bs) +} + +// Reset resets the underlying byte slice. Subsequent writes re-use the slice's +// backing array. +func (b *Buffer) Reset() { + b.bs = b.bs[:0] +} + +// Write implements io.Writer. +func (b *Buffer) Write(bs []byte) (int, error) { + b.bs = append(b.bs, bs...) + return len(bs), nil +} + +// WriteByte writes a single byte to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteByte(v byte) error { + b.AppendByte(v) + return nil +} + +// WriteString writes a string to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteString(s string) (int, error) { + b.AppendString(s) + return len(s), nil +} + +// TrimNewline trims any final "\n" byte from the end of the buffer. +func (b *Buffer) TrimNewline() { + if i := len(b.bs) - 1; i >= 0 { + if b.bs[i] == '\n' { + b.bs = b.bs[:i] + } + } +} + +// Free returns the Buffer to its Pool. +// +// Callers must not retain references to the Buffer after calling Free. +func (b *Buffer) Free() { + b.pool.put(b) +} diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go new file mode 100644 index 0000000000..846323360e --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/pool.go @@ -0,0 +1,53 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package buffer + +import ( + "go.uber.org/zap/internal/pool" +) + +// A Pool is a type-safe wrapper around a sync.Pool. +type Pool struct { + p *pool.Pool[*Buffer] +} + +// NewPool constructs a new Pool. +func NewPool() Pool { + return Pool{ + p: pool.New(func() *Buffer { + return &Buffer{ + bs: make([]byte, 0, _size), + } + }), + } +} + +// Get retrieves a Buffer from the pool, creating one if necessary. +func (p Pool) Get() *Buffer { + buf := p.p.Get() + buf.Reset() + buf.pool = p + return buf +} + +func (p Pool) put(buf *Buffer) { + p.p.Put(buf) +} diff --git a/vendor/go.uber.org/zap/checklicense.sh b/vendor/go.uber.org/zap/checklicense.sh new file mode 100644 index 0000000000..345ac8b89a --- /dev/null +++ b/vendor/go.uber.org/zap/checklicense.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +ERROR_COUNT=0 +while read -r file +do + case "$(head -1 "${file}")" in + *"Copyright (c) "*" Uber Technologies, Inc.") + # everything's cool + ;; + *) + echo "$file is missing license header." 
+ (( ERROR_COUNT++ )) + ;; + esac +done < <(git ls-files "*\.go") + +exit $ERROR_COUNT diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go new file mode 100644 index 0000000000..e76e4e64fb --- /dev/null +++ b/vendor/go.uber.org/zap/config.go @@ -0,0 +1,330 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "sort" + "time" + + "go.uber.org/zap/zapcore" +) + +// SamplingConfig sets a sampling strategy for the logger. Sampling caps the +// global CPU and I/O load that logging puts on your process while attempting +// to preserve a representative subset of your logs. +// +// If specified, the Sampler will invoke the Hook after each decision. +// +// Values configured here are per-second. See zapcore.NewSamplerWithOptions for +// details. +type SamplingConfig struct { + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` + Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"` +} + +// Config offers a declarative way to construct a logger. It doesn't do +// anything that can't be done with New, Options, and the various +// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to +// toggle common options. +// +// Note that Config intentionally supports only the most common options. More +// unusual logging setups (logging to network connections or message queues, +// splitting output between multiple files, etc.) are possible, but require +// direct use of the zapcore package. For sample code, see the package-level +// BasicConfiguration and AdvancedConfiguration examples. +// +// For an example showing runtime log level changes, see the documentation for +// AtomicLevel. +type Config struct { + // Level is the minimum enabled logging level. Note that this is a dynamic + // level, so calling Config.Level.SetLevel will atomically change the log + // level of all loggers descended from this config. + Level AtomicLevel `json:"level" yaml:"level"` + // Development puts the logger in development mode, which changes the + // behavior of DPanicLevel and takes stacktraces more liberally. + Development bool `json:"development" yaml:"development"` + // DisableCaller stops annotating logs with the calling function's file + // name and line number. By default, all logs are annotated. 
+ DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` + // DisableStacktrace completely disables automatic stacktrace capturing. By + // default, stacktraces are captured for WarnLevel and above logs in + // development and ErrorLevel and above in production. + DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` + // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. + Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` + // Encoding sets the logger's encoding. Valid values are "json" and + // "console", as well as any third-party encodings registered via + // RegisterEncoder. + Encoding string `json:"encoding" yaml:"encoding"` + // EncoderConfig sets options for the chosen encoder. See + // zapcore.EncoderConfig for details. + EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` + // OutputPaths is a list of URLs or file paths to write logging output to. + // See Open for details. + OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` + // ErrorOutputPaths is a list of URLs to write internal logger errors to. + // The default is standard error. + // + // Note that this setting only affects internal errors; for sample code that + // sends error-level logs to a different location from info- and debug-level + // logs, see the package-level AdvancedConfiguration example. + ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` + // InitialFields is a collection of fields to add to the root logger. + InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` +} + +// NewProductionEncoderConfig returns an opinionated EncoderConfig for +// production environments. +// +// Messages encoded with this configuration will be JSON-formatted +// and will have the following keys by default: +// +// - "level": The logging level (e.g. "info", "error"). +// - "ts": The current time in number of seconds since the Unix epoch. +// - "msg": The message passed to the log statement. +// - "caller": If available, a short path to the file and line number +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// - "stacktrace": If available, a stack trace from the line +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// +// By default, the following formats are used for different types: +// +// - Time is formatted as floating-point number of seconds since the Unix +// epoch. +// - Duration is formatted as floating-point number of seconds. +// +// You may change these by setting the appropriate fields in the returned +// object. +// For example, use the following to change the time encoding format: +// +// cfg := zap.NewProductionEncoderConfig() +// cfg.EncodeTime = zapcore.ISO8601TimeEncoder +func NewProductionEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + FunctionKey: zapcore.OmitKey, + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewProductionConfig builds a reasonable default production logging +// configuration. +// Logging is enabled at InfoLevel and above, and uses a JSON encoder. 
+// Logs are written to standard error. +// Stacktraces are included on logs of ErrorLevel and above. +// DPanicLevel logs will not panic, but will write a stacktrace. +// +// Sampling is enabled at 100:100 by default, +// meaning that after the first 100 log entries +// with the same level and message in the same second, +// it will log every 100th entry +// with the same level and message in the same second. +// You may disable this behavior by setting Sampling to nil. +// +// See [NewProductionEncoderConfig] for information +// on the default encoder configuration. +func NewProductionConfig() Config { + return Config{ + Level: NewAtomicLevelAt(InfoLevel), + Development: false, + Sampling: &SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + Encoding: "json", + EncoderConfig: NewProductionEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for +// development environments. +// +// Messages encoded with this configuration will use Zap's console encoder +// intended to print human-readable output. +// It will print log messages with the following information: +// +// - The log level (e.g. "INFO", "ERROR"). +// - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z"). +// - The message passed to the log statement. +// - If available, a short path to the file and line number +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// - If available, a stacktrace from the line +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// +// By default, the following formats are used for different types: +// +// - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z"). +// - Duration is formatted as a string (e.g. "1.234s"). +// +// You may change these by setting the appropriate fields in the returned +// object. +// For example, use the following to change the time encoding format: +// +// cfg := zap.NewDevelopmentEncoderConfig() +// cfg.EncodeTime = zapcore.ISO8601TimeEncoder +func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + // Keys can be anything except the empty string. + TimeKey: "T", + LevelKey: "L", + NameKey: "N", + CallerKey: "C", + FunctionKey: zapcore.OmitKey, + MessageKey: "M", + StacktraceKey: "S", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewDevelopmentConfig builds a reasonable default development logging +// configuration. +// Logging is enabled at DebugLevel and above, and uses a console encoder. +// Logs are written to standard error. +// Stacktraces are included on logs of WarnLevel and above. +// DPanicLevel logs will panic. +// +// See [NewDevelopmentEncoderConfig] for information +// on the default encoder configuration. +func NewDevelopmentConfig() Config { + return Config{ + Level: NewAtomicLevelAt(DebugLevel), + Development: true, + Encoding: "console", + EncoderConfig: NewDevelopmentEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// Build constructs a logger from the Config and Options. 
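+//
+// A minimal usage sketch, assuming the production preset (the debug-level
+// override is illustrative, not required):
+//
+//	cfg := zap.NewProductionConfig()
+//	cfg.Level.SetLevel(zap.DebugLevel)
+//	logger, err := cfg.Build()
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer logger.Sync()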
+func (cfg Config) Build(opts ...Option) (*Logger, error) { + enc, err := cfg.buildEncoder() + if err != nil { + return nil, err + } + + sink, errSink, err := cfg.openSinks() + if err != nil { + return nil, err + } + + if cfg.Level == (AtomicLevel{}) { + return nil, errors.New("missing Level") + } + + log := New( + zapcore.NewCore(enc, sink, cfg.Level), + cfg.buildOptions(errSink)..., + ) + if len(opts) > 0 { + log = log.WithOptions(opts...) + } + return log, nil +} + +func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { + opts := []Option{ErrorOutput(errSink)} + + if cfg.Development { + opts = append(opts, Development()) + } + + if !cfg.DisableCaller { + opts = append(opts, AddCaller()) + } + + stackLevel := ErrorLevel + if cfg.Development { + stackLevel = WarnLevel + } + if !cfg.DisableStacktrace { + opts = append(opts, AddStacktrace(stackLevel)) + } + + if scfg := cfg.Sampling; scfg != nil { + opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { + var samplerOpts []zapcore.SamplerOption + if scfg.Hook != nil { + samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook)) + } + return zapcore.NewSamplerWithOptions( + core, + time.Second, + cfg.Sampling.Initial, + cfg.Sampling.Thereafter, + samplerOpts..., + ) + })) + } + + if len(cfg.InitialFields) > 0 { + fs := make([]Field, 0, len(cfg.InitialFields)) + keys := make([]string, 0, len(cfg.InitialFields)) + for k := range cfg.InitialFields { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fs = append(fs, Any(k, cfg.InitialFields[k])) + } + opts = append(opts, Fields(fs...)) + } + + return opts +} + +func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { + sink, closeOut, err := Open(cfg.OutputPaths...) + if err != nil { + return nil, nil, err + } + errSink, _, err := Open(cfg.ErrorOutputPaths...) + if err != nil { + closeOut() + return nil, nil, err + } + return sink, errSink, nil +} + +func (cfg Config) buildEncoder() (zapcore.Encoder, error) { + return newEncoder(cfg.Encoding, cfg.EncoderConfig) +} diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go new file mode 100644 index 0000000000..3c50d7b4d3 --- /dev/null +++ b/vendor/go.uber.org/zap/doc.go @@ -0,0 +1,117 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zap provides fast, structured, leveled logging. 
+// +// For applications that log in the hot path, reflection-based serialization +// and string formatting are prohibitively expensive - they're CPU-intensive +// and make many small allocations. Put differently, using json.Marshal and +// fmt.Fprintf to log tons of interface{} makes your application slow. +// +// Zap takes a different approach. It includes a reflection-free, +// zero-allocation JSON encoder, and the base Logger strives to avoid +// serialization overhead and allocations wherever possible. By building the +// high-level SugaredLogger on that foundation, zap lets users choose when +// they need to count every allocation and when they'd prefer a more familiar, +// loosely typed API. +// +// # Choosing a Logger +// +// In contexts where performance is nice, but not critical, use the +// SugaredLogger. It's 4-10x faster than other structured logging packages and +// supports both structured and printf-style logging. Like log15 and go-kit, +// the SugaredLogger's structured logging APIs are loosely typed and accept a +// variadic number of key-value pairs. (For more advanced use cases, they also +// accept strongly typed fields - see the SugaredLogger.With documentation for +// details.) +// +// sugar := zap.NewExample().Sugar() +// defer sugar.Sync() +// sugar.Infow("failed to fetch URL", +// "url", "http://example.com", +// "attempt", 3, +// "backoff", time.Second, +// ) +// sugar.Infof("failed to fetch URL: %s", "http://example.com") +// +// By default, loggers are unbuffered. However, since zap's low-level APIs +// allow buffering, calling Sync before letting your process exit is a good +// habit. +// +// In the rare contexts where every microsecond and every allocation matter, +// use the Logger. It's even faster than the SugaredLogger and allocates far +// less, but it only supports strongly-typed, structured logging. +// +// logger := zap.NewExample() +// defer logger.Sync() +// logger.Info("failed to fetch URL", +// zap.String("url", "http://example.com"), +// zap.Int("attempt", 3), +// zap.Duration("backoff", time.Second), +// ) +// +// Choosing between the Logger and SugaredLogger doesn't need to be an +// application-wide decision: converting between the two is simple and +// inexpensive. +// +// logger := zap.NewExample() +// defer logger.Sync() +// sugar := logger.Sugar() +// plain := sugar.Desugar() +// +// # Configuring Zap +// +// The simplest way to build a Logger is to use zap's opinionated presets: +// NewExample, NewProduction, and NewDevelopment. These presets build a logger +// with a single function call: +// +// logger, err := zap.NewProduction() +// if err != nil { +// log.Fatalf("can't initialize zap logger: %v", err) +// } +// defer logger.Sync() +// +// Presets are fine for small projects, but larger projects and organizations +// naturally require a bit more customization. For most users, zap's Config +// struct strikes the right balance between flexibility and convenience. See +// the package-level BasicConfiguration example for sample code. +// +// More unusual configurations (splitting output between files, sending logs +// to a message queue, etc.) are possible, but require direct use of +// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration +// example for sample code. +// +// # Extending Zap +// +// The zap package itself is a relatively thin wrapper around the interfaces +// in go.uber.org/zap/zapcore. 
Extending zap to support a new encoding (e.g., +// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an +// exception aggregation service, like Sentry or Rollbar) typically requires +// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core +// interfaces. See the zapcore documentation for details. +// +// Similarly, package authors can use the high-performance Encoder and Core +// implementations in the zapcore package to build their own loggers. +// +// # Frequently Asked Questions +// +// An FAQ covering everything from installation errors to design decisions is +// available at https://github.com/uber-go/zap/blob/master/FAQ.md. +package zap // import "go.uber.org/zap" diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go new file mode 100644 index 0000000000..caa04ceefd --- /dev/null +++ b/vendor/go.uber.org/zap/encoder.go @@ -0,0 +1,79 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "sync" + + "go.uber.org/zap/zapcore" +) + +var ( + errNoEncoderNameSpecified = errors.New("no encoder name specified") + + _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ + "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewConsoleEncoder(encoderConfig), nil + }, + "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewJSONEncoder(encoderConfig), nil + }, + } + _encoderMutex sync.RWMutex +) + +// RegisterEncoder registers an encoder constructor, which the Config struct +// can then reference. By default, the "json" and "console" encoders are +// registered. +// +// Attempting to register an encoder whose name is already taken returns an +// error. 
+func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { + _encoderMutex.Lock() + defer _encoderMutex.Unlock() + if name == "" { + return errNoEncoderNameSpecified + } + if _, ok := _encoderNameToConstructor[name]; ok { + return fmt.Errorf("encoder already registered for name %q", name) + } + _encoderNameToConstructor[name] = constructor + return nil +} + +func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { + return nil, errors.New("missing EncodeTime in EncoderConfig") + } + + _encoderMutex.RLock() + defer _encoderMutex.RUnlock() + if name == "" { + return nil, errNoEncoderNameSpecified + } + constructor, ok := _encoderNameToConstructor[name] + if !ok { + return nil, fmt.Errorf("no encoder registered for name %q", name) + } + return constructor(encoderConfig) +} diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go new file mode 100644 index 0000000000..45f7b838dc --- /dev/null +++ b/vendor/go.uber.org/zap/error.go @@ -0,0 +1,82 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "go.uber.org/zap/internal/pool" + "go.uber.org/zap/zapcore" +) + +var _errArrayElemPool = pool.New(func() *errArrayElem { + return &errArrayElem{} +}) + +// Error is shorthand for the common idiom NamedError("error", err). +func Error(err error) Field { + return NamedError("error", err) +} + +// NamedError constructs a field that lazily stores err.Error() under the +// provided key. Errors which also implement fmt.Formatter (like those produced +// by github.com/pkg/errors) will also have their verbose representation stored +// under key+"Verbose". If passed a nil error, the field is a no-op. +// +// For the common case in which the key is simply "error", the Error function +// is shorter and less repetitive. +func NamedError(key string, err error) Field { + if err == nil { + return Skip() + } + return Field{Key: key, Type: zapcore.ErrorType, Interface: err} +} + +type errArray []error + +func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + // To represent each error as an object with an "error" attribute and + // potentially an "errorVerbose" attribute, we need to wrap it in a + // type that implements LogObjectMarshaler. 
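The registry above is what Config.Encoding is resolved against when a logger is built. A small sketch of registering an additional encoder name; the name "console-pretty" and the reuse of the stock console encoder are illustrative assumptions:

	package main

	import (
		"go.uber.org/zap"
		"go.uber.org/zap/zapcore"
	)

	func main() {
		// Register the custom name once, e.g. during program startup; a
		// duplicate name would make RegisterEncoder return an error.
		if err := zap.RegisterEncoder("console-pretty",
			func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
				return zapcore.NewConsoleEncoder(cfg), nil
			}); err != nil {
			panic(err)
		}

		cfg := zap.NewDevelopmentConfig()
		cfg.Encoding = "console-pretty" // resolved via the registry above
		logger, err := cfg.Build()
		if err != nil {
			panic(err)
		}
		defer logger.Sync()
		logger.Info("logging through the custom-registered encoder")
	}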
To prevent this from + // allocating, pool the wrapper type. + elem := _errArrayElemPool.Get() + elem.error = errs[i] + err := arr.AppendObject(elem) + elem.error = nil + _errArrayElemPool.Put(elem) + if err != nil { + return err + } + } + return nil +} + +type errArrayElem struct { + error +} + +func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { + // Re-use the error field's logic, which supports non-standard error types. + Error(e.error).AddTo(enc) + return nil +} diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go new file mode 100644 index 0000000000..c8dd3358a9 --- /dev/null +++ b/vendor/go.uber.org/zap/field.go @@ -0,0 +1,613 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "math" + "time" + + "go.uber.org/zap/internal/stacktrace" + "go.uber.org/zap/zapcore" +) + +// Field is an alias for Field. Aliasing this type dramatically +// improves the navigability of this package's API documentation. +type Field = zapcore.Field + +var ( + _minTimeInt64 = time.Unix(0, math.MinInt64) + _maxTimeInt64 = time.Unix(0, math.MaxInt64) +) + +// Skip constructs a no-op field, which is often useful when handling invalid +// inputs in other Field constructors. +func Skip() Field { + return Field{Type: zapcore.SkipType} +} + +// nilField returns a field which will marshal explicitly as nil. See motivation +// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking +// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the +// implementation here should be changed to reflect that. +func nilField(key string) Field { return Reflect(key, nil) } + +// Binary constructs a field that carries an opaque binary blob. +// +// Binary data is serialized in an encoding-appropriate format. For example, +// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, +// use ByteString. +func Binary(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.BinaryType, Interface: val} +} + +// Bool constructs a field that carries a bool. +func Bool(key string, val bool) Field { + var ival int64 + if val { + ival = 1 + } + return Field{Key: key, Type: zapcore.BoolType, Integer: ival} +} + +// Boolp constructs a field that carries a *bool. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Boolp(key string, val *bool) Field { + if val == nil { + return nilField(key) + } + return Bool(key, *val) +} + +// ByteString constructs a field that carries UTF-8 encoded text as a []byte. +// To log opaque binary blobs (which aren't necessarily valid UTF-8), use +// Binary. +func ByteString(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.ByteStringType, Interface: val} +} + +// Complex128 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex128 to +// interface{}). +func Complex128(key string, val complex128) Field { + return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} +} + +// Complex128p constructs a field that carries a *complex128. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex128p(key string, val *complex128) Field { + if val == nil { + return nilField(key) + } + return Complex128(key, *val) +} + +// Complex64 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex64 to +// interface{}). +func Complex64(key string, val complex64) Field { + return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} +} + +// Complex64p constructs a field that carries a *complex64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex64p(key string, val *complex64) Field { + if val == nil { + return nilField(key) + } + return Complex64(key, *val) +} + +// Float64 constructs a field that carries a float64. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float64(key string, val float64) Field { + return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} +} + +// Float64p constructs a field that carries a *float64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float64p(key string, val *float64) Field { + if val == nil { + return nilField(key) + } + return Float64(key, *val) +} + +// Float32 constructs a field that carries a float32. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float32(key string, val float32) Field { + return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} +} + +// Float32p constructs a field that carries a *float32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float32p(key string, val *float32) Field { + if val == nil { + return nilField(key) + } + return Float32(key, *val) +} + +// Int constructs a field with the given key and value. +func Int(key string, val int) Field { + return Int64(key, int64(val)) +} + +// Intp constructs a field that carries a *int. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Intp(key string, val *int) Field { + if val == nil { + return nilField(key) + } + return Int(key, *val) +} + +// Int64 constructs a field with the given key and value. +func Int64(key string, val int64) Field { + return Field{Key: key, Type: zapcore.Int64Type, Integer: val} +} + +// Int64p constructs a field that carries a *int64. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Int64p(key string, val *int64) Field { + if val == nil { + return nilField(key) + } + return Int64(key, *val) +} + +// Int32 constructs a field with the given key and value. +func Int32(key string, val int32) Field { + return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} +} + +// Int32p constructs a field that carries a *int32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int32p(key string, val *int32) Field { + if val == nil { + return nilField(key) + } + return Int32(key, *val) +} + +// Int16 constructs a field with the given key and value. +func Int16(key string, val int16) Field { + return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} +} + +// Int16p constructs a field that carries a *int16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int16p(key string, val *int16) Field { + if val == nil { + return nilField(key) + } + return Int16(key, *val) +} + +// Int8 constructs a field with the given key and value. +func Int8(key string, val int8) Field { + return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} +} + +// Int8p constructs a field that carries a *int8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int8p(key string, val *int8) Field { + if val == nil { + return nilField(key) + } + return Int8(key, *val) +} + +// String constructs a field with the given key and value. +func String(key string, val string) Field { + return Field{Key: key, Type: zapcore.StringType, String: val} +} + +// Stringp constructs a field that carries a *string. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Stringp(key string, val *string) Field { + if val == nil { + return nilField(key) + } + return String(key, *val) +} + +// Uint constructs a field with the given key and value. +func Uint(key string, val uint) Field { + return Uint64(key, uint64(val)) +} + +// Uintp constructs a field that carries a *uint. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintp(key string, val *uint) Field { + if val == nil { + return nilField(key) + } + return Uint(key, *val) +} + +// Uint64 constructs a field with the given key and value. +func Uint64(key string, val uint64) Field { + return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} +} + +// Uint64p constructs a field that carries a *uint64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint64p(key string, val *uint64) Field { + if val == nil { + return nilField(key) + } + return Uint64(key, *val) +} + +// Uint32 constructs a field with the given key and value. +func Uint32(key string, val uint32) Field { + return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} +} + +// Uint32p constructs a field that carries a *uint32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint32p(key string, val *uint32) Field { + if val == nil { + return nilField(key) + } + return Uint32(key, *val) +} + +// Uint16 constructs a field with the given key and value. +func Uint16(key string, val uint16) Field { + return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} +} + +// Uint16p constructs a field that carries a *uint16. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Uint16p(key string, val *uint16) Field { + if val == nil { + return nilField(key) + } + return Uint16(key, *val) +} + +// Uint8 constructs a field with the given key and value. +func Uint8(key string, val uint8) Field { + return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} +} + +// Uint8p constructs a field that carries a *uint8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint8p(key string, val *uint8) Field { + if val == nil { + return nilField(key) + } + return Uint8(key, *val) +} + +// Uintptr constructs a field with the given key and value. +func Uintptr(key string, val uintptr) Field { + return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} +} + +// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintptrp(key string, val *uintptr) Field { + if val == nil { + return nilField(key) + } + return Uintptr(key, *val) +} + +// Reflect constructs a field with the given key and an arbitrary object. It uses +// an encoding-appropriate, reflection-based function to lazily serialize nearly +// any object into the logging context, but it's relatively slow and +// allocation-heavy. Outside tests, Any is always a better choice. +// +// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect +// includes the error message in the final log output. +func Reflect(key string, val interface{}) Field { + return Field{Key: key, Type: zapcore.ReflectType, Interface: val} +} + +// Namespace creates a named, isolated scope within the logger's context. All +// subsequent fields will be added to the new namespace. +// +// This helps prevent key collisions when injecting loggers into sub-components +// or third-party libraries. +func Namespace(key string) Field { + return Field{Key: key, Type: zapcore.NamespaceType} +} + +// Stringer constructs a field with the given key and the output of the value's +// String method. The Stringer's String method is called lazily. +func Stringer(key string, val fmt.Stringer) Field { + return Field{Key: key, Type: zapcore.StringerType, Interface: val} +} + +// Time constructs a Field with the given key and value. The encoder +// controls how the time is serialized. +func Time(key string, val time.Time) Field { + if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) { + return Field{Key: key, Type: zapcore.TimeFullType, Interface: val} + } + return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} +} + +// Timep constructs a field that carries a *time.Time. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Timep(key string, val *time.Time) Field { + if val == nil { + return nilField(key) + } + return Time(key, *val) +} + +// Stack constructs a field that stores a stacktrace of the current goroutine +// under provided key. Keep in mind that taking a stacktrace is eager and +// expensive (relatively speaking); this function both makes an allocation and +// takes about two microseconds. +func Stack(key string) Field { + return StackSkip(key, 1) // skip Stack +} + +// StackSkip constructs a field similarly to Stack, but also skips the given +// number of frames from the top of the stacktrace. +func StackSkip(key string, skip int) Field { + // Returning the stacktrace as a string costs an allocation, but saves us + // from expanding the zapcore.Field union struct to include a byte slice. 
Since + // taking a stacktrace is already so expensive (~10us), the extra allocation + // is okay. + return String(key, stacktrace.Take(skip+1)) // skip StackSkip +} + +// Duration constructs a field with the given key and value. The encoder +// controls how the duration is serialized. +func Duration(key string, val time.Duration) Field { + return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} +} + +// Durationp constructs a field that carries a *time.Duration. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Durationp(key string, val *time.Duration) Field { + if val == nil { + return nilField(key) + } + return Duration(key, *val) +} + +// Object constructs a field with the given key and ObjectMarshaler. It +// provides a flexible, but still type-safe and efficient, way to add map- or +// struct-like user-defined types to the logging context. The struct's +// MarshalLogObject method is called lazily. +func Object(key string, val zapcore.ObjectMarshaler) Field { + return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} +} + +// Inline constructs a Field that is similar to Object, but it +// will add the elements of the provided ObjectMarshaler to the +// current namespace. +func Inline(val zapcore.ObjectMarshaler) Field { + return zapcore.Field{ + Type: zapcore.InlineMarshalerType, + Interface: val, + } +} + +// Dict constructs a field containing the provided key-value pairs. +// It acts similar to [Object], but with the fields specified as arguments. +func Dict(key string, val ...Field) Field { + return dictField(key, val) +} + +// We need a function with the signature (string, T) for zap.Any. +func dictField(key string, val []Field) Field { + return Object(key, dictObject(val)) +} + +type dictObject []Field + +func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { + for _, f := range d { + f.AddTo(enc) + } + return nil +} + +// We discovered an issue where zap.Any can cause a performance degradation +// when used in new goroutines. +// +// This happens because the compiler assigns 4.8kb (one zap.Field per arm of +// switch statement) of stack space for zap.Any when it takes the form: +// +// switch v := v.(type) { +// case string: +// return String(key, v) +// case int: +// return Int(key, v) +// // ... +// default: +// return Reflect(key, v) +// } +// +// To avoid this, we use the type switch to assign a value to a single local variable +// and then call a function on it. +// The local variable is just a function reference so it doesn't allocate +// when converted to an interface{}. +// +// A fair bit of experimentation went into this. +// See also: +// +// - https://github.com/uber-go/zap/pull/1301 +// - https://github.com/uber-go/zap/pull/1303 +// - https://github.com/uber-go/zap/pull/1304 +// - https://github.com/uber-go/zap/pull/1305 +// - https://github.com/uber-go/zap/pull/1308 +type anyFieldC[T any] func(string, T) Field + +func (f anyFieldC[T]) Any(key string, val any) Field { + v, _ := val.(T) + // val is guaranteed to be a T, except when it's nil. + return f(key, v) +} + +// Any takes a key and an arbitrary value and chooses the best way to represent +// them as a field, falling back to a reflection-based approach only if +// necessary. +// +// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between +// them. To minimize surprises, []byte values are treated as binary blobs, byte +// values are treated as uint8, and runes are always treated as integers. 
+func Any(key string, value interface{}) Field { + var c interface{ Any(string, any) Field } + + switch value.(type) { + case zapcore.ObjectMarshaler: + c = anyFieldC[zapcore.ObjectMarshaler](Object) + case zapcore.ArrayMarshaler: + c = anyFieldC[zapcore.ArrayMarshaler](Array) + case []Field: + c = anyFieldC[[]Field](dictField) + case bool: + c = anyFieldC[bool](Bool) + case *bool: + c = anyFieldC[*bool](Boolp) + case []bool: + c = anyFieldC[[]bool](Bools) + case complex128: + c = anyFieldC[complex128](Complex128) + case *complex128: + c = anyFieldC[*complex128](Complex128p) + case []complex128: + c = anyFieldC[[]complex128](Complex128s) + case complex64: + c = anyFieldC[complex64](Complex64) + case *complex64: + c = anyFieldC[*complex64](Complex64p) + case []complex64: + c = anyFieldC[[]complex64](Complex64s) + case float64: + c = anyFieldC[float64](Float64) + case *float64: + c = anyFieldC[*float64](Float64p) + case []float64: + c = anyFieldC[[]float64](Float64s) + case float32: + c = anyFieldC[float32](Float32) + case *float32: + c = anyFieldC[*float32](Float32p) + case []float32: + c = anyFieldC[[]float32](Float32s) + case int: + c = anyFieldC[int](Int) + case *int: + c = anyFieldC[*int](Intp) + case []int: + c = anyFieldC[[]int](Ints) + case int64: + c = anyFieldC[int64](Int64) + case *int64: + c = anyFieldC[*int64](Int64p) + case []int64: + c = anyFieldC[[]int64](Int64s) + case int32: + c = anyFieldC[int32](Int32) + case *int32: + c = anyFieldC[*int32](Int32p) + case []int32: + c = anyFieldC[[]int32](Int32s) + case int16: + c = anyFieldC[int16](Int16) + case *int16: + c = anyFieldC[*int16](Int16p) + case []int16: + c = anyFieldC[[]int16](Int16s) + case int8: + c = anyFieldC[int8](Int8) + case *int8: + c = anyFieldC[*int8](Int8p) + case []int8: + c = anyFieldC[[]int8](Int8s) + case string: + c = anyFieldC[string](String) + case *string: + c = anyFieldC[*string](Stringp) + case []string: + c = anyFieldC[[]string](Strings) + case uint: + c = anyFieldC[uint](Uint) + case *uint: + c = anyFieldC[*uint](Uintp) + case []uint: + c = anyFieldC[[]uint](Uints) + case uint64: + c = anyFieldC[uint64](Uint64) + case *uint64: + c = anyFieldC[*uint64](Uint64p) + case []uint64: + c = anyFieldC[[]uint64](Uint64s) + case uint32: + c = anyFieldC[uint32](Uint32) + case *uint32: + c = anyFieldC[*uint32](Uint32p) + case []uint32: + c = anyFieldC[[]uint32](Uint32s) + case uint16: + c = anyFieldC[uint16](Uint16) + case *uint16: + c = anyFieldC[*uint16](Uint16p) + case []uint16: + c = anyFieldC[[]uint16](Uint16s) + case uint8: + c = anyFieldC[uint8](Uint8) + case *uint8: + c = anyFieldC[*uint8](Uint8p) + case []byte: + c = anyFieldC[[]byte](Binary) + case uintptr: + c = anyFieldC[uintptr](Uintptr) + case *uintptr: + c = anyFieldC[*uintptr](Uintptrp) + case []uintptr: + c = anyFieldC[[]uintptr](Uintptrs) + case time.Time: + c = anyFieldC[time.Time](Time) + case *time.Time: + c = anyFieldC[*time.Time](Timep) + case []time.Time: + c = anyFieldC[[]time.Time](Times) + case time.Duration: + c = anyFieldC[time.Duration](Duration) + case *time.Duration: + c = anyFieldC[*time.Duration](Durationp) + case []time.Duration: + c = anyFieldC[[]time.Duration](Durations) + case error: + c = anyFieldC[error](NamedError) + case []error: + c = anyFieldC[[]error](Errors) + case fmt.Stringer: + c = anyFieldC[fmt.Stringer](Stringer) + default: + c = anyFieldC[any](Reflect) + } + + return c.Any(key, value) +} diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go new file mode 100644 index 0000000000..1312875072 
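Taken together, the typed constructors and the Any dispatcher above can be used interchangeably; a short sketch logging the same data both ways (field names and values are arbitrary):

	package main

	import (
		"time"

		"go.uber.org/zap"
	)

	func main() {
		logger, _ := zap.NewDevelopment()
		defer logger.Sync()

		// Strongly typed constructors avoid reflection entirely.
		logger.Info("request finished",
			zap.String("method", "GET"),
			zap.Int("status", 200),
			zap.Duration("latency", 42*time.Millisecond),
		)

		// Any dispatches to the matching typed constructor and falls back
		// to Reflect only for types it does not recognize.
		logger.Info("request finished",
			zap.Any("method", "GET"),
			zap.Any("status", 200),
			zap.Any("latency", 42*time.Millisecond),
		)
	}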
--- /dev/null +++ b/vendor/go.uber.org/zap/flag.go @@ -0,0 +1,39 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "flag" + + "go.uber.org/zap/zapcore" +) + +// LevelFlag uses the standard library's flag.Var to declare a global flag +// with the specified name, default, and usage guidance. The returned value is +// a pointer to the value of the flag. +// +// If you don't want to use the flag package's global state, you can use any +// non-nil *Level as a flag.Value with your own *flag.FlagSet. +func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { + lvl := defaultLevel + flag.Var(&lvl, name, usage) + return &lvl +} diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml new file mode 100644 index 0000000000..8e1d05e9ab --- /dev/null +++ b/vendor/go.uber.org/zap/glide.yaml @@ -0,0 +1,34 @@ +package: go.uber.org/zap +license: MIT +import: +- package: go.uber.org/atomic + version: ^1 +- package: go.uber.org/multierr + version: ^1 +testImport: +- package: github.com/satori/go.uuid +- package: github.com/sirupsen/logrus +- package: github.com/apex/log + subpackages: + - handlers/json +- package: github.com/go-kit/kit + subpackages: + - log +- package: github.com/stretchr/testify + subpackages: + - assert + - require +- package: gopkg.in/inconshreveable/log15.v2 +- package: github.com/mattn/goveralls +- package: github.com/pborman/uuid +- package: github.com/pkg/errors +- package: github.com/rs/zerolog +- package: golang.org/x/tools + subpackages: + - cover +- package: golang.org/x/lint + subpackages: + - golint +- package: github.com/axw/gocov + subpackages: + - gocov diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go new file mode 100644 index 0000000000..3cb46c9e0a --- /dev/null +++ b/vendor/go.uber.org/zap/global.go @@ -0,0 +1,169 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
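A sketch of wiring LevelFlag into a program's flag parsing; the flag name "log-level" is an arbitrary choice:

	package main

	import (
		"flag"

		"go.uber.org/zap"
		"go.uber.org/zap/zapcore"
	)

	// The flag value can be set with e.g. -log-level=debug.
	var logLevel = zap.LevelFlag("log-level", zapcore.InfoLevel, "minimum enabled logging level")

	func main() {
		flag.Parse()

		cfg := zap.NewProductionConfig()
		cfg.Level = zap.NewAtomicLevelAt(*logLevel)
		logger, err := cfg.Build()
		if err != nil {
			panic(err)
		}
		defer logger.Sync()
		logger.Debug("only emitted when the flag lowers the level to debug")
	}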
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "bytes" + "fmt" + "log" + "os" + "sync" + + "go.uber.org/zap/zapcore" +) + +const ( + _stdLogDefaultDepth = 1 + _loggerWriterDepth = 2 + _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + + "https://github.com/uber-go/zap/issues/new and reference this error: %v" +) + +var ( + _globalMu sync.RWMutex + _globalL = NewNop() + _globalS = _globalL.Sugar() +) + +// L returns the global Logger, which can be reconfigured with ReplaceGlobals. +// It's safe for concurrent use. +func L() *Logger { + _globalMu.RLock() + l := _globalL + _globalMu.RUnlock() + return l +} + +// S returns the global SugaredLogger, which can be reconfigured with +// ReplaceGlobals. It's safe for concurrent use. +func S() *SugaredLogger { + _globalMu.RLock() + s := _globalS + _globalMu.RUnlock() + return s +} + +// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a +// function to restore the original values. It's safe for concurrent use. +func ReplaceGlobals(logger *Logger) func() { + _globalMu.Lock() + prev := _globalL + _globalL = logger + _globalS = logger.Sugar() + _globalMu.Unlock() + return func() { ReplaceGlobals(prev) } +} + +// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at +// InfoLevel. To redirect the standard library's package-global logging +// functions, use RedirectStdLog instead. +func NewStdLog(l *Logger) *log.Logger { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + f := logger.Info + return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */) +} + +// NewStdLogAt returns *log.Logger which writes to supplied zap logger at +// required level. +func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil +} + +// RedirectStdLog redirects output from the standard library's package-global +// logger to the supplied logger at InfoLevel. Since zap already handles caller +// annotations, timestamps, etc., it automatically disables the standard +// library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. 
+func RedirectStdLog(l *Logger) func() { + f, err := redirectStdLogAt(l, InfoLevel) + if err != nil { + // Can't get here, since passing InfoLevel to redirectStdLogAt always + // works. + panic(fmt.Sprintf(_programmerErrorTemplate, err)) + } + return f +} + +// RedirectStdLogAt redirects output from the standard library's package-global +// logger to the supplied logger at the specified level. Since zap already +// handles caller annotations, timestamps, etc., it automatically disables the +// standard library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + return redirectStdLogAt(l, level) +} + +func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + flags := log.Flags() + prefix := log.Prefix() + log.SetFlags(0) + log.SetPrefix("") + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + log.SetOutput(&loggerWriter{logFunc}) + return func() { + log.SetFlags(flags) + log.SetPrefix(prefix) + log.SetOutput(os.Stderr) + }, nil +} + +func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { + switch lvl { + case DebugLevel: + return logger.Debug, nil + case InfoLevel: + return logger.Info, nil + case WarnLevel: + return logger.Warn, nil + case ErrorLevel: + return logger.Error, nil + case DPanicLevel: + return logger.DPanic, nil + case PanicLevel: + return logger.Panic, nil + case FatalLevel: + return logger.Fatal, nil + } + return nil, fmt.Errorf("unrecognized level: %q", lvl) +} + +type loggerWriter struct { + logFunc func(msg string, fields ...Field) +} + +func (l *loggerWriter) Write(p []byte) (int, error) { + p = bytes.TrimSpace(p) + l.logFunc(string(p)) + return len(p), nil +} diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go new file mode 100644 index 0000000000..2be8f65150 --- /dev/null +++ b/vendor/go.uber.org/zap/http_handler.go @@ -0,0 +1,140 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "go.uber.org/zap/zapcore" +) + +// ServeHTTP is a simple JSON endpoint that can report on or change the current +// logging level. 
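A compact usage sketch for the global-logger helpers defined above (ReplaceGlobals, L, S, and RedirectStdLog):

	package main

	import (
		"log"

		"go.uber.org/zap"
	)

	func main() {
		logger, err := zap.NewProduction()
		if err != nil {
			log.Fatalf("can't initialize zap logger: %v", err)
		}
		defer logger.Sync()

		// Install the logger globally and restore the previous one on exit.
		undo := zap.ReplaceGlobals(logger)
		defer undo()

		zap.L().Info("structured logging via the global Logger")
		zap.S().Infow("sugared logging via the global SugaredLogger", "attempt", 3)

		// Optionally route the standard library's logger through zap at InfoLevel.
		restore := zap.RedirectStdLog(logger)
		defer restore()
		log.Print("this line is emitted by zap")
	}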
+// +// # GET +// +// The GET request returns a JSON description of the current logging level like: +// +// {"level":"info"} +// +// # PUT +// +// The PUT request changes the logging level. It is perfectly safe to change the +// logging level while a program is running. Two content types are supported: +// +// Content-Type: application/x-www-form-urlencoded +// +// With this content type, the level can be provided through the request body or +// a query parameter. The log level is URL encoded like: +// +// level=debug +// +// The request body takes precedence over the query parameter, if both are +// specified. +// +// This content type is the default for a curl PUT request. Following are two +// example curl requests that both set the logging level to debug. +// +// curl -X PUT localhost:8080/log/level?level=debug +// curl -X PUT localhost:8080/log/level -d level=debug +// +// For any other content type, the payload is expected to be JSON encoded and +// look like: +// +// {"level":"info"} +// +// An example curl request could look like this: +// +// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' +func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if err := lvl.serveHTTP(w, r); err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "internal error: %v", err) + } +} + +func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error { + type errorResponse struct { + Error string `json:"error"` + } + type payload struct { + Level zapcore.Level `json:"level"` + } + + enc := json.NewEncoder(w) + + switch r.Method { + case http.MethodGet: + return enc.Encode(payload{Level: lvl.Level()}) + + case http.MethodPut: + requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return enc.Encode(errorResponse{Error: err.Error()}) + } + lvl.SetLevel(requestedLvl) + return enc.Encode(payload{Level: lvl.Level()}) + + default: + w.WriteHeader(http.StatusMethodNotAllowed) + return enc.Encode(errorResponse{ + Error: "Only GET and PUT are supported.", + }) + } +} + +// Decodes incoming PUT requests and returns the requested logging level. +func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) { + if contentType == "application/x-www-form-urlencoded" { + return decodePutURL(r) + } + return decodePutJSON(r.Body) +} + +func decodePutURL(r *http.Request) (zapcore.Level, error) { + lvl := r.FormValue("level") + if lvl == "" { + return 0, errors.New("must specify logging level") + } + var l zapcore.Level + if err := l.UnmarshalText([]byte(lvl)); err != nil { + return 0, err + } + return l, nil +} + +func decodePutJSON(body io.Reader) (zapcore.Level, error) { + var pld struct { + Level *zapcore.Level `json:"level"` + } + if err := json.NewDecoder(body).Decode(&pld); err != nil { + return 0, fmt.Errorf("malformed request body: %v", err) + } + if pld.Level == nil { + return 0, errors.New("must specify logging level") + } + return *pld.Level, nil +} diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go new file mode 100644 index 0000000000..dad583aaa5 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go @@ -0,0 +1,31 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
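A minimal sketch of exposing the AtomicLevel endpoint described above; the /log/level path and port 8080 mirror the curl examples in the doc comment, and any other mux or address would work equally well:

	package main

	import (
		"net/http"

		"go.uber.org/zap"
		"go.uber.org/zap/zapcore"
	)

	func main() {
		lvl := zap.NewAtomicLevelAt(zapcore.InfoLevel)

		cfg := zap.NewProductionConfig()
		cfg.Level = lvl
		logger, err := cfg.Build()
		if err != nil {
			panic(err)
		}
		defer logger.Sync()

		// GET reports the current level; PUT changes it at runtime, e.g.
		//   curl -X PUT localhost:8080/log/level -d level=debug
		http.Handle("/log/level", lvl)
		logger.Info("serving log-level endpoint on :8080")
		_ = http.ListenAndServe(":8080", nil)
	}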
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package bufferpool houses zap's shared internal buffer pool. Third-party +// packages can recreate the same functionality with buffers.NewPool. +package bufferpool + +import "go.uber.org/zap/buffer" + +var ( + _pool = buffer.NewPool() + // Get retrieves a buffer from the pool, creating one if necessary. + Get = _pool.Get +) diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go new file mode 100644 index 0000000000..c4d5d02abc --- /dev/null +++ b/vendor/go.uber.org/zap/internal/color/color.go @@ -0,0 +1,44 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package color adds coloring functionality for TTY output. +package color + +import "fmt" + +// Foreground colors. +const ( + Black Color = iota + 30 + Red + Green + Yellow + Blue + Magenta + Cyan + White +) + +// Color represents a text color. +type Color uint8 + +// Add adds the coloring to the given string. +func (c Color) Add(s string) string { + return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) +} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go new file mode 100644 index 0000000000..f673f9947b --- /dev/null +++ b/vendor/go.uber.org/zap/internal/exit/exit.go @@ -0,0 +1,66 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package exit provides stubs so that unit tests can exercise code that calls +// os.Exit(1). +package exit + +import "os" + +var _exit = os.Exit + +// With terminates the process by calling os.Exit(code). If the package is +// stubbed, it instead records a call in the testing spy. +func With(code int) { + _exit(code) +} + +// A StubbedExit is a testing fake for os.Exit. +type StubbedExit struct { + Exited bool + Code int + prev func(code int) +} + +// Stub substitutes a fake for the call to os.Exit(1). +func Stub() *StubbedExit { + s := &StubbedExit{prev: _exit} + _exit = s.exit + return s +} + +// WithStub runs the supplied function with Exit stubbed. It returns the stub +// used, so that users can test whether the process would have crashed. +func WithStub(f func()) *StubbedExit { + s := Stub() + defer s.Unstub() + f() + return s +} + +// Unstub restores the previous exit function. +func (se *StubbedExit) Unstub() { + _exit = se.prev +} + +func (se *StubbedExit) exit(code int) { + se.Exited = true + se.Code = code +} diff --git a/vendor/go.uber.org/zap/internal/level_enabler.go b/vendor/go.uber.org/zap/internal/level_enabler.go new file mode 100644 index 0000000000..40bfed81e6 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/level_enabler.go @@ -0,0 +1,37 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +// Package internal and its subpackages hold types and functionality +// that are not part of Zap's public API. +package internal + +import "go.uber.org/zap/zapcore" + +// LeveledEnabler is an interface satisfied by LevelEnablers that are able to +// report their own level. +// +// This interface is defined to use more conveniently in tests and non-zapcore +// packages. +// This cannot be imported from zapcore because of the cyclic dependency. +type LeveledEnabler interface { + zapcore.LevelEnabler + + Level() zapcore.Level +} diff --git a/vendor/go.uber.org/zap/internal/pool/pool.go b/vendor/go.uber.org/zap/internal/pool/pool.go new file mode 100644 index 0000000000..60e9d2c432 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/pool/pool.go @@ -0,0 +1,58 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package pool provides internal pool utilities. +package pool + +import ( + "sync" +) + +// A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed +// object pooling. +// +// Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will +// not be detected, so all internal pool use must take care to only store +// pointer types. +type Pool[T any] struct { + pool sync.Pool +} + +// New returns a new [Pool] for T, and will use fn to construct new Ts when +// the pool is empty. +func New[T any](fn func() T) *Pool[T] { + return &Pool[T]{ + pool: sync.Pool{ + New: func() any { + return fn() + }, + }, + } +} + +// Get gets a T from the pool, or creates a new one if the pool is empty. +func (p *Pool[T]) Get() T { + return p.pool.Get().(T) +} + +// Put returns x into the pool. +func (p *Pool[T]) Put(x T) { + p.pool.Put(x) +} diff --git a/vendor/go.uber.org/zap/internal/stacktrace/stack.go b/vendor/go.uber.org/zap/internal/stacktrace/stack.go new file mode 100644 index 0000000000..82af7551f9 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/stacktrace/stack.go @@ -0,0 +1,181 @@ +// Copyright (c) 2023 Uber Technologies, Inc. 
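Because the pool package is internal, code outside zap cannot import it; the following standalone sketch reproduces the same strongly typed sync.Pool wrapper pattern for illustration, using a *bytes.Buffer pool:

	package main

	import (
		"bytes"
		"fmt"
		"sync"
	)

	// pool mirrors the generic wrapper above: a typed facade over sync.Pool.
	// As noted above, only pointer types should be stored in it.
	type pool[T any] struct{ p sync.Pool }

	func newPool[T any](fn func() T) *pool[T] {
		return &pool[T]{p: sync.Pool{New: func() any { return fn() }}}
	}

	func (p *pool[T]) Get() T  { return p.p.Get().(T) }
	func (p *pool[T]) Put(x T) { p.p.Put(x) }

	func main() {
		bufs := newPool(func() *bytes.Buffer { return new(bytes.Buffer) })

		b := bufs.Get()
		b.WriteString("pooled buffer in use")
		fmt.Println(b.String())

		b.Reset()
		bufs.Put(b)
	}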
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package stacktrace provides support for gathering stack traces +// efficiently. +package stacktrace + +import ( + "runtime" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" +) + +var _stackPool = pool.New(func() *Stack { + return &Stack{ + storage: make([]uintptr, 64), + } +}) + +// Stack is a captured stack trace. +type Stack struct { + pcs []uintptr // program counters; always a subslice of storage + frames *runtime.Frames + + // The size of pcs varies depending on requirements: + // it will be one if the only the first frame was requested, + // and otherwise it will reflect the depth of the call stack. + // + // storage decouples the slice we need (pcs) from the slice we pool. + // We will always allocate a reasonably large storage, but we'll use + // only as much of it as we need. + storage []uintptr +} + +// Depth specifies how deep of a stack trace should be captured. +type Depth int + +const ( + // First captures only the first frame. + First Depth = iota + + // Full captures the entire call stack, allocating more + // storage for it if needed. + Full +) + +// Capture captures a stack trace of the specified depth, skipping +// the provided number of frames. skip=0 identifies the caller of +// Capture. +// +// The caller must call Free on the returned stacktrace after using it. +func Capture(skip int, depth Depth) *Stack { + stack := _stackPool.Get() + + switch depth { + case First: + stack.pcs = stack.storage[:1] + case Full: + stack.pcs = stack.storage + } + + // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers + // itself. +2 to skip captureStacktrace and runtime.Callers. + numFrames := runtime.Callers( + skip+2, + stack.pcs, + ) + + // runtime.Callers truncates the recorded stacktrace if there is no + // room in the provided slice. For the full stack trace, keep expanding + // storage until there are fewer frames than there is room. + if depth == Full { + pcs := stack.pcs + for numFrames == len(pcs) { + pcs = make([]uintptr, len(pcs)*2) + numFrames = runtime.Callers(skip+2, pcs) + } + + // Discard old storage instead of returning it to the pool. + // This will adjust the pool size over time if stack traces are + // consistently very deep. 
+ stack.storage = pcs + stack.pcs = pcs[:numFrames] + } else { + stack.pcs = stack.pcs[:numFrames] + } + + stack.frames = runtime.CallersFrames(stack.pcs) + return stack +} + +// Free releases resources associated with this stacktrace +// and returns it back to the pool. +func (st *Stack) Free() { + st.frames = nil + st.pcs = nil + _stackPool.Put(st) +} + +// Count reports the total number of frames in this stacktrace. +// Count DOES NOT change as Next is called. +func (st *Stack) Count() int { + return len(st.pcs) +} + +// Next returns the next frame in the stack trace, +// and a boolean indicating whether there are more after it. +func (st *Stack) Next() (_ runtime.Frame, more bool) { + return st.frames.Next() +} + +// Take returns a string representation of the current stacktrace. +// +// skip is the number of frames to skip before recording the stack trace. +// skip=0 identifies the caller of Take. +func Take(skip int) string { + stack := Capture(skip+1, Full) + defer stack.Free() + + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := NewFormatter(buffer) + stackfmt.FormatStack(stack) + return buffer.String() +} + +// Formatter formats a stack trace into a readable string representation. +type Formatter struct { + b *buffer.Buffer + nonEmpty bool // whehther we've written at least one frame already +} + +// NewFormatter builds a new Formatter. +func NewFormatter(b *buffer.Buffer) Formatter { + return Formatter{b: b} +} + +// FormatStack formats all remaining frames in the provided stacktrace -- minus +// the final runtime.main/runtime.goexit frame. +func (sf *Formatter) FormatStack(stack *Stack) { + // Note: On the last iteration, frames.Next() returns false, with a valid + // frame, but we ignore this frame. The last frame is a runtime frame which + // adds noise, since it's only either runtime.main or runtime.goexit. + for frame, more := stack.Next(); more; frame, more = stack.Next() { + sf.FormatFrame(frame) + } +} + +// FormatFrame formats the given frame. +func (sf *Formatter) FormatFrame(frame runtime.Frame) { + if sf.nonEmpty { + sf.b.AppendByte('\n') + } + sf.nonEmpty = true + sf.b.AppendString(frame.Function) + sf.b.AppendByte('\n') + sf.b.AppendByte('\t') + sf.b.AppendString(frame.File) + sf.b.AppendByte(':') + sf.b.AppendInt(int64(frame.Line)) +} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go new file mode 100644 index 0000000000..155b208bd3 --- /dev/null +++ b/vendor/go.uber.org/zap/level.go @@ -0,0 +1,153 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sync/atomic" + + "go.uber.org/zap/internal" + "go.uber.org/zap/zapcore" +) + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel = zapcore.DebugLevel + // InfoLevel is the default logging priority. + InfoLevel = zapcore.InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel = zapcore.WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel = zapcore.ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel = zapcore.DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel = zapcore.PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel = zapcore.FatalLevel +) + +// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with +// an anonymous function. +// +// It's particularly useful when splitting log output between different +// outputs (e.g., standard error and standard out). For sample code, see the +// package-level AdvancedConfiguration example. +type LevelEnablerFunc func(zapcore.Level) bool + +// Enabled calls the wrapped function. +func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } + +// An AtomicLevel is an atomically changeable, dynamic logging level. It lets +// you safely change the log level of a tree of loggers (the root logger and +// any children created by adding context) at runtime. +// +// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to +// alter its level. +// +// AtomicLevels must be created with the NewAtomicLevel constructor to allocate +// their internal atomic pointer. +type AtomicLevel struct { + l *atomic.Int32 +} + +var _ internal.LeveledEnabler = AtomicLevel{} + +// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging +// enabled. +func NewAtomicLevel() AtomicLevel { + lvl := AtomicLevel{l: new(atomic.Int32)} + lvl.l.Store(int32(InfoLevel)) + return lvl +} + +// NewAtomicLevelAt is a convenience function that creates an AtomicLevel +// and then calls SetLevel with the given level. +func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { + a := NewAtomicLevel() + a.SetLevel(l) + return a +} + +// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. +func ParseAtomicLevel(text string) (AtomicLevel, error) { + a := NewAtomicLevel() + l, err := zapcore.ParseLevel(text) + if err != nil { + return a, err + } + + a.SetLevel(l) + return a, nil +} + +// Enabled implements the zapcore.LevelEnabler interface, which allows the +// AtomicLevel to be used in place of traditional static levels. +func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { + return lvl.Level().Enabled(l) +} + +// Level returns the minimum enabled log level. 
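A stand-alone sketch (not part of the vendored files) of how the AtomicLevel described above is typically wired into a core; the JSON encoder and the stderr sink here are illustrative choices, not anything this patch configures:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	atom := zap.NewAtomicLevelAt(zap.InfoLevel)
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stderr),
		atom, // AtomicLevel satisfies zapcore.LevelEnabler
	)
	logger := zap.New(core)

	logger.Debug("dropped: debug is below the current level")
	atom.SetLevel(zap.DebugLevel) // raise verbosity at runtime
	logger.Debug("now visible")
}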
+func (lvl AtomicLevel) Level() zapcore.Level { + return zapcore.Level(int8(lvl.l.Load())) +} + +// SetLevel alters the logging level. +func (lvl AtomicLevel) SetLevel(l zapcore.Level) { + lvl.l.Store(int32(l)) +} + +// String returns the string representation of the underlying Level. +func (lvl AtomicLevel) String() string { + return lvl.Level().String() +} + +// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text +// representations as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl *AtomicLevel) UnmarshalText(text []byte) error { + if lvl.l == nil { + lvl.l = &atomic.Int32{} + } + + var l zapcore.Level + if err := l.UnmarshalText(text); err != nil { + return err + } + + lvl.SetLevel(l) + return nil +} + +// MarshalText marshals the AtomicLevel to a byte slice. It uses the same +// text representation as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl AtomicLevel) MarshalText() (text []byte, err error) { + return lvl.Level().MarshalText() +} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go new file mode 100644 index 0000000000..6205fe48a6 --- /dev/null +++ b/vendor/go.uber.org/zap/logger.go @@ -0,0 +1,432 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io" + "os" + "strings" + + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/stacktrace" + "go.uber.org/zap/zapcore" +) + +// A Logger provides fast, leveled, structured logging. All methods are safe +// for concurrent use. +// +// The Logger is designed for contexts in which every microsecond and every +// allocation matters, so its API intentionally favors performance and type +// safety over brevity. For most applications, the SugaredLogger strikes a +// better balance between performance and ergonomics. +type Logger struct { + core zapcore.Core + + development bool + addCaller bool + onFatal zapcore.CheckWriteHook // default is WriteThenFatal + + name string + errorOutput zapcore.WriteSyncer + + addStack zapcore.LevelEnabler + + callerSkip int + + clock zapcore.Clock +} + +// New constructs a new Logger from the provided zapcore.Core and Options. If +// the passed zapcore.Core is nil, it falls back to using a no-op +// implementation. +// +// This is the most flexible way to construct a Logger, but also the most +// verbose. 
For typical use cases, the highly-opinionated presets +// (NewProduction, NewDevelopment, and NewExample) or the Config struct are +// more convenient. +// +// For sample code, see the package-level AdvancedConfiguration example. +func New(core zapcore.Core, options ...Option) *Logger { + if core == nil { + return NewNop() + } + log := &Logger{ + core: core, + errorOutput: zapcore.Lock(os.Stderr), + addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, + } + return log.WithOptions(options...) +} + +// NewNop returns a no-op Logger. It never writes out logs or internal errors, +// and it never runs user-defined hooks. +// +// Using WithOptions to replace the Core or error output of a no-op Logger can +// re-enable logging. +func NewNop() *Logger { + return &Logger{ + core: zapcore.NewNopCore(), + errorOutput: zapcore.AddSync(io.Discard), + addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, + } +} + +// NewProduction builds a sensible production Logger that writes InfoLevel and +// above logs to standard error as JSON. +// +// It's a shortcut for NewProductionConfig().Build(...Option). +func NewProduction(options ...Option) (*Logger, error) { + return NewProductionConfig().Build(options...) +} + +// NewDevelopment builds a development Logger that writes DebugLevel and above +// logs to standard error in a human-friendly format. +// +// It's a shortcut for NewDevelopmentConfig().Build(...Option). +func NewDevelopment(options ...Option) (*Logger, error) { + return NewDevelopmentConfig().Build(options...) +} + +// Must is a helper that wraps a call to a function returning (*Logger, error) +// and panics if the error is non-nil. It is intended for use in variable +// initialization such as: +// +// var logger = zap.Must(zap.NewProduction()) +func Must(logger *Logger, err error) *Logger { + if err != nil { + panic(err) + } + + return logger +} + +// NewExample builds a Logger that's designed for use in zap's testable +// examples. It writes DebugLevel and above logs to standard out as JSON, but +// omits the timestamp and calling function to keep example output +// short and deterministic. +func NewExample(options ...Option) *Logger { + encoderCfg := zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + NameKey: "logger", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + } + core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) + return New(core).WithOptions(options...) +} + +// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, +// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a +// single application to use both Loggers and SugaredLoggers, converting +// between them on the boundaries of performance-sensitive code. +func (log *Logger) Sugar() *SugaredLogger { + core := log.clone() + core.callerSkip += 2 + return &SugaredLogger{core} +} + +// Named adds a new path segment to the logger's name. Segments are joined by +// periods. By default, Loggers are unnamed. +func (log *Logger) Named(s string) *Logger { + if s == "" { + return log + } + l := log.clone() + if log.name == "" { + l.name = s + } else { + l.name = strings.Join([]string{l.name, s}, ".") + } + return l +} + +// WithOptions clones the current Logger, applies the supplied Options, and +// returns the resulting Logger. It's safe to use concurrently. 
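The presets and helpers documented above compose as in this illustrative sketch, which is separate from the vendored sources; the logger name "checkout" and the field values are made up:

package main

import "go.uber.org/zap"

func main() {
	logger := zap.Must(zap.NewProduction()) // panics if construction fails
	defer logger.Sync()

	svc := logger.Named("checkout") // name segments are joined with "."
	svc.Info("starting")

	sugar := svc.Sugar() // cheap; convert back with sugar.Desugar()
	sugar.Infow("order received", "id", 42, "user", "alice")
}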
+func (log *Logger) WithOptions(opts ...Option) *Logger { + c := log.clone() + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// With creates a child logger and adds structured context to it. Fields added +// to the child don't affect the parent, and vice versa. Any fields that +// require evaluation (such as Objects) are evaluated upon invocation of With. +func (log *Logger) With(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + l := log.clone() + l.core = l.core.With(fields) + return l +} + +// WithLazy creates a child logger and adds structured context to it lazily. +// +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// WithLazy provides a worthwhile performance optimization for contextual loggers +// when the likelihood of using the child logger is low, +// such as error paths and rarely taken branches. +// +// Similar to [With], fields added to the child don't affect the parent, and vice versa. +func (log *Logger) WithLazy(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewLazyWith(core, fields) + })) +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (log *Logger) Level() zapcore.Level { + return zapcore.LevelOf(log.core) +} + +// Check returns a CheckedEntry if logging a message at the specified level +// is enabled. It's a completely optional optimization; in high-performance +// applications, Check can help avoid allocating a slice to hold fields. +func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + return log.check(lvl, msg) +} + +// Log logs a message at the specified level. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// Any Fields that require evaluation (such as Objects) are evaluated upon +// invocation of Log. +func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) { + if ce := log.check(lvl, msg); ce != nil { + ce.Write(fields...) + } +} + +// Debug logs a message at DebugLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Debug(msg string, fields ...Field) { + if ce := log.check(DebugLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Info logs a message at InfoLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Info(msg string, fields ...Field) { + if ce := log.check(InfoLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Warn logs a message at WarnLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Warn(msg string, fields ...Field) { + if ce := log.check(WarnLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Error logs a message at ErrorLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Error(msg string, fields ...Field) { + if ce := log.check(ErrorLevel, msg); ce != nil { + ce.Write(fields...) 
+ } +} + +// DPanic logs a message at DPanicLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// +// If the logger is in development mode, it then panics (DPanic means +// "development panic"). This is useful for catching errors that are +// recoverable, but shouldn't ever happen. +func (log *Logger) DPanic(msg string, fields ...Field) { + if ce := log.check(DPanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Panic logs a message at PanicLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then panics, even if logging at PanicLevel is disabled. +func (log *Logger) Panic(msg string, fields ...Field) { + if ce := log.check(PanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Fatal logs a message at FatalLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then calls os.Exit(1), even if logging at FatalLevel is +// disabled. +func (log *Logger) Fatal(msg string, fields ...Field) { + if ce := log.check(FatalLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Sync calls the underlying Core's Sync method, flushing any buffered log +// entries. Applications should take care to call Sync before exiting. +func (log *Logger) Sync() error { + return log.core.Sync() +} + +// Core returns the Logger's underlying zapcore.Core. +func (log *Logger) Core() zapcore.Core { + return log.core +} + +// Name returns the Logger's underlying name, +// or an empty string if the logger is unnamed. +func (log *Logger) Name() string { + return log.name +} + +func (log *Logger) clone() *Logger { + clone := *log + return &clone +} + +func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + // Logger.check must always be called directly by a method in the + // Logger interface (e.g., Check, Info, Fatal). + // This skips Logger.check and the Info/Fatal/Check/etc. method that + // called it. + const callerSkipOffset = 2 + + // Check the level first to reduce the cost of disabled log calls. + // Since Panic and higher may exit, we skip the optimization for those levels. + if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) { + return nil + } + + // Create basic checked entry thru the core; this will be non-nil if the + // log message will actually be written somewhere. + ent := zapcore.Entry{ + LoggerName: log.name, + Time: log.clock.Now(), + Level: lvl, + Message: msg, + } + ce := log.core.Check(ent, nil) + willWrite := ce != nil + + // Set up any required terminal behavior. + switch ent.Level { + case zapcore.PanicLevel: + ce = ce.After(ent, zapcore.WriteThenPanic) + case zapcore.FatalLevel: + onFatal := log.onFatal + // nil or WriteThenNoop will lead to continued execution after + // a Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) + // + // The f.Name() will panic if we continue execution after the + // log.Fatal. 
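As a rough usage sketch of the With, Check, and leveled methods documented above (independent of this patch; the field names are invented):

package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()

	reqLog := logger.With(zap.String("request_id", "abc-123"))
	reqLog.Info("handling request", zap.Int("items", 3))

	// Check is optional: it lets hot paths skip building fields when the
	// level is disabled.
	if ce := logger.Check(zap.DebugLevel, "expensive detail"); ce != nil {
		ce.Write(zap.String("dump", "..."))
	}
}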
+ if onFatal == nil || onFatal == zapcore.WriteThenNoop { + onFatal = zapcore.WriteThenFatal + } + ce = ce.After(ent, onFatal) + case zapcore.DPanicLevel: + if log.development { + ce = ce.After(ent, zapcore.WriteThenPanic) + } + } + + // Only do further annotation if we're going to write this message; checked + // entries that exist only for terminal behavior don't benefit from + // annotation. + if !willWrite { + return ce + } + + // Thread the error output through to the CheckedEntry. + ce.ErrorOutput = log.errorOutput + + addStack := log.addStack.Enabled(ce.Level) + if !log.addCaller && !addStack { + return ce + } + + // Adding the caller or stack trace requires capturing the callers of + // this function. We'll share information between these two. + stackDepth := stacktrace.First + if addStack { + stackDepth = stacktrace.Full + } + stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth) + defer stack.Free() + + if stack.Count() == 0 { + if log.addCaller { + fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC()) + _ = log.errorOutput.Sync() + } + return ce + } + + frame, more := stack.Next() + + if log.addCaller { + ce.Caller = zapcore.EntryCaller{ + Defined: frame.PC != 0, + PC: frame.PC, + File: frame.File, + Line: frame.Line, + Function: frame.Function, + } + } + + if addStack { + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := stacktrace.NewFormatter(buffer) + + // We've already extracted the first frame, so format that + // separately and defer to stackfmt for the rest. + stackfmt.FormatFrame(frame) + if more { + stackfmt.FormatStack(stack) + } + ce.Stack = buffer.String() + } + + return ce +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go new file mode 100644 index 0000000000..c4f3bca3d2 --- /dev/null +++ b/vendor/go.uber.org/zap/options.go @@ -0,0 +1,167 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" +) + +// An Option configures a Logger. +type Option interface { + apply(*Logger) +} + +// optionFunc wraps a func so it satisfies the Option interface. +type optionFunc func(*Logger) + +func (f optionFunc) apply(log *Logger) { + f(log) +} + +// WrapCore wraps or replaces the Logger's underlying zapcore.Core. 
+func WrapCore(f func(zapcore.Core) zapcore.Core) Option { + return optionFunc(func(log *Logger) { + log.core = f(log.core) + }) +} + +// Hooks registers functions which will be called each time the Logger writes +// out an Entry. Repeated use of Hooks is additive. +// +// Hooks are useful for simple side effects, like capturing metrics for the +// number of emitted logs. More complex side effects, including anything that +// requires access to the Entry's structured fields, should be implemented as +// a zapcore.Core instead. See zapcore.RegisterHooks for details. +func Hooks(hooks ...func(zapcore.Entry) error) Option { + return optionFunc(func(log *Logger) { + log.core = zapcore.RegisterHooks(log.core, hooks...) + }) +} + +// Fields adds fields to the Logger. +func Fields(fs ...Field) Option { + return optionFunc(func(log *Logger) { + log.core = log.core.With(fs) + }) +} + +// ErrorOutput sets the destination for errors generated by the Logger. Note +// that this option only affects internal errors; for sample code that sends +// error-level logs to a different location from info- and debug-level logs, +// see the package-level AdvancedConfiguration example. +// +// The supplied WriteSyncer must be safe for concurrent use. The Open and +// zapcore.Lock functions are the simplest ways to protect files with a mutex. +func ErrorOutput(w zapcore.WriteSyncer) Option { + return optionFunc(func(log *Logger) { + log.errorOutput = w + }) +} + +// Development puts the logger in development mode, which makes DPanic-level +// logs panic instead of simply logging an error. +func Development() Option { + return optionFunc(func(log *Logger) { + log.development = true + }) +} + +// AddCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller. See also WithCaller. +func AddCaller() Option { + return WithCaller(true) +} + +// WithCaller configures the Logger to annotate each message with the filename, +// line number, and function name of zap's caller, or not, depending on the +// value of enabled. This is a generalized form of AddCaller. +func WithCaller(enabled bool) Option { + return optionFunc(func(log *Logger) { + log.addCaller = enabled + }) +} + +// AddCallerSkip increases the number of callers skipped by caller annotation +// (as enabled by the AddCaller option). When building wrappers around the +// Logger and SugaredLogger, supplying this Option prevents zap from always +// reporting the wrapper code as the caller. +func AddCallerSkip(skip int) Option { + return optionFunc(func(log *Logger) { + log.callerSkip += skip + }) +} + +// AddStacktrace configures the Logger to record a stack trace for all messages at +// or above a given level. +func AddStacktrace(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + log.addStack = lvl + }) +} + +// IncreaseLevel increase the level of the logger. It has no effect if +// the passed in level tries to decrease the level of the logger. +func IncreaseLevel(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + core, err := zapcore.NewIncreaseLevelCore(log.core, lvl) + if err != nil { + fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err) + } else { + log.core = core + } + }) +} + +// OnFatal sets the action to take on fatal logs. +// +// Deprecated: Use [WithFatalHook] instead. +func OnFatal(action zapcore.CheckWriteAction) Option { + return WithFatalHook(action) +} + +// WithFatalHook sets a CheckWriteHook to run on fatal logs. 
+// Zap will call this hook after writing a log statement with a Fatal level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a fatal log message, but it will not exit the +// program. +// +// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit)) +// +// It is important that the provided CheckWriteHook stops the control flow at +// the current statement to meet expectations of callers of the logger. +// We recommend calling os.Exit or runtime.Goexit inside custom hooks at +// minimum. +func WithFatalHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onFatal = hook + }) +} + +// WithClock specifies the clock used by the logger to determine the current +// time for logged entries. Defaults to the system clock with time.Now. +func WithClock(clock zapcore.Clock) Option { + return optionFunc(func(log *Logger) { + log.clock = clock + }) +} diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go new file mode 100644 index 0000000000..499772a00d --- /dev/null +++ b/vendor/go.uber.org/zap/sink.go @@ -0,0 +1,180 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + + "go.uber.org/zap/zapcore" +) + +const schemeFile = "file" + +var _sinkRegistry = newSinkRegistry() + +// Sink defines the interface to write to and close logger destinations. +type Sink interface { + zapcore.WriteSyncer + io.Closer +} + +type errSinkNotFound struct { + scheme string +} + +func (e *errSinkNotFound) Error() string { + return fmt.Sprintf("no sink found for scheme %q", e.scheme) +} + +type nopCloserSink struct{ zapcore.WriteSyncer } + +func (nopCloserSink) Close() error { return nil } + +type sinkRegistry struct { + mu sync.Mutex + factories map[string]func(*url.URL) (Sink, error) // keyed by scheme + openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile +} + +func newSinkRegistry() *sinkRegistry { + sr := &sinkRegistry{ + factories: make(map[string]func(*url.URL) (Sink, error)), + openFile: os.OpenFile, + } + // Infallible operation: the registry is empty, so we can't have a conflict. + _ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL) + return sr +} + +// RegisterScheme registers the given factory for the specific scheme. 
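The Option values defined in options.go above are usually combined through WithOptions; a hypothetical sketch, with this particular mix of options chosen only for illustration:

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zap.Must(zap.NewDevelopment())

	logger := base.WithOptions(
		zap.AddCaller(),                            // annotate file:line of the caller
		zap.AddStacktrace(zap.ErrorLevel),          // stack traces for Error and above
		zap.IncreaseLevel(zap.InfoLevel),           // drop Debug from this clone
		zap.WithFatalHook(zapcore.WriteThenGoexit), // end the goroutine instead of os.Exit
	)

	logger.Error("something failed") // carries caller and stack trace
}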
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + sr.mu.Lock() + defer sr.mu.Unlock() + + if scheme == "" { + return errors.New("can't register a sink factory for empty string") + } + normalized, err := normalizeScheme(scheme) + if err != nil { + return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) + } + if _, ok := sr.factories[normalized]; ok { + return fmt.Errorf("sink factory already registered for scheme %q", normalized) + } + sr.factories[normalized] = factory + return nil +} + +func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) { + // URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to + // the drive, and path is unset unless `c:/log.txt` is used. + // To avoid Windows-specific URL handling, we instead check IsAbs to open as a file. + // filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows. + if filepath.IsAbs(rawURL) { + return sr.newFileSinkFromPath(rawURL) + } + + u, err := url.Parse(rawURL) + if err != nil { + return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) + } + if u.Scheme == "" { + u.Scheme = schemeFile + } + + sr.mu.Lock() + factory, ok := sr.factories[u.Scheme] + sr.mu.Unlock() + if !ok { + return nil, &errSinkNotFound{u.Scheme} + } + return factory(u) +} + +// RegisterSink registers a user-supplied factory for all sinks with a +// particular scheme. +// +// All schemes must be ASCII, valid under section 0.1 of RFC 3986 +// (https://tools.ietf.org/html/rfc3983#section-3.1), and must not already +// have a factory registered. Zap automatically registers a factory for the +// "file" scheme. +func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + return _sinkRegistry.RegisterSink(scheme, factory) +} + +func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) { + if u.User != nil { + return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) + } + if u.Fragment != "" { + return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u) + } + if u.RawQuery != "" { + return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u) + } + // Error messages are better if we check hostname and port separately. + if u.Port() != "" { + return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u) + } + if hn := u.Hostname(); hn != "" && hn != "localhost" { + return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) + } + + return sr.newFileSinkFromPath(u.Path) +} + +func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) { + switch path { + case "stdout": + return nopCloserSink{os.Stdout}, nil + case "stderr": + return nopCloserSink{os.Stderr}, nil + } + return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666) +} + +func normalizeScheme(s string) (string, error) { + // https://tools.ietf.org/html/rfc3986#section-3.1 + s = strings.ToLower(s) + if first := s[0]; 'a' > first || 'z' < first { + return "", errors.New("must start with a letter") + } + for i := 1; i < len(s); i++ { // iterate over bytes, not runes + c := s[i] + switch { + case 'a' <= c && c <= 'z': + continue + case '0' <= c && c <= '9': + continue + case c == '.' 
|| c == '+' || c == '-': + continue + } + return "", fmt.Errorf("may not contain %q", c) + } + return s, nil +} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go new file mode 100644 index 0000000000..00ac5fe3ac --- /dev/null +++ b/vendor/go.uber.org/zap/sugar.go @@ -0,0 +1,437 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +const ( + _oddNumberErrMsg = "Ignored key without a value." + _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." + _multipleErrMsg = "Multiple errors without a key." +) + +// A SugaredLogger wraps the base Logger functionality in a slower, but less +// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar +// method. +// +// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. +// For each log level, it exposes four methods: +// +// - methods named after the log level for log.Print-style logging +// - methods ending in "w" for loosely-typed structured logging +// - methods ending in "f" for log.Printf-style logging +// - methods ending in "ln" for log.Println-style logging +// +// For example, the methods for InfoLevel are: +// +// Info(...any) Print-style logging +// Infow(...any) Structured logging (read as "info with") +// Infof(string, ...any) Printf-style logging +// Infoln(...any) Println-style logging +type SugaredLogger struct { + base *Logger +} + +// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring +// is quite inexpensive, so it's reasonable for a single application to use +// both Loggers and SugaredLoggers, converting between them on the boundaries +// of performance-sensitive code. +func (s *SugaredLogger) Desugar() *Logger { + base := s.base.clone() + base.callerSkip -= 2 + return base +} + +// Named adds a sub-scope to the logger's name. See Logger.Named for details. +func (s *SugaredLogger) Named(name string) *SugaredLogger { + return &SugaredLogger{base: s.base.Named(name)} +} + +// WithOptions clones the current SugaredLogger, applies the supplied Options, +// and returns the result. It's safe to use concurrently. +func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger { + base := s.base.clone() + for _, opt := range opts { + opt.apply(base) + } + return &SugaredLogger{base: base} +} + +// With adds a variadic number of fields to the logging context. 
It accepts a +// mix of strongly-typed Field objects and loosely-typed key-value pairs. When +// processing pairs, the first element of the pair is used as the field key +// and the second as the field value. +// +// For example, +// +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// +// is the equivalent of +// +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) +// +// Note that the keys in key-value pairs should be strings. In development, +// passing a non-string key panics. In production, the logger is more +// forgiving: a separate error is logged, but the key-value pair is skipped +// and execution continues. Passing an orphaned key triggers similar behavior: +// panics in development and errors in production. +func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (s *SugaredLogger) Level() zapcore.Level { + return zapcore.LevelOf(s.base.core) +} + +// Debug logs the provided arguments at [DebugLevel]. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Debug(args ...interface{}) { + s.log(DebugLevel, "", args, nil) +} + +// Info logs the provided arguments at [InfoLevel]. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Info(args ...interface{}) { + s.log(InfoLevel, "", args, nil) +} + +// Warn logs the provided arguments at [WarnLevel]. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Warn(args ...interface{}) { + s.log(WarnLevel, "", args, nil) +} + +// Error logs the provided arguments at [ErrorLevel]. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Error(args ...interface{}) { + s.log(ErrorLevel, "", args, nil) +} + +// DPanic logs the provided arguments at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) DPanic(args ...interface{}) { + s.log(DPanicLevel, "", args, nil) +} + +// Panic constructs a message with the provided arguments and panics. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Panic(args ...interface{}) { + s.log(PanicLevel, "", args, nil) +} + +// Fatal constructs a message with the provided arguments and calls os.Exit. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Fatal(args ...interface{}) { + s.log(FatalLevel, "", args, nil) +} + +// Debugf formats the message according to the format specifier +// and logs it at [DebugLevel]. +func (s *SugaredLogger) Debugf(template string, args ...interface{}) { + s.log(DebugLevel, template, args, nil) +} + +// Infof formats the message according to the format specifier +// and logs it at [InfoLevel]. +func (s *SugaredLogger) Infof(template string, args ...interface{}) { + s.log(InfoLevel, template, args, nil) +} + +// Warnf formats the message according to the format specifier +// and logs it at [WarnLevel]. 
+func (s *SugaredLogger) Warnf(template string, args ...interface{}) { + s.log(WarnLevel, template, args, nil) +} + +// Errorf formats the message according to the format specifier +// and logs it at [ErrorLevel]. +func (s *SugaredLogger) Errorf(template string, args ...interface{}) { + s.log(ErrorLevel, template, args, nil) +} + +// DPanicf formats the message according to the format specifier +// and logs it at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { + s.log(DPanicLevel, template, args, nil) +} + +// Panicf formats the message according to the format specifier +// and panics. +func (s *SugaredLogger) Panicf(template string, args ...interface{}) { + s.log(PanicLevel, template, args, nil) +} + +// Fatalf formats the message according to the format specifier +// and calls os.Exit. +func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { + s.log(FatalLevel, template, args, nil) +} + +// Debugw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +// +// When debug-level logging is disabled, this is much faster than +// +// s.With(keysAndValues).Debug(msg) +func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { + s.log(DebugLevel, msg, nil, keysAndValues) +} + +// Infow logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { + s.log(InfoLevel, msg, nil, keysAndValues) +} + +// Warnw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { + s.log(WarnLevel, msg, nil, keysAndValues) +} + +// Errorw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { + s.log(ErrorLevel, msg, nil, keysAndValues) +} + +// DPanicw logs a message with some additional context. In development, the +// logger then panics. (See DPanicLevel for details.) The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { + s.log(DPanicLevel, msg, nil, keysAndValues) +} + +// Panicw logs a message with some additional context, then panics. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { + s.log(PanicLevel, msg, nil, keysAndValues) +} + +// Fatalw logs a message with some additional context, then calls os.Exit. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { + s.log(FatalLevel, msg, nil, keysAndValues) +} + +// Debugln logs a message at [DebugLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Debugln(args ...interface{}) { + s.logln(DebugLevel, args, nil) +} + +// Infoln logs a message at [InfoLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Infoln(args ...interface{}) { + s.logln(InfoLevel, args, nil) +} + +// Warnln logs a message at [WarnLevel]. +// Spaces are always added between arguments. 
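A small, self-contained sketch of the SugaredLogger call styles described above; the job names and fields are placeholders, not anything used by this patch:

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	sugar := zap.Must(zap.NewDevelopment()).Sugar()
	defer sugar.Sync()

	err := errors.New("connection reset")

	sugar.Infof("retrying in %d ms", 250) // Printf-style
	sugar.Infoln("worker", 3, "started")  // Println-style
	sugar.Errorw("upstream call failed",  // structured ("with")
		"attempt", 2,
		"err", err,
	)
	sugar.With("job", "reindex").Warn("slow run") // contextual child logger
}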
+func (s *SugaredLogger) Warnln(args ...interface{}) { + s.logln(WarnLevel, args, nil) +} + +// Errorln logs a message at [ErrorLevel]. +// Spaces are always added between arguments. +func (s *SugaredLogger) Errorln(args ...interface{}) { + s.logln(ErrorLevel, args, nil) +} + +// DPanicln logs a message at [DPanicLevel]. +// In development, the logger then panics. (See [DPanicLevel] for details.) +// Spaces are always added between arguments. +func (s *SugaredLogger) DPanicln(args ...interface{}) { + s.logln(DPanicLevel, args, nil) +} + +// Panicln logs a message at [PanicLevel] and panics. +// Spaces are always added between arguments. +func (s *SugaredLogger) Panicln(args ...interface{}) { + s.logln(PanicLevel, args, nil) +} + +// Fatalln logs a message at [FatalLevel] and calls os.Exit. +// Spaces are always added between arguments. +func (s *SugaredLogger) Fatalln(args ...interface{}) { + s.logln(FatalLevel, args, nil) +} + +// Sync flushes any buffered log entries. +func (s *SugaredLogger) Sync() error { + return s.base.Sync() +} + +// log message with Sprint, Sprintf, or neither. +func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { + // If logging at this level is completely disabled, skip the overhead of + // string formatting. + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + msg := getMessage(template, fmtArgs) + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +// logln message with Sprintln +func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) { + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + msg := getMessageln(fmtArgs) + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +// getMessage format with Sprint, Sprintf, or neither. +func getMessage(template string, fmtArgs []interface{}) string { + if len(fmtArgs) == 0 { + return template + } + + if template != "" { + return fmt.Sprintf(template, fmtArgs...) + } + + if len(fmtArgs) == 1 { + if str, ok := fmtArgs[0].(string); ok { + return str + } + } + return fmt.Sprint(fmtArgs...) +} + +// getMessageln format with Sprintln. +func getMessageln(fmtArgs []interface{}) string { + msg := fmt.Sprintln(fmtArgs...) + return msg[:len(msg)-1] +} + +func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { + if len(args) == 0 { + return nil + } + + var ( + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields = make([]Field, 0, len(args)) + invalid invalidPairs + seenError bool + ) + + for i := 0; i < len(args); { + // This is a strongly-typed field. Consume it and move on. + if f, ok := args[i].(Field); ok { + fields = append(fields, f) + i++ + continue + } + + // If it is an error, consume it and move on. + if err, ok := args[i].(error); ok { + if !seenError { + seenError = true + fields = append(fields, Error(err)) + } else { + s.base.Error(_multipleErrMsg, Error(err)) + } + i++ + continue + } + + // Make sure this element isn't a dangling key. + if i == len(args)-1 { + s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) + break + } + + // Consume this value and the next, treating them as a key-value pair. If the + // key isn't a string, add this pair to the slice of invalid pairs. 
+ key, val := args[i], args[i+1] + if keyStr, ok := key.(string); !ok { + // Subsequent errors are likely, so allocate once up front. + if cap(invalid) == 0 { + invalid = make(invalidPairs, 0, len(args)/2) + } + invalid = append(invalid, invalidPair{i, key, val}) + } else { + fields = append(fields, Any(keyStr, val)) + } + i += 2 + } + + // If we encountered any invalid key-value pairs, log an error. + if len(invalid) > 0 { + s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid)) + } + return fields +} + +type invalidPair struct { + position int + key, value interface{} +} + +func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt64("position", int64(p.position)) + Any("key", p.key).AddTo(enc) + Any("value", p.value).AddTo(enc) + return nil +} + +type invalidPairs []invalidPair + +func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { + var err error + for i := range ps { + err = multierr.Append(err, enc.AppendObject(ps[i])) + } + return err +} diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go new file mode 100644 index 0000000000..c5a1f16225 --- /dev/null +++ b/vendor/go.uber.org/zap/time.go @@ -0,0 +1,27 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "time" + +func timeToMillis(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond) +} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go new file mode 100644 index 0000000000..06768c6791 --- /dev/null +++ b/vendor/go.uber.org/zap/writer.go @@ -0,0 +1,98 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +// Open is a high-level wrapper that takes a variadic number of URLs, opens or +// creates each of the specified resources, and combines them into a locked +// WriteSyncer. It also returns any error encountered and a function to close +// any opened files. +// +// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a +// scheme and URLs with the "file" scheme. Third-party code may register +// factories for other schemes using RegisterSink. +// +// URLs with the "file" scheme must use absolute paths on the local +// filesystem. No user, password, port, fragments, or query parameters are +// allowed, and the hostname must be empty or "localhost". +// +// Since it's common to write logs to the local filesystem, URLs without a +// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without +// a scheme, the special paths "stdout" and "stderr" are interpreted as +// os.Stdout and os.Stderr. When specified without a scheme, relative file +// paths also work. +func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { + writers, closeAll, err := open(paths) + if err != nil { + return nil, nil, err + } + + writer := CombineWriteSyncers(writers...) + return writer, closeAll, nil +} + +func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { + writers := make([]zapcore.WriteSyncer, 0, len(paths)) + closers := make([]io.Closer, 0, len(paths)) + closeAll := func() { + for _, c := range closers { + _ = c.Close() + } + } + + var openErr error + for _, path := range paths { + sink, err := _sinkRegistry.newSink(path) + if err != nil { + openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err)) + continue + } + writers = append(writers, sink) + closers = append(closers, sink) + } + if openErr != nil { + closeAll() + return nil, nil, openErr + } + + return writers, closeAll, nil +} + +// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a +// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op +// WriteSyncer. +// +// It's provided purely as a convenience; the result is no different from +// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. +func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { + if len(writers) == 0 { + return zapcore.AddSync(io.Discard) + } + return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) +} diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go new file mode 100644 index 0000000000..a40e93b3ec --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -0,0 +1,219 @@ +// Copyright (c) 2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bufio" + "sync" + "time" + + "go.uber.org/multierr" +) + +const ( + // _defaultBufferSize specifies the default size used by Buffer. + _defaultBufferSize = 256 * 1024 // 256 kB + + // _defaultFlushInterval specifies the default flush interval for + // Buffer. + _defaultFlushInterval = 30 * time.Second +) + +// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before +// flushing them to a wrapped WriteSyncer after reaching some limit, or at some +// fixed interval--whichever comes first. +// +// BufferedWriteSyncer is safe for concurrent use. You don't need to use +// zapcore.Lock for WriteSyncers with BufferedWriteSyncer. +// +// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log +// destination (*os.File is a valid WriteSyncer), wrap it with +// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the +// object. +// +// func main() { +// ws := ... // your log destination +// bws := &zapcore.BufferedWriteSyncer{WS: ws} +// defer bws.Stop() +// +// // ... +// core := zapcore.NewCore(enc, bws, lvl) +// logger := zap.New(core) +// +// // ... +// } +// +// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs, +// waiting at most 30 seconds between flushes. +// You can customize these parameters by setting the Size or FlushInterval +// fields. +// For example, the following buffers up to 512 kB of logs before flushing them +// to Stderr, with a maximum of one minute between each flush. +// +// ws := &BufferedWriteSyncer{ +// WS: os.Stderr, +// Size: 512 * 1024, // 512 kB +// FlushInterval: time.Minute, +// } +// defer ws.Stop() +type BufferedWriteSyncer struct { + // WS is the WriteSyncer around which BufferedWriteSyncer will buffer + // writes. + // + // This field is required. + WS WriteSyncer + + // Size specifies the maximum amount of data the writer will buffered + // before flushing. + // + // Defaults to 256 kB if unspecified. + Size int + + // FlushInterval specifies how often the writer should flush data if + // there have been no writes. + // + // Defaults to 30 seconds if unspecified. + FlushInterval time.Duration + + // Clock, if specified, provides control of the source of time for the + // writer. + // + // Defaults to the system clock. 
+ Clock Clock + + // unexported fields for state + mu sync.Mutex + initialized bool // whether initialize() has run + stopped bool // whether Stop() has run + writer *bufio.Writer + ticker *time.Ticker + stop chan struct{} // closed when flushLoop should stop + done chan struct{} // closed when flushLoop has stopped +} + +func (s *BufferedWriteSyncer) initialize() { + size := s.Size + if size == 0 { + size = _defaultBufferSize + } + + flushInterval := s.FlushInterval + if flushInterval == 0 { + flushInterval = _defaultFlushInterval + } + + if s.Clock == nil { + s.Clock = DefaultClock + } + + s.ticker = s.Clock.NewTicker(flushInterval) + s.writer = bufio.NewWriterSize(s.WS, size) + s.stop = make(chan struct{}) + s.done = make(chan struct{}) + s.initialized = true + go s.flushLoop() +} + +// Write writes log data into buffer syncer directly, multiple Write calls will be batched, +// and log data will be flushed to disk when the buffer is full or periodically. +func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + s.initialize() + } + + // To avoid partial writes from being flushed, we manually flush the existing buffer if: + // * The current write doesn't fit into the buffer fully, and + // * The buffer is not empty (since bufio will not split large writes when the buffer is empty) + if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 { + if err := s.writer.Flush(); err != nil { + return 0, err + } + } + + return s.writer.Write(bs) +} + +// Sync flushes buffered log data into disk directly. +func (s *BufferedWriteSyncer) Sync() error { + s.mu.Lock() + defer s.mu.Unlock() + + var err error + if s.initialized { + err = s.writer.Flush() + } + + return multierr.Append(err, s.WS.Sync()) +} + +// flushLoop flushes the buffer at the configured interval until Stop is +// called. +func (s *BufferedWriteSyncer) flushLoop() { + defer close(s.done) + + for { + select { + case <-s.ticker.C: + // we just simply ignore error here + // because the underlying bufio writer stores any errors + // and we return any error from Sync() as part of the close + _ = s.Sync() + case <-s.stop: + return + } + } +} + +// Stop closes the buffer, cleans up background goroutines, and flushes +// remaining unwritten data. +func (s *BufferedWriteSyncer) Stop() (err error) { + var stopped bool + + // Critical section. + func() { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + return + } + + stopped = s.stopped + if stopped { + return + } + s.stopped = true + + s.ticker.Stop() + close(s.stop) // tell flushLoop to stop + <-s.done // and wait until it has + }() + + // Don't call Sync on consecutive Stops. + if !stopped { + err = s.Sync() + } + + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go new file mode 100644 index 0000000000..422fd82a6b --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/clock.go @@ -0,0 +1,48 @@ +// Copyright (c) 2021 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// DefaultClock is the default clock used by Zap in operations that require +// time. This clock uses the system clock for all operations. +var DefaultClock = systemClock{} + +// Clock is a source of time for logged entries. +type Clock interface { + // Now returns the current local time. + Now() time.Time + + // NewTicker returns *time.Ticker that holds a channel + // that delivers "ticks" of a clock. + NewTicker(time.Duration) *time.Ticker +} + +// systemClock implements default Clock that uses system time. +type systemClock struct{} + +func (systemClock) Now() time.Time { + return time.Now() +} + +func (systemClock) NewTicker(duration time.Duration) *time.Ticker { + return time.NewTicker(duration) +} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go new file mode 100644 index 0000000000..8ca0bfaf56 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -0,0 +1,157 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
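The Clock interface above is small enough to stub out in tests. The following is an illustrative sketch only; constantClock is an invented example type, not something shipped by zap. A clock like this can also be supplied through the BufferedWriteSyncer.Clock field shown earlier.

package main

import (
	"fmt"
	"time"

	"go.uber.org/zap/zapcore"
)

// constantClock is a hypothetical Clock that always reports the same instant
// but still hands out real tickers so flush loops keep firing.
type constantClock struct{ now time.Time }

func (c constantClock) Now() time.Time                         { return c.now }
func (c constantClock) NewTicker(d time.Duration) *time.Ticker { return time.NewTicker(d) }

func main() {
	var clk zapcore.Clock = constantClock{now: time.Unix(0, 0)}
	fmt.Println(clk.Now()) // always the Unix epoch, printed in the local zone
	// zapcore.DefaultClock remains the stock implementation backed by the system clock.
}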
+ +package zapcore + +import ( + "fmt" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" +) + +var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder { + return &sliceArrayEncoder{ + elems: make([]interface{}, 0, 2), + } +}) + +func getSliceEncoder() *sliceArrayEncoder { + return _sliceEncoderPool.Get() +} + +func putSliceEncoder(e *sliceArrayEncoder) { + e.elems = e.elems[:0] + _sliceEncoderPool.Put(e) +} + +type consoleEncoder struct { + *jsonEncoder +} + +// NewConsoleEncoder creates an encoder whose output is designed for human - +// rather than machine - consumption. It serializes the core log entry data +// (message, level, timestamp, etc.) in a plain-text format and leaves the +// structured context as JSON. +// +// Note that although the console encoder doesn't use the keys specified in the +// encoder configuration, it will omit any element whose key is set to the empty +// string. +func NewConsoleEncoder(cfg EncoderConfig) Encoder { + if cfg.ConsoleSeparator == "" { + // Use a default delimiter of '\t' for backwards compatibility + cfg.ConsoleSeparator = "\t" + } + return consoleEncoder{newJSONEncoder(cfg, true)} +} + +func (c consoleEncoder) Clone() Encoder { + return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} +} + +func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + line := bufferpool.Get() + + // We don't want the entry's metadata to be quoted and escaped (if it's + // encoded as strings), which means that we can't use the JSON encoder. The + // simplest option is to use the memory encoder and fmt.Fprint. + // + // If this ever becomes a performance bottleneck, we can implement + // ArrayEncoder for our plain-text format. + arr := getSliceEncoder() + if c.TimeKey != "" && c.EncodeTime != nil { + c.EncodeTime(ent.Time, arr) + } + if c.LevelKey != "" && c.EncodeLevel != nil { + c.EncodeLevel(ent.Level, arr) + } + if ent.LoggerName != "" && c.NameKey != "" { + nameEncoder := c.EncodeName + + if nameEncoder == nil { + // Fall back to FullNameEncoder for backward compatibility. + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, arr) + } + if ent.Caller.Defined { + if c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + if c.FunctionKey != "" { + arr.AppendString(ent.Caller.Function) + } + } + for i := range arr.elems { + if i > 0 { + line.AppendString(c.ConsoleSeparator) + } + fmt.Fprint(line, arr.elems[i]) + } + putSliceEncoder(arr) + + // Add the message itself. + if c.MessageKey != "" { + c.addSeparatorIfNecessary(line) + line.AppendString(ent.Message) + } + + // Add any structured context. + c.writeContext(line, fields) + + // If there's no stacktrace key, honor that; this allows users to force + // single-line output. + if ent.Stack != "" && c.StacktraceKey != "" { + line.AppendByte('\n') + line.AppendString(ent.Stack) + } + + line.AppendString(c.LineEnding) + return line, nil +} + +func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { + context := c.jsonEncoder.Clone().(*jsonEncoder) + defer func() { + // putJSONEncoder assumes the buffer is still used, but we write out the buffer so + // we can free it. 
+ context.buf.Free() + putJSONEncoder(context) + }() + + addFields(context, extra) + context.closeOpenNamespaces() + if context.buf.Len() == 0 { + return + } + + c.addSeparatorIfNecessary(line) + line.AppendByte('{') + line.Write(context.buf.Bytes()) + line.AppendByte('}') +} + +func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) { + if line.Len() > 0 { + line.AppendString(c.ConsoleSeparator) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go new file mode 100644 index 0000000000..776e93f6f3 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -0,0 +1,122 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// Core is a minimal, fast logger interface. It's designed for library authors +// to wrap in a more user-friendly API. +type Core interface { + LevelEnabler + + // With adds structured context to the Core. + With([]Field) Core + // Check determines whether the supplied Entry should be logged (using the + // embedded LevelEnabler and possibly some extra logic). If the entry + // should be logged, the Core adds itself to the CheckedEntry and returns + // the result. + // + // Callers must use Check before calling Write. + Check(Entry, *CheckedEntry) *CheckedEntry + // Write serializes the Entry and any Fields supplied at the log site and + // writes them to their destination. + // + // If called, Write should always log the Entry and Fields; it should not + // replicate the logic of Check. + Write(Entry, []Field) error + // Sync flushes buffered logs (if any). + Sync() error +} + +type nopCore struct{} + +// NewNopCore returns a no-op Core. +func NewNopCore() Core { return nopCore{} } +func (nopCore) Enabled(Level) bool { return false } +func (n nopCore) With([]Field) Core { return n } +func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } +func (nopCore) Write(Entry, []Field) error { return nil } +func (nopCore) Sync() error { return nil } + +// NewCore creates a Core that writes logs to a WriteSyncer. 
+func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { + return &ioCore{ + LevelEnabler: enab, + enc: enc, + out: ws, + } +} + +type ioCore struct { + LevelEnabler + enc Encoder + out WriteSyncer +} + +var ( + _ Core = (*ioCore)(nil) + _ leveledEnabler = (*ioCore)(nil) +) + +func (c *ioCore) Level() Level { + return LevelOf(c.LevelEnabler) +} + +func (c *ioCore) With(fields []Field) Core { + clone := c.clone() + addFields(clone.enc, fields) + return clone +} + +func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *ioCore) Write(ent Entry, fields []Field) error { + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + _, err = c.out.Write(buf.Bytes()) + buf.Free() + if err != nil { + return err + } + if ent.Level > ErrorLevel { + // Since we may be crashing the program, sync the output. + // Ignore Sync errors, pending a clean solution to issue #370. + _ = c.Sync() + } + return nil +} + +func (c *ioCore) Sync() error { + return c.out.Sync() +} + +func (c *ioCore) clone() *ioCore { + return &ioCore{ + LevelEnabler: c.LevelEnabler, + enc: c.enc.Clone(), + out: c.out, + } +} diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go new file mode 100644 index 0000000000..31000e91f7 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zapcore defines and implements the low-level interfaces upon which +// zap is built. By providing alternate implementations of these interfaces, +// external packages can extend zap's capabilities. +package zapcore // import "go.uber.org/zap/zapcore" diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go new file mode 100644 index 0000000000..5769ff3e4e --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -0,0 +1,451 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
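Putting NewConsoleEncoder and NewCore together, a small hand-built logger might look like the sketch below. zap.NewDevelopmentEncoderConfig and zap.New live in the parent zap package and are assumed here, not defined in this hunk.

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig())
	core := zapcore.NewCore(enc, zapcore.Lock(zapcore.AddSync(os.Stderr)), zapcore.DebugLevel)

	logger := zap.New(core)
	defer logger.Sync()

	// Entry metadata is rendered as tab-separated plain text; the structured
	// context below is appended as JSON.
	logger.Debug("hand-built logger", zap.String("user", "alice"))
}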
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/json" + "io" + "time" + + "go.uber.org/zap/buffer" +) + +// DefaultLineEnding defines the default line ending when writing logs. +// Alternate line endings specified in EncoderConfig can override this +// behavior. +const DefaultLineEnding = "\n" + +// OmitKey defines the key to use when callers want to remove a key from log output. +const OmitKey = "" + +// A LevelEncoder serializes a Level to a primitive type. +type LevelEncoder func(Level, PrimitiveArrayEncoder) + +// LowercaseLevelEncoder serializes a Level to a lowercase string. For example, +// InfoLevel is serialized to "info". +func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.String()) +} + +// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring. +// For example, InfoLevel is serialized to "info" and colored blue. +func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToLowercaseColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.String()) + } + enc.AppendString(s) +} + +// CapitalLevelEncoder serializes a Level to an all-caps string. For example, +// InfoLevel is serialized to "INFO". +func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.CapitalString()) +} + +// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color. +// For example, InfoLevel is serialized to "INFO" and colored blue. +func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToCapitalColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.CapitalString()) + } + enc.AppendString(s) +} + +// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to +// CapitalLevelEncoder, "coloredCapital" is unmarshaled to CapitalColorLevelEncoder, +// "colored" is unmarshaled to LowercaseColorLevelEncoder, and anything else +// is unmarshaled to LowercaseLevelEncoder. +func (e *LevelEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "capital": + *e = CapitalLevelEncoder + case "capitalColor": + *e = CapitalColorLevelEncoder + case "color": + *e = LowercaseColorLevelEncoder + default: + *e = LowercaseLevelEncoder + } + return nil +} + +// A TimeEncoder serializes a time.Time to a primitive type. 
+type TimeEncoder func(time.Time, PrimitiveArrayEncoder) + +// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds +// since the Unix epoch. +func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + sec := float64(nanos) / float64(time.Second) + enc.AppendFloat64(sec) +} + +// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of +// milliseconds since the Unix epoch. +func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + millis := float64(nanos) / float64(time.Millisecond) + enc.AppendFloat64(millis) +} + +// EpochNanosTimeEncoder serializes a time.Time to an integer number of +// nanoseconds since the Unix epoch. +func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + enc.AppendInt64(t.UnixNano()) +} + +func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) { + type appendTimeEncoder interface { + AppendTimeLayout(time.Time, string) + } + + if enc, ok := enc.(appendTimeEncoder); ok { + enc.AppendTimeLayout(t, layout) + return + } + + enc.AppendString(t.Format(layout)) +} + +// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string +// with millisecond precision. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc) +} + +// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, time.RFC3339, enc) +} + +// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string +// with nanosecond precision. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, time.RFC3339Nano, enc) +} + +// TimeEncoderOfLayout returns TimeEncoder which serializes a time.Time using +// given layout. +func TimeEncoderOfLayout(layout string) TimeEncoder { + return func(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, layout, enc) + } +} + +// UnmarshalText unmarshals text to a TimeEncoder. +// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder. +// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder. +// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder. +// "millis" is unmarshaled to EpochMillisTimeEncoder. +// "nanos" is unmarshaled to EpochNanosEncoder. +// Anything else is unmarshaled to EpochTimeEncoder. +func (e *TimeEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "rfc3339nano", "RFC3339Nano": + *e = RFC3339NanoTimeEncoder + case "rfc3339", "RFC3339": + *e = RFC3339TimeEncoder + case "iso8601", "ISO8601": + *e = ISO8601TimeEncoder + case "millis": + *e = EpochMillisTimeEncoder + case "nanos": + *e = EpochNanosTimeEncoder + default: + *e = EpochTimeEncoder + } + return nil +} + +// UnmarshalYAML unmarshals YAML to a TimeEncoder. +// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout. 
+// +// timeEncoder: +// layout: 06/01/02 03:04pm +// +// If value is string, it uses UnmarshalText. +// +// timeEncoder: iso8601 +func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error { + var o struct { + Layout string `json:"layout" yaml:"layout"` + } + if err := unmarshal(&o); err == nil { + *e = TimeEncoderOfLayout(o.Layout) + return nil + } + + var s string + if err := unmarshal(&s); err != nil { + return err + } + return e.UnmarshalText([]byte(s)) +} + +// UnmarshalJSON unmarshals JSON to a TimeEncoder as same way UnmarshalYAML does. +func (e *TimeEncoder) UnmarshalJSON(data []byte) error { + return e.UnmarshalYAML(func(v interface{}) error { + return json.Unmarshal(data, v) + }) +} + +// A DurationEncoder serializes a time.Duration to a primitive type. +type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) + +// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. +func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendFloat64(float64(d) / float64(time.Second)) +} + +// NanosDurationEncoder serializes a time.Duration to an integer number of +// nanoseconds elapsed. +func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendInt64(int64(d)) +} + +// MillisDurationEncoder serializes a time.Duration to an integer number of +// milliseconds elapsed. +func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendInt64(d.Nanoseconds() / 1e6) +} + +// StringDurationEncoder serializes a time.Duration using its built-in String +// method. +func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendString(d.String()) +} + +// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled +// to StringDurationEncoder, and anything else is unmarshaled to +// NanosDurationEncoder. +func (e *DurationEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "string": + *e = StringDurationEncoder + case "nanos": + *e = NanosDurationEncoder + case "ms": + *e = MillisDurationEncoder + default: + *e = SecondsDurationEncoder + } + return nil +} + +// A CallerEncoder serializes an EntryCaller to a primitive type. +type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) + +// FullCallerEncoder serializes a caller in /full/path/to/package/file:line +// format. +func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. + enc.AppendString(caller.String()) +} + +// ShortCallerEncoder serializes a caller in package/file:line format, trimming +// all but the final directory from the full path. +func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. + enc.AppendString(caller.TrimmedPath()) +} + +// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to +// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder. +func (e *CallerEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullCallerEncoder + default: + *e = ShortCallerEncoder + } + return nil +} + +// A NameEncoder serializes a period-separated logger name to a primitive +// type. +type NameEncoder func(string, PrimitiveArrayEncoder) + +// FullNameEncoder serializes the logger name as-is. 
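The level, time, duration, and caller encoders above are normally selected through an EncoderConfig (declared further down in this file). An illustrative sketch follows; the key names are arbitrary and the helpers from the parent zap package are assumed.

package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zapcore.EncoderConfig{
		MessageKey:     "msg",
		LevelKey:       "level",
		TimeKey:        "ts",
		CallerKey:      "caller",
		LineEnding:     zapcore.DefaultLineEnding,
		EncodeLevel:    zapcore.CapitalLevelEncoder,   // e.g. "INFO"
		EncodeTime:     zapcore.ISO8601TimeEncoder,    // e.g. "2024-02-25T21:03:33.000+0800"
		EncodeDuration: zapcore.StringDurationEncoder, // e.g. "1.5s"
		EncodeCaller:   zapcore.ShortCallerEncoder,    // e.g. "pkg/file.go:42"
	}

	core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.AddSync(os.Stdout), zapcore.InfoLevel)
	logger := zap.New(core, zap.AddCaller())
	logger.Info("configured encoders", zap.Duration("elapsed", 1500*time.Millisecond))
}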
+func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) { + enc.AppendString(loggerName) +} + +// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is +// unmarshaled to FullNameEncoder. +func (e *NameEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullNameEncoder + default: + *e = FullNameEncoder + } + return nil +} + +// An EncoderConfig allows users to configure the concrete encoders supplied by +// zapcore. +type EncoderConfig struct { + // Set the keys used for each log entry. If any key is empty, that portion + // of the entry is omitted. + MessageKey string `json:"messageKey" yaml:"messageKey"` + LevelKey string `json:"levelKey" yaml:"levelKey"` + TimeKey string `json:"timeKey" yaml:"timeKey"` + NameKey string `json:"nameKey" yaml:"nameKey"` + CallerKey string `json:"callerKey" yaml:"callerKey"` + FunctionKey string `json:"functionKey" yaml:"functionKey"` + StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` + SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"` + LineEnding string `json:"lineEnding" yaml:"lineEnding"` + // Configure the primitive representations of common complex types. For + // example, some users may want all time.Times serialized as floating-point + // seconds since epoch, while others may prefer ISO8601 strings. + EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` + EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` + EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` + EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` + // Unlike the other primitive type encoders, EncodeName is optional. The + // zero value falls back to FullNameEncoder. + EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` + // Configure the encoder for interface{} type objects. + // If not provided, objects are encoded using json.Encoder + NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"` + // Configures the field separator used by the console encoder. Defaults + // to tab. + ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"` +} + +// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a +// map- or struct-like object to the logging context. Like maps, ObjectEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ObjectEncoder interface { + // Logging-specific marshalers. + AddArray(key string, marshaler ArrayMarshaler) error + AddObject(key string, marshaler ObjectMarshaler) error + + // Built-in types. 
+ AddBinary(key string, value []byte) // for arbitrary bytes + AddByteString(key string, value []byte) // for UTF-8 encoded bytes + AddBool(key string, value bool) + AddComplex128(key string, value complex128) + AddComplex64(key string, value complex64) + AddDuration(key string, value time.Duration) + AddFloat64(key string, value float64) + AddFloat32(key string, value float32) + AddInt(key string, value int) + AddInt64(key string, value int64) + AddInt32(key string, value int32) + AddInt16(key string, value int16) + AddInt8(key string, value int8) + AddString(key, value string) + AddTime(key string, value time.Time) + AddUint(key string, value uint) + AddUint64(key string, value uint64) + AddUint32(key string, value uint32) + AddUint16(key string, value uint16) + AddUint8(key string, value uint8) + AddUintptr(key string, value uintptr) + + // AddReflected uses reflection to serialize arbitrary objects, so it can be + // slow and allocation-heavy. + AddReflected(key string, value interface{}) error + // OpenNamespace opens an isolated namespace where all subsequent fields will + // be added. Applications can use namespaces to prevent key collisions when + // injecting loggers into sub-components or third-party libraries. + OpenNamespace(key string) +} + +// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding +// array-like objects to the logging context. Of note, it supports mixed-type +// arrays even though they aren't typical in Go. Like slices, ArrayEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ArrayEncoder interface { + // Built-in types. + PrimitiveArrayEncoder + + // Time-related types. + AppendDuration(time.Duration) + AppendTime(time.Time) + + // Logging-specific marshalers. + AppendArray(ArrayMarshaler) error + AppendObject(ObjectMarshaler) error + + // AppendReflected uses reflection to serialize arbitrary objects, so it's + // slow and allocation-heavy. + AppendReflected(value interface{}) error +} + +// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals +// only in Go's built-in types. It's included only so that Duration- and +// TimeEncoders cannot trigger infinite recursion. +type PrimitiveArrayEncoder interface { + // Built-in types. + AppendBool(bool) + AppendByteString([]byte) // for UTF-8 encoded bytes + AppendComplex128(complex128) + AppendComplex64(complex64) + AppendFloat64(float64) + AppendFloat32(float32) + AppendInt(int) + AppendInt64(int64) + AppendInt32(int32) + AppendInt16(int16) + AppendInt8(int8) + AppendString(string) + AppendUint(uint) + AppendUint64(uint64) + AppendUint32(uint32) + AppendUint16(uint16) + AppendUint8(uint8) + AppendUintptr(uintptr) +} + +// Encoder is a format-agnostic interface for all log entry marshalers. Since +// log encoders don't need to support the same wide range of use cases as +// general-purpose marshalers, it's possible to make them faster and +// lower-allocation. +// +// Implementations of the ObjectEncoder interface's methods can, of course, +// freely modify the receiver. However, the Clone and EncodeEntry methods will +// be called concurrently and shouldn't modify the receiver. +type Encoder interface { + ObjectEncoder + + // Clone copies the encoder, ensuring that adding fields to the copy doesn't + // affect the original. + Clone() Encoder + + // EncodeEntry encodes an entry and fields, along with any accumulated + // context, into a byte buffer and returns it. 
Any fields that are empty, + // including fields on the `Entry` type, should be omitted. + EncodeEntry(Entry, []Field) (*buffer.Buffer, error) +} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go new file mode 100644 index 0000000000..459a5d7ce3 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -0,0 +1,298 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "runtime" + "strings" + "time" + + "go.uber.org/multierr" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/exit" + "go.uber.org/zap/internal/pool" +) + +var _cePool = pool.New(func() *CheckedEntry { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } +}) + +func getCheckedEntry() *CheckedEntry { + ce := _cePool.Get() + ce.reset() + return ce +} + +func putCheckedEntry(ce *CheckedEntry) { + if ce == nil { + return + } + _cePool.Put(ce) +} + +// NewEntryCaller makes an EntryCaller from the return signature of +// runtime.Caller. +func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { + if !ok { + return EntryCaller{} + } + return EntryCaller{ + PC: pc, + File: file, + Line: line, + Defined: true, + } +} + +// EntryCaller represents the caller of a logging function. +type EntryCaller struct { + Defined bool + PC uintptr + File string + Line int + Function string +} + +// String returns the full path and line number of the caller. +func (ec EntryCaller) String() string { + return ec.FullPath() +} + +// FullPath returns a /full/path/to/package/file:line description of the +// caller. +func (ec EntryCaller) FullPath() string { + if !ec.Defined { + return "undefined" + } + buf := bufferpool.Get() + buf.AppendString(ec.File) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// TrimmedPath returns a package/file:line description of the caller, +// preserving only the leaf directory name and file name. +func (ec EntryCaller) TrimmedPath() string { + if !ec.Defined { + return "undefined" + } + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. 
+ // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + // Find the last separator. + // + idx := strings.LastIndexByte(ec.File, '/') + if idx == -1 { + return ec.FullPath() + } + // Find the penultimate separator. + idx = strings.LastIndexByte(ec.File[:idx], '/') + if idx == -1 { + return ec.FullPath() + } + buf := bufferpool.Get() + // Keep everything after the penultimate separator. + buf.AppendString(ec.File[idx+1:]) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// An Entry represents a complete log message. The entry's structured context +// is already serialized, but the log level, time, message, and call site +// information are available for inspection and modification. Any fields left +// empty will be omitted when encoding. +// +// Entries are pooled, so any functions that accept them MUST be careful not to +// retain references to them. +type Entry struct { + Level Level + Time time.Time + LoggerName string + Message string + Caller EntryCaller + Stack string +} + +// CheckWriteHook is a custom action that may be executed after an entry is +// written. +// +// Register one on a CheckedEntry with the After method. +// +// if ce := logger.Check(...); ce != nil { +// ce = ce.After(hook) +// ce.Write(...) +// } +// +// You can configure the hook for Fatal log statements at the logger level with +// the zap.WithFatalHook option. +type CheckWriteHook interface { + // OnWrite is invoked with the CheckedEntry that was written and a list + // of fields added with that entry. + // + // The list of fields DOES NOT include fields that were already added + // to the logger with the With method. + OnWrite(*CheckedEntry, []Field) +} + +// CheckWriteAction indicates what action to take after a log entry is +// processed. Actions are ordered in increasing severity. +type CheckWriteAction uint8 + +const ( + // WriteThenNoop indicates that nothing special needs to be done. It's the + // default behavior. + WriteThenNoop CheckWriteAction = iota + // WriteThenGoexit runs runtime.Goexit after Write. + WriteThenGoexit + // WriteThenPanic causes a panic after Write. + WriteThenPanic + // WriteThenFatal causes an os.Exit(1) after Write. + WriteThenFatal +) + +// OnWrite implements the OnWrite method to keep CheckWriteAction compatible +// with the new CheckWriteHook interface which deprecates CheckWriteAction. +func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) { + switch a { + case WriteThenGoexit: + runtime.Goexit() + case WriteThenPanic: + panic(ce.Message) + case WriteThenFatal: + exit.With(1) + } +} + +var _ CheckWriteHook = CheckWriteAction(0) + +// CheckedEntry is an Entry together with a collection of Cores that have +// already agreed to log it. +// +// CheckedEntry references should be created by calling AddCore or After on a +// nil *CheckedEntry. References are returned to a pool after Write, and MUST +// NOT be retained after calling their Write method. 
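The Check-before-Write contract documented above is what lets call sites skip building expensive fields for disabled levels. A brief sketch; zap.NewExample and the expensiveDump helper are illustrative assumptions, not part of this patch.

package main

import "go.uber.org/zap"

// expensiveDump stands in for any costly computation that should only run
// when the entry will actually be written.
func expensiveDump() string { return "..." }

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// Check returns nil when the level is disabled, so the expensive field is
	// never built in that case.
	if ce := logger.Check(zap.DebugLevel, "state dump"); ce != nil {
		ce.Write(zap.String("state", expensiveDump()))
	}
}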
+type CheckedEntry struct { + Entry + ErrorOutput WriteSyncer + dirty bool // best-effort detection of pool misuse + after CheckWriteHook + cores []Core +} + +func (ce *CheckedEntry) reset() { + ce.Entry = Entry{} + ce.ErrorOutput = nil + ce.dirty = false + ce.after = nil + for i := range ce.cores { + // don't keep references to cores + ce.cores[i] = nil + } + ce.cores = ce.cores[:0] +} + +// Write writes the entry to the stored Cores, returns any errors, and returns +// the CheckedEntry reference to a pool for immediate re-use. Finally, it +// executes any required CheckWriteAction. +func (ce *CheckedEntry) Write(fields ...Field) { + if ce == nil { + return + } + + if ce.dirty { + if ce.ErrorOutput != nil { + // Make a best effort to detect unsafe re-use of this CheckedEntry. + // If the entry is dirty, log an internal error; because the + // CheckedEntry is being used after it was returned to the pool, + // the message may be an amalgamation from multiple call sites. + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) + _ = ce.ErrorOutput.Sync() // ignore error + } + return + } + ce.dirty = true + + var err error + for i := range ce.cores { + err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) + } + if err != nil && ce.ErrorOutput != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) + _ = ce.ErrorOutput.Sync() // ignore error + } + + hook := ce.after + if hook != nil { + hook.OnWrite(ce, fields) + } + putCheckedEntry(ce) +} + +// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be +// used by Core.Check implementations, and is safe to call on nil CheckedEntry +// references. +func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.cores = append(ce.cores, core) + return ce +} + +// Should sets this CheckedEntry's CheckWriteAction, which controls whether a +// Core will panic or fatal after writing this log entry. Like AddCore, it's +// safe to call on nil CheckedEntry references. +// +// Deprecated: Use [CheckedEntry.After] instead. +func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + return ce.After(ent, should) +} + +// After sets this CheckEntry's CheckWriteHook, which will be called after this +// log entry has been written. It's safe to call this on nil CheckedEntry +// references. +func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.after = hook + return ce +} diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go new file mode 100644 index 0000000000..c40df13269 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -0,0 +1,136 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "reflect" + + "go.uber.org/zap/internal/pool" +) + +// Encodes the given error into fields of an object. A field with the given +// name is added for the error message. +// +// If the error implements fmt.Formatter, a field with the name ${key}Verbose +// is also added with the full verbose error message. +// +// Finally, if the error implements errorGroup (from go.uber.org/multierr) or +// causer (from github.com/pkg/errors), a ${key}Causes field is added with an +// array of objects containing the errors this error was comprised of. +// +// { +// "error": err.Error(), +// "errorVerbose": fmt.Sprintf("%+v", err), +// "errorCauses": [ +// ... +// ], +// } +func encodeError(key string, err error, enc ObjectEncoder) (retErr error) { + // Try to capture panics (from nil references or otherwise) when calling + // the Error() method + defer func() { + if rerr := recover(); rerr != nil { + // If it's a nil pointer, just say "". The likeliest causes are a + // error that fails to guard against nil or a nil pointer for a + // value receiver, and in either case, "" is a nice result. + if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + enc.AddString(key, "") + return + } + + retErr = fmt.Errorf("PANIC=%v", rerr) + } + }() + + basic := err.Error() + enc.AddString(key, basic) + + switch e := err.(type) { + case errorGroup: + return enc.AddArray(key+"Causes", errArray(e.Errors())) + case fmt.Formatter: + verbose := fmt.Sprintf("%+v", e) + if verbose != basic { + // This is a rich error type, like those produced by + // github.com/pkg/errors. + enc.AddString(key+"Verbose", verbose) + } + } + return nil +} + +type errorGroup interface { + // Provides read-only access to the underlying list of errors, preferably + // without causing any allocs. + Errors() []error +} + +// Note that errArray and errArrayElem are very similar to the version +// implemented in the top-level error.go file. We can't re-use this because +// that would require exporting errArray as part of the zapcore API. + +// Encodes a list of errors using the standard error encoding logic. +type errArray []error + +func (errs errArray) MarshalLogArray(arr ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + + el := newErrArrayElem(errs[i]) + err := arr.AppendObject(el) + el.Free() + if err != nil { + return err + } + } + return nil +} + +var _errArrayElemPool = pool.New(func() *errArrayElem { + return &errArrayElem{} +}) + +// Encodes any error into a {"error": ...} re-using the same errors logic. +// +// May be passed in place of an array to build a single-element array. 
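At the zap level, this error-encoding logic is reached through the Error field constructor. A hedged sketch of what a caller sees; which of the documented keys appear depends on the concrete error type, as described above.

package main

import (
	"fmt"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	err := fmt.Errorf("open config: %w", fmt.Errorf("permission denied"))

	// Emits an "error" key with err.Error(); an "errorVerbose" key is added
	// only for errors whose fmt.Formatter output differs from the basic message.
	logger.Error("startup failed", zap.Error(err))
}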
+type errArrayElem struct{ err error } + +func newErrArrayElem(err error) *errArrayElem { + e := _errArrayElemPool.Get() + e.err = err + return e +} + +func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error { + return arr.AppendObject(e) +} + +func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error { + return encodeError("error", e.err, enc) +} + +func (e *errArrayElem) Free() { + e.err = nil + _errArrayElemPool.Put(e) +} diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go new file mode 100644 index 0000000000..95bdb0a126 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -0,0 +1,233 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "fmt" + "math" + "reflect" + "time" +) + +// A FieldType indicates which member of the Field union struct should be used +// and how it should be serialized. +type FieldType uint8 + +const ( + // UnknownType is the default field type. Attempting to add it to an encoder will panic. + UnknownType FieldType = iota + // ArrayMarshalerType indicates that the field carries an ArrayMarshaler. + ArrayMarshalerType + // ObjectMarshalerType indicates that the field carries an ObjectMarshaler. + ObjectMarshalerType + // BinaryType indicates that the field carries an opaque binary blob. + BinaryType + // BoolType indicates that the field carries a bool. + BoolType + // ByteStringType indicates that the field carries UTF-8 encoded bytes. + ByteStringType + // Complex128Type indicates that the field carries a complex128. + Complex128Type + // Complex64Type indicates that the field carries a complex128. + Complex64Type + // DurationType indicates that the field carries a time.Duration. + DurationType + // Float64Type indicates that the field carries a float64. + Float64Type + // Float32Type indicates that the field carries a float32. + Float32Type + // Int64Type indicates that the field carries an int64. + Int64Type + // Int32Type indicates that the field carries an int32. + Int32Type + // Int16Type indicates that the field carries an int16. + Int16Type + // Int8Type indicates that the field carries an int8. + Int8Type + // StringType indicates that the field carries a string. + StringType + // TimeType indicates that the field carries a time.Time that is + // representable by a UnixNano() stored as an int64. 
+ TimeType + // TimeFullType indicates that the field carries a time.Time stored as-is. + TimeFullType + // Uint64Type indicates that the field carries a uint64. + Uint64Type + // Uint32Type indicates that the field carries a uint32. + Uint32Type + // Uint16Type indicates that the field carries a uint16. + Uint16Type + // Uint8Type indicates that the field carries a uint8. + Uint8Type + // UintptrType indicates that the field carries a uintptr. + UintptrType + // ReflectType indicates that the field carries an interface{}, which should + // be serialized using reflection. + ReflectType + // NamespaceType signals the beginning of an isolated namespace. All + // subsequent fields should be added to the new namespace. + NamespaceType + // StringerType indicates that the field carries a fmt.Stringer. + StringerType + // ErrorType indicates that the field carries an error. + ErrorType + // SkipType indicates that the field is a no-op. + SkipType + + // InlineMarshalerType indicates that the field carries an ObjectMarshaler + // that should be inlined. + InlineMarshalerType +) + +// A Field is a marshaling operation used to add a key-value pair to a logger's +// context. Most fields are lazily marshaled, so it's inexpensive to add fields +// to disabled debug-level log statements. +type Field struct { + Key string + Type FieldType + Integer int64 + String string + Interface interface{} +} + +// AddTo exports a field through the ObjectEncoder interface. It's primarily +// useful to library authors, and shouldn't be necessary in most applications. +func (f Field) AddTo(enc ObjectEncoder) { + var err error + + switch f.Type { + case ArrayMarshalerType: + err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) + case ObjectMarshalerType: + err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case InlineMarshalerType: + err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc) + case BinaryType: + enc.AddBinary(f.Key, f.Interface.([]byte)) + case BoolType: + enc.AddBool(f.Key, f.Integer == 1) + case ByteStringType: + enc.AddByteString(f.Key, f.Interface.([]byte)) + case Complex128Type: + enc.AddComplex128(f.Key, f.Interface.(complex128)) + case Complex64Type: + enc.AddComplex64(f.Key, f.Interface.(complex64)) + case DurationType: + enc.AddDuration(f.Key, time.Duration(f.Integer)) + case Float64Type: + enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) + case Float32Type: + enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) + case Int64Type: + enc.AddInt64(f.Key, f.Integer) + case Int32Type: + enc.AddInt32(f.Key, int32(f.Integer)) + case Int16Type: + enc.AddInt16(f.Key, int16(f.Integer)) + case Int8Type: + enc.AddInt8(f.Key, int8(f.Integer)) + case StringType: + enc.AddString(f.Key, f.String) + case TimeType: + if f.Interface != nil { + enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) + } else { + // Fall back to UTC if location is nil. 
+ enc.AddTime(f.Key, time.Unix(0, f.Integer)) + } + case TimeFullType: + enc.AddTime(f.Key, f.Interface.(time.Time)) + case Uint64Type: + enc.AddUint64(f.Key, uint64(f.Integer)) + case Uint32Type: + enc.AddUint32(f.Key, uint32(f.Integer)) + case Uint16Type: + enc.AddUint16(f.Key, uint16(f.Integer)) + case Uint8Type: + enc.AddUint8(f.Key, uint8(f.Integer)) + case UintptrType: + enc.AddUintptr(f.Key, uintptr(f.Integer)) + case ReflectType: + err = enc.AddReflected(f.Key, f.Interface) + case NamespaceType: + enc.OpenNamespace(f.Key) + case StringerType: + err = encodeStringer(f.Key, f.Interface, enc) + case ErrorType: + err = encodeError(f.Key, f.Interface.(error), enc) + case SkipType: + break + default: + panic(fmt.Sprintf("unknown field type: %v", f)) + } + + if err != nil { + enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error()) + } +} + +// Equals returns whether two fields are equal. For non-primitive types such as +// errors, marshalers, or reflect types, it uses reflect.DeepEqual. +func (f Field) Equals(other Field) bool { + if f.Type != other.Type { + return false + } + if f.Key != other.Key { + return false + } + + switch f.Type { + case BinaryType, ByteStringType: + return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte)) + case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType: + return reflect.DeepEqual(f.Interface, other.Interface) + default: + return f == other + } +} + +func addFields(enc ObjectEncoder, fields []Field) { + for i := range fields { + fields[i].AddTo(enc) + } +} + +func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) { + // Try to capture panics (from nil references or otherwise) when calling + // the String() method, similar to https://golang.org/src/fmt/print.go#L540 + defer func() { + if err := recover(); err != nil { + // If it's a nil pointer, just say "". The likeliest causes are a + // Stringer that fails to guard against nil or a nil pointer for a + // value receiver, and in either case, "" is a nice result. + if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() { + enc.AddString(key, "") + return + } + + retErr = fmt.Errorf("PANIC=%v", err) + } + }() + + enc.AddString(key, stringer.(fmt.Stringer).String()) + return nil +} diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go new file mode 100644 index 0000000000..198def9917 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/hook.go @@ -0,0 +1,77 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
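For reference, the field constructors in the parent zap package just fill in Field values of the kinds listed above; the sketch below (not part of the patch) shows the string case both ways.

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	logger := zap.NewExample()

	// zap.Field is an alias for zapcore.Field, so the constructor and the
	// hand-built value are interchangeable.
	viaConstructor := zap.String("user", "alice")
	byHand := zapcore.Field{Key: "service", Type: zapcore.StringType, String: "api"}

	logger.Info("two string fields", viaConstructor, byHand)
}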
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type hooked struct { + Core + funcs []func(Entry) error +} + +var ( + _ Core = (*hooked)(nil) + _ leveledEnabler = (*hooked)(nil) +) + +// RegisterHooks wraps a Core and runs a collection of user-defined callback +// hooks each time a message is logged. Execution of the callbacks is blocking. +// +// This offers users an easy way to register simple callbacks (e.g., metrics +// collection) without implementing the full Core interface. +func RegisterHooks(core Core, hooks ...func(Entry) error) Core { + funcs := append([]func(Entry) error{}, hooks...) + return &hooked{ + Core: core, + funcs: funcs, + } +} + +func (h *hooked) Level() Level { + return LevelOf(h.Core) +} + +func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + // Let the wrapped Core decide whether to log this message or not. This + // also gives the downstream a chance to register itself directly with the + // CheckedEntry. + if downstream := h.Core.Check(ent, ce); downstream != nil { + return downstream.AddCore(ent, h) + } + return ce +} + +func (h *hooked) With(fields []Field) Core { + return &hooked{ + Core: h.Core.With(fields), + funcs: h.funcs, + } +} + +func (h *hooked) Write(ent Entry, _ []Field) error { + // Since our downstream had a chance to register itself directly with the + // CheckedMessage, we don't need to call it here. + var err error + for i := range h.funcs { + err = multierr.Append(err, h.funcs[i](ent)) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go new file mode 100644 index 0000000000..7a11237ae9 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/increase_level.go @@ -0,0 +1,75 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "fmt" + +type levelFilterCore struct { + core Core + level LevelEnabler +} + +var ( + _ Core = (*levelFilterCore)(nil) + _ leveledEnabler = (*levelFilterCore)(nil) +) + +// NewIncreaseLevelCore creates a core that can be used to increase the level of +// an existing Core. 
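A small usage sketch for RegisterHooks above; the warnings counter is purely illustrative (real code would likely use an atomic or a metrics client), and the encoder helpers from the parent package are assumed.

package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	var warnings int // illustrative only; hooks run synchronously on the logging goroutine

	base := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stdout),
		zapcore.InfoLevel,
	)
	hooked := zapcore.RegisterHooks(base, func(e zapcore.Entry) error {
		if e.Level >= zapcore.WarnLevel {
			warnings++
		}
		return nil
	})

	logger := zap.New(hooked)
	logger.Warn("disk almost full")
	fmt.Println("warnings:", warnings) // 1
}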
It cannot be used to decrease the logging level, as it acts +// as a filter before calling the underlying core. If level decreases the log level, +// an error is returned. +func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) { + for l := _maxLevel; l >= _minLevel; l-- { + if !core.Enabled(l) && level.Enabled(l) { + return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l) + } + } + + return &levelFilterCore{core, level}, nil +} + +func (c *levelFilterCore) Enabled(lvl Level) bool { + return c.level.Enabled(lvl) +} + +func (c *levelFilterCore) Level() Level { + return LevelOf(c.level) +} + +func (c *levelFilterCore) With(fields []Field) Core { + return &levelFilterCore{c.core.With(fields), c.level} +} + +func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !c.Enabled(ent.Level) { + return ce + } + + return c.core.Check(ent, ce) +} + +func (c *levelFilterCore) Write(ent Entry, fields []Field) error { + return c.core.Write(ent, fields) +} + +func (c *levelFilterCore) Sync() error { + return c.core.Sync() +} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go new file mode 100644 index 0000000000..c8ab86979b --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -0,0 +1,583 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/base64" + "math" + "time" + "unicode/utf8" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" +) + +// For JSON-escaping; see jsonEncoder.safeAddString below. +const _hex = "0123456789abcdef" + +var _jsonPool = pool.New(func() *jsonEncoder { + return &jsonEncoder{} +}) + +func putJSONEncoder(enc *jsonEncoder) { + if enc.reflectBuf != nil { + enc.reflectBuf.Free() + } + enc.EncoderConfig = nil + enc.buf = nil + enc.spaced = false + enc.openNamespaces = 0 + enc.reflectBuf = nil + enc.reflectEnc = nil + _jsonPool.Put(enc) +} + +type jsonEncoder struct { + *EncoderConfig + buf *buffer.Buffer + spaced bool // include spaces after colons and commas + openNamespaces int + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc ReflectedEncoder +} + +// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder +// appropriately escapes all field keys and values. 
+// +// Note that the encoder doesn't deduplicate keys, so it's possible to produce +// a message like +// +// {"foo":"bar","foo":"baz"} +// +// This is permitted by the JSON specification, but not encouraged. Many +// libraries will ignore duplicate key-value pairs (typically keeping the last +// pair) when unmarshaling, but users should attempt to avoid adding duplicate +// keys. +func NewJSONEncoder(cfg EncoderConfig) Encoder { + return newJSONEncoder(cfg, false) +} + +func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { + if cfg.SkipLineEnding { + cfg.LineEnding = "" + } else if cfg.LineEnding == "" { + cfg.LineEnding = DefaultLineEnding + } + + // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default + if cfg.NewReflectedEncoder == nil { + cfg.NewReflectedEncoder = defaultReflectedEncoder + } + + return &jsonEncoder{ + EncoderConfig: &cfg, + buf: bufferpool.Get(), + spaced: spaced, + } +} + +func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { + enc.addKey(key) + return enc.AppendArray(arr) +} + +func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { + enc.addKey(key) + return enc.AppendObject(obj) +} + +func (enc *jsonEncoder) AddBinary(key string, val []byte) { + enc.AddString(key, base64.StdEncoding.EncodeToString(val)) +} + +func (enc *jsonEncoder) AddByteString(key string, val []byte) { + enc.addKey(key) + enc.AppendByteString(val) +} + +func (enc *jsonEncoder) AddBool(key string, val bool) { + enc.addKey(key) + enc.AppendBool(val) +} + +func (enc *jsonEncoder) AddComplex128(key string, val complex128) { + enc.addKey(key) + enc.AppendComplex128(val) +} + +func (enc *jsonEncoder) AddComplex64(key string, val complex64) { + enc.addKey(key) + enc.AppendComplex64(val) +} + +func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { + enc.addKey(key) + enc.AppendDuration(val) +} + +func (enc *jsonEncoder) AddFloat64(key string, val float64) { + enc.addKey(key) + enc.AppendFloat64(val) +} + +func (enc *jsonEncoder) AddFloat32(key string, val float32) { + enc.addKey(key) + enc.AppendFloat32(val) +} + +func (enc *jsonEncoder) AddInt64(key string, val int64) { + enc.addKey(key) + enc.AppendInt64(val) +} + +func (enc *jsonEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = bufferpool.Get() + enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf) + } else { + enc.reflectBuf.Reset() + } +} + +var nullLiteralBytes = []byte("null") + +// Only invoke the standard JSON encoder if there is actually something to +// encode; otherwise write JSON null literal directly. 
+func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) { + if obj == nil { + return nullLiteralBytes, nil + } + enc.resetReflectBuf() + if err := enc.reflectEnc.Encode(obj); err != nil { + return nil, err + } + enc.reflectBuf.TrimNewline() + return enc.reflectBuf.Bytes(), nil +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + valueBytes, err := enc.encodeReflected(obj) + if err != nil { + return err + } + enc.addKey(key) + _, err = enc.buf.Write(valueBytes) + return err +} + +func (enc *jsonEncoder) OpenNamespace(key string) { + enc.addKey(key) + enc.buf.AppendByte('{') + enc.openNamespaces++ +} + +func (enc *jsonEncoder) AddString(key, val string) { + enc.addKey(key) + enc.AppendString(val) +} + +func (enc *jsonEncoder) AddTime(key string, val time.Time) { + enc.addKey(key) + enc.AppendTime(val) +} + +func (enc *jsonEncoder) AddUint64(key string, val uint64) { + enc.addKey(key) + enc.AppendUint64(val) +} + +func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('[') + err := arr.MarshalLogArray(enc) + enc.buf.AppendByte(']') + return err +} + +func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { + // Close ONLY new openNamespaces that are created during + // AppendObject(). + old := enc.openNamespaces + enc.openNamespaces = 0 + enc.addElementSeparator() + enc.buf.AppendByte('{') + err := obj.MarshalLogObject(enc) + enc.buf.AppendByte('}') + enc.closeOpenNamespaces() + enc.openNamespaces = old + return err +} + +func (enc *jsonEncoder) AppendBool(val bool) { + enc.addElementSeparator() + enc.buf.AppendBool(val) +} + +func (enc *jsonEncoder) AppendByteString(val []byte) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddByteString(val) + enc.buf.AppendByte('"') +} + +// appendComplex appends the encoded form of the provided complex128 value. +// precision specifies the encoding precision for the real and imaginary +// components of the complex number. +func (enc *jsonEncoder) appendComplex(val complex128, precision int) { + enc.addElementSeparator() + // Cast to a platform-independent, fixed-size type. + r, i := float64(real(val)), float64(imag(val)) + enc.buf.AppendByte('"') + // Because we're always in a quoted string, we can use strconv without + // special-casing NaN and +/-Inf. + enc.buf.AppendFloat(r, precision) + // If imaginary part is less than 0, minus (-) sign is added by default + // by AppendFloat. + if i >= 0 { + enc.buf.AppendByte('+') + } + enc.buf.AppendFloat(i, precision) + enc.buf.AppendByte('i') + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + if e := enc.EncodeDuration; e != nil { + e(val, enc) + } + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep + // JSON valid. 
+ enc.AppendInt64(int64(val)) + } +} + +func (enc *jsonEncoder) AppendInt64(val int64) { + enc.addElementSeparator() + enc.buf.AppendInt(val) +} + +func (enc *jsonEncoder) AppendReflected(val interface{}) error { + valueBytes, err := enc.encodeReflected(val) + if err != nil { + return err + } + enc.addElementSeparator() + _, err = enc.buf.Write(valueBytes) + return err +} + +func (enc *jsonEncoder) AppendString(val string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.buf.AppendTime(time, layout) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + if e := enc.EncodeTime; e != nil { + e(val, enc) + } + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep + // output JSON valid. + enc.AppendInt64(val.UnixNano()) + } +} + +func (enc *jsonEncoder) AppendUint64(val uint64) { + enc.addElementSeparator() + enc.buf.AppendUint(val) +} + +func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) } +func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) } +func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *jsonEncoder) Clone() Encoder { + clone := enc.clone() + clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *jsonEncoder) clone() *jsonEncoder { + clone := _jsonPool.Get() + clone.EncoderConfig = enc.EncoderConfig + clone.spaced = enc.spaced + clone.openNamespaces = enc.openNamespaces + clone.buf = bufferpool.Get() + return clone +} + +func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + final := enc.clone() + final.buf.AppendByte('{') + + if final.LevelKey != "" && final.EncodeLevel != nil { + final.addKey(final.LevelKey) + 
cur := final.buf.Len() + final.EncodeLevel(ent.Level, final) + if cur == final.buf.Len() { + // User-supplied EncodeLevel was a no-op. Fall back to strings to keep + // output JSON valid. + final.AppendString(ent.Level.String()) + } + } + if final.TimeKey != "" { + final.AddTime(final.TimeKey, ent.Time) + } + if ent.LoggerName != "" && final.NameKey != "" { + final.addKey(final.NameKey) + cur := final.buf.Len() + nameEncoder := final.EncodeName + + // if no name encoder provided, fall back to FullNameEncoder for backwards + // compatibility + if nameEncoder == nil { + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, final) + if cur == final.buf.Len() { + // User-supplied EncodeName was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.LoggerName) + } + } + if ent.Caller.Defined { + if final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + } + if final.FunctionKey != "" { + final.addKey(final.FunctionKey) + final.AppendString(ent.Caller.Function) + } + } + if final.MessageKey != "" { + final.addKey(enc.MessageKey) + final.AppendString(ent.Message) + } + if enc.buf.Len() > 0 { + final.addElementSeparator() + final.buf.Write(enc.buf.Bytes()) + } + addFields(final, fields) + final.closeOpenNamespaces() + if ent.Stack != "" && final.StacktraceKey != "" { + final.AddString(final.StacktraceKey, ent.Stack) + } + final.buf.AppendByte('}') + final.buf.AppendString(final.LineEnding) + + ret := final.buf + putJSONEncoder(final) + return ret, nil +} + +func (enc *jsonEncoder) truncate() { + enc.buf.Reset() +} + +func (enc *jsonEncoder) closeOpenNamespaces() { + for i := 0; i < enc.openNamespaces; i++ { + enc.buf.AppendByte('}') + } + enc.openNamespaces = 0 +} + +func (enc *jsonEncoder) addKey(key string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(key) + enc.buf.AppendByte('"') + enc.buf.AppendByte(':') + if enc.spaced { + enc.buf.AppendByte(' ') + } +} + +func (enc *jsonEncoder) addElementSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + switch enc.buf.Bytes()[last] { + case '{', '[', ':', ',', ' ': + return + default: + enc.buf.AppendByte(',') + if enc.spaced { + enc.buf.AppendByte(' ') + } + } +} + +func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { + enc.addElementSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString(`"NaN"`) + case math.IsInf(val, 1): + enc.buf.AppendString(`"+Inf"`) + case math.IsInf(val, -1): + enc.buf.AppendString(`"-Inf"`) + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +// safeAddString JSON-escapes a string and appends it to the internal buffer. +// Unlike the standard library's encoder, it doesn't attempt to protect the +// user from browser vulnerabilities or JSONP-related problems. +func (enc *jsonEncoder) safeAddString(s string) { + safeAppendStringLike( + (*buffer.Buffer).AppendString, + utf8.DecodeRuneInString, + enc.buf, + s, + ) +} + +// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. +func (enc *jsonEncoder) safeAddByteString(s []byte) { + safeAppendStringLike( + (*buffer.Buffer).AppendBytes, + utf8.DecodeRune, + enc.buf, + s, + ) +} + +// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString. 
+// It appends a string or byte slice to the buffer, escaping all special characters. +func safeAppendStringLike[S []byte | string]( + // appendTo appends this string-like object to the buffer. + appendTo func(*buffer.Buffer, S), + // decodeRune decodes the next rune from the string-like object + // and returns its value and width in bytes. + decodeRune func(S) (rune, int), + buf *buffer.Buffer, + s S, +) { + // The encoding logic below works by skipping over characters + // that can be safely copied as-is, + // until a character is found that needs special handling. + // At that point, we copy everything we've seen so far, + // and then handle that special character. + // + // last is the index of the last byte that was copied to the buffer. + last := 0 + for i := 0; i < len(s); { + if s[i] >= utf8.RuneSelf { + // Character >= RuneSelf may be part of a multi-byte rune. + // They need to be decoded before we can decide how to handle them. + r, size := decodeRune(s[i:]) + if r != utf8.RuneError || size != 1 { + // No special handling required. + // Skip over this rune and continue. + i += size + continue + } + + // Invalid UTF-8 sequence. + // Replace it with the Unicode replacement character. + appendTo(buf, s[last:i]) + buf.AppendString(`\ufffd`) + + i++ + last = i + } else { + // Character < RuneSelf is a single-byte UTF-8 rune. + if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' { + // No escaping necessary. + // Skip over this character and continue. + i++ + continue + } + + // This character needs to be escaped. + appendTo(buf, s[last:i]) + switch s[i] { + case '\\', '"': + buf.AppendByte('\\') + buf.AppendByte(s[i]) + case '\n': + buf.AppendByte('\\') + buf.AppendByte('n') + case '\r': + buf.AppendByte('\\') + buf.AppendByte('r') + case '\t': + buf.AppendByte('\\') + buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + buf.AppendString(`\u00`) + buf.AppendByte(_hex[s[i]>>4]) + buf.AppendByte(_hex[s[i]&0xF]) + } + + i++ + last = i + } + } + + // add remaining + appendTo(buf, s[last:]) +} diff --git a/vendor/go.uber.org/zap/zapcore/lazy_with.go b/vendor/go.uber.org/zap/zapcore/lazy_with.go new file mode 100644 index 0000000000..05288d6a88 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/lazy_with.go @@ -0,0 +1,54 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import "sync" + +type lazyWithCore struct { + Core + sync.Once + fields []Field +} + +// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if +// the logger is written to (or is further chained in a lon-lazy manner). +func NewLazyWith(core Core, fields []Field) Core { + return &lazyWithCore{ + Core: core, + fields: fields, + } +} + +func (d *lazyWithCore) initOnce() { + d.Once.Do(func() { + d.Core = d.Core.With(d.fields) + }) +} + +func (d *lazyWithCore) With(fields []Field) Core { + d.initOnce() + return d.Core.With(fields) +} + +func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry { + d.initOnce() + return d.Core.Check(e, ce) +} diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go new file mode 100644 index 0000000000..e01a241316 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level.go @@ -0,0 +1,229 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "errors" + "fmt" +) + +var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level") + +// A Level is a logging priority. Higher levels are more important. +type Level int8 + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel Level = iota - 1 + // InfoLevel is the default logging priority. + InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel + + _minLevel = DebugLevel + _maxLevel = FatalLevel + + // InvalidLevel is an invalid value for Level. + // + // Core implementations may panic if they see messages of this level. + InvalidLevel = _maxLevel + 1 +) + +// ParseLevel parses a level based on the lower-case or all-caps ASCII +// representation of the log level. If the provided ASCII representation is +// invalid an error is returned. +// +// This is particularly useful when dealing with text input to configure log +// levels. 
+func ParseLevel(text string) (Level, error) { + var level Level + err := level.UnmarshalText([]byte(text)) + return level, err +} + +type leveledEnabler interface { + LevelEnabler + + Level() Level +} + +// LevelOf reports the minimum enabled log level for the given LevelEnabler +// from Zap's supported log levels, or [InvalidLevel] if none of them are +// enabled. +// +// A LevelEnabler may implement a 'Level() Level' method to override the +// behavior of this function. +// +// func (c *core) Level() Level { +// return c.currentLevel +// } +// +// It is recommended that [Core] implementations that wrap other cores use +// LevelOf to retrieve the level of the wrapped core. For example, +// +// func (c *coreWrapper) Level() Level { +// return zapcore.LevelOf(c.wrappedCore) +// } +func LevelOf(enab LevelEnabler) Level { + if lvler, ok := enab.(leveledEnabler); ok { + return lvler.Level() + } + + for lvl := _minLevel; lvl <= _maxLevel; lvl++ { + if enab.Enabled(lvl) { + return lvl + } + } + + return InvalidLevel +} + +// String returns a lower-case ASCII representation of the log level. +func (l Level) String() string { + switch l { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warn" + case ErrorLevel: + return "error" + case DPanicLevel: + return "dpanic" + case PanicLevel: + return "panic" + case FatalLevel: + return "fatal" + default: + return fmt.Sprintf("Level(%d)", l) + } +} + +// CapitalString returns an all-caps ASCII representation of the log level. +func (l Level) CapitalString() string { + // Printing levels in all-caps is common enough that we should export this + // functionality. + switch l { + case DebugLevel: + return "DEBUG" + case InfoLevel: + return "INFO" + case WarnLevel: + return "WARN" + case ErrorLevel: + return "ERROR" + case DPanicLevel: + return "DPANIC" + case PanicLevel: + return "PANIC" + case FatalLevel: + return "FATAL" + default: + return fmt.Sprintf("LEVEL(%d)", l) + } +} + +// MarshalText marshals the Level to text. Note that the text representation +// drops the -Level suffix (see example). +func (l Level) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText +// expects the text representation of a Level to drop the -Level suffix (see +// example). +// +// In particular, this makes it easy to configure logging levels using YAML, +// TOML, or JSON files. +func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errUnmarshalNilLevel + } + if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { + return fmt.Errorf("unrecognized level: %q", text) + } + return nil +} + +func (l *Level) unmarshalText(text []byte) bool { + switch string(text) { + case "debug", "DEBUG": + *l = DebugLevel + case "info", "INFO", "": // make the zero value useful + *l = InfoLevel + case "warn", "WARN": + *l = WarnLevel + case "error", "ERROR": + *l = ErrorLevel + case "dpanic", "DPANIC": + *l = DPanicLevel + case "panic", "PANIC": + *l = PanicLevel + case "fatal", "FATAL": + *l = FatalLevel + default: + return false + } + return true +} + +// Set sets the level for the flag.Value interface. +func (l *Level) Set(s string) error { + return l.UnmarshalText([]byte(s)) +} + +// Get gets the level for the flag.Getter interface. +func (l *Level) Get() interface{} { + return *l +} + +// Enabled returns true if the given level is at or above this level. 
+func (l Level) Enabled(lvl Level) bool { + return lvl >= l +} + +// LevelEnabler decides whether a given logging level is enabled when logging a +// message. +// +// Enablers are intended to be used to implement deterministic filters; +// concerns like sampling are better implemented as a Core. +// +// Each concrete Level value implements a static LevelEnabler which returns +// true for itself and all higher logging levels. For example WarnLevel.Enabled() +// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and +// FatalLevel, but return false for InfoLevel and DebugLevel. +type LevelEnabler interface { + Enabled(Level) bool +} diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go new file mode 100644 index 0000000000..7af8dadcb3 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level_strings.go @@ -0,0 +1,46 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/zap/internal/color" + +var ( + _levelToColor = map[Level]color.Color{ + DebugLevel: color.Magenta, + InfoLevel: color.Blue, + WarnLevel: color.Yellow, + ErrorLevel: color.Red, + DPanicLevel: color.Red, + PanicLevel: color.Red, + FatalLevel: color.Red, + } + _unknownLevelColor = color.Red + + _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) + _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) +) + +func init() { + for level, color := range _levelToColor { + _levelToLowercaseColorString[level] = color.Add(level.String()) + _levelToCapitalColorString[level] = color.Add(level.CapitalString()) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go new file mode 100644 index 0000000000..c3c55ba0d9 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -0,0 +1,61 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// ObjectMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +// +// Note: ObjectMarshaler is only used when zap.Object is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. +type ObjectMarshaler interface { + MarshalLogObject(ObjectEncoder) error +} + +// ObjectMarshalerFunc is a type adapter that turns a function into an +// ObjectMarshaler. +type ObjectMarshalerFunc func(ObjectEncoder) error + +// MarshalLogObject calls the underlying function. +func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { + return f(enc) +} + +// ArrayMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +// +// Note: ArrayMarshaler is only used when zap.Array is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. +type ArrayMarshaler interface { + MarshalLogArray(ArrayEncoder) error +} + +// ArrayMarshalerFunc is a type adapter that turns a function into an +// ArrayMarshaler. +type ArrayMarshalerFunc func(ArrayEncoder) error + +// MarshalLogArray calls the underlying function. +func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { + return f(enc) +} diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go new file mode 100644 index 0000000000..dfead0829d --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -0,0 +1,179 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// MapObjectEncoder is an ObjectEncoder backed by a simple +// map[string]interface{}. It's not fast enough for production use, but it's +// helpful in tests. +type MapObjectEncoder struct { + // Fields contains the entire encoded log context. + Fields map[string]interface{} + // cur is a pointer to the namespace we're currently writing to. + cur map[string]interface{} +} + +// NewMapObjectEncoder creates a new map-backed ObjectEncoder. +func NewMapObjectEncoder() *MapObjectEncoder { + m := make(map[string]interface{}) + return &MapObjectEncoder{ + Fields: m, + cur: m, + } +} + +// AddArray implements ObjectEncoder. +func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { + arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} + err := v.MarshalLogArray(arr) + m.cur[key] = arr.elems + return err +} + +// AddObject implements ObjectEncoder. +func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { + newMap := NewMapObjectEncoder() + m.cur[k] = newMap.Fields + return v.MarshalLogObject(newMap) +} + +// AddBinary implements ObjectEncoder. +func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } + +// AddByteString implements ObjectEncoder. +func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) } + +// AddBool implements ObjectEncoder. +func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } + +// AddDuration implements ObjectEncoder. +func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } + +// AddComplex128 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } + +// AddComplex64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } + +// AddFloat64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } + +// AddFloat32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } + +// AddInt implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } + +// AddInt64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } + +// AddInt32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } + +// AddInt16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } + +// AddInt8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } + +// AddString implements ObjectEncoder. +func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } + +// AddTime implements ObjectEncoder. +func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } + +// AddUint implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } + +// AddUint64 implements ObjectEncoder. 
+func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } + +// AddUint32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } + +// AddUint16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } + +// AddUint8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } + +// AddUintptr implements ObjectEncoder. +func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } + +// AddReflected implements ObjectEncoder. +func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { + m.cur[k] = v + return nil +} + +// OpenNamespace implements ObjectEncoder. +func (m *MapObjectEncoder) OpenNamespace(k string) { + ns := make(map[string]interface{}) + m.cur[k] = ns + m.cur = ns +} + +// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like +// the MapObjectEncoder, it's not designed for production use. +type sliceArrayEncoder struct { + elems []interface{} +} + +func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { + enc := &sliceArrayEncoder{} + err := v.MarshalLogArray(enc) + s.elems = append(s.elems, enc.elems) + return err +} + +func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { + m := NewMapObjectEncoder() + err := v.MarshalLogObject(m) + s.elems = append(s.elems, m.Fields) + return err +} + +func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { + s.elems = append(s.elems, v) + return nil +} + +func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } +func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/zap/zapcore/reflected_encoder.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go new file mode 100644 index 0000000000..8746360eca --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go @@ -0,0 +1,41 @@ +// Copyright (c) 2016 
Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/json" + "io" +) + +// ReflectedEncoder serializes log fields that can't be serialized with Zap's +// JSON encoder. These have the ReflectType field type. +// Use EncoderConfig.NewReflectedEncoder to set this. +type ReflectedEncoder interface { + // Encode encodes and writes to the underlying data stream. + Encode(interface{}) error +} + +func defaultReflectedEncoder(w io.Writer) ReflectedEncoder { + enc := json.NewEncoder(w) + // For consistency with our custom JSON encoder. + enc.SetEscapeHTML(false) + return enc +} diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go new file mode 100644 index 0000000000..b7c093a4f2 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -0,0 +1,229 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import ( + "sync/atomic" + "time" +) + +const ( + _numLevels = _maxLevel - _minLevel + 1 + _countersPerLevel = 4096 +) + +type counter struct { + resetAt atomic.Int64 + counter atomic.Uint64 +} + +type counters [_numLevels][_countersPerLevel]counter + +func newCounters() *counters { + return &counters{} +} + +func (cs *counters) get(lvl Level, key string) *counter { + i := lvl - _minLevel + j := fnv32a(key) % _countersPerLevel + return &cs[i][j] +} + +// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc +func fnv32a(s string) uint32 { + const ( + offset32 = 2166136261 + prime32 = 16777619 + ) + hash := uint32(offset32) + for i := 0; i < len(s); i++ { + hash ^= uint32(s[i]) + hash *= prime32 + } + return hash +} + +func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { + tn := t.UnixNano() + resetAfter := c.resetAt.Load() + if resetAfter > tn { + return c.counter.Add(1) + } + + c.counter.Store(1) + + newResetAfter := tn + tick.Nanoseconds() + if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) { + // We raced with another goroutine trying to reset, and it also reset + // the counter to 1, so we need to reincrement the counter. + return c.counter.Add(1) + } + + return 1 +} + +// SamplingDecision is a decision represented as a bit field made by sampler. +// More decisions may be added in the future. +type SamplingDecision uint32 + +const ( + // LogDropped indicates that the Sampler dropped a log entry. + LogDropped SamplingDecision = 1 << iota + // LogSampled indicates that the Sampler sampled a log entry. + LogSampled +) + +// optionFunc wraps a func so it satisfies the SamplerOption interface. +type optionFunc func(*sampler) + +func (f optionFunc) apply(s *sampler) { + f(s) +} + +// SamplerOption configures a Sampler. +type SamplerOption interface { + apply(*sampler) +} + +// nopSamplingHook is the default hook used by sampler. +func nopSamplingHook(Entry, SamplingDecision) {} + +// SamplerHook registers a function which will be called when Sampler makes a +// decision. +// +// This hook may be used to get visibility into the performance of the sampler. +// For example, use it to track metrics of dropped versus sampled logs. +// +// var dropped atomic.Int64 +// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { +// if dec&zapcore.LogDropped > 0 { +// dropped.Inc() +// } +// }) +func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { + return optionFunc(func(s *sampler) { + s.hook = hook + }) +} + +// NewSamplerWithOptions creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// For example, +// +// core = NewSamplerWithOptions(core, time.Second, 10, 5) +// +// This will log the first 10 log entries with the same level and message +// in a one second interval as-is. Following that, it will allow through +// every 5th log entry with the same level and message in that interval. +// +// If thereafter is zero, the Core will drop all log entries after the first N +// in that interval. +// +// Sampler can be configured to report sampling decisions with the SamplerHook +// option. 
+// +// Keep in mind that Zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core { + s := &sampler{ + Core: core, + tick: tick, + counts: newCounters(), + first: uint64(first), + thereafter: uint64(thereafter), + hook: nopSamplingHook, + } + for _, opt := range opts { + opt.apply(s) + } + + return s +} + +type sampler struct { + Core + + counts *counters + tick time.Duration + first, thereafter uint64 + hook func(Entry, SamplingDecision) +} + +var ( + _ Core = (*sampler)(nil) + _ leveledEnabler = (*sampler)(nil) +) + +// NewSampler creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +// +// Deprecated: use NewSamplerWithOptions. +func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { + return NewSamplerWithOptions(core, tick, first, thereafter) +} + +func (s *sampler) Level() Level { + return LevelOf(s.Core) +} + +func (s *sampler) With(fields []Field) Core { + return &sampler{ + Core: s.Core.With(fields), + tick: s.tick, + counts: s.counts, + first: s.first, + thereafter: s.thereafter, + hook: s.hook, + } +} + +func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !s.Enabled(ent.Level) { + return ce + } + + if ent.Level >= _minLevel && ent.Level <= _maxLevel { + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) { + s.hook(ent, LogDropped) + return ce + } + s.hook(ent, LogSampled) + } + return s.Core.Check(ent, ce) +} diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go new file mode 100644 index 0000000000..9bb32f0557 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/tee.go @@ -0,0 +1,96 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type multiCore []Core + +var ( + _ leveledEnabler = multiCore(nil) + _ Core = multiCore(nil) +) + +// NewTee creates a Core that duplicates log entries into two or more +// underlying Cores. +// +// Calling it with a single Core returns the input unchanged, and calling +// it with no input returns a no-op Core. +func NewTee(cores ...Core) Core { + switch len(cores) { + case 0: + return NewNopCore() + case 1: + return cores[0] + default: + return multiCore(cores) + } +} + +func (mc multiCore) With(fields []Field) Core { + clone := make(multiCore, len(mc)) + for i := range mc { + clone[i] = mc[i].With(fields) + } + return clone +} + +func (mc multiCore) Level() Level { + minLvl := _maxLevel // mc is never empty + for i := range mc { + if lvl := LevelOf(mc[i]); lvl < minLvl { + minLvl = lvl + } + } + return minLvl +} + +func (mc multiCore) Enabled(lvl Level) bool { + for i := range mc { + if mc[i].Enabled(lvl) { + return true + } + } + return false +} + +func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + for i := range mc { + ce = mc[i].Check(ent, ce) + } + return ce +} + +func (mc multiCore) Write(ent Entry, fields []Field) error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Write(ent, fields)) + } + return err +} + +func (mc multiCore) Sync() error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Sync()) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go new file mode 100644 index 0000000000..d4a1af3d07 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -0,0 +1,122 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "io" + "sync" + + "go.uber.org/multierr" +) + +// A WriteSyncer is an io.Writer that can also flush any buffered data. Note +// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. +type WriteSyncer interface { + io.Writer + Sync() error +} + +// AddSync converts an io.Writer to a WriteSyncer. 
It attempts to be +// intelligent: if the concrete type of the io.Writer implements WriteSyncer, +// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. +func AddSync(w io.Writer) WriteSyncer { + switch w := w.(type) { + case WriteSyncer: + return w + default: + return writerWrapper{w} + } +} + +type lockedWriteSyncer struct { + sync.Mutex + ws WriteSyncer +} + +// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In +// particular, *os.Files must be locked before use. +func Lock(ws WriteSyncer) WriteSyncer { + if _, ok := ws.(*lockedWriteSyncer); ok { + // no need to layer on another lock + return ws + } + return &lockedWriteSyncer{ws: ws} +} + +func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { + s.Lock() + n, err := s.ws.Write(bs) + s.Unlock() + return n, err +} + +func (s *lockedWriteSyncer) Sync() error { + s.Lock() + err := s.ws.Sync() + s.Unlock() + return err +} + +type writerWrapper struct { + io.Writer +} + +func (w writerWrapper) Sync() error { + return nil +} + +type multiWriteSyncer []WriteSyncer + +// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes +// and sync calls, much like io.MultiWriter. +func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { + if len(ws) == 1 { + return ws[0] + } + return multiWriteSyncer(ws) +} + +// See https://golang.org/src/io/multi.go +// When not all underlying syncers write the same number of bytes, +// the smallest number is returned even though Write() is called on +// all of them. +func (ws multiWriteSyncer) Write(p []byte) (int, error) { + var writeErr error + nWritten := 0 + for _, w := range ws { + n, err := w.Write(p) + writeErr = multierr.Append(writeErr, err) + if nWritten == 0 && n != 0 { + nWritten = n + } else if n < nWritten { + nWritten = n + } + } + return nWritten, writeErr +} + +func (ws multiWriteSyncer) Sync() error { + var err error + for _, w := range ws { + err = multierr.Append(err, w.Sync()) + } + return err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 76b87b9645..8267c69c1d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -229,7 +229,7 @@ github.com/onsi/gomega/matchers/support/goraph/edge github.com/onsi/gomega/matchers/support/goraph/node github.com/onsi/gomega/matchers/support/goraph/util github.com/onsi/gomega/types -# github.com/opencontainers/runc v1.1.10 +# github.com/opencontainers/runc v1.1.12 ## explicit; go 1.17 github.com/opencontainers/runc/libcontainer/cgroups github.com/opencontainers/runc/libcontainer/cgroups/devices @@ -283,6 +283,23 @@ github.com/sirupsen/logrus # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag +# github.com/zhmcclient/golang-zhmcclient v0.2.1-0.20231123024149-4d04b40c0e93 +## explicit; go 1.20 +github.com/zhmcclient/golang-zhmcclient/pkg/zhmcclient +# go.uber.org/multierr v1.11.0 +## explicit; go 1.19 +go.uber.org/multierr +# go.uber.org/zap v1.26.0 +## explicit; go 1.19 +go.uber.org/zap +go.uber.org/zap/buffer +go.uber.org/zap/internal +go.uber.org/zap/internal/bufferpool +go.uber.org/zap/internal/color +go.uber.org/zap/internal/exit +go.uber.org/zap/internal/pool +go.uber.org/zap/internal/stacktrace +go.uber.org/zap/zapcore # golang.org/x/exp v0.0.0-20231006140011-7918f672742d ## explicit; go 1.20 golang.org/x/exp/constraints