diff --git a/.github/actions/bot/action.yaml b/.github/actions/bot/action.yaml index dfb471a30..c462781cd 100644 --- a/.github/actions/bot/action.yaml +++ b/.github/actions/bot/action.yaml @@ -3,8 +3,8 @@ description: "🤖 beep boop" runs: using: "composite" steps: - - uses: "actions/checkout@v3" - - uses: "actions/github-script@v6" + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 + - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # 7.0.1 with: script: | const crypto = require('crypto'); diff --git a/.github/actions/ci/build/action.yaml b/.github/actions/ci/build/action.yaml index b860d0ccd..5f7dcccfb 100644 --- a/.github/actions/ci/build/action.yaml +++ b/.github/actions/ci/build/action.yaml @@ -21,7 +21,7 @@ outputs: runs: using: "composite" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 with: ref: ${{ inputs.git_sha }} - id: build @@ -31,7 +31,7 @@ runs: AMI_NAME="amazon-eks-node-${{ inputs.os_distro }}-${{ inputs.k8s_version }}-${{ inputs.build_id }}" make k8s=${{ inputs.k8s_version }} os_distro=${{ inputs.os_distro }} ami_name=${AMI_NAME} ${{ inputs.additional_arguments }} echo "ami_id=$(jq -r .builds[0].artifact_id "${AMI_NAME}-manifest.json" | cut -d ':' -f 2)" >> $GITHUB_OUTPUT - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # 4.3.3 with: name: version-info path: "*-version-info.json" diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..7759808b4 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,9 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + labels: + - "dependencies" + - "changelog/exclude" diff --git a/.github/workflows/bot-trigger.yaml b/.github/workflows/bot-trigger.yaml index d728d4f10..1fca0abd7 100644 --- a/.github/workflows/bot-trigger.yaml +++ 
b/.github/workflows/bot-trigger.yaml @@ -10,5 +10,5 @@ jobs: runs-on: ubuntu-latest permissions: write-all steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 - uses: ./.github/actions/bot diff --git a/.github/workflows/ci-auto.yaml b/.github/workflows/ci-auto.yaml index 701a8eef8..2bfe409b4 100644 --- a/.github/workflows/ci-auto.yaml +++ b/.github/workflows/ci-auto.yaml @@ -9,37 +9,37 @@ jobs: lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 - run: echo "$(go env GOPATH)/bin" >> $GITHUB_PATH - run: go install mvdan.cc/sh/v3/cmd/shfmt@latest - run: make lint templates-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 - run: make test nodeadm-build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 - run: cd nodeadm && make build nodeadm-check-generate: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 - run: hack/nodeadm-check-generate.sh nodeadm-check-vendor: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 - run: hack/nodeadm-check-vendor.sh nodeadm-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 - run: cd nodeadm && make test nodeadm-test-e2e: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 - run: cd nodeadm && make test-e2e diff --git a/.github/workflows/ci-manual.yaml b/.github/workflows/ci-manual.yaml index 9a33243c0..4e8d13e3a 100644 --- a/.github/workflows/ci-manual.yaml +++ 
b/.github/workflows/ci-manual.yaml @@ -64,7 +64,7 @@ jobs: needs: - setup steps: - - uses: actions/github-script@v6 + - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # 7.0.1 with: script: | github.rest.issues.createComment({ @@ -95,10 +95,10 @@ jobs: - os_distro: al2023 k8s_version: 1.22 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 with: ref: 'main' - - uses: aws-actions/configure-aws-credentials@v2 + - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # 4.0.2 with: aws-region: ${{ secrets.AWS_REGION }} role-to-assume: ${{ secrets.AWS_ROLE_ARN_CI }} @@ -130,7 +130,7 @@ jobs: - setup - kubernetes-versions steps: - - uses: actions/github-script@v6 + - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # 7.0.1 with: script: | const { data } = await github.rest.actions.listJobsForWorkflowRun({ diff --git a/.github/workflows/dependency-review.yaml b/.github/workflows/dependency-review.yaml index 5de0aaac4..8261f62da 100644 --- a/.github/workflows/dependency-review.yaml +++ b/.github/workflows/dependency-review.yaml @@ -9,26 +9,25 @@ jobs: dependency-review: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: actions/dependency-review-action@v4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 + - uses: actions/dependency-review-action@72eb03d02c7872a771aacd928f3123ac62ad6d3a # 4.3.3 gosec: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v5 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # 5.0.1 with: - go-version: '1.21.8' + go-version-file: 'nodeadm/go.mod' - run: go install github.com/securego/gosec/v2/cmd/gosec@latest - run: gosec -exclude-generated ./... 
working-directory: nodeadm govulncheck: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: golang/govulncheck-action@v1 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 + - uses: golang/govulncheck-action@dd0578b371c987f96d1185abb54344b44352bd58 # 1.0.3 with: - go-version-input: 1.21.8 work-dir: ./nodeadm go-version-file: nodeadm/go.mod cache: false - repo-checkout: false \ No newline at end of file + repo-checkout: false diff --git a/.github/workflows/deploy-docs.yaml b/.github/workflows/deploy-docs.yaml index 33ee10d43..302a6962d 100644 --- a/.github/workflows/deploy-docs.yaml +++ b/.github/workflows/deploy-docs.yaml @@ -10,6 +10,6 @@ jobs: contents: write runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 - run: pip install mkdocs mkdocs-material - run: mkdocs gh-deploy --strict --no-history --force diff --git a/.github/workflows/janitor.yaml b/.github/workflows/janitor.yaml index 0a91d9f8a..e13b722ff 100644 --- a/.github/workflows/janitor.yaml +++ b/.github/workflows/janitor.yaml @@ -13,8 +13,8 @@ jobs: if: github.repository == 'awslabs/amazon-eks-ami' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: aws-actions/configure-aws-credentials@v2 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 + - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # 4.0.2 with: aws-region: ${{ secrets.AWS_REGION }} role-to-assume: ${{ secrets.AWS_ROLE_ARN_JANITOR }} @@ -26,8 +26,8 @@ jobs: if: github.repository == 'awslabs/amazon-eks-ami' runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: aws-actions/configure-aws-credentials@v2 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 + - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # 4.0.2 with: aws-region: ${{ secrets.AWS_REGION }} 
role-to-assume: ${{ secrets.AWS_ROLE_ARN_JANITOR }} diff --git a/.github/workflows/sync-eni-max-pods.yaml b/.github/workflows/sync-eni-max-pods.yaml index 0939ef568..35908233e 100644 --- a/.github/workflows/sync-eni-max-pods.yaml +++ b/.github/workflows/sync-eni-max-pods.yaml @@ -14,16 +14,16 @@ jobs: if: github.repository == 'awslabs/amazon-eks-ami' runs-on: ubuntu-latest steps: - - uses: aws-actions/configure-aws-credentials@v2 + - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # 4.0.2 with: aws-region: ${{ secrets.AWS_REGION }} role-to-assume: ${{ secrets.AWS_ROLE_ARN_SYNC_ENI_MAX_PODS }} - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 with: repository: awslabs/amazon-eks-ami ref: refs/heads/main path: amazon-eks-ami/ - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 with: repository: aws/amazon-vpc-cni-k8s ref: refs/heads/master @@ -35,7 +35,7 @@ jobs: make generate-limits cp misc/eni-max-pods.txt ../amazon-eks-ami/templates/shared/runtime/eni-max-pods.txt cp misc/eni-max-pods.txt ../amazon-eks-ami/nodeadm/internal/kubelet/eni-max-pods.txt - - uses: peter-evans/create-pull-request@v4 + - uses: peter-evans/create-pull-request@6d6857d36972b65feb161a90e484f2984215f83e # 6.0.5 with: branch: update-eni-max-pods path: amazon-eks-ami/ diff --git a/.github/workflows/sync-to-codecommit.yaml b/.github/workflows/sync-to-codecommit.yaml index eeebf367b..ff9167332 100644 --- a/.github/workflows/sync-to-codecommit.yaml +++ b/.github/workflows/sync-to-codecommit.yaml @@ -14,11 +14,11 @@ jobs: id-token: write contents: read steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 with: # fetch complete history fetch-depth: 0 - - uses: aws-actions/configure-aws-credentials@v1 + - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # 4.0.2 
with: aws-region: ${{ secrets.AWS_REGION }} role-to-assume: ${{ secrets.AWS_ROLE_ARN_SYNC_TO_CODECOMMIT }} diff --git a/.github/workflows/update-changelog.yaml b/.github/workflows/update-changelog.yaml index 022908594..f2621b02a 100644 --- a/.github/workflows/update-changelog.yaml +++ b/.github/workflows/update-changelog.yaml @@ -21,12 +21,12 @@ jobs: needs: - setup steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 with: repository: awslabs/amazon-eks-ami ref: refs/heads/main path: amazon-eks-ami/ - - uses: actions/github-script@v6 + - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # 7.0.1 with: script: | const fs = require('fs'); @@ -45,9 +45,9 @@ jobs: const newEntry = `# ${release.data.name}\n${release.data.body}`; let updatedChangelog = changelog.replace(placeholder, placeholder + '\n\n' + newEntry + '\n---\n'); // if the release notes are modified in the GitHub web editor, trailing spaces can be added accidentally - updatedChangelog = updatedChangelog.replace(/\s+$/, ''); + updatedChangelog = updatedChangelog.split('\n').map(s => s.replace(/\s+$/, '')).join('\n'); fs.writeFileSync(changelogPath, updatedChangelog); - - uses: peter-evans/create-pull-request@v4 + - uses: peter-evans/create-pull-request@6d6857d36972b65feb161a90e484f2984215f83e # 6.0.5 with: branch: update-changelog path: amazon-eks-ami/ diff --git a/.github/workflows/update-dependency.yaml b/.github/workflows/update-dependency.yaml index cb3966151..d20806e9d 100644 --- a/.github/workflows/update-dependency.yaml +++ b/.github/workflows/update-dependency.yaml @@ -17,10 +17,10 @@ jobs: contents: write pull-requests: write steps: - - uses: actions/setup-go@v5 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # 4.1.7 + - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # 5.0.1 with: - go-version: '1.21.8' - - uses: actions/checkout@v4 + go-version-file: 'nodeadm/go.mod' - name: Update 
Nodeadm Dependencies id: update_deps run: | @@ -31,14 +31,13 @@ jobs: echo 'EOF' >> $GITHUB_OUTPUT - name: Create PR if: ${{ steps.update_deps.outputs.changes != '' }} - uses: peter-evans/create-pull-request@v5 + uses: peter-evans/create-pull-request@6d6857d36972b65feb161a90e484f2984215f83e # 6.0.5 with: title: 'Update dependencies' commit-message: Update dependencies committer: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> branch: dependencies/update - branch-suffix: timestamp base: main delete-branch: true labels: | diff --git a/CHANGELOG.md b/CHANGELOG.md index a2308d83e..86b477957 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ +<<<<<<< HEAD <<<<<<< HEAD # AMI Release v20240227 @@ -1357,6 +1358,5812 @@ > - Kubernetes 1.25-1.28: `5.10.192-183.736.amzn2` ======= > - Kubernetes 1.25 and below: `5.10.192-183.736.amzn2` +======= +# AMI Release v20240605 +> **Note** +> There are no changes to the AMI template in this release. + +--- + +

AMI Details

+ + +
+Kubernetes 1.30 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.30-v202406051.30.0-20240605s3://amazon-eks/1.30.0/2024-05-12/
amazon-eks-node-1.30-v20240605
amazon-eks-node-al2023-arm64-standard-1.30-v20240605
amazon-eks-arm64-node-1.30-v20240605
amazon-eks-gpu-node-1.30-v20240605
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.183.01-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.29 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.29-v202406051.29.3-20240605s3://amazon-eks/1.29.3/2024-04-19/
amazon-eks-node-1.29-v20240605
amazon-eks-node-al2023-arm64-standard-1.29-v20240605
amazon-eks-arm64-node-1.29-v20240605
amazon-eks-gpu-node-1.29-v20240605
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.183.01-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.28 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.28-v202406051.28.8-20240605s3://amazon-eks/1.28.8/2024-04-19/
amazon-eks-node-1.28-v20240605
amazon-eks-node-al2023-arm64-standard-1.28-v20240605
amazon-eks-arm64-node-1.28-v20240605
amazon-eks-gpu-node-1.28-v20240605
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.183.01-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.27 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.27-v202406051.27.12-20240605s3://amazon-eks/1.27.12/2024-04-19/
amazon-eks-node-1.27-v20240605
amazon-eks-node-al2023-arm64-standard-1.27-v20240605
amazon-eks-arm64-node-1.27-v20240605
amazon-eks-gpu-node-1.27-v20240605
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.183.01-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.26 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.26-v202406051.26.15-20240605s3://amazon-eks/1.26.15/2024-04-19/
amazon-eks-node-1.26-v20240605
amazon-eks-node-al2023-arm64-standard-1.26-v20240605
amazon-eks-arm64-node-1.26-v20240605
amazon-eks-gpu-node-1.26-v20240605
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.183.01-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.25 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.25-v202406051.25.16-20240605s3://amazon-eks/1.25.16/2024-04-19/
amazon-eks-node-1.25-v20240605
amazon-eks-node-al2023-arm64-standard-1.25-v20240605
amazon-eks-arm64-node-1.25-v20240605
amazon-eks-gpu-node-1.25-v20240605
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.183.01-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.24 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.24-v202406051.24.17-20240605s3://amazon-eks/1.24.17/2024-04-19/
amazon-eks-node-1.24-v20240605
amazon-eks-node-al2023-arm64-standard-1.24-v20240605
amazon-eks-arm64-node-1.24-v20240605
amazon-eks-gpu-node-1.24-v20240605
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.183.01-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.23 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.23-v202406051.23.17-20240605s3://amazon-eks/1.23.17/2024-04-19/
amazon-eks-node-1.23-v20240605
amazon-eks-node-al2023-arm64-standard-1.23-v20240605
amazon-eks-arm64-node-1.23-v20240605
amazon-eks-gpu-node-1.23-v20240605
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.183.01-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.22 + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-1.22-v202406051.22.17-20240605s3://amazon-eks/1.22.17/2024-04-19/
amazon-eks-arm64-node-1.22-v20240605
amazon-eks-gpu-node-1.22-v20240605
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.183.01-1.el7
runc1.1.11-1.amzn2
+
+ +
+Kubernetes 1.21 + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-1.21-v202406051.21.14-20240605s3://amazon-eks/1.21.14/2024-04-19/
amazon-eks-arm64-node-1.21-v20240605
amazon-eks-gpu-node-1.21-v20240605
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.183.01-1.el7
runc1.1.11-1.amzn2
+
+ + +--- + + +# AMI Release v20240531 + + +## What's Changed +* Decompress GZIP'd user data by @cartermckinnon in https://github.com/awslabs/amazon-eks-ami/pull/1762 + + +**Full Changelog**: https://github.com/awslabs/amazon-eks-ami/compare/v20240522...v20240531 + +--- + +

AMI Details

+ + +
+Kubernetes 1.30 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.30-v202405311.30.0-20240531s3://amazon-eks/1.30.0/2024-05-12/
amazon-eks-node-1.30-v20240531
amazon-eks-node-al2023-arm64-standard-1.30-v20240531
amazon-eks-arm64-node-1.30-v20240531
amazon-eks-gpu-node-1.30-v20240531
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.29 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.29-v202405311.29.3-20240531s3://amazon-eks/1.29.3/2024-04-19/
amazon-eks-node-1.29-v20240531
amazon-eks-node-al2023-arm64-standard-1.29-v20240531
amazon-eks-arm64-node-1.29-v20240531
amazon-eks-gpu-node-1.29-v20240531
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.28 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.28-v202405311.28.8-20240531s3://amazon-eks/1.28.8/2024-04-19/
amazon-eks-node-1.28-v20240531
amazon-eks-node-al2023-arm64-standard-1.28-v20240531
amazon-eks-arm64-node-1.28-v20240531
amazon-eks-gpu-node-1.28-v20240531
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.27 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.27-v202405311.27.12-20240531s3://amazon-eks/1.27.12/2024-04-19/
amazon-eks-node-1.27-v20240531
amazon-eks-node-al2023-arm64-standard-1.27-v20240531
amazon-eks-arm64-node-1.27-v20240531
amazon-eks-gpu-node-1.27-v20240531
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.26 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.26-v202405311.26.15-20240531s3://amazon-eks/1.26.15/2024-04-19/
amazon-eks-node-1.26-v20240531
amazon-eks-node-al2023-arm64-standard-1.26-v20240531
amazon-eks-arm64-node-1.26-v20240531
amazon-eks-gpu-node-1.26-v20240531
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.25 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.25-v202405311.25.16-20240531s3://amazon-eks/1.25.16/2024-04-19/
amazon-eks-node-1.25-v20240531
amazon-eks-node-al2023-arm64-standard-1.25-v20240531
amazon-eks-arm64-node-1.25-v20240531
amazon-eks-gpu-node-1.25-v20240531
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.24 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.24-v202405311.24.17-20240531s3://amazon-eks/1.24.17/2024-04-19/
amazon-eks-node-1.24-v20240531
amazon-eks-node-al2023-arm64-standard-1.24-v20240531
amazon-eks-arm64-node-1.24-v20240531
amazon-eks-gpu-node-1.24-v20240531
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.23 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.23-v202405311.23.17-20240531s3://amazon-eks/1.23.17/2024-04-19/
amazon-eks-node-1.23-v20240531
amazon-eks-node-al2023-arm64-standard-1.23-v20240531
amazon-eks-arm64-node-1.23-v20240531
amazon-eks-gpu-node-1.23-v20240531
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.91-99.172.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.22 + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-1.22-v202405311.22.17-20240531s3://amazon-eks/1.22.17/2024-04-19/
amazon-eks-arm64-node-1.22-v20240531
amazon-eks-gpu-node-1.22-v20240531
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+
+ +
+Kubernetes 1.21 + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-1.21-v202405311.21.14-20240531s3://amazon-eks/1.21.14/2024-04-19/
amazon-eks-arm64-node-1.21-v20240531
amazon-eks-gpu-node-1.21-v20240531
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.217-205.860.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+
+ + +--- + + +# AMI Release v20240522 + + +> [!NOTE] +> Be aware that we are tracking an ongoing issue with the 535-series drivers and instance types in the g4 and g5 families. +> This release disabled GSP on 1.22+ on g4 and g5 families to mitigate this issue. +> More information is available in https://github.com/awslabs/amazon-eks-ami/issues/1523. + +## What's Changed +* log any processes with large environments by @tzneal in https://github.com/awslabs/amazon-eks-ami/pull/1807 +* Collect Network Policy ebpf data from log collector script by @jaydeokar in https://github.com/awslabs/amazon-eks-ami/pull/1805 + +## New Contributors +* @guikcd made their first contribution in https://github.com/awslabs/amazon-eks-ami/pull/1813 + +**Full Changelog**: https://github.com/awslabs/amazon-eks-ami/compare/v20240514...v20240522 + +--- + +

AMI Details

+ + +
+Kubernetes 1.30 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.30-v202405221.30.0-20240522s3://amazon-eks/1.30.0/2024-05-12/
amazon-eks-node-1.30-v20240522
amazon-eks-node-al2023-arm64-standard-1.30-v20240522
amazon-eks-arm64-node-1.30-v20240522
amazon-eks-gpu-node-1.30-v20240522
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.216-204.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.29 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.29-v202405221.29.3-20240522s3://amazon-eks/1.29.3/2024-04-19/
amazon-eks-node-1.29-v20240522
amazon-eks-node-al2023-arm64-standard-1.29-v20240522
amazon-eks-arm64-node-1.29-v20240522
amazon-eks-gpu-node-1.29-v20240522
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.216-204.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.28 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.28-v202405221.28.8-20240522s3://amazon-eks/1.28.8/2024-04-19/
amazon-eks-node-1.28-v20240522
amazon-eks-node-al2023-arm64-standard-1.28-v20240522
amazon-eks-arm64-node-1.28-v20240522
amazon-eks-gpu-node-1.28-v20240522
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.216-204.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.27 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.27-v202405221.27.12-20240522s3://amazon-eks/1.27.12/2024-04-19/
amazon-eks-node-1.27-v20240522
amazon-eks-node-al2023-arm64-standard-1.27-v20240522
amazon-eks-arm64-node-1.27-v20240522
amazon-eks-gpu-node-1.27-v20240522
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.216-204.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.26 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.26-v202405221.26.15-20240522s3://amazon-eks/1.26.15/2024-04-19/
amazon-eks-node-1.26-v20240522
amazon-eks-node-al2023-arm64-standard-1.26-v20240522
amazon-eks-arm64-node-1.26-v20240522
amazon-eks-gpu-node-1.26-v20240522
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.216-204.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.25 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.25-v202405221.25.16-20240522s3://amazon-eks/1.25.16/2024-04-19/
amazon-eks-node-1.25-v20240522
amazon-eks-node-al2023-arm64-standard-1.25-v20240522
amazon-eks-arm64-node-1.25-v20240522
amazon-eks-gpu-node-1.25-v20240522
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.216-204.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.24 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.24-v202405221.24.17-20240522s3://amazon-eks/1.24.17/2024-04-19/
amazon-eks-node-1.24-v20240522
amazon-eks-node-al2023-arm64-standard-1.24-v20240522
amazon-eks-arm64-node-1.24-v20240522
amazon-eks-gpu-node-1.24-v20240522
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.216-204.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.23 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.23-v202405221.23.17-20240522s3://amazon-eks/1.23.17/2024-04-19/
amazon-eks-node-1.23-v20240522
amazon-eks-node-al2023-arm64-standard-1.23-v20240522
amazon-eks-arm64-node-1.23-v20240522
amazon-eks-gpu-node-1.23-v20240522
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.216-204.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.22 + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-1.22-v202405221.22.17-20240522s3://amazon-eks/1.22.17/2024-04-19/
amazon-eks-arm64-node-1.22-v20240522
amazon-eks-gpu-node-1.22-v20240522
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.216-204.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+
+ +
+Kubernetes 1.21 + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-1.21-v202405221.21.14-20240522s3://amazon-eks/1.21.14/2024-04-19/
amazon-eks-arm64-node-1.21-v20240522
amazon-eks-gpu-node-1.21-v20240522
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.380.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.216-204.855.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+
+ + + +> [!NOTE] +> A recent change in the Linux kernel caused the EFA and NVIDIA drivers to be incompatible. More information is available in #1494. +> To prevent unexpected failures, the kernel in the GPU AMI will remain at the following versions until we have determined a solution: +> - Kubernetes 1.21 and below: `5.10.192-183.736.amzn2` + +--- + + +# AMI Release v20240514 + + +> [!NOTE] +> Be aware that we are tracking an ongoing issue with the 535-series drivers and instance types in the g4 and g5 families. +> This release disabled GSP on 1.23+ on g4 and g5 families to mitigate this issue. +> More information is available in https://github.com/awslabs/amazon-eks-ami/issues/1523. + +## What's Changed +* rework containerd config merging by @ndbaker1 in https://github.com/awslabs/amazon-eks-ami/pull/1780 +* Handle existing `nerdctl` installations by @ricomasgu in https://github.com/awslabs/amazon-eks-ami/pull/1786 + +## New Contributors +* @ricomasgu made their first contribution in https://github.com/awslabs/amazon-eks-ami/pull/1786 + +**Full Changelog**: https://github.com/awslabs/amazon-eks-ami/compare/v20240506...v20240514 + +--- + +

AMI Details

+ + +
+Kubernetes 1.30 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.30-v202405141.30.0-20240514s3://amazon-eks/1.30.0/2024-05-12/
amazon-eks-node-1.30-v20240514
amazon-eks-node-al2023-arm64-standard-1.30-v20240514
amazon-eks-arm64-node-1.30-v20240514
amazon-eks-gpu-node-1.30-v20240514
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.215-203.850.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.29 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.29-v202405141.29.3-20240514s3://amazon-eks/1.29.3/2024-04-19/
amazon-eks-node-1.29-v20240514
amazon-eks-node-al2023-arm64-standard-1.29-v20240514
amazon-eks-arm64-node-1.29-v20240514
amazon-eks-gpu-node-1.29-v20240514
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.215-203.850.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.28 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.28-v202405141.28.8-20240514s3://amazon-eks/1.28.8/2024-04-19/
amazon-eks-node-1.28-v20240514
amazon-eks-node-al2023-arm64-standard-1.28-v20240514
amazon-eks-arm64-node-1.28-v20240514
amazon-eks-gpu-node-1.28-v20240514
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.215-203.850.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.27 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.27-v202405141.27.12-20240514s3://amazon-eks/1.27.12/2024-04-19/
amazon-eks-node-1.27-v20240514
amazon-eks-node-al2023-arm64-standard-1.27-v20240514
amazon-eks-arm64-node-1.27-v20240514
amazon-eks-gpu-node-1.27-v20240514
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.215-203.850.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.26 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.26-v202405141.26.15-20240514s3://amazon-eks/1.26.15/2024-04-19/
amazon-eks-node-1.26-v20240514
amazon-eks-node-al2023-arm64-standard-1.26-v20240514
amazon-eks-arm64-node-1.26-v20240514
amazon-eks-gpu-node-1.26-v20240514
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.215-203.850.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.25 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.25-v202405141.25.16-20240514s3://amazon-eks/1.25.16/2024-04-19/
amazon-eks-node-1.25-v20240514
amazon-eks-node-al2023-arm64-standard-1.25-v20240514
amazon-eks-arm64-node-1.25-v20240514
amazon-eks-gpu-node-1.25-v20240514
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.215-203.850.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.24 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.24-v202405141.24.17-20240514s3://amazon-eks/1.24.17/2024-04-19/
amazon-eks-node-1.24-v20240514
amazon-eks-node-al2023-arm64-standard-1.24-v20240514
amazon-eks-arm64-node-1.24-v20240514
amazon-eks-gpu-node-1.24-v20240514
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.215-203.850.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.23 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.23-v202405141.23.17-20240514s3://amazon-eks/1.23.17/2024-04-19/
amazon-eks-node-1.23-v20240514
amazon-eks-node-al2023-arm64-standard-1.23-v20240514
amazon-eks-arm64-node-1.23-v20240514
amazon-eks-gpu-node-1.23-v20240514
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.215-203.850.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.380.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.90-99.173.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.22 + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-1.22-v202405141.22.17-20240514s3://amazon-eks/1.22.17/2024-04-19/
amazon-eks-arm64-node-1.22-v20240514
amazon-eks-gpu-node-1.22-v20240514
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.215-203.850.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+
+ +
+Kubernetes 1.21 + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-1.21-v202405141.21.14-20240514s3://amazon-eks/1.21.14/2024-04-19/
amazon-eks-arm64-node-1.21-v20240514
amazon-eks-gpu-node-1.21-v20240514
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.215-203.850.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+
+ + + +> [!NOTE] +> A recent change in the Linux kernel caused the EFA and NVIDIA drivers to be incompatible. More information is available in #1494. +> To prevent unexpected failures, the kernel in the GPU AMI will remain at the following versions until we have determined a solution: +> - Kubernetes 1.22 and below: `5.10.192-183.736.amzn2` + +--- + + +# AMI Release v20240506 + +> [!NOTE] +> +> Be aware that we are tracking an ongoing issue with the 535-series drivers and instance types in the `g4` and `g5` families. +> This release disabled GSP on 1.24+ on `g4` and `g5` families to mitigate this issue. +> More information is available in #1523. + +## What's Changed +* al2023: Remove duplicate logrotate entries by @cartermckinnon in https://github.com/awslabs/amazon-eks-ami/pull/1777 +* Add cron logs to Log Collector Script by @tzneal in https://github.com/awslabs/amazon-eks-ami/pull/1781 + +**Full Changelog**: https://github.com/awslabs/amazon-eks-ami/compare/v20240424...v20240506 + +--- + +

AMI Details

+ + +
+Kubernetes 1.29 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.29-v202405061.29.3-20240506s3://amazon-eks/1.29.3/2024-04-19/
amazon-eks-node-1.29-v20240506
amazon-eks-node-al2023-arm64-standard-1.29-v20240506
amazon-eks-arm64-node-1.29-v20240506
amazon-eks-gpu-node-1.29-v20240506
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.215-203.850.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.131.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.87-99.174.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.28 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.28-v202405061.28.8-20240506s3://amazon-eks/1.28.8/2024-04-19/
amazon-eks-node-1.28-v20240506
amazon-eks-node-al2023-arm64-standard-1.28-v20240506
amazon-eks-arm64-node-1.28-v20240506
amazon-eks-gpu-node-1.28-v20240506
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.215-203.850.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.131.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.87-99.174.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.27 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.27-v202405061.27.12-20240506s3://amazon-eks/1.27.12/2024-04-19/
amazon-eks-node-1.27-v20240506
amazon-eks-node-al2023-arm64-standard-1.27-v20240506
amazon-eks-arm64-node-1.27-v20240506
amazon-eks-gpu-node-1.27-v20240506
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.215-203.850.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.131.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.87-99.174.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.26 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.26-v202405061.26.15-20240506s3://amazon-eks/1.26.15/2024-04-19/
amazon-eks-node-1.26-v20240506
amazon-eks-node-al2023-arm64-standard-1.26-v20240506
amazon-eks-arm64-node-1.26-v20240506
amazon-eks-gpu-node-1.26-v20240506
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.215-203.850.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.131.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.87-99.174.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.25 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.25-v202405061.25.16-20240506s3://amazon-eks/1.25.16/2024-04-19/
amazon-eks-node-1.25-v20240506
amazon-eks-node-al2023-arm64-standard-1.25-v20240506
amazon-eks-arm64-node-1.25-v20240506
amazon-eks-gpu-node-1.25-v20240506
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.215-203.850.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.131.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.87-99.174.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.24 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.24-v202405061.24.17-20240506s3://amazon-eks/1.24.17/2024-04-19/
amazon-eks-node-1.24-v20240506
amazon-eks-node-al2023-arm64-standard-1.24-v20240506
amazon-eks-arm64-node-1.24-v20240506
amazon-eks-gpu-node-1.24-v20240506
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.215-203.850.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.131.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.87-99.174.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.23 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.23-v202405061.23.17-20240506s3://amazon-eks/1.23.17/2024-04-19/
amazon-eks-node-1.23-v20240506
amazon-eks-node-al2023-arm64-standard-1.23-v20240506
amazon-eks-arm64-node-1.23-v20240506
amazon-eks-gpu-node-1.23-v20240506
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.215-203.850.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.131.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.87-99.174.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.22 + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-1.22-v202405061.22.17-20240506s3://amazon-eks/1.22.17/2024-04-19/
amazon-eks-arm64-node-1.22-v20240506
amazon-eks-gpu-node-1.22-v20240506
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.215-203.850.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+
+ +
+Kubernetes 1.21 + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-1.21-v202405061.21.14-20240506s3://amazon-eks/1.21.14/2024-04-19/
amazon-eks-arm64-node-1.21-v20240506
amazon-eks-gpu-node-1.21-v20240506
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.215-203.850.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+
+ + + +> [!NOTE] +> A recent change in the Linux kernel caused the EFA and NVIDIA drivers to be incompatible. More information is available in #1494. +> To prevent unexpected failures, the kernel in the GPU AMI will remain at the following versions until we have determined a solution: +> - Kubernetes 1.23 and below: `5.10.192-183.736.amzn2` + +--- + +# AMI Release v20240424 + + +> [!NOTE] +> +> Be aware that we are tracking an ongoing issue with the 535-series drivers and instance types in the `g4` and `g5` families. +> This release disabled GSP on 1.25+ on `g4` and `g5` families to mitigate this issue. +> More information is available in #1523. + +## What's Changed +* Add InstanceIdNodeName feature gate by @Issacwww in https://github.com/awslabs/amazon-eks-ami/pull/1731 +* Bump golang.org/x/net from 0.19.0 to 0.23.0 in /nodeadm by @dependabot in https://github.com/awslabs/amazon-eks-ami/pull/1769 + +## New Contributors +* @dependabot made their first contribution in https://github.com/awslabs/amazon-eks-ami/pull/1769 + +**Full Changelog**: https://github.com/awslabs/amazon-eks-ami/compare/v20240415...v20240424 + +--- + +

AMI Details

+ + +
+Kubernetes 1.29 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.29-v202404241.29.3-20240424s3://amazon-eks/1.29.3/2024-04-19/
amazon-eks-node-1.29-v20240424
amazon-eks-node-al2023-arm64-standard-1.29-v20240424
amazon-eks-arm64-node-1.29-v20240424
amazon-eks-gpu-node-1.29-v20240424
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.214-202.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.131.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.84-99.169.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.28 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.28-v202404241.28.8-20240424s3://amazon-eks/1.28.8/2024-04-19/
amazon-eks-node-1.28-v20240424
amazon-eks-node-al2023-arm64-standard-1.28-v20240424
amazon-eks-arm64-node-1.28-v20240424
amazon-eks-gpu-node-1.28-v20240424
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.214-202.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.131.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.84-99.169.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.27 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.27-v202404241.27.12-20240424s3://amazon-eks/1.27.12/2024-04-19/
amazon-eks-node-1.27-v20240424
amazon-eks-node-al2023-arm64-standard-1.27-v20240424
amazon-eks-arm64-node-1.27-v20240424
amazon-eks-gpu-node-1.27-v20240424
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.214-202.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.131.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.84-99.169.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.26 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.26-v202404241.26.15-20240424s3://amazon-eks/1.26.15/2024-04-19/
amazon-eks-node-1.26-v20240424
amazon-eks-node-al2023-arm64-standard-1.26-v20240424
amazon-eks-arm64-node-1.26-v20240424
amazon-eks-gpu-node-1.26-v20240424
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.214-202.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.131.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.84-99.169.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.25 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.25-v202404241.25.16-20240424s3://amazon-eks/1.25.16/2024-04-19/
amazon-eks-node-1.25-v20240424
amazon-eks-node-al2023-arm64-standard-1.25-v20240424
amazon-eks-arm64-node-1.25-v20240424
amazon-eks-gpu-node-1.25-v20240424
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.8.0-1.amzn2
kernel5.10.214-202.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.131.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.84-99.169.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.24 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.24-v202404241.24.17-20240424s3://amazon-eks/1.24.17/2024-04-19/
amazon-eks-node-1.24-v20240424
amazon-eks-node-al2023-arm64-standard-1.24-v20240424
amazon-eks-arm64-node-1.24-v20240424
amazon-eks-gpu-node-1.24-v20240424
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.214-202.855.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.131.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.84-99.169.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.23 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.23-v202404241.23.17-20240424s3://amazon-eks/1.23.17/2024-04-19/
amazon-eks-node-1.23-v20240424
amazon-eks-node-al2023-arm64-standard-1.23-v20240424
amazon-eks-arm64-node-1.23-v20240424
amazon-eks-gpu-node-1.23-v20240424
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.214-202.855.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.3.131.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.84-99.169.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.22 + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-1.22-v202404241.22.17-20240424s3://amazon-eks/1.22.17/2024-04-19/
amazon-eks-arm64-node-1.22-v20240424
amazon-eks-gpu-node-1.22-v20240424
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.214-202.855.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+
+ +
+Kubernetes 1.21 + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-1.21-v202404241.21.14-20240424s3://amazon-eks/1.21.14/2024-04-19/
amazon-eks-arm64-node-1.21-v20240424
amazon-eks-gpu-node-1.21-v20240424
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.3.131.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.214-202.855.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+
+ + +> **Note** +> A recent change in the Linux kernel caused the EFA and NVIDIA drivers to be incompatible. More information is available in #1494. +> To prevent unexpected failures, the kernel in the GPU AMI will remain at the following versions until we have determined a solution: +> - Kubernetes 1.24 and below: `5.10.192-183.736.amzn2` + +--- + + +# AMI Release v20240415 + + + +> [!NOTE] +> +> Be aware that we are tracking an ongoing issue with the 535-series drivers and instance types in the `g4` and `g5` families. +> This release disabled GSP on 1.26+ on `g4` and `g5` families. +> More information is available in #1523. +> + +## What's Changed +* Use GitHub token when fetching Packer plugins by @cartermckinnon in https://github.com/awslabs/amazon-eks-ami/pull/1757 +* Wait for network-online to configure clocksource by @cartermckinnon in https://github.com/awslabs/amazon-eks-ami/pull/1758 + + +**Full Changelog**: https://github.com/awslabs/amazon-eks-ami/compare/v20240409...v20240415 + +--- + +

AMI Details

+ + +
+Kubernetes 1.29 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.29-v202404151.29.0-20240415s3://amazon-eks/1.29.0/2024-01-04/
amazon-eks-node-1.29-v20240415
amazon-eks-node-al2023-arm64-standard-1.29-v20240415
amazon-eks-arm64-node-1.29-v20240415
amazon-eks-gpu-node-1.29-v20240415
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.2.2303.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.6.0-1.amzn2
kernel5.10.213-201.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.2.2303.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.82-99.168.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.28 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.28-v202404151.28.5-20240415s3://amazon-eks/1.28.5/2024-01-04/
amazon-eks-node-1.28-v20240415
amazon-eks-node-al2023-arm64-standard-1.28-v20240415
amazon-eks-arm64-node-1.28-v20240415
amazon-eks-gpu-node-1.28-v20240415
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.2.2303.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.6.0-1.amzn2
kernel5.10.213-201.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.2.2303.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.82-99.168.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.27 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.27-v202404151.27.9-20240415s3://amazon-eks/1.27.9/2024-01-04/
amazon-eks-node-1.27-v20240415
amazon-eks-node-al2023-arm64-standard-1.27-v20240415
amazon-eks-arm64-node-1.27-v20240415
amazon-eks-gpu-node-1.27-v20240415
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.2.2303.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.6.0-1.amzn2
kernel5.10.213-201.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.2.2303.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.82-99.168.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.26 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.26-v202404151.26.12-20240415s3://amazon-eks/1.26.12/2024-01-04/
amazon-eks-node-1.26-v20240415
amazon-eks-node-al2023-arm64-standard-1.26-v20240415
amazon-eks-arm64-node-1.26-v20240415
amazon-eks-gpu-node-1.26-v20240415
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.2.2303.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda-12-212.2.2-1
efa2.6.0-1.amzn2
kernel5.10.213-201.855.amzn2
nvidia-driver-latest-dkms535.161.08-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.2.2303.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.82-99.168.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.25 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.25-v202404151.25.16-20240415s3://amazon-eks/1.25.16/2024-01-04/
amazon-eks-node-1.25-v20240415
amazon-eks-node-al2023-arm64-standard-1.25-v20240415
amazon-eks-arm64-node-1.25-v20240415
amazon-eks-gpu-node-1.25-v20240415
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.2.2303.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
efa2.6.0-1.amzn2
kernel5.10.213-201.855.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.2.2303.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.82-99.168.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.24 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.24-v202404151.24.17-20240415s3://amazon-eks/1.24.17/2024-01-04/
amazon-eks-node-1.24-v20240415
amazon-eks-node-al2023-arm64-standard-1.24-v20240415
amazon-eks-arm64-node-1.24-v20240415
amazon-eks-gpu-node-1.24-v20240415
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.2.2303.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.213-201.855.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.2.2303.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.82-99.168.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.23 + + + + + + + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-al2023-x86_64-standard-1.23-v202404151.23.17-20240415s3://amazon-eks/1.23.17/2024-01-04/
amazon-eks-node-1.23-v20240415
amazon-eks-node-al2023-arm64-standard-1.23-v20240415
amazon-eks-arm64-node-1.23-v20240415
amazon-eks-gpu-node-1.23-v20240415
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.2.2303.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.213-201.855.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+ + + + + + + + + + + + + + + + + + + + + + +
PackageAL2023_x86_64_STANDARDAL2023_ARM_64_STANDARD
amazon-ssm-agent3.2.2303.0-1.amzn2023
containerd1.7.11-1.amzn2023.0.1
kernel6.1.82-99.168.amzn2023
runc1.1.11-1.amzn2023.0.1
+
+ +
+Kubernetes 1.22 + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-1.22-v202404151.22.17-20240415s3://amazon-eks/1.22.17/2024-02-09/
amazon-eks-arm64-node-1.22-v20240415
amazon-eks-gpu-node-1.22-v20240415
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.2.2303.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.213-201.855.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+
+ +
+Kubernetes 1.21 + + + + + + + + + + + + + + + + +
AMI NamesRelease versionIncluded artifacts
amazon-eks-node-1.21-v202404151.21.14-20240415s3://amazon-eks/1.21.14/2024-02-09/
amazon-eks-arm64-node-1.21-v20240415
amazon-eks-gpu-node-1.21-v20240415
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
PackageAL2_x86_64AL2_ARM_64AL2_x86_64_GPU
amazon-ssm-agent3.2.2303.0-1.amzn2
containerd1.7.11-1.amzn2.0.1
cuda12.2.2-1
cuda-12-212.2.2-1
docker25.0.3-1.amzn2.0.1
kernel5.10.213-201.855.amzn25.10.192-183.736.amzn2
nvidia-driver-latest-dkms535.161.07-1.el7
runc1.1.11-1.amzn2
+
+ + +> **Note** +> A recent change in the Linux kernel caused the EFA and NVIDIA drivers to be incompatible. More information is available in #1494. +> To prevent unexpected failures, the kernel in the GPU AMI will remain at the following versions until we have determined a solution: +> - Kubernetes 1.25 and below: `5.10.192-183.736.amzn2` +>>>>>>> upstream/main --- diff --git a/doc/usage/overview.md b/doc/usage/overview.md index 785c33908..bd6b43de0 100644 --- a/doc/usage/overview.md +++ b/doc/usage/overview.md @@ -149,9 +149,13 @@ To build the EKS Optimized AMI, you will need the following permissions: { "Effect": "Allow", "Action": [ - "s3:GetObject" + "s3:GetObject", + "s3:ListBucket" ], - "Resource": "arn:aws:s3:::amazon-eks/*" + "Resource": [ + "arn:aws:s3:::amazon-eks/*", + "arn:aws:s3:::amazon-eks" + ] } ] } diff --git a/log-collector-script/linux/eks-log-collector.sh b/log-collector-script/linux/eks-log-collector.sh old mode 100644 new mode 100755 index c32529b40..e7992c3fa --- a/log-collector-script/linux/eks-log-collector.sh +++ b/log-collector-script/linux/eks-log-collector.sh @@ -20,7 +20,7 @@ export LANG="C" export LC_ALL="C" # Global options -readonly PROGRAM_VERSION="0.7.7" +readonly PROGRAM_VERSION="0.7.8" readonly PROGRAM_SOURCE="https://github.com/awslabs/amazon-eks-ami/blob/main/log-collector-script/" readonly PROGRAM_NAME="$(basename "$0" .sh)" readonly PROGRAM_DIR="/opt/log-collector" @@ -313,6 +313,7 @@ get_mounts_info() { lvs > "${COLLECT_DIR}"/storage/lvs.txt pvs > "${COLLECT_DIR}"/storage/pvs.txt vgs > "${COLLECT_DIR}"/storage/vgs.txt + cp --force /etc/fstab "${COLLECT_DIR}"/storage/fstab.txt mount -t xfs | awk '{print $1}' | xargs -I{} -- sh -c "xfs_info {}; xfs_db -r -c 'freesp -s' {}" > "${COLLECT_DIR}"/storage/xfs.txt mount | grep ^overlay | sed 's/.*upperdir=//' | sed 's/,.*//' | xargs -n 1 timeout 75 du -sh | grep -v ^0 > "${COLLECT_DIR}"/storage/pod_local_storage.txt ok @@ -715,6 +716,16 @@ get_system_services() { timeout 75 cat 
/proc/stat > "${COLLECT_DIR}"/system/procstat.txt 2>&1 timeout 75 cat /proc/[0-9]*/stat > "${COLLECT_DIR}"/system/allprocstat.txt 2>&1 + # collect pids which have large environments + echo -e "PID\tCount" > "${COLLECT_DIR}/system/large_environments.txt" + for i in /proc/*/environ; do + ENV_COUNT=$(tr '\0' '\n' < "$i" | grep -cv '^$') + if ((ENV_COUNT > 1000)); then + PID=$(echo "$i" | sed 's#/proc/##' | sed 's#/environ##') + echo -e "${PID}\t${ENV_COUNT}" >> "${COLLECT_DIR}/system/large_environments.txt" + fi + done + ok } diff --git a/nodeadm/Makefile b/nodeadm/Makefile index ece05a79c..470c9ddf4 100644 --- a/nodeadm/Makefile +++ b/nodeadm/Makefile @@ -1,6 +1,3 @@ -# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. -ENVTEST_K8S_VERSION = 1.27.1 - # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) GOBIN=$(shell go env GOPATH)/bin @@ -92,29 +89,16 @@ $(LOCALBIN): mkdir -p $(LOCALBIN) ## Tool Binaries -KUSTOMIZE ?= $(LOCALBIN)/kustomize CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen CONVERSION_GEN ?= $(LOCALBIN)/conversion-gen CRD_REF_DOCS ?= $(LOCALBIN)/crd-ref-docs -ENVTEST ?= $(LOCALBIN)/setup-envtest ## Tool Versions -KUSTOMIZE_VERSION ?= v5.0.1 -CONTROLLER_TOOLS_VERSION ?= v0.12.0 +CONTROLLER_TOOLS_VERSION ?= v0.14.0 CODE_GENERATOR_VERSION ?= v0.28.1 CRD_REF_DOCS_VERSION ?= cf959ab94ea543cb8efd25dc35081880b7ca6a81 -tools: kustomize controller-gen conversion-gen crd-ref-docs ## Install the toolchain. - -.PHONY: kustomize -kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. If wrong version is installed, it will be removed before downloading. -$(KUSTOMIZE): $(LOCALBIN) - @if test -x $(LOCALBIN)/kustomize && ! $(LOCALBIN)/kustomize version | grep -q $(KUSTOMIZE_VERSION); then \ - echo "$(LOCALBIN)/kustomize version is not expected $(KUSTOMIZE_VERSION). 
Removing it before installing."; \ - rm -rf $(LOCALBIN)/kustomize; \ - fi - test -s $(LOCALBIN)/kustomize || \ - GOBIN=$(LOCALBIN) GO111MODULE=on go install sigs.k8s.io/kustomize/kustomize/v5@$(KUSTOMIZE_VERSION) +tools: controller-gen conversion-gen crd-ref-docs ## Install the toolchain. .PHONY: controller-gen controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. If wrong version is installed, it will be overwritten. diff --git a/nodeadm/api/v1alpha1/nodeconfig_types.go b/nodeadm/api/v1alpha1/nodeconfig_types.go index 122dd36b5..2dfd77e38 100644 --- a/nodeadm/api/v1alpha1/nodeconfig_types.go +++ b/nodeadm/api/v1alpha1/nodeconfig_types.go @@ -49,7 +49,7 @@ type ClusterDetails struct { // CertificateAuthority is a base64-encoded string of your cluster's certificate authority chain. CertificateAuthority []byte `json:"certificateAuthority,omitempty"` - // CIDR is your cluster's Pod IP CIDR. This value is used to infer your cluster's DNS address. + // CIDR is your cluster's service CIDR block. This value is used to infer your cluster's DNS address. CIDR string `json:"cidr,omitempty"` // EnableOutpost determines how your node is configured when running on an AWS Outpost. @@ -61,21 +61,25 @@ type ClusterDetails struct { // KubeletOptions are additional parameters passed to `kubelet`. type KubeletOptions struct { - // Config is a [`KubeletConfiguration`](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1/) + // Config is a [`KubeletConfiguration`](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/) // that will be merged with the defaults. Config map[string]runtime.RawExtension `json:"config,omitempty"` - // Flags are [command-line `kubelet`` arguments](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/). + // Flags are [command-line `kubelet` arguments](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/). // that will be appended to the defaults. 
Flags []string `json:"flags,omitempty"` } // ContainerdOptions are additional parameters passed to `containerd`. type ContainerdOptions struct { - // Config is inline [`containerd` configuration TOML](https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.toml.5.md) - // that will be [imported](https://github.com/containerd/containerd/blob/32169d591dbc6133ef7411329b29d0c0433f8c4d/docs/man/containerd-config.toml.5.md?plain=1#L146-L154) - // by the default configuration file. + // Config is an inline [`containerd` configuration TOML](https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.toml.5.md) + // that will be merged with the defaults. Config string `json:"config,omitempty"` + + // BaseRuntimeSpec is the OCI runtime specification upon which all containers will be based. + // The provided spec will be merged with the default spec; so that a partial spec may be provided. + // For more information, see: https://github.com/opencontainers/runtime-spec + BaseRuntimeSpec map[string]runtime.RawExtension `json:"baseRuntimeSpec,omitempty"` } // InstanceOptions determines how the node's operating system and devices are configured. diff --git a/nodeadm/api/v1alpha1/zz_generated.deepcopy.go b/nodeadm/api/v1alpha1/zz_generated.deepcopy.go index c94f897fa..ae21ff1a7 100644 --- a/nodeadm/api/v1alpha1/zz_generated.deepcopy.go +++ b/nodeadm/api/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated // Code generated by controller-gen. DO NOT EDIT. @@ -37,6 +36,13 @@ func (in *ClusterDetails) DeepCopy() *ClusterDetails { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ContainerdOptions) DeepCopyInto(out *ContainerdOptions) { *out = *in + if in.BaseRuntimeSpec != nil { + in, out := &in.BaseRuntimeSpec, &out.BaseRuntimeSpec + *out = make(map[string]runtime.RawExtension, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerdOptions. @@ -169,7 +175,7 @@ func (in *NodeConfigList) DeepCopyObject() runtime.Object { func (in *NodeConfigSpec) DeepCopyInto(out *NodeConfigSpec) { *out = *in in.Cluster.DeepCopyInto(&out.Cluster) - out.Containerd = in.Containerd + in.Containerd.DeepCopyInto(&out.Containerd) out.Instance = in.Instance in.Kubelet.DeepCopyInto(&out.Kubelet) if in.FeatureGates != nil { diff --git a/nodeadm/crds/node.eks.aws_nodeconfigs.yaml b/nodeadm/crds/node.eks.aws_nodeconfigs.yaml index 3758eedfc..a556b7546 100644 --- a/nodeadm/crds/node.eks.aws_nodeconfigs.yaml +++ b/nodeadm/crds/node.eks.aws_nodeconfigs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.12.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: nodeconfigs.node.eks.aws spec: group: node.eks.aws @@ -20,21 +20,27 @@ spec: description: NodeConfig is the primary configuration object for `nodeadm`. properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: properties: cluster: - description: ClusterDetails contains the coordinates of your EKS cluster. + description: |- + ClusterDetails contains the coordinates of your EKS cluster. These details can be found using the [DescribeCluster API](https://docs.aws.amazon.com/eks/latest/APIReference/API_DescribeCluster.html). properties: apiServerEndpoint: @@ -47,8 +53,8 @@ spec: format: byte type: string cidr: - description: CIDR is your cluster's Pod IP CIDR. This value is - used to infer your cluster's DNS address. + description: CIDR is your cluster's service CIDR block. This value + is used to infer your cluster's DNS address. type: string enableOutpost: description: EnableOutpost determines how your node is configured @@ -66,10 +72,19 @@ spec: description: ContainerdOptions are additional parameters passed to `containerd`. properties: + baseRuntimeSpec: + additionalProperties: + type: object + x-kubernetes-preserve-unknown-fields: true + description: |- + BaseRuntimeSpec is the OCI runtime specification upon which all containers will be based. + The provided spec will be merged with the default spec; so that a partial spec may be provided. 
+ For more information, see: https://github.com/opencontainers/runtime-spec + type: object config: - description: Config is inline [`containerd` configuration TOML](https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.toml.5.md) - that will be [imported](https://github.com/containerd/containerd/blob/32169d591dbc6133ef7411329b29d0c0433f8c4d/docs/man/containerd-config.toml.5.md?plain=1#L146-L154) - by the default configuration file. + description: |- + Config is an inline [`containerd` configuration TOML](https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.toml.5.md) + that will be merged with the defaults. type: string type: object featureGates: @@ -83,7 +98,8 @@ spec: and devices are configured. properties: localStorage: - description: LocalStorageOptions control how [EC2 instance stores](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) + description: |- + LocalStorageOptions control how [EC2 instance stores](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) are used when available. properties: strategy: @@ -102,11 +118,13 @@ spec: additionalProperties: type: object x-kubernetes-preserve-unknown-fields: true - description: Config is a [`KubeletConfiguration`](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1/) + description: |- + Config is a [`KubeletConfiguration`](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/) that will be merged with the defaults. type: object flags: - description: Flags are [command-line `kubelet`` arguments](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/). + description: |- + Flags are [command-line `kubelet` arguments](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/). that will be appended to the defaults. 
items: type: string diff --git a/nodeadm/doc/api.md b/nodeadm/doc/api.md index 20dcd95d2..f2f770240 100644 --- a/nodeadm/doc/api.md +++ b/nodeadm/doc/api.md @@ -20,7 +20,7 @@ _Appears in:_ | `name` _string_ | Name is the name of your EKS cluster | | `apiServerEndpoint` _string_ | APIServerEndpoint is the URL of your EKS cluster's kube-apiserver. | | `certificateAuthority` _[byte](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#byte-v1-meta) array_ | CertificateAuthority is a base64-encoded string of your cluster's certificate authority chain. | -| `cidr` _string_ | CIDR is your cluster's Pod IP CIDR. This value is used to infer your cluster's DNS address. | +| `cidr` _string_ | CIDR is your cluster's service CIDR block. This value is used to infer your cluster's DNS address. | | `enableOutpost` _boolean_ | EnableOutpost determines how your node is configured when running on an AWS Outpost. | | `id` _string_ | ID is an identifier for your cluster; this is only used when your node is running on an AWS Outpost. | @@ -33,7 +33,8 @@ _Appears in:_ | Field | Description | | --- | --- | -| `config` _string_ | Config is inline [`containerd` configuration TOML](https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.toml.5.md) that will be [imported](https://github.com/containerd/containerd/blob/32169d591dbc6133ef7411329b29d0c0433f8c4d/docs/man/containerd-config.toml.5.md?plain=1#L146-L154) by the default configuration file. | +| `config` _string_ | Config is an inline [`containerd` configuration TOML](https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.toml.5.md) that will be merged with the defaults. | +| `baseRuntimeSpec` _object (keys:string, values:RawExtension)_ | BaseRuntimeSpec is the OCI runtime specification upon which all containers will be based. The provided spec will be merged with the default spec; so that a partial spec may be provided. 
For more information, see: https://github.com/opencontainers/runtime-spec | #### Feature @@ -67,8 +68,8 @@ _Appears in:_ | Field | Description | | --- | --- | -| `config` _object (keys:string, values:RawExtension)_ | Config is a [`KubeletConfiguration`](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1/) that will be merged with the defaults. | -| `flags` _string array_ | Flags are [command-line `kubelet`` arguments](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/). that will be appended to the defaults. | +| `config` _object (keys:string, values:RawExtension)_ | Config is a [`KubeletConfiguration`](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/) that will be merged with the defaults. | +| `flags` _string array_ | Flags are [command-line `kubelet` arguments](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/). that will be appended to the defaults. | #### LocalStorageOptions diff --git a/nodeadm/doc/examples.md b/nodeadm/doc/examples.md index 1be2c0b5a..7ebbaedc5 100644 --- a/nodeadm/doc/examples.md +++ b/nodeadm/doc/examples.md @@ -64,40 +64,32 @@ The configuration objects will be merged in the order they appear in the MIME mu --- ## Using instance ID as node name (experimental) -We introduced the ability to name nodes using their EC2 instance id instead of EC2 private DNS name for AL2023. - When the `InstanceIdNodeName` feature gate is enabled, `nodeadm` will use the EC2 instance's ID (e.g. `i-abcdefg1234`) as the name of the `Node` object created by `kubelet`, instead of the EC2 instance's private DNS Name (e.g. `ip-192-168-1-1.ec2.internal`). There are several benefits of doing this: 1. Your `Node` names are more meaningful in, for example, the output of `kubectl get nodes`. 2. The `Node` name, which is in the critical path of `kubelet` authentication, is non-volatile. While the private DNS name of an instance may change, its ID cannot. 3. 
The `ec2:DescribeInstances` permission can be removed from your node role's IAM policy; this is no longer necessary. -To opt-in to the feature, you will need -- [Create new worker node IAM role](https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html#create-worker-node-role) -- [Update the `aws-auth` ConfigMap with above created role](https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html#aws-auth-users). See example below +### To enable this feature, you will need to: +1. [Create a new worker node IAM role](https://docs.aws.amazon.com/eks/latest/userguide/create-node-role.html#create-worker-node-role) + - ⚠️ **Note**: you should create a new role when migrating an existing cluster to avoid authentication failures on existing nodes. +2. [Update the `aws-auth` ConfigMap with above created role](https://docs.aws.amazon.com/eks/latest/userguide/auth-configmap.html#aws-auth-users). For example: ``` - groups: - system:bootstrappers - system:nodes - rolearn: + rolearn: $ROLE_CREATED_ABOVE username: system:node:{{SessionName}} ``` -- Enable the new feature gate `InstanceIdNodeName` in the user data, See example configuration below +3. Enable the feature gate in your user data: ``` --- apiVersion: node.eks.aws/v1alpha1 kind: NodeConfig spec: - cluster: - name: my-cluster - apiServerEndpoint: https://example.com - certificateAuthority: Y2VydGlmaWNhdGVBdXRob3JpdHk= - cidr: 10.100.0.0/16 featureGates: InstanceIdNodeName: true ``` -- [Create launch template with above user data](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html). -- [Create the node group with launch template](https://docs.aws.amazon.com/eks/latest/userguide/create-managed-node-group.html). --- @@ -119,3 +111,24 @@ spec: ``` Can be used to disable deletion of unpacked image layers in the `containerd` content store. 
+ +--- + +## Modifying container RLIMITs + +If your workload requires different RLIMITs than the defaults, you can use the `baseRuntimeSpec` option of `containerd` to override them: + +``` +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + cluster: ... + containerd: + baseRuntimeSpec: + process: + rlimits: + - type: RLIMIT_NOFILE + soft: 1024 + hard: 1024 +``` diff --git a/nodeadm/go.mod b/nodeadm/go.mod index 9d9f29790..e436100d6 100644 --- a/nodeadm/go.mod +++ b/nodeadm/go.mod @@ -12,7 +12,8 @@ require ( github.com/containerd/containerd v1.7.13 github.com/coreos/go-systemd/v22 v22.5.0 github.com/integrii/flaggy v1.5.2 - github.com/stretchr/testify v1.8.4 + github.com/pelletier/go-toml/v2 v2.2.2 + github.com/stretchr/testify v1.9.0 go.uber.org/zap v1.26.0 golang.org/x/mod v0.14.0 k8s.io/apimachinery v0.29.1 @@ -24,7 +25,7 @@ require ( require ( github.com/Microsoft/go-winio v0.6.1 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect golang.org/x/tools v0.16.1 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/grpc v1.58.3 // indirect diff --git a/nodeadm/go.sum b/nodeadm/go.sum index 6563a353a..04b1cff4c 100644 --- a/nodeadm/go.sum +++ b/nodeadm/go.sum @@ -98,6 +98,8 @@ github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= @@ -117,13 +119,15 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= diff --git a/nodeadm/internal/api/bridge/zz_generated.conversion.go b/nodeadm/internal/api/bridge/zz_generated.conversion.go index df44c16ad..cd521ac15 100644 --- a/nodeadm/internal/api/bridge/zz_generated.conversion.go +++ 
b/nodeadm/internal/api/bridge/zz_generated.conversion.go @@ -136,6 +136,7 @@ func Convert_api_ClusterDetails_To_v1alpha1_ClusterDetails(in *api.ClusterDetail func autoConvert_v1alpha1_ContainerdOptions_To_api_ContainerdOptions(in *v1alpha1.ContainerdOptions, out *api.ContainerdOptions, s conversion.Scope) error { out.Config = in.Config + out.BaseRuntimeSpec = *(*api.InlineDocument)(unsafe.Pointer(&in.BaseRuntimeSpec)) return nil } @@ -146,6 +147,7 @@ func Convert_v1alpha1_ContainerdOptions_To_api_ContainerdOptions(in *v1alpha1.Co func autoConvert_api_ContainerdOptions_To_v1alpha1_ContainerdOptions(in *api.ContainerdOptions, out *v1alpha1.ContainerdOptions, s conversion.Scope) error { out.Config = in.Config + out.BaseRuntimeSpec = *(*map[string]runtime.RawExtension)(unsafe.Pointer(&in.BaseRuntimeSpec)) return nil } diff --git a/nodeadm/internal/api/merge.go b/nodeadm/internal/api/merge.go index 2c8c26d89..cb1a3380e 100644 --- a/nodeadm/internal/api/merge.go +++ b/nodeadm/internal/api/merge.go @@ -6,30 +6,40 @@ import ( "dario.cat/mergo" "github.com/awslabs/amazon-eks-ami/nodeadm/internal/util" + "github.com/pelletier/go-toml/v2" "k8s.io/apimachinery/pkg/runtime" ) // Merges two NodeConfigs with custom collision handling func (dst *NodeConfig) Merge(src *NodeConfig) error { - return mergo.Merge(dst, src, mergo.WithOverride, mergo.WithTransformers(kubeletTransformer{})) + return mergo.Merge(dst, src, mergo.WithOverride, mergo.WithTransformers(nodeConfigTransformer{})) } const ( kubeletFlagsName = "Flags" kubeletConfigName = "Config" + + containerdConfigName = "Config" ) -type kubeletTransformer struct{} +type nodeConfigTransformer struct{} -func (k kubeletTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(KubeletOptions{}) { +func (t nodeConfigTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(ContainerdOptions{}) { return func(dst, src reflect.Value) error 
{ - k.transformFlags( + return t.transformContainerdConfig( + dst.FieldByName(containerdConfigName), + src.FieldByName(containerdConfigName), + ) + } + } else if typ == reflect.TypeOf(KubeletOptions{}) { + return func(dst, src reflect.Value) error { + t.transformKubeletFlags( dst.FieldByName(kubeletFlagsName), src.FieldByName(kubeletFlagsName), ) - if err := k.transformConfig( + if err := t.transformKubeletConfig( dst.FieldByName(kubeletConfigName), src.FieldByName(kubeletConfigName), ); err != nil { @@ -42,7 +52,7 @@ func (k kubeletTransformer) Transformer(typ reflect.Type) func(dst, src reflect. return nil } -func (k kubeletTransformer) transformFlags(dst, src reflect.Value) { +func (t nodeConfigTransformer) transformKubeletFlags(dst, src reflect.Value) { if dst.CanSet() { // kubelet flags are parsed using https://github.com/spf13/pflag, where // flag order determines precedence. For single-value flags this is @@ -56,7 +66,7 @@ func (k kubeletTransformer) transformFlags(dst, src reflect.Value) { } } -func (k kubeletTransformer) transformConfig(dst, src reflect.Value) error { +func (t nodeConfigTransformer) transformKubeletConfig(dst, src reflect.Value) error { if dst.CanSet() { if dst.Len() <= 0 { // if the destination is empty just use the source data @@ -64,7 +74,7 @@ func (k kubeletTransformer) transformConfig(dst, src reflect.Value) error { } else if src.Len() > 0 { // kubelet config in an inline document here, so we explicitly // perform a merge with dst and src data. 
- mergedMap, err := util.DocumentMerge(dst.Interface(), src.Interface(), mergo.WithOverride) + mergedMap, err := util.Merge(dst.Interface(), src.Interface(), json.Marshal, json.Unmarshal) if err != nil { return err } @@ -78,6 +88,30 @@ func (k kubeletTransformer) transformConfig(dst, src reflect.Value) error { return nil } +func (t nodeConfigTransformer) transformContainerdConfig(dst, src reflect.Value) error { + if dst.CanSet() { + if dst.Len() <= 0 { + // if the destination is empty just use the source data + dst.Set(src) + } else if src.Len() > 0 { + // containerd config is a string an inline string here, so we + // explicitly perform a merge with dst and src data. + dstConfig := []byte(dst.String()) + srcConfig := []byte(src.String()) + configBytes, err := util.Merge(dstConfig, srcConfig, toml.Marshal, toml.Unmarshal) + if err != nil { + return err + } + config, err := toml.Marshal(configBytes) + if err != nil { + return err + } + dst.SetString(string(config)) + } + } + return nil +} + func toInlineDocument(m map[string]interface{}) (InlineDocument, error) { var rawMap = make(InlineDocument) for key, value := range m { diff --git a/nodeadm/internal/api/merge_test.go b/nodeadm/internal/api/merge_test.go index d546adc37..a59105574 100644 --- a/nodeadm/internal/api/merge_test.go +++ b/nodeadm/internal/api/merge_test.go @@ -3,6 +3,8 @@ package api import ( "reflect" "testing" + + "github.com/pelletier/go-toml/v2" ) func toInlineDocumentMust(m map[string]interface{}) InlineDocument { @@ -13,6 +15,20 @@ func toInlineDocumentMust(m map[string]interface{}) InlineDocument { return d } +// pass the toml through serialization and deserialization to get a normalized +// payload for tests that has deterministic ordering and formatting +func tomlNormalize(t string) string { + var m map[string]interface{} + if err := toml.Unmarshal([]byte(t), &m); err != nil { + panic(err) + } + s, err := toml.Marshal(m) + if err != nil { + panic(err) + } + return string(s) +} + func 
TestMerge(t *testing.T) { var tests = []struct { name string @@ -44,6 +60,26 @@ func TestMerge(t *testing.T) { Cluster: ClusterDetails{Name: "next"}, }, }, + { + name: "merge with deeply nested toml object", + baseSpec: NodeConfigSpec{ + Containerd: ContainerdOptions{ + Config: "[a.b.c.d]\nf = 0", + }, + }, + patchSpec: NodeConfigSpec{ + Containerd: ContainerdOptions{ + Config: "[a.b.c.d]\ne = 0", + }, + }, + // This test is primarily for clarity on what happens during the + // expansion of nested toml objects. + expectedSpec: NodeConfigSpec{ + Containerd: ContainerdOptions{ + Config: "[a]\n[a.b]\n[a.b.c]\n[a.b.c.d]\ne = 0\nf = 0\n", + }, + }, + }, { name: "customer overrides orchestrator defaults", baseSpec: NodeConfigSpec{ @@ -66,7 +102,33 @@ func TestMerge(t *testing.T) { }, }, Containerd: ContainerdOptions{ - Config: "base", + Config: tomlNormalize(` +version = 2 +root = "/var/lib/containerd" +state = "/run/containerd" + +[grpc] +address = "/run/containerd/containerd.sock" + +[plugins."io.containerd.grpc.v1.cri".containerd] +default_runtime_name = "runc" +discard_unpacked_layers = true + +[plugins."io.containerd.grpc.v1.cri"] +sandbox_image = "{{.SandboxImage}}" + +[plugins."io.containerd.grpc.v1.cri".registry] +config_path = "/etc/containerd/certs.d:/etc/docker/certs.d" + +[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] +runtime_type = "io.containerd.runc.v2" + +[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] +SystemdCgroup = true + +[plugins."io.containerd.grpc.v1.cri".cni] +bin_dir = "/opt/cni/bin" +conf_dir = "/etc/cni/net.d"`), }, }, patchSpec: NodeConfigSpec{ @@ -82,7 +144,13 @@ func TestMerge(t *testing.T) { }, }, Containerd: ContainerdOptions{ - Config: "patch", + Config: tomlNormalize(` +version = 2 +[grpc] +address = "/run/containerd/containerd.sock.2" + +[plugins."io.containerd.grpc.v1.cri".containerd] +discard_unpacked_layers = false`), }, }, expectedSpec: NodeConfigSpec{ @@ -107,7 +175,33 @@ func TestMerge(t 
*testing.T) { }, }, Containerd: ContainerdOptions{ - Config: "patch", + Config: tomlNormalize(` +version = 2 +root = "/var/lib/containerd" +state = "/run/containerd" + +[grpc] +address = "/run/containerd/containerd.sock.2" + +[plugins."io.containerd.grpc.v1.cri".containerd] +default_runtime_name = "runc" +discard_unpacked_layers = false + +[plugins."io.containerd.grpc.v1.cri"] +sandbox_image = "{{.SandboxImage}}" + +[plugins."io.containerd.grpc.v1.cri".registry] +config_path = "/etc/containerd/certs.d:/etc/docker/certs.d" + +[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] +runtime_type = "io.containerd.runc.v2" + +[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] +SystemdCgroup = true + +[plugins."io.containerd.grpc.v1.cri".cni] +bin_dir = "/opt/cni/bin" +conf_dir = "/etc/cni/net.d"`), }, }, }, diff --git a/nodeadm/internal/api/types.go b/nodeadm/internal/api/types.go index 5bfb1f4d1..8ad2dedb8 100644 --- a/nodeadm/internal/api/types.go +++ b/nodeadm/internal/api/types.go @@ -78,10 +78,8 @@ type KubeletOptions struct { type InlineDocument map[string]runtime.RawExtension type ContainerdOptions struct { - // Config is an inline containerd config toml document that can be provided - // by the user to override default generated configurations - // https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.toml.5.md - Config string `json:"config,omitempty"` + Config string `json:"config,omitempty"` + BaseRuntimeSpec InlineDocument `json:"baseRuntimeSpec,omitempty"` } type IPFamily string diff --git a/nodeadm/internal/api/zz_generated.deepcopy.go b/nodeadm/internal/api/zz_generated.deepcopy.go index 987fb7b6b..25f27000f 100644 --- a/nodeadm/internal/api/zz_generated.deepcopy.go +++ b/nodeadm/internal/api/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated // Code generated by controller-gen. DO NOT EDIT. 
@@ -37,6 +36,13 @@ func (in *ClusterDetails) DeepCopy() *ClusterDetails { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ContainerdOptions) DeepCopyInto(out *ContainerdOptions) { *out = *in + if in.BaseRuntimeSpec != nil { + in, out := &in.BaseRuntimeSpec, &out.BaseRuntimeSpec + *out = make(InlineDocument, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerdOptions. @@ -221,7 +227,7 @@ func (in *NodeConfigList) DeepCopyObject() runtime.Object { func (in *NodeConfigSpec) DeepCopyInto(out *NodeConfigSpec) { *out = *in in.Cluster.DeepCopyInto(&out.Cluster) - out.Containerd = in.Containerd + in.Containerd.DeepCopyInto(&out.Containerd) out.Instance = in.Instance in.Kubelet.DeepCopyInto(&out.Kubelet) if in.FeatureGates != nil { diff --git a/nodeadm/internal/configprovider/userdata.go b/nodeadm/internal/configprovider/userdata.go index e3ad4a6ee..671f8881f 100644 --- a/nodeadm/internal/configprovider/userdata.go +++ b/nodeadm/internal/configprovider/userdata.go @@ -2,6 +2,8 @@ package configprovider import ( "bytes" + "compress/gzip" + "encoding/base64" "fmt" "io" "mime" @@ -22,17 +24,39 @@ const ( nodeConfigMediaType = "application/" + api.GroupName ) -type userDataConfigProvider struct{} +type userDataProvider interface { + GetUserData() ([]byte, error) +} + +type imdsUserDataProvider struct{} + +func (p *imdsUserDataProvider) GetUserData() ([]byte, error) { + return imds.GetUserData() +} + +type userDataConfigProvider struct { + userDataProvider userDataProvider +} func NewUserDataConfigProvider() ConfigProvider { - return &userDataConfigProvider{} + return &userDataConfigProvider{ + userDataProvider: &imdsUserDataProvider{}, + } } -func (ics *userDataConfigProvider) Provide() (*internalapi.NodeConfig, error) { - userData, err := imds.GetUserData() +func (p 
*userDataConfigProvider) Provide() (*internalapi.NodeConfig, error) { + userData, err := p.userDataProvider.GetUserData() if err != nil { return nil, err } + userData, err = decodeIfBase64(userData) + if err != nil { + return nil, fmt.Errorf("failed to decode user data: %v", err) + } + userData, err = decompressIfGZIP(userData) + if err != nil { + return nil, fmt.Errorf("failed to decompress user data: %v", err) + } // if the MIME data fails to parse as a multipart document, then fall back // to parsing the entire userdata as the node config. if multipartReader, err := getMIMEMultipartReader(userData); err == nil { @@ -85,6 +109,14 @@ func parseMultipart(userDataReader *multipart.Reader) (*internalapi.NodeConfig, if err != nil { return nil, err } + nodeConfigPart, err = decodeIfBase64(nodeConfigPart) + if err != nil { + return nil, err + } + nodeConfigPart, err = decompressIfGZIP(nodeConfigPart) + if err != nil { + return nil, err + } decodedConfig, err := apibridge.DecodeNodeConfig(nodeConfigPart) if err != nil { return nil, err @@ -102,6 +134,39 @@ func parseMultipart(userDataReader *multipart.Reader) (*internalapi.NodeConfig, } return config, nil } else { - return nil, fmt.Errorf("Could not find NodeConfig within UserData") + return nil, fmt.Errorf("could not find NodeConfig within UserData") + } +} + +func decodeIfBase64(data []byte) ([]byte, error) { + e := base64.StdEncoding + maxDecodedLen := e.DecodedLen(len(data)) + decodedData := make([]byte, maxDecodedLen) + decodedLen, err := e.Decode(decodedData, data) + if err != nil { + return data, nil + } + return decodedData[:decodedLen], nil +} + +// https://en.wikipedia.org/wiki/Gzip +const gzipMagicNumber = uint16(0x1f8b) + +func decompressIfGZIP(data []byte) ([]byte, error) { + if len(data) < 2 { + return data, nil + } + preamble := uint16(data[0])<<8 | uint16(data[1]) + if preamble == gzipMagicNumber { + reader, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return nil, fmt.Errorf("failed to 
create GZIP reader: %v", err) + } + if decompressed, err := io.ReadAll(reader); err != nil { + return nil, fmt.Errorf("failed to read from GZIP reader: %v", err) + } else { + return decompressed, nil + } } + return data, nil } diff --git a/nodeadm/internal/configprovider/userdata_test.go b/nodeadm/internal/configprovider/userdata_test.go index a00e847b5..60eadc6dc 100644 --- a/nodeadm/internal/configprovider/userdata_test.go +++ b/nodeadm/internal/configprovider/userdata_test.go @@ -1,131 +1,283 @@ package configprovider import ( - "encoding/json" + "bytes" + "compress/gzip" "fmt" - "mime/multipart" - "net/mail" - "reflect" - "strings" "testing" "github.com/awslabs/amazon-eks-ami/nodeadm/internal/api" + "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/runtime" ) -const boundary = "#" -const completeNodeConfig = `--- -apiVersion: node.eks.aws/v1alpha1 -kind: NodeConfig -spec: - cluster: - name: autofill - apiServerEndpoint: autofill - certificateAuthority: '' - cidr: 10.100.0.0/16 - kubelet: - config: - port: 1010 - maxPods: 120 - flags: - - --v=2 - - --node-labels=foo=bar,nodegroup=test -` - -const partialNodeConfig = `--- -apiVersion: node.eks.aws/v1alpha1 -kind: NodeConfig -spec: - kubelet: - config: - maxPods: 150 - podsPerCore: 20 - flags: - - --v=5 - - --node-labels=foo=baz -` - -var completeMergedWithPartial = api.NodeConfig{ - Spec: api.NodeConfigSpec{ - Cluster: api.ClusterDetails{ - Name: "autofill", - APIServerEndpoint: "autofill", - CertificateAuthority: []byte{}, - CIDR: "10.100.0.0/16", - }, - Kubelet: api.KubeletOptions{ - Config: api.InlineDocument{ - "maxPods": runtime.RawExtension{Raw: []byte("150")}, - "podsPerCore": runtime.RawExtension{Raw: []byte("20")}, - "port": runtime.RawExtension{Raw: []byte("1010")}, - }, - Flags: []string{ - "--v=2", - "--node-labels=foo=bar,nodegroup=test", - "--v=5", - "--node-labels=foo=baz", - }, - }, - }, -} - -func indent(in string) string { - var mid interface{} - err := json.Unmarshal([]byte(in), 
&mid) +func Test_decompressIfGZIP(t *testing.T) { + expected := []byte("hello, world!") + compressed, err := compressAsGZIP(expected) if err != nil { - panic(err) + t.Fatal(err) } - out, err := json.MarshalIndent(&mid, "", " ") + actual, err := decompressIfGZIP(compressed) if err != nil { - panic(err) + t.Fatalf("failed to decompress GZIP: %v", err) } - return string(out) + assert.Equal(t, expected, actual) } -func mimeifyNodeConfigs(configs ...string) string { - var mimeDocLines = []string{ - "MIME-Version: 1.0", - `Content-Type: multipart/mixed; boundary="#"`, - } - for _, config := range configs { - mimeDocLines = append(mimeDocLines, fmt.Sprintf("\n--#\nContent-Type: %s\n\n%s", nodeConfigMediaType, config)) +func mustCompressAsGZIP(t *testing.T, data []byte) []byte { + compressedData, err := compressAsGZIP(data) + if err != nil { + t.Errorf("failed to compress as GZIP: %v", err) } - mimeDocLines = append(mimeDocLines, "\n--#--") - return strings.Join(mimeDocLines, "\n") + return compressedData } -func TestParseMIMENodeConfig(t *testing.T) { - mimeMessage, err := mail.ReadMessage(strings.NewReader(mimeifyNodeConfigs(completeNodeConfig))) +func compressAsGZIP(data []byte) ([]byte, error) { + var compressed bytes.Buffer + writer := gzip.NewWriter(&compressed) + n, err := writer.Write(data) if err != nil { - t.Fatal(err) + return nil, fmt.Errorf("failed to write data to GZIP writer: %v", err) } - userDataReader := multipart.NewReader(mimeMessage.Body, boundary) - if _, err := parseMultipart(userDataReader); err != nil { - t.Fatal(err) + if n != len(data) { + return nil, fmt.Errorf("data written to GZIP writer doesn't match input (%d): %d", len(data), n) } + if err := writer.Close(); err != nil { + return nil, fmt.Errorf("unable to close GZIP writer: %v", err) + } + return compressed.Bytes(), nil } -func TestGetMIMEReader(t *testing.T) { - if _, err := getMIMEMultipartReader([]byte(mimeifyNodeConfigs(completeNodeConfig))); err != nil { - t.Fatal(err) +type 
testUserDataProvider struct { + userData []byte + err error +} + +func (p *testUserDataProvider) GetUserData() ([]byte, error) { + return p.userData, p.err +} + +func Test_Provide(t *testing.T) { + testCases := []struct { + scenario string + expectedNodeConfig api.NodeConfig + userData []byte + isErrorExpected bool + }{ + { + scenario: "multiple NodeConfigs in MIME multi-part should be merged", + userData: linesToBytes( + "MIME-Version: 1.0", + `Content-Type: multipart/mixed; boundary="BOUNDARY"`, + "", + "--BOUNDARY", + "Content-Type: application/node.eks.aws", + "", + "---", + "apiVersion: node.eks.aws/v1alpha1", + "kind: NodeConfig", + "spec:", + " cluster:", + " name: my-cluster", + " apiServerEndpoint: https://example.com", + " certificateAuthority: Y2VydGlmaWNhdGVBdXRob3JpdHk=", + " cidr: 10.100.0.0/16", + " kubelet:", + " config:", + " port: 1010", + " maxPods: 120", + " flags:", + " - --v=2", + " - --node-labels=foo=bar,nodegroup=test", + "", + "--BOUNDARY", + "Content-Type: application/node.eks.aws", + "", + "---", + "apiVersion: node.eks.aws/v1alpha1", + "kind: NodeConfig", + "spec:", + " kubelet:", + " config:", + " maxPods: 150", + " podsPerCore: 20", + " flags:", + " - --v=5", + " - --node-labels=foo=baz", + "", + "--BOUNDARY--", + ), + expectedNodeConfig: api.NodeConfig{ + Spec: api.NodeConfigSpec{ + Cluster: api.ClusterDetails{ + Name: "my-cluster", + APIServerEndpoint: "https://example.com", + CertificateAuthority: []byte("certificateAuthority"), + CIDR: "10.100.0.0/16", + }, + Kubelet: api.KubeletOptions{ + Config: api.InlineDocument{ + "maxPods": runtime.RawExtension{Raw: []byte("150")}, + "podsPerCore": runtime.RawExtension{Raw: []byte("20")}, + "port": runtime.RawExtension{Raw: []byte("1010")}, + }, + Flags: []string{ + "--v=2", + "--node-labels=foo=bar,nodegroup=test", + "--v=5", + "--node-labels=foo=baz", + }, + }, + }, + }, + }, + { + scenario: "GZIP NodeConfig", + userData: mustCompressAsGZIP(t, + linesToBytes( + "---", + "apiVersion: 
node.eks.aws/v1alpha1", + "kind: NodeConfig", + "spec:", + " cluster:", + " name: my-cluster", + " apiServerEndpoint: https://example.com", + " certificateAuthority: Y2VydGlmaWNhdGVBdXRob3JpdHk=", + ), + ), + expectedNodeConfig: api.NodeConfig{ + Spec: api.NodeConfigSpec{ + Cluster: api.ClusterDetails{ + Name: "my-cluster", + APIServerEndpoint: "https://example.com", + CertificateAuthority: []byte("certificateAuthority"), + }, + }, + }, + }, + { + scenario: "GZIP multi-part MIME", + userData: mustCompressAsGZIP(t, + linesToBytes( + "MIME-Version: 1.0", + `Content-Type: multipart/mixed; boundary="BOUNDARY"`, + "", + "--BOUNDARY", + "Content-Type: application/node.eks.aws", + "", + "---", + "apiVersion: node.eks.aws/v1alpha1", + "kind: NodeConfig", + "spec:", + " cluster:", + " name: my-cluster", + " apiServerEndpoint: https://example.com", + " certificateAuthority: Y2VydGlmaWNhdGVBdXRob3JpdHk=", + "", + "--BOUNDARY--", + ), + ), + expectedNodeConfig: api.NodeConfig{ + Spec: api.NodeConfigSpec{ + Cluster: api.ClusterDetails{ + Name: "my-cluster", + APIServerEndpoint: "https://example.com", + CertificateAuthority: []byte("certificateAuthority"), + }, + }, + }, + }, + { + scenario: "multi-part MIME with GZIP NodeConfig part", + userData: appendByteSlices( + linesToBytes( + "MIME-Version: 1.0", + `Content-Type: multipart/mixed; boundary="BOUNDARY"`, + "", + "--BOUNDARY", + "Content-Type: application/node.eks.aws", + "", + "", + ), + mustCompressAsGZIP(t, + linesToBytes( + "---", + "apiVersion: node.eks.aws/v1alpha1", + "kind: NodeConfig", + "spec:", + " cluster:", + " name: my-cluster", + " apiServerEndpoint: https://example.com", + " certificateAuthority: Y2VydGlmaWNhdGVBdXRob3JpdHk=", + ), + ), + linesToBytes( + "", + "--BOUNDARY--", + ), + ), + expectedNodeConfig: api.NodeConfig{ + Spec: api.NodeConfigSpec{ + Cluster: api.ClusterDetails{ + Name: "my-cluster", + APIServerEndpoint: "https://example.com", + CertificateAuthority: []byte("certificateAuthority"), + }, + }, 
+ }, + }, + { + scenario: "base64 encoded, gzip compressed multi-part MIME document", + userData: []byte("H4sIAONcTmYAA12PT0/CQBDF7/spNty3tXpbwwGQACbUBLXKcegOdtP9l90p0m9vS4xBbjPvvfll3sI7QkfirQ8oue0M6QCRcqvPqB75wXdOQeynk+1mu5y/vJdPs91+wsZNVBiT9k7yIrtjTIjrCFv8A0MIRtdAQzx3XmGGbcrgO41ngkHQf6xrNz8VYEIDBWu1U5KXgzdwj/qLpYC1ZJzXpkuEcRw5d2DHEr34VS/iAH/FeMK4dCp47Ujyhigkmed4BhsMZrW3l2iNkfRx/BNnHTU+auol399XvVoZCx9lo1bVXH3u/OHhOah1O72tPZT5AWxxqkxSAQAA"), + expectedNodeConfig: api.NodeConfig{ + Spec: api.NodeConfigSpec{ + Cluster: api.ClusterDetails{ + Name: "my-cluster", + APIServerEndpoint: "https://example.com", + CertificateAuthority: []byte("certificateAuthority"), + }, + }, + }, + }, } - if _, err := getMIMEMultipartReader([]byte(completeNodeConfig)); err == nil { - t.Fatalf("expected err for bad multipart data") + + for i, testCase := range testCases { + t.Run(fmt.Sprintf("%d_%s", i, testCase.scenario), func(t *testing.T) { + configProvider := userDataConfigProvider{ + userDataProvider: &testUserDataProvider{ + userData: testCase.userData, + }, + } + t.Logf("test case user data:\n%s", string(testCase.userData)) + actualNodeConfig, err := configProvider.Provide() + if testCase.isErrorExpected { + assert.NotNil(t, err) + assert.Nil(t, actualNodeConfig) + } else { + assert.Nil(t, err) + if assert.NotNil(t, actualNodeConfig) { + assert.Equal(t, testCase.expectedNodeConfig, *actualNodeConfig) + } + } + }) } } -func TestMergeNodeConfig(t *testing.T) { - mimeNodeConfig := mimeifyNodeConfigs(completeNodeConfig, partialNodeConfig) - mimeMessage, err := mail.ReadMessage(strings.NewReader(mimeNodeConfig)) - if err != nil { - t.Fatal(err) +func linesToBytes(lines ...string) []byte { + var buf bytes.Buffer + for i, line := range lines { + if i > 0 { + buf.WriteString("\n") + } + buf.WriteString(line) } - userDataReader := multipart.NewReader(mimeMessage.Body, boundary) - config, err := parseMultipart(userDataReader) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(config, 
&completeMergedWithPartial) { - t.Errorf("\nexpected: %+v\n\ngot: %+v", &completeMergedWithPartial, config) + return buf.Bytes() +} + +func appendByteSlices(slices ...[]byte) []byte { + var res []byte + for _, slice := range slices { + res = append(res, slice...) } + return res } diff --git a/nodeadm/internal/containerd/base-runtime-spec.json b/nodeadm/internal/containerd/base-runtime-spec.json new file mode 100644 index 000000000..e7f59498f --- /dev/null +++ b/nodeadm/internal/containerd/base-runtime-spec.json @@ -0,0 +1,174 @@ +{ + "linux": { + "maskedPaths": [ + "/proc/acpi", + "/proc/asound", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/sched_debug", + "/proc/scsi", + "/proc/timer_list", + "/proc/timer_stats", + "/sys/firmware" + ], + "namespaces": [ + { + "type": "ipc" + }, + { + "type": "mount" + }, + { + "type": "network" + }, + { + "type": "pid" + }, + { + "type": "uts" + } + ], + "readonlyPaths": [ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ], + "resources": { + "devices": [ + { + "access": "rwm", + "allow": false + } + ] + } + }, + "mounts": [ + { + "destination": "/dev", + "options": [ + "nosuid", + "strictatime", + "mode=755", + "size=65536k" + ], + "source": "tmpfs", + "type": "tmpfs" + }, + { + "destination": "/dev/mqueue", + "options": [ + "nosuid", + "noexec", + "nodev" + ], + "source": "mqueue", + "type": "mqueue" + }, + { + "destination": "/dev/pts", + "options": [ + "nosuid", + "noexec", + "newinstance", + "ptmxmode=0666", + "mode=0620", + "gid=5" + ], + "source": "devpts", + "type": "devpts" + }, + { + "destination": "/proc", + "options": [ + "nosuid", + "noexec", + "nodev" + ], + "source": "proc", + "type": "proc" + }, + { + "destination": "/sys", + "options": [ + "nosuid", + "noexec", + "nodev", + "ro" + ], + "source": "sysfs", + "type": "sysfs" + } + ], + "ociVersion": "1.1.0", + "process": { + "capabilities": { + "bounding": [ + "CAP_AUDIT_WRITE", + "CAP_CHOWN", + 
"CAP_DAC_OVERRIDE", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_MKNOD", + "CAP_NET_BIND_SERVICE", + "CAP_NET_RAW", + "CAP_SETFCAP", + "CAP_SETGID", + "CAP_SETPCAP", + "CAP_SETUID", + "CAP_SYS_CHROOT" + ], + "effective": [ + "CAP_AUDIT_WRITE", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_MKNOD", + "CAP_NET_BIND_SERVICE", + "CAP_NET_RAW", + "CAP_SETFCAP", + "CAP_SETGID", + "CAP_SETPCAP", + "CAP_SETUID", + "CAP_SYS_CHROOT" + ], + "permitted": [ + "CAP_AUDIT_WRITE", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_MKNOD", + "CAP_NET_BIND_SERVICE", + "CAP_NET_RAW", + "CAP_SETFCAP", + "CAP_SETGID", + "CAP_SETPCAP", + "CAP_SETUID", + "CAP_SYS_CHROOT" + ] + }, + "cwd": "/", + "noNewPrivileges": true, + "rlimits": [ + { + "type": "RLIMIT_NOFILE", + "soft": 65536, + "hard": 1048576 + } + ], + "user": { + "gid": 0, + "uid": 0 + } + }, + "root": { + "path": "rootfs" + } +} diff --git a/nodeadm/internal/containerd/base_runtime_spec.go b/nodeadm/internal/containerd/base_runtime_spec.go new file mode 100644 index 000000000..c2e58b44f --- /dev/null +++ b/nodeadm/internal/containerd/base_runtime_spec.go @@ -0,0 +1,38 @@ +package containerd + +import ( + _ "embed" + "encoding/json" + "fmt" + "strings" + + "github.com/awslabs/amazon-eks-ami/nodeadm/internal/api" + "github.com/awslabs/amazon-eks-ami/nodeadm/internal/util" + "go.uber.org/zap" +) + +const containerdBaseRuntimeSpecFile = "/etc/containerd/base-runtime-spec.json" + +//go:embed base-runtime-spec.json +var defaultBaseRuntimeSpecData string + +func writeBaseRuntimeSpec(cfg *api.NodeConfig) error { + zap.L().Info("Writing containerd base runtime spec...", zap.String("path", containerdBaseRuntimeSpecFile)) + baseRuntimeSpecData := defaultBaseRuntimeSpecData + if len(cfg.Spec.Containerd.BaseRuntimeSpec) > 0 { + var defaultBaseRuntimeSpecMap api.InlineDocument + if err := json.Unmarshal([]byte(defaultBaseRuntimeSpecData), 
&defaultBaseRuntimeSpecMap); err != nil { + return fmt.Errorf("failed to unmarshal default base runtime spec: %v", err) + } + mergedBaseRuntimeSpecMap, err := util.Merge(defaultBaseRuntimeSpecMap, cfg.Spec.Containerd.BaseRuntimeSpec, json.Marshal, json.Unmarshal) + if err != nil { + return err + } + mergedBaseRuntimeSpecData, err := json.MarshalIndent(mergedBaseRuntimeSpecMap, "", strings.Repeat(" ", 4)) + if err != nil { + return err + } + baseRuntimeSpecData = string(mergedBaseRuntimeSpecData) + } + return util.WriteFileWithDir(containerdBaseRuntimeSpecFile, []byte(baseRuntimeSpecData), containerdConfigPerm) +} diff --git a/nodeadm/internal/containerd/config.go b/nodeadm/internal/containerd/config.go index b6a5274a2..c18a1533d 100644 --- a/nodeadm/internal/containerd/config.go +++ b/nodeadm/internal/containerd/config.go @@ -3,20 +3,19 @@ package containerd import ( "bytes" _ "embed" - "path/filepath" "text/template" "github.com/awslabs/amazon-eks-ami/nodeadm/internal/api" "github.com/awslabs/amazon-eks-ami/nodeadm/internal/util" + "github.com/pelletier/go-toml/v2" "go.uber.org/zap" ) const ContainerRuntimeEndpoint = "unix:///run/containerd/containerd.sock" const ( - containerdConfigFile = "/etc/containerd/config.toml" - containerdConfigImportDir = "/etc/containerd/config.d" - containerdConfigPerm = 0644 + containerdConfigFile = "/etc/containerd/config.toml" + containerdConfigPerm = 0644 ) var ( @@ -30,21 +29,30 @@ type containerdTemplateVars struct { } func writeContainerdConfig(cfg *api.NodeConfig) error { - // write nodeadm's generated containerd config to the default path - containerdConfig, err := generateContainerdConfig(cfg) - if err != nil { + if err := writeBaseRuntimeSpec(cfg); err != nil { return err } - zap.L().Info("Writing containerd config to file..", zap.String("path", containerdConfigFile)) - if err := util.WriteFileWithDir(containerdConfigFile, containerdConfig, containerdConfigPerm); err != nil { + + containerdConfig, err := 
generateContainerdConfig(cfg) + if err != nil { return err } + // because the logic in containerd's import merge decides to completely + // overwrite entire sections, we want to implement this merging ourselves. + // see: https://github.com/containerd/containerd/blob/a91b05d99ceac46329be06eb43f7ae10b89aad45/cmd/containerd/server/config/config.go#L407-L431 if len(cfg.Spec.Containerd.Config) > 0 { - containerConfigImportPath := filepath.Join(containerdConfigImportDir, "00-nodeadm.toml") - zap.L().Info("Writing user containerd config to drop-in file..", zap.String("path", containerConfigImportPath)) - return util.WriteFileWithDir(containerConfigImportPath, []byte(cfg.Spec.Containerd.Config), containerdConfigPerm) + containerdConfigMap, err := util.Merge(containerdConfig, []byte(cfg.Spec.Containerd.Config), toml.Marshal, toml.Unmarshal) + if err != nil { + return err + } + containerdConfig, err = toml.Marshal(containerdConfigMap) + if err != nil { + return err + } } - return nil + + zap.L().Info("Writing containerd config to file..", zap.String("path", containerdConfigFile)) + return util.WriteFileWithDir(containerdConfigFile, containerdConfig, containerdConfigPerm) } func generateContainerdConfig(cfg *api.NodeConfig) ([]byte, error) { diff --git a/nodeadm/internal/containerd/config.template.toml b/nodeadm/internal/containerd/config.template.toml index 273cebdf5..47b66b0fa 100644 --- a/nodeadm/internal/containerd/config.template.toml +++ b/nodeadm/internal/containerd/config.template.toml @@ -1,10 +1,6 @@ version = 2 root = "/var/lib/containerd" state = "/run/containerd" -# Users can use the following import directory to add additional -# configuration to containerd. The imports do not behave exactly like overrides. 
-# see: https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.toml.5.md#format -imports = ["/etc/containerd/config.d/*.toml"] [grpc] address = "/run/containerd/containerd.sock" @@ -21,6 +17,7 @@ config_path = "/etc/containerd/certs.d:/etc/docker/certs.d" [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] runtime_type = "io.containerd.runc.v2" +base_runtime_spec = "/etc/containerd/base-runtime-spec.json" [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] SystemdCgroup = true diff --git a/nodeadm/internal/kubelet/config.go b/nodeadm/internal/kubelet/config.go index 15c041b3b..6fce79f28 100644 --- a/nodeadm/internal/kubelet/config.go +++ b/nodeadm/internal/kubelet/config.go @@ -13,8 +13,6 @@ import ( "strings" "time" - "dario.cat/mergo" - "go.uber.org/zap" "golang.org/x/mod/semver" @@ -191,11 +189,6 @@ func (ksc *kubeletConfig) withOutpostSetup(cfg *api.NodeConfig) error { ipHostMappings = append(ipHostMappings, fmt.Sprintf("%s\t%s", ip, apiUrl.Host)) } output := strings.Join(ipHostMappings, "\n") + "\n" - - if err != nil { - return err - } - // append to /etc/hosts file with shuffled mappings of "IP address to API server domain name" f, err := os.OpenFile("/etc/hosts", os.O_APPEND|os.O_WRONLY, kubeletConfigPerm) if err != nil { @@ -338,7 +331,7 @@ func (k *kubelet) writeKubeletConfigToFile(cfg *api.NodeConfig) error { var kubeletConfigBytes []byte if cfg.Spec.Kubelet.Config != nil && len(cfg.Spec.Kubelet.Config) > 0 { - mergedMap, err := util.DocumentMerge(kubeletConfig, cfg.Spec.Kubelet.Config, mergo.WithOverride) + mergedMap, err := util.Merge(kubeletConfig, cfg.Spec.Kubelet.Config, json.Marshal, json.Unmarshal) if err != nil { return err } @@ -392,16 +385,15 @@ func (k *kubelet) writeKubeletConfigToDir(cfg *api.NodeConfig) error { // merge in default type metadata like kind and apiVersion in case the // user has not specified this, as it is required to qualify a drop-in // config as a valid 
KubeletConfiguration - userKubeletConfigMap, err := util.DocumentMerge(defaultKubeletSubConfig().TypeMeta, cfg.Spec.Kubelet.Config) + userKubeletConfigMap, err := util.Merge(defaultKubeletSubConfig().TypeMeta, cfg.Spec.Kubelet.Config, json.Marshal, json.Unmarshal) if err != nil { return err } - - zap.L().Info("Writing user kubelet config to drop-in file..", zap.String("path", filePath)) userKubeletConfigBytes, err := json.MarshalIndent(userKubeletConfigMap, "", strings.Repeat(" ", 4)) if err != nil { return err } + zap.L().Info("Writing user kubelet config to drop-in file..", zap.String("path", filePath)) if err := util.WriteFileWithDir(filePath, userKubeletConfigBytes, kubeletConfigPerm); err != nil { return err } diff --git a/nodeadm/internal/kubelet/eni-max-pods.txt b/nodeadm/internal/kubelet/eni-max-pods.txt index eca414165..0d318ac6a 100644 --- a/nodeadm/internal/kubelet/eni-max-pods.txt +++ b/nodeadm/internal/kubelet/eni-max-pods.txt @@ -161,11 +161,11 @@ c6in.12xlarge 234 c6in.16xlarge 737 c6in.24xlarge 737 c6in.2xlarge 58 -c6in.32xlarge 345 +c6in.32xlarge 394 c6in.4xlarge 234 c6in.8xlarge 234 c6in.large 29 -c6in.metal 345 +c6in.metal 394 c6in.xlarge 58 c7a.12xlarge 234 c7a.16xlarge 737 @@ -206,6 +206,11 @@ c7gn.large 29 c7gn.medium 8 c7gn.metal 737 c7gn.xlarge 58 +c7i-flex.2xlarge 58 +c7i-flex.4xlarge 234 +c7i-flex.8xlarge 234 +c7i-flex.large 29 +c7i-flex.xlarge 58 c7i.12xlarge 234 c7i.16xlarge 737 c7i.24xlarge 737 @@ -475,21 +480,21 @@ m6idn.12xlarge 234 m6idn.16xlarge 737 m6idn.24xlarge 737 m6idn.2xlarge 58 -m6idn.32xlarge 345 +m6idn.32xlarge 394 m6idn.4xlarge 234 m6idn.8xlarge 234 m6idn.large 29 -m6idn.metal 345 +m6idn.metal 394 m6idn.xlarge 58 m6in.12xlarge 234 m6in.16xlarge 737 m6in.24xlarge 737 m6in.2xlarge 58 -m6in.32xlarge 345 +m6in.32xlarge 394 m6in.4xlarge 234 m6in.8xlarge 234 m6in.large 29 -m6in.metal 345 +m6in.metal 394 m6in.xlarge 58 m7a.12xlarge 234 m7a.16xlarge 737 @@ -538,6 +543,7 @@ m7i.metal-24xl 737 m7i.metal-48xl 737 m7i.xlarge 58 
mac1.metal 234 +mac2-m1ultra.metal 234 mac2-m2.metal 234 mac2-m2pro.metal 234 mac2.metal 234 @@ -676,21 +682,21 @@ r6idn.12xlarge 234 r6idn.16xlarge 737 r6idn.24xlarge 737 r6idn.2xlarge 58 -r6idn.32xlarge 345 +r6idn.32xlarge 394 r6idn.4xlarge 234 r6idn.8xlarge 234 r6idn.large 29 -r6idn.metal 345 +r6idn.metal 394 r6idn.xlarge 58 r6in.12xlarge 234 r6in.16xlarge 737 r6in.24xlarge 737 r6in.2xlarge 58 -r6in.32xlarge 345 +r6in.32xlarge 394 r6in.4xlarge 234 r6in.8xlarge 234 r6in.large 29 -r6in.metal 345 +r6in.metal 394 r6in.xlarge 58 r7a.12xlarge 234 r7a.16xlarge 737 @@ -787,6 +793,10 @@ u-6tb1.56xlarge 737 u-6tb1.metal 147 u-9tb1.112xlarge 737 u-9tb1.metal 147 +u7i-12tb.224xlarge 737 +u7in-16tb.224xlarge 394 +u7in-24tb.224xlarge 394 +u7in-32tb.224xlarge 394 vt1.24xlarge 737 vt1.3xlarge 58 vt1.6xlarge 234 diff --git a/nodeadm/internal/util/merge.go b/nodeadm/internal/util/merge.go index f211be0ed..ad352260c 100644 --- a/nodeadm/internal/util/merge.go +++ b/nodeadm/internal/util/merge.go @@ -1,31 +1,54 @@ package util import ( - "encoding/json" + "fmt" + "reflect" "dario.cat/mergo" ) -// DocumentMerge merges two arguments using their marshalled json -// representations and returns the resulting data in a `map[string]interface{}` -func DocumentMerge(a, b any, opts ...func(*mergo.Config)) (map[string]interface{}, error) { - var aMap, bMap map[string]interface{} - aBytes, err := json.Marshal(a) - if err != nil { - return nil, err +// Merge is a wrapper around the "Merge" from dario.cat/mergo which +// automatically handles repeated conversions between raw representations of +// data and nested key-value objects +// +// dst and src can either both be of type []byte, or both will be marshalled +// into a binary representation using the provided marshaller func. 
+func Merge( + dst, src any, + marshaller func(v any) ([]byte, error), + unmarshaller func(data []byte, v any) error, + opts ...func(*mergo.Config), +) (map[string]interface{}, error) { + var ( + dstBytes, srcBytes []byte + dstMap, srcMap map[string]interface{} + err error + ) + if reflect.TypeOf(dst) == reflect.TypeOf([]byte{}) && reflect.TypeOf(src) == reflect.TypeOf([]byte{}) { + dstBytes = reflect.ValueOf(dst).Bytes() + srcBytes = reflect.ValueOf(src).Bytes() + } else { + if marshaller == nil { + return nil, fmt.Errorf("marshaller expected.") + } + if dstBytes, err = marshaller(dst); err != nil { + return nil, err + } + if srcBytes, err = marshaller(src); err != nil { + return nil, err + } } - bBytes, err := json.Marshal(b) - if err != nil { + if err := unmarshaller(dstBytes, &dstMap); err != nil { return nil, err } - if err := json.Unmarshal(aBytes, &aMap); err != nil { + if err := unmarshaller(srcBytes, &srcMap); err != nil { return nil, err } - if err := json.Unmarshal(bBytes, &bMap); err != nil { - return nil, err + if len(opts) == 0 { + opts = append(opts, mergo.WithOverride) } - if err := mergo.Merge(&aMap, &bMap, opts...); err != nil { + if err := mergo.Merge(&dstMap, &srcMap, opts...); err != nil { return nil, err } - return aMap, nil + return dstMap, nil } diff --git a/nodeadm/test/e2e/cases/containerd-base-runtime-spec/config.yaml b/nodeadm/test/e2e/cases/containerd-base-runtime-spec/config.yaml new file mode 100644 index 000000000..e2fa63f0b --- /dev/null +++ b/nodeadm/test/e2e/cases/containerd-base-runtime-spec/config.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: node.eks.aws/v1alpha1 +kind: NodeConfig +spec: + cluster: + name: my-cluster + apiServerEndpoint: https://example.com + certificateAuthority: Y2VydGlmaWNhdGVBdXRob3JpdHk= + cidr: 10.100.0.0/16 + containerd: + baseRuntimeSpec: + foo: bar + process: + rlimits: + - type: RLIMIT_NOFILE + soft: 12345 + hard: 12345 \ No newline at end of file diff --git 
a/nodeadm/test/e2e/cases/containerd-base-runtime-spec/expected-base-runtime-spec.json b/nodeadm/test/e2e/cases/containerd-base-runtime-spec/expected-base-runtime-spec.json new file mode 100644 index 000000000..19f5aeaa5 --- /dev/null +++ b/nodeadm/test/e2e/cases/containerd-base-runtime-spec/expected-base-runtime-spec.json @@ -0,0 +1,175 @@ +{ + "foo": "bar", + "linux": { + "maskedPaths": [ + "/proc/acpi", + "/proc/asound", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/sched_debug", + "/proc/scsi", + "/proc/timer_list", + "/proc/timer_stats", + "/sys/firmware" + ], + "namespaces": [ + { + "type": "ipc" + }, + { + "type": "mount" + }, + { + "type": "network" + }, + { + "type": "pid" + }, + { + "type": "uts" + } + ], + "readonlyPaths": [ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ], + "resources": { + "devices": [ + { + "access": "rwm", + "allow": false + } + ] + } + }, + "mounts": [ + { + "destination": "/dev", + "options": [ + "nosuid", + "strictatime", + "mode=755", + "size=65536k" + ], + "source": "tmpfs", + "type": "tmpfs" + }, + { + "destination": "/dev/mqueue", + "options": [ + "nosuid", + "noexec", + "nodev" + ], + "source": "mqueue", + "type": "mqueue" + }, + { + "destination": "/dev/pts", + "options": [ + "nosuid", + "noexec", + "newinstance", + "ptmxmode=0666", + "mode=0620", + "gid=5" + ], + "source": "devpts", + "type": "devpts" + }, + { + "destination": "/proc", + "options": [ + "nosuid", + "noexec", + "nodev" + ], + "source": "proc", + "type": "proc" + }, + { + "destination": "/sys", + "options": [ + "nosuid", + "noexec", + "nodev", + "ro" + ], + "source": "sysfs", + "type": "sysfs" + } + ], + "ociVersion": "1.1.0", + "process": { + "capabilities": { + "bounding": [ + "CAP_AUDIT_WRITE", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_MKNOD", + "CAP_NET_BIND_SERVICE", + "CAP_NET_RAW", + "CAP_SETFCAP", + "CAP_SETGID", + "CAP_SETPCAP", + "CAP_SETUID", + 
"CAP_SYS_CHROOT" + ], + "effective": [ + "CAP_AUDIT_WRITE", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_MKNOD", + "CAP_NET_BIND_SERVICE", + "CAP_NET_RAW", + "CAP_SETFCAP", + "CAP_SETGID", + "CAP_SETPCAP", + "CAP_SETUID", + "CAP_SYS_CHROOT" + ], + "permitted": [ + "CAP_AUDIT_WRITE", + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_MKNOD", + "CAP_NET_BIND_SERVICE", + "CAP_NET_RAW", + "CAP_SETFCAP", + "CAP_SETGID", + "CAP_SETPCAP", + "CAP_SETUID", + "CAP_SYS_CHROOT" + ] + }, + "cwd": "/", + "noNewPrivileges": true, + "rlimits": [ + { + "hard": 12345, + "soft": 12345, + "type": "RLIMIT_NOFILE" + } + ], + "user": { + "gid": 0, + "uid": 0 + } + }, + "root": { + "path": "rootfs" + } +} \ No newline at end of file diff --git a/nodeadm/test/e2e/cases/containerd-base-runtime-spec/run.sh b/nodeadm/test/e2e/cases/containerd-base-runtime-spec/run.sh new file mode 100755 index 000000000..7bc1b4628 --- /dev/null +++ b/nodeadm/test/e2e/cases/containerd-base-runtime-spec/run.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +source /helpers.sh + +mock::aws +mock::kubelet 1.27.0 +wait::dbus-ready + +nodeadm init --skip run --config-source file://config.yaml + +assert::files-equal /etc/containerd/base-runtime-spec.json expected-base-runtime-spec.json diff --git a/nodeadm/test/e2e/cases/containerd-config/config.yaml b/nodeadm/test/e2e/cases/containerd-config/config.yaml index 188a97aaf..fc1cd01c4 100644 --- a/nodeadm/test/e2e/cases/containerd-config/config.yaml +++ b/nodeadm/test/e2e/cases/containerd-config/config.yaml @@ -12,4 +12,7 @@ spec: version = 2 [grpc] - address = "/run/containerd/containerd.sock" + address = "/run/foo/foo.sock" + + [plugins."io.containerd.grpc.v1.cri".containerd] + discard_unpacked_layers = false diff --git a/nodeadm/test/e2e/cases/containerd-config/expected-containerd-config.toml 
b/nodeadm/test/e2e/cases/containerd-config/expected-containerd-config.toml index 746a9732f..5e0d18c80 100644 --- a/nodeadm/test/e2e/cases/containerd-config/expected-containerd-config.toml +++ b/nodeadm/test/e2e/cases/containerd-config/expected-containerd-config.toml @@ -1,30 +1,29 @@ +root = '/var/lib/containerd' +state = '/run/containerd' version = 2 -root = "/var/lib/containerd" -state = "/run/containerd" -# Users can use the following import directory to add additional -# configuration to containerd. The imports do not behave exactly like overrides. -# see: https://github.com/containerd/containerd/blob/main/docs/man/containerd-config.toml.5.md#format -imports = ["/etc/containerd/config.d/*.toml"] [grpc] -address = "/run/containerd/containerd.sock" +address = '/run/foo/foo.sock' -[plugins."io.containerd.grpc.v1.cri".containerd] -default_runtime_name = "runc" -discard_unpacked_layers = true +[plugins] +[plugins.'io.containerd.grpc.v1.cri'] +sandbox_image = '602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/pause:3.5' -[plugins."io.containerd.grpc.v1.cri"] -sandbox_image = "602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/pause:3.5" +[plugins.'io.containerd.grpc.v1.cri'.cni] +bin_dir = '/opt/cni/bin' +conf_dir = '/etc/cni/net.d' -[plugins."io.containerd.grpc.v1.cri".registry] -config_path = "/etc/containerd/certs.d:/etc/docker/certs.d" +[plugins.'io.containerd.grpc.v1.cri'.containerd] +default_runtime_name = 'runc' +discard_unpacked_layers = false -[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] -runtime_type = "io.containerd.runc.v2" +[plugins.'io.containerd.grpc.v1.cri'.containerd.runtimes] +[plugins.'io.containerd.grpc.v1.cri'.containerd.runtimes.runc] +base_runtime_spec = '/etc/containerd/base-runtime-spec.json' +runtime_type = 'io.containerd.runc.v2' -[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] +[plugins.'io.containerd.grpc.v1.cri'.containerd.runtimes.runc.options] SystemdCgroup = true 
-[plugins."io.containerd.grpc.v1.cri".cni] -bin_dir = "/opt/cni/bin" -conf_dir = "/etc/cni/net.d" +[plugins.'io.containerd.grpc.v1.cri'.registry] +config_path = '/etc/containerd/certs.d:/etc/docker/certs.d' diff --git a/nodeadm/test/e2e/cases/containerd-config/expected-user-containerd-config.toml b/nodeadm/test/e2e/cases/containerd-config/expected-user-containerd-config.toml deleted file mode 100644 index ab21bb149..000000000 --- a/nodeadm/test/e2e/cases/containerd-config/expected-user-containerd-config.toml +++ /dev/null @@ -1,4 +0,0 @@ -version = 2 - -[grpc] -address = "/run/containerd/containerd.sock" diff --git a/nodeadm/test/e2e/cases/containerd-config/run.sh b/nodeadm/test/e2e/cases/containerd-config/run.sh index acf902a2e..315cba2f8 100755 --- a/nodeadm/test/e2e/cases/containerd-config/run.sh +++ b/nodeadm/test/e2e/cases/containerd-config/run.sh @@ -13,4 +13,3 @@ wait::dbus-ready nodeadm init --skip run --config-source file://config.yaml assert::files-equal /etc/containerd/config.toml expected-containerd-config.toml -assert::files-equal /etc/containerd/config.d/00-nodeadm.toml expected-user-containerd-config.toml diff --git a/nodeadm/test/e2e/cases/kubelet-config-set-empty/run.sh b/nodeadm/test/e2e/cases/kubelet-config-set-empty/run.sh index 1211ad758..fe73d4349 100755 --- a/nodeadm/test/e2e/cases/kubelet-config-set-empty/run.sh +++ b/nodeadm/test/e2e/cases/kubelet-config-set-empty/run.sh @@ -19,4 +19,4 @@ wait::dbus-ready nodeadm init --skip run --config-source file://config.yaml assert::json-files-equal /etc/kubernetes/kubelet/config.json expected-kubelet-config.json # default the name strategy should be EC2PrivateName, use this pattern to assert -assert::file-contains /etc/eks/kubelet/environment '--hostname-override=ip.*ec2.internal ' +assert::file-contains /etc/eks/kubelet/environment '--hostname-override=ip.*ec2.internal' diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/.dockerignore 
b/nodeadm/vendor/github.com/pelletier/go-toml/v2/.dockerignore new file mode 100644 index 000000000..7b5883475 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/.dockerignore @@ -0,0 +1,2 @@ +cmd/tomll/tomll +cmd/tomljson/tomljson diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/.gitattributes b/nodeadm/vendor/github.com/pelletier/go-toml/v2/.gitattributes new file mode 100644 index 000000000..34a0a21a3 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/.gitattributes @@ -0,0 +1,4 @@ +* text=auto + +benchmark/benchmark.toml text eol=lf +testdata/** text eol=lf diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/.gitignore b/nodeadm/vendor/github.com/pelletier/go-toml/v2/.gitignore new file mode 100644 index 000000000..4b7c4eda3 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/.gitignore @@ -0,0 +1,7 @@ +test_program/test_program_bin +fuzz/ +cmd/tomll/tomll +cmd/tomljson/tomljson +cmd/tomltestgen/tomltestgen +dist +tests/ diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/.golangci.toml b/nodeadm/vendor/github.com/pelletier/go-toml/v2/.golangci.toml new file mode 100644 index 000000000..067db5517 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/.golangci.toml @@ -0,0 +1,84 @@ +[service] +golangci-lint-version = "1.39.0" + +[linters-settings.wsl] +allow-assign-and-anything = true + +[linters-settings.exhaustive] +default-signifies-exhaustive = true + +[linters] +disable-all = true +enable = [ + "asciicheck", + "bodyclose", + "cyclop", + "deadcode", + "depguard", + "dogsled", + "dupl", + "durationcheck", + "errcheck", + "errorlint", + "exhaustive", + # "exhaustivestruct", + "exportloopref", + "forbidigo", + # "forcetypeassert", + "funlen", + "gci", + # "gochecknoglobals", + "gochecknoinits", + "gocognit", + "goconst", + "gocritic", + "gocyclo", + "godot", + "godox", + # "goerr113", + "gofmt", + "gofumpt", + "goheader", + "goimports", + "golint", + "gomnd", + # 
"gomoddirectives", + "gomodguard", + "goprintffuncname", + "gosec", + "gosimple", + "govet", + # "ifshort", + "importas", + "ineffassign", + "lll", + "makezero", + "misspell", + "nakedret", + "nestif", + "nilerr", + # "nlreturn", + "noctx", + "nolintlint", + #"paralleltest", + "prealloc", + "predeclared", + "revive", + "rowserrcheck", + "sqlclosecheck", + "staticcheck", + "structcheck", + "stylecheck", + # "testpackage", + "thelper", + "tparallel", + "typecheck", + "unconvert", + "unparam", + "unused", + "varcheck", + "wastedassign", + "whitespace", + # "wrapcheck", + # "wsl" +] diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml b/nodeadm/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml new file mode 100644 index 000000000..1d8b69e65 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/.goreleaser.yaml @@ -0,0 +1,126 @@ +before: + hooks: + - go mod tidy + - go fmt ./... + - go test ./... +builds: + - id: tomll + main: ./cmd/tomll + binary: tomll + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} + mod_timestamp: '{{ .CommitTimestamp }}' + targets: + - linux_amd64 + - linux_arm64 + - linux_arm + - linux_riscv64 + - windows_amd64 + - windows_arm64 + - windows_arm + - darwin_amd64 + - darwin_arm64 + - id: tomljson + main: ./cmd/tomljson + binary: tomljson + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} + mod_timestamp: '{{ .CommitTimestamp }}' + targets: + - linux_amd64 + - linux_arm64 + - linux_arm + - linux_riscv64 + - windows_amd64 + - windows_arm64 + - windows_arm + - darwin_amd64 + - darwin_arm64 + - id: jsontoml + main: ./cmd/jsontoml + binary: jsontoml + env: + - CGO_ENABLED=0 + flags: + - -trimpath + ldflags: + - -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} + mod_timestamp: '{{ 
.CommitTimestamp }}' + targets: + - linux_amd64 + - linux_arm64 + - linux_riscv64 + - linux_arm + - windows_amd64 + - windows_arm64 + - windows_arm + - darwin_amd64 + - darwin_arm64 +universal_binaries: + - id: tomll + replace: true + name_template: tomll + - id: tomljson + replace: true + name_template: tomljson + - id: jsontoml + replace: true + name_template: jsontoml +archives: +- id: jsontoml + format: tar.xz + builds: + - jsontoml + files: + - none* + name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" +- id: tomljson + format: tar.xz + builds: + - tomljson + files: + - none* + name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" +- id: tomll + format: tar.xz + builds: + - tomll + files: + - none* + name_template: "{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}" +dockers: + - id: tools + goos: linux + goarch: amd64 + ids: + - jsontoml + - tomljson + - tomll + image_templates: + - "ghcr.io/pelletier/go-toml:latest" + - "ghcr.io/pelletier/go-toml:{{ .Tag }}" + - "ghcr.io/pelletier/go-toml:v{{ .Major }}" + skip_push: false +checksum: + name_template: 'sha256sums.txt' +snapshot: + name_template: "{{ incpatch .Version }}-next" +release: + github: + owner: pelletier + name: go-toml + draft: true + prerelease: auto + mode: replace +changelog: + use: github-native +announce: + skip: true diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md b/nodeadm/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md new file mode 100644 index 000000000..96ecf9e2b --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/CONTRIBUTING.md @@ -0,0 +1,193 @@ +# Contributing + +Thank you for your interest in go-toml! We appreciate you considering +contributing to go-toml! + +The main goal is the project is to provide an easy-to-use and efficient TOML +implementation for Go that gets the job done and gets out of your way – dealing +with TOML is probably not the central piece of your project. 
+ +As the single maintainer of go-toml, time is scarce. All help, big or small, is +more than welcomed! + +## Ask questions + +Any question you may have, somebody else might have it too. Always feel free to +ask them on the [discussion board][discussions]. We will try to answer them as +clearly and quickly as possible, time permitting. + +Asking questions also helps us identify areas where the documentation needs +improvement, or new features that weren't envisioned before. Sometimes, a +seemingly innocent question leads to the fix of a bug. Don't hesitate and ask +away! + +[discussions]: https://github.com/pelletier/go-toml/discussions + +## Improve the documentation + +The best way to share your knowledge and experience with go-toml is to improve +the documentation. Fix a typo, clarify an interface, add an example, anything +goes! + +The documentation is present in the [README][readme] and thorough the source +code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a change +to the documentation, create a pull request with your proposed changes. For +simple changes like that, the easiest way to go is probably the "Fork this +project and edit the file" button on Github, displayed at the top right of the +file. Unless it's a trivial change (for example a typo), provide a little bit of +context in your pull request description or commit message. + +## Report a bug + +Found a bug! Sorry to hear that :(. Help us and other track them down and fix by +reporting it. [File a new bug report][bug-report] on the [issues +tracker][issues-tracker]. The template should provide enough guidance on what to +include. When in doubt: add more details! By reducing ambiguity and providing +more information, it decreases back and forth and saves everyone time. + +## Code changes + +Want to contribute a patch? Very happy to hear that! + +First, some high-level rules: + +- A short proposal with some POC code is better than a lengthy piece of text + with no code. 
Code speaks louder than words. That being said, bigger changes + should probably start with a [discussion][discussions]. +- No backward-incompatible patch will be accepted unless discussed. Sometimes + it's hard, but we try not to break people's programs unless we absolutely have + to. +- If you are writing a new feature or extending an existing one, make sure to + write some documentation. +- Bug fixes need to be accompanied with regression tests. +- New code needs to be tested. +- Your commit messages need to explain why the change is needed, even if already + included in the PR description. + +It does sound like a lot, but those best practices are here to save time overall +and continuously improve the quality of the project, which is something everyone +benefits from. + +### Get started + +The fairly standard code contribution process looks like that: + +1. [Fork the project][fork]. +2. Make your changes, commit on any branch you like. +3. [Open up a pull request][pull-request] +4. Review, potential ask for changes. +5. Merge. + +Feel free to ask for help! You can create draft pull requests to gather +some early feedback! + +### Run the tests + +You can run tests for go-toml using Go's test tool: `go test -race ./...`. + +During the pull request process, all tests will be ran on Linux, Windows, and +MacOS on the last two versions of Go. + +However, given GitHub's new policy to _not_ run Actions on pull requests until a +maintainer clicks on button, it is highly recommended that you run them locally +as you make changes. + +### Check coverage + +We use `go tool cover` to compute test coverage. Most code editors have a way to +run and display code coverage, but at the end of the day, we do this: + +``` +go test -covermode=atomic -coverprofile=coverage.out +go tool cover -func=coverage.out +``` + +and verify that the overall percentage of tested code does not go down. This is +a requirement. 
As a rule of thumb, all lines of code touched by your changes +should be covered. On Unix you can use `./ci.sh coverage -d v2` to check if your +code lowers the coverage. + +### Verify performance + +Go-toml aims to stay efficient. We rely on a set of scenarios executed with Go's +builtin benchmark systems. Because of their noisy nature, containers provided by +Github Actions cannot be reliably used for benchmarking. As a result, you are +responsible for checking that your changes do not incur a performance penalty. +You can run their following to execute benchmarks: + +``` +go test ./... -bench=. -count=10 +``` + +Benchmark results should be compared against each other with +[benchstat][benchstat]. Typical flow looks like this: + +1. On the `v2` branch, run `go test ./... -bench=. -count 10` and save output to + a file (for example `old.txt`). +2. Make some code changes. +3. Run `go test ....` again, and save the output to an other file (for example + `new.txt`). +4. Run `benchstat old.txt new.txt` to check that time/op does not go up in any + test. + +On Unix you can use `./ci.sh benchmark -d v2` to verify how your code impacts +performance. + +It is highly encouraged to add the benchstat results to your pull request +description. Pull requests that lower performance will receive more scrutiny. + +[benchstat]: https://pkg.go.dev/golang.org/x/perf/cmd/benchstat + +### Style + +Try to look around and follow the same format and structure as the rest of the +code. We enforce using `go fmt` on the whole code base. + +--- + +## Maintainers-only + +### Merge pull request + +Checklist: + +- Passing CI. +- Does not introduce backward-incompatible changes (unless discussed). +- Has relevant doc changes. +- Benchstat does not show performance regression. +- Pull request is [labeled appropriately][pr-labels]. +- Title will be understandable in the changelog. + +1. Merge using "squash and merge". +2. 
Make sure to edit the commit message to keep all the useful information + nice and clean. +3. Make sure the commit title is clear and contains the PR number (#123). + +### New release + +1. Decide on the next version number. Use semver. Review commits since last + version to assess. +2. Tag release. For example: +``` +git checkout v2 +git pull +git tag v2.2.0 +git push --tags +``` +3. CI automatically builds a draft Github release. Review it and edit as + necessary. Look for "Other changes". That would indicate a pull request not + labeled properly. Tweak labels and pull request titles until changelog looks + good for users. +4. Check "create discussion" box, in the "Releases" category. +5. If new version is an alpha or beta only, check pre-release box. + + +[issues-tracker]: https://github.com/pelletier/go-toml/issues +[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md +[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml +[readme]: ./README.md +[fork]: https://help.github.com/articles/fork-a-repo +[pull-request]: https://help.github.com/en/articles/creating-a-pull-request +[new-release]: https://github.com/pelletier/go-toml/releases/new +[gh]: https://github.com/cli/cli +[pr-labels]: https://github.com/pelletier/go-toml/blob/v2/.github/release.yml diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/Dockerfile b/nodeadm/vendor/github.com/pelletier/go-toml/v2/Dockerfile new file mode 100644 index 000000000..b9e933237 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/Dockerfile @@ -0,0 +1,5 @@ +FROM scratch +ENV PATH "$PATH:/bin" +COPY tomll /bin/tomll +COPY tomljson /bin/tomljson +COPY jsontoml /bin/jsontoml diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/LICENSE b/nodeadm/vendor/github.com/pelletier/go-toml/v2/LICENSE new file mode 100644 index 000000000..991e2ae96 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + 
+go-toml v2 +Copyright (c) 2021 - 2023 Thomas Pelletier + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/README.md b/nodeadm/vendor/github.com/pelletier/go-toml/v2/README.md new file mode 100644 index 000000000..d964b25fe --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/README.md @@ -0,0 +1,576 @@ +# go-toml v2 + +Go library for the [TOML](https://toml.io/en/) format. + +This library supports [TOML v1.0.0](https://toml.io/en/v1.0.0). + +[🐞 Bug Reports](https://github.com/pelletier/go-toml/issues) + +[💬 Anything else](https://github.com/pelletier/go-toml/discussions) + +## Documentation + +Full API, examples, and implementation notes are available in the Go +documentation. + +[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml/v2.svg)](https://pkg.go.dev/github.com/pelletier/go-toml/v2) + +## Import + +```go +import "github.com/pelletier/go-toml/v2" +``` + +See [Modules](#Modules). 
+ +## Features + +### Stdlib behavior + +As much as possible, this library is designed to behave similarly as the +standard library's `encoding/json`. + +### Performance + +While go-toml favors usability, it is written with performance in mind. Most +operations should not be shockingly slow. See [benchmarks](#benchmarks). + +### Strict mode + +`Decoder` can be set to "strict mode", which makes it error when some parts of +the TOML document was not present in the target structure. This is a great way +to check for typos. [See example in the documentation][strict]. + +[strict]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Decoder.DisallowUnknownFields + +### Contextualized errors + +When most decoding errors occur, go-toml returns [`DecodeError`][decode-err], +which contains a human readable contextualized version of the error. For +example: + +``` +1| [server] +2| path = 100 + | ~~~ cannot decode TOML integer into struct field toml_test.Server.Path of type string +3| port = 50 +``` + +[decode-err]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#DecodeError + +### Local date and time support + +TOML supports native [local date/times][ldt]. It allows to represent a given +date, time, or date-time without relation to a timezone or offset. To support +this use-case, go-toml provides [`LocalDate`][tld], [`LocalTime`][tlt], and +[`LocalDateTime`][tldt]. Those types can be transformed to and from `time.Time`, +making them convenient yet unambiguous structures for their respective TOML +representation. + +[ldt]: https://toml.io/en/v1.0.0#local-date-time +[tld]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDate +[tlt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalTime +[tldt]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#LocalDateTime + +### Commented config + +Since TOML is often used for configuration files, go-toml can emit documents +annotated with [comments and commented-out values][comments-example]. 
For +example, it can generate the following file: + +```toml +# Host IP to connect to. +host = '127.0.0.1' +# Port of the remote server. +port = 4242 + +# Encryption parameters (optional) +# [TLS] +# cipher = 'AEAD-AES128-GCM-SHA256' +# version = 'TLS 1.3' +``` + +[comments-example]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#example-Marshal-Commented + +## Getting started + +Given the following struct, let's see how to read it and write it as TOML: + +```go +type MyConfig struct { + Version int + Name string + Tags []string +} +``` + +### Unmarshaling + +[`Unmarshal`][unmarshal] reads a TOML document and fills a Go structure with its +content. For example: + +```go +doc := ` +version = 2 +name = "go-toml" +tags = ["go", "toml"] +` + +var cfg MyConfig +err := toml.Unmarshal([]byte(doc), &cfg) +if err != nil { + panic(err) +} +fmt.Println("version:", cfg.Version) +fmt.Println("name:", cfg.Name) +fmt.Println("tags:", cfg.Tags) + +// Output: +// version: 2 +// name: go-toml +// tags: [go toml] +``` + +[unmarshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Unmarshal + +### Marshaling + +[`Marshal`][marshal] is the opposite of Unmarshal: it represents a Go structure +as a TOML document: + +```go +cfg := MyConfig{ + Version: 2, + Name: "go-toml", + Tags: []string{"go", "toml"}, +} + +b, err := toml.Marshal(cfg) +if err != nil { + panic(err) +} +fmt.Println(string(b)) + +// Output: +// Version = 2 +// Name = 'go-toml' +// Tags = ['go', 'toml'] +``` + +[marshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Marshal + +## Unstable API + +This API does not yet follow the backward compatibility guarantees of this +library. They provide early access to features that may have rough edges or an +API subject to change. + +### Parser + +Parser is the unstable API that allows iterative parsing of a TOML document at +the AST level. See https://pkg.go.dev/github.com/pelletier/go-toml/v2/unstable. 
+ +## Benchmarks + +Execution time speedup compared to other Go TOML libraries: + + + + + + + + + + + + + +
Benchmarkgo-toml v1BurntSushi/toml
Marshal/HugoFrontMatter-21.9x2.2x
Marshal/ReferenceFile/map-21.7x2.1x
Marshal/ReferenceFile/struct-22.2x3.0x
Unmarshal/HugoFrontMatter-22.9x2.7x
Unmarshal/ReferenceFile/map-22.6x2.7x
Unmarshal/ReferenceFile/struct-24.6x5.1x
+
See more +

The table above has the results of the most common use-cases. The table below +contains the results of all benchmarks, including unrealistic ones. It is +provided for completeness.

+ + + + + + + + + + + + + + + + + + +
Benchmarkgo-toml v1BurntSushi/toml
Marshal/SimpleDocument/map-21.8x2.7x
Marshal/SimpleDocument/struct-22.7x3.8x
Unmarshal/SimpleDocument/map-23.8x3.0x
Unmarshal/SimpleDocument/struct-25.6x4.1x
UnmarshalDataset/example-23.0x3.2x
UnmarshalDataset/code-22.3x2.9x
UnmarshalDataset/twitter-22.6x2.7x
UnmarshalDataset/citm_catalog-22.2x2.3x
UnmarshalDataset/canada-21.8x1.5x
UnmarshalDataset/config-24.1x2.9x
geomean2.7x2.8x
+

This table can be generated with ./ci.sh benchmark -a -html.

+
+ +## Modules + +go-toml uses Go's standard modules system. + +Installation instructions: + +- Go ≥ 1.16: Nothing to do. Use the import in your code. The `go` command deals + with it automatically. +- Go ≥ 1.13: `GO111MODULE=on go get github.com/pelletier/go-toml/v2`. + +In case of trouble: [Go Modules FAQ][mod-faq]. + +[mod-faq]: https://github.com/golang/go/wiki/Modules#why-does-installing-a-tool-via-go-get-fail-with-error-cannot-find-main-module + +## Tools + +Go-toml provides three handy command line tools: + + * `tomljson`: Reads a TOML file and outputs its JSON representation. + + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/tomljson@latest + $ tomljson --help + ``` + + * `jsontoml`: Reads a JSON file and outputs a TOML representation. + + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/jsontoml@latest + $ jsontoml --help + ``` + + * `tomll`: Lints and reformats a TOML file. + + ``` + $ go install github.com/pelletier/go-toml/v2/cmd/tomll@latest + $ tomll --help + ``` + +### Docker image + +Those tools are also available as a [Docker image][docker]. For example, to use +`tomljson`: + +``` +docker run -i ghcr.io/pelletier/go-toml:v2 tomljson < example.toml +``` + +Multiple versions are available on [ghcr.io][docker]. + +[docker]: https://github.com/pelletier/go-toml/pkgs/container/go-toml + +## Migrating from v1 + +This section describes the differences between v1 and v2, with some pointers on +how to get the original behavior when possible. + +### Decoding / Unmarshal + +#### Automatic field name guessing + +When unmarshaling to a struct, if a key in the TOML document does not exactly +match the name of a struct field or any of the `toml`-tagged field, v1 tries +multiple variations of the key ([code][v1-keys]). + +V2 instead does a case-insensitive matching, like `encoding/json`. + +This could impact you if you are relying on casing to differentiate two fields, +and one of them is a not using the `toml` struct tag. 
The recommended solution +is to be specific about tag names for those fields using the `toml` struct tag. + +[v1-keys]: https://github.com/pelletier/go-toml/blob/a2e52561804c6cd9392ebf0048ca64fe4af67a43/marshal.go#L775-L781 + +#### Ignore preexisting value in interface + +When decoding into a non-nil `interface{}`, go-toml v1 uses the type of the +element in the interface to decode the object. For example: + +```go +type inner struct { + B interface{} +} +type doc struct { + A interface{} +} + +d := doc{ + A: inner{ + B: "Before", + }, +} + +data := ` +[A] +B = "After" +` + +toml.Unmarshal([]byte(data), &d) +fmt.Printf("toml v1: %#v\n", d) + +// toml v1: main.doc{A:main.inner{B:"After"}} +``` + +In this case, field `A` is of type `interface{}`, containing a `inner` struct. +V1 sees that type and uses it when decoding the object. + +When decoding an object into an `interface{}`, V2 instead disregards whatever +value the `interface{}` may contain and replaces it with a +`map[string]interface{}`. With the same data structure as above, here is what +the result looks like: + +```go +toml.Unmarshal([]byte(data), &d) +fmt.Printf("toml v2: %#v\n", d) + +// toml v2: main.doc{A:map[string]interface {}{"B":"After"}} +``` + +This is to match `encoding/json`'s behavior. There is no way to make the v2 +decoder behave like v1. + +#### Values out of array bounds ignored + +When decoding into an array, v1 returns an error when the number of elements +contained in the doc is superior to the capacity of the array. 
For example: + +```go +type doc struct { + A [2]string +} +d := doc{} +err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d) +fmt.Println(err) + +// (1, 1): unmarshal: TOML array length (3) exceeds destination array length (2) +``` + +In the same situation, v2 ignores the last value: + +```go +err := toml.Unmarshal([]byte(`A = ["one", "two", "many"]`), &d) +fmt.Println("err:", err, "d:", d) +// err: d: {[one two]} +``` + +This is to match `encoding/json`'s behavior. There is no way to make the v2 +decoder behave like v1. + +#### Support for `toml.Unmarshaler` has been dropped + +This method was not widely used, poorly defined, and added a lot of complexity. +A similar effect can be achieved by implementing the `encoding.TextUnmarshaler` +interface and use strings. + +#### Support for `default` struct tag has been dropped + +This feature adds complexity and a poorly defined API for an effect that can be +accomplished outside of the library. + +It does not seem like other format parsers in Go support that feature (the +project referenced in the original ticket #202 has not been updated since 2017). +Given that go-toml v2 should not touch values not in the document, the same +effect can be achieved by pre-filling the struct with defaults (libraries like +[go-defaults][go-defaults] can help). Also, string representation is not well +defined for all types: it creates issues like #278. + +The recommended replacement is pre-filling the struct before unmarshaling. + +[go-defaults]: https://github.com/mcuadros/go-defaults + +#### `toml.Tree` replacement + +This structure was the initial attempt at providing a document model for +go-toml. It allows manipulating the structure of any document, encoding and +decoding from their TOML representation. While a more robust feature was +initially planned in go-toml v2, this has been ultimately [removed from +scope][nodoc] of this library, with no plan to add it back at the moment. 
The +closest equivalent at the moment would be to unmarshal into an `interface{}` and +use type assertions and/or reflection to manipulate the arbitrary +structure. However this would fall short of providing all of the TOML features +such as adding comments and be specific about whitespace. + + +#### `toml.Position` are not retrievable anymore + +The API for retrieving the position (line, column) of a specific TOML element do +not exist anymore. This was done to minimize the amount of concepts introduced +by the library (query path), and avoid the performance hit related to storing +positions in the absence of a document model, for a feature that seemed to have +little use. Errors however have gained more detailed position +information. Position retrieval seems better fitted for a document model, which +has been [removed from the scope][nodoc] of go-toml v2 at the moment. + +### Encoding / Marshal + +#### Default struct fields order + +V1 emits struct fields order alphabetically by default. V2 struct fields are +emitted in order they are defined. For example: + +```go +type S struct { + B string + A string +} + +data := S{ + B: "B", + A: "A", +} + +b, _ := tomlv1.Marshal(data) +fmt.Println("v1:\n" + string(b)) + +b, _ = tomlv2.Marshal(data) +fmt.Println("v2:\n" + string(b)) + +// Output: +// v1: +// A = "A" +// B = "B" + +// v2: +// B = 'B' +// A = 'A' +``` + +There is no way to make v2 encoder behave like v1. A workaround could be to +manually sort the fields alphabetically in the struct definition, or generate +struct types using `reflect.StructOf`. + +#### No indentation by default + +V1 automatically indents content of tables by default. V2 does not. However the +same behavior can be obtained using [`Encoder.SetIndentTables`][sit]. 
For example: + +```go +data := map[string]interface{}{ + "table": map[string]string{ + "key": "value", + }, +} + +b, _ := tomlv1.Marshal(data) +fmt.Println("v1:\n" + string(b)) + +b, _ = tomlv2.Marshal(data) +fmt.Println("v2:\n" + string(b)) + +buf := bytes.Buffer{} +enc := tomlv2.NewEncoder(&buf) +enc.SetIndentTables(true) +enc.Encode(data) +fmt.Println("v2 Encoder:\n" + string(buf.Bytes())) + +// Output: +// v1: +// +// [table] +// key = "value" +// +// v2: +// [table] +// key = 'value' +// +// +// v2 Encoder: +// [table] +// key = 'value' +``` + +[sit]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Encoder.SetIndentTables + +#### Keys and strings are single quoted + +V1 always uses double quotes (`"`) around strings and keys that cannot be +represented bare (unquoted). V2 uses single quotes instead by default (`'`), +unless a character cannot be represented, then falls back to double quotes. As a +result of this change, `Encoder.QuoteMapKeys` has been removed, as it is not +useful anymore. + +There is no way to make v2 encoder behave like v1. + +#### `TextMarshaler` emits as a string, not TOML + +Types that implement [`encoding.TextMarshaler`][tm] can emit arbitrary TOML in +v1. The encoder would append the result to the output directly. In v2 the result +is wrapped in a string. As a result, this interface cannot be implemented by the +root object. + +There is no way to make v2 encoder behave like v1. + +[tm]: https://golang.org/pkg/encoding/#TextMarshaler + +#### `Encoder.CompactComments` has been removed + +Emitting compact comments is now the default behavior of go-toml. This option +is not necessary anymore. + +#### Struct tags have been merged + +V1 used to provide multiple struct tags: `comment`, `commented`, `multiline`, +`toml`, and `omitempty`. To behave more like the standard library, v2 has merged +`toml`, `multiline`, `commented`, and `omitempty`. 
For example: + +```go +type doc struct { + // v1 + F string `toml:"field" multiline:"true" omitempty:"true" commented:"true"` + // v2 + F string `toml:"field,multiline,omitempty,commented"` +} +``` + +Has a result, the `Encoder.SetTag*` methods have been removed, as there is just +one tag now. + +#### `Encoder.ArraysWithOneElementPerLine` has been renamed + +The new name is `Encoder.SetArraysMultiline`. The behavior should be the same. + +#### `Encoder.Indentation` has been renamed + +The new name is `Encoder.SetIndentSymbol`. The behavior should be the same. + + +#### Embedded structs behave like stdlib + +V1 defaults to merging embedded struct fields into the embedding struct. This +behavior was unexpected because it does not follow the standard library. To +avoid breaking backward compatibility, the `Encoder.PromoteAnonymous` method was +added to make the encoder behave correctly. Given backward compatibility is not +a problem anymore, v2 does the right thing by default: it follows the behavior +of `encoding/json`. `Encoder.PromoteAnonymous` has been removed. + +[nodoc]: https://github.com/pelletier/go-toml/discussions/506#discussioncomment-1526038 + +### `query` + +go-toml v1 provided the [`go-toml/query`][query] package. It allowed to run +JSONPath-style queries on TOML files. This feature is not available in v2. For a +replacement, check out [dasel][dasel]. + +This package has been removed because it was essentially not supported anymore +(last commit May 2020), increased the complexity of the code base, and more +complete solutions exist out there. + +[query]: https://github.com/pelletier/go-toml/tree/f99d6bbca119636aeafcf351ee52b3d202782627/query +[dasel]: https://github.com/TomWright/dasel + +## Versioning + +Expect for parts explicitely marked otherwise, go-toml follows [Semantic +Versioning](https://semver.org). The supported version of +[TOML](https://github.com/toml-lang/toml) is indicated at the beginning of this +document. 
The last two major versions of Go are supported (see [Go Release +Policy](https://golang.org/doc/devel/release.html#policy)). + +## License + +The MIT License (MIT). Read [LICENSE](LICENSE). diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/SECURITY.md b/nodeadm/vendor/github.com/pelletier/go-toml/v2/SECURITY.md new file mode 100644 index 000000000..d4d554fda --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/SECURITY.md @@ -0,0 +1,16 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ---------- | ------------------ | +| Latest 2.x | :white_check_mark: | +| All 1.x | :x: | +| All 0.x | :x: | + +## Reporting a Vulnerability + +Email a vulnerability report to `security@pelletier.codes`. Make sure to include +as many details as possible to reproduce the vulnerability. This is a +side-project: I will try to get back to you as quickly as possible, time +permitting in my personal life. Providing a working patch helps very much! diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/ci.sh b/nodeadm/vendor/github.com/pelletier/go-toml/v2/ci.sh new file mode 100644 index 000000000..86217a9b0 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/ci.sh @@ -0,0 +1,284 @@ +#!/usr/bin/env bash + + +stderr() { + echo "$@" 1>&2 +} + +usage() { + b=$(basename "$0") + echo $b: ERROR: "$@" 1>&2 + + cat 1>&2 < coverage.out + go tool cover -func=coverage.out + echo "Coverage profile for ${branch}: ${dir}/coverage.out" >&2 + popd + + if [ "${branch}" != "HEAD" ]; then + git worktree remove --force "$dir" + fi +} + +coverage() { + case "$1" in + -d) + shift + target="${1?Need to provide a target branch argument}" + + output_dir="$(mktemp -d)" + target_out="${output_dir}/target.txt" + head_out="${output_dir}/head.txt" + + cover "${target}" > "${target_out}" + cover "HEAD" > "${head_out}" + + cat "${target_out}" + cat "${head_out}" + + echo "" + + target_pct="$(tail -n2 ${target_out} | head -n1 | sed -E 
's/.*total.*\t([0-9.]+)%.*/\1/')" + head_pct="$(tail -n2 ${head_out} | head -n1 | sed -E 's/.*total.*\t([0-9.]+)%/\1/')" + echo "Results: ${target} ${target_pct}% HEAD ${head_pct}%" + + delta_pct=$(echo "$head_pct - $target_pct" | bc -l) + echo "Delta: ${delta_pct}" + + if [[ $delta_pct = \-* ]]; then + echo "Regression!"; + + target_diff="${output_dir}/target.diff.txt" + head_diff="${output_dir}/head.diff.txt" + cat "${target_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${target_diff}" + cat "${head_out}" | grep -E '^github.com/pelletier/go-toml' | tr -s "\t " | cut -f 2,3 | sort > "${head_diff}" + + diff --side-by-side --suppress-common-lines "${target_diff}" "${head_diff}" + return 1 + fi + return 0 + ;; + esac + + cover "${1-HEAD}" +} + +bench() { + branch="${1}" + out="${2}" + replace="${3}" + dir="$(mktemp -d)" + + stderr "Executing benchmark for ${branch} at ${dir}" + + if [ "${branch}" = "HEAD" ]; then + cp -r . "${dir}/" + else + git worktree add "$dir" "$branch" + fi + + pushd "$dir" + + if [ "${replace}" != "" ]; then + find ./benchmark/ -iname '*.go' -exec sed -i -E "s|github.com/pelletier/go-toml/v2|${replace}|g" {} \; + go get "${replace}" + fi + + export GOMAXPROCS=2 + go test '-bench=^Benchmark(Un)?[mM]arshal' -count=10 -run=Nothing ./... 
| tee "${out}" + popd + + if [ "${branch}" != "HEAD" ]; then + git worktree remove --force "$dir" + fi +} + +fmktemp() { + if mktemp --version &> /dev/null; then + # GNU + mktemp --suffix=-$1 + else + # BSD + mktemp -t $1 + fi +} + +benchstathtml() { +python3 - $1 <<'EOF' +import sys + +lines = [] +stop = False + +with open(sys.argv[1]) as f: + for line in f.readlines(): + line = line.strip() + if line == "": + stop = True + if not stop: + lines.append(line.split(',')) + +results = [] +for line in reversed(lines[2:]): + if len(line) < 8 or line[0] == "": + continue + v2 = float(line[1]) + results.append([ + line[0].replace("-32", ""), + "%.1fx" % (float(line[3])/v2), # v1 + "%.1fx" % (float(line[7])/v2), # bs + ]) +# move geomean to the end +results.append(results[0]) +del results[0] + + +def printtable(data): + print(""" + + + + + """) + + for r in data: + print(" ".format(*r)) + + print(""" +
Benchmarkgo-toml v1BurntSushi/toml
{}{}{}
""") + + +def match(x): + return "ReferenceFile" in x[0] or "HugoFrontMatter" in x[0] + +above = [x for x in results if match(x)] +below = [x for x in results if not match(x)] + +printtable(above) +print("
See more") +print("""

The table above has the results of the most common use-cases. The table below +contains the results of all benchmarks, including unrealistic ones. It is +provided for completeness.

""") +printtable(below) +print('

This table can be generated with ./ci.sh benchmark -a -html.

') +print("
") + +EOF +} + +benchmark() { + case "$1" in + -d) + shift + target="${1?Need to provide a target branch argument}" + + old=`fmktemp ${target}` + bench "${target}" "${old}" + + new=`fmktemp HEAD` + bench HEAD "${new}" + + benchstat "${old}" "${new}" + return 0 + ;; + -a) + shift + + v2stats=`fmktemp go-toml-v2` + bench HEAD "${v2stats}" "github.com/pelletier/go-toml/v2" + v1stats=`fmktemp go-toml-v1` + bench HEAD "${v1stats}" "github.com/pelletier/go-toml" + bsstats=`fmktemp bs-toml` + bench HEAD "${bsstats}" "github.com/BurntSushi/toml" + + cp "${v2stats}" go-toml-v2.txt + cp "${v1stats}" go-toml-v1.txt + cp "${bsstats}" bs-toml.txt + + if [ "$1" = "-html" ]; then + tmpcsv=`fmktemp csv` + benchstat -format csv go-toml-v2.txt go-toml-v1.txt bs-toml.txt > $tmpcsv + benchstathtml $tmpcsv + else + benchstat go-toml-v2.txt go-toml-v1.txt bs-toml.txt + fi + + rm -f go-toml-v2.txt go-toml-v1.txt bs-toml.txt + return $? + esac + + bench "${1-HEAD}" `mktemp` +} + +case "$1" in + coverage) shift; coverage $@;; + benchmark) shift; benchmark $@;; + *) usage "bad argument $1";; +esac diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/decode.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/decode.go new file mode 100644 index 000000000..f0ec3b170 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/decode.go @@ -0,0 +1,550 @@ +package toml + +import ( + "fmt" + "math" + "strconv" + "time" + + "github.com/pelletier/go-toml/v2/unstable" +) + +func parseInteger(b []byte) (int64, error) { + if len(b) > 2 && b[0] == '0' { + switch b[1] { + case 'x': + return parseIntHex(b) + case 'b': + return parseIntBin(b) + case 'o': + return parseIntOct(b) + default: + panic(fmt.Errorf("invalid base '%c', should have been checked by scanIntOrFloat", b[1])) + } + } + + return parseIntDec(b) +} + +func parseLocalDate(b []byte) (LocalDate, error) { + // full-date = date-fullyear "-" date-month "-" date-mday + // date-fullyear = 4DIGIT + // date-month = 2DIGIT ; 01-12 + // 
date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year + var date LocalDate + + if len(b) != 10 || b[4] != '-' || b[7] != '-' { + return date, unstable.NewParserError(b, "dates are expected to have the format YYYY-MM-DD") + } + + var err error + + date.Year, err = parseDecimalDigits(b[0:4]) + if err != nil { + return LocalDate{}, err + } + + date.Month, err = parseDecimalDigits(b[5:7]) + if err != nil { + return LocalDate{}, err + } + + date.Day, err = parseDecimalDigits(b[8:10]) + if err != nil { + return LocalDate{}, err + } + + if !isValidDate(date.Year, date.Month, date.Day) { + return LocalDate{}, unstable.NewParserError(b, "impossible date") + } + + return date, nil +} + +func parseDecimalDigits(b []byte) (int, error) { + v := 0 + + for i, c := range b { + if c < '0' || c > '9' { + return 0, unstable.NewParserError(b[i:i+1], "expected digit (0-9)") + } + v *= 10 + v += int(c - '0') + } + + return v, nil +} + +func parseDateTime(b []byte) (time.Time, error) { + // offset-date-time = full-date time-delim full-time + // full-time = partial-time time-offset + // time-offset = "Z" / time-numoffset + // time-numoffset = ( "+" / "-" ) time-hour ":" time-minute + + dt, b, err := parseLocalDateTime(b) + if err != nil { + return time.Time{}, err + } + + var zone *time.Location + + if len(b) == 0 { + // parser should have checked that when assigning the date time node + panic("date time should have a timezone") + } + + if b[0] == 'Z' || b[0] == 'z' { + b = b[1:] + zone = time.UTC + } else { + const dateTimeByteLen = 6 + if len(b) != dateTimeByteLen { + return time.Time{}, unstable.NewParserError(b, "invalid date-time timezone") + } + var direction int + switch b[0] { + case '-': + direction = -1 + case '+': + direction = +1 + default: + return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset character") + } + + if b[3] != ':' { + return time.Time{}, unstable.NewParserError(b[3:4], "expected a : separator") + } + + hours, err := 
parseDecimalDigits(b[1:3]) + if err != nil { + return time.Time{}, err + } + if hours > 23 { + return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset hours") + } + + minutes, err := parseDecimalDigits(b[4:6]) + if err != nil { + return time.Time{}, err + } + if minutes > 59 { + return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset minutes") + } + + seconds := direction * (hours*3600 + minutes*60) + if seconds == 0 { + zone = time.UTC + } else { + zone = time.FixedZone("", seconds) + } + b = b[dateTimeByteLen:] + } + + if len(b) > 0 { + return time.Time{}, unstable.NewParserError(b, "extra bytes at the end of the timezone") + } + + t := time.Date( + dt.Year, + time.Month(dt.Month), + dt.Day, + dt.Hour, + dt.Minute, + dt.Second, + dt.Nanosecond, + zone) + + return t, nil +} + +func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) { + var dt LocalDateTime + + const localDateTimeByteMinLen = 11 + if len(b) < localDateTimeByteMinLen { + return dt, nil, unstable.NewParserError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]") + } + + date, err := parseLocalDate(b[:10]) + if err != nil { + return dt, nil, err + } + dt.LocalDate = date + + sep := b[10] + if sep != 'T' && sep != ' ' && sep != 't' { + return dt, nil, unstable.NewParserError(b[10:11], "datetime separator is expected to be T or a space") + } + + t, rest, err := parseLocalTime(b[11:]) + if err != nil { + return dt, nil, err + } + dt.LocalTime = t + + return dt, rest, nil +} + +// parseLocalTime is a bit different because it also returns the remaining +// []byte that is didn't need. This is to allow parseDateTime to parse those +// remaining bytes as a timezone. 
+func parseLocalTime(b []byte) (LocalTime, []byte, error) { + var ( + nspow = [10]int{0, 1e8, 1e7, 1e6, 1e5, 1e4, 1e3, 1e2, 1e1, 1e0} + t LocalTime + ) + + // check if b matches to have expected format HH:MM:SS[.NNNNNN] + const localTimeByteLen = 8 + if len(b) < localTimeByteLen { + return t, nil, unstable.NewParserError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]") + } + + var err error + + t.Hour, err = parseDecimalDigits(b[0:2]) + if err != nil { + return t, nil, err + } + + if t.Hour > 23 { + return t, nil, unstable.NewParserError(b[0:2], "hour cannot be greater 23") + } + if b[2] != ':' { + return t, nil, unstable.NewParserError(b[2:3], "expecting colon between hours and minutes") + } + + t.Minute, err = parseDecimalDigits(b[3:5]) + if err != nil { + return t, nil, err + } + if t.Minute > 59 { + return t, nil, unstable.NewParserError(b[3:5], "minutes cannot be greater 59") + } + if b[5] != ':' { + return t, nil, unstable.NewParserError(b[5:6], "expecting colon between minutes and seconds") + } + + t.Second, err = parseDecimalDigits(b[6:8]) + if err != nil { + return t, nil, err + } + + if t.Second > 60 { + return t, nil, unstable.NewParserError(b[6:8], "seconds cannot be greater 60") + } + + b = b[8:] + + if len(b) >= 1 && b[0] == '.' { + frac := 0 + precision := 0 + digits := 0 + + for i, c := range b[1:] { + if !isDigit(c) { + if i == 0 { + return t, nil, unstable.NewParserError(b[0:1], "need at least one digit after fraction point") + } + break + } + digits++ + + const maxFracPrecision = 9 + if i >= maxFracPrecision { + // go-toml allows decoding fractional seconds + // beyond the supported precision of 9 + // digits. It truncates the fractional component + // to the supported precision and ignores the + // remaining digits. 
+ // + // https://github.com/pelletier/go-toml/discussions/707 + continue + } + + frac *= 10 + frac += int(c - '0') + precision++ + } + + if precision == 0 { + return t, nil, unstable.NewParserError(b[:1], "nanoseconds need at least one digit") + } + + t.Nanosecond = frac * nspow[precision] + t.Precision = precision + + return t, b[1+digits:], nil + } + return t, b, nil +} + +//nolint:cyclop +func parseFloat(b []byte) (float64, error) { + if len(b) == 4 && (b[0] == '+' || b[0] == '-') && b[1] == 'n' && b[2] == 'a' && b[3] == 'n' { + return math.NaN(), nil + } + + cleaned, err := checkAndRemoveUnderscoresFloats(b) + if err != nil { + return 0, err + } + + if cleaned[0] == '.' { + return 0, unstable.NewParserError(b, "float cannot start with a dot") + } + + if cleaned[len(cleaned)-1] == '.' { + return 0, unstable.NewParserError(b, "float cannot end with a dot") + } + + dotAlreadySeen := false + for i, c := range cleaned { + if c == '.' { + if dotAlreadySeen { + return 0, unstable.NewParserError(b[i:i+1], "float can have at most one decimal point") + } + if !isDigit(cleaned[i-1]) { + return 0, unstable.NewParserError(b[i-1:i+1], "float decimal point must be preceded by a digit") + } + if !isDigit(cleaned[i+1]) { + return 0, unstable.NewParserError(b[i:i+2], "float decimal point must be followed by a digit") + } + dotAlreadySeen = true + } + } + + start := 0 + if cleaned[0] == '+' || cleaned[0] == '-' { + start = 1 + } + if cleaned[start] == '0' && len(cleaned) > start+1 && isDigit(cleaned[start+1]) { + return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes") + } + + f, err := strconv.ParseFloat(string(cleaned), 64) + if err != nil { + return 0, unstable.NewParserError(b, "unable to parse float: %w", err) + } + + return f, nil +} + +func parseIntHex(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 16, 64) + if err != 
nil { + return 0, unstable.NewParserError(b, "couldn't parse hexadecimal number: %w", err) + } + + return i, nil +} + +func parseIntOct(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 8, 64) + if err != nil { + return 0, unstable.NewParserError(b, "couldn't parse octal number: %w", err) + } + + return i, nil +} + +func parseIntBin(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b[2:]) + if err != nil { + return 0, err + } + + i, err := strconv.ParseInt(string(cleaned), 2, 64) + if err != nil { + return 0, unstable.NewParserError(b, "couldn't parse binary number: %w", err) + } + + return i, nil +} + +func isSign(b byte) bool { + return b == '+' || b == '-' +} + +func parseIntDec(b []byte) (int64, error) { + cleaned, err := checkAndRemoveUnderscoresIntegers(b) + if err != nil { + return 0, err + } + + startIdx := 0 + + if isSign(cleaned[0]) { + startIdx++ + } + + if len(cleaned) > startIdx+1 && cleaned[startIdx] == '0' { + return 0, unstable.NewParserError(b, "leading zero not allowed on decimal number") + } + + i, err := strconv.ParseInt(string(cleaned), 10, 64) + if err != nil { + return 0, unstable.NewParserError(b, "couldn't parse decimal number: %w", err) + } + + return i, nil +} + +func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) { + start := 0 + if b[start] == '+' || b[start] == '-' { + start++ + } + + if len(b) == start { + return b, nil + } + + if b[start] == '_' { + return nil, unstable.NewParserError(b[start:start+1], "number cannot start with underscore") + } + + if b[len(b)-1] == '_' { + return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore") + } + + // fast path + i := 0 + for ; i < len(b); i++ { + if b[i] == '_' { + break + } + } + if i == len(b) { + return b, nil + } + + before := false + cleaned := make([]byte, i, len(b)) + copy(cleaned, b) + + for 
i++; i < len(b); i++ { + c := b[i] + if c == '_' { + if !before { + return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores") + } + before = false + } else { + before = true + cleaned = append(cleaned, c) + } + } + + return cleaned, nil +} + +func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) { + if b[0] == '_' { + return nil, unstable.NewParserError(b[0:1], "number cannot start with underscore") + } + + if b[len(b)-1] == '_' { + return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore") + } + + // fast path + i := 0 + for ; i < len(b); i++ { + if b[i] == '_' { + break + } + } + if i == len(b) { + return b, nil + } + + before := false + cleaned := make([]byte, 0, len(b)) + + for i := 0; i < len(b); i++ { + c := b[i] + + switch c { + case '_': + if !before { + return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores") + } + if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') { + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore before exponent") + } + before = false + case '+', '-': + // signed exponents + cleaned = append(cleaned, c) + before = false + case 'e', 'E': + if i < len(b)-1 && b[i+1] == '_' { + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after exponent") + } + cleaned = append(cleaned, c) + case '.': + if i < len(b)-1 && b[i+1] == '_' { + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after decimal point") + } + if i > 0 && b[i-1] == '_' { + return nil, unstable.NewParserError(b[i-1:i], "cannot have underscore before decimal point") + } + cleaned = append(cleaned, c) + default: + before = true + cleaned = append(cleaned, c) + } + } + + return cleaned, nil +} + +// isValidDate checks if a provided date is a date that exists. 
+func isValidDate(year int, month int, day int) bool { + return month > 0 && month < 13 && day > 0 && day <= daysIn(month, year) +} + +// daysBefore[m] counts the number of days in a non-leap year +// before month m begins. There is an entry for m=12, counting +// the number of days before January of next year (365). +var daysBefore = [...]int32{ + 0, + 31, + 31 + 28, + 31 + 28 + 31, + 31 + 28 + 31 + 30, + 31 + 28 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30, + 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31, +} + +func daysIn(m int, year int) int { + if m == 2 && isLeap(year) { + return 29 + } + return int(daysBefore[m] - daysBefore[m-1]) +} + +func isLeap(year int) bool { + return year%4 == 0 && (year%100 != 0 || year%400 == 0) +} + +func isDigit(r byte) bool { + return r >= '0' && r <= '9' +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/doc.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/doc.go new file mode 100644 index 000000000..b7bc599bd --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/doc.go @@ -0,0 +1,2 @@ +// Package toml is a library to read and write TOML documents. +package toml diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/errors.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/errors.go new file mode 100644 index 000000000..309733f1f --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/errors.go @@ -0,0 +1,252 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + + "github.com/pelletier/go-toml/v2/internal/danger" + "github.com/pelletier/go-toml/v2/unstable" +) + +// DecodeError represents an error encountered during the parsing or decoding +// of a TOML document. 
+// +// In addition to the error message, it contains the position in the document +// where it happened, as well as a human-readable representation that shows +// where the error occurred in the document. +type DecodeError struct { + message string + line int + column int + key Key + + human string +} + +// StrictMissingError occurs in a TOML document that does not have a +// corresponding field in the target value. It contains all the missing fields +// in Errors. +// +// Emitted by Decoder when DisallowUnknownFields() was called. +type StrictMissingError struct { + // One error per field that could not be found. + Errors []DecodeError +} + +// Error returns the canonical string for this error. +func (s *StrictMissingError) Error() string { + return "strict mode: fields in the document are missing in the target struct" +} + +// String returns a human readable description of all errors. +func (s *StrictMissingError) String() string { + var buf strings.Builder + + for i, e := range s.Errors { + if i > 0 { + buf.WriteString("\n---\n") + } + + buf.WriteString(e.String()) + } + + return buf.String() +} + +type Key []string + +// Error returns the error message contained in the DecodeError. +func (e *DecodeError) Error() string { + return "toml: " + e.message +} + +// String returns the human-readable contextualized error. This string is multi-line. +func (e *DecodeError) String() string { + return e.human +} + +// Position returns the (line, column) pair indicating where the error +// occurred in the document. Positions are 1-indexed. +func (e *DecodeError) Position() (row int, column int) { + return e.line, e.column +} + +// Key that was being processed when the error occurred. The key is present only +// if this DecodeError is part of a StrictMissingError. +func (e *DecodeError) Key() Key { + return e.key +} + +// decodeErrorFromHighlight creates a DecodeError referencing a highlighted +// range of bytes from document. 
+// +// highlight needs to be a sub-slice of document, or this function panics. +// +// The function copies all bytes used in DecodeError, so that document and +// highlight can be freely deallocated. +// +//nolint:funlen +func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError { + offset := danger.SubsliceOffset(document, de.Highlight) + + errMessage := de.Error() + errLine, errColumn := positionAtEnd(document[:offset]) + before, after := linesOfContext(document, de.Highlight, offset, 3) + + var buf strings.Builder + + maxLine := errLine + len(after) - 1 + lineColumnWidth := len(strconv.Itoa(maxLine)) + + // Write the lines of context strictly before the error. + for i := len(before) - 1; i > 0; i-- { + line := errLine - i + buf.WriteString(formatLineNumber(line, lineColumnWidth)) + buf.WriteString("|") + + if len(before[i]) > 0 { + buf.WriteString(" ") + buf.Write(before[i]) + } + + buf.WriteRune('\n') + } + + // Write the document line that contains the error. + + buf.WriteString(formatLineNumber(errLine, lineColumnWidth)) + buf.WriteString("| ") + + if len(before) > 0 { + buf.Write(before[0]) + } + + buf.Write(de.Highlight) + + if len(after) > 0 { + buf.Write(after[0]) + } + + buf.WriteRune('\n') + + // Write the line with the error message itself (so it does not have a line + // number). + + buf.WriteString(strings.Repeat(" ", lineColumnWidth)) + buf.WriteString("| ") + + if len(before) > 0 { + buf.WriteString(strings.Repeat(" ", len(before[0]))) + } + + buf.WriteString(strings.Repeat("~", len(de.Highlight))) + + if len(errMessage) > 0 { + buf.WriteString(" ") + buf.WriteString(errMessage) + } + + // Write the lines of context strictly after the error. 
+ + for i := 1; i < len(after); i++ { + buf.WriteRune('\n') + line := errLine + i + buf.WriteString(formatLineNumber(line, lineColumnWidth)) + buf.WriteString("|") + + if len(after[i]) > 0 { + buf.WriteString(" ") + buf.Write(after[i]) + } + } + + return &DecodeError{ + message: errMessage, + line: errLine, + column: errColumn, + key: de.Key, + human: buf.String(), + } +} + +func formatLineNumber(line int, width int) string { + format := "%" + strconv.Itoa(width) + "d" + + return fmt.Sprintf(format, line) +} + +func linesOfContext(document []byte, highlight []byte, offset int, linesAround int) ([][]byte, [][]byte) { + return beforeLines(document, offset, linesAround), afterLines(document, highlight, offset, linesAround) +} + +func beforeLines(document []byte, offset int, linesAround int) [][]byte { + var beforeLines [][]byte + + // Walk the document backward from the highlight to find previous lines + // of context. + rest := document[:offset] +backward: + for o := len(rest) - 1; o >= 0 && len(beforeLines) <= linesAround && len(rest) > 0; { + switch { + case rest[o] == '\n': + // handle individual lines + beforeLines = append(beforeLines, rest[o+1:]) + rest = rest[:o] + o = len(rest) - 1 + case o == 0: + // add the first line only if it's non-empty + beforeLines = append(beforeLines, rest) + + break backward + default: + o-- + } + } + + return beforeLines +} + +func afterLines(document []byte, highlight []byte, offset int, linesAround int) [][]byte { + var afterLines [][]byte + + // Walk the document forward from the highlight to find the following + // lines of context. 
+ rest := document[offset+len(highlight):] +forward: + for o := 0; o < len(rest) && len(afterLines) <= linesAround; { + switch { + case rest[o] == '\n': + // handle individual lines + afterLines = append(afterLines, rest[:o]) + rest = rest[o+1:] + o = 0 + + case o == len(rest)-1: + // add last line only if it's non-empty + afterLines = append(afterLines, rest) + + break forward + default: + o++ + } + } + + return afterLines +} + +func positionAtEnd(b []byte) (row int, column int) { + row = 1 + column = 1 + + for _, c := range b { + if c == '\n' { + row++ + column = 1 + } else { + column++ + } + } + + return +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go new file mode 100644 index 000000000..80f698db4 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go @@ -0,0 +1,42 @@ +package characters + +var invalidAsciiTable = [256]bool{ + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + // 0x09 TAB + // 0x0A LF + 0x0B: true, + 0x0C: true, + // 0x0D CR + 0x0E: true, + 0x0F: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1A: true, + 0x1B: true, + 0x1C: true, + 0x1D: true, + 0x1E: true, + 0x1F: true, + // 0x20 - 0x7E Printable ASCII characters + 0x7F: true, +} + +func InvalidAscii(b byte) bool { + return invalidAsciiTable[b] +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go new file mode 100644 index 000000000..db4f45acb --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go @@ -0,0 +1,199 @@ +package characters + +import ( + "unicode/utf8" +) + +type utf8Err struct { + Index int + 
Size int +} + +func (u utf8Err) Zero() bool { + return u.Size == 0 +} + +// Verified that a given string is only made of valid UTF-8 characters allowed +// by the TOML spec: +// +// Any Unicode character may be used except those that must be escaped: +// quotation mark, backslash, and the control characters other than tab (U+0000 +// to U+0008, U+000A to U+001F, U+007F). +// +// It is a copy of the Go 1.17 utf8.Valid implementation, tweaked to exit early +// when a character is not allowed. +// +// The returned utf8Err is Zero() if the string is valid, or contains the byte +// index and size of the invalid character. +// +// quotation mark => already checked +// backslash => already checked +// 0-0x8 => invalid +// 0x9 => tab, ok +// 0xA - 0x1F => invalid +// 0x7F => invalid +func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { + // Fast path. Check for and skip 8 bytes of ASCII characters per iteration. + offset := 0 + for len(p) >= 8 { + // Combining two 32 bit loads allows the same code to be used + // for 32 and 64 bit platforms. + // The compiler can generate a 32bit load for first32 and second32 + // on many platforms. See test/codegen/memcombine.go. + first32 := uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 + second32 := uint32(p[4]) | uint32(p[5])<<8 | uint32(p[6])<<16 | uint32(p[7])<<24 + if (first32|second32)&0x80808080 != 0 { + // Found a non ASCII byte (>= RuneSelf). + break + } + + for i, b := range p[:8] { + if InvalidAscii(b) { + err.Index = offset + i + err.Size = 1 + return + } + } + + p = p[8:] + offset += 8 + } + n := len(p) + for i := 0; i < n; { + pi := p[i] + if pi < utf8.RuneSelf { + if InvalidAscii(pi) { + err.Index = offset + i + err.Size = 1 + return + } + i++ + continue + } + x := first[pi] + if x == xx { + // Illegal starter byte. + err.Index = offset + i + err.Size = 1 + return + } + size := int(x & 7) + if i+size > n { + // Short or invalid. 
+ err.Index = offset + i + err.Size = n - i + return + } + accept := acceptRanges[x>>4] + if c := p[i+1]; c < accept.lo || accept.hi < c { + err.Index = offset + i + err.Size = 2 + return + } else if size == 2 { + } else if c := p[i+2]; c < locb || hicb < c { + err.Index = offset + i + err.Size = 3 + return + } else if size == 3 { + } else if c := p[i+3]; c < locb || hicb < c { + err.Index = offset + i + err.Size = 4 + return + } + i += size + } + return +} + +// Return the size of the next rune if valid, 0 otherwise. +func Utf8ValidNext(p []byte) int { + c := p[0] + + if c < utf8.RuneSelf { + if InvalidAscii(c) { + return 0 + } + return 1 + } + + x := first[c] + if x == xx { + // Illegal starter byte. + return 0 + } + size := int(x & 7) + if size > len(p) { + // Short or invalid. + return 0 + } + accept := acceptRanges[x>>4] + if c := p[1]; c < accept.lo || accept.hi < c { + return 0 + } else if size == 2 { + } else if c := p[2]; c < locb || hicb < c { + return 0 + } else if size == 3 { + } else if c := p[3]; c < locb || hicb < c { + return 0 + } + + return size +} + +// acceptRange gives the range of valid values for the second byte in a UTF-8 +// sequence. +type acceptRange struct { + lo uint8 // lowest value for second byte. + hi uint8 // highest value for second byte. +} + +// acceptRanges has size 16 to avoid bounds checks in the code that uses it. +var acceptRanges = [16]acceptRange{ + 0: {locb, hicb}, + 1: {0xA0, hicb}, + 2: {locb, 0x9F}, + 3: {0x90, hicb}, + 4: {locb, 0x8F}, +} + +// first is information about the first byte in a UTF-8 sequence. 
+var first = [256]uint8{ + // 1 2 3 4 5 6 7 8 9 A B C D E F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F + as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F + // 1 2 3 4 5 6 7 8 9 A B C D E F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF + xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF + xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF + s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF + s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF + s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF +} + +const ( + // The default lowest and highest continuation byte. + locb = 0b10000000 + hicb = 0b10111111 + + // These names of these constants are chosen to give nice alignment in the + // table below. The first nibble is an index into acceptRanges or F for + // special one-byte cases. The second nibble is the Rune length or the + // Status for the special one-byte case. 
+ xx = 0xF1 // invalid: size 1 + as = 0xF0 // ASCII: size 1 + s1 = 0x02 // accept 0, size 2 + s2 = 0x13 // accept 1, size 3 + s3 = 0x03 // accept 0, size 3 + s4 = 0x23 // accept 2, size 3 + s5 = 0x34 // accept 3, size 4 + s6 = 0x04 // accept 0, size 4 + s7 = 0x44 // accept 4, size 4 +) diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go new file mode 100644 index 000000000..e38e1131b --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/danger/danger.go @@ -0,0 +1,65 @@ +package danger + +import ( + "fmt" + "reflect" + "unsafe" +) + +const maxInt = uintptr(int(^uint(0) >> 1)) + +func SubsliceOffset(data []byte, subslice []byte) int { + datap := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + hlp := (*reflect.SliceHeader)(unsafe.Pointer(&subslice)) + + if hlp.Data < datap.Data { + panic(fmt.Errorf("subslice address (%d) is before data address (%d)", hlp.Data, datap.Data)) + } + offset := hlp.Data - datap.Data + + if offset > maxInt { + panic(fmt.Errorf("slice offset larger than int (%d)", offset)) + } + + intoffset := int(offset) + + if intoffset > datap.Len { + panic(fmt.Errorf("slice offset (%d) is farther than data length (%d)", intoffset, datap.Len)) + } + + if intoffset+hlp.Len > datap.Len { + panic(fmt.Errorf("slice ends (%d+%d) is farther than data length (%d)", intoffset, hlp.Len, datap.Len)) + } + + return intoffset +} + +func BytesRange(start []byte, end []byte) []byte { + if start == nil || end == nil { + panic("cannot call BytesRange with nil") + } + startp := (*reflect.SliceHeader)(unsafe.Pointer(&start)) + endp := (*reflect.SliceHeader)(unsafe.Pointer(&end)) + + if startp.Data > endp.Data { + panic(fmt.Errorf("start pointer address (%d) is after end pointer address (%d)", startp.Data, endp.Data)) + } + + l := startp.Len + endLen := int(endp.Data-startp.Data) + endp.Len + if endLen > l { + l = endLen + } + + if l > 
startp.Cap { + panic(fmt.Errorf("range length is larger than capacity")) + } + + return start[:l] +} + +func Stride(ptr unsafe.Pointer, size uintptr, offset int) unsafe.Pointer { + // TODO: replace with unsafe.Add when Go 1.17 is released + // https://github.com/golang/go/issues/40481 + return unsafe.Pointer(uintptr(ptr) + uintptr(int(size)*offset)) +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go new file mode 100644 index 000000000..9d41c28a2 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/danger/typeid.go @@ -0,0 +1,23 @@ +package danger + +import ( + "reflect" + "unsafe" +) + +// typeID is used as key in encoder and decoder caches to enable using +// the optimize runtime.mapaccess2_fast64 function instead of the more +// expensive lookup if we were to use reflect.Type as map key. +// +// typeID holds the pointer to the reflect.Type value, which is unique +// in the program. +// +// https://github.com/segmentio/encoding/blob/master/json/codec.go#L59-L61 +type TypeID unsafe.Pointer + +func MakeTypeID(t reflect.Type) TypeID { + // reflect.Type has the fields: + // typ unsafe.Pointer + // ptr unsafe.Pointer + return TypeID((*[2]unsafe.Pointer)(unsafe.Pointer(&t))[1]) +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go new file mode 100644 index 000000000..149b17f53 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go @@ -0,0 +1,48 @@ +package tracker + +import "github.com/pelletier/go-toml/v2/unstable" + +// KeyTracker is a tracker that keeps track of the current Key as the AST is +// walked. +type KeyTracker struct { + k []string +} + +// UpdateTable sets the state of the tracker with the AST table node. 
+func (t *KeyTracker) UpdateTable(node *unstable.Node) { + t.reset() + t.Push(node) +} + +// UpdateArrayTable sets the state of the tracker with the AST array table node. +func (t *KeyTracker) UpdateArrayTable(node *unstable.Node) { + t.reset() + t.Push(node) +} + +// Push the given key on the stack. +func (t *KeyTracker) Push(node *unstable.Node) { + it := node.Key() + for it.Next() { + t.k = append(t.k, string(it.Node().Data)) + } +} + +// Pop key from stack. +func (t *KeyTracker) Pop(node *unstable.Node) { + it := node.Key() + for it.Next() { + t.k = t.k[:len(t.k)-1] + } +} + +// Key returns the current key +func (t *KeyTracker) Key() []string { + k := make([]string, len(t.k)) + copy(k, t.k) + return k +} + +func (t *KeyTracker) reset() { + t.k = t.k[:0] +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go new file mode 100644 index 000000000..76df2d5b6 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go @@ -0,0 +1,358 @@ +package tracker + +import ( + "bytes" + "fmt" + "sync" + + "github.com/pelletier/go-toml/v2/unstable" +) + +type keyKind uint8 + +const ( + invalidKind keyKind = iota + valueKind + tableKind + arrayTableKind +) + +func (k keyKind) String() string { + switch k { + case invalidKind: + return "invalid" + case valueKind: + return "value" + case tableKind: + return "table" + case arrayTableKind: + return "array table" + } + panic("missing keyKind string mapping") +} + +// SeenTracker tracks which keys have been seen with which TOML type to flag +// duplicates and mismatches according to the spec. +// +// Each node in the visited tree is represented by an entry. Each entry has an +// identifier, which is provided by a counter. Entries are stored in the array +// entries. As new nodes are discovered (referenced for the first time in the +// TOML document), entries are created and appended to the array. 
An entry +// points to its parent using its id. +// +// To find whether a given key (sequence of []byte) has already been visited, +// the entries are linearly searched, looking for one with the right name and +// parent id. +// +// Given that all keys appear in the document after their parent, it is +// guaranteed that all descendants of a node are stored after the node, this +// speeds up the search process. +// +// When encountering [[array tables]], the descendants of that node are removed +// to allow that branch of the tree to be "rediscovered". To maintain the +// invariant above, the deletion process needs to keep the order of entries. +// This results in more copies in that case. +type SeenTracker struct { + entries []entry + currentIdx int +} + +var pool = sync.Pool{ + New: func() interface{} { + return &SeenTracker{} + }, +} + +func (s *SeenTracker) reset() { + // Always contains a root element at index 0. + s.currentIdx = 0 + if len(s.entries) == 0 { + s.entries = make([]entry, 1, 2) + } else { + s.entries = s.entries[:1] + } + s.entries[0].child = -1 + s.entries[0].next = -1 +} + +type entry struct { + // Use -1 to indicate no child or no sibling. + child int + next int + + name []byte + kind keyKind + explicit bool + kv bool +} + +// Find the index of the child of parentIdx with key k. Returns -1 if +// it does not exist. +func (s *SeenTracker) find(parentIdx int, k []byte) int { + for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { + if bytes.Equal(s.entries[i].name, k) { + return i + } + } + return -1 +} + +// Remove all descendants of node at position idx. 
+func (s *SeenTracker) clear(idx int) { + if idx >= len(s.entries) { + return + } + + for i := s.entries[idx].child; i >= 0; { + next := s.entries[i].next + n := s.entries[0].next + s.entries[0].next = i + s.entries[i].next = n + s.entries[i].name = nil + s.clear(i) + i = next + } + + s.entries[idx].child = -1 +} + +func (s *SeenTracker) create(parentIdx int, name []byte, kind keyKind, explicit bool, kv bool) int { + e := entry{ + child: -1, + next: s.entries[parentIdx].child, + + name: name, + kind: kind, + explicit: explicit, + kv: kv, + } + var idx int + if s.entries[0].next >= 0 { + idx = s.entries[0].next + s.entries[0].next = s.entries[idx].next + s.entries[idx] = e + } else { + idx = len(s.entries) + s.entries = append(s.entries, e) + } + + s.entries[parentIdx].child = idx + + return idx +} + +func (s *SeenTracker) setExplicitFlag(parentIdx int) { + for i := s.entries[parentIdx].child; i >= 0; i = s.entries[i].next { + if s.entries[i].kv { + s.entries[i].explicit = true + s.entries[i].kv = false + } + s.setExplicitFlag(i) + } +} + +// CheckExpression takes a top-level node and checks that it does not contain +// keys that have been seen in previous calls, and validates that types are +// consistent. It returns true if it is the first time this node's key is seen. +// Useful to clear array tables on first use. +func (s *SeenTracker) CheckExpression(node *unstable.Node) (bool, error) { + if s.entries == nil { + s.reset() + } + switch node.Kind { + case unstable.KeyValue: + return s.checkKeyValue(node) + case unstable.Table: + return s.checkTable(node) + case unstable.ArrayTable: + return s.checkArrayTable(node) + default: + panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind)) + } +} + +func (s *SeenTracker) checkTable(node *unstable.Node) (bool, error) { + if s.currentIdx >= 0 { + s.setExplicitFlag(s.currentIdx) + } + + it := node.Key() + + parentIdx := 0 + + // This code is duplicated in checkArrayTable. 
This is because factoring + // it in a function requires to copy the iterator, or allocate it to the + // heap, which is not cheap. + for it.Next() { + if it.IsLast() { + break + } + + k := it.Node().Data + + idx := s.find(parentIdx, k) + + if idx < 0 { + idx = s.create(parentIdx, k, tableKind, false, false) + } else { + entry := s.entries[idx] + if entry.kind == valueKind { + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + } + } + parentIdx = idx + } + + k := it.Node().Data + idx := s.find(parentIdx, k) + + first := false + if idx >= 0 { + kind := s.entries[idx].kind + if kind != tableKind { + return false, fmt.Errorf("toml: key %s should be a table, not a %s", string(k), kind) + } + if s.entries[idx].explicit { + return false, fmt.Errorf("toml: table %s already exists", string(k)) + } + s.entries[idx].explicit = true + } else { + idx = s.create(parentIdx, k, tableKind, true, false) + first = true + } + + s.currentIdx = idx + + return first, nil +} + +func (s *SeenTracker) checkArrayTable(node *unstable.Node) (bool, error) { + if s.currentIdx >= 0 { + s.setExplicitFlag(s.currentIdx) + } + + it := node.Key() + + parentIdx := 0 + + for it.Next() { + if it.IsLast() { + break + } + + k := it.Node().Data + + idx := s.find(parentIdx, k) + + if idx < 0 { + idx = s.create(parentIdx, k, tableKind, false, false) + } else { + entry := s.entries[idx] + if entry.kind == valueKind { + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + } + } + + parentIdx = idx + } + + k := it.Node().Data + idx := s.find(parentIdx, k) + + firstTime := idx < 0 + if firstTime { + idx = s.create(parentIdx, k, arrayTableKind, true, false) + } else { + kind := s.entries[idx].kind + if kind != arrayTableKind { + return false, fmt.Errorf("toml: key %s already exists as a %s, but should be an array table", kind, string(k)) + } + s.clear(idx) + } + + s.currentIdx = idx + + return firstTime, nil +} + +func (s 
*SeenTracker) checkKeyValue(node *unstable.Node) (bool, error) { + parentIdx := s.currentIdx + it := node.Key() + + for it.Next() { + k := it.Node().Data + + idx := s.find(parentIdx, k) + + if idx < 0 { + idx = s.create(parentIdx, k, tableKind, false, true) + } else { + entry := s.entries[idx] + if it.IsLast() { + return false, fmt.Errorf("toml: key %s is already defined", string(k)) + } else if entry.kind != tableKind { + return false, fmt.Errorf("toml: expected %s to be a table, not a %s", string(k), entry.kind) + } else if entry.explicit { + return false, fmt.Errorf("toml: cannot redefine table %s that has already been explicitly defined", string(k)) + } + } + + parentIdx = idx + } + + s.entries[parentIdx].kind = valueKind + + value := node.Value() + + switch value.Kind { + case unstable.InlineTable: + return s.checkInlineTable(value) + case unstable.Array: + return s.checkArray(value) + } + + return false, nil +} + +func (s *SeenTracker) checkArray(node *unstable.Node) (first bool, err error) { + it := node.Children() + for it.Next() { + n := it.Node() + switch n.Kind { + case unstable.InlineTable: + first, err = s.checkInlineTable(n) + if err != nil { + return false, err + } + case unstable.Array: + first, err = s.checkArray(n) + if err != nil { + return false, err + } + } + } + return first, nil +} + +func (s *SeenTracker) checkInlineTable(node *unstable.Node) (first bool, err error) { + s = pool.Get().(*SeenTracker) + s.reset() + + it := node.Children() + for it.Next() { + n := it.Node() + first, err = s.checkKeyValue(n) + if err != nil { + return false, err + } + } + + // As inline tables are self-contained, the tracker does not + // need to retain the details of what they contain. The + // keyValue element that creates the inline table is kept to + // mark the presence of the inline table and prevent + // redefinition of its keys: check* functions cannot walk into + // a value. 
+ pool.Put(s) + return first, nil +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go new file mode 100644 index 000000000..bf0317392 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/internal/tracker/tracker.go @@ -0,0 +1 @@ +package tracker diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/localtime.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/localtime.go new file mode 100644 index 000000000..a856bfdb0 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/localtime.go @@ -0,0 +1,122 @@ +package toml + +import ( + "fmt" + "strings" + "time" + + "github.com/pelletier/go-toml/v2/unstable" +) + +// LocalDate represents a calendar day in no specific timezone. +type LocalDate struct { + Year int + Month int + Day int +} + +// AsTime converts d into a specific time instance at midnight in zone. +func (d LocalDate) AsTime(zone *time.Location) time.Time { + return time.Date(d.Year, time.Month(d.Month), d.Day, 0, 0, 0, 0, zone) +} + +// String returns RFC 3339 representation of d. +func (d LocalDate) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// MarshalText returns RFC 3339 representation of d. +func (d LocalDate) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses b using RFC 3339 to fill d. +func (d *LocalDate) UnmarshalText(b []byte) error { + res, err := parseLocalDate(b) + if err != nil { + return err + } + *d = res + return nil +} + +// LocalTime represents a time of day of no specific day in no specific +// timezone. +type LocalTime struct { + Hour int // Hour of the day: [0; 24[ + Minute int // Minute of the hour: [0; 60[ + Second int // Second of the minute: [0; 60[ + Nanosecond int // Nanoseconds within the second: [0, 1000000000[ + Precision int // Number of digits to display for Nanosecond. 
+} + +// String returns RFC 3339 representation of d. +// If d.Nanosecond and d.Precision are zero, the time won't have a nanosecond +// component. If d.Nanosecond > 0 but d.Precision = 0, then the minimum number +// of digits for nanoseconds is provided. +func (d LocalTime) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", d.Hour, d.Minute, d.Second) + + if d.Precision > 0 { + s += fmt.Sprintf(".%09d", d.Nanosecond)[:d.Precision+1] + } else if d.Nanosecond > 0 { + // Nanoseconds are specified, but precision is not provided. Use the + // minimum. + s += strings.Trim(fmt.Sprintf(".%09d", d.Nanosecond), "0") + } + + return s +} + +// MarshalText returns RFC 3339 representation of d. +func (d LocalTime) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses b using RFC 3339 to fill d. +func (d *LocalTime) UnmarshalText(b []byte) error { + res, left, err := parseLocalTime(b) + if err == nil && len(left) != 0 { + err = unstable.NewParserError(left, "extra characters") + } + if err != nil { + return err + } + *d = res + return nil +} + +// LocalDateTime represents a time of a specific day in no specific timezone. +type LocalDateTime struct { + LocalDate + LocalTime +} + +// AsTime converts d into a specific time instance in zone. +func (d LocalDateTime) AsTime(zone *time.Location) time.Time { + return time.Date(d.Year, time.Month(d.Month), d.Day, d.Hour, d.Minute, d.Second, d.Nanosecond, zone) +} + +// String returns RFC 3339 representation of d. +func (d LocalDateTime) String() string { + return d.LocalDate.String() + "T" + d.LocalTime.String() +} + +// MarshalText returns RFC 3339 representation of d. +func (d LocalDateTime) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText parses b using RFC 3339 to fill d. 
+func (d *LocalDateTime) UnmarshalText(data []byte) error { + res, left, err := parseLocalDateTime(data) + if err == nil && len(left) != 0 { + err = unstable.NewParserError(left, "extra characters") + } + if err != nil { + return err + } + + *d = res + return nil +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/marshaler.go new file mode 100644 index 000000000..7f4e20c12 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -0,0 +1,1121 @@ +package toml + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + "unicode" + + "github.com/pelletier/go-toml/v2/internal/characters" +) + +// Marshal serializes a Go value as a TOML document. +// +// It is a shortcut for Encoder.Encode() with the default options. +func Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + + err := enc.Encode(v) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// Encoder writes a TOML document to an output stream. +type Encoder struct { + // output + w io.Writer + + // global settings + tablesInline bool + arraysMultiline bool + indentSymbol string + indentTables bool + marshalJsonNumbers bool +} + +// NewEncoder returns a new Encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + indentSymbol: " ", + } +} + +// SetTablesInline forces the encoder to emit all tables inline. +// +// This behavior can be controlled on an individual struct field basis with the +// inline tag: +// +// MyField `toml:",inline"` +func (enc *Encoder) SetTablesInline(inline bool) *Encoder { + enc.tablesInline = inline + return enc +} + +// SetArraysMultiline forces the encoder to emit all arrays with one element per +// line. 
+// +// This behavior can be controlled on an individual struct field basis with the multiline tag: +// +// MyField `multiline:"true"` +func (enc *Encoder) SetArraysMultiline(multiline bool) *Encoder { + enc.arraysMultiline = multiline + return enc +} + +// SetIndentSymbol defines the string that should be used for indentation. The +// provided string is repeated for each indentation level. Defaults to two +// spaces. +func (enc *Encoder) SetIndentSymbol(s string) *Encoder { + enc.indentSymbol = s + return enc +} + +// SetIndentTables forces the encoder to intent tables and array tables. +func (enc *Encoder) SetIndentTables(indent bool) *Encoder { + enc.indentTables = indent + return enc +} + +// SetMarshalJsonNumbers forces the encoder to serialize `json.Number` as a +// float or integer instead of relying on TextMarshaler to emit a string. +// +// *Unstable:* This method does not follow the compatibility guarantees of +// semver. It can be changed or removed without a new major version being +// issued. +func (enc *Encoder) SetMarshalJsonNumbers(indent bool) *Encoder { + enc.marshalJsonNumbers = indent + return enc +} + +// Encode writes a TOML representation of v to the stream. +// +// If v cannot be represented to TOML it returns an error. +// +// # Encoding rules +// +// A top level slice containing only maps or structs is encoded as [[table +// array]]. +// +// All slices not matching rule 1 are encoded as [array]. As a result, any map +// or struct they contain is encoded as an {inline table}. +// +// Nil interfaces and nil pointers are not supported. +// +// Keys in key-values always have one part. +// +// Intermediate tables are always printed. +// +// By default, strings are encoded as literal string, unless they contain either +// a newline character or a single quote. In that case they are emitted as +// quoted strings. +// +// Unsigned integers larger than math.MaxInt64 cannot be encoded. Doing so +// results in an error. 
This rule exists because the TOML specification only +// requires parsers to support at least the 64 bits integer range. Allowing +// larger numbers would create non-standard TOML documents, which may not be +// readable (at best) by other implementations. To encode such numbers, a +// solution is a custom type that implements encoding.TextMarshaler. +// +// When encoding structs, fields are encoded in order of definition, with their +// exact name. +// +// Tables and array tables are separated by empty lines. However, consecutive +// subtables definitions are not. For example: +// +// [top1] +// +// [top2] +// [top2.child1] +// +// [[array]] +// +// [[array]] +// [array.child2] +// +// # Struct tags +// +// The encoding of each public struct field can be customized by the format +// string in the "toml" key of the struct field's tag. This follows +// encoding/json's convention. The format string starts with the name of the +// field, optionally followed by a comma-separated list of options. The name may +// be empty in order to provide options without overriding the default name. +// +// The "multiline" option emits strings as quoted multi-line TOML strings. It +// has no effect on fields that would not be encoded as strings. +// +// The "inline" option turns fields that would be emitted as tables into inline +// tables instead. It has no effect on other fields. +// +// The "omitempty" option prevents empty values or groups from being emitted. +// +// The "commented" option prefixes the value and all its children with a comment +// symbol. +// +// In addition to the "toml" tag struct tag, a "comment" tag can be used to emit +// a TOML comment before the value being annotated. Comments are ignored inside +// inline tables. For array tables, the comment is only present before the first +// element of the array. 
+func (enc *Encoder) Encode(v interface{}) error { + var ( + b []byte + ctx encoderCtx + ) + + ctx.inline = enc.tablesInline + + if v == nil { + return fmt.Errorf("toml: cannot encode a nil interface") + } + + b, err := enc.encode(b, ctx, reflect.ValueOf(v)) + if err != nil { + return err + } + + _, err = enc.w.Write(b) + if err != nil { + return fmt.Errorf("toml: cannot write: %w", err) + } + + return nil +} + +type valueOptions struct { + multiline bool + omitempty bool + commented bool + comment string +} + +type encoderCtx struct { + // Current top-level key. + parentKey []string + + // Key that should be used for a KV. + key string + // Extra flag to account for the empty string + hasKey bool + + // Set to true to indicate that the encoder is inside a KV, so that all + // tables need to be inlined. + insideKv bool + + // Set to true to skip the first table header in an array table. + skipTableHeader bool + + // Should the next table be encoded as inline + inline bool + + // Indentation level + indent int + + // Prefix the current value with a comment. 
+ commented bool + + // Options coming from struct tags + options valueOptions +} + +func (ctx *encoderCtx) shiftKey() { + if ctx.hasKey { + ctx.parentKey = append(ctx.parentKey, ctx.key) + ctx.clearKey() + } +} + +func (ctx *encoderCtx) setKey(k string) { + ctx.key = k + ctx.hasKey = true +} + +func (ctx *encoderCtx) clearKey() { + ctx.key = "" + ctx.hasKey = false +} + +func (ctx *encoderCtx) isRoot() bool { + return len(ctx.parentKey) == 0 && !ctx.hasKey +} + +func (enc *Encoder) encode(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + i := v.Interface() + + switch x := i.(type) { + case time.Time: + if x.Nanosecond() > 0 { + return x.AppendFormat(b, time.RFC3339Nano), nil + } + return x.AppendFormat(b, time.RFC3339), nil + case LocalTime: + return append(b, x.String()...), nil + case LocalDate: + return append(b, x.String()...), nil + case LocalDateTime: + return append(b, x.String()...), nil + case json.Number: + if enc.marshalJsonNumbers { + if x == "" { /// Useful zero value. 
+ return append(b, "0"...), nil + } else if v, err := x.Int64(); err == nil { + return enc.encode(b, ctx, reflect.ValueOf(v)) + } else if f, err := x.Float64(); err == nil { + return enc.encode(b, ctx, reflect.ValueOf(f)) + } else { + return nil, fmt.Errorf("toml: unable to convert %q to int64 or float64", x) + } + } + } + + hasTextMarshaler := v.Type().Implements(textMarshalerType) + if hasTextMarshaler || (v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + if !hasTextMarshaler { + v = v.Addr() + } + + if ctx.isRoot() { + return nil, fmt.Errorf("toml: type %s implementing the TextMarshaler interface cannot be a root element", v.Type()) + } + + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return nil, err + } + + b = enc.encodeString(b, string(text), ctx.options) + + return b, nil + } + + switch v.Kind() { + // containers + case reflect.Map: + return enc.encodeMap(b, ctx, v) + case reflect.Struct: + return enc.encodeStruct(b, ctx, v) + case reflect.Slice, reflect.Array: + return enc.encodeSlice(b, ctx, v) + case reflect.Interface: + if v.IsNil() { + return nil, fmt.Errorf("toml: encoding a nil interface is not supported") + } + + return enc.encode(b, ctx, v.Elem()) + case reflect.Ptr: + if v.IsNil() { + return enc.encode(b, ctx, reflect.Zero(v.Type().Elem())) + } + + return enc.encode(b, ctx, v.Elem()) + + // values + case reflect.String: + b = enc.encodeString(b, v.String(), ctx.options) + case reflect.Float32: + f := v.Float() + + if math.IsNaN(f) { + b = append(b, "nan"...) + } else if f > math.MaxFloat32 { + b = append(b, "inf"...) + } else if f < -math.MaxFloat32 { + b = append(b, "-inf"...) + } else if math.Trunc(f) == f { + b = strconv.AppendFloat(b, f, 'f', 1, 32) + } else { + b = strconv.AppendFloat(b, f, 'f', -1, 32) + } + case reflect.Float64: + f := v.Float() + if math.IsNaN(f) { + b = append(b, "nan"...) + } else if f > math.MaxFloat64 { + b = append(b, "inf"...) 
+ } else if f < -math.MaxFloat64 { + b = append(b, "-inf"...) + } else if math.Trunc(f) == f { + b = strconv.AppendFloat(b, f, 'f', 1, 64) + } else { + b = strconv.AppendFloat(b, f, 'f', -1, 64) + } + case reflect.Bool: + if v.Bool() { + b = append(b, "true"...) + } else { + b = append(b, "false"...) + } + case reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint: + x := v.Uint() + if x > uint64(math.MaxInt64) { + return nil, fmt.Errorf("toml: not encoding uint (%d) greater than max int64 (%d)", x, int64(math.MaxInt64)) + } + b = strconv.AppendUint(b, x, 10) + case reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int: + b = strconv.AppendInt(b, v.Int(), 10) + default: + return nil, fmt.Errorf("toml: cannot encode value of type %s", v.Kind()) + } + + return b, nil +} + +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Map: + return v.IsNil() + default: + return false + } +} + +func shouldOmitEmpty(options valueOptions, v reflect.Value) bool { + return options.omitempty && isEmptyValue(v) +} + +func (enc *Encoder) encodeKv(b []byte, ctx encoderCtx, options valueOptions, v reflect.Value) ([]byte, error) { + var err error + + if !ctx.inline { + b = enc.encodeComment(ctx.indent, options.comment, b) + b = enc.commented(ctx.commented, b) + b = enc.indent(ctx.indent, b) + } + + b = enc.encodeKey(b, ctx.key) + b = append(b, " = "...) + + // create a copy of the context because the value of a KV shouldn't + // modify the global context. + subctx := ctx + subctx.insideKv = true + subctx.shiftKey() + subctx.options = options + + b, err = enc.encode(b, subctx, v) + if err != nil { + return nil, err + } + + return b, nil +} + +func (enc *Encoder) commented(commented bool, b []byte) []byte { + if commented { + return append(b, "# "...) 
+ } + return b +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Struct: + return isEmptyStruct(v) + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func isEmptyStruct(v reflect.Value) bool { + // TODO: merge with walkStruct and cache. + typ := v.Type() + for i := 0; i < typ.NumField(); i++ { + fieldType := typ.Field(i) + + // only consider exported fields + if fieldType.PkgPath != "" { + continue + } + + tag := fieldType.Tag.Get("toml") + + // special field name to skip field + if tag == "-" { + continue + } + + f := v.Field(i) + + if !isEmptyValue(f) { + return false + } + } + + return true +} + +const literalQuote = '\'' + +func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byte { + if needsQuoting(v) { + return enc.encodeQuotedString(options.multiline, b, v) + } + + return enc.encodeLiteralString(b, v) +} + +func needsQuoting(v string) bool { + // TODO: vectorize + for _, b := range []byte(v) { + if b == '\'' || b == '\r' || b == '\n' || characters.InvalidAscii(b) { + return true + } + } + return false +} + +// caller should have checked that the string does not contain new lines or ' . +func (enc *Encoder) encodeLiteralString(b []byte, v string) []byte { + b = append(b, literalQuote) + b = append(b, v...) + b = append(b, literalQuote) + + return b +} + +func (enc *Encoder) encodeQuotedString(multiline bool, b []byte, v string) []byte { + stringQuote := `"` + + if multiline { + stringQuote = `"""` + } + + b = append(b, stringQuote...) 
+ if multiline { + b = append(b, '\n') + } + + const ( + hextable = "0123456789ABCDEF" + // U+0000 to U+0008, U+000A to U+001F, U+007F + nul = 0x0 + bs = 0x8 + lf = 0xa + us = 0x1f + del = 0x7f + ) + + for _, r := range []byte(v) { + switch r { + case '\\': + b = append(b, `\\`...) + case '"': + b = append(b, `\"`...) + case '\b': + b = append(b, `\b`...) + case '\f': + b = append(b, `\f`...) + case '\n': + if multiline { + b = append(b, r) + } else { + b = append(b, `\n`...) + } + case '\r': + b = append(b, `\r`...) + case '\t': + b = append(b, `\t`...) + default: + switch { + case r >= nul && r <= bs, r >= lf && r <= us, r == del: + b = append(b, `\u00`...) + b = append(b, hextable[r>>4]) + b = append(b, hextable[r&0x0f]) + default: + b = append(b, r) + } + } + } + + b = append(b, stringQuote...) + + return b +} + +// caller should have checked that the string is in A-Z / a-z / 0-9 / - / _ . +func (enc *Encoder) encodeUnquotedKey(b []byte, v string) []byte { + return append(b, v...) +} + +func (enc *Encoder) encodeTableHeader(ctx encoderCtx, b []byte) ([]byte, error) { + if len(ctx.parentKey) == 0 { + return b, nil + } + + b = enc.encodeComment(ctx.indent, ctx.options.comment, b) + + b = enc.commented(ctx.commented, b) + + b = enc.indent(ctx.indent, b) + + b = append(b, '[') + + b = enc.encodeKey(b, ctx.parentKey[0]) + + for _, k := range ctx.parentKey[1:] { + b = append(b, '.') + b = enc.encodeKey(b, k) + } + + b = append(b, "]\n"...) + + return b, nil +} + +//nolint:cyclop +func (enc *Encoder) encodeKey(b []byte, k string) []byte { + needsQuotation := false + cannotUseLiteral := false + + if len(k) == 0 { + return append(b, "''"...) 
+ } + + for _, c := range k { + if (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' || c == '_' { + continue + } + + if c == literalQuote { + cannotUseLiteral = true + } + + needsQuotation = true + } + + if needsQuotation && needsQuoting(k) { + cannotUseLiteral = true + } + + switch { + case cannotUseLiteral: + return enc.encodeQuotedString(false, b, k) + case needsQuotation: + return enc.encodeLiteralString(b, k) + default: + return enc.encodeUnquotedKey(b, k) + } +} + +func (enc *Encoder) keyToString(k reflect.Value) (string, error) { + keyType := k.Type() + switch { + case keyType.Kind() == reflect.String: + return k.String(), nil + + case keyType.Implements(textMarshalerType): + keyB, err := k.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return "", fmt.Errorf("toml: error marshalling key %v from text: %w", k, err) + } + return string(keyB), nil + } + return "", fmt.Errorf("toml: type %s is not supported as a map key", keyType.Kind()) +} + +func (enc *Encoder) encodeMap(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + var ( + t table + emptyValueOptions valueOptions + ) + + iter := v.MapRange() + for iter.Next() { + v := iter.Value() + + if isNil(v) { + continue + } + + k, err := enc.keyToString(iter.Key()) + if err != nil { + return nil, err + } + + if willConvertToTableOrArrayTable(ctx, v) { + t.pushTable(k, v, emptyValueOptions) + } else { + t.pushKV(k, v, emptyValueOptions) + } + } + + sortEntriesByKey(t.kvs) + sortEntriesByKey(t.tables) + + return enc.encodeTable(b, ctx, t) +} + +func sortEntriesByKey(e []entry) { + sort.Slice(e, func(i, j int) bool { + return e[i].Key < e[j].Key + }) +} + +type entry struct { + Key string + Value reflect.Value + Options valueOptions +} + +type table struct { + kvs []entry + tables []entry +} + +func (t *table) pushKV(k string, v reflect.Value, options valueOptions) { + for _, e := range t.kvs { + if e.Key == k { + return + } + } + + t.kvs = 
append(t.kvs, entry{Key: k, Value: v, Options: options}) +} + +func (t *table) pushTable(k string, v reflect.Value, options valueOptions) { + for _, e := range t.tables { + if e.Key == k { + return + } + } + t.tables = append(t.tables, entry{Key: k, Value: v, Options: options}) +} + +func walkStruct(ctx encoderCtx, t *table, v reflect.Value) { + // TODO: cache this + typ := v.Type() + for i := 0; i < typ.NumField(); i++ { + fieldType := typ.Field(i) + + // only consider exported fields + if fieldType.PkgPath != "" { + continue + } + + tag := fieldType.Tag.Get("toml") + + // special field name to skip field + if tag == "-" { + continue + } + + k, opts := parseTag(tag) + if !isValidName(k) { + k = "" + } + + f := v.Field(i) + + if k == "" { + if fieldType.Anonymous { + if fieldType.Type.Kind() == reflect.Struct { + walkStruct(ctx, t, f) + } else if fieldType.Type.Kind() == reflect.Pointer && !f.IsNil() && f.Elem().Kind() == reflect.Struct { + walkStruct(ctx, t, f.Elem()) + } + continue + } else { + k = fieldType.Name + } + } + + if isNil(f) { + continue + } + + options := valueOptions{ + multiline: opts.multiline, + omitempty: opts.omitempty, + commented: opts.commented, + comment: fieldType.Tag.Get("comment"), + } + + if opts.inline || !willConvertToTableOrArrayTable(ctx, f) { + t.pushKV(k, f, options) + } else { + t.pushTable(k, f, options) + } + } +} + +func (enc *Encoder) encodeStruct(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + var t table + + walkStruct(ctx, &t, v) + + return enc.encodeTable(b, ctx, t) +} + +func (enc *Encoder) encodeComment(indent int, comment string, b []byte) []byte { + for len(comment) > 0 { + var line string + idx := strings.IndexByte(comment, '\n') + if idx >= 0 { + line = comment[:idx] + comment = comment[idx+1:] + } else { + line = comment + comment = "" + } + b = enc.indent(indent, b) + b = append(b, "# "...) + b = append(b, line...) 
+ b = append(b, '\n') + } + return b +} + +func isValidName(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + case !unicode.IsLetter(c) && !unicode.IsDigit(c): + return false + } + } + return true +} + +type tagOptions struct { + multiline bool + inline bool + omitempty bool + commented bool +} + +func parseTag(tag string) (string, tagOptions) { + opts := tagOptions{} + + idx := strings.Index(tag, ",") + if idx == -1 { + return tag, opts + } + + raw := tag[idx+1:] + tag = string(tag[:idx]) + for raw != "" { + var o string + i := strings.Index(raw, ",") + if i >= 0 { + o, raw = raw[:i], raw[i+1:] + } else { + o, raw = raw, "" + } + switch o { + case "multiline": + opts.multiline = true + case "inline": + opts.inline = true + case "omitempty": + opts.omitempty = true + case "commented": + opts.commented = true + } + } + + return tag, opts +} + +func (enc *Encoder) encodeTable(b []byte, ctx encoderCtx, t table) ([]byte, error) { + var err error + + ctx.shiftKey() + + if ctx.insideKv || (ctx.inline && !ctx.isRoot()) { + return enc.encodeTableInline(b, ctx, t) + } + + if !ctx.skipTableHeader { + b, err = enc.encodeTableHeader(ctx, b) + if err != nil { + return nil, err + } + + if enc.indentTables && len(ctx.parentKey) > 0 { + ctx.indent++ + } + } + ctx.skipTableHeader = false + + hasNonEmptyKV := false + for _, kv := range t.kvs { + if shouldOmitEmpty(kv.Options, kv.Value) { + continue + } + hasNonEmptyKV = true + + ctx.setKey(kv.Key) + ctx2 := ctx + ctx2.commented = kv.Options.commented || ctx2.commented + + b, err = enc.encodeKv(b, ctx2, kv.Options, kv.Value) + if err != nil { + return nil, err + } + + b = append(b, '\n') + } + + first := true + for _, table := range t.tables { + if shouldOmitEmpty(table.Options, table.Value) { + continue + } + if 
first { + first = false + if hasNonEmptyKV { + b = append(b, '\n') + } + } else { + b = append(b, "\n"...) + } + + ctx.setKey(table.Key) + + ctx.options = table.Options + ctx2 := ctx + ctx2.commented = ctx2.commented || ctx.options.commented + + b, err = enc.encode(b, ctx2, table.Value) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (enc *Encoder) encodeTableInline(b []byte, ctx encoderCtx, t table) ([]byte, error) { + var err error + + b = append(b, '{') + + first := true + for _, kv := range t.kvs { + if shouldOmitEmpty(kv.Options, kv.Value) { + continue + } + + if first { + first = false + } else { + b = append(b, `, `...) + } + + ctx.setKey(kv.Key) + + b, err = enc.encodeKv(b, ctx, kv.Options, kv.Value) + if err != nil { + return nil, err + } + } + + if len(t.tables) > 0 { + panic("inline table cannot contain nested tables, only key-values") + } + + b = append(b, "}"...) + + return b, nil +} + +func willConvertToTable(ctx encoderCtx, v reflect.Value) bool { + if !v.IsValid() { + return false + } + if v.Type() == timeType || v.Type().Implements(textMarshalerType) || (v.Kind() != reflect.Ptr && v.CanAddr() && reflect.PtrTo(v.Type()).Implements(textMarshalerType)) { + return false + } + + t := v.Type() + switch t.Kind() { + case reflect.Map, reflect.Struct: + return !ctx.inline + case reflect.Interface: + return willConvertToTable(ctx, v.Elem()) + case reflect.Ptr: + if v.IsNil() { + return false + } + + return willConvertToTable(ctx, v.Elem()) + default: + return false + } +} + +func willConvertToTableOrArrayTable(ctx encoderCtx, v reflect.Value) bool { + if ctx.insideKv { + return false + } + t := v.Type() + + if t.Kind() == reflect.Interface { + return willConvertToTableOrArrayTable(ctx, v.Elem()) + } + + if t.Kind() == reflect.Slice || t.Kind() == reflect.Array { + if v.Len() == 0 { + // An empty slice should be a kv = []. 
+ return false + } + + for i := 0; i < v.Len(); i++ { + t := willConvertToTable(ctx, v.Index(i)) + + if !t { + return false + } + } + + return true + } + + return willConvertToTable(ctx, v) +} + +func (enc *Encoder) encodeSlice(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + if v.Len() == 0 { + b = append(b, "[]"...) + + return b, nil + } + + if willConvertToTableOrArrayTable(ctx, v) { + return enc.encodeSliceAsArrayTable(b, ctx, v) + } + + return enc.encodeSliceAsArray(b, ctx, v) +} + +// caller should have checked that v is a slice that only contains values that +// encode into tables. +func (enc *Encoder) encodeSliceAsArrayTable(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + ctx.shiftKey() + + scratch := make([]byte, 0, 64) + + scratch = enc.commented(ctx.commented, scratch) + + if enc.indentTables { + scratch = enc.indent(ctx.indent, scratch) + } + + scratch = append(scratch, "[["...) + + for i, k := range ctx.parentKey { + if i > 0 { + scratch = append(scratch, '.') + } + + scratch = enc.encodeKey(scratch, k) + } + + scratch = append(scratch, "]]\n"...) + ctx.skipTableHeader = true + + b = enc.encodeComment(ctx.indent, ctx.options.comment, b) + + if enc.indentTables { + ctx.indent++ + } + + for i := 0; i < v.Len(); i++ { + if i != 0 { + b = append(b, "\n"...) + } + + b = append(b, scratch...) + + var err error + b, err = enc.encode(b, ctx, v.Index(i)) + if err != nil { + return nil, err + } + } + + return b, nil +} + +func (enc *Encoder) encodeSliceAsArray(b []byte, ctx encoderCtx, v reflect.Value) ([]byte, error) { + multiline := ctx.options.multiline || enc.arraysMultiline + separator := ", " + + b = append(b, '[') + + subCtx := ctx + subCtx.options = valueOptions{} + + if multiline { + separator = ",\n" + + b = append(b, '\n') + + subCtx.indent++ + } + + var err error + first := true + + for i := 0; i < v.Len(); i++ { + if first { + first = false + } else { + b = append(b, separator...) 
+ } + + if multiline { + b = enc.indent(subCtx.indent, b) + } + + b, err = enc.encode(b, subCtx, v.Index(i)) + if err != nil { + return nil, err + } + } + + if multiline { + b = append(b, '\n') + b = enc.indent(ctx.indent, b) + } + + b = append(b, ']') + + return b, nil +} + +func (enc *Encoder) indent(level int, b []byte) []byte { + for i := 0; i < level; i++ { + b = append(b, enc.indentSymbol...) + } + + return b +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/strict.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/strict.go new file mode 100644 index 000000000..802e7e4d1 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/strict.go @@ -0,0 +1,107 @@ +package toml + +import ( + "github.com/pelletier/go-toml/v2/internal/danger" + "github.com/pelletier/go-toml/v2/internal/tracker" + "github.com/pelletier/go-toml/v2/unstable" +) + +type strict struct { + Enabled bool + + // Tracks the current key being processed. + key tracker.KeyTracker + + missing []unstable.ParserError +} + +func (s *strict) EnterTable(node *unstable.Node) { + if !s.Enabled { + return + } + + s.key.UpdateTable(node) +} + +func (s *strict) EnterArrayTable(node *unstable.Node) { + if !s.Enabled { + return + } + + s.key.UpdateArrayTable(node) +} + +func (s *strict) EnterKeyValue(node *unstable.Node) { + if !s.Enabled { + return + } + + s.key.Push(node) +} + +func (s *strict) ExitKeyValue(node *unstable.Node) { + if !s.Enabled { + return + } + + s.key.Pop(node) +} + +func (s *strict) MissingTable(node *unstable.Node) { + if !s.Enabled { + return + } + + s.missing = append(s.missing, unstable.ParserError{ + Highlight: keyLocation(node), + Message: "missing table", + Key: s.key.Key(), + }) +} + +func (s *strict) MissingField(node *unstable.Node) { + if !s.Enabled { + return + } + + s.missing = append(s.missing, unstable.ParserError{ + Highlight: keyLocation(node), + Message: "missing field", + Key: s.key.Key(), + }) +} + +func (s *strict) Error(doc []byte) error { + if 
!s.Enabled || len(s.missing) == 0 { + return nil + } + + err := &StrictMissingError{ + Errors: make([]DecodeError, 0, len(s.missing)), + } + + for _, derr := range s.missing { + derr := derr + err.Errors = append(err.Errors, *wrapDecodeError(doc, &derr)) + } + + return err +} + +func keyLocation(node *unstable.Node) []byte { + k := node.Key() + + hasOne := k.Next() + if !hasOne { + panic("should not be called with empty key") + } + + start := k.Node().Data + end := k.Node().Data + + for k.Next() { + end = k.Node().Data + } + + return danger.BytesRange(start, end) +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/toml.abnf b/nodeadm/vendor/github.com/pelletier/go-toml/v2/toml.abnf new file mode 100644 index 000000000..473f3749e --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/toml.abnf @@ -0,0 +1,243 @@ +;; This document describes TOML's syntax, using the ABNF format (defined in +;; RFC 5234 -- https://www.ietf.org/rfc/rfc5234.txt). +;; +;; All valid TOML documents will match this description, however certain +;; invalid documents would need to be rejected as per the semantics described +;; in the supporting text description. + +;; It is possible to try this grammar interactively, using instaparse. +;; http://instaparse.mojombo.com/ +;; +;; To do so, in the lower right, click on Options and change `:input-format` to +;; ':abnf'. Then paste this entire ABNF document into the grammar entry box +;; (above the options). Then you can type or paste a sample TOML document into +;; the beige box on the left. Tada! 
+ +;; Overall Structure + +toml = expression *( newline expression ) + +expression = ws [ comment ] +expression =/ ws keyval ws [ comment ] +expression =/ ws table ws [ comment ] + +;; Whitespace + +ws = *wschar +wschar = %x20 ; Space +wschar =/ %x09 ; Horizontal tab + +;; Newline + +newline = %x0A ; LF +newline =/ %x0D.0A ; CRLF + +;; Comment + +comment-start-symbol = %x23 ; # +non-ascii = %x80-D7FF / %xE000-10FFFF +non-eol = %x09 / %x20-7F / non-ascii + +comment = comment-start-symbol *non-eol + +;; Key-Value pairs + +keyval = key keyval-sep val + +key = simple-key / dotted-key +simple-key = quoted-key / unquoted-key + +unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ +quoted-key = basic-string / literal-string +dotted-key = simple-key 1*( dot-sep simple-key ) + +dot-sep = ws %x2E ws ; . Period +keyval-sep = ws %x3D ws ; = + +val = string / boolean / array / inline-table / date-time / float / integer + +;; String + +string = ml-basic-string / basic-string / ml-literal-string / literal-string + +;; Basic String + +basic-string = quotation-mark *basic-char quotation-mark + +quotation-mark = %x22 ; " + +basic-char = basic-unescaped / escaped +basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii +escaped = escape escape-seq-char + +escape = %x5C ; \ +escape-seq-char = %x22 ; " quotation mark U+0022 +escape-seq-char =/ %x5C ; \ reverse solidus U+005C +escape-seq-char =/ %x62 ; b backspace U+0008 +escape-seq-char =/ %x66 ; f form feed U+000C +escape-seq-char =/ %x6E ; n line feed U+000A +escape-seq-char =/ %x72 ; r carriage return U+000D +escape-seq-char =/ %x74 ; t tab U+0009 +escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX +escape-seq-char =/ %x55 8HEXDIG ; UXXXXXXXX U+XXXXXXXX + +;; Multiline Basic String + +ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + ml-basic-string-delim +ml-basic-string-delim = 3quotation-mark +ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + +mlb-content = 
mlb-char / newline / mlb-escaped-nl +mlb-char = mlb-unescaped / escaped +mlb-quotes = 1*2quotation-mark +mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii +mlb-escaped-nl = escape ws newline *( wschar / newline ) + +;; Literal String + +literal-string = apostrophe *literal-char apostrophe + +apostrophe = %x27 ; ' apostrophe + +literal-char = %x09 / %x20-26 / %x28-7E / non-ascii + +;; Multiline Literal String + +ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body + ml-literal-string-delim +ml-literal-string-delim = 3apostrophe +ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] + +mll-content = mll-char / newline +mll-char = %x09 / %x20-26 / %x28-7E / non-ascii +mll-quotes = 1*2apostrophe + +;; Integer + +integer = dec-int / hex-int / oct-int / bin-int + +minus = %x2D ; - +plus = %x2B ; + +underscore = %x5F ; _ +digit1-9 = %x31-39 ; 1-9 +digit0-7 = %x30-37 ; 0-7 +digit0-1 = %x30-31 ; 0-1 + +hex-prefix = %x30.78 ; 0x +oct-prefix = %x30.6F ; 0o +bin-prefix = %x30.62 ; 0b + +dec-int = [ minus / plus ] unsigned-dec-int +unsigned-dec-int = DIGIT / digit1-9 1*( DIGIT / underscore DIGIT ) + +hex-int = hex-prefix HEXDIG *( HEXDIG / underscore HEXDIG ) +oct-int = oct-prefix digit0-7 *( digit0-7 / underscore digit0-7 ) +bin-int = bin-prefix digit0-1 *( digit0-1 / underscore digit0-1 ) + +;; Float + +float = float-int-part ( exp / frac [ exp ] ) +float =/ special-float + +float-int-part = dec-int +frac = decimal-point zero-prefixable-int +decimal-point = %x2E ; . 
+zero-prefixable-int = DIGIT *( DIGIT / underscore DIGIT ) + +exp = "e" float-exp-part +float-exp-part = [ minus / plus ] zero-prefixable-int + +special-float = [ minus / plus ] ( inf / nan ) +inf = %x69.6e.66 ; inf +nan = %x6e.61.6e ; nan + +;; Boolean + +boolean = true / false + +true = %x74.72.75.65 ; true +false = %x66.61.6C.73.65 ; false + +;; Date and Time (as defined in RFC 3339) + +date-time = offset-date-time / local-date-time / local-date / local-time + +date-fullyear = 4DIGIT +date-month = 2DIGIT ; 01-12 +date-mday = 2DIGIT ; 01-28, 01-29, 01-30, 01-31 based on month/year +time-delim = "T" / %x20 ; T, t, or space +time-hour = 2DIGIT ; 00-23 +time-minute = 2DIGIT ; 00-59 +time-second = 2DIGIT ; 00-58, 00-59, 00-60 based on leap second rules +time-secfrac = "." 1*DIGIT +time-numoffset = ( "+" / "-" ) time-hour ":" time-minute +time-offset = "Z" / time-numoffset + +partial-time = time-hour ":" time-minute ":" time-second [ time-secfrac ] +full-date = date-fullyear "-" date-month "-" date-mday +full-time = partial-time time-offset + +;; Offset Date-Time + +offset-date-time = full-date time-delim full-time + +;; Local Date-Time + +local-date-time = full-date time-delim partial-time + +;; Local Date + +local-date = full-date + +;; Local Time + +local-time = partial-time + +;; Array + +array = array-open [ array-values ] ws-comment-newline array-close + +array-open = %x5B ; [ +array-close = %x5D ; ] + +array-values = ws-comment-newline val ws-comment-newline array-sep array-values +array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] + +array-sep = %x2C ; , Comma + +ws-comment-newline = *( wschar / [ comment ] newline ) + +;; Table + +table = std-table / array-table + +;; Standard Table + +std-table = std-table-open key std-table-close + +std-table-open = %x5B ws ; [ Left square bracket +std-table-close = ws %x5D ; ] Right square bracket + +;; Inline Table + +inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close + 
+inline-table-open = %x7B ws ; { +inline-table-close = ws %x7D ; } +inline-table-sep = ws %x2C ws ; , Comma + +inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] + +;; Array Table + +array-table = array-table-open key array-table-close + +array-table-open = %x5B.5B ws ; [[ Double left square bracket +array-table-close = ws %x5D.5D ; ]] Double right square bracket + +;; Built-in ABNF terms, reproduced here for clarity + +ALPHA = %x41-5A / %x61-7A ; A-Z / a-z +DIGIT = %x30-39 ; 0-9 +HEXDIG = DIGIT / "A" / "B" / "C" / "D" / "E" / "F" diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/types.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/types.go new file mode 100644 index 000000000..3c6b8fe57 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/types.go @@ -0,0 +1,14 @@ +package toml + +import ( + "encoding" + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}(nil)) +var sliceInterfaceType = reflect.TypeOf([]interface{}(nil)) +var stringType = reflect.TypeOf("") diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go new file mode 100644 index 000000000..98231bae6 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -0,0 +1,1311 @@ +package toml + +import ( + "encoding" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "sync/atomic" + "time" + + "github.com/pelletier/go-toml/v2/internal/danger" + "github.com/pelletier/go-toml/v2/internal/tracker" + "github.com/pelletier/go-toml/v2/unstable" +) + +// Unmarshal deserializes a TOML document into a Go value. +// +// It is a shortcut for Decoder.Decode() with the default options. 
+func Unmarshal(data []byte, v interface{}) error { + p := unstable.Parser{} + p.Reset(data) + d := decoder{p: &p} + + return d.FromParser(v) +} + +// Decoder reads and decode a TOML document from an input stream. +type Decoder struct { + // input + r io.Reader + + // global settings + strict bool + + // toggles unmarshaler interface + unmarshalerInterface bool +} + +// NewDecoder creates a new Decoder that will read from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +// DisallowUnknownFields causes the Decoder to return an error when the +// destination is a struct and the input contains a key that does not match a +// non-ignored field. +// +// In that case, the Decoder returns a StrictMissingError that can be used to +// retrieve the individual errors as well as generate a human readable +// description of the missing fields. +func (d *Decoder) DisallowUnknownFields() *Decoder { + d.strict = true + return d +} + +// EnableUnmarshalerInterface allows to enable unmarshaler interface. +// +// With this feature enabled, types implementing the unstable/Unmarshaler +// interface can be decoded from any structure of the document. It allows types +// that don't have a straightfoward TOML representation to provide their own +// decoding logic. +// +// Currently, types can only decode from a single value. Tables and array tables +// are not supported. +// +// *Unstable:* This method does not follow the compatibility guarantees of +// semver. It can be changed or removed without a new major version being +// issued. +func (d *Decoder) EnableUnmarshalerInterface() *Decoder { + d.unmarshalerInterface = true + return d +} + +// Decode the whole content of r into v. +// +// By default, values in the document that don't exist in the target Go value +// are ignored. See Decoder.DisallowUnknownFields() to change this behavior. 
+// +// When a TOML local date, time, or date-time is decoded into a time.Time, its +// value is represented in time.Local timezone. Otherwise the appropriate Local* +// structure is used. For time values, precision up to the nanosecond is +// supported by truncating extra digits. +// +// Empty tables decoded in an interface{} create an empty initialized +// map[string]interface{}. +// +// Types implementing the encoding.TextUnmarshaler interface are decoded from a +// TOML string. +// +// When decoding a number, go-toml will return an error if the number is out of +// bounds for the target type (which includes negative numbers when decoding +// into an unsigned int). +// +// If an error occurs while decoding the content of the document, this function +// returns a toml.DecodeError, providing context about the issue. When using +// strict mode and a field is missing, a `toml.StrictMissingError` is +// returned. In any other case, this function returns a standard Go error. +// +// # Type mapping +// +// List of supported TOML types and their associated accepted Go types: +// +// String -> string +// Integer -> uint*, int*, depending on size +// Float -> float*, depending on size +// Boolean -> bool +// Offset Date-Time -> time.Time +// Local Date-time -> LocalDateTime, time.Time +// Local Date -> LocalDate, time.Time +// Local Time -> LocalTime, time.Time +// Array -> slice and array, depending on elements types +// Table -> map and struct +// Inline Table -> same as Table +// Array of Tables -> same as Array and Table +func (d *Decoder) Decode(v interface{}) error { + b, err := ioutil.ReadAll(d.r) + if err != nil { + return fmt.Errorf("toml: %w", err) + } + + p := unstable.Parser{} + p.Reset(b) + dec := decoder{ + p: &p, + strict: strict{ + Enabled: d.strict, + }, + unmarshalerInterface: d.unmarshalerInterface, + } + + return dec.FromParser(v) +} + +type decoder struct { + // Which parser instance in use for this decoding session. 
+ p *unstable.Parser + + // Flag indicating that the current expression is stashed. + // If set to true, calling nextExpr will not actually pull a new expression + // but turn off the flag instead. + stashedExpr bool + + // Skip expressions until a table is found. This is set to true when a + // table could not be created (missing field in map), so all KV expressions + // need to be skipped. + skipUntilTable bool + + // Flag indicating that the current array/slice table should be cleared because + // it is the first encounter of an array table. + clearArrayTable bool + + // Tracks position in Go arrays. + // This is used when decoding [[array tables]] into Go arrays. Given array + // tables are separate TOML expression, we need to keep track of where we + // are at in the Go array, as we can't just introspect its size. + arrayIndexes map[reflect.Value]int + + // Tracks keys that have been seen, with which type. + seen tracker.SeenTracker + + // Strict mode + strict strict + + // Flag that enables/disables unmarshaler interface. + unmarshalerInterface bool + + // Current context for the error. 
+ errorContext *errorContext +} + +type errorContext struct { + Struct reflect.Type + Field []int +} + +func (d *decoder) typeMismatchError(toml string, target reflect.Type) error { + return fmt.Errorf("toml: %s", d.typeMismatchString(toml, target)) +} + +func (d *decoder) typeMismatchString(toml string, target reflect.Type) string { + if d.errorContext != nil && d.errorContext.Struct != nil { + ctx := d.errorContext + f := ctx.Struct.FieldByIndex(ctx.Field) + return fmt.Sprintf("cannot decode TOML %s into struct field %s.%s of type %s", toml, ctx.Struct, f.Name, f.Type) + } + return fmt.Sprintf("cannot decode TOML %s into a Go value of type %s", toml, target) +} + +func (d *decoder) expr() *unstable.Node { + return d.p.Expression() +} + +func (d *decoder) nextExpr() bool { + if d.stashedExpr { + d.stashedExpr = false + return true + } + return d.p.NextExpression() +} + +func (d *decoder) stashExpr() { + d.stashedExpr = true +} + +func (d *decoder) arrayIndex(shouldAppend bool, v reflect.Value) int { + if d.arrayIndexes == nil { + d.arrayIndexes = make(map[reflect.Value]int, 1) + } + + idx, ok := d.arrayIndexes[v] + + if !ok { + d.arrayIndexes[v] = 0 + } else if shouldAppend { + idx++ + d.arrayIndexes[v] = idx + } + + return idx +} + +func (d *decoder) FromParser(v interface{}) error { + r := reflect.ValueOf(v) + if r.Kind() != reflect.Ptr { + return fmt.Errorf("toml: decoding can only be performed into a pointer, not %s", r.Kind()) + } + + if r.IsNil() { + return fmt.Errorf("toml: decoding pointer target cannot be nil") + } + + r = r.Elem() + if r.Kind() == reflect.Interface && r.IsNil() { + newMap := map[string]interface{}{} + r.Set(reflect.ValueOf(newMap)) + } + + err := d.fromParser(r) + if err == nil { + return d.strict.Error(d.p.Data()) + } + + var e *unstable.ParserError + if errors.As(err, &e) { + return wrapDecodeError(d.p.Data(), e) + } + + return err +} + +func (d *decoder) fromParser(root reflect.Value) error { + for d.nextExpr() { + err := 
d.handleRootExpression(d.expr(), root) + if err != nil { + return err + } + } + + return d.p.Error() +} + +/* +Rules for the unmarshal code: + +- The stack is used to keep track of which values need to be set where. +- handle* functions <=> switch on a given unstable.Kind. +- unmarshalX* functions need to unmarshal a node of kind X. +- An "object" is either a struct or a map. +*/ + +func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) error { + var x reflect.Value + var err error + var first bool // used for to clear array tables on first use + + if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) { + first, err = d.seen.CheckExpression(expr) + if err != nil { + return err + } + } + + switch expr.Kind { + case unstable.KeyValue: + if d.skipUntilTable { + return nil + } + x, err = d.handleKeyValue(expr, v) + case unstable.Table: + d.skipUntilTable = false + d.strict.EnterTable(expr) + x, err = d.handleTable(expr.Key(), v) + case unstable.ArrayTable: + d.skipUntilTable = false + d.strict.EnterArrayTable(expr) + d.clearArrayTable = first + x, err = d.handleArrayTable(expr.Key(), v) + default: + panic(fmt.Errorf("parser should not permit expression of kind %s at document root", expr.Kind)) + } + + if d.skipUntilTable { + if expr.Kind == unstable.Table || expr.Kind == unstable.ArrayTable { + d.strict.MissingTable(expr) + } + } else if err == nil && x.IsValid() { + v.Set(x) + } + + return err +} + +func (d *decoder) handleArrayTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { + if key.Next() { + return d.handleArrayTablePart(key, v) + } + return d.handleKeyValues(v) +} + +func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { + switch v.Kind() { + case reflect.Interface: + elem := v.Elem() + if !elem.IsValid() { + elem = reflect.New(sliceInterfaceType).Elem() + elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) + } else if elem.Kind() == reflect.Slice { + if 
elem.Type() != sliceInterfaceType { + elem = reflect.New(sliceInterfaceType).Elem() + elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) + } else if !elem.CanSet() { + nelem := reflect.New(sliceInterfaceType).Elem() + nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap())) + reflect.Copy(nelem, elem) + elem = nelem + } + if d.clearArrayTable && elem.Len() > 0 { + elem.SetLen(0) + d.clearArrayTable = false + } + } + return d.handleArrayTableCollectionLast(key, elem) + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + elem = ptr.Elem() + } + + elem, err := d.handleArrayTableCollectionLast(key, elem) + if err != nil { + return reflect.Value{}, err + } + v.Elem().Set(elem) + + return v, nil + case reflect.Slice: + if d.clearArrayTable && v.Len() > 0 { + v.SetLen(0) + d.clearArrayTable = false + } + elemType := v.Type().Elem() + var elem reflect.Value + if elemType.Kind() == reflect.Interface { + elem = makeMapStringInterface() + } else { + elem = reflect.New(elemType).Elem() + } + elem2, err := d.handleArrayTable(key, elem) + if err != nil { + return reflect.Value{}, err + } + if elem2.IsValid() { + elem = elem2 + } + return reflect.Append(v, elem), nil + case reflect.Array: + idx := d.arrayIndex(true, v) + if idx >= v.Len() { + return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx) + } + elem := v.Index(idx) + _, err := d.handleArrayTable(key, elem) + return v, err + default: + return reflect.Value{}, d.typeMismatchError("array table", v.Type()) + } +} + +// When parsing an array table expression, each part of the key needs to be +// evaluated like a normal key, but if it returns a collection, it also needs to +// point to the last element of the collection. Unless it is the last part of +// the key, then it needs to create a new element at the end. 
+func (d *decoder) handleArrayTableCollection(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { + if key.IsLast() { + return d.handleArrayTableCollectionLast(key, v) + } + + switch v.Kind() { + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + elem = ptr.Elem() + } + + elem, err := d.handleArrayTableCollection(key, elem) + if err != nil { + return reflect.Value{}, err + } + if elem.IsValid() { + v.Elem().Set(elem) + } + + return v, nil + case reflect.Slice: + elem := v.Index(v.Len() - 1) + x, err := d.handleArrayTable(key, elem) + if err != nil || d.skipUntilTable { + return reflect.Value{}, err + } + if x.IsValid() { + elem.Set(x) + } + + return v, err + case reflect.Array: + idx := d.arrayIndex(false, v) + if idx >= v.Len() { + return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx) + } + elem := v.Index(idx) + _, err := d.handleArrayTable(key, elem) + return v, err + } + + return d.handleArrayTable(key, v) +} + +func (d *decoder) handleKeyPart(key unstable.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) { + var rv reflect.Value + + // First, dispatch over v to make sure it is a valid object. + // There is no guarantee over what it could be. + switch v.Kind() { + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + v.Set(reflect.New(v.Type().Elem())) + } + elem = v.Elem() + return d.handleKeyPart(key, elem, nextFn, makeFn) + case reflect.Map: + vt := v.Type() + + // Create the key for the map element. Convert to key type. + mk, err := d.keyFromData(vt.Key(), key.Node().Data) + if err != nil { + return reflect.Value{}, err + } + + // If the map does not exist, create it. + if v.IsNil() { + vt := v.Type() + v = reflect.MakeMap(vt) + rv = v + } + + mv := v.MapIndex(mk) + set := false + if !mv.IsValid() { + // If there is no value in the map, create a new one according to + // the map type. 
If the element type is interface, create either a + // map[string]interface{} or a []interface{} depending on whether + // this is the last part of the array table key. + + t := vt.Elem() + if t.Kind() == reflect.Interface { + mv = makeFn() + } else { + mv = reflect.New(t).Elem() + } + set = true + } else if mv.Kind() == reflect.Interface { + mv = mv.Elem() + if !mv.IsValid() { + mv = makeFn() + } + set = true + } else if !mv.CanAddr() { + vt := v.Type() + t := vt.Elem() + oldmv := mv + mv = reflect.New(t).Elem() + mv.Set(oldmv) + set = true + } + + x, err := nextFn(key, mv) + if err != nil { + return reflect.Value{}, err + } + + if x.IsValid() { + mv = x + set = true + } + + if set { + v.SetMapIndex(mk, mv) + } + case reflect.Struct: + path, found := structFieldPath(v, string(key.Node().Data)) + if !found { + d.skipUntilTable = true + return reflect.Value{}, nil + } + + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + t := v.Type() + d.errorContext.Struct = t + d.errorContext.Field = path + + f := fieldByIndex(v, path) + x, err := nextFn(key, f) + if err != nil || d.skipUntilTable { + return reflect.Value{}, err + } + if x.IsValid() { + f.Set(x) + } + d.errorContext.Field = nil + d.errorContext.Struct = nil + case reflect.Interface: + if v.Elem().IsValid() { + v = v.Elem() + } else { + v = makeMapStringInterface() + } + + x, err := d.handleKeyPart(key, v, nextFn, makeFn) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + v = x + } + rv = v + default: + panic(fmt.Errorf("unhandled part: %s", v.Kind())) + } + + return rv, nil +} + +// HandleArrayTablePart navigates the Go structure v using the key v. It is +// only used for the prefix (non-last) parts of an array-table. When +// encountering a collection, it should go to the last element. 
+func (d *decoder) handleArrayTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { + var makeFn valueMakerFn + if key.IsLast() { + makeFn = makeSliceInterface + } else { + makeFn = makeMapStringInterface + } + return d.handleKeyPart(key, v, d.handleArrayTableCollection, makeFn) +} + +// HandleTable returns a reference when it has checked the next expression but +// cannot handle it. +func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { + if v.Kind() == reflect.Slice { + if v.Len() == 0 { + return reflect.Value{}, unstable.NewParserError(key.Node().Data, "cannot store a table in a slice") + } + elem := v.Index(v.Len() - 1) + x, err := d.handleTable(key, elem) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + elem.Set(x) + } + return reflect.Value{}, nil + } + if key.Next() { + // Still scoping the key + return d.handleTablePart(key, v) + } + // Done scoping the key. + // Now handle all the key-value expressions in this table. + return d.handleKeyValues(v) +} + +// Handle root expressions until the end of the document or the next +// non-key-value. +func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { + var rv reflect.Value + for d.nextExpr() { + expr := d.expr() + if expr.Kind != unstable.KeyValue { + // Stash the expression so that fromParser can just loop and use + // the right handler. + // We could just recurse ourselves here, but at least this gives a + // chance to pop the stack a bit. 
+ d.stashExpr() + break + } + + _, err := d.seen.CheckExpression(expr) + if err != nil { + return reflect.Value{}, err + } + + x, err := d.handleKeyValue(expr, v) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + v = x + rv = x + } + } + return rv, nil +} + +type ( + handlerFn func(key unstable.Iterator, v reflect.Value) (reflect.Value, error) + valueMakerFn func() reflect.Value +) + +func makeMapStringInterface() reflect.Value { + return reflect.MakeMap(mapStringInterfaceType) +} + +func makeSliceInterface() reflect.Value { + return reflect.MakeSlice(sliceInterfaceType, 0, 16) +} + +func (d *decoder) handleTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { + return d.handleKeyPart(key, v, d.handleTable, makeMapStringInterface) +} + +func (d *decoder) tryTextUnmarshaler(node *unstable.Node, v reflect.Value) (bool, error) { + // Special case for time, because we allow to unmarshal to it from + // different kind of AST nodes. + if v.Type() == timeType { + return false, nil + } + + if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) { + err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data) + if err != nil { + return false, unstable.NewParserError(d.p.Raw(node.Raw), "%w", err) + } + + return true, nil + } + + return false, nil +} + +func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error { + for v.Kind() == reflect.Ptr { + v = initAndDereferencePointer(v) + } + + if d.unmarshalerInterface { + if v.CanAddr() && v.Addr().CanInterface() { + if outi, ok := v.Addr().Interface().(unstable.Unmarshaler); ok { + return outi.UnmarshalTOML(value) + } + } + } + + ok, err := d.tryTextUnmarshaler(value, v) + if ok || err != nil { + return err + } + + switch value.Kind { + case unstable.String: + return d.unmarshalString(value, v) + case unstable.Integer: + return d.unmarshalInteger(value, v) + case unstable.Float: + return d.unmarshalFloat(value, v) + case unstable.Bool: + 
return d.unmarshalBool(value, v) + case unstable.DateTime: + return d.unmarshalDateTime(value, v) + case unstable.LocalDate: + return d.unmarshalLocalDate(value, v) + case unstable.LocalTime: + return d.unmarshalLocalTime(value, v) + case unstable.LocalDateTime: + return d.unmarshalLocalDateTime(value, v) + case unstable.InlineTable: + return d.unmarshalInlineTable(value, v) + case unstable.Array: + return d.unmarshalArray(value, v) + default: + panic(fmt.Errorf("handleValue not implemented for %s", value.Kind)) + } +} + +func (d *decoder) unmarshalArray(array *unstable.Node, v reflect.Value) error { + switch v.Kind() { + case reflect.Slice: + if v.IsNil() { + v.Set(reflect.MakeSlice(v.Type(), 0, 16)) + } else { + v.SetLen(0) + } + case reflect.Array: + // arrays are always initialized + case reflect.Interface: + elem := v.Elem() + if !elem.IsValid() { + elem = reflect.New(sliceInterfaceType).Elem() + elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) + } else if elem.Kind() == reflect.Slice { + if elem.Type() != sliceInterfaceType { + elem = reflect.New(sliceInterfaceType).Elem() + elem.Set(reflect.MakeSlice(sliceInterfaceType, 0, 16)) + } else if !elem.CanSet() { + nelem := reflect.New(sliceInterfaceType).Elem() + nelem.Set(reflect.MakeSlice(sliceInterfaceType, elem.Len(), elem.Cap())) + reflect.Copy(nelem, elem) + elem = nelem + } + } + err := d.unmarshalArray(array, elem) + if err != nil { + return err + } + v.Set(elem) + return nil + default: + // TODO: use newDecodeError, but first the parser needs to fill + // array.Data. 
+ return d.typeMismatchError("array", v.Type()) + } + + elemType := v.Type().Elem() + + it := array.Children() + idx := 0 + for it.Next() { + n := it.Node() + + // TODO: optimize + if v.Kind() == reflect.Slice { + elem := reflect.New(elemType).Elem() + + err := d.handleValue(n, elem) + if err != nil { + return err + } + + v.Set(reflect.Append(v, elem)) + } else { // array + if idx >= v.Len() { + return nil + } + elem := v.Index(idx) + err := d.handleValue(n, elem) + if err != nil { + return err + } + idx++ + } + } + + return nil +} + +func (d *decoder) unmarshalInlineTable(itable *unstable.Node, v reflect.Value) error { + // Make sure v is an initialized object. + switch v.Kind() { + case reflect.Map: + if v.IsNil() { + v.Set(reflect.MakeMap(v.Type())) + } + case reflect.Struct: + // structs are always initialized. + case reflect.Interface: + elem := v.Elem() + if !elem.IsValid() { + elem = makeMapStringInterface() + v.Set(elem) + } + return d.unmarshalInlineTable(itable, elem) + default: + return unstable.NewParserError(d.p.Raw(itable.Raw), "cannot store inline table in Go type %s", v.Kind()) + } + + it := itable.Children() + for it.Next() { + n := it.Node() + + x, err := d.handleKeyValue(n, v) + if err != nil { + return err + } + if x.IsValid() { + v = x + } + } + + return nil +} + +func (d *decoder) unmarshalDateTime(value *unstable.Node, v reflect.Value) error { + dt, err := parseDateTime(value.Data) + if err != nil { + return err + } + + v.Set(reflect.ValueOf(dt)) + return nil +} + +func (d *decoder) unmarshalLocalDate(value *unstable.Node, v reflect.Value) error { + ld, err := parseLocalDate(value.Data) + if err != nil { + return err + } + + if v.Type() == timeType { + cast := ld.AsTime(time.Local) + v.Set(reflect.ValueOf(cast)) + return nil + } + + v.Set(reflect.ValueOf(ld)) + + return nil +} + +func (d *decoder) unmarshalLocalTime(value *unstable.Node, v reflect.Value) error { + lt, rest, err := parseLocalTime(value.Data) + if err != nil { + return err + } 
+ + if len(rest) > 0 { + return unstable.NewParserError(rest, "extra characters at the end of a local time") + } + + v.Set(reflect.ValueOf(lt)) + return nil +} + +func (d *decoder) unmarshalLocalDateTime(value *unstable.Node, v reflect.Value) error { + ldt, rest, err := parseLocalDateTime(value.Data) + if err != nil { + return err + } + + if len(rest) > 0 { + return unstable.NewParserError(rest, "extra characters at the end of a local date time") + } + + if v.Type() == timeType { + cast := ldt.AsTime(time.Local) + + v.Set(reflect.ValueOf(cast)) + return nil + } + + v.Set(reflect.ValueOf(ldt)) + + return nil +} + +func (d *decoder) unmarshalBool(value *unstable.Node, v reflect.Value) error { + b := value.Data[0] == 't' + + switch v.Kind() { + case reflect.Bool: + v.SetBool(b) + case reflect.Interface: + v.Set(reflect.ValueOf(b)) + default: + return unstable.NewParserError(value.Data, "cannot assign boolean to a %t", b) + } + + return nil +} + +func (d *decoder) unmarshalFloat(value *unstable.Node, v reflect.Value) error { + f, err := parseFloat(value.Data) + if err != nil { + return err + } + + switch v.Kind() { + case reflect.Float64: + v.SetFloat(f) + case reflect.Float32: + if f > math.MaxFloat32 { + return unstable.NewParserError(value.Data, "number %f does not fit in a float32", f) + } + v.SetFloat(f) + case reflect.Interface: + v.Set(reflect.ValueOf(f)) + default: + return unstable.NewParserError(value.Data, "float cannot be assigned to %s", v.Kind()) + } + + return nil +} + +const ( + maxInt = int64(^uint(0) >> 1) + minInt = -maxInt - 1 +) + +// Maximum value of uint for decoding. Currently the decoder parses the integer +// into an int64. As a result, on architectures where uint is 64 bits, the +// effective maximum uint we can decode is the maximum of int64. On +// architectures where uint is 32 bits, the maximum value we can decode is +// lower: the maximum of uint32. 
I didn't find a way to figure out this value at +// compile time, so it is computed during initialization. +var maxUint int64 = math.MaxInt64 + +func init() { + m := uint64(^uint(0)) + if m < uint64(maxUint) { + maxUint = int64(m) + } +} + +func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error { + kind := v.Kind() + if kind == reflect.Float32 || kind == reflect.Float64 { + return d.unmarshalFloat(value, v) + } + + i, err := parseInteger(value.Data) + if err != nil { + return err + } + + var r reflect.Value + + switch kind { + case reflect.Int64: + v.SetInt(i) + return nil + case reflect.Int32: + if i < math.MinInt32 || i > math.MaxInt32 { + return fmt.Errorf("toml: number %d does not fit in an int32", i) + } + + r = reflect.ValueOf(int32(i)) + case reflect.Int16: + if i < math.MinInt16 || i > math.MaxInt16 { + return fmt.Errorf("toml: number %d does not fit in an int16", i) + } + + r = reflect.ValueOf(int16(i)) + case reflect.Int8: + if i < math.MinInt8 || i > math.MaxInt8 { + return fmt.Errorf("toml: number %d does not fit in an int8", i) + } + + r = reflect.ValueOf(int8(i)) + case reflect.Int: + if i < minInt || i > maxInt { + return fmt.Errorf("toml: number %d does not fit in an int", i) + } + + r = reflect.ValueOf(int(i)) + case reflect.Uint64: + if i < 0 { + return fmt.Errorf("toml: negative number %d does not fit in an uint64", i) + } + + r = reflect.ValueOf(uint64(i)) + case reflect.Uint32: + if i < 0 || i > math.MaxUint32 { + return fmt.Errorf("toml: negative number %d does not fit in an uint32", i) + } + + r = reflect.ValueOf(uint32(i)) + case reflect.Uint16: + if i < 0 || i > math.MaxUint16 { + return fmt.Errorf("toml: negative number %d does not fit in an uint16", i) + } + + r = reflect.ValueOf(uint16(i)) + case reflect.Uint8: + if i < 0 || i > math.MaxUint8 { + return fmt.Errorf("toml: negative number %d does not fit in an uint8", i) + } + + r = reflect.ValueOf(uint8(i)) + case reflect.Uint: + if i < 0 || i > maxUint { + return 
fmt.Errorf("toml: negative number %d does not fit in an uint", i) + } + + r = reflect.ValueOf(uint(i)) + case reflect.Interface: + r = reflect.ValueOf(i) + default: + return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("integer", v.Type())) + } + + if !r.Type().AssignableTo(v.Type()) { + r = r.Convert(v.Type()) + } + + v.Set(r) + + return nil +} + +func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error { + switch v.Kind() { + case reflect.String: + v.SetString(string(value.Data)) + case reflect.Interface: + v.Set(reflect.ValueOf(string(value.Data))) + default: + return unstable.NewParserError(d.p.Raw(value.Raw), d.typeMismatchString("string", v.Type())) + } + + return nil +} + +func (d *decoder) handleKeyValue(expr *unstable.Node, v reflect.Value) (reflect.Value, error) { + d.strict.EnterKeyValue(expr) + + v, err := d.handleKeyValueInner(expr.Key(), expr.Value(), v) + if d.skipUntilTable { + d.strict.MissingField(expr) + d.skipUntilTable = false + } + + d.strict.ExitKeyValue(expr) + + return v, err +} + +func (d *decoder) handleKeyValueInner(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { + if key.Next() { + // Still scoping the key + return d.handleKeyValuePart(key, value, v) + } + // Done scoping the key. + // v is whatever Go value we need to fill. 
+ return reflect.Value{}, d.handleValue(value, v) +} + +func (d *decoder) keyFromData(keyType reflect.Type, data []byte) (reflect.Value, error) { + switch { + case stringType.AssignableTo(keyType): + return reflect.ValueOf(string(data)), nil + + case stringType.ConvertibleTo(keyType): + return reflect.ValueOf(string(data)).Convert(keyType), nil + + case keyType.Implements(textUnmarshalerType): + mk := reflect.New(keyType.Elem()) + if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err) + } + return mk, nil + + case reflect.PtrTo(keyType).Implements(textUnmarshalerType): + mk := reflect.New(keyType) + if err := mk.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return reflect.Value{}, fmt.Errorf("toml: error unmarshalling key type %s from text: %w", stringType, err) + } + return mk.Elem(), nil + } + return reflect.Value{}, fmt.Errorf("toml: cannot convert map key of type %s to expected type %s", stringType, keyType) +} + +func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { + // contains the replacement for v + var rv reflect.Value + + // First, dispatch over v to make sure it is a valid object. + // There is no guarantee over what it could be. + switch v.Kind() { + case reflect.Map: + vt := v.Type() + + mk, err := d.keyFromData(vt.Key(), key.Node().Data) + if err != nil { + return reflect.Value{}, err + } + + // If the map does not exist, create it. 
+ if v.IsNil() { + v = reflect.MakeMap(vt) + rv = v + } + + mv := v.MapIndex(mk) + set := false + if !mv.IsValid() || key.IsLast() { + set = true + mv = reflect.New(v.Type().Elem()).Elem() + } + + nv, err := d.handleKeyValueInner(key, value, mv) + if err != nil { + return reflect.Value{}, err + } + if nv.IsValid() { + mv = nv + set = true + } + + if set { + v.SetMapIndex(mk, mv) + } + case reflect.Struct: + path, found := structFieldPath(v, string(key.Node().Data)) + if !found { + d.skipUntilTable = true + break + } + + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + t := v.Type() + d.errorContext.Struct = t + d.errorContext.Field = path + + f := fieldByIndex(v, path) + + if !f.CanAddr() { + // If the field is not addressable, need to take a slower path and + // make a copy of the struct itself to a new location. + nvp := reflect.New(v.Type()) + nvp.Elem().Set(v) + v = nvp.Elem() + _, err := d.handleKeyValuePart(key, value, v) + if err != nil { + return reflect.Value{}, err + } + return nvp.Elem(), nil + } + x, err := d.handleKeyValueInner(key, value, f) + if err != nil { + return reflect.Value{}, err + } + + if x.IsValid() { + f.Set(x) + } + d.errorContext.Struct = nil + d.errorContext.Field = nil + case reflect.Interface: + v = v.Elem() + + // Following encoding/json: decoding an object into an + // interface{}, it needs to always hold a + // map[string]interface{}. This is for the types to be + // consistent whether a previous value was set or not. 
+ if !v.IsValid() || v.Type() != mapStringInterfaceType { + v = makeMapStringInterface() + } + + x, err := d.handleKeyValuePart(key, value, v) + if err != nil { + return reflect.Value{}, err + } + if x.IsValid() { + v = x + } + rv = v + case reflect.Ptr: + elem := v.Elem() + if !elem.IsValid() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + rv = v + elem = ptr.Elem() + } + + elem2, err := d.handleKeyValuePart(key, value, elem) + if err != nil { + return reflect.Value{}, err + } + if elem2.IsValid() { + elem = elem2 + } + v.Elem().Set(elem) + default: + return reflect.Value{}, fmt.Errorf("unhandled kv part: %s", v.Kind()) + } + + return rv, nil +} + +func initAndDereferencePointer(v reflect.Value) reflect.Value { + var elem reflect.Value + if v.IsNil() { + ptr := reflect.New(v.Type().Elem()) + v.Set(ptr) + } + elem = v.Elem() + return elem +} + +// Same as reflect.Value.FieldByIndex, but creates pointers if needed. +func fieldByIndex(v reflect.Value, path []int) reflect.Value { + for _, x := range path { + v = v.Field(x) + + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + return v +} + +type fieldPathsMap = map[string][]int + +var globalFieldPathsCache atomic.Value // map[danger.TypeID]fieldPathsMap + +func structFieldPath(v reflect.Value, name string) ([]int, bool) { + t := v.Type() + + cache, _ := globalFieldPathsCache.Load().(map[danger.TypeID]fieldPathsMap) + fieldPaths, ok := cache[danger.MakeTypeID(t)] + + if !ok { + fieldPaths = map[string][]int{} + + forEachField(t, nil, func(name string, path []int) { + fieldPaths[name] = path + // extra copy for the case-insensitive match + fieldPaths[strings.ToLower(name)] = path + }) + + newCache := make(map[danger.TypeID]fieldPathsMap, len(cache)+1) + newCache[danger.MakeTypeID(t)] = fieldPaths + for k, v := range cache { + newCache[k] = v + } + globalFieldPathsCache.Store(newCache) + } + + path, ok := fieldPaths[name] + if !ok { + path, ok = 
fieldPaths[strings.ToLower(name)] + } + return path, ok +} + +func forEachField(t reflect.Type, path []int, do func(name string, path []int)) { + n := t.NumField() + for i := 0; i < n; i++ { + f := t.Field(i) + + if !f.Anonymous && f.PkgPath != "" { + // only consider exported fields. + continue + } + + fieldPath := append(path, i) + fieldPath = fieldPath[:len(fieldPath):len(fieldPath)] + + name := f.Tag.Get("toml") + if name == "-" { + continue + } + + if i := strings.IndexByte(name, ','); i >= 0 { + name = name[:i] + } + + if f.Anonymous && name == "" { + t2 := f.Type + if t2.Kind() == reflect.Ptr { + t2 = t2.Elem() + } + + if t2.Kind() == reflect.Struct { + forEachField(t2, fieldPath, do) + } + continue + } + + if name == "" { + name = f.Name + } + + do(name, fieldPath) + } +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go new file mode 100644 index 000000000..f526bf2c0 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go @@ -0,0 +1,136 @@ +package unstable + +import ( + "fmt" + "unsafe" + + "github.com/pelletier/go-toml/v2/internal/danger" +) + +// Iterator over a sequence of nodes. +// +// Starts uninitialized, you need to call Next() first. +// +// For example: +// +// it := n.Children() +// for it.Next() { +// n := it.Node() +// // do something with n +// } +type Iterator struct { + started bool + node *Node +} + +// Next moves the iterator forward and returns true if points to a +// node, false otherwise. +func (c *Iterator) Next() bool { + if !c.started { + c.started = true + } else if c.node.Valid() { + c.node = c.node.Next() + } + return c.node.Valid() +} + +// IsLast returns true if the current node of the iterator is the last +// one. Subsequent calls to Next() will return false. +func (c *Iterator) IsLast() bool { + return c.node.next == 0 +} + +// Node returns a pointer to the node pointed at by the iterator. 
+func (c *Iterator) Node() *Node { + return c.node +} + +// Node in a TOML expression AST. +// +// Depending on Kind, its sequence of children should be interpreted +// differently. +// +// - Array have one child per element in the array. +// - InlineTable have one child per key-value in the table (each of kind +// InlineTable). +// - KeyValue have at least two children. The first one is the value. The rest +// make a potentially dotted key. +// - Table and ArrayTable's children represent a dotted key (same as +// KeyValue, but without the first node being the value). +// +// When relevant, Raw describes the range of bytes this node is referring to in +// the input document. Use Parser.Raw() to retrieve the actual bytes. +type Node struct { + Kind Kind + Raw Range // Raw bytes from the input. + Data []byte // Node value (either allocated or referencing the input). + + // References to other nodes, as offsets in the backing array + // from this node. References can go backward, so those can be + // negative. + next int // 0 if last element + child int // 0 if no child +} + +// Range of bytes in the document. +type Range struct { + Offset uint32 + Length uint32 +} + +// Next returns a pointer to the next node, or nil if there is no next node. +func (n *Node) Next() *Node { + if n.next == 0 { + return nil + } + ptr := unsafe.Pointer(n) + size := unsafe.Sizeof(Node{}) + return (*Node)(danger.Stride(ptr, size, n.next)) +} + +// Child returns a pointer to the first child node of this node. Other children +// can be accessed calling Next on the first child. Returns an nil if this Node +// has no child. +func (n *Node) Child() *Node { + if n.child == 0 { + return nil + } + ptr := unsafe.Pointer(n) + size := unsafe.Sizeof(Node{}) + return (*Node)(danger.Stride(ptr, size, n.child)) +} + +// Valid returns true if the node's kind is set (not to Invalid). +func (n *Node) Valid() bool { + return n != nil +} + +// Key returns the children nodes making the Key on a supported node. 
Panics +// otherwise. They are guaranteed to be all be of the Kind Key. A simple key +// would return just one element. +func (n *Node) Key() Iterator { + switch n.Kind { + case KeyValue: + value := n.Child() + if !value.Valid() { + panic(fmt.Errorf("KeyValue should have at least two children")) + } + return Iterator{node: value.Next()} + case Table, ArrayTable: + return Iterator{node: n.Child()} + default: + panic(fmt.Errorf("Key() is not supported on a %s", n.Kind)) + } +} + +// Value returns a pointer to the value node of a KeyValue. +// Guaranteed to be non-nil. Panics if not called on a KeyValue node, +// or if the Children are malformed. +func (n *Node) Value() *Node { + return n.Child() +} + +// Children returns an iterator over a node's children. +func (n *Node) Children() Iterator { + return Iterator{node: n.Child()} +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go new file mode 100644 index 000000000..9538e30df --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go @@ -0,0 +1,71 @@ +package unstable + +// root contains a full AST. +// +// It is immutable once constructed with Builder. +type root struct { + nodes []Node +} + +// Iterator over the top level nodes. 
+func (r *root) Iterator() Iterator { + it := Iterator{} + if len(r.nodes) > 0 { + it.node = &r.nodes[0] + } + return it +} + +func (r *root) at(idx reference) *Node { + return &r.nodes[idx] +} + +type reference int + +const invalidReference reference = -1 + +func (r reference) Valid() bool { + return r != invalidReference +} + +type builder struct { + tree root + lastIdx int +} + +func (b *builder) Tree() *root { + return &b.tree +} + +func (b *builder) NodeAt(ref reference) *Node { + return b.tree.at(ref) +} + +func (b *builder) Reset() { + b.tree.nodes = b.tree.nodes[:0] + b.lastIdx = 0 +} + +func (b *builder) Push(n Node) reference { + b.lastIdx = len(b.tree.nodes) + b.tree.nodes = append(b.tree.nodes, n) + return reference(b.lastIdx) +} + +func (b *builder) PushAndChain(n Node) reference { + newIdx := len(b.tree.nodes) + b.tree.nodes = append(b.tree.nodes, n) + if b.lastIdx >= 0 { + b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx + } + b.lastIdx = newIdx + return reference(b.lastIdx) +} + +func (b *builder) AttachChild(parent reference, child reference) { + b.tree.nodes[parent].child = int(child) - int(parent) +} + +func (b *builder) Chain(from reference, to reference) { + b.tree.nodes[from].next = int(to) - int(from) +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go new file mode 100644 index 000000000..7ff26c53c --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go @@ -0,0 +1,3 @@ +// Package unstable provides APIs that do not meet the backward compatibility +// guarantees yet. 
+package unstable diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go new file mode 100644 index 000000000..ff9df1bef --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go @@ -0,0 +1,71 @@ +package unstable + +import "fmt" + +// Kind represents the type of TOML structure contained in a given Node. +type Kind int + +const ( + // Meta + Invalid Kind = iota + Comment + Key + + // Top level structures + Table + ArrayTable + KeyValue + + // Containers values + Array + InlineTable + + // Values + String + Bool + Float + Integer + LocalDate + LocalTime + LocalDateTime + DateTime +) + +// String implementation of fmt.Stringer. +func (k Kind) String() string { + switch k { + case Invalid: + return "Invalid" + case Comment: + return "Comment" + case Key: + return "Key" + case Table: + return "Table" + case ArrayTable: + return "ArrayTable" + case KeyValue: + return "KeyValue" + case Array: + return "Array" + case InlineTable: + return "InlineTable" + case String: + return "String" + case Bool: + return "Bool" + case Float: + return "Float" + case Integer: + return "Integer" + case LocalDate: + return "LocalDate" + case LocalTime: + return "LocalTime" + case LocalDateTime: + return "LocalDateTime" + case DateTime: + return "DateTime" + } + panic(fmt.Errorf("Kind.String() not implemented for '%d'", k)) +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go new file mode 100644 index 000000000..50358a44f --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go @@ -0,0 +1,1245 @@ +package unstable + +import ( + "bytes" + "fmt" + "unicode" + + "github.com/pelletier/go-toml/v2/internal/characters" + "github.com/pelletier/go-toml/v2/internal/danger" +) + +// ParserError describes an error relative to the content of the document. 
+// +// It cannot outlive the instance of Parser it refers to, and may cause panics +// if the parser is reset. +type ParserError struct { + Highlight []byte + Message string + Key []string // optional +} + +// Error is the implementation of the error interface. +func (e *ParserError) Error() string { + return e.Message +} + +// NewParserError is a convenience function to create a ParserError +// +// Warning: Highlight needs to be a subslice of Parser.data, so only slices +// returned by Parser.Raw are valid candidates. +func NewParserError(highlight []byte, format string, args ...interface{}) error { + return &ParserError{ + Highlight: highlight, + Message: fmt.Errorf(format, args...).Error(), + } +} + +// Parser scans over a TOML-encoded document and generates an iterative AST. +// +// To prime the Parser, first reset it with the contents of a TOML document. +// Then, process all top-level expressions sequentially. See Example. +// +// Don't forget to check Error() after you're done parsing. +// +// Each top-level expression needs to be fully processed before calling +// NextExpression() again. Otherwise, calls to various Node methods may panic if +// the parser has moved on the next expression. +// +// For performance reasons, go-toml doesn't make a copy of the input bytes to +// the parser. Make sure to copy all the bytes you need to outlive the slice +// given to the parser. +type Parser struct { + data []byte + builder builder + ref reference + left []byte + err error + first bool + + KeepComments bool +} + +// Data returns the slice provided to the last call to Reset. +func (p *Parser) Data() []byte { + return p.data +} + +// Range returns a range description that corresponds to a given slice of the +// input. If the argument is not a subslice of the parser input, this function +// panics. 
+func (p *Parser) Range(b []byte) Range { + return Range{ + Offset: uint32(danger.SubsliceOffset(p.data, b)), + Length: uint32(len(b)), + } +} + +// Raw returns the slice corresponding to the bytes in the given range. +func (p *Parser) Raw(raw Range) []byte { + return p.data[raw.Offset : raw.Offset+raw.Length] +} + +// Reset brings the parser to its initial state for a given input. It wipes an +// reuses internal storage to reduce allocation. +func (p *Parser) Reset(b []byte) { + p.builder.Reset() + p.ref = invalidReference + p.data = b + p.left = b + p.err = nil + p.first = true +} + +// NextExpression parses the next top-level expression. If an expression was +// successfully parsed, it returns true. If the parser is at the end of the +// document or an error occurred, it returns false. +// +// Retrieve the parsed expression with Expression(). +func (p *Parser) NextExpression() bool { + if len(p.left) == 0 || p.err != nil { + return false + } + + p.builder.Reset() + p.ref = invalidReference + + for { + if len(p.left) == 0 || p.err != nil { + return false + } + + if !p.first { + p.left, p.err = p.parseNewline(p.left) + } + + if len(p.left) == 0 || p.err != nil { + return false + } + + p.ref, p.left, p.err = p.parseExpression(p.left) + + if p.err != nil { + return false + } + + p.first = false + + if p.ref.Valid() { + return true + } + } +} + +// Expression returns a pointer to the node representing the last successfully +// parsed expression. +func (p *Parser) Expression() *Node { + return p.builder.NodeAt(p.ref) +} + +// Error returns any error that has occurred during parsing. +func (p *Parser) Error() error { + return p.err +} + +// Position describes a position in the input. +type Position struct { + // Number of bytes from the beginning of the input. + Offset int + // Line number, starting at 1. + Line int + // Column number, starting at 1. + Column int +} + +// Shape describes the position of a range in the input. 
+type Shape struct { + Start Position + End Position +} + +func (p *Parser) position(b []byte) Position { + offset := danger.SubsliceOffset(p.data, b) + + lead := p.data[:offset] + + return Position{ + Offset: offset, + Line: bytes.Count(lead, []byte{'\n'}) + 1, + Column: len(lead) - bytes.LastIndex(lead, []byte{'\n'}), + } +} + +// Shape returns the shape of the given range in the input. Will +// panic if the range is not a subslice of the input. +func (p *Parser) Shape(r Range) Shape { + raw := p.Raw(r) + return Shape{ + Start: p.position(raw), + End: p.position(raw[r.Length:]), + } +} + +func (p *Parser) parseNewline(b []byte) ([]byte, error) { + if b[0] == '\n' { + return b[1:], nil + } + + if b[0] == '\r' { + _, rest, err := scanWindowsNewline(b) + return rest, err + } + + return nil, NewParserError(b[0:1], "expected newline but got %#U", b[0]) +} + +func (p *Parser) parseComment(b []byte) (reference, []byte, error) { + ref := invalidReference + data, rest, err := scanComment(b) + if p.KeepComments && err == nil { + ref = p.builder.Push(Node{ + Kind: Comment, + Raw: p.Range(data), + Data: data, + }) + } + return ref, rest, err +} + +func (p *Parser) parseExpression(b []byte) (reference, []byte, error) { + // expression = ws [ comment ] + // expression =/ ws keyval ws [ comment ] + // expression =/ ws table ws [ comment ] + ref := invalidReference + + b = p.parseWhitespace(b) + + if len(b) == 0 { + return ref, b, nil + } + + if b[0] == '#' { + ref, rest, err := p.parseComment(b) + return ref, rest, err + } + + if b[0] == '\n' || b[0] == '\r' { + return ref, b, nil + } + + var err error + if b[0] == '[' { + ref, b, err = p.parseTable(b) + } else { + ref, b, err = p.parseKeyval(b) + } + + if err != nil { + return ref, nil, err + } + + b = p.parseWhitespace(b) + + if len(b) > 0 && b[0] == '#' { + cref, rest, err := p.parseComment(b) + if cref != invalidReference { + p.builder.Chain(ref, cref) + } + return ref, rest, err + } + + return ref, b, nil +} + +func (p 
*Parser) parseTable(b []byte) (reference, []byte, error) { + // table = std-table / array-table + if len(b) > 1 && b[1] == '[' { + return p.parseArrayTable(b) + } + + return p.parseStdTable(b) +} + +func (p *Parser) parseArrayTable(b []byte) (reference, []byte, error) { + // array-table = array-table-open key array-table-close + // array-table-open = %x5B.5B ws ; [[ Double left square bracket + // array-table-close = ws %x5D.5D ; ]] Double right square bracket + ref := p.builder.Push(Node{ + Kind: ArrayTable, + }) + + b = b[2:] + b = p.parseWhitespace(b) + + k, b, err := p.parseKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.AttachChild(ref, k) + b = p.parseWhitespace(b) + + b, err = expect(']', b) + if err != nil { + return ref, nil, err + } + + b, err = expect(']', b) + + return ref, b, err +} + +func (p *Parser) parseStdTable(b []byte) (reference, []byte, error) { + // std-table = std-table-open key std-table-close + // std-table-open = %x5B ws ; [ Left square bracket + // std-table-close = ws %x5D ; ] Right square bracket + ref := p.builder.Push(Node{ + Kind: Table, + }) + + b = b[1:] + b = p.parseWhitespace(b) + + key, b, err := p.parseKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.AttachChild(ref, key) + + b = p.parseWhitespace(b) + + b, err = expect(']', b) + + return ref, b, err +} + +func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) { + // keyval = key keyval-sep val + ref := p.builder.Push(Node{ + Kind: KeyValue, + }) + + key, b, err := p.parseKey(b) + if err != nil { + return invalidReference, nil, err + } + + // keyval-sep = ws %x3D ws ; = + + b = p.parseWhitespace(b) + + if len(b) == 0 { + return invalidReference, nil, NewParserError(b, "expected = after a key, but the document ends there") + } + + b, err = expect('=', b) + if err != nil { + return invalidReference, nil, err + } + + b = p.parseWhitespace(b) + + valRef, b, err := p.parseVal(b) + if err != nil { + return ref, b, err + } + + 
p.builder.Chain(valRef, key) + p.builder.AttachChild(ref, valRef) + + return ref, b, err +} + +//nolint:cyclop,funlen +func (p *Parser) parseVal(b []byte) (reference, []byte, error) { + // val = string / boolean / array / inline-table / date-time / float / integer + ref := invalidReference + + if len(b) == 0 { + return ref, nil, NewParserError(b, "expected value, not eof") + } + + var err error + c := b[0] + + switch c { + case '"': + var raw []byte + var v []byte + if scanFollowsMultilineBasicStringDelimiter(b) { + raw, v, b, err = p.parseMultilineBasicString(b) + } else { + raw, v, b, err = p.parseBasicString(b) + } + + if err == nil { + ref = p.builder.Push(Node{ + Kind: String, + Raw: p.Range(raw), + Data: v, + }) + } + + return ref, b, err + case '\'': + var raw []byte + var v []byte + if scanFollowsMultilineLiteralStringDelimiter(b) { + raw, v, b, err = p.parseMultilineLiteralString(b) + } else { + raw, v, b, err = p.parseLiteralString(b) + } + + if err == nil { + ref = p.builder.Push(Node{ + Kind: String, + Raw: p.Range(raw), + Data: v, + }) + } + + return ref, b, err + case 't': + if !scanFollowsTrue(b) { + return ref, nil, NewParserError(atmost(b, 4), "expected 'true'") + } + + ref = p.builder.Push(Node{ + Kind: Bool, + Data: b[:4], + }) + + return ref, b[4:], nil + case 'f': + if !scanFollowsFalse(b) { + return ref, nil, NewParserError(atmost(b, 5), "expected 'false'") + } + + ref = p.builder.Push(Node{ + Kind: Bool, + Data: b[:5], + }) + + return ref, b[5:], nil + case '[': + return p.parseValArray(b) + case '{': + return p.parseInlineTable(b) + default: + return p.parseIntOrFloatOrDateTime(b) + } +} + +func atmost(b []byte, n int) []byte { + if n >= len(b) { + return b + } + + return b[:n] +} + +func (p *Parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { + v, rest, err := scanLiteralString(b) + if err != nil { + return nil, nil, nil, err + } + + return v, v[1 : len(v)-1], rest, nil +} + +func (p *Parser) parseInlineTable(b []byte) 
(reference, []byte, error) { + // inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close + // inline-table-open = %x7B ws ; { + // inline-table-close = ws %x7D ; } + // inline-table-sep = ws %x2C ws ; , Comma + // inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] + parent := p.builder.Push(Node{ + Kind: InlineTable, + Raw: p.Range(b[:1]), + }) + + first := true + + var child reference + + b = b[1:] + + var err error + + for len(b) > 0 { + previousB := b + b = p.parseWhitespace(b) + + if len(b) == 0 { + return parent, nil, NewParserError(previousB[:1], "inline table is incomplete") + } + + if b[0] == '}' { + break + } + + if !first { + b, err = expect(',', b) + if err != nil { + return parent, nil, err + } + b = p.parseWhitespace(b) + } + + var kv reference + + kv, b, err = p.parseKeyval(b) + if err != nil { + return parent, nil, err + } + + if first { + p.builder.AttachChild(parent, kv) + } else { + p.builder.Chain(child, kv) + } + child = kv + + first = false + } + + rest, err := expect('}', b) + + return parent, rest, err +} + +//nolint:funlen,cyclop +func (p *Parser) parseValArray(b []byte) (reference, []byte, error) { + // array = array-open [ array-values ] ws-comment-newline array-close + // array-open = %x5B ; [ + // array-close = %x5D ; ] + // array-values = ws-comment-newline val ws-comment-newline array-sep array-values + // array-values =/ ws-comment-newline val ws-comment-newline [ array-sep ] + // array-sep = %x2C ; , Comma + // ws-comment-newline = *( wschar / [ comment ] newline ) + arrayStart := b + b = b[1:] + + parent := p.builder.Push(Node{ + Kind: Array, + }) + + // First indicates whether the parser is looking for the first element + // (non-comment) of the array. 
+ first := true + + lastChild := invalidReference + + addChild := func(valueRef reference) { + if lastChild == invalidReference { + p.builder.AttachChild(parent, valueRef) + } else { + p.builder.Chain(lastChild, valueRef) + } + lastChild = valueRef + } + + var err error + for len(b) > 0 { + cref := invalidReference + cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + + if cref != invalidReference { + addChild(cref) + } + + if len(b) == 0 { + return parent, nil, NewParserError(arrayStart[:1], "array is incomplete") + } + + if b[0] == ']' { + break + } + + if b[0] == ',' { + if first { + return parent, nil, NewParserError(b[0:1], "array cannot start with comma") + } + b = b[1:] + + cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + if cref != invalidReference { + addChild(cref) + } + } else if !first { + return parent, nil, NewParserError(b[0:1], "array elements must be separated by commas") + } + + // TOML allows trailing commas in arrays. 
+ if len(b) > 0 && b[0] == ']' { + break + } + + var valueRef reference + valueRef, b, err = p.parseVal(b) + if err != nil { + return parent, nil, err + } + + addChild(valueRef) + + cref, b, err = p.parseOptionalWhitespaceCommentNewline(b) + if err != nil { + return parent, nil, err + } + if cref != invalidReference { + addChild(cref) + } + + first = false + } + + rest, err := expect(']', b) + + return parent, rest, err +} + +func (p *Parser) parseOptionalWhitespaceCommentNewline(b []byte) (reference, []byte, error) { + rootCommentRef := invalidReference + latestCommentRef := invalidReference + + addComment := func(ref reference) { + if rootCommentRef == invalidReference { + rootCommentRef = ref + } else if latestCommentRef == invalidReference { + p.builder.AttachChild(rootCommentRef, ref) + latestCommentRef = ref + } else { + p.builder.Chain(latestCommentRef, ref) + latestCommentRef = ref + } + } + + for len(b) > 0 { + var err error + b = p.parseWhitespace(b) + + if len(b) > 0 && b[0] == '#' { + var ref reference + ref, b, err = p.parseComment(b) + if err != nil { + return invalidReference, nil, err + } + if ref != invalidReference { + addComment(ref) + } + } + + if len(b) == 0 { + break + } + + if b[0] == '\n' || b[0] == '\r' { + b, err = p.parseNewline(b) + if err != nil { + return invalidReference, nil, err + } + } else { + break + } + } + + return rootCommentRef, b, nil +} + +func (p *Parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) { + token, rest, err := scanMultilineLiteralString(b) + if err != nil { + return nil, nil, nil, err + } + + i := 3 + + // skip the immediate new line + if token[i] == '\n' { + i++ + } else if token[i] == '\r' && token[i+1] == '\n' { + i += 2 + } + + return token, token[i : len(token)-3], rest, err +} + +//nolint:funlen,gocognit,cyclop +func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) { + // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + // 
ml-basic-string-delim + // ml-basic-string-delim = 3quotation-mark + // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + // + // mlb-content = mlb-char / newline / mlb-escaped-nl + // mlb-char = mlb-unescaped / escaped + // mlb-quotes = 1*2quotation-mark + // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // mlb-escaped-nl = escape ws newline *( wschar / newline ) + token, escaped, rest, err := scanMultilineBasicString(b) + if err != nil { + return nil, nil, nil, err + } + + i := 3 + + // skip the immediate new line + if token[i] == '\n' { + i++ + } else if token[i] == '\r' && token[i+1] == '\n' { + i += 2 + } + + // fast path + startIdx := i + endIdx := len(token) - len(`"""`) + + if !escaped { + str := token[startIdx:endIdx] + verr := characters.Utf8TomlValidAlreadyEscaped(str) + if verr.Zero() { + return token, str, rest, nil + } + return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + } + + var builder bytes.Buffer + + // The scanner ensures that the token starts and ends with quotes and that + // escapes are balanced. + for i < len(token)-3 { + c := token[i] + + //nolint:nestif + if c == '\\' { + // When the last non-whitespace character on a line is an unescaped \, + // it will be trimmed along with all whitespace (including newlines) up + // to the next non-whitespace character or closing delimiter. 
+ + isLastNonWhitespaceOnLine := false + j := 1 + findEOLLoop: + for ; j < len(token)-3-i; j++ { + switch token[i+j] { + case ' ', '\t': + continue + case '\r': + if token[i+j+1] == '\n' { + continue + } + case '\n': + isLastNonWhitespaceOnLine = true + } + break findEOLLoop + } + if isLastNonWhitespaceOnLine { + i += j + for ; i < len(token)-3; i++ { + c := token[i] + if !(c == '\n' || c == '\r' || c == ' ' || c == '\t') { + i-- + break + } + } + i++ + continue + } + + // handle escaping + i++ + c = token[i] + + switch c { + case '"', '\\': + builder.WriteByte(c) + case 'b': + builder.WriteByte('\b') + case 'f': + builder.WriteByte('\f') + case 'n': + builder.WriteByte('\n') + case 'r': + builder.WriteByte('\r') + case 't': + builder.WriteByte('\t') + case 'e': + builder.WriteByte(0x1B) + case 'u': + x, err := hexToRune(atmost(token[i+1:], 4), 4) + if err != nil { + return nil, nil, nil, err + } + builder.WriteRune(x) + i += 4 + case 'U': + x, err := hexToRune(atmost(token[i+1:], 8), 8) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 8 + default: + return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) + } + i++ + } else { + size := characters.Utf8ValidNext(token[i:]) + if size == 0 { + return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) + } + builder.Write(token[i : i+size]) + i += size + } + } + + return token, builder.Bytes(), rest, nil +} + +func (p *Parser) parseKey(b []byte) (reference, []byte, error) { + // key = simple-key / dotted-key + // simple-key = quoted-key / unquoted-key + // + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + // quoted-key = basic-string / literal-string + // dotted-key = simple-key 1*( dot-sep simple-key ) + // + // dot-sep = ws %x2E ws ; . 
Period + raw, key, b, err := p.parseSimpleKey(b) + if err != nil { + return invalidReference, nil, err + } + + ref := p.builder.Push(Node{ + Kind: Key, + Raw: p.Range(raw), + Data: key, + }) + + for { + b = p.parseWhitespace(b) + if len(b) > 0 && b[0] == '.' { + b = p.parseWhitespace(b[1:]) + + raw, key, b, err = p.parseSimpleKey(b) + if err != nil { + return ref, nil, err + } + + p.builder.PushAndChain(Node{ + Kind: Key, + Raw: p.Range(raw), + Data: key, + }) + } else { + break + } + } + + return ref, b, nil +} + +func (p *Parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { + if len(b) == 0 { + return nil, nil, nil, NewParserError(b, "expected key but found none") + } + + // simple-key = quoted-key / unquoted-key + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + // quoted-key = basic-string / literal-string + switch { + case b[0] == '\'': + return p.parseLiteralString(b) + case b[0] == '"': + return p.parseBasicString(b) + case isUnquotedKeyChar(b[0]): + key, rest = scanUnquotedKey(b) + return key, key, rest, nil + default: + return nil, nil, nil, NewParserError(b[0:1], "invalid character at start of key: %c", b[0]) + } +} + +//nolint:funlen,cyclop +func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { + // basic-string = quotation-mark *basic-char quotation-mark + // quotation-mark = %x22 ; " + // basic-char = basic-unescaped / escaped + // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // escaped = escape escape-seq-char + // escape-seq-char = %x22 ; " quotation mark U+0022 + // escape-seq-char =/ %x5C ; \ reverse solidus U+005C + // escape-seq-char =/ %x62 ; b backspace U+0008 + // escape-seq-char =/ %x66 ; f form feed U+000C + // escape-seq-char =/ %x6E ; n line feed U+000A + // escape-seq-char =/ %x72 ; r carriage return U+000D + // escape-seq-char =/ %x74 ; t tab U+0009 + // escape-seq-char =/ %x75 4HEXDIG ; uXXXX U+XXXX + // escape-seq-char =/ %x55 8HEXDIG ; 
UXXXXXXXX U+XXXXXXXX + token, escaped, rest, err := scanBasicString(b) + if err != nil { + return nil, nil, nil, err + } + + startIdx := len(`"`) + endIdx := len(token) - len(`"`) + + // Fast path. If there is no escape sequence, the string should just be + // an UTF-8 encoded string, which is the same as Go. In that case, + // validate the string and return a direct reference to the buffer. + if !escaped { + str := token[startIdx:endIdx] + verr := characters.Utf8TomlValidAlreadyEscaped(str) + if verr.Zero() { + return token, str, rest, nil + } + return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + } + + i := startIdx + + var builder bytes.Buffer + + // The scanner ensures that the token starts and ends with quotes and that + // escapes are balanced. + for i < len(token)-1 { + c := token[i] + if c == '\\' { + i++ + c = token[i] + + switch c { + case '"', '\\': + builder.WriteByte(c) + case 'b': + builder.WriteByte('\b') + case 'f': + builder.WriteByte('\f') + case 'n': + builder.WriteByte('\n') + case 'r': + builder.WriteByte('\r') + case 't': + builder.WriteByte('\t') + case 'e': + builder.WriteByte(0x1B) + case 'u': + x, err := hexToRune(token[i+1:len(token)-1], 4) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 4 + case 'U': + x, err := hexToRune(token[i+1:len(token)-1], 8) + if err != nil { + return nil, nil, nil, err + } + + builder.WriteRune(x) + i += 8 + default: + return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) + } + i++ + } else { + size := characters.Utf8ValidNext(token[i:]) + if size == 0 { + return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) + } + builder.Write(token[i : i+size]) + i += size + } + } + + return token, builder.Bytes(), rest, nil +} + +func hexToRune(b []byte, length int) (rune, error) { + if len(b) < length { + return -1, NewParserError(b, "unicode point needs %d character, not %d", length, len(b)) 
+ } + b = b[:length] + + var r uint32 + for i, c := range b { + d := uint32(0) + switch { + case '0' <= c && c <= '9': + d = uint32(c - '0') + case 'a' <= c && c <= 'f': + d = uint32(c - 'a' + 10) + case 'A' <= c && c <= 'F': + d = uint32(c - 'A' + 10) + default: + return -1, NewParserError(b[i:i+1], "non-hex character") + } + r = r*16 + d + } + + if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 { + return -1, NewParserError(b, "escape sequence is invalid Unicode code point") + } + + return rune(r), nil +} + +func (p *Parser) parseWhitespace(b []byte) []byte { + // ws = *wschar + // wschar = %x20 ; Space + // wschar =/ %x09 ; Horizontal tab + _, rest := scanWhitespace(b) + + return rest +} + +//nolint:cyclop +func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error) { + switch b[0] { + case 'i': + if !scanFollowsInf(b) { + return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'inf'") + } + + return p.builder.Push(Node{ + Kind: Float, + Data: b[:3], + Raw: p.Range(b[:3]), + }), b[3:], nil + case 'n': + if !scanFollowsNan(b) { + return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'nan'") + } + + return p.builder.Push(Node{ + Kind: Float, + Data: b[:3], + Raw: p.Range(b[:3]), + }), b[3:], nil + case '+', '-': + return p.scanIntOrFloat(b) + } + + if len(b) < 3 { + return p.scanIntOrFloat(b) + } + + s := 5 + if len(b) < s { + s = len(b) + } + + for idx, c := range b[:s] { + if isDigit(c) { + continue + } + + if idx == 2 && c == ':' || (idx == 4 && c == '-') { + return p.scanDateTime(b) + } + + break + } + + return p.scanIntOrFloat(b) +} + +func (p *Parser) scanDateTime(b []byte) (reference, []byte, error) { + // scans for contiguous characters in [0-9T:Z.+-], and up to one space if + // followed by a digit. 
+ hasDate := false + hasTime := false + hasTz := false + seenSpace := false + + i := 0 +byteLoop: + for ; i < len(b); i++ { + c := b[i] + + switch { + case isDigit(c): + case c == '-': + hasDate = true + const minOffsetOfTz = 8 + if i >= minOffsetOfTz { + hasTz = true + } + case c == 'T' || c == 't' || c == ':' || c == '.': + hasTime = true + case c == '+' || c == '-' || c == 'Z' || c == 'z': + hasTz = true + case c == ' ': + if !seenSpace && i+1 < len(b) && isDigit(b[i+1]) { + i += 2 + // Avoid reaching past the end of the document in case the time + // is malformed. See TestIssue585. + if i >= len(b) { + i-- + } + seenSpace = true + hasTime = true + } else { + break byteLoop + } + default: + break byteLoop + } + } + + var kind Kind + + if hasTime { + if hasDate { + if hasTz { + kind = DateTime + } else { + kind = LocalDateTime + } + } else { + kind = LocalTime + } + } else { + kind = LocalDate + } + + return p.builder.Push(Node{ + Kind: kind, + Data: b[:i], + }), b[i:], nil +} + +//nolint:funlen,gocognit,cyclop +func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { + i := 0 + + if len(b) > 2 && b[0] == '0' && b[1] != '.' && b[1] != 'e' && b[1] != 'E' { + var isValidRune validRuneFn + + switch b[1] { + case 'x': + isValidRune = isValidHexRune + case 'o': + isValidRune = isValidOctalRune + case 'b': + isValidRune = isValidBinaryRune + default: + i++ + } + + if isValidRune != nil { + i += 2 + for ; i < len(b); i++ { + if !isValidRune(b[i]) { + break + } + } + } + + return p.builder.Push(Node{ + Kind: Integer, + Data: b[:i], + Raw: p.Range(b[:i]), + }), b[i:], nil + } + + isFloat := false + + for ; i < len(b); i++ { + c := b[i] + + if c >= '0' && c <= '9' || c == '+' || c == '-' || c == '_' { + continue + } + + if c == '.' 
|| c == 'e' || c == 'E' { + isFloat = true + + continue + } + + if c == 'i' { + if scanFollowsInf(b[i:]) { + return p.builder.Push(Node{ + Kind: Float, + Data: b[:i+3], + Raw: p.Range(b[:i+3]), + }), b[i+3:], nil + } + + return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'i' while scanning for a number") + } + + if c == 'n' { + if scanFollowsNan(b[i:]) { + return p.builder.Push(Node{ + Kind: Float, + Data: b[:i+3], + Raw: p.Range(b[:i+3]), + }), b[i+3:], nil + } + + return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'n' while scanning for a number") + } + + break + } + + if i == 0 { + return invalidReference, b, NewParserError(b, "incomplete number") + } + + kind := Integer + + if isFloat { + kind = Float + } + + return p.builder.Push(Node{ + Kind: kind, + Data: b[:i], + Raw: p.Range(b[:i]), + }), b[i:], nil +} + +func isDigit(r byte) bool { + return r >= '0' && r <= '9' +} + +type validRuneFn func(r byte) bool + +func isValidHexRune(r byte) bool { + return r >= 'a' && r <= 'f' || + r >= 'A' && r <= 'F' || + r >= '0' && r <= '9' || + r == '_' +} + +func isValidOctalRune(r byte) bool { + return r >= '0' && r <= '7' || r == '_' +} + +func isValidBinaryRune(r byte) bool { + return r == '0' || r == '1' || r == '_' +} + +func expect(x byte, b []byte) ([]byte, error) { + if len(b) == 0 { + return nil, NewParserError(b, "expected character %c but the document ended here", x) + } + + if b[0] != x { + return nil, NewParserError(b[0:1], "expected character %c", x) + } + + return b[1:], nil +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go new file mode 100644 index 000000000..0512181d2 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go @@ -0,0 +1,270 @@ +package unstable + +import "github.com/pelletier/go-toml/v2/internal/characters" + +func scanFollows(b []byte, pattern string) bool { + 
n := len(pattern) + + return len(b) >= n && string(b[:n]) == pattern +} + +func scanFollowsMultilineBasicStringDelimiter(b []byte) bool { + return scanFollows(b, `"""`) +} + +func scanFollowsMultilineLiteralStringDelimiter(b []byte) bool { + return scanFollows(b, `'''`) +} + +func scanFollowsTrue(b []byte) bool { + return scanFollows(b, `true`) +} + +func scanFollowsFalse(b []byte) bool { + return scanFollows(b, `false`) +} + +func scanFollowsInf(b []byte) bool { + return scanFollows(b, `inf`) +} + +func scanFollowsNan(b []byte) bool { + return scanFollows(b, `nan`) +} + +func scanUnquotedKey(b []byte) ([]byte, []byte) { + // unquoted-key = 1*( ALPHA / DIGIT / %x2D / %x5F ) ; A-Z / a-z / 0-9 / - / _ + for i := 0; i < len(b); i++ { + if !isUnquotedKeyChar(b[i]) { + return b[:i], b[i:] + } + } + + return b, b[len(b):] +} + +func isUnquotedKeyChar(r byte) bool { + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '-' || r == '_' +} + +func scanLiteralString(b []byte) ([]byte, []byte, error) { + // literal-string = apostrophe *literal-char apostrophe + // apostrophe = %x27 ; ' apostrophe + // literal-char = %x09 / %x20-26 / %x28-7E / non-ascii + for i := 1; i < len(b); { + switch b[i] { + case '\'': + return b[:i+1], b[i+1:], nil + case '\n', '\r': + return nil, nil, NewParserError(b[i:i+1], "literal strings cannot have new lines") + } + size := characters.Utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, NewParserError(b[i:i+1], "invalid character") + } + i += size + } + + return nil, nil, NewParserError(b[len(b):], "unterminated literal string") +} + +func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) { + // ml-literal-string = ml-literal-string-delim [ newline ] ml-literal-body + // ml-literal-string-delim + // ml-literal-string-delim = 3apostrophe + // ml-literal-body = *mll-content *( mll-quotes 1*mll-content ) [ mll-quotes ] + // + // mll-content = mll-char / newline + // mll-char = %x09 / %x20-26 / 
%x28-7E / non-ascii + // mll-quotes = 1*2apostrophe + for i := 3; i < len(b); { + switch b[i] { + case '\'': + if scanFollowsMultilineLiteralStringDelimiter(b[i:]) { + i += 3 + + // At that point we found 3 apostrophe, and i is the + // index of the byte after the third one. The scanner + // needs to be eager, because there can be an extra 2 + // apostrophe that can be accepted at the end of the + // string. + + if i >= len(b) || b[i] != '\'' { + return b[:i], b[i:], nil + } + i++ + + if i >= len(b) || b[i] != '\'' { + return b[:i], b[i:], nil + } + i++ + + if i < len(b) && b[i] == '\'' { + return nil, nil, NewParserError(b[i-3:i+1], "''' not allowed in multiline literal string") + } + + return b[:i], b[i:], nil + } + case '\r': + if len(b) < i+2 { + return nil, nil, NewParserError(b[len(b):], `need a \n after \r`) + } + if b[i+1] != '\n' { + return nil, nil, NewParserError(b[i:i+2], `need a \n after \r`) + } + i += 2 // skip the \n + continue + } + size := characters.Utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, NewParserError(b[i:i+1], "invalid character") + } + i += size + } + + return nil, nil, NewParserError(b[len(b):], `multiline literal string not terminated by '''`) +} + +func scanWindowsNewline(b []byte) ([]byte, []byte, error) { + const lenCRLF = 2 + if len(b) < lenCRLF { + return nil, nil, NewParserError(b, "windows new line expected") + } + + if b[1] != '\n' { + return nil, nil, NewParserError(b, `windows new line should be \r\n`) + } + + return b[:lenCRLF], b[lenCRLF:], nil +} + +func scanWhitespace(b []byte) ([]byte, []byte) { + for i := 0; i < len(b); i++ { + switch b[i] { + case ' ', '\t': + continue + default: + return b[:i], b[i:] + } + } + + return b, b[len(b):] +} + +func scanComment(b []byte) ([]byte, []byte, error) { + // comment-start-symbol = %x23 ; # + // non-ascii = %x80-D7FF / %xE000-10FFFF + // non-eol = %x09 / %x20-7F / non-ascii + // + // comment = comment-start-symbol *non-eol + + for i := 1; i < len(b); { + if b[i] == '\n' 
{ + return b[:i], b[i:], nil + } + if b[i] == '\r' { + if i+1 < len(b) && b[i+1] == '\n' { + return b[:i+1], b[i+1:], nil + } + return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") + } + size := characters.Utf8ValidNext(b[i:]) + if size == 0 { + return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") + } + + i += size + } + + return b, b[len(b):], nil +} + +func scanBasicString(b []byte) ([]byte, bool, []byte, error) { + // basic-string = quotation-mark *basic-char quotation-mark + // quotation-mark = %x22 ; " + // basic-char = basic-unescaped / escaped + // basic-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // escaped = escape escape-seq-char + escaped := false + i := 1 + + for ; i < len(b); i++ { + switch b[i] { + case '"': + return b[:i+1], escaped, b[i+1:], nil + case '\n', '\r': + return nil, escaped, nil, NewParserError(b[i:i+1], "basic strings cannot have new lines") + case '\\': + if len(b) < i+2 { + return nil, escaped, nil, NewParserError(b[i:i+1], "need a character after \\") + } + escaped = true + i++ // skip the next character + } + } + + return nil, escaped, nil, NewParserError(b[len(b):], `basic string not terminated by "`) +} + +func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) { + // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body + // ml-basic-string-delim + // ml-basic-string-delim = 3quotation-mark + // ml-basic-body = *mlb-content *( mlb-quotes 1*mlb-content ) [ mlb-quotes ] + // + // mlb-content = mlb-char / newline / mlb-escaped-nl + // mlb-char = mlb-unescaped / escaped + // mlb-quotes = 1*2quotation-mark + // mlb-unescaped = wschar / %x21 / %x23-5B / %x5D-7E / non-ascii + // mlb-escaped-nl = escape ws newline *( wschar / newline ) + + escaped := false + i := 3 + + for ; i < len(b); i++ { + switch b[i] { + case '"': + if scanFollowsMultilineBasicStringDelimiter(b[i:]) { + i += 3 + + // At that point we found 3 apostrophe, and i is the + // index of 
the byte after the third one. The scanner + // needs to be eager, because there can be an extra 2 + // apostrophe that can be accepted at the end of the + // string. + + if i >= len(b) || b[i] != '"' { + return b[:i], escaped, b[i:], nil + } + i++ + + if i >= len(b) || b[i] != '"' { + return b[:i], escaped, b[i:], nil + } + i++ + + if i < len(b) && b[i] == '"' { + return nil, escaped, nil, NewParserError(b[i-3:i+1], `""" not allowed in multiline basic string`) + } + + return b[:i], escaped, b[i:], nil + } + case '\\': + if len(b) < i+2 { + return nil, escaped, nil, NewParserError(b[len(b):], "need a character after \\") + } + escaped = true + i++ // skip the next character + case '\r': + if len(b) < i+2 { + return nil, escaped, nil, NewParserError(b[len(b):], `need a \n after \r`) + } + if b[i+1] != '\n' { + return nil, escaped, nil, NewParserError(b[i:i+2], `need a \n after \r`) + } + i++ // skip the \n + } + } + + return nil, escaped, nil, NewParserError(b[len(b):], `multiline basic string not terminated by """`) +} diff --git a/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go new file mode 100644 index 000000000..00cfd6de4 --- /dev/null +++ b/nodeadm/vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go @@ -0,0 +1,7 @@ +package unstable + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a TOML document. 
+type Unmarshaler interface { + UnmarshalTOML(value *Node) error +} diff --git a/nodeadm/vendor/github.com/stretchr/objx/README.md b/nodeadm/vendor/github.com/stretchr/objx/README.md index 246660b21..78dc1f8b0 100644 --- a/nodeadm/vendor/github.com/stretchr/objx/README.md +++ b/nodeadm/vendor/github.com/stretchr/objx/README.md @@ -4,20 +4,20 @@ [![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability) [![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage) [![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx) -[![GoDoc](https://godoc.org/github.com/stretchr/objx?status.svg)](https://godoc.org/github.com/stretchr/objx) +[![GoDoc](https://pkg.go.dev/badge/github.com/stretchr/objx?utm_source=godoc)](https://pkg.go.dev/github.com/stretchr/objx) Objx - Go package for dealing with maps, slices, JSON and other data. Get started: - Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date) -- Check out the API Documentation http://godoc.org/github.com/stretchr/objx +- Check out the API Documentation http://pkg.go.dev/github.com/stretchr/objx ## Overview Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. ### Pattern -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: +Objx uses a predictable pattern to make access data from within `map[string]interface{}` easy. 
Call one of the `objx.` functions to create your `objx.Map` to get going: m, err := objx.FromJSON(json) @@ -74,7 +74,7 @@ To update Objx to the latest version, run: go get -u github.com/stretchr/objx ### Supported go versions -We support the lastest three major Go versions, which are 1.10, 1.11 and 1.12 at the moment. +We currently support the three recent major Go versions. ## Contributing Please feel free to submit issues, fork the repository and send pull requests! diff --git a/nodeadm/vendor/github.com/stretchr/objx/Taskfile.yml b/nodeadm/vendor/github.com/stretchr/objx/Taskfile.yml index 7746f516d..8a79e8d67 100644 --- a/nodeadm/vendor/github.com/stretchr/objx/Taskfile.yml +++ b/nodeadm/vendor/github.com/stretchr/objx/Taskfile.yml @@ -1,7 +1,4 @@ -version: '2' - -env: - GOFLAGS: -mod=vendor +version: '3' tasks: default: diff --git a/nodeadm/vendor/github.com/stretchr/objx/accessors.go b/nodeadm/vendor/github.com/stretchr/objx/accessors.go index 4c6045588..72f1d1c1c 100644 --- a/nodeadm/vendor/github.com/stretchr/objx/accessors.go +++ b/nodeadm/vendor/github.com/stretchr/objx/accessors.go @@ -14,17 +14,17 @@ const ( // For example, `location.address.city` PathSeparator string = "." 
- // arrayAccesRegexString is the regex used to extract the array number + // arrayAccessRegexString is the regex used to extract the array number // from the access path - arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + arrayAccessRegexString = `^(.+)\[([0-9]+)\]$` // mapAccessRegexString is the regex used to extract the map key // from the access path mapAccessRegexString = `^([^\[]*)\[([^\]]+)\](.*)$` ) -// arrayAccesRegex is the compiled arrayAccesRegexString -var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) +// arrayAccessRegex is the compiled arrayAccessRegexString +var arrayAccessRegex = regexp.MustCompile(arrayAccessRegexString) // mapAccessRegex is the compiled mapAccessRegexString var mapAccessRegex = regexp.MustCompile(mapAccessRegexString) @@ -37,11 +37,11 @@ var mapAccessRegex = regexp.MustCompile(mapAccessRegexString) // // Get can only operate directly on map[string]interface{} and []interface. // -// Example +// # Example // // To access the title of the third chapter of the second book, do: // -// o.Get("books[1].chapters[2].title") +// o.Get("books[1].chapters[2].title") func (m Map) Get(selector string) *Value { rawObj := access(m, selector, nil, false) return &Value{data: rawObj} @@ -52,26 +52,26 @@ func (m Map) Get(selector string) *Value { // // Set can only operate directly on map[string]interface{} and []interface // -// Example +// # Example // // To set the title of the third chapter of the second book, do: // -// o.Set("books[1].chapters[2].title","Time to Go") +// o.Set("books[1].chapters[2].title","Time to Go") func (m Map) Set(selector string, value interface{}) Map { access(m, selector, value, true) return m } -// getIndex returns the index, which is hold in s by two braches. -// It also returns s withour the index part, e.g. name[1] will return (1, name). +// getIndex returns the index, which is hold in s by two branches. +// It also returns s without the index part, e.g. name[1] will return (1, name). 
// If no index is found, -1 is returned func getIndex(s string) (int, string) { - arrayMatches := arrayAccesRegex.FindStringSubmatch(s) + arrayMatches := arrayAccessRegex.FindStringSubmatch(s) if len(arrayMatches) > 0 { // Get the key into the map selector := arrayMatches[1] // Get the index into the array at the key - // We know this cannt fail because arrayMatches[2] is an int for sure + // We know this can't fail because arrayMatches[2] is an int for sure index, _ := strconv.Atoi(arrayMatches[2]) return index, selector } diff --git a/nodeadm/vendor/github.com/stretchr/objx/conversions.go b/nodeadm/vendor/github.com/stretchr/objx/conversions.go index 080aa46e4..01c63d7d3 100644 --- a/nodeadm/vendor/github.com/stretchr/objx/conversions.go +++ b/nodeadm/vendor/github.com/stretchr/objx/conversions.go @@ -15,7 +15,7 @@ import ( const SignatureSeparator = "_" // URLValuesSliceKeySuffix is the character that is used to -// specify a suffic for slices parsed by URLValues. +// specify a suffix for slices parsed by URLValues. // If the suffix is set to "[i]", then the index of the slice // is used in place of i // Ex: Suffix "[]" would have the form a[]=b&a[]=c @@ -30,7 +30,7 @@ const ( ) // SetURLValuesSliceKeySuffix sets the character that is used to -// specify a suffic for slices parsed by URLValues. +// specify a suffix for slices parsed by URLValues. // If the suffix is set to "[i]", then the index of the slice // is used in place of i // Ex: Suffix "[]" would have the form a[]=b&a[]=c diff --git a/nodeadm/vendor/github.com/stretchr/objx/doc.go b/nodeadm/vendor/github.com/stretchr/objx/doc.go index 6d6af1a83..b170af74b 100644 --- a/nodeadm/vendor/github.com/stretchr/objx/doc.go +++ b/nodeadm/vendor/github.com/stretchr/objx/doc.go @@ -1,19 +1,19 @@ /* -Objx - Go package for dealing with maps, slices, JSON and other data. +Package objx provides utilities for dealing with maps, slices, JSON and other data. 
-Overview +# Overview Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. -Pattern +# Pattern -Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. +Objx uses a predictable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: - m, err := objx.FromJSON(json) + m, err := objx.FromJSON(json) NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, the rest will be optimistic and try to figure things out without panicking. @@ -21,46 +21,46 @@ the rest will be optimistic and try to figure things out without panicking. Use `Get` to access the value you're interested in. You can use dot and array notation too: - m.Get("places[0].latlng") + m.Get("places[0].latlng") Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. - if m.Get("code").IsStr() { // Your code... } + if m.Get("code").IsStr() { // Your code... } Or you can just assume the type, and use one of the strong type methods to extract the real value: - m.Get("code").Int() + m.Get("code").Int() If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value. - Get("code").Int(-1) + Get("code").Int(-1) If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below. 
-Reading data +# Reading data A simple example of how to use Objx: - // Use MustFromJSON to make an objx.Map from some JSON - m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) - // Get the details - name := m.Get("name").Str() - age := m.Get("age").Int() + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() - // Get their nickname (or use their name if they don't have one) - nickname := m.Get("nickname").Str(name) + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) -Ranging +# Ranging Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect: - m := objx.MustFromJSON(json) - for key, value := range m { - // Your code... - } + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... + } */ package objx diff --git a/nodeadm/vendor/github.com/stretchr/objx/map.go b/nodeadm/vendor/github.com/stretchr/objx/map.go index a64712a08..ab9f9ae67 100644 --- a/nodeadm/vendor/github.com/stretchr/objx/map.go +++ b/nodeadm/vendor/github.com/stretchr/objx/map.go @@ -47,17 +47,16 @@ func New(data interface{}) Map { // // The arguments follow a key, value pattern. // -// // Returns nil if any key argument is non-string or if there are an odd number of arguments. 
// -// Example +// # Example // // To easily create Maps: // -// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) // -// // creates an Map equivalent to -// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} +// // creates an Map equivalent to +// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} func MSI(keyAndValuePairs ...interface{}) Map { newMap := Map{} keyAndValuePairsLen := len(keyAndValuePairs) diff --git a/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_compare.go index b774da88d..4d4b4aad6 100644 --- a/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -28,6 +28,8 @@ var ( uint32Type = reflect.TypeOf(uint32(1)) uint64Type = reflect.TypeOf(uint64(1)) + uintptrType = reflect.TypeOf(uintptr(1)) + float32Type = reflect.TypeOf(float32(1)) float64Type = reflect.TypeOf(float64(1)) @@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Struct: { // All structs enter here. We're not interested in most types. - if !canConvert(obj1Value, timeType) { + if !obj1Value.CanConvert(timeType) { break } - // time.Time can compared! + // time.Time can be compared! timeObj1, ok := obj1.(time.Time) if !ok { timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) @@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { case reflect.Slice: { // We only care about the []byte type. 
- if !canConvert(obj1Value, bytesType) { + if !obj1Value.CanConvert(bytesType) { break } @@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true } + case reflect.Uintptr: + { + uintptrObj1, ok := obj1.(uintptr) + if !ok { + uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr) + } + uintptrObj2, ok := obj2.(uintptr) + if !ok { + uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr) + } + if uintptrObj1 > uintptrObj2 { + return compareGreater, true + } + if uintptrObj1 == uintptrObj2 { + return compareEqual, true + } + if uintptrObj1 < uintptrObj2 { + return compareLess, true + } + } } return compareEqual, false diff --git a/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go deleted file mode 100644 index da867903e..000000000 --- a/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build go1.17 -// +build go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_legacy.go - -package assert - -import "reflect" - -// Wrapper around reflect.Value.CanConvert, for compatibility -// reasons. 
-func canConvert(value reflect.Value, to reflect.Type) bool { - return value.CanConvert(to) -} diff --git a/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go deleted file mode 100644 index 1701af2a3..000000000 --- a/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !go1.17 -// +build !go1.17 - -// TODO: once support for Go 1.16 is dropped, this file can be -// merged/removed with assertion_compare_go1.17_test.go and -// assertion_compare_can_convert.go - -package assert - -import "reflect" - -// Older versions of Go does not have the reflect.Value.CanConvert -// method. -func canConvert(value reflect.Value, to reflect.Type) bool { - return false -} diff --git a/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_format.go b/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_format.go index 84dbd6c79..3ddab109a 100644 --- a/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. 
// // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") @@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) } +// NotImplementsf asserts that an object does not implement the specified interface. +// +// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...) +} + // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) 
contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_forward.go index b1d94aec5..a84e09bd4 100644 --- a/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/nodeadm/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -1,7 +1,4 @@ -/* -* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen -* THIS FILE MUST NOT BE EDITED BY HAND - */ +// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT. package assert @@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // a.EqualValues(uint32(123), int32(123)) @@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertable to the same types +// EqualValuesf asserts that two objects are equal or convertible to the same types // and equal. 
// // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") @@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in return NotErrorIsf(a.t, err, target, msg, args...) } +// NotImplements asserts that an object does not implement the specified interface. +// +// a.NotImplements((*MyInterface)(nil), new(MyObject)) +func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplements(a.t, interfaceObject, object, msgAndArgs...) +} + +// NotImplementsf asserts that an object does not implement the specified interface. +// +// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotImplementsf(a.t, interfaceObject, object, msg, args...) +} + // NotNil asserts that the specified object is not nil. // // a.NotNil(err) @@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. 
// -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// a.NotSubset([1, 3, 4], [1, 2]) +// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubsetf asserts that the specified list(array, slice...) or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. 
// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// a.Subset([1, 2, 3], [1, 2]) +// a.Subset({"x": 1, "y": 2}, {"x": 1}) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subsetf asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") +// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/nodeadm/vendor/github.com/stretchr/testify/assert/assertions.go b/nodeadm/vendor/github.com/stretchr/testify/assert/assertions.go index a55d1bba9..0b7570f21 100644 --- a/nodeadm/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/nodeadm/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v3" + "gopkg.in/yaml.v3" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} { return result.Interface() case reflect.Array, reflect.Slice: - result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + var result reflect.Value + if 
expectedKind == reflect.Array { + result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem() + } else { + result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) + } for i := 0; i < expectedValue.Len(); i++ { index := expectedValue.Index(i) if isNil(index) { @@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} { // structures. // // This function does no assertion of any kind. +// +// Deprecated: Use [EqualExportedValues] instead. func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { expectedCleaned := copyExportedFields(expected) actualCleaned := copyExportedFields(actual) @@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool { return true } - actualType := reflect.TypeOf(actual) - if actualType == nil { + expectedValue := reflect.ValueOf(expected) + actualValue := reflect.ValueOf(actual) + if !expectedValue.IsValid() || !actualValue.IsValid() { return false } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + + expectedType := expectedValue.Type() + actualType := actualValue.Type() + if !expectedType.ConvertibleTo(actualType) { + return false + } + + if !isNumericType(expectedType) || !isNumericType(actualType) { // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + return reflect.DeepEqual( + expectedValue.Convert(actualType).Interface(), actual, + ) } - return false + // If BOTH values are numeric, there are chances of false positives due + // to overflow or underflow. So, we need to make sure to always convert + // the smaller type to a larger type before comparing. 
+ if expectedType.Size() >= actualType.Size() { + return actualValue.Convert(expectedType).Interface() == expected + } + + return expectedValue.Convert(actualType).Interface() == actual +} + +// isNumericType returns true if the type is one of: +// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, +// float32, float64, complex64, complex128 +func isNumericType(t reflect.Type) bool { + return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128 } /* CallerInfo is necessary because the assert functions use the testing object @@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { // Aligns the provided message so that all lines after the first line start at the same location as the first line. // Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the // basis on which the alignment occurs). func indentMessageLines(message string, longestLabelLen int) string { outBuf := new(bytes.Buffer) @@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg return true } +// NotImplements asserts that an object does not implement the specified interface. +// +// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + interfaceType := reflect.TypeOf(interfaceObject).Elem() + + if object == nil { + return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...) 
+ } + if reflect.TypeOf(object).Implements(interfaceType) { + return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...) + } + + return true +} + // IsType asserts that the specified objects are of the same type. func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool { // representations appropriate to be presented to the user. // // If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar +// with the type name, and the value will be enclosed in parentheses similar // to a type conversion in the Go grammar. func formatUnequalValues(expected, actual interface{}) (e string, a string) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) { @@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertable to the same types +// EqualValues asserts that two objects are equal or convertible to the same types // and equal. // // assert.EqualValues(t, uint32(123), int32(123)) @@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } + if aType.Kind() == reflect.Ptr { + aType = aType.Elem() + } + if bType.Kind() == reflect.Ptr { + bType = bType.Elem() + } + if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) 
} if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) + return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) } expected = copyExportedFields(expected) @@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { return Fail(t, "Expected value not to be nil.", msgAndArgs...) } -// containsKind checks if a specified kind in the slice of kinds. -func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { - for i := 0; i < len(kinds); i++ { - if kind == kinds[i] { - return true - } - } - - return false -} - // isNil checks if a specified object is nil or not, without Failing. func isNil(object interface{}) bool { if object == nil { @@ -638,16 +683,13 @@ func isNil(object interface{}) bool { } value := reflect.ValueOf(object) - kind := value.Kind() - isNilableKind := containsKind( - []reflect.Kind{ - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice, reflect.UnsafePointer}, - kind) - - if isNilableKind && value.IsNil() { - return true + switch value.Kind() { + case + reflect.Chan, reflect.Func, + reflect.Interface, reflect.Map, + reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + + return value.IsNil() } return false @@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { +// getLen tries to get the length of an object. +// It returns (0, false) if impossible. 
+func getLen(x interface{}) (length int, ok bool) { v := reflect.ValueOf(x) defer func() { - if e := recover(); e != nil { - ok = false - } + ok = recover() == nil }() - return true, v.Len() + return v.Len(), true } // Len asserts that the specified object has specific length. @@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - ok, l := getLen(object) + l, ok := getLen(object) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...) } if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) } return true } @@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). +// Subset asserts that the specified list(array, slice...) or map contains all +// elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") +// assert.Subset(t, [1, 2, 3], [1, 2]) +// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). +// NotSubset asserts that the specified list(array, slice...) 
or map does NOT +// contain all elements given in the specified subset list(array, slice...) or +// map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") +// assert.NotSubset(t, [1, 3, 4], [1, 2]) +// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd h.Helper() } if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN") + return Fail(t, "epsilon must not be NaN", msgAndArgs...) } actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { @@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m if h, ok := t.(tHelper); ok { h.Helper() } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { + + if expected == nil || actual == nil { return Fail(t, "Parameters must be slice", msgAndArgs...) } - actualSlice := reflect.ValueOf(actual) expectedSlice := reflect.ValueOf(expected) + actualSlice := reflect.ValueOf(actual) - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result + if expectedSlice.Type().Kind() != reflect.Slice { + return Fail(t, "Expected value must be slice", msgAndArgs...) + } + + expectedLen := expectedSlice.Len() + if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) { + return false + } + + for i := 0; i < expectedLen; i++ { + if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) { + return false } } @@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { } // FailNow panics. 
-func (c *CollectT) FailNow() { +func (*CollectT) FailNow() { panic("Assertion failed") } -// Reset clears the collected errors. -func (c *CollectT) Reset() { - c.errors = nil +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Reset() { + panic("Reset() is deprecated") } -// Copy copies the collected errors to the supplied t. -func (c *CollectT) Copy(t TestingT) { - if tt, ok := t.(tHelper); ok { - tt.Helper() - } - for _, err := range c.errors { - t.Errorf("%v", err) - } +// Deprecated: That was a method for internal usage that should not have been published. Now just panics. +func (*CollectT) Copy(TestingT) { + panic("Copy() is deprecated") } // EventuallyWithT asserts that given condition will be met in waitFor time, @@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time h.Helper() } - collect := new(CollectT) - ch := make(chan bool, 1) + var lastFinishedTickErrs []error + ch := make(chan []error, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time for tick := ticker.C; ; { select { case <-timer.C: - collect.Copy(t) + for _, err := range lastFinishedTickErrs { + t.Errorf("%v", err) + } return Fail(t, "Condition never satisfied", msgAndArgs...) case <-tick: tick = nil - collect.Reset() go func() { + collect := new(CollectT) + defer func() { + ch <- collect.errors + }() condition(collect) - ch <- len(collect.errors) == 0 }() - case v := <-ch: - if v { + case errs := <-ch: + if len(errs) == 0 { return true } + // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. 
+ lastFinishedTickErrs = errs tick = ticker.C } } diff --git a/nodeadm/vendor/github.com/stretchr/testify/assert/http_assertions.go b/nodeadm/vendor/github.com/stretchr/testify/assert/http_assertions.go index d8038c28a..861ed4b7c 100644 --- a/nodeadm/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/nodeadm/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -12,7 +12,7 @@ import ( // an error if building a new request fails. func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return -1, err } @@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isSuccessCode @@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) 
} isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isRedirectCode @@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } isErrorCode := code >= http.StatusBadRequest if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...) } return isErrorCode @@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va } code, err := httpCode(handler, method, url, values) if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...) } successful := code == statuscode if !successful { - Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) + Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...) } return successful @@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va // empty string if building a new request fails. 
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if len(values) > 0 { + url += "?" + values.Encode() + } + req, err := http.NewRequest(method, url, http.NoBody) if err != nil { return "" } @@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) } return contains @@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) 
} return !contains diff --git a/nodeadm/vendor/github.com/stretchr/testify/mock/mock.go b/nodeadm/vendor/github.com/stretchr/testify/mock/mock.go index f4b42e44f..213bde2ea 100644 --- a/nodeadm/vendor/github.com/stretchr/testify/mock/mock.go +++ b/nodeadm/vendor/github.com/stretchr/testify/mock/mock.go @@ -18,6 +18,9 @@ import ( "github.com/stretchr/testify/assert" ) +// regex for GCCGO functions +var gccgoRE = regexp.MustCompile(`\.pN\d+_`) + // TestingT is an interface wrapper around *testing.T type TestingT interface { Logf(format string, args ...interface{}) @@ -111,7 +114,7 @@ func (c *Call) Return(returnArguments ...interface{}) *Call { return c } -// Panic specifies if the functon call should fail and the panic message +// Panic specifies if the function call should fail and the panic message // // Mock.On("DoSomething").Panic("test panic") func (c *Call) Panic(msg string) *Call { @@ -123,21 +126,21 @@ func (c *Call) Panic(msg string) *Call { return c } -// Once indicates that that the mock should only return the value once. +// Once indicates that the mock should only return the value once. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once() func (c *Call) Once() *Call { return c.Times(1) } -// Twice indicates that that the mock should only return the value twice. +// Twice indicates that the mock should only return the value twice. // // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice() func (c *Call) Twice() *Call { return c.Times(2) } -// Times indicates that that the mock should only return the indicated number +// Times indicates that the mock should only return the indicated number // of times. 
// // Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5) @@ -455,9 +458,8 @@ func (m *Mock) Called(arguments ...interface{}) Arguments { // For Ex: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock // uses interface information unlike golang github.com/docker/libkv/store/mock.(*Mock).WatchTree // With GCCGO we need to remove interface information starting from pN
. - re := regexp.MustCompile("\\.pN\\d+_") - if re.MatchString(functionPath) { - functionPath = re.Split(functionPath, -1)[0] + if gccgoRE.MatchString(functionPath) { + functionPath = gccgoRE.Split(functionPath, -1)[0] } parts := strings.Split(functionPath, ".") functionName := parts[len(parts)-1] @@ -474,7 +476,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen found, call := m.findExpectedCall(methodName, arguments...) if found < 0 { - // expected call found but it has already been called with repeatable times + // expected call found, but it has already been called with repeatable times if call != nil { m.mutex.Unlock() m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) @@ -563,7 +565,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen Assertions */ -type assertExpectationser interface { +type assertExpectationiser interface { AssertExpectations(TestingT) bool } @@ -580,7 +582,7 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { t.Logf("Deprecated mock.AssertExpectationsForObjects(myMock.Mock) use mock.AssertExpectationsForObjects(myMock)") obj = m } - m := obj.(assertExpectationser) + m := obj.(assertExpectationiser) if !m.AssertExpectations(t) { t.Logf("Expectations didn't match for Mock: %+v", reflect.TypeOf(m)) return false @@ -592,6 +594,9 @@ func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { // AssertExpectations asserts that everything specified with On and Return was // in fact called as expected. Calls may have occurred in any order. 
func (m *Mock) AssertExpectations(t TestingT) bool { + if s, ok := t.(interface{ Skipped() bool }); ok && s.Skipped() { + return true + } if h, ok := t.(tHelper); ok { h.Helper() } @@ -606,8 +611,8 @@ func (m *Mock) AssertExpectations(t TestingT) bool { satisfied, reason := m.checkExpectation(expectedCall) if !satisfied { failedExpectations++ + t.Logf(reason) } - t.Logf(reason) } if failedExpectations != 0 { @@ -758,25 +763,33 @@ const ( Anything = "mock.Anything" ) -// AnythingOfTypeArgument is a string that contains the type of an argument +// AnythingOfTypeArgument contains the type of an argument +// for use when type checking. Used in Diff and Assert. +// +// Deprecated: this is an implementation detail that must not be used. Use [AnythingOfType] instead. +type AnythingOfTypeArgument = anythingOfTypeArgument + +// anythingOfTypeArgument is a string that contains the type of an argument // for use when type checking. Used in Diff and Assert. -type AnythingOfTypeArgument string +type anythingOfTypeArgument string -// AnythingOfType returns an AnythingOfTypeArgument object containing the -// name of the type to check for. Used in Diff and Assert. +// AnythingOfType returns a special value containing the +// name of the type to check for. The type name will be matched against the type name returned by [reflect.Type.String]. +// +// Used in Diff and Assert. // // For example: // // Assert(t, AnythingOfType("string"), AnythingOfType("int")) func AnythingOfType(t string) AnythingOfTypeArgument { - return AnythingOfTypeArgument(t) + return anythingOfTypeArgument(t) } // IsTypeArgument is a struct that contains the type of an argument // for use when type checking. This is an alternative to AnythingOfType. // Used in Diff and Assert. type IsTypeArgument struct { - t interface{} + t reflect.Type } // IsType returns an IsTypeArgument object containing the type to check for. 
@@ -786,7 +799,7 @@ type IsTypeArgument struct { // For example: // Assert(t, IsType(""), IsType(0)) func IsType(t interface{}) *IsTypeArgument { - return &IsTypeArgument{t: t} + return &IsTypeArgument{t: reflect.TypeOf(t)} } // FunctionalOptionsArgument is a struct that contains the type and value of an functional option argument @@ -950,53 +963,55 @@ func (args Arguments) Diff(objects []interface{}) (string, int) { differences++ output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher) } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() { - // type checking - if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) - } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*IsTypeArgument)(nil)) { - t := expected.(*IsTypeArgument).t - if reflect.TypeOf(t) != reflect.TypeOf(actual) { - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, reflect.TypeOf(t).Name(), reflect.TypeOf(actual).Name(), actualFmt) - } - } else if reflect.TypeOf(expected) == reflect.TypeOf((*FunctionalOptionsArgument)(nil)) { - t := expected.(*FunctionalOptionsArgument).value + } else { + switch expected := expected.(type) { + case anythingOfTypeArgument: + // type checking + if reflect.TypeOf(actual).Name() != string(expected) && reflect.TypeOf(actual).String() != string(expected) { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actualFmt) + } + case *IsTypeArgument: + actualT := reflect.TypeOf(actual) + if actualT != expected.t { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", 
output, i, expected.t.Name(), actualT.Name(), actualFmt) + } + case *FunctionalOptionsArgument: + t := expected.value - var name string - tValue := reflect.ValueOf(t) - if tValue.Len() > 0 { - name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() - } + var name string + tValue := reflect.ValueOf(t) + if tValue.Len() > 0 { + name = "[]" + reflect.TypeOf(tValue.Index(0).Interface()).String() + } - tName := reflect.TypeOf(t).Name() - if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) - } else { - if ef, af := assertOpts(t, actual); ef == "" && af == "" { + tName := reflect.TypeOf(t).Name() + if name != reflect.TypeOf(actual).String() && tValue.Len() != 0 { + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: type %s != type %s - %s\n", output, i, tName, reflect.TypeOf(actual).Name(), actualFmt) + } else { + if ef, af := assertOpts(t, actual); ef == "" && af == "" { + // match + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) + } else { + // not match + differences++ + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef) + } + } + + default: + if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { // match - output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, tName, tName) + output = fmt.Sprintf("%s\t%d: PASS: %s == %s\n", output, i, actualFmt, expectedFmt) } else { // not match differences++ - output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, af, ef) + output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) } } - } else { - // normal checking - - if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) { - // match - output = fmt.Sprintf("%s\t%d: PASS: %s == 
%s\n", output, i, actualFmt, expectedFmt) - } else { - // not match - differences++ - output = fmt.Sprintf("%s\t%d: FAIL: %s != %s\n", output, i, actualFmt, expectedFmt) - } } } diff --git a/nodeadm/vendor/modules.txt b/nodeadm/vendor/modules.txt index 92973e1b9..509d95e36 100644 --- a/nodeadm/vendor/modules.txt +++ b/nodeadm/vendor/modules.txt @@ -197,6 +197,13 @@ github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.2 ## explicit; go 1.12 github.com/modern-go/reflect2 +# github.com/pelletier/go-toml/v2 v2.2.2 +## explicit; go 1.16 +github.com/pelletier/go-toml/v2 +github.com/pelletier/go-toml/v2/internal/characters +github.com/pelletier/go-toml/v2/internal/danger +github.com/pelletier/go-toml/v2/internal/tracker +github.com/pelletier/go-toml/v2/unstable # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib @@ -225,11 +232,11 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/stretchr/objx v0.5.0 -## explicit; go 1.12 -github.com/stretchr/objx -# github.com/stretchr/testify v1.8.4 +# github.com/stretchr/objx v0.5.2 ## explicit; go 1.20 +github.com/stretchr/objx +# github.com/stretchr/testify v1.9.0 +## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/mock # go.uber.org/multierr v1.11.0 diff --git a/templates/al2/provisioners/install-worker.sh b/templates/al2/provisioners/install-worker.sh index 680b74e23..78873a501 100644 --- a/templates/al2/provisioners/install-worker.sh +++ b/templates/al2/provisioners/install-worker.sh @@ -196,7 +196,7 @@ EOF ############################################################################### sudo yum install -y nerdctl -sudo mkdir /etc/nerdctl +sudo mkdir -p /etc/nerdctl cat << EOF | sudo tee -a /etc/nerdctl/nerdctl.toml namespace = "k8s.io" EOF @@ -437,13 +437,13 @@ if [[ "$CACHE_CONTAINER_IMAGES" == "true" ]] && ! 
[[ ${ISOLATED_REGIONS} =~ $BIN fi CACHE_IMGS=( - ${KUBE_PROXY_IMGS[@]+"${KUBE_PROXY_IMGS[@]}"} - ${VPC_CNI_IMGS[@]+"${VPC_CNI_IMGS[@]}"} + ${KUBE_PROXY_IMGS[@]:-} + ${VPC_CNI_IMGS[@]:-} ) PULLED_IMGS=() REGIONS=$(aws ec2 describe-regions --all-regions --output text --query 'Regions[].[RegionName]') - for img in "${CACHE_IMGS[@]}"; do + for img in "${CACHE_IMGS[@]:-}"; do ## only kube-proxy-minimal is vended for K8s 1.24+ if [[ "${img}" == *"kube-proxy:"* ]] && [[ "${img}" != *"-minimal-"* ]] && vercmp "${K8S_MINOR_VERSION}" gteq "1.24"; then continue @@ -467,7 +467,7 @@ if [[ "$CACHE_CONTAINER_IMAGES" == "true" ]] && ! [[ ${ISOLATED_REGIONS} =~ $BIN #### Tag the pulled down image for all other regions in the partition for region in ${REGIONS[*]}; do - for img in "${PULLED_IMGS[@]}"; do + for img in "${PULLED_IMGS[@]:-}"; do region_uri=$(/etc/eks/get-ecr-uri.sh "${region}" "${AWS_DOMAIN}") regional_img="${img/$ECR_URI/$region_uri}" sudo ctr -n k8s.io image tag "${img}" "${regional_img}" || : diff --git a/templates/al2/runtime/bootstrap.sh b/templates/al2/runtime/bootstrap.sh index 81cf70a89..beb575ba6 100755 --- a/templates/al2/runtime/bootstrap.sh +++ b/templates/al2/runtime/bootstrap.sh @@ -46,6 +46,11 @@ function log { log "INFO: starting..." 
+if [ "${EUID}" -ne 0 ]; then + log "ERROR: script must be run as root" + exit 1 +fi + POSITIONAL=() while [[ $# -gt 0 ]]; do @@ -554,15 +559,15 @@ if [[ "$CONTAINER_RUNTIME" = "containerd" ]]; then log "WARNING: Flag --docker-config-json was set but will be ignored as it's not relevant to containerd" fi - sudo mkdir -p /etc/containerd - sudo mkdir -p /etc/containerd/config.d - sudo mkdir -p /etc/cni/net.d + mkdir -p /etc/containerd + mkdir -p /etc/containerd/config.d + mkdir -p /etc/cni/net.d if [[ -n "${CONTAINERD_CONFIG_FILE}" ]]; then - sudo cp -v "${CONTAINERD_CONFIG_FILE}" /etc/eks/containerd/containerd-config.toml + cp -v "${CONTAINERD_CONFIG_FILE}" /etc/eks/containerd/containerd-config.toml fi - sudo sed -i s,SANDBOX_IMAGE,$PAUSE_CONTAINER,g /etc/eks/containerd/containerd-config.toml + sed -i s,SANDBOX_IMAGE,$PAUSE_CONTAINER,g /etc/eks/containerd/containerd-config.toml echo "$(jq '.cgroupDriver="systemd"' "${KUBELET_CONFIG}")" > "${KUBELET_CONFIG}" ##allow --reserved-cpus options via kubelet arg directly. Disable default reserved cgroup option in such cases @@ -574,17 +579,17 @@ if [[ "$CONTAINER_RUNTIME" = "containerd" ]]; then # Check if the containerd config file is the same as the one used in the image build. # If different, then restart containerd w/ proper config if ! 
cmp -s /etc/eks/containerd/containerd-config.toml /etc/containerd/config.toml; then - sudo cp -v /etc/eks/containerd/containerd-config.toml /etc/containerd/config.toml - sudo cp -v /etc/eks/containerd/sandbox-image.service /etc/systemd/system/sandbox-image.service - sudo chown root:root /etc/systemd/system/sandbox-image.service + cp -v /etc/eks/containerd/containerd-config.toml /etc/containerd/config.toml + cp -v /etc/eks/containerd/sandbox-image.service /etc/systemd/system/sandbox-image.service + chown root:root /etc/systemd/system/sandbox-image.service systemctl daemon-reload systemctl enable containerd sandbox-image systemctl restart sandbox-image containerd fi - sudo cp -v /etc/eks/containerd/kubelet-containerd.service /etc/systemd/system/kubelet.service - sudo chown root:root /etc/systemd/system/kubelet.service + cp -v /etc/eks/containerd/kubelet-containerd.service /etc/systemd/system/kubelet.service + chown root:root /etc/systemd/system/kubelet.service # Validate containerd config - sudo containerd config dump > /dev/null + containerd config dump > /dev/null # --container-runtime flag is gone in 1.27+ # TODO: remove this when 1.26 is EOL @@ -595,7 +600,7 @@ elif [[ "$CONTAINER_RUNTIME" = "dockerd" ]]; then mkdir -p /etc/docker bash -c "/sbin/iptables-save > /etc/sysconfig/iptables" cp -v /etc/eks/iptables-restore.service /etc/systemd/system/iptables-restore.service - sudo chown root:root /etc/systemd/system/iptables-restore.service + chown root:root /etc/systemd/system/iptables-restore.service systemctl daemon-reload systemctl enable iptables-restore @@ -642,38 +647,4 @@ systemctl daemon-reload systemctl enable kubelet systemctl start kubelet -# gpu boost clock -if command -v nvidia-smi &> /dev/null; then - log "INFO: nvidia-smi found" - - nvidia-smi -q > /tmp/nvidia-smi-check - if [[ "$?" 
== "0" ]]; then - sudo nvidia-smi -pm 1 # set persistence mode - sudo nvidia-smi --auto-boost-default=0 - - GPUNAME=$(nvidia-smi -L | head -n1) - log "INFO: GPU name: $GPUNAME" - - # set application clock to maximum - if [[ $GPUNAME == *"A100"* ]]; then - nvidia-smi -ac 1215,1410 - elif [[ $GPUNAME == *"V100"* ]]; then - nvidia-smi -ac 877,1530 - elif [[ $GPUNAME == *"K80"* ]]; then - nvidia-smi -ac 2505,875 - elif [[ $GPUNAME == *"T4"* ]]; then - nvidia-smi -ac 5001,1590 - elif [[ $GPUNAME == *"M60"* ]]; then - nvidia-smi -ac 2505,1177 - elif [[ $GPUNAME == *"H100"* ]]; then - nvidia-smi -ac 2619,1980 - else - echo "unsupported gpu" - fi - else - log "ERROR: nvidia-smi check failed!" - cat /tmp/nvidia-smi-check - fi -fi - log "INFO: complete!" diff --git a/templates/shared/runtime/eni-max-pods.txt b/templates/shared/runtime/eni-max-pods.txt index eca414165..0d318ac6a 100644 --- a/templates/shared/runtime/eni-max-pods.txt +++ b/templates/shared/runtime/eni-max-pods.txt @@ -161,11 +161,11 @@ c6in.12xlarge 234 c6in.16xlarge 737 c6in.24xlarge 737 c6in.2xlarge 58 -c6in.32xlarge 345 +c6in.32xlarge 394 c6in.4xlarge 234 c6in.8xlarge 234 c6in.large 29 -c6in.metal 345 +c6in.metal 394 c6in.xlarge 58 c7a.12xlarge 234 c7a.16xlarge 737 @@ -206,6 +206,11 @@ c7gn.large 29 c7gn.medium 8 c7gn.metal 737 c7gn.xlarge 58 +c7i-flex.2xlarge 58 +c7i-flex.4xlarge 234 +c7i-flex.8xlarge 234 +c7i-flex.large 29 +c7i-flex.xlarge 58 c7i.12xlarge 234 c7i.16xlarge 737 c7i.24xlarge 737 @@ -475,21 +480,21 @@ m6idn.12xlarge 234 m6idn.16xlarge 737 m6idn.24xlarge 737 m6idn.2xlarge 58 -m6idn.32xlarge 345 +m6idn.32xlarge 394 m6idn.4xlarge 234 m6idn.8xlarge 234 m6idn.large 29 -m6idn.metal 345 +m6idn.metal 394 m6idn.xlarge 58 m6in.12xlarge 234 m6in.16xlarge 737 m6in.24xlarge 737 m6in.2xlarge 58 -m6in.32xlarge 345 +m6in.32xlarge 394 m6in.4xlarge 234 m6in.8xlarge 234 m6in.large 29 -m6in.metal 345 +m6in.metal 394 m6in.xlarge 58 m7a.12xlarge 234 m7a.16xlarge 737 @@ -538,6 +543,7 @@ m7i.metal-24xl 737 
m7i.metal-48xl 737 m7i.xlarge 58 mac1.metal 234 +mac2-m1ultra.metal 234 mac2-m2.metal 234 mac2-m2pro.metal 234 mac2.metal 234 @@ -676,21 +682,21 @@ r6idn.12xlarge 234 r6idn.16xlarge 737 r6idn.24xlarge 737 r6idn.2xlarge 58 -r6idn.32xlarge 345 +r6idn.32xlarge 394 r6idn.4xlarge 234 r6idn.8xlarge 234 r6idn.large 29 -r6idn.metal 345 +r6idn.metal 394 r6idn.xlarge 58 r6in.12xlarge 234 r6in.16xlarge 737 r6in.24xlarge 737 r6in.2xlarge 58 -r6in.32xlarge 345 +r6in.32xlarge 394 r6in.4xlarge 234 r6in.8xlarge 234 r6in.large 29 -r6in.metal 345 +r6in.metal 394 r6in.xlarge 58 r7a.12xlarge 234 r7a.16xlarge 737 @@ -787,6 +793,10 @@ u-6tb1.56xlarge 737 u-6tb1.metal 147 u-9tb1.112xlarge 737 u-9tb1.metal 147 +u7i-12tb.224xlarge 737 +u7in-16tb.224xlarge 394 +u7in-24tb.224xlarge 394 +u7in-32tb.224xlarge 394 vt1.24xlarge 737 vt1.3xlarge 58 vt1.6xlarge 234