diff --git a/.github/workflows/deploy-lambdas.yaml b/.github/workflows/deploy-lambdas.yaml
new file mode 100644
index 00000000000..e6e50556a3b
--- /dev/null
+++ b/.github/workflows/deploy-lambdas.yaml
@@ -0,0 +1,95 @@
+name: Deploy lambdas to S3 and ECR
+
+on:
+  push:
+    branches:
+      - master
+    paths:
+      - '.github/workflows/deploy-lambdas.yaml'
+      - 'lambdas/**'
+
+jobs:
+  deploy-lambda-s3:
+    strategy:
+      matrix:
+        path:
+          - access_counts
+          - indexer
+          - pkgevents
+          - pkgpush
+          - pkgselect
+          - preview
+          - s3hash
+          - s3select
+          - status_reports
+          - tabular_preview
+          - transcode
+    runs-on: ubuntu-latest
+    # These permissions are needed to interact with GitHub's OIDC Token endpoint.
+    permissions:
+      id-token: write
+      contents: read
+    steps:
+      - uses: actions/checkout@v4
+      - name: Build zip
+        run: |
+          BUILDER_IMAGE=quiltdata/lambda:build-3.8
+
+          touch ./out.zip
+
+          docker run --rm \
+            --entrypoint /build_zip.sh \
+            -v "$PWD/lambdas/${{ matrix.path }}":/lambda/function:z \
+            -v "$PWD/lambdas/shared":/lambda/shared:z \
+            -v "$PWD/out.zip":/out.zip:z \
+            -v "$PWD/lambdas/scripts/build_zip.sh":/build_zip.sh:z \
+            "$BUILDER_IMAGE"
+      - name: Configure AWS credentials from Prod account
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          role-to-assume: arn:aws:iam::730278974607:role/github/GitHub-Quilt
+          aws-region: us-east-1
+      - name: Upload zips to Prod S3
+        run: |
+          s3_key="${{ matrix.path }}/${{ github.sha }}.zip"
+          ./lambdas/scripts/upload_zip.sh ./out.zip "$AWS_REGION" "$s3_key"
+      - name: Configure AWS credentials from GovCloud account
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          role-to-assume: arn:aws-us-gov:iam::313325871032:role/github/GitHub-Quilt
+          aws-region: us-gov-east-1
+      - name: Upload zips to GovCloud S3
+        run: |
+          s3_key="${{ matrix.path }}/${{ github.sha }}.zip"
+          ./lambdas/scripts/upload_zip.sh ./out.zip "$AWS_REGION" "$s3_key"
+
+  deploy-lambda-ecr:
+    strategy:
+      matrix:
+        path:
+          - molecule
+          - thumbnail
+    runs-on: ubuntu-latest
+    # These permissions are needed to interact with GitHub's OIDC Token endpoint.
+    permissions:
+      id-token: write
+      contents: read
+    steps:
+      - uses: actions/checkout@v4
+      - name: Build Docker image
+        working-directory: ./lambdas/${{ matrix.path }}
+        run: docker buildx build -t "quiltdata/lambdas/${{ matrix.path }}:${{ github.sha }}" -f Dockerfile ..
+      - name: Configure AWS credentials from Prod account
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          role-to-assume: arn:aws:iam::730278974607:role/github/GitHub-Quilt
+          aws-region: us-east-1
+      - name: Push Docker image to Prod ECR
+        run: ./lambdas/scripts/upload_ecr.sh 730278974607 "quiltdata/lambdas/${{ matrix.path }}:${{ github.sha }}"
+      - name: Configure AWS credentials from GovCloud account
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          role-to-assume: arn:aws-us-gov:iam::313325871032:role/github/GitHub-Quilt
+          aws-region: us-gov-east-1
+      - name: Push Docker image to GovCloud ECR
+        run: ./lambdas/scripts/upload_ecr.sh 313325871032 "quiltdata/lambdas/${{ matrix.path }}:${{ github.sha }}"
diff --git a/lambdas/scripts/build_zip.sh b/lambdas/scripts/build_zip.sh
new file mode 100755
index 00000000000..5a44a5989d3
--- /dev/null
+++ b/lambdas/scripts/build_zip.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+set -e
+
+# Make sure "*" expands to an empty list rather than a literal "*" if there are no matches.
+shopt -s nullglob
+
+error() {
+  echo "$@" >&2  # print message to stderr (2>&1 would be a no-op here)
+  exit 1
+}
+
+[ -f "/.dockerenv" ] || error "This should only run inside of quiltdata/lambda container."
+
+mkdir out
+cd out
+
+pip3 install -U pip setuptools
+
+# install everything into a temporary directory
+pip3 install --no-compile --no-deps -t . /lambda/shared/ -r /lambda/function/requirements.txt /lambda/function/
+python3 -m compileall -b .
+
+# add binaries
+if [ -f /lambda/function/quilt_binaries.json ]; then
+  url=$(jq -r '.s3zip' /lambda/function/quilt_binaries.json)
+  echo "Adding binary deps from $url"
+  bin_zip=$(realpath "$(mktemp)")
+  curl --fail -o "$bin_zip" "$url"
+  bin_dir="quilt_binaries"
+  mkdir "$bin_dir"
+  unzip "$bin_zip" -d "$bin_dir"
+  rm "$bin_zip"
+fi
+
+find . \( -name 'test_*' -o -name '*.py' -o -name '*.h' -o -name '*.c' -o -name '*.cc' -o -name '*.cpp' -o -name '*.exe' \) -type f -delete
+
+# pyarrow is "special":
+# if there's a "libfoo.so" and a "libfoo.so.1.2.3", then only the latter is actually used, so delete the former.
+for lib in pyarrow/*.so.*; do rm -f "${lib%%.*}.so"; done
+
+find . -name tests -type d -exec rm -r \{} \+
+find . \( -name '*.so.*' -o -name '*.so' \) -type f -exec strip \{} \+
+
+MAX_SIZE=262144000
+size=$(du -b -s . | cut -f 1)
+[[ $size -lt $MAX_SIZE ]] || error "The package size is too large: $size; must be smaller than $MAX_SIZE. Consider using docker-based deployment."
+
+zip -r - . > /out.zip
diff --git a/lambdas/scripts/upload_ecr.sh b/lambdas/scripts/upload_ecr.sh
new file mode 100755
index 00000000000..ab8f42f3831
--- /dev/null
+++ b/lambdas/scripts/upload_ecr.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+set -e
+
+error() {
+  echo "$@" >&2  # print message to stderr (2>&1 would be a no-op here)
+  exit 1
+}
+
+[[ $# == 2 ]] || error "Usage: $0 account_id image_name"
+
+account_id=$1
+image_name=$2
+
+regions=$(aws ec2 describe-regions --query "Regions[].{Name:RegionName}" --output text)
+
+for region in $regions
+do
+  docker_url=$account_id.dkr.ecr.$region.amazonaws.com
+  echo "Logging in to $docker_url..."
+  aws ecr get-login-password --region "$region" | docker login -u AWS --password-stdin "$docker_url"
+
+  echo "Pushing to $region..."
+  remote_image_name="$docker_url/$image_name"
+  docker tag "$image_name" "$remote_image_name"
+  docker push "$remote_image_name"
+done
+
diff --git a/lambdas/scripts/upload_zip.sh b/lambdas/scripts/upload_zip.sh
new file mode 100755
index 00000000000..28e5799320e
--- /dev/null
+++ b/lambdas/scripts/upload_zip.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+set -e
+
+error() {
+  echo "$@" >&2  # print message to stderr (2>&1 would be a no-op here)
+  exit 1
+}
+
+[[ $# == 3 ]] || error "Usage: $0 zip_file primary_region s3_key"
+
+zip_file=$1
+primary_region=$2
+s3_key=$3
+
+regions=$(aws ec2 describe-regions --query "Regions[].{Name:RegionName}" --output text)
+
+echo "Uploading to $primary_region..."
+aws s3 cp --acl public-read "$zip_file" --region "$primary_region" "s3://quilt-lambda-$primary_region/$s3_key"
+
+for region in $regions
+do
+  if [[ $region != "$primary_region" ]]
+  then
+    echo "Copying to $region..."
+    aws s3 cp --acl public-read \
+      --source-region "$primary_region" --region "$region" \
+      "s3://quilt-lambda-$primary_region/$s3_key" "s3://quilt-lambda-$region/$s3_key"
+  fi
+done
+