diff --git a/bin/ec2-resize-ebs b/bin/ec2-resize-ebs
new file mode 100755
index 0000000..8e7e39f
--- /dev/null
+++ b/bin/ec2-resize-ebs
@@ -0,0 +1,116 @@
+#!/bin/bash
+# Stop the EC2 instance, resize its EBS volume and reattach it, then start the instance and reassociate the original Elastic IP.
+
+source $(dirname $0)/../inc/ec2-include
+
+if [ ! $2 ]; then
+  echo "Usage: $(basename $0) INSTANCE_ID SIZE"
+  echo
+  echo "  INSTANCE_ID - The EC2 instance whose EBS volume you wish to resize."
+  echo "  SIZE - The new size for the EBS volume (in gigabytes)."
+  exit
+fi
+
+instanceid=$1
+size=$2
+
+# Get the root EBS volume id for the remote EC2 instance
+oldvolumeid=$(ec2-describe-instances $instanceid | egrep "^BLOCKDEVICE./dev/sda1" | cut -f3)
+# Get the availability zone for the remote EC2 instance
+zone=$(ec2-describe-instances $instanceid | egrep ^INSTANCE | cut -f12)
+# Check if the instance has an Elastic IP so we can reassociate it with the instance after we stop and start it
+ELASTIC_IP=$(ec2-describe-addresses | grep $instanceid | cut -f2)
+
+# Use the Elastic IP if the instance has one, otherwise fall back to its regular public IP
+if [ $ELASTIC_IP ]; then
+  INSTANCE_IP=$ELASTIC_IP
+else
+  INSTANCE_IP=$(ec2-describe-instances $instanceid | egrep "^INSTANCE.$instanceid" | cut -f4)
+fi
+
+echo "Logging into the remote server ($INSTANCE_IP) to check that the remote filesystem is ext..."
+FILESYSTEM=$(ssh -o StrictHostKeyChecking=no ubuntu@$INSTANCE_IP "df -T / | grep -v Filesystem" | awk '{ print $2 }')
+
+# Check that the filesystem is ext3, ext4, etc. We strip the last character so the check works for current and future versions of the ext filesystem
+if [ "${FILESYSTEM%?}" = "ext" ]; then
+  echo "The remote server's filesystem is $FILESYSTEM."
+  echo
+else
+  echo "Error: The remote server's filesystem is '$FILESYSTEM' but this script only supports resizing ext filesystems."
+  exit 1
+fi
+
+# Stop the instance
+echo "Stopping the remote EC2 instance ($instanceid)..."
+ec2-stop-instances $instanceid
+echo
+
+# Detach the original volume from the instance
+echo "Detaching the EBS volume ($oldvolumeid) from the EC2 instance..."
+while ! ec2-detach-volume $oldvolumeid; do sleep 1; done
+echo
+
+# Create a snapshot of the original volume
+echo "Creating a snapshot of the EBS volume ($oldvolumeid)..."
+snapshotid=$(ec2-create-snapshot $oldvolumeid --description "Backup before resizing the EBS volume to $size GB" | cut -f2)
+while ec2-describe-snapshots $snapshotid | grep -q pending; do sleep 1; done
+echo
+
+# Set the tags on the EBS snapshot
+echo "Setting tags on the EBS snapshot ($snapshotid)..."
+instancename=$(ec2-describe-tags --filter "resource-id=$instanceid" --filter "key=Name" | cut -f5)
+ec2-create-tags $snapshotid --tag "Name=$instancename" --tag "Type=root"
+echo
+
+# Create a new volume from the snapshot, specifying a larger size
+echo "Creating a new volume from the EBS snapshot ($snapshotid) with the new size of $size GB..."
+newvolumeid=$(ec2-create-volume --availability-zone $zone --size $size --snapshot $snapshotid | cut -f2)
+echo
+
+# Set the tags on the EBS volume
+echo "Setting tags on the EBS volume ($newvolumeid)..."
+ec2-create-tags $newvolumeid --tag "Name=$instancename" --tag "Type=root"
+echo
+
+# Attach the new volume to the instance
+echo "Attaching the new EBS volume ($newvolumeid) to the EC2 instance ($instanceid)..."
+ec2-attach-volume --instance $instanceid --device /dev/sda1 $newvolumeid
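+# Wait until EC2 reports the new volume as attached before starting the instance again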
+while ! ec2-describe-volumes $newvolumeid | grep -q attached; do sleep 1; done
+echo
+
+# Start the instance
+echo "Starting the EC2 instance ($instanceid)..."
+ec2-start-instances $instanceid
+while ! ec2-describe-instances $instanceid | grep -q running; do sleep 1; done
+echo
+
+# Get the IP of the EC2 instance
+INSTANCE_IP=$(ec2-describe-instances $instanceid | egrep "^INSTANCE.$instanceid" | cut -f4)
+
+# Even though the server says it's running, it can take a few seconds before there is a route to the host, so we wait 5 seconds
+sleep 5
+
+# Resize the root file system to fill the new EBS volume
+echo "Logging into the remote server ($INSTANCE_IP) to resize the root file system (/dev/xvda1) to fill the new EBS volume..."
+ssh -o StrictHostKeyChecking=no ubuntu@$INSTANCE_IP "sudo resize2fs /dev/xvda1 && df -h /"
+echo
+
+# If the instance originally had an Elastic IP attached
+if [ $ELASTIC_IP ]; then
+  # Associate the original Elastic IP
+  echo "Associating Elastic IP ($ELASTIC_IP)..."
+  ec2-associate-address $ELASTIC_IP -i $instanceid
+  # Wait for the Elastic IP to associate
+  while ! ec2-describe-addresses | grep -q $instanceid; do sleep 1; done
+  echo
+  INSTANCE_IP=$ELASTIC_IP
+fi
+
+# Delete the old EBS volume
+echo "Deleting the old EBS volume..."
+ec2-delete-volume $oldvolumeid
+echo
+
+# Turn on delete-on-termination
+echo "Turning on delete-on-termination so that the new EBS volume will be deleted when the EC2 instance is terminated, as is usual for EC2 root volumes..."
+ec2-modify-instance-attribute --block-device-mapping /dev/sda1=$newvolumeid::true $instanceid
diff --git a/bin/i-associate-address b/bin/i-associate-address
new file mode 100755
index 0000000..fbda37b
--- /dev/null
+++ b/bin/i-associate-address
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Associate an Elastic IP address with this instance
+
+source $(dirname $0)/../inc/ec2-include
+
+if [ ! $1 ]; then
+  echo "Usage: $(basename $0) IP"
+  echo
+  echo "  IP - The Elastic IP to associate with this instance."
+  echo "       Specifying \"new\" will allocate a new IP and associate it with this instance."
+  exit
+fi
+
+IP=$1
+
+ATTACHED_IP=( $(i-describe-address) )
+
+# If an Elastic IP is already associated with this EC2 instance
+if [ ! -z "${ATTACHED_IP[1]}" ]; then
+  echo "This EC2 instance already has an Elastic IP (${ATTACHED_IP[1]}) associated."
+  echo "First disassociate it with 'i-disassociate-address'."
+  exit
+else
+  # If the user specified they want a new Elastic IP created
+  if [ "$IP" = "new" ]; then
+    echo "Creating IP Address..."
+    OUTPUT=( $(ec2-allocate-address) )
+    echo ${OUTPUT[*]}
+    IP=${OUTPUT[1]}
+    echo
+  else
+    # If the Elastic IP is associated with another EC2 instance
+    IPS_INSTANCE_ID=( $(ec2-describe-addresses | grep "ADDRESS" | grep $IP) )
+
+    if [[ ${IPS_INSTANCE_ID[2]} == *i-* ]]; then
+      echo "That Elastic IP is already associated with another EC2 instance (${IPS_INSTANCE_ID[2]})."
+      exit
+    fi
+  fi
+fi
+
+echo "Associating IP Address..."
+ec2-associate-address -i $INSTANCE_ID $IP
+
+# Make the script wait until the network connection is back up before we continue.
+perl -MIO::Socket::INET -e 'until(new IO::Socket::INET("169.254.169.254:80")){print"Waiting for network connection after associating an Elastic IP...\n";sleep 1}'
diff --git a/bin/i-create-ami b/bin/i-create-ami
new file mode 100755
index 0000000..8abc6a1
--- /dev/null
+++ b/bin/i-create-ami
@@ -0,0 +1,74 @@
+#!/bin/bash
+# Clean Instance, Bundle Instance, Upload Bundle to S3, Register AMI
+
+source $(dirname $0)/../inc/ec2-include
+
+if [ !
$3 ]; then + echo "Usage: $(basename $0) BUCKET PREFIX CLEAN" + echo + echo " BUCKET - The S3 bucket where the bundled AMI should be stored." + echo " PREFIX - The filename prefix for bundled AMI files." + echo " CLEAN - Should we clean potentially private data from this instance (true/false)?" + exit +fi + +BUCKET=$1 +PREFIX=$2 +CLEAN=$3 +ACCOUNT_NUMBER=$(cat /root/.ec2/account-number) +ACCESS_KEY_ID=$(cat /root/.ec2/access-key-id) +SECRET_ACCESS_KEY=$(cat /root/.ec2/secret-access-key) + +if [ $(uname -m) = 'x86_64' ]; then + ARCH=x86_64 +else + ARCH=i386 +fi + +# For security reasons we remove any files from the instance that may contain sensitive information +if [ ! $CLEAN = "false" ]; then + echo "Cleaning the instance ready for bundling..." + rm -f /root/.*hist* $HOME/.*hist* + rm -f /root/.my.cnf + rm -rf /root/.ec2 + rm -f /root/.ssh/* + rm -f /var/log/*.gz + rm -f /var/ec2/* + find /var/log -name mysql -prune -o -type f -print | while read i; do sudo cp /dev/null $i; done +fi + +# Remove any existing AMI bundles, and create a location for new AMI bundles +echo +if [ -d /mnt/ami ]; then + if [ "$(ls -A /mnt/ami)" ]; then # Make sure there's files in the directory + echo "Removing existing AMI Bundle from /mnt/ami to avoid conflicts..." + rm /mnt/ami/*; + fi +else + echo "Creating a location at /mnt/ami for the bundled instance..." + mkdir /mnt/ami +fi + +# Bundle the instance +echo +echo "Bundling the instance..." +sudo -E ec2-bundle-vol \ + -r $ARCH \ + -d /mnt/ami \ + -p $PREFIX \ + -u $ACCOUNT_NUMBER \ + -k /root/.ec2/pk.pem \ + -c /root/.ec2/cert.pem \ + -s 10240 \ + --ec2cert /etc/ec2/amitools/cert-ec2.pem \ + -e /tmp,/mnt,/root/.ssh,/root/.ec2 + +# Upload bundle to an S3 Bucket +echo +echo "Uploading instance bundle to S3..." +ec2-upload-bundle -b $BUCKET -m /mnt/ami/$PREFIX.manifest.xml -a $ACCESS_KEY_ID -s $SECRET_ACCESS_KEY + +# Register the AMI +echo +echo "Registering the AMI..." +ec2-register $BUCKET/$PREFIX.manifest.xml diff --git a/bin/i-delete-volume b/bin/i-delete-volume new file mode 100755 index 0000000..01f6cc2 --- /dev/null +++ b/bin/i-delete-volume @@ -0,0 +1,18 @@ +#!/bin/bash +# Unmount, Detach and Delete an EBS Volume + +source $(dirname $0)/../inc/ec2-include + +if [ ! $1 ]; then + echo "Usage: $(basename $0) MOUNT_POINT" + echo + echo " MOUNT_POINT - The mount location of the volume that you wish to delete. (E.g. /ebs)" + exit +fi + +echo "Detaching EBS Volume..." +source i-detach-volume $1 +echo + +echo "Deleting EBS Volume..." +ec2-delete-volume $VOLUME_ID diff --git a/bin/i-describe-address b/bin/i-describe-address new file mode 100755 index 0000000..199b31d --- /dev/null +++ b/bin/i-describe-address @@ -0,0 +1,6 @@ +#!/bin/bash +# Describe the Elastic IP attached to this instance + +source $(dirname $0)/../inc/ec2-include + +ec2-describe-addresses | grep "ADDRESS" | grep $INSTANCE_ID diff --git a/bin/i-describe-volumes b/bin/i-describe-volumes new file mode 100755 index 0000000..b2aeba8 --- /dev/null +++ b/bin/i-describe-volumes @@ -0,0 +1,6 @@ +#!/bin/bash +# Describe the EBS Volumes attached to this instance + +source $(dirname $0)/../inc/ec2-include + +ec2-describe-volumes | grep -B1 $INSTANCE_ID diff --git a/bin/i-detach-volume b/bin/i-detach-volume new file mode 100755 index 0000000..bb8bd12 --- /dev/null +++ b/bin/i-detach-volume @@ -0,0 +1,108 @@ +#!/bin/bash +# Unmount, and Detach an EBS Volume + +source $(dirname $0)/../inc/ec2-include + +if [ ! 
$1 ]; then
+  echo "Usage: $(basename $0) MOUNT_POINT"
+  echo
+  echo "  MOUNT_POINT - The mount location of the volume that you wish to detach. (E.g. /ebs)"
+  exit
+fi
+
+# Check if the specified mount point exists
+if [ -d "$1" ]; then
+  # Unify mount point to ensure it will match the 'df' output
+  # For example ../mountpoint/ will become /mountpoint
+  MOUNT_POINT=$(cd "$1"; pwd)
+
+  # Find the device belonging to this mount point
+  DEVICE=( $(df $MOUNT_POINT | grep " $MOUNT_POINT") )
+  DEVICE=${DEVICE[0]}
+else
+  MOUNT_POINT=$1
+fi
+
+if [ $DEVICE ]; then
+  echo "Device location for '$MOUNT_POINT' is '$DEVICE'"
+  echo
+else
+  echo "Warning: '$MOUNT_POINT' isn't currently mounted."
+  echo "         Type 'df' to list currently mounted devices."
+  echo
+  UNMOUNTED="true"
+
+  # Check /etc/fstab in case there's an entry for the device
+  echo "Checking /etc/fstab for an associated device..."
+  DEVICE=( $(grep " $MOUNT_POINT " /etc/fstab) )
+  DEVICE=${DEVICE[0]}
+
+  if [ $DEVICE ]; then
+    echo "Device location for '$MOUNT_POINT' is '$DEVICE'"
+    echo
+  else
+    echo "Error: Couldn't locate an associated device for '$MOUNT_POINT'"
+    echo
+    exit 1
+  fi
+fi
+
+# Find the Volume ID of the device
+echo "Locating EBS Volume ID..."
+VOLUME_ID=( $(i-describe-volumes | grep "ATTACHMENT" | grep /dev/sdk${DEVICE: -1} | grep "attached") )
+VOLUME_ID=${VOLUME_ID[1]}
+
+if [ ! $VOLUME_ID ]; then
+  echo "Error: We couldn't associate '/dev/sdk${DEVICE: -1}' with an EBS Volume. Please ensure the device exists."
+  echo "       Type 'ec2-describe-volumes' to list attached EBS Volumes and their device locations."
+else
+  echo "Volume ID is '$VOLUME_ID'"
+fi
+
+if [ ! $UNMOUNTED ]; then
+  # Unmount the EBS Volume
+  echo
+  echo "Unmounting device..."
+  umount $DEVICE
+  echo
+fi
+
+# Remove the EBS Volume from /etc/fstab
+FSTAB_CONTENT=$(grep " $MOUNT_POINT " /etc/fstab);
+
+if [ "$FSTAB_CONTENT" ]; then
+  echo "Removing the following entries from fstab:"
+  echo "$FSTAB_CONTENT"
+  cp /etc/fstab /tmp/fstab.bak
+  grep -v " $MOUNT_POINT " /tmp/fstab.bak > /etc/fstab
+else
+  echo "No mount entries found in /etc/fstab"
+fi
+
+echo
+
+# Remove the crontab entry that performs EBS Snapshots for this Volume
+CRONTAB_CONTENT=$(grep "i-snapshot-volume $MOUNT_POINT " /etc/cron.d/snapshot-ebs);
+
+if [ "$CRONTAB_CONTENT" ]; then
+  echo "Removing the following entries from the '/etc/cron.d/snapshot-ebs' crontab:"
+  echo "$CRONTAB_CONTENT"
+  cp /etc/cron.d/snapshot-ebs /tmp/ebs-snapshots.bak
+  grep -v "i-snapshot-volume $MOUNT_POINT " /tmp/ebs-snapshots.bak > /etc/cron.d/snapshot-ebs
+else
+  echo "Cron entry wasn't found in /etc/cron.d/snapshot-ebs"
+fi
+
+echo
+
+# Detach the EBS Volume
+if [ "$VOLUME_ID" ]; then
+  echo "Detaching EBS Volume..."
+  # Wait for the EBS volume to detach before doing anything else
+  ec2-detach-volume $VOLUME_ID --instance $INSTANCE_ID
+  while [ ! -z "$(i-describe-volumes | grep "ATTACHMENT" | grep /dev/sdk${DEVICE: -1})" ]; do sleep 1; done
+  echo
+fi
+
+echo -e "\E[31mPlease note the directory '$MOUNT_POINT' still exists but can be manually removed if unneeded.\033[0m"
diff --git a/bin/i-disassociate-address b/bin/i-disassociate-address
new file mode 100755
index 0000000..7e3d3a0
--- /dev/null
+++ b/bin/i-disassociate-address
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Disassociate the Elastic IP address from this instance.
+
+source $(dirname $0)/../inc/ec2-include
+
+# Look up the Elastic IP currently associated with this instance
+IP=$(i-describe-address | cut -f2)
+
+echo "Disassociating IP ($IP) from instance..."
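+# Note: this only removes the association; the Elastic IP itself stays allocated
+# to the account until it is released with ec2-release-address.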
+ec2-disassociate-address $IP
diff --git a/bin/i-set-hostname b/bin/i-set-hostname
new file mode 100755
index 0000000..86226d0
--- /dev/null
+++ b/bin/i-set-hostname
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Set the hostname of an instance
+
+source $(dirname $0)/../inc/ec2-include
+
+if [ ! $1 ]; then
+  echo "Usage: $(basename $0) HOSTNAME [HOSTNAME ...]"
+  echo
+  echo "  HOSTNAME - A list of hostnames to associate with this instance, separated by spaces."
+  echo "             The first hostname on the line will be treated as the main hostname and placed"
+  echo "             in /etc/hostname; it and any subsequent hostnames will be placed in /etc/hosts."
+  exit
+fi
+
+hostnames=( $* )
+
+echo "Setting main hostname in /etc/hostname..."
+echo ${hostnames[0]} > /etc/hostname
+echo
+
+echo "Setting hostnames in /etc/hosts..."
+echo -e "\n# Hosts added by EC2 user-data" >> /etc/hosts
+echo "127.0.0.1 ${hostnames[*]}" >> /etc/hosts
+echo
+
+echo "Loading the hostname changes..."
+hostname -F /etc/hostname
+echo
+
+echo -e "\E[31mKeep in mind this doesn't set a reverse DNS record. For that you will need an Elastic IP and to fill out the form at:\033[0m"
+echo -e "\E[31mhttps://aws-portal.amazon.com/gp/aws/html-forms-controller/contactus/ec2-email-limit-rdns-request\033[0m"
+echo
+echo -e "\E[31mIf you had existing hostnames set in /etc/hosts you may want to remove them to keep the file clean.\033[0m"
diff --git a/bin/s3-delete b/bin/s3-delete
new file mode 100755
index 0000000..b44204f
--- /dev/null
+++ b/bin/s3-delete
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+# Pragmas
+set -u
+set -e
+
+function printHelpAndExit
+{
+  exitCode=$1
+  printf "%s: version %s\n" "$weAreKnownAs" "$version"
+  printf "Part of s3-bash. Latest version is at %s\n" 'http://code.google.com/p/s3-bash/'
+  printf "Usage %s: -h\n" "$weAreKnownAs"
+  printf "Usage %s: [-vS] [-H file] [-a file] -k key -s file url\n" "$weAreKnownAs"
+  printf " Option\tType\tRequirement\tDescription\n"
+  printf " -h\t\tprecedent\tprint this help\n"
+  printf " -v\t\toptional\tverbose output\n"
+  printf " -k\tstring\tmandatory\tAWS Access Key Id (Will also look in /root/.ec2/access-key-id)\n"
+  printf " -s\tfile\tmandatory\tAWS Secret Access Key Id File (Will also look in /root/.ec2/secret-access-key)\n"
+  printf " -S\t\toptional\tUse https\n"
+  printf " -H\tfile\toptional\tFile to write response headers to\n"
+  printf " -a\tfile\toptional\tFile to read Amazon custom headers from (X-Amz-Date is not allowed)\n"
+  printf " \turl\tmandatory\trelative url including bucket name and leading slash, eg /bucket/path/to/object?acl. Assumed to be already encoded\n"
+  printf "\n"
+  printf "Notes\n"
+  printf "Specify proxies using a ~/.curlrc file\n"
+  exit $exitCode
+}
+
+function parseOptions
+{
+  verbose=""
+  url=""
+  awsAccessKeyId=""
+  awsAccessSecretKeyIdFile=""
+  protocol="http"
+  dumpHeaderFile="/dev/null"
+  amazonHeaderFile="/dev/null"
+  while getopts "hvk:s:SH:a:" optionName; do
+    case "$optionName" in
+      h) printHelpAndExit 0;;
+      v) verbose="-v";;
+      k) awsAccessKeyId="$OPTARG";;
+      s) awsAccessSecretKeyIdFile="$OPTARG"
+         if [ ! -e "$awsAccessSecretKeyIdFile" ]; then
+           printErrorHelpAndExit "AWS Secret Key Id file does not exist" $userSpecifiedDataErrorExitCode
+         fi;;
+      S) protocol="https";;
+      H) dumpHeaderFile="$OPTARG";;
+      a) amazonHeaderFile="$OPTARG";;
+      [?]) printErrorHelpAndExit "Option not recognised" $userSpecifiedDataErrorExitCode;;
+    esac
+  done
+  if [ 1 -eq $OPTIND ]; then
+    printErrorHelpAndExit "Internal Error: parseOptions or a parent method in the call stack was not called with $"@"."
$internalErrorExitCode + fi + let "toShift = $OPTIND - 1" + shift $toShift + if [ $# -eq 0 ]; then + printErrorHelpAndExit "URL not specified" $userSpecifiedDataErrorExitCode + fi + url="$1" + verifyUrl + + if [ -z "$awsAccessSecretKeyIdFile" ] && [ ! -f "/root/.ec2/secret-access-key" ]; then + printErrorHelpAndExit "AWS Secret Access Key file not specified" $userSpecifiedDataErrorExitCode + elif [ -z "$awsAccessKeyId" ] && [ ! -f "/root/.ec2/access-key-id" ]; then + printErrorHelpAndExit "AWS Access Key Id not specified" $userSpecifiedDataErrorExitCode + fi +} + +function prepareToRunCurl +{ + readonly verb="DELETE" + readonly verbToPass="-X DELETE" + readonly contentMD5="" + readonly contentType="" +} + +readonly weAreKnownAs="$(basename $0)" + +readonly commonFunctions="$(dirname $0)/../inc/s3-include" +if [ -e "$commonFunctions" ]; then + source "$commonFunctions" +else + version="Unknown" + invalidEnvironmentExitCode=4 + printHelpAndExit "$weAreKnownAs: Could not locate file s3-common-functions" $invalidEnvironmentExitCode +fi + +main "$@" diff --git a/bin/s3-get b/bin/s3-get new file mode 100755 index 0000000..21d36cb --- /dev/null +++ b/bin/s3-get @@ -0,0 +1,94 @@ +#!/bin/bash + +# Pragmas +set -u +set -e + +function printHelpAndExit +{ + exitCode=$1 + printf "%s: version %s\n" "$weAreKnownAs" "$version" + printf "Part of s3-bash. Latest version is at %s\n" 'http://code.google.com/p/s3-bash/' + printf "Usage %s: -h\n" "$weAreKnownAs" + printf "Usage %s: [-vS] [-H file] [-a file] -k key -s file url\n" "$weAreKnownAs" + printf " Option\tType\tRequirement\tDescription\n" + printf " -h\t\tprecedent\tprint this help\n" + printf " -v\t\toptional\tverbose output\n" + printf " -k\tstring\tmandatory\tAWS Access Key Id (Will also look in /root/.ec2/access-key-id)\n" + printf " -s\tfile\tmandatory\tAWS Secret Access Key Id File (Will also look in /root/.ec2/secret-access-key)\n" + printf " -S\t\toptional\tUse https\n" + printf " -H\tfile\toptional\tFile to write response headers to\n" + printf " -a\tfile\toptional\tFile to read Amazon custom headers from (X-Amz-Date is not allowed)\n" + printf " \turl\tmandatory\trelative url including bucket name and leading slash, eg /bucket/path/to/object?acl. Assumed to be already encoded\n" + printf "\n" + printf "Notes\n" + printf "Specify proxies using a ~/.curlrc file\n" + printf "Content is returned on stdout\n" + exit $exitCode +} + +function parseOptions +{ + verbose="" + url="" + awsAccessKeyId="" + awsAccessSecretKeyIdFile="" + protocol="http" + dumpHeaderFile="/dev/null" + amazonHeaderFile="/dev/null" + while getopts "hvk:s:SH:a:" optionName; do + case "$optionName" in + h) printHelpAndExit 0;; + v) verbose="-v";; + k) awsAccessKeyId="$OPTARG";; + s) awsAccessSecretKeyIdFile="$OPTARG" + if [ ! -e "$awsAccessSecretKeyIdFile" ]; then + printErrorHelpAndExit "AWS Secret Key Id file does not exist" $userSpecifiedDataErrorExitCode + fi;; + S) protocol="https";; + H) dumpHeaderFile="$OPTARG";; + a) amazonHeaderFile="$OPTARG" + ;; + [?]) printErrorHelpAndExit "Option not recognised" $userSpecifiedDataErrorExitCode;; + esac + done + + if [ 1 -eq $OPTIND ]; then + printErrorHelpAndExit "Internal Error: parseOptions or a parent method in the call stack was not called with $"@"." $internalErrorExitCode + fi + let "toShift = $OPTIND - 1" + shift $toShift + + if [ $# -eq 0 ]; then + printErrorHelpAndExit "URL not specified" $userSpecifiedDataErrorExitCode + fi + url="$1" + verifyUrl + + if [ -z "$awsAccessSecretKeyIdFile" ] && [ ! 
-f "/root/.ec2/secret-access-key" ]; then + printErrorHelpAndExit "AWS Secret Access Key file not specified" $userSpecifiedDataErrorExitCode + elif [ -z "$awsAccessKeyId" ] && [ ! -f "/root/.ec2/access-key-id" ]; then + printErrorHelpAndExit "AWS Access Key Id not specified" $userSpecifiedDataErrorExitCode + fi +} + +function prepareToRunCurl +{ + readonly verb="GET" + readonly verbToPass="--get" + readonly contentMD5="" + readonly contentType="" +} + +readonly weAreKnownAs="$(basename $0)" + +readonly commonFunctions="$(dirname $0)/../inc/s3-include" +if [ -e "$commonFunctions" ]; then + source "$commonFunctions" +else + version="Unknown" + invalidEnvironmentExitCode=4 + printErrorHelpAndExit "$weAreKnownAs: Could not locate file s3-common-functions" $invalidEnvironmentExitCode +fi + +main "$@" diff --git a/bin/s3-put b/bin/s3-put new file mode 100755 index 0000000..c43be75 --- /dev/null +++ b/bin/s3-put @@ -0,0 +1,103 @@ +#!/bin/bash + +# Pragmas +set -u +set -e + +function printHelpAndExit +{ + exitCode=$1 + printf "%s: version %s\n" "$weAreKnownAs" "$version" + printf "Part of s3-bash. Latest version is at %s\n" 'http://code.google.com/p/s3-bash/' + printf "Usage %s: -h\n" "$weAreKnownAs" + printf "Usage %s: [-vS] [-H file] [-a file] -k key -s file -T file url\n" "$weAreKnownAs" + printf " Option\tType\tRequirement\tDescription\n" + printf " -h\t\tprecedent\tprint this help\n" + printf " -v\t\toptional\tverbose output\n" + printf " -k\tstring\tmandatory\tAWS Access Key Id (Will also look in /root/.ec2/access-key-id)\n" + printf " -s\tfile\tmandatory\tAWS Secret Access Key Id File (Will also look in /root/.ec2/secret-access-key)\n" + printf " -T\tfile\tmandatory\tFile (or stdin with -) to PUT\n" + printf " -S\t\toptional\tUse https\n" + printf " -H\tfile\toptional\tFile to write response headers to\n" + printf " -a\tfile\toptional\tFile to read Amazon custom headers from (X-Amz-Date is not allowed)\n" + printf " -c\tMIME\toptional\tMIME Content type. Default is text/plain\n" + printf " \turl\tmandatory\trelative url including bucket name and leading slash, eg /bucket/path/to/object?acl. Assumed to be already encoded\n" + printf "\n" + printf "Notes\n" + printf "Specify proxies using a ~/.curlrc file\n" + printf "Specify content to PUT using stdin using option -T -\n" + exit $exitCode +} + +function parseOptions +{ + verbose="" + url="" + awsAccessKeyId="" + awsAccessSecretKeyIdFile="" + protocol="http" + fileToUpload="" + dumpHeaderFile="/dev/null" + amazonHeaderFile="/dev/null" + contentType="text/plain" + while getopts "hvk:s:SH:T:a:c:" optionName; do + case "$optionName" in + h) printHelpAndExit 0;; + v) verbose="-v";; + k) awsAccessKeyId="$OPTARG";; + s) awsAccessSecretKeyIdFile="$OPTARG" + if [ ! -e "$awsAccessSecretKeyIdFile" ]; then + printErrorHelpAndExit "AWS Secret Key Id file does not exist" $userSpecifiedDataErrorExitCode + fi;; + S) protocol="https";; + H) dumpHeaderFile="$OPTARG";; + T) fileToUpload="$OPTARG";; + a) amazonHeaderFile="$OPTARG";; + c) contentType="$OPTARG";; + [?]) printErrorHelpAndExit "Option not recognised" $userSpecifiedDataErrorExitCode;; + esac + done + if [ 1 -eq $OPTIND ]; then + printErrorHelpAndExit "Internal Error: parseOptions or a parent method in the call stack was not called with $"@"." $internalErrorExitCode + fi + let "toShift = $OPTIND - 1" + shift $toShift + if [ $# -eq 0 ]; then + printErrorHelpAndExit "URL not specified" $userSpecifiedDataErrorExitCode + fi + url="$1" + verifyUrl + + if [ -z "$awsAccessSecretKeyIdFile" ] && [ ! 
-f "/root/.ec2/secret-access-key" ]; then + printErrorHelpAndExit "AWS Secret Access Key file not specified" $userSpecifiedDataErrorExitCode + elif [ -z "$awsAccessKeyId" ] && [ ! -f "/root/.ec2/access-key-id" ]; then + printErrorHelpAndExit "AWS Access Key Id not specified" $userSpecifiedDataErrorExitCode + elif [ -z "$fileToUpload" ]; then + printErrorHelpAndExit "File to upload not specified" $userSpecifiedDataErrorExitCode + fi +} + +function prepareToRunCurl +{ + readonly verb="PUT" + if [ ! "-" = "$fileToUpload" ]; then + readonly contentMD5="$(base64EncodedMD5 "$fileToUpload")" + readonly verbToPass="-T \"$fileToUpload\"" + else + readonly contentMD5="" + readonly verbToPass="-T -" + fi +} + +readonly weAreKnownAs="$(basename $0)" + +readonly commonFunctions="$(dirname $0)/../inc/s3-include" +if [ -e "$commonFunctions" ]; then + source "$commonFunctions" +else + version="Unknown" + invalidEnvironmentExitCode=4 + printErrorHelpAndExit "$weAreKnownAs: Could not locate file s3-common-functions" $invalidEnvironmentExitCode +fi + +main "$@" diff --git a/inc/s3-include b/inc/s3-include new file mode 100755 index 0000000..04ed413 --- /dev/null +++ b/inc/s3-include @@ -0,0 +1,329 @@ +#!/bin/bash + +# Pragmas +set -u +set -e + +# Constants +readonly version="0.02" +readonly userSpecifiedDataErrorExitCode=1 +readonly invalidCommandLineOption=2 +readonly internalErrorExitCode=3 +readonly invalidEnvironmentExitCode=4 +readonly ipadXorByte=0x36 +readonly opadXorByte=0x5c + +# Command-like aliases +readonly sha1="openssl dgst -sha1 -binary" +readonly base64encode="openssl enc -base64 -e -in" +readonly base64decode="openssl enc -base64 -d -in" + +# Globals +declare -a temporaryFiles + +function base64EncodedMD5 +{ + openssl dgst -md5 -binary "$1" | openssl enc -e -base64 +} + +function printErrorMessage +{ + printf "%s: %s\n" "$1" "$2" 1>&2 +} + +function printErrorHelpAndExit +{ + printErrorMessage "$weAreKnownAs" "$1" + printHelpAndExit $2 +} + +function checkProgramIsInEnvironment +{ + if [ ! -x "$(which $1)" ]; then + printErrorHelpAndExit "Environment Error: $1 not found on the path or not executable" $invalidEnvironmentExitCode + fi +} + +# Do not use this from directly. Due to a bug in bash, array assignments do not work when the function is used with command substitution +function createTemporaryFile +{ + local temporaryFile="$(mktemp "$temporaryDirectory/$$.$1.XXXXXXXX")" || printErrorHelpAndExit "Environment Error: Could not create a temporary file. Please check you /tmp folder permissions allow files and folders to be created and disc space." $invalidEnvironmentExitCode + local length="${#temporaryFiles[@]}" + temporaryFiles[$length]="$temporaryFile" +} + +function mostRecentTemporaryFile +{ + local length="${#temporaryFiles[@]}" + local lastIndex + ((lastIndex = --length)) + echo "${temporaryFiles[$lastIndex]}" +} + +function deleteTemporaryFile +{ + rm -f "$1" || printErrorHelpAndExit "Environment Error: Could not delete a temporary file ($1)." $invalidEnvironmentExitCode +} + +function removeTemporaryFiles +{ + length="${#temporaryFiles[@]}" + if [ $length -eq 0 ]; then + return + fi + for temporaryFile in ${temporaryFiles[@]}; do + deleteTemporaryFile "$temporaryFile" + done + temporaryFiles=() + length="${#temporaryFiles[@]}" +} + +function checkEnvironment +{ + programs=(openssl curl od dd printf sed awk sort mktemp rm grep cp ls env bash) + for program in "${programs[@]}"; do + checkProgramIsInEnvironment "$program" + done + + local temporaryFolder="${TMPDIR:-/tmp}" + if [ ! 
-x "$temporaryFolder" ]; then
+    printErrorHelpAndExit "Environment Error: The temporary directory ($temporaryFolder) does not exist. Please set the TMPDIR environment variable to your temporary directory" $invalidEnvironmentExitCode
+  fi
+  readonly temporaryDirectory="$temporaryFolder/s3-bash/$weAreKnownAs"
+  mkdir -p "$temporaryDirectory" || printErrorHelpAndExit "Environment Error: Could not create a temporary directory ($temporaryDirectory). Please check that your /tmp folder permissions allow files and folders to be created and that you have sufficient disc space" $invalidEnvironmentExitCode
+
+  # Check we can create and delete temporary files
+  createTemporaryFile "check"
+  temporaryFileCheck="$(mostRecentTemporaryFile)"
+  echo "Checking we can write to temporary files. If this is still here then we could not delete temporary files." > "$temporaryFileCheck"
+  removeTemporaryFiles
+}
+
+function setErrorTraps
+{
+  trap "removeTemporaryFiles; exit $internalErrorExitCode" INT TERM EXIT
+}
+
+function unsetErrorTraps
+{
+  trap - INT TERM EXIT
+}
+
+function verifyUrl
+{
+  if [ -z "$url" ]; then
+    printErrorHelpAndExit "URL not specified" $userSpecifiedDataErrorExitCode
+  elif echo $url | grep -q http://; then
+    printErrorHelpAndExit "URL starts with http://" $userSpecifiedDataErrorExitCode
+  elif echo $url | grep -q https://; then
+    printErrorHelpAndExit "URL starts with https://" $userSpecifiedDataErrorExitCode
+  elif echo $url | grep -vq ^/; then
+    printErrorHelpAndExit "URL does not start with /" $userSpecifiedDataErrorExitCode
+  fi
+}
+
+function appendHash
+{
+  local fileToHash="$1"
+  local fileToWriteTo="$2"
+  $sha1 "$fileToHash" >> "$fileToWriteTo"
+}
+
+function writeHash
+{
+  local fileToHash="$1"
+  local fileToWriteTo="$2"
+  $sha1 -out "$fileToWriteTo" "$fileToHash"
+}
+
+function checkAwsKey
+{
+  local originalKeyFile="$1"
+  local keySize="$(ls -l "$originalKeyFile" | awk '{ print $5 }')"
+  if [ ! $keySize -eq 40 ]; then
+    printErrorHelpAndExit "We do not understand Amazon AWS secret keys which are not 40 bytes long. Have you included a carriage return or line feed by mistake at the end of the secret key file?"
$userSpecifiedDataErrorExitCode + fi +} + +function padDecodedKeyTo +{ + local originalKeyFile="$1" + local keyFile="$2" + cp "$originalKeyFile" "$keyFile" + + local keySize=$(ls -l "$keyFile" | awk '{ print $5 }') + if [ $keySize -lt 64 ]; then + local zerosToWrite=$((64 - $keySize)) + dd if=/dev/zero of=$keyFile bs=1 count=$zerosToWrite seek=$keySize 2> /dev/null + elif [ $keySize -gt 64 ]; then + echo "Warning: Support for hashing keys bigger than the SHA1 block size of 64 bytes is untested" 1>&2 + writeHash "$originalKeyFile" "$keyFile" + local keySize=$(ls -l "$keyFile" | awk '{ print $5 }') + if [ $keySize -lt 64 ]; then + local zerosToWrite=$((64 - $keySize)) + dd if=/dev/zero of=$keyFile bs=1 count=$zerosToWrite seek=$keySize 2> /dev/null + fi + exit 1 + else + : + fi +} + +function writeLongAsByte +{ + local byte="$1" + local file="$2" + printf "\\$(printf "%o" $byte)" >> "$file" +} + +function readBytesAndXorAndWriteAsBytesTo +{ + local inputFile="$1" + local xorByte=$2 + local outputFile="$3" + + od -v -A n -t uC "$inputFile" | awk '{ OFS="\n"; for (i = 1; i <= NF; i++) print $i }' | + while read byte; do + ((xord = byte ^ xorByte)) + writeLongAsByte $xord "$outputFile" + done +} + +function writeHexByte +{ + local byte="$1" + local file="$2" + printf "\\$(printf "%o" 0x$byte)" >> "$file" +} + +function writeHexString +{ + local hexString="$1" + for byte in $(echo $hexString | sed 's/../& /g'); do + writeHexByte "$byte" "$2" + done +} + +function writeStringToSign +{ + local outputFile="$1" + echo $verb >> "$outputFile" + echo "$contentMD5" >> "$outputFile" + echo "$contentType" >> "$outputFile" + echo "$currentDateTime" >> "$outputFile" + + writeStringToSignAmazonHeaders "$outputFile" + + urlPath="$(echo "$url" | awk 'BEGIN { FS="[?]"} { print $1 }')" + urlQueryString="$(echo "$url" | awk 'BEGIN { FS="[?]"} { print $2 }')" + printf "$urlPath" >> "$outputFile" + if [ "$urlQueryString" = "acl" ] || [ "$urlQueryString" = "torrent" ]; then + printf "?" 
>> "$outputFile" + printf "$urlQueryString" >> "$outputFile" + fi +} + +function writeStringToSignAmazonHeaders() +{ + local outputFile="$1" + + #Convert all headers to lower case + #sort + #Strip ": " to ":" + #Add LF to each header + awk 'BEGIN { FS=": " } NF == 2 { print tolower($1) ":" $2 }' "$amazonHeaderFile" | sort >> "$outputFile" + #TODO: RFC 2616, section 4.2 (combine repeated headers' values) + #TODO: Unfold long lines (not supported elsewhere) +} + +function computeAwsAuthorizationHeader +{ + # If the Secret Key File wasn't specified as a parameter set its default location + if [ -z "$awsAccessSecretKeyIdFile" ]; then + awsAccessSecretKeyIdFile="/root/.ec2/secret-access-key" + fi + + checkAwsKey "$awsAccessSecretKeyIdFile" + + createTemporaryFile "key" + local tempKeyFile="$(mostRecentTemporaryFile)" + + createTemporaryFile "ipad" + local ipadHashingFile="$(mostRecentTemporaryFile)" + + createTemporaryFile "opad" + local opadHashingFile="$(mostRecentTemporaryFile)" + + createTemporaryFile "HMAC-SHA1" + local hmacSha1File="$(mostRecentTemporaryFile)" + + padDecodedKeyTo "$awsAccessSecretKeyIdFile" "$tempKeyFile" + readBytesAndXorAndWriteAsBytesTo "$tempKeyFile" ipadXorByte "$ipadHashingFile" + + writeStringToSign "$ipadHashingFile" + + readBytesAndXorAndWriteAsBytesTo "$tempKeyFile" opadXorByte "$opadHashingFile" + appendHash "$ipadHashingFile" "$opadHashingFile" + writeHash "$opadHashingFile" "$hmacSha1File" + + local signature="$($base64encode "$hmacSha1File")" + + # If the Secret Key File wasn't specified as a parameter set its default location + if [ -z "$awsAccessKeyId" ]; then + awsAccessKeyId=`cat /root/.ec2/access-key-id` + fi + + echo "Authorization: AWS $awsAccessKeyId:$signature" +} + +function writeAmazonHeadersForCurl +{ + if [ ! -e "$amazonHeaderFile" ]; then + printErrorHelpAndExit "Amazon Header file does not exist" $userSpecifiedDataErrorExitCode + elif grep -q ^X-Amz-Date: "$amazonHeaderFile"; then + printErrorHelpAndExit "X-Amz-Date header not allowed" $userSpecifiedDataErrorExitCode + fi + # Consider using sed... + awk 'BEGIN { ORS=" "; FS="\0" } { print "--header \"" $1 "\""}' "$amazonHeaderFile" >> "$1" +} + +function runCurl +{ + local verbAndAnyData="$1" + local fullUrl="$protocol://s3.amazonaws.com$url" + createTemporaryFile "curl" + local tempCurlCommand="$(mostRecentTemporaryFile)" + local cleanUpCommand="rm -f "$tempCurlCommand"" + + echo "#! /usr/bin/env bash" >> "$tempCurlCommand" + printf "curl %s %s --dump-header \"%s\" " "$verbose" "$verbAndAnyData" "$dumpHeaderFile" >> "$tempCurlCommand" + writeAmazonHeadersForCurl "$tempCurlCommand" + printf " --header \"%s\"" "Date: $currentDateTime" >> "$tempCurlCommand" + printf " --header \"%s\"" "$authorizationHeader" >> "$tempCurlCommand" + if [ ! -z "$contentType" ]; then + printf " --header \"Content-Type: %s\"" "$contentType" >> "$tempCurlCommand" + fi + if [ ! -z "$contentMD5" ]; then + printf " --header \"Content-MD5: %s\"" "$contentMD5" >> "$tempCurlCommand" + fi + printf " \"%s\"\n" "$fullUrl" >> "$tempCurlCommand" + + unsetErrorTraps + exec env bash "$tempCurlCommand" +} + +function initialise +{ + setErrorTraps + checkEnvironment +} + +function main +{ + initialise + parseOptions "$@" + readonly currentDateTime="$(LC_TIME=C date "+%a, %d %h %Y %T %z")" + prepareToRunCurl + readonly authorizationHeader="$(computeAwsAuthorizationHeader)" + runCurl "$verbToPass" +}
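+
+# For reference: the ipad/opad handling in computeAwsAuthorizationHeader above is a
+# by-hand HMAC-SHA1 over the string-to-sign, which keeps the secret key out of any
+# command line. On an openssl build that supports `dgst -hmac` (the key is passed as
+# an argument, so only use this where that is acceptable) an equivalent signature
+# could be produced along these lines:
+#
+#   requestDate="$(LC_TIME=C date "+%a, %d %h %Y %T %z")"
+#   stringToSign="GET\n\n\n$requestDate\n/bucket/path/to/object"
+#   signature="$(printf "$stringToSign" | openssl dgst -sha1 -hmac "$secretAccessKey" -binary | openssl enc -base64)"
+#   curl --header "Date: $requestDate" --header "Authorization: AWS $awsAccessKeyId:$signature" "https://s3.amazonaws.com/bucket/path/to/object"
+#
+# ($secretAccessKey and the bucket/object path above are placeholders.)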