Align Capture Proxy naming and Improve Migration Console UX #284

Merged 4 commits on Aug 23, 2023
@@ -3,7 +3,7 @@ services:


# Run combined instance of Capture Proxy and Elasticsearch
- capture_proxy_es:
+ capture-proxy-es:
image: 'migrations/capture_proxy:latest'
networks:
- migrations
@@ -18,7 +18,7 @@ services:
- kafka

# Run separate instances of Capture Proxy and Elasticsearch
- # captureproxy:
+ # capture-proxy:
# image: 'migrations/capture_proxy:latest'
# networks:
# - migrations
@@ -102,7 +102,7 @@ services:
- sharedComparatorSqlResults:/shared
command: /bin/sh -c "cd trafficComparator && pip3 install --editable . && nc -v -l -p 9220 | tee /dev/stderr | trafficcomparator -vv stream | trafficcomparator dump-to-sqlite --db /shared/comparisons.db"

- jupyter_notebook:
+ jupyter-notebook:
image: 'migrations/jupyter_notebook:latest'
networks:
- migrations
@@ -116,7 +116,7 @@ services:
- COMPARISONS_DB_LOCATION=/shared/comparisons.db
command: /bin/sh -c 'cd trafficComparator && pip3 install --editable ".[data]" && jupyter notebook --ip=0.0.0.0 --port=8888 --no-browser --allow-root'

- migration_console:
+ migration-console:
image: 'migrations/migration_console:latest'
networks:
- migrations
@@ -8,8 +8,10 @@ RUN apt-get update && \

COPY runTestBenchmarks.sh /root/
COPY humanReadableLogs.py /root/
+ COPY catIndices.sh /root/
RUN chmod ug+x /root/runTestBenchmarks.sh
RUN chmod ug+x /root/humanReadableLogs.py
+ RUN chmod ug+x /root/catIndices.sh
WORKDIR /root

CMD tail -f /dev/null
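
For the Docker Compose setup, a minimal sketch of exercising the two copied scripts from inside the console container; the `docker compose exec` invocation below is an assumption based on the renamed `migration-console` service in the compose file above and presumes the stack is already running:

```
# Open a shell in the migration console container (service name taken from the compose file)
docker compose exec migration-console bash

# Inside the container, both scripts sit in /root (the WORKDIR) and are already executable
./runTestBenchmarks.sh
./catIndices.sh
```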
New file: catIndices.sh
@@ -0,0 +1,73 @@
#!/bin/bash

# Default values
source_endpoint="https://capture-proxy-es:9200"
source_auth_user_and_pass="admin:admin"
source_no_auth=false
target_no_auth=false

# Check for the presence of COPILOT_SERVICE_NAME environment variable
if [ -n "$COPILOT_SERVICE_NAME" ]; then
    target_endpoint="https://${MIGRATION_DOMAIN_ENDPOINT}:443"
    target_auth_user_and_pass="admin:Admin123!"
else
    target_endpoint="https://opensearchtarget:9200"
    target_auth_user_and_pass="admin:admin"
fi

# Override default values with optional command-line arguments
while [[ $# -gt 0 ]]; do
    key="$1"
    case $key in
        --target_endpoint)
            target_endpoint="$2"
            shift
            shift
            ;;
        --target_auth_user_and_pass)
            target_auth_user_and_pass="$2"
            shift
            shift
            ;;
        --target_no_auth)
            target_no_auth=true
            shift
            ;;
        --source_endpoint)
            source_endpoint="$2"
            shift
            shift
            ;;
        --source_auth_user_and_pass)
            source_auth_user_and_pass="$2"
            shift
            shift
            ;;
        --source_no_auth)
            source_no_auth=true
            shift
            ;;
        *)
            shift
            ;;
    esac
done

source_auth_string="-u $source_auth_user_and_pass"
target_auth_string="-u $target_auth_user_and_pass"

if [ "$source_no_auth" = true ]; then
source_auth_string=""
fi
if [ "$target_no_auth" = true ]; then
target_auth_string=""
fi

echo "SOURCE CLUSTER"
echo "curl $source_endpoint/_cat/indices?v"
curl $source_endpoint/_cat/indices?v --insecure $source_auth_string
echo ""
echo "TARGET CLUSTER"
echo "curl $target_endpoint/_cat/indices?v"
curl $target_endpoint/_cat/indices?v --insecure $target_auth_string
echo ""
Modified file: runTestBenchmarks.sh
@@ -1,25 +1,17 @@
#!/bin/bash

# Default values
- default_docker_endpoint="https://capture_proxy_es:9200"
- default_copilot_endpoint="https://capture-proxy-es:443"
+ endpoint="https://capture-proxy-es:9200"
auth_user="admin"
auth_pass="admin"
no_auth=false

- # Check for the presence of COPILOT_SERVICE_NAME environment variable
- if [ -n "$COPILOT_SERVICE_NAME" ]; then
-     ENDPOINT="$default_copilot_endpoint"
- else
-     ENDPOINT="$default_docker_endpoint"
- fi

# Override default values with optional command-line arguments
while [[ $# -gt 0 ]]; do
key="$1"
case $key in
--endpoint)
ENDPOINT="$2"
endpoint="$2"
shift
shift
;;
@@ -54,12 +46,12 @@ fi
base_options_string="use_ssl:true,verify_certs:false"
client_options="${base_options_string}${auth_string}"

echo "Running opensearch-benchmark workloads against ${ENDPOINT}"
echo "Running opensearch-benchmark workloads against ${endpoint}"
echo "Running opensearch-benchmark w/ 'geonames' workload..." &&
- opensearch-benchmark execute-test --distribution-version=1.0.0 --target-host=$ENDPOINT --workload=geonames --pipeline=benchmark-only --test-mode --kill-running-processes --workload-params "target_throughput:0.5,bulk_size:10,bulk_indexing_clients:1,search_clients:1" --client-options=$client_options &&
+ opensearch-benchmark execute-test --distribution-version=1.0.0 --target-host=$endpoint --workload=geonames --pipeline=benchmark-only --test-mode --kill-running-processes --workload-params "target_throughput:0.5,bulk_size:10,bulk_indexing_clients:1,search_clients:1" --client-options=$client_options &&
echo "Running opensearch-benchmark w/ 'http_logs' workload..." &&
- opensearch-benchmark execute-test --distribution-version=1.0.0 --target-host=$ENDPOINT --workload=http_logs --pipeline=benchmark-only --test-mode --kill-running-processes --workload-params "target_throughput:0.5,bulk_size:10,bulk_indexing_clients:1,search_clients:1" --client-options=$client_options &&
+ opensearch-benchmark execute-test --distribution-version=1.0.0 --target-host=$endpoint --workload=http_logs --pipeline=benchmark-only --test-mode --kill-running-processes --workload-params "target_throughput:0.5,bulk_size:10,bulk_indexing_clients:1,search_clients:1" --client-options=$client_options &&
echo "Running opensearch-benchmark w/ 'nested' workload..." &&
- opensearch-benchmark execute-test --distribution-version=1.0.0 --target-host=$ENDPOINT --workload=nested --pipeline=benchmark-only --test-mode --kill-running-processes --workload-params "target_throughput:0.5,bulk_size:10,bulk_indexing_clients:1,search_clients:1" --client-options=$client_options &&
+ opensearch-benchmark execute-test --distribution-version=1.0.0 --target-host=$endpoint --workload=nested --pipeline=benchmark-only --test-mode --kill-running-processes --workload-params "target_throughput:0.5,bulk_size:10,bulk_indexing_clients:1,search_clients:1" --client-options=$client_options &&
echo "Running opensearch-benchmark w/ 'nyc_taxis' workload..." &&
- opensearch-benchmark execute-test --distribution-version=1.0.0 --target-host=$ENDPOINT --workload=nyc_taxis --pipeline=benchmark-only --test-mode --kill-running-processes --workload-params "target_throughput:0.5,bulk_size:10,bulk_indexing_clients:1,search_clients:1" --client-options=$client_options
+ opensearch-benchmark execute-test --distribution-version=1.0.0 --target-host=$endpoint --workload=nyc_taxis --pipeline=benchmark-only --test-mode --kill-running-processes --workload-params "target_throughput:0.5,bulk_size:10,bulk_indexing_clients:1,search_clients:1" --client-options=$client_options
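
A short sketch of how the lowercased `endpoint` variable is exercised (not part of the diff): with no arguments the script runs all four workloads against the default `https://capture-proxy-es:9200`, and the `--endpoint` flag shown in the hunk above redirects them; the alternate host below is a placeholder:

```
# Run the benchmark suite against the default capture proxy endpoint
./runTestBenchmarks.sh

# Or point it at a different cluster (placeholder host)
./runTestBenchmarks.sh --endpoint https://my-other-cluster.example.com:9200
```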
15 changes: 12 additions & 3 deletions deployment/copilot/README.md
@@ -140,14 +140,23 @@ Once the solution is deployed, the easiest way to test the solution is to exec i
// Exec into container
copilot svc exec -a migration-copilot -e dev -n migration-console -c "bash"

- // Run benchmark workload (i.e. geonames, nyc_taxis, http_logs)
- opensearch-benchmark execute-test --distribution-version=1.0.0 --target-host=https://capture-proxy-es:443 --workload=geonames --pipeline=benchmark-only --test-mode --kill-running-processes --workload-params "target_throughput:0.5,bulk_size:10,bulk_indexing_clients:1,search_clients:1" --client-options "use_ssl:true,verify_certs:false,basic_auth_user:admin,basic_auth_password:admin"
+ // Run opensearch-benchmark workload (i.e. geonames, nyc_taxis, http_logs)
+
+ // Option 1: Automated script
+ ./runTestBenchmarks.sh
+
+ // Option 2: Manually execute command
+ opensearch-benchmark execute-test --distribution-version=1.0.0 --target-host=https://capture-proxy-es:9200 --workload=geonames --pipeline=benchmark-only --test-mode --kill-running-processes --workload-params "target_throughput:0.5,bulk_size:10,bulk_indexing_clients:1,search_clients:1" --client-options "use_ssl:true,verify_certs:false,basic_auth_user:admin,basic_auth_password:admin"
```

After the benchmark has been run, the indices and documents of the source and target clusters can be checked from the same migration-console container to confirm
```
+ // Option 1: Automated script
+ ./catIndices.sh
+
+ // Option 2: Manually execute cluster requests
// Check source cluster
- curl https://capture-proxy-es:443/_cat/indices?v --insecure -u admin:admin
+ curl https://capture-proxy-es:9200/_cat/indices?v --insecure -u admin:admin

// Check target cluster
curl https://$MIGRATION_DOMAIN_ENDPOINT:443/_cat/indices?v --insecure -u admin:Admin123!
9 changes: 7 additions & 2 deletions deployment/copilot/capture-proxy-es/manifest.yml
@@ -26,15 +26,20 @@ image:
# Docker build arguments. For additional overrides: https://aws.github.io/copilot-cli/docs/manifest/lb-web-service/#image-build
build: ../TrafficCapture/dockerSolution/build/docker/trafficCaptureProxyServer/Dockerfile
# Port exposed through your container to route traffic to it.
- port: 443
+ port: 9200

- command: /bin/sh -c '/usr/local/bin/docker-entrypoint.sh eswrapper & /runJavaWithClasspath.sh org.opensearch.migrations.trafficcapture.proxyserver.Main --kafkaConnection ${MIGRATION_KAFKA_BROKER_ENDPOINTS} --enableMSKAuth --destinationUri https://localhost:9200 --insecureDestination --listenPort 443 --sslConfigFile /usr/share/elasticsearch/config/proxy_tls.yml & wait -n 1'
+ command: /bin/sh -c '/usr/local/bin/docker-entrypoint.sh eswrapper & /runJavaWithClasspath.sh org.opensearch.migrations.trafficcapture.proxyserver.Main --kafkaConnection ${MIGRATION_KAFKA_BROKER_ENDPOINTS} --enableMSKAuth --destinationUri https://localhost:19200 --insecureDestination --listenPort 9200 --sslConfigFile /usr/share/elasticsearch/config/proxy_tls.yml & wait -n 1'

cpu: 1024 # Number of CPU units for the task.
memory: 4096 # Amount of memory in MiB used by the task.
count: 1 # Number of tasks that should be running in your service.
exec: true # Enable getting a shell to your container (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-exec.html).

+ # Pass environment variables as key value pairs.
+ variables:
+   # Set Elasticsearch port to 19200 to allow capture proxy at port 9200
+   http.port: 19200

environments:
dev:
count: 1 # Number of tasks to run for the "dev" environment.
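
Taken together, this manifest has clients reach the Capture Proxy on port 9200 while the co-located Elasticsearch moves to 19200 via `http.port`, so captured traffic is forwarded to `https://localhost:19200`. A hedged verification step from the migration console, reusing the default `admin:admin` credentials shown elsewhere in this PR, might look like:

```
# Request sent to the capture proxy on 9200; the proxy records it and forwards
# it to the Elasticsearch node now listening on 19200
curl --insecure -u admin:admin "https://capture-proxy-es:9200/_cat/indices?v"
```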
4 changes: 2 additions & 2 deletions deployment/copilot/capture-proxy/manifest.yml
@@ -18,9 +18,9 @@ image:
# Docker build arguments. For additional overrides: https://aws.github.io/copilot-cli/docs/manifest/lb-web-service/#image-build
build: ../TrafficCapture/dockerSolution/build/docker/trafficCaptureProxyServer/Dockerfile
# Port exposed through your container to route traffic to it.
- port: 443
+ port: 9200

- command: /runJavaWithClasspath.sh org.opensearch.migrations.trafficcapture.proxyserver.Main --kafkaConnection ${MIGRATION_KAFKA_BROKER_ENDPOINTS} --enableMSKAuth --destinationUri https://elasticsearch:9200 --insecureDestination --listenPort 443 --sslConfigFile /usr/share/elasticsearch/config/proxy_tls.yml
+ command: /runJavaWithClasspath.sh org.opensearch.migrations.trafficcapture.proxyserver.Main --kafkaConnection ${MIGRATION_KAFKA_BROKER_ENDPOINTS} --enableMSKAuth --destinationUri https://elasticsearch:9200 --insecureDestination --listenPort 9200 --sslConfigFile /usr/share/elasticsearch/config/proxy_tls.yml

cpu: 512 # Number of CPU units for the task.
memory: 2048 # Amount of memory in MiB used by the task.
4 changes: 4 additions & 0 deletions deployment/copilot/migration-console/manifest.yml
@@ -30,6 +30,10 @@ memory: 1024 # Amount of memory in MiB used by the task.
count: 1 # Number of tasks that should be running in your service.
exec: true # Enable getting a shell to your container (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-exec.html).

+ # Pass environment variables as key value pairs.
+ variables:
+   MIGRATION_DOMAIN_ENDPOINT: ${MIGRATION_DOMAIN_ENDPOINT}

environments:
dev:
count: 1 # Number of tasks to run for the "dev" environment.