diff --git a/.whitesource b/.whitesource
index 8e62c4c9f..83face232 100644
--- a/.whitesource
+++ b/.whitesource
@@ -1,23 +1,17 @@
 {
   "scanSettings": {
     "configMode": "LOCAL",
-    "configExternalURL": "",
    "projectToken": "",
-    "baseBranches": []
+    "baseBranches": ["main"]
  },
  "checkRunSettings": {
-    "vulnerableCheckRunConclusionLevel": "failure",
+    "vulnerableCheckRunConclusionLevel": "success",
+    "licenseCheckRunConclusionLevel": "success",
    "displayMode": "diff",
    "useMendCheckNames": true,
-    "strictMode" : "failure"
+    "strictMode" : "warning"
  },
  "issueSettings": {
-    "minSeverityLevel": "LOW",
-    "issueType": "DEPENDENCY"
-  },
-  "remediateSettings": {
-    "workflowRules": {
-      "enabled": true
-    }
+    "minSeverityLevel": "NONE"
  }
 }
diff --git a/DocumentsFromSnapshotMigration/build.gradle b/DocumentsFromSnapshotMigration/build.gradle
index 4f928b962..d12cbc390 100644
--- a/DocumentsFromSnapshotMigration/build.gradle
+++ b/DocumentsFromSnapshotMigration/build.gradle
@@ -153,13 +153,23 @@ task slowTest(type: Test) {
     jacoco {
         enabled = true
     }
+    testLogging {
+        events "passed", "skipped", "failed"
+        exceptionFormat "full"
+        showExceptions true
+        showCauses true
+        showStackTraces true
+        // showStandardStreams true
+    }
+    reports {
+        html.required = true
+        html.destination file("${buildDir}/reports/tests/slowTest")
+    }
 }

 jacocoTestReport {
     dependsOn slowTest
     reports {
-        xml.required = true
-        xml.destination file("${buildDir}/reports/jacoco/test/jacocoTestReport.xml")
         html.required = true
         html.destination file("${buildDir}/reports/jacoco/test/html")
     }
diff --git a/DocumentsFromSnapshotMigration/src/main/java/com/rfs/RfsMigrateDocuments.java b/DocumentsFromSnapshotMigration/src/main/java/com/rfs/RfsMigrateDocuments.java
index e4a45b859..001c5cd79 100644
--- a/DocumentsFromSnapshotMigration/src/main/java/com/rfs/RfsMigrateDocuments.java
+++ b/DocumentsFromSnapshotMigration/src/main/java/com/rfs/RfsMigrateDocuments.java
@@ -2,6 +2,7 @@

 import com.beust.jcommander.JCommander;
 import com.beust.jcommander.Parameter;
+import com.beust.jcommander.ParameterException;

 import java.io.IOException;
 import java.net.URI;
@@ -22,6 +23,8 @@

 import com.rfs.common.DefaultSourceRepoAccessor;
 import com.rfs.common.DocumentReindexer;
+import com.rfs.common.FileSystemRepo;
+import com.rfs.common.IndexMetadata;
 import com.rfs.common.LuceneDocumentsReader;
 import com.rfs.common.OpenSearchClient;
 import com.rfs.common.S3Uri;
@@ -49,25 +52,38 @@ public static class Args {
             description = "The name of the snapshot to migrate")
         public String snapshotName;

+        @Parameter(names = {"--snapshot-local-dir"},
+            required = false,
+            description = ("The absolute path to the directory on local disk where the snapshot exists. Use this parameter"
+                + " if you have a copy of the snapshot on local disk. Mutually exclusive with --s3-local-dir, --s3-repo-uri, and --s3-region."
+            ))
+        public String snapshotLocalDir = null;
+
         @Parameter(names = {"--s3-local-dir"},
-            required = true,
-            description = "The absolute path to the directory on local disk to download S3 files to")
-        public String s3LocalDirPath;
+            required = false,
+            description = ("The absolute path to the directory on local disk to download S3 files to. If you supply this, you must"
+                + " also supply --s3-repo-uri and --s3-region. Mutually exclusive with --snapshot-local-dir."
+            ))
+        public String s3LocalDir = null;

         @Parameter(names = {"--s3-repo-uri"},
-            required = true,
-            description = "The S3 URI of the snapshot repo, like: s3://my-bucket/dir1/dir2")
-        public String s3RepoUri;
+            required = false,
+            description = ("The S3 URI of the snapshot repo, like: s3://my-bucket/dir1/dir2. If you supply this, you must"
+                + " also supply --s3-local-dir and --s3-region. Mutually exclusive with --snapshot-local-dir."
+            ))
+        public String s3RepoUri = null;

         @Parameter(names = {"--s3-region"},
-            required = true,
-            description = "The AWS Region the S3 bucket is in, like: us-east-2")
-        public String s3Region;
+            required = false,
+            description = ("The AWS Region the S3 bucket is in, like: us-east-2. If you supply this, you must"
+                + " also supply --s3-local-dir and --s3-repo-uri. Mutually exclusive with --snapshot-local-dir."
+            ))
+        public String s3Region = null;

         @Parameter(names = {"--lucene-dir"},
             required = true,
             description = "The absolute path to the directory where we'll put the Lucene docs")
-        public String luceneDirPath;
+        public String luceneDir;

         @Parameter(names = {"--target-host"},
             required = true,
@@ -97,17 +113,40 @@ public NoWorkLeftException(String message) {
         }
     }

+    public static void validateArgs(Args args) {
+        boolean isSnapshotLocalDirProvided = args.snapshotLocalDir != null;
+        boolean areAllS3ArgsProvided = args.s3LocalDir != null && args.s3RepoUri != null && args.s3Region != null;
+        boolean areAnyS3ArgsProvided = args.s3LocalDir != null || args.s3RepoUri != null || args.s3Region != null;
+
+        if (isSnapshotLocalDirProvided && areAnyS3ArgsProvided) {
+            throw new ParameterException("You must provide either --snapshot-local-dir or --s3-local-dir, --s3-repo-uri, and --s3-region, but not both.");
+        }
+
+        if (areAnyS3ArgsProvided && !areAllS3ArgsProvided) {
+            throw new ParameterException("If you provide the S3 snapshot args, you must provide all of them (--s3-local-dir, --s3-repo-uri, and --s3-region).");
+        }
+
+        if (!isSnapshotLocalDirProvided && !areAllS3ArgsProvided) {
+            throw new ParameterException("You must provide either --snapshot-local-dir or --s3-local-dir, --s3-repo-uri, and --s3-region.");
+        }
+
+    }
+
     public static void main(String[] args) throws Exception {
-        // Grab out args
         Args arguments = new Args();
         JCommander.newBuilder()
             .addObject(arguments)
             .build()
             .parse(args);

-        var luceneDirPath = Paths.get(arguments.luceneDirPath);
+        validateArgs(arguments);
+
+        var luceneDirPath = Paths.get(arguments.luceneDir);
+        var snapshotLocalDirPath = arguments.snapshotLocalDir != null ?
+            Paths.get(arguments.snapshotLocalDir) : null;
+
+
         try (var processManager = new LeaseExpireTrigger(workItemId->{
-            log.error("terminating RunRfsWorker because its lease has expired for " + workItemId);
+            log.error("Terminating RunRfsWorker because its lease has expired for " + workItemId);
             System.exit(PROCESS_TIMED_OUT);
         }, Clock.systemUTC())) {
             var workCoordinator = new OpenSearchWorkCoordinator(new ApacheHttpClient(new URI(arguments.targetHost)),
@@ -120,8 +159,16 @@ public static void main(String[] args) throws Exception {
                 new OpenSearchClient(arguments.targetHost, arguments.targetUser, arguments.targetPass, false);
             DocumentReindexer reindexer = new DocumentReindexer(targetClient);

-            SourceRepo sourceRepo = S3Repo.create(Paths.get(arguments.s3LocalDirPath),
-                new S3Uri(arguments.s3RepoUri), arguments.s3Region);
+            SourceRepo sourceRepo;
+            if (snapshotLocalDirPath == null) {
+                sourceRepo = S3Repo.create(
+                    Paths.get(arguments.s3LocalDir),
+                    new S3Uri(arguments.s3RepoUri),
+                    arguments.s3Region
+                );
+            } else {
+                sourceRepo = new FileSystemRepo(snapshotLocalDirPath);
+            }
             SnapshotRepo.Provider repoDataProvider = new SnapshotRepoProvider_ES_7_10(sourceRepo);

             IndexMetadata.Factory indexMetadataFactory = new IndexMetadataFactory_ES_7_10(repoDataProvider);
diff --git a/DocumentsFromSnapshotMigration/src/test/java/com/rfs/FullTest.java b/DocumentsFromSnapshotMigration/src/test/java/com/rfs/FullTest.java
index 55d4db17e..87a5715e3 100644
--- a/DocumentsFromSnapshotMigration/src/test/java/com/rfs/FullTest.java
+++ b/DocumentsFromSnapshotMigration/src/test/java/com/rfs/FullTest.java
@@ -47,7 +47,10 @@
 import org.slf4j.event.Level;
 import reactor.core.publisher.Flux;

+import java.io.BufferedReader;
+import java.io.File;
 import java.io.IOException;
+import java.io.InputStreamReader;
 import java.net.URI;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -65,6 +68,7 @@
 import java.util.UUID;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.UnaryOperator;
@@ -79,18 +83,22 @@ public class FullTest {
     final static long TOLERABLE_CLIENT_SERVER_CLOCK_DIFFERENCE_SECONDS = 3600;
     final static Pattern CAT_INDICES_INDEX_COUNT_PATTERN =
         Pattern.compile("(?:\\S+\\s+){2}(\\S+)\\s+(?:\\S+\\s+){3}(\\S+)");
+    final static List SOURCE_IMAGES = List.of(
+        SearchClusterContainer.ES_V7_10_2,
+        SearchClusterContainer.ES_V7_17
+    );
+    final static List TARGET_IMAGES = List.of(
+        SearchClusterContainer.OS_V1_3_16,
+        SearchClusterContainer.OS_V2_14_0
+    );
     public static final String SOURCE_SERVER_ALIAS = "source";
     public static final int MAX_SHARD_SIZE_BYTES = 64 * 1024 * 1024;

-    public static Stream makeArgs() {
-        var sourceImageNames = List.of(
-            makeParamsForBase(SearchClusterContainer.ES_V7_17),
-            makeParamsForBase(SearchClusterContainer.ES_V7_10_2));
-        var targetImageNames = List.of(
-            SearchClusterContainer.OS_V1_3_16.getImageName(),
-            SearchClusterContainer.OS_V2_14_0.getImageName());
+    public static Stream makeDocumentMigrationArgs() {
+        List sourceImageArgs = SOURCE_IMAGES.stream().map(name -> makeParamsForBase(name)).collect(Collectors.toList());
+        var targetImageNames = TARGET_IMAGES.stream().map(SearchClusterContainer.Version::getImageName).collect(Collectors.toList());
         var numWorkers = List.of(1, 3, 40);
-        return sourceImageNames.stream()
+        return sourceImageArgs.stream()
             .flatMap(a->
                 targetImageNames.stream().flatMap(b->
                     numWorkers.stream().map(c->Arguments.of(a[0], a[1], a[2], b, c))));
@@ -105,8 +113,8 @@ private static Object[] makeParamsForBase(SearchClusterContainer.Version baseSou
     }

     @ParameterizedTest
-    @MethodSource("makeArgs")
-    public void test(SearchClusterContainer.Version baseSourceImageVersion,
+    @MethodSource("makeDocumentMigrationArgs")
+    public void testDocumentMigration(SearchClusterContainer.Version baseSourceImageVersion,
                      String generatorImage, String[] generatorArgs,
                      String targetImageName, int numWorkers) throws Exception
@@ -308,6 +316,112 @@ private DocumentsRunner.CompletionStatus migrateDocumentsWithOneWorker(SourceRep
         }
     }

+    public static Stream makeProcessExitArgs() {
+        return Stream.of(
+            Arguments.of(true, 0),
+            Arguments.of(false, 1)
+        );
+    }
+
+    @ParameterizedTest
+    @MethodSource("makeProcessExitArgs")
+    public void testProcessExitsAsExpected(boolean targetAvailable, int expectedExitCode) throws Exception {
+        var sourceImageArgs = makeParamsForBase(SearchClusterContainer.ES_V7_10_2);
+        var baseSourceImageVersion = (SearchClusterContainer.Version) sourceImageArgs[0];
+        var generatorImage = (String) sourceImageArgs[1];
+        var generatorArgs = (String[]) sourceImageArgs[2];
+        var targetImageName = SearchClusterContainer.OS_V2_14_0.getImageName();
+
+        try (var esSourceContainer = new PreloadedSearchClusterContainer(baseSourceImageVersion,
+                SOURCE_SERVER_ALIAS, generatorImage, generatorArgs);
+             OpensearchContainer osTargetContainer =
+                 new OpensearchContainer<>(targetImageName)) {
+            esSourceContainer.start();
+            osTargetContainer.start();
+
+            final var SNAPSHOT_NAME = "test_snapshot";
+            final List INDEX_ALLOWLIST = List.of();
+            CreateSnapshot.run(
+                c -> new FileSystemSnapshotCreator(SNAPSHOT_NAME, c, SearchClusterContainer.CLUSTER_SNAPSHOT_DIR),
+                new OpenSearchClient(esSourceContainer.getUrl(), null),
+                false);
+            var tempDirSnapshot = Files.createTempDirectory("opensearchMigrationReindexFromSnapshot_test_snapshot");
+            var tempDirLucene = Files.createTempDirectory("opensearchMigrationReindexFromSnapshot_test_lucene");
+
+            String targetAddress = osTargetContainer.getHttpHostAddress();
+
+            String[] args = {
+                "--snapshot-name", SNAPSHOT_NAME,
+                "--snapshot-local-dir", tempDirSnapshot.toString(),
+                "--lucene-dir", tempDirLucene.toString(),
+                "--target-host", targetAddress
+            };
+
+            try {
+                esSourceContainer.copySnapshotData(tempDirSnapshot.toString());
+
+                var targetClient = new OpenSearchClient(targetAddress, null);
+                var sourceRepo = new FileSystemRepo(tempDirSnapshot);
+                migrateMetadata(sourceRepo, targetClient, SNAPSHOT_NAME, INDEX_ALLOWLIST);
+
+                // Stop the target container if we don't want it to be available. We've already cached the address it was
+                // using, so we can have reasonable confidence that nothing else will be using it and bork our test.
+                if (!targetAvailable) {
+                    osTargetContainer.stop();
+                }
+
+                String classpath = System.getProperty("java.class.path");
+                String javaHome = System.getProperty("java.home");
+                String javaExecutable = javaHome + File.separator + "bin" + File.separator + "java";
+
+                // Kick off the doc migration process
+                log.atInfo().setMessage("Running RfsMigrateDocuments with args: " + Arrays.toString(args)).log();
+                ProcessBuilder processBuilder = new ProcessBuilder(
+                    javaExecutable, "-cp", classpath, "com.rfs.RfsMigrateDocuments"
+                );
+                processBuilder.command().addAll(Arrays.asList(args));
+                processBuilder.redirectErrorStream(true);
+
+                Process process = processBuilder.start();
+                log.atInfo().setMessage("Process started with ID: " + Long.toString(process.toHandle().pid())).log();
+
+                // Kill the process and fail if we have to wait too long
+                int timeoutSeconds = 90;
+                boolean finished = process.waitFor(timeoutSeconds, TimeUnit.SECONDS);
+                if (!finished) {
+                    // Print the process output
+                    StringBuilder output = new StringBuilder();
+                    try (BufferedReader reader = new BufferedReader(new InputStreamReader(process.getInputStream()))) {
+                        String line;
+                        while ((line = reader.readLine()) != null) {
+                            output.append(line).append(System.lineSeparator());
+                        }
+                    }
+                    log.atError().setMessage("Process Output:").log();
+                    log.atError().setMessage(output.toString()).log();
+
+                    log.atError().setMessage("Process timed out, attempting to kill it...").log();
+                    process.destroy(); // Try to be nice about things first...
+                    if (!process.waitFor(10, TimeUnit.SECONDS)) {
+                        log.atError().setMessage("Process still running, attempting to force kill it...").log();
+                        process.destroyForcibly(); // ..then avada kedavra
+                    }
+                    Assertions.fail("The process did not finish within the timeout period (" + timeoutSeconds + " seconds).");
+                }
+
+                int actualExitCode = process.exitValue();
+                log.atInfo().setMessage("Process exited with code: " + actualExitCode).log();
+
+                // Check if the exit code is as expected
+                Assertions.assertEquals(expectedExitCode, actualExitCode, "The program did not exit with the expected status code.");
+
+            } finally {
+                deleteTree(tempDirSnapshot);
+                deleteTree(tempDirLucene);
+            }
+        }
+    }
+
     private static void deleteTree(Path path) throws IOException {
         try (var walk = Files.walk(path)) {
             walk.sorted(Comparator.reverseOrder()).forEach(p -> {
diff --git a/RFS/build.gradle b/RFS/build.gradle
index b1ae1de4e..91b94f72c 100644
--- a/RFS/build.gradle
+++ b/RFS/build.gradle
@@ -115,8 +115,8 @@ test {
     }

     testLogging {
-        exceptionFormat = 'full'
-        events "failed"
+        events "passed", "skipped", "failed"
+        exceptionFormat "full"
         showExceptions true
         showCauses true
         showStackTraces true
diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/console_api/console_api/settings.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/console_api/console_api/settings.py
index bd134f205..f04907feb 100644
--- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/console_api/console_api/settings.py
+++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/console_api/console_api/settings.py
@@ -12,6 +12,7 @@

 import os
 from pathlib import Path
+from typing import List

 # Build paths inside the project like this: BASE_DIR / 'subdir'.
 BASE_DIR = Path(__file__).resolve().parent.parent
@@ -28,8 +29,17 @@
 # SECURITY WARNING: don't run with debug turned on in production!
 DEBUG = True

-DEPLOYED_STAGE = os.environ.get('MIGRATION_STAGE')
-ALLOWED_HOSTS = ['migration-console', f'migration-console.migration.{DEPLOYED_STAGE}.local', 'localhost']
+
+def get_allowed_hosts() -> List[str]:
+    hosts = os.getenv('API_ALLOWED_HOSTS', '')
+    if not hosts:
+        return ['localhost']
+    # Remove any quotes and strip extra spacing characters
+    hosts = hosts.replace('"', '').replace('\'', '')
+    return [host.strip() for host in hosts.split(',')]
+
+
+ALLOWED_HOSTS = get_allowed_hosts()


 # Application definition
diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/docker-compose-console-only.yml b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/docker-compose-console-only.yml
index 5392b0fda..073c026b3 100644
--- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/docker-compose-console-only.yml
+++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/docker-compose-console-only.yml
@@ -21,6 +21,7 @@ services:
       - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
       - AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN}
       - AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION}
+      - API_ALLOWED_HOSTS=localhost
     ports:
       - "8000:8000"
     command: python3 /root/console_api/manage.py runserver_plus 0.0.0.0:8000
diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/backfill_rfs.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/backfill_rfs.py
index 41b4cddf8..02a813c20 100644
--- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/backfill_rfs.py
+++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/backfill_rfs.py
@@ -1,6 +1,8 @@
 from datetime import datetime
 from typing import Dict, Optional

+import requests
+
 from console_link.models.backfill_base import Backfill, BackfillStatus
 from console_link.models.cluster import Cluster
 from console_link.models.schema_tools import contains_one_of
@@ -76,6 +78,12 @@ def __init__(self, config: Dict, target_cluster: Cluster) -> None:
         self.target_cluster = target_cluster
         self.docker_config = self.config["reindex_from_snapshot"]["docker"]

+    def get_status(self, *args, **kwargs) -> CommandResult:
+        return CommandResult(True, (BackfillStatus.RUNNING, "This is my running state message"))
+
+    def scale(self, units: int, *args, **kwargs) -> CommandResult:
+        raise NotImplementedError()
+

 class ECSRFSBackfill(RFSBackfill):
     def __init__(self, config: Dict, target_cluster: Cluster) -> None:
@@ -122,6 +130,13 @@ def get_status(self, deep_check, *args, **kwargs) -> CommandResult:
         return CommandResult(True, (BackfillStatus.STOPPED, status_string))

     def _get_detailed_status(self) -> Optional[str]:
+        # Check whether the working state index exists. If not, we can't run queries.
+        try:
+            self.target_cluster.call_api("/.migrations_working_state")
+        except requests.exceptions.RequestException:
+            logger.warning("Working state index does not yet exist, deep status checks can't be performed.")
+            return None
+
         current_epoch_seconds = int(datetime.now().timestamp())
         incomplete_query = {"query": {
             "bool": {"must_not": [{"exists": {"field": "completedAt"}}]}
diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill.py
index c18c92136..623e84299 100644
--- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill.py
+++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill.py
@@ -247,16 +247,17 @@ def test_ecs_rfs_calculates_backfill_status_from_ecs_instance_statuses_running(e
 def test_ecs_rfs_get_status_deep_check(ecs_rfs_backfill, mocker):
     target = create_valid_cluster()
-    mocked_stopped_status = InstanceStatuses(
+    mocked_instance_status = InstanceStatuses(
         desired=1,
         running=1,
         pending=0
     )
-    mock = mocker.patch.object(ECSService, 'get_instance_statuses', autospec=True, return_value=mocked_stopped_status)
+    mock = mocker.patch.object(ECSService, 'get_instance_statuses', autospec=True, return_value=mocked_instance_status)
     with open(TEST_DATA_DIRECTORY / "migrations_working_state_search.json") as f:
         data = json.load(f)
     total_shards = data['hits']['total']['value']

     with requests_mock.Mocker() as rm:
+        rm.get(f"{target.endpoint}/.migrations_working_state", status_code=200)
         rm.get(f"{target.endpoint}/.migrations_working_state/_search",
                status_code=200,
                json=data)
@@ -265,5 +266,5 @@ def test_ecs_rfs_get_status_deep_check(ecs_rfs_backfill, mocker):
     mock.assert_called_once_with(ecs_rfs_backfill.ecs_client)
     assert value.success
     assert BackfillStatus.RUNNING == value.value[0]
-    assert str(mocked_stopped_status) in value.value[1]
+    assert str(mocked_instance_status) in value.value[1]
     assert str(total_shards) in value.value[1]
diff --git a/deployment/cdk/opensearch-service-migration/lib/service-stacks/migration-console-stack.ts b/deployment/cdk/opensearch-service-migration/lib/service-stacks/migration-console-stack.ts
index 252eb31ee..36942fb6c 100644
--- a/deployment/cdk/opensearch-service-migration/lib/service-stacks/migration-console-stack.ts
+++ b/deployment/cdk/opensearch-service-migration/lib/service-stacks/migration-console-stack.ts
@@ -25,6 +25,7 @@ export interface MigrationConsoleProps extends StackPropsExt {
     readonly fargateCpuArch: CpuArchitecture,
     readonly migrationConsoleEnableOSI: boolean,
     readonly migrationAPIEnabled?: boolean,
+    readonly migrationAPIAllowedHosts?: string,
     readonly servicesYaml: ServicesYaml,
 }

@@ -327,6 +328,9 @@ export class MigrationConsoleStack extends MigrationServiceCore {
             imageCommand = ['/bin/sh', '-c',
                 '/root/loadServicesFromParameterStore.sh && python3 /root/console_api/manage.py runserver_plus 0.0.0.0:8000'
             ]
+
+            const defaultAllowedHosts = `migration-console.migration.${props.stage}.local,localhost`
+            environment["API_ALLOWED_HOSTS"] = props.migrationAPIAllowedHosts ? `${defaultAllowedHosts},${props.migrationAPIAllowedHosts}` : defaultAllowedHosts
         }

         if (props.migrationConsoleEnableOSI) {
diff --git a/deployment/cdk/opensearch-service-migration/lib/stack-composer.ts b/deployment/cdk/opensearch-service-migration/lib/stack-composer.ts
index f470692a6..6113f65cf 100644
--- a/deployment/cdk/opensearch-service-migration/lib/stack-composer.ts
+++ b/deployment/cdk/opensearch-service-migration/lib/stack-composer.ts
@@ -195,6 +195,7 @@ export class StackComposer {
         const migrationConsoleServiceEnabled = this.getContextForType('migrationConsoleServiceEnabled', 'boolean', defaultValues, contextJSON)
         const migrationConsoleEnableOSI = this.getContextForType('migrationConsoleEnableOSI', 'boolean', defaultValues, contextJSON)
         const migrationAPIEnabled = this.getContextForType('migrationAPIEnabled', 'boolean', defaultValues, contextJSON)
+        const migrationAPIAllowedHosts = this.getContextForType('migrationAPIAllowedHosts', 'string', defaultValues, contextJSON)
         const trafficReplayerServiceEnabled = this.getContextForType('trafficReplayerServiceEnabled', 'boolean', defaultValues, contextJSON)
         const trafficReplayerEnableClusterFGACAuth = this.getContextForType('trafficReplayerEnableClusterFGACAuth', 'boolean', defaultValues, contextJSON)
         const trafficReplayerMaxUptime = this.getContextForType('trafficReplayerMaxUptime', 'string', defaultValues, contextJSON);
@@ -557,6 +558,7 @@
                 migrationConsoleEnableOSI: migrationConsoleEnableOSI,
                 migrationAPIEnabled: migrationAPIEnabled,
                 servicesYaml: servicesYaml,
+                migrationAPIAllowedHosts: migrationAPIAllowedHosts,
                 stackName: `OSMigrations-${stage}-${region}-MigrationConsole`,
                 description: "This stack contains resources for the Migration Console ECS service",
                 stage: stage,
diff --git a/deployment/cdk/opensearch-service-migration/options.md b/deployment/cdk/opensearch-service-migration/options.md
index 2d32aa458..5b62d301e 100644
--- a/deployment/cdk/opensearch-service-migration/options.md
+++ b/deployment/cdk/opensearch-service-migration/options.md
@@ -14,6 +14,7 @@ These tables list all CDK context configuration values a user can specify for th
 | captureProxyESExtraArgs | string | `"--suppressCaptureForHeaderMatch user-agent .*elastic-java/7.17.0.*"` | Extra arguments to provide to the Capture Proxy command. This includes available arguments specified by the [Capture Proxy](../../../TrafficCapture/trafficCaptureProxyServer/src/main/java/org/opensearch/migrations/trafficcapture/proxyserver/CaptureProxy.java). |
 | migrationConsoleServiceEnabled | boolean | true | Enable deploying the given service, via a new CloudFormation stack |
 | migrationAPIEnabled | boolean | true | **Experimental** flag for enabling an API on the Migration Console for controlling migration actions |
+| migrationAPIAllowedHosts | string | "test-endpoint1, localhost" | Comma-delimited string of host or domain names that the API will serve. Other domains will receive a 400 Bad Request error |
 | migrationConsoleEnableOSI | boolean | true | **Experimental**: When enabled, will setup necessary IAM roles and a CloudWatch log group for controlling an OpenSearch Ingestion pipeline from the Migration Console |
 | trafficReplayerServiceEnabled | boolean | true | Enable deploying the given service, via a new CloudFormation stack |
 | captureProxyServiceEnabled | boolean | true | Enable deploying the given service, via a new CloudFormation stack |
@@ -27,10 +28,10 @@ These tables list all CDK context configuration values a user can specify for th
 | trafficReplayerUserAgentSuffix | string | "AWS/test/v1.0.0" | A custom user agent that will be provided to the Replayer using the `--user-agent` flag. This will append the provided user agent to any existing user agents when requests are made to the target cluster. This setting could also be specified with the `trafficReplayerExtraArgs` option |
 | trafficReplayerExtraArgs | string | "--sigv4-auth-header-service-region es,us-east-1 --speedup-factor 5" | Extra arguments to provide to the Replayer command. This includes auth header options and other parameters supported by the [Traffic Replayer](../../../TrafficCapture/trafficReplayer/src/main/java/org/opensearch/migrations/replay/TrafficReplayer.java). |
 | trafficReplayerMaxUptime | string | "P1D" | The maximum uptime for the Traffic Replayer service, specified in ISO 8601 duration format. This controls how long the Traffic Replayer will run before automatically shutting down. Example values: "PT1H" (hourly), "P1D" (daily). When this duration is reached, ECS will initiate the startup of a new Traffic Replayer task to ensure continuous operation. This mechanism ensures that the Traffic Replayer service can manage its resources effectively and prevent issues associated with long running processes. Set to the greater of the given value 5 minutes. When not specified, the replayer will run continuously. |
-| sourceClusterEndpoint | string | `"https://my-source-cluster.com:443"` | The URI of the source cluster from that the migration will reference. **Note**: if this is not provided and elasticsearchService or captureProxyESService is enabled, the migration will reference a uri for that service. |
-| albEnabled | boolean | false | Enable deploying an ALB in front of all services that expose an ingress API. Enabling seamless client cutover and rollbacks for the migration. |
-| albAcmCertArn | string | `"arn:aws:acm:us-east-1:12345678912:certificate/abc123de-4888-4fa7-a508-3811e2d49fc3"` | The ACM certificate ARN to use for the ALB. If not specified, a custom resource will be deployed to create one. If creation must happen locally, a script has been provded to create and upload a cert and can be invoked with `npm run create-acm-cert` and will return the uploaded cert arn. |
-| targetClusterProxyServiceEnabled | boolean | false | Enable a non-capturing proxy to use a load balancer against a managed OpenSearch cluster. This is needed to enable alb cuttover to a managed opensearch cluster as there is no way to directly route an ALB listener to an AWS Managed OpenSearch Service. |
+| sourceClusterEndpoint | string | `"https://my-source-cluster.com:443"` | The URI of the source cluster that the migration will reference. **Note**: if this is not provided and elasticsearchService or captureProxyESService is enabled, the migration will reference a URI for that service. |
+| albEnabled | boolean | false | Enable deploying an ALB in front of all services that expose an ingress API, enabling seamless client cutover and rollbacks for the migration. |
+| albAcmCertArn | string | `"arn:aws:acm:us-east-1:12345678912:certificate/abc123de-4888-4fa7-a508-3811e2d49fc3"` | The ACM certificate ARN to use for the ALB. If not specified, a custom resource will be deployed to create one. If creation must happen locally, a script has been provided to create and upload a cert; it can be invoked with `npm run create-acm-cert` and will return the uploaded cert ARN. |
+| targetClusterProxyServiceEnabled | boolean | false | Enable a non-capturing proxy to use a load balancer against a managed OpenSearch cluster. This is needed to enable ALB cutover to a managed OpenSearch cluster, as there is no way to directly route an ALB listener to an AWS Managed OpenSearch Service. |

 ### Fetch Migration Service Options
 | Name | Type | Example | Description |
diff --git a/test/Pipfile b/test/Pipfile
index 561e71265..88df1e49e 100644
--- a/test/Pipfile
+++ b/test/Pipfile
@@ -4,7 +4,7 @@ verify_ssl = true
 name = "pypi"

 [packages]
-certifi = "==2023.7.22"
+certifi = "==2024.7.4"
 charset-normalizer = "==3.1.0"
 idna = "==3.7"
 iniconfig = "==2.0.0"
diff --git a/test/Pipfile.lock b/test/Pipfile.lock
index f92baa114..65b5899e8 100644
--- a/test/Pipfile.lock
+++ b/test/Pipfile.lock
@@ -1,7 +1,7 @@
 {
     "_meta": {
         "hash": {
-            "sha256": "c3eb7803ca177922eb612398ccf4dc236f1e0f80030ebc45c714b929b157af4e"
+            "sha256": "e1a3d55892ace5dfd9e6deb4863fa576f432f1a3de368c3ba6a3718dbd92f2e0"
         },
         "pipfile-spec": 6,
         "requires": {
@@ -35,12 +35,12 @@
         },
         "certifi": {
             "hashes": [
-                "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082",
-                "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"
+                "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b",
+                "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"
             ],
             "index": "pypi",
             "markers": "python_version >= '3.6'",
-            "version": "==2023.7.22"
+            "version": "==2024.7.4"
         },
         "charset-normalizer": {
             "hashes": [
diff --git a/whitesource.config b/whitesource.config
index f9c1c6426..ac1bf2fd1 100644
--- a/whitesource.config
+++ b/whitesource.config
@@ -1,3 +1,32 @@
-python.invokePipAsModule=true
-python.path=python3.9
-python.installVirtualenv=true
+bower.runPreStep=false
+cocoapods.runPreStep=false
+excludes=**/*
+go.collectDependenciesAtRuntime=false
+gradle.resolveDependencies=false
+haskell.runPreStep=false
+hex.runPreStep=false
+includes=nothing
+maven.resolveDependencies=false
+npm.includeDevDependencies=false
+npm.resolveDependencies=false
+npm.runPreStep=false
+nuget.runPreStep=false
+paket.runPreStep=false
+php.runPreStep=false
+python.ignorePipInstallErrors=true
+python.includePipenvDevDependencies=false
+python.installVirtualenv=false
+python.invokePipAsModule=false
+python.path=python3.11
+python.pipenvDevDependencies=false
+python.resolveDependencies=false
+python.resolveHierarchyTree=false
+python.resolvePipEditablePackages=false
+python.resolveSetupFiles=false
+python.runPipenvPreStep=false
+python.runPoetryPreStep=false
+r.runPreStep=false
+resolveAllDependencies=false
+ruby.installMissingGems=false
+ruby.runBundleInstall=false
+sbt.runPreStep=false
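
For reference, a minimal self-contained sketch (not part of the patch) of how the mutual-exclusion rules in the new RfsMigrateDocuments.validateArgs() behave. It uses only the public names introduced in the diff above; the wrapper class and the example paths are hypothetical.

import com.beust.jcommander.ParameterException;
import com.rfs.RfsMigrateDocuments;

public class ValidateArgsSketch {
    public static void main(String[] argv) {
        // Local-snapshot mode: --snapshot-local-dir on its own is accepted.
        var args = new RfsMigrateDocuments.Args();
        args.snapshotLocalDir = "/snapshots/my-snapshot";   // hypothetical path
        RfsMigrateDocuments.validateArgs(args);             // no exception

        // Mixing modes is rejected: any S3 arg alongside --snapshot-local-dir
        // trips the first check in validateArgs().
        args.s3RepoUri = "s3://my-bucket/dir1/dir2";
        try {
            RfsMigrateDocuments.validateArgs(args);
        } catch (ParameterException e) {
            System.out.println("Rejected as expected: " + e.getMessage());
        }
    }
}

This is the same contract that testProcessExitsAsExpected relies on when it launches the migration process with only --snapshot-name, --snapshot-local-dir, --lucene-dir, and --target-host.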