diff --git a/CreateSnapshot/src/main/java/com/rfs/CreateSnapshot.java b/CreateSnapshot/src/main/java/com/rfs/CreateSnapshot.java index 69a478b17..6ddb7d2c9 100644 --- a/CreateSnapshot/src/main/java/com/rfs/CreateSnapshot.java +++ b/CreateSnapshot/src/main/java/com/rfs/CreateSnapshot.java @@ -3,11 +3,13 @@ import com.beust.jcommander.JCommander; import com.beust.jcommander.Parameter; import com.beust.jcommander.ParameterException; +import com.beust.jcommander.ParametersDelegate; import lombok.AllArgsConstructor; import lombok.Getter; import lombok.extern.slf4j.Slf4j; +import com.rfs.common.ConnectionDetails; import com.rfs.common.FileSystemSnapshotCreator; import com.rfs.common.OpenSearchClient; import com.rfs.common.S3SnapshotCreator; @@ -41,22 +43,8 @@ public static class Args { ) public String s3Region; - @Parameter(names = {"--source-host"}, - required = true, - description = "The source host and port (e.g. http://localhost:9200)") - public String sourceHost; - - @Parameter(names = {"--source-username"}, - description = "Optional. The source username; if not provided, will assume no auth on source") - public String sourceUser = null; - - @Parameter(names = {"--source-password"}, - description = "Optional. The source password; if not provided, will assume no auth on source") - public String sourcePass = null; - - @Parameter(names = {"--source-insecure"}, - description = "Allow untrusted SSL certificates for source") - public boolean sourceInsecure = false; + @ParametersDelegate + public ConnectionDetails.SourceArgs sourceArgs; @Parameter(names = {"--no-wait"}, description = "Optional. If provided, the snapshot runner will not wait for completion") public boolean noWait = false; @@ -96,7 +84,7 @@ public static void main(String[] args) throws Exception { run(c -> ((arguments.fileSystemRepoPath != null) ? 
new FileSystemSnapshotCreator(arguments.snapshotName, c, arguments.fileSystemRepoPath) : new S3SnapshotCreator(arguments.snapshotName, c, arguments.s3RepoUri, arguments.s3Region, arguments.maxSnapshotRateMBPerNode)), - new OpenSearchClient(arguments.sourceHost, arguments.sourceUser, arguments.sourcePass, arguments.sourceInsecure), + new OpenSearchClient(new ConnectionDetails(arguments.sourceArgs)), arguments.noWait ); } diff --git a/DocumentsFromSnapshotMigration/build.gradle b/DocumentsFromSnapshotMigration/build.gradle index d12cbc390..bd4a2f733 100644 --- a/DocumentsFromSnapshotMigration/build.gradle +++ b/DocumentsFromSnapshotMigration/build.gradle @@ -170,6 +170,8 @@ task slowTest(type: Test) { jacocoTestReport { dependsOn slowTest reports { + xml.required = true + xml.destination file("${buildDir}/reports/jacoco/test/jacocoTestReport.xml") html.required = true html.destination file("${buildDir}/reports/jacoco/test/html") } diff --git a/DocumentsFromSnapshotMigration/src/main/java/com/rfs/RfsMigrateDocuments.java b/DocumentsFromSnapshotMigration/src/main/java/com/rfs/RfsMigrateDocuments.java index 30d0d0100..1709895f2 100644 --- a/DocumentsFromSnapshotMigration/src/main/java/com/rfs/RfsMigrateDocuments.java +++ b/DocumentsFromSnapshotMigration/src/main/java/com/rfs/RfsMigrateDocuments.java @@ -24,14 +24,14 @@ import com.rfs.common.DefaultSourceRepoAccessor; import com.rfs.common.DocumentReindexer; import com.rfs.common.FileSystemRepo; -import com.rfs.common.IndexMetadata; import com.rfs.common.LuceneDocumentsReader; import com.rfs.common.OpenSearchClient; import com.rfs.common.S3Uri; -import com.rfs.common.ShardMetadata; import com.rfs.common.S3Repo; import com.rfs.common.SourceRepo; import com.rfs.common.TryHandlePhaseFailure; +import com.rfs.models.IndexMetadata; +import com.rfs.models.ShardMetadata; import com.rfs.common.SnapshotRepo; import com.rfs.common.SnapshotShardUnpacker; import com.rfs.version_es_7_10.ElasticsearchConstants_ES_7_10; diff --git a/DocumentsFromSnapshotMigration/src/test/java/com/rfs/FullTest.java b/DocumentsFromSnapshotMigration/src/test/java/com/rfs/FullTest.java index ee7f43c8a..4437e6201 100644 --- a/DocumentsFromSnapshotMigration/src/test/java/com/rfs/FullTest.java +++ b/DocumentsFromSnapshotMigration/src/test/java/com/rfs/FullTest.java @@ -9,18 +9,18 @@ import com.rfs.common.DocumentReindexer; import com.rfs.common.FileSystemRepo; import com.rfs.common.FileSystemSnapshotCreator; -import com.rfs.common.GlobalMetadata; -import com.rfs.common.IndexMetadata; import com.rfs.common.LuceneDocumentsReader; import com.rfs.common.OpenSearchClient; import com.rfs.common.RestClient; -import com.rfs.common.ShardMetadata; import com.rfs.common.SnapshotRepo; import com.rfs.common.SnapshotShardUnpacker; import com.rfs.common.SourceRepo; import com.rfs.framework.SearchClusterContainer; import com.rfs.http.SearchClusterRequests; import com.rfs.framework.PreloadedSearchClusterContainer; +import com.rfs.models.GlobalMetadata; +import com.rfs.models.IndexMetadata; +import com.rfs.models.ShardMetadata; import com.rfs.transformers.TransformFunctions; import com.rfs.transformers.Transformer; import com.rfs.version_es_7_10.ElasticsearchConstants_ES_7_10; @@ -89,7 +89,6 @@ public class FullTest { SearchClusterContainer.ES_V7_17 ); final static List TARGET_IMAGES = List.of( - SearchClusterContainer.OS_V1_3_16, SearchClusterContainer.OS_V2_14_0 ); public static final String SOURCE_SERVER_ALIAS = "source"; diff --git a/MetadataMigration/src/main/java/com/rfs/MetadataMigration.java 
b/MetadataMigration/src/main/java/com/rfs/MetadataMigration.java index 2d7ea54fe..7a875df9b 100644 --- a/MetadataMigration/src/main/java/com/rfs/MetadataMigration.java +++ b/MetadataMigration/src/main/java/com/rfs/MetadataMigration.java @@ -3,6 +3,7 @@ import com.beust.jcommander.JCommander; import com.beust.jcommander.Parameter; import com.beust.jcommander.ParameterException; +import com.beust.jcommander.ParametersDelegate; import java.nio.file.Path; import java.nio.file.Paths; @@ -11,14 +12,14 @@ import com.rfs.common.ClusterVersion; import com.rfs.common.ConnectionDetails; import com.rfs.common.FileSystemRepo; -import com.rfs.common.GlobalMetadata; -import com.rfs.common.IndexMetadata; import com.rfs.common.OpenSearchClient; import com.rfs.common.S3Repo; import com.rfs.common.S3Uri; import com.rfs.common.SnapshotRepo; import com.rfs.common.SourceRepo; import com.rfs.common.TryHandlePhaseFailure; +import com.rfs.models.GlobalMetadata; +import com.rfs.models.IndexMetadata; import com.rfs.transformers.TransformFunctions; import com.rfs.transformers.Transformer; import com.rfs.version_es_7_10.GlobalMetadataFactory_ES_7_10; @@ -49,18 +50,9 @@ public static class Args { @Parameter(names = {"--s3-region"}, description = "The AWS Region the S3 bucket is in, like: us-east-2", required = false) public String s3Region; - @Parameter(names = {"--target-host"}, description = "The target host and port (e.g. http://localhost:9200)", required = true) - public String targetHost; - - @Parameter(names = {"--target-username"}, description = "Optional. The target username; if not provided, will assume no auth on target", required = false) - public String targetUser = null; - - @Parameter(names = {"--target-password"}, description = "Optional. The target password; if not provided, will assume no auth on target", required = false) - public String targetPass = null; - - @Parameter(names = {"--target-insecure"}, description = "Allow untrusted SSL certificates for target", required = false) - public boolean targetInsecure = false; - + @ParametersDelegate + public ConnectionDetails.TargetArgs targetArgs; + @Parameter(names = {"--index-allowlist"}, description = ("Optional. List of index names to migrate" + " (e.g. 'logs_2024_01, logs_2024_02'). Default: all non-system indices (e.g. those not starting with '.')"), required = false) public List indexAllowlist = List.of(); @@ -103,16 +95,12 @@ public static void main(String[] args) throws Exception { final Path s3LocalDirPath = arguments.s3LocalDirPath != null ? 
Paths.get(arguments.s3LocalDirPath) : null; final String s3RepoUri = arguments.s3RepoUri; final String s3Region = arguments.s3Region; - final String targetHost = arguments.targetHost; - final String targetUser = arguments.targetUser; - final String targetPass = arguments.targetPass; final List indexAllowlist = arguments.indexAllowlist; - final boolean targetInsecure = arguments.targetInsecure; final List indexTemplateAllowlist = arguments.indexTemplateAllowlist; final List componentTemplateAllowlist = arguments.componentTemplateAllowlist; final int awarenessDimensionality = arguments.minNumberOfReplicas + 1; - final ConnectionDetails targetConnection = new ConnectionDetails(targetHost, targetUser, targetPass, targetInsecure); + final ConnectionDetails targetConnection = new ConnectionDetails(arguments.targetArgs); TryHandlePhaseFailure.executeWithTryCatch(() -> { diff --git a/RFS/build.gradle b/RFS/build.gradle index 1d3fde551..4f9b6a379 100644 --- a/RFS/build.gradle +++ b/RFS/build.gradle @@ -21,8 +21,8 @@ ext { dependencies { implementation project(":commonDependencyVersionConstraints") - implementation project(':coreUtilities') + implementation project(':transformation') implementation group: 'com.beust', name: 'jcommander' implementation group: 'com.fasterxml.jackson.core', name: 'jackson-databind' diff --git a/RFS/src/main/java/com/rfs/ReindexFromSnapshot.java b/RFS/src/main/java/com/rfs/ReindexFromSnapshot.java index dc704057e..4d59461c5 100644 --- a/RFS/src/main/java/com/rfs/ReindexFromSnapshot.java +++ b/RFS/src/main/java/com/rfs/ReindexFromSnapshot.java @@ -8,6 +8,7 @@ import com.beust.jcommander.JCommander; import com.beust.jcommander.Parameter; +import com.beust.jcommander.ParametersDelegate; import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.lucene.document.Document; import org.apache.logging.log4j.Level; @@ -16,6 +17,10 @@ import reactor.core.publisher.Flux; import com.rfs.common.*; +import com.rfs.models.GlobalMetadata; +import com.rfs.models.IndexMetadata; +import com.rfs.models.ShardMetadata; +import com.rfs.models.SnapshotMetadata; import com.rfs.transformers.*; import com.rfs.version_es_6_8.*; import com.rfs.version_es_7_10.*; @@ -46,23 +51,11 @@ public static class Args { @Parameter(names = {"-l", "--lucene-dir"}, description = "The absolute path to the directory where we'll put the Lucene docs", required = true) public String luceneDirPath; - @Parameter(names = {"--source-host"}, description = "The source host and port (e.g. http://localhost:9200)", required = false) - public String sourceHost = null; + @ParametersDelegate + public ConnectionDetails.SourceArgs sourceArgs; - @Parameter(names = {"--source-username"}, description = "The source username; if not provided, will assume no auth on source", required = false) - public String sourceUser = null; - - @Parameter(names = {"--source-password"}, description = "The source password; if not provided, will assume no auth on source", required = false) - public String sourcePass = null; - - @Parameter(names = {"--target-host"}, description = "The target host and port (e.g. 
http://localhost:9200)", required = true) - public String targetHost; - - @Parameter(names = {"--target-username"}, description = "The target username; if not provided, will assume no auth on target", required = false) - public String targetUser = null; - - @Parameter(names = {"--target-password"}, description = "The target password; if not provided, will assume no auth on target", required = false) - public String targetPass = null; + @ParametersDelegate + public ConnectionDetails.TargetArgs targetArgs; @Parameter(names = {"-s", "--source-version"}, description = "The source cluster's version (e.g. 'es_6_8')", required = true, converter = ClusterVersion.ArgsConverter.class) public ClusterVersion sourceVersion; @@ -108,12 +101,6 @@ public static void main(String[] args) throws InterruptedException { String s3RepoUri = arguments.s3RepoUri; String s3Region = arguments.s3Region; Path luceneDirPath = Paths.get(arguments.luceneDirPath); - String sourceHost = arguments.sourceHost; - String sourceUser = arguments.sourceUser; - String sourcePass = arguments.sourcePass; - String targetHost = arguments.targetHost; - String targetUser = arguments.targetUser; - String targetPass = arguments.targetPass; int awarenessDimensionality = arguments.minNumberOfReplicas + 1; ClusterVersion sourceVersion = arguments.sourceVersion; ClusterVersion targetVersion = arguments.targetVersion; @@ -125,8 +112,8 @@ public static void main(String[] args) throws InterruptedException { Logging.setLevel(logLevel); - ConnectionDetails sourceConnection = new ConnectionDetails(sourceHost, sourceUser, sourcePass); - ConnectionDetails targetConnection = new ConnectionDetails(targetHost, targetUser, targetPass); + ConnectionDetails sourceConnection = new ConnectionDetails(arguments.sourceArgs); + ConnectionDetails targetConnection = new ConnectionDetails(arguments.targetArgs); // Sanity checks if (!((sourceVersion == ClusterVersion.ES_6_8) || (sourceVersion == ClusterVersion.ES_7_10))) { @@ -145,9 +132,9 @@ public static void main(String[] args) throws InterruptedException { * * If you provide the source host, you still need to provide the S3 details or the snapshotLocalRepoDirPath to write the snapshot to. 
*/ - if (snapshotDirPath != null && (sourceHost != null || s3RepoUri != null)) { + if (snapshotDirPath != null && (arguments.sourceArgs.getHost() != null || s3RepoUri != null)) { throw new IllegalArgumentException("If you specify a local directory to take the snapshot from, you cannot specify a source host or S3 URI"); - } else if (sourceHost != null) { + } else if (arguments.sourceArgs.getHost() != null) { if (s3RepoUri == null && s3Region == null && s3LocalDirPath == null && snapshotLocalRepoDirPath == null) { throw new IllegalArgumentException( "If you specify a source host, you must also specify the S3 details or the snapshotLocalRepoDirPath to write the snapshot to as well"); @@ -175,7 +162,7 @@ public static void main(String[] args) throws InterruptedException { try { - if (sourceHost != null) { + if (arguments.sourceArgs.getHost() != null) { // ========================================================================================================== // Create the snapshot if necessary // ========================================================================================================== @@ -224,7 +211,7 @@ public static void main(String[] args) throws InterruptedException { logger.error("Snapshot not found"); return; } - SnapshotMetadata.Data snapshotMetadata; + SnapshotMetadata snapshotMetadata; if (sourceVersion == ClusterVersion.ES_6_8) { snapshotMetadata = new SnapshotMetadataFactory_ES_6_8().fromRepo(repo, repoDataProvider, snapshotName); } else { @@ -252,7 +239,7 @@ public static void main(String[] args) throws InterruptedException { // ========================================================================================================== logger.info("=================================================================="); logger.info("Attempting to read Global Metadata details..."); - GlobalMetadata.Data globalMetadata; + GlobalMetadata globalMetadata; if (sourceVersion == ClusterVersion.ES_6_8) { globalMetadata = new GlobalMetadataFactory_ES_6_8(repoDataProvider).fromRepo(snapshotName); } else { @@ -269,13 +256,11 @@ public static void main(String[] args) throws InterruptedException { OpenSearchClient targetClient = new OpenSearchClient(targetConnection); if (sourceVersion == ClusterVersion.ES_6_8) { GlobalMetadataCreator_OS_2_11 metadataCreator = new GlobalMetadataCreator_OS_2_11(targetClient, templateWhitelist, componentTemplateWhitelist, List.of()); - ObjectNode root = globalMetadata.toObjectNode(); - ObjectNode transformedRoot = transformer.transformGlobalMetadata(root); + var transformedRoot = transformer.transformGlobalMetadata(globalMetadata); metadataCreator.create(transformedRoot); } else if (sourceVersion == ClusterVersion.ES_7_10) { GlobalMetadataCreator_OS_2_11 metadataCreator = new GlobalMetadataCreator_OS_2_11(targetClient, List.of(), componentTemplateWhitelist, templateWhitelist); - ObjectNode root = globalMetadata.toObjectNode(); - ObjectNode transformedRoot = transformer.transformGlobalMetadata(root); + var transformedRoot = transformer.transformGlobalMetadata(globalMetadata); metadataCreator.create(transformedRoot); } } @@ -285,10 +270,10 @@ public static void main(String[] args) throws InterruptedException { // ========================================================================================================== logger.info("=================================================================="); logger.info("Attempting to read Index Metadata..."); - List indexMetadatas = new ArrayList<>(); + List indexMetadatas = new ArrayList<>(); for 
(SnapshotRepo.Index index : repoDataProvider.getIndicesInSnapshot(snapshotName)) { logger.info("Reading Index Metadata for index: " + index.getName()); - IndexMetadata.Data indexMetadata; + IndexMetadata indexMetadata; if (sourceVersion == ClusterVersion.ES_6_8) { indexMetadata = new IndexMetadataFactory_ES_6_8(repoDataProvider).fromRepo(snapshotName, index.getName()); } else { @@ -306,12 +291,11 @@ public static void main(String[] args) throws InterruptedException { logger.info("=================================================================="); logger.info("Attempting to recreate the indices..."); IndexCreator_OS_2_11 indexCreator = new IndexCreator_OS_2_11(targetClient); - for (IndexMetadata.Data indexMetadata : indexMetadatas) { + for (IndexMetadata indexMetadata : indexMetadatas) { String reindexName = indexMetadata.getName() + indexSuffix; logger.info("Recreating index " + indexMetadata.getName() + " as " + reindexName + " on target..."); - ObjectNode root = indexMetadata.toObjectNode(); - ObjectNode transformedRoot = transformer.transformIndexMetadata(root); + var transformedRoot = transformer.transformIndexMetadata(indexMetadata); indexCreator.create(transformedRoot, reindexName, indexMetadata.getId()); } } @@ -332,13 +316,13 @@ public static void main(String[] args) throws InterruptedException { DefaultSourceRepoAccessor repoAccessor = new DefaultSourceRepoAccessor(repo); SnapshotShardUnpacker.Factory unpackerFactory = new SnapshotShardUnpacker.Factory(repoAccessor,luceneDirPath, bufferSize); - for (IndexMetadata.Data indexMetadata : indexMetadatas) { + for (IndexMetadata indexMetadata : indexMetadatas) { logger.info("Processing index: " + indexMetadata.getName()); for (int shardId = 0; shardId < indexMetadata.getNumberOfShards(); shardId++) { logger.info("=== Shard ID: " + shardId + " ==="); // Get the shard metadata - ShardMetadata.Data shardMetadata; + ShardMetadata shardMetadata; if (sourceVersion == ClusterVersion.ES_6_8) { shardMetadata = new ShardMetadataFactory_ES_6_8(repoDataProvider).fromRepo(snapshotName, indexMetadata.getName(), shardId); } else { @@ -362,7 +346,7 @@ public static void main(String[] args) throws InterruptedException { LuceneDocumentsReader reader = new LuceneDocumentsReader(luceneDirPath); DocumentReindexer reindexer = new DocumentReindexer(targetClient); - for (IndexMetadata.Data indexMetadata : indexMetadatas) { + for (IndexMetadata indexMetadata : indexMetadatas) { for (int shardId = 0; shardId < indexMetadata.getNumberOfShards(); shardId++) { logger.info("=== Index Id: " + indexMetadata.getName() + ", Shard ID: " + shardId + " ==="); diff --git a/RFS/src/main/java/com/rfs/RunRfsWorker.java b/RFS/src/main/java/com/rfs/RunRfsWorker.java index 965d856aa..94823d833 100644 --- a/RFS/src/main/java/com/rfs/RunRfsWorker.java +++ b/RFS/src/main/java/com/rfs/RunRfsWorker.java @@ -2,6 +2,7 @@ import com.beust.jcommander.JCommander; import com.beust.jcommander.Parameter; +import com.beust.jcommander.ParametersDelegate; import java.net.URI; import java.nio.file.Path; @@ -22,16 +23,17 @@ import com.rfs.common.ClusterVersion; import com.rfs.common.ConnectionDetails; import com.rfs.common.DocumentReindexer; -import com.rfs.common.GlobalMetadata; -import com.rfs.common.IndexMetadata; import com.rfs.common.Logging; import com.rfs.common.LuceneDocumentsReader; import com.rfs.common.OpenSearchClient; import com.rfs.common.S3Uri; -import com.rfs.common.ShardMetadata; import com.rfs.common.S3Repo; import com.rfs.common.SnapshotCreator; import 
com.rfs.common.SourceRepo; +import com.rfs.common.TryHandlePhaseFailure; +import com.rfs.models.GlobalMetadata; +import com.rfs.models.IndexMetadata; +import com.rfs.models.ShardMetadata; import com.rfs.common.S3SnapshotCreator; import com.rfs.common.SnapshotRepo; import com.rfs.common.SnapshotShardUnpacker; @@ -68,24 +70,12 @@ public static class Args { @Parameter(names = {"--lucene-dir"}, description = "The absolute path to the directory where we'll put the Lucene docs", required = true) public String luceneDirPath; + + @ParametersDelegate + public ConnectionDetails.SourceArgs sourceArgs; - @Parameter(names = {"--source-host"}, description = "The source host and port (e.g. http://localhost:9200)", required = true) - public String sourceHost; - - @Parameter(names = {"--source-username"}, description = "Optional. The source username; if not provided, will assume no auth on source", required = false) - public String sourceUser = null; - - @Parameter(names = {"--source-password"}, description = "Optional. The source password; if not provided, will assume no auth on source", required = false) - public String sourcePass = null; - - @Parameter(names = {"--target-host"}, description = "The target host and port (e.g. http://localhost:9200)", required = true) - public String targetHost; - - @Parameter(names = {"--target-username"}, description = "Optional. The target username; if not provided, will assume no auth on target", required = false) - public String targetUser = null; - - @Parameter(names = {"--target-password"}, description = "Optional. The target password; if not provided, will assume no auth on target", required = false) - public String targetPass = null; + @ParametersDelegate + public ConnectionDetails.TargetArgs targetArgs; @Parameter(names = {"--index-allowlist"}, description = ("Optional. List of index names to migrate" + " (e.g. 'logs_2024_01, logs_2024_02'). 
Default: all indices"), required = false) @@ -126,12 +116,6 @@ public static void main(String[] args) throws Exception { final String s3RepoUri = arguments.s3RepoUri; final String s3Region = arguments.s3Region; final Path luceneDirPath = Paths.get(arguments.luceneDirPath); - final String sourceHost = arguments.sourceHost; - final String sourceUser = arguments.sourceUser; - final String sourcePass = arguments.sourcePass; - final String targetHost = arguments.targetHost; - final String targetUser = arguments.targetUser; - final String targetPass = arguments.targetPass; final List indexAllowlist = arguments.indexAllowlist; final List indexTemplateAllowlist = arguments.indexTemplateAllowlist; final List componentTemplateAllowlist = arguments.componentTemplateAllowlist; @@ -141,8 +125,8 @@ public static void main(String[] args) throws Exception { Logging.setLevel(logLevel); - final ConnectionDetails sourceConnection = new ConnectionDetails(sourceHost, sourceUser, sourcePass); - final ConnectionDetails targetConnection = new ConnectionDetails(targetHost, targetUser, targetPass); + final ConnectionDetails sourceConnection = new ConnectionDetails(arguments.sourceArgs); + final ConnectionDetails targetConnection = new ConnectionDetails(arguments.targetArgs); try (var processManager = new LeaseExpireTrigger(workItemId -> { log.error("terminating RunRfsWorker because its lease has expired for " + workItemId); @@ -171,7 +155,7 @@ public static void main(String[] args) throws Exception { var unpackerFactory = new SnapshotShardUnpacker.Factory(repoAccessor, luceneDirPath, ElasticsearchConstants_ES_7_10.BUFFER_SIZE_IN_BYTES); DocumentReindexer reindexer = new DocumentReindexer(targetClient); - var workCoordinator = new OpenSearchWorkCoordinator(new ApacheHttpClient(new URI(targetHost)), + var workCoordinator = new OpenSearchWorkCoordinator(new ApacheHttpClient(new URI(arguments.targetArgs.getHost())), 5, UUID.randomUUID().toString()); var scopedWorkCoordinator = new ScopedWorkCoordinator(workCoordinator, processManager); new ShardWorkPreparer().run(scopedWorkCoordinator, indexMetadataFactory, snapshotName, indexAllowlist); diff --git a/RFS/src/main/java/com/rfs/common/ByteArrayIndexInput.java b/RFS/src/main/java/com/rfs/common/ByteArrayIndexInput.java index cf7b7059b..f8ffd134c 100644 --- a/RFS/src/main/java/com/rfs/common/ByteArrayIndexInput.java +++ b/RFS/src/main/java/com/rfs/common/ByteArrayIndexInput.java @@ -32,6 +32,7 @@ public ByteArrayIndexInput(String resourceDesc, byte[] bytes, int offset, int le @Override public void close() throws IOException { + // Empty in the original implementation, and seems to work } @Override diff --git a/RFS/src/main/java/com/rfs/common/ConnectionDetails.java b/RFS/src/main/java/com/rfs/common/ConnectionDetails.java index 54a0d9715..74a92480f 100644 --- a/RFS/src/main/java/com/rfs/common/ConnectionDetails.java +++ b/RFS/src/main/java/com/rfs/common/ConnectionDetails.java @@ -3,16 +3,20 @@ import java.net.URI; import java.net.URISyntaxException; +import com.beust.jcommander.Parameter; + +import lombok.Getter; + /** * Stores the connection details (assuming basic auth) for an Elasticsearch/OpenSearch cluster */ public class ConnectionDetails { - public static enum AuthType { + public enum AuthType { BASIC, NONE } - public static enum Protocol { + public enum Protocol { HTTP, HTTPS } @@ -26,6 +30,10 @@ public static enum Protocol { public final AuthType authType; public final boolean insecure; + public ConnectionDetails(Params params) { + this(params.getHost(), 
params.getUsername(), params.getPassword(), params.isInsecure()); + } + public ConnectionDetails(String url, String username, String password) { this(url, username, password, false); } @@ -70,4 +78,41 @@ public ConnectionDetails(String url, String username, String password, boolean i } } } + + public static interface Params { + String getHost(); + String getUsername(); + String getPassword(); + boolean isInsecure(); + } + + @Getter + public static class TargetArgs implements Params { + @Parameter(names = {"--target-host"}, description = "The target host and port (e.g. http://localhost:9200)", required = true) + public String host; + + @Parameter(names = {"--target-username"}, description = "Optional. The target username; if not provided, will assume no auth on target", required = false) + public String username = null; + + @Parameter(names = {"--target-password"}, description = "Optional. The target password; if not provided, will assume no auth on target", required = false) + public String password = null; + + @Parameter(names = {"--target-insecure"}, description = "Allow untrusted SSL certificates for target", required = false) + public boolean insecure = false; + } + + @Getter + public static class SourceArgs implements Params { + @Parameter(names = {"--source-host"}, description = "The source host and port (e.g. http://localhost:9200)", required = false) + public String host = null; + + @Parameter(names = {"--source-username"}, description = "The source username; if not provided, will assume no auth on source", required = false) + public String username = null; + + @Parameter(names = {"--source-password"}, description = "The source password; if not provided, will assume no auth on source", required = false) + public String password = null; + + @Parameter(names = {"--source-insecure"}, description = "Allow untrusted SSL certificates for source", required = false) + public boolean insecure = false; + } } diff --git a/RFS/src/main/java/com/rfs/common/DocumentReindexer.java b/RFS/src/main/java/com/rfs/common/DocumentReindexer.java index 27591803a..5c7b48d10 100644 --- a/RFS/src/main/java/com/rfs/common/DocumentReindexer.java +++ b/RFS/src/main/java/com/rfs/common/DocumentReindexer.java @@ -22,7 +22,7 @@ public Mono reindex(String indexName, Flux documentStream) { return documentStream .map(this::convertDocumentToBulkSection) // Convert each Document to part of a bulk operation .buffer(MAX_BATCH_SIZE) // Collect until you hit the batch size - .doOnNext(bulk -> logger.info(bulk.size() + " documents in current bulk request")) + .doOnNext(bulk -> logger.info("{} documents in current bulk request", bulk.size())) .map(this::convertToBulkRequestBody) // Assemble the bulk request body from the parts .flatMap(bulkJson -> client.sendBulkRequest(indexName, bulkJson) // Send the request .doOnSuccess(unused -> logger.debug("Batch succeeded")) @@ -50,9 +50,9 @@ private String convertToBulkRequestBody(List bulkSections) { return builder.toString(); } - public void refreshAllDocuments(ConnectionDetails targetConnection) throws Exception { + public void refreshAllDocuments(ConnectionDetails targetConnection) { // Send the request - OpenSearchClient client = new OpenSearchClient(targetConnection); - client.refresh(); + OpenSearchClient refreshClient = new OpenSearchClient(targetConnection); + refreshClient.refresh(); } } diff --git a/RFS/src/main/java/com/rfs/common/EphemeralSourceRepoAccessor.java b/RFS/src/main/java/com/rfs/common/EphemeralSourceRepoAccessor.java index 43e9d23e0..edceb2c02 100644 --- 
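The @ParametersDelegate refactor above lets each CLI entrypoint reuse the --source-*/--target-* flags declared once on ConnectionDetails.SourceArgs and ConnectionDetails.TargetArgs, then build a ConnectionDetails through the new Params-based constructor. A minimal sketch of that wiring, using a hypothetical DemoArgs holder (JCommander expects delegate fields to be non-null, so the sketch instantiates them up front):

```java
import com.beust.jcommander.JCommander;
import com.beust.jcommander.ParametersDelegate;

import com.rfs.common.ConnectionDetails;

public class DemoArgs {
    // JCommander populates these nested objects from the --source-* / --target-* flags.
    @ParametersDelegate
    public ConnectionDetails.SourceArgs sourceArgs = new ConnectionDetails.SourceArgs();

    @ParametersDelegate
    public ConnectionDetails.TargetArgs targetArgs = new ConnectionDetails.TargetArgs();

    public static void main(String[] rawArgs) {
        DemoArgs args = new DemoArgs();
        JCommander.newBuilder().addObject(args).build().parse(rawArgs);

        // Both connections now flow through the single Params-based constructor.
        ConnectionDetails source = new ConnectionDetails(args.sourceArgs);
        ConnectionDetails target = new ConnectionDetails(args.targetArgs);
        System.out.println("source auth: " + source.authType + ", target auth: " + target.authType);
    }
}
```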
a/RFS/src/main/java/com/rfs/common/EphemeralSourceRepoAccessor.java +++ b/RFS/src/main/java/com/rfs/common/EphemeralSourceRepoAccessor.java @@ -45,7 +45,7 @@ public void close() throws IOException { try { super.close(); } finally { - logger.info("Deleting local file: " + filePath.toString()); + logger.info("Deleting local file: {}", filePath); logger.warn("See: https://opensearch.atlassian.net/browse/MIGRATIONS-1786"); Files.deleteIfExists(filePath); } diff --git a/RFS/src/main/java/com/rfs/common/FileSystemRepo.java b/RFS/src/main/java/com/rfs/common/FileSystemRepo.java index 54aaa0a3e..782c293dc 100644 --- a/RFS/src/main/java/com/rfs/common/FileSystemRepo.java +++ b/RFS/src/main/java/com/rfs/common/FileSystemRepo.java @@ -7,6 +7,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; +import com.rfs.models.ShardMetadata; + public class FileSystemRepo implements SourceRepo { private final Path repoRootDir; @@ -55,44 +57,38 @@ public Path getSnapshotRepoDataFilePath() { @Override public Path getGlobalMetadataFilePath(String snapshotId) { - String filePath = getRepoRootDir().toString() + "/meta-" + snapshotId + ".dat"; - return Path.of(filePath); + return getRepoRootDir().resolve("meta-" + snapshotId + ".dat"); } @Override public Path getSnapshotMetadataFilePath(String snapshotId) { - String filePath = getRepoRootDir().toString() + "/snap-" + snapshotId + ".dat"; - return Path.of(filePath); + return getRepoRootDir().resolve("snap-" + snapshotId + ".dat"); } @Override public Path getIndexMetadataFilePath(String indexId, String indexFileId) { - String filePath = getRepoRootDir().toString() + "/indices/" + indexId + "/meta-" + indexFileId + ".dat"; - return Path.of(filePath); + return getRepoRootDir().resolve("indices").resolve(indexId).resolve("meta-" + indexFileId + ".dat"); } @Override public Path getShardDirPath(String indexId, int shardId) { - String shardDirPath = getRepoRootDir().toString() + "/indices/" + indexId + "/" + shardId; + String shardDirPath = getRepoRootDir().resolve("indices").resolve(indexId).resolve(String.valueOf(shardId)).toString(); return Path.of(shardDirPath); } @Override public Path getShardMetadataFilePath(String snapshotId, String indexId, int shardId) { - Path shardDirPath = getShardDirPath(indexId, shardId); - Path filePath = shardDirPath.resolve("snap-" + snapshotId + ".dat"); - return filePath; + return getShardDirPath(indexId, shardId).resolve("snap-" + snapshotId + ".dat"); } @Override public Path getBlobFilePath(String indexId, int shardId, String blobName) { Path shardDirPath = getShardDirPath(indexId, shardId); - Path filePath = shardDirPath.resolve(blobName); - return filePath; + return shardDirPath.resolve(blobName); } @Override - public void prepBlobFiles(ShardMetadata.Data shardMetadata) { + public void prepBlobFiles(ShardMetadata shardMetadata) { // No work necessary for local filesystem } diff --git a/RFS/src/main/java/com/rfs/common/FilterScheme.java b/RFS/src/main/java/com/rfs/common/FilterScheme.java index 268cc3630..80cba8fb9 100644 --- a/RFS/src/main/java/com/rfs/common/FilterScheme.java +++ b/RFS/src/main/java/com/rfs/common/FilterScheme.java @@ -5,6 +5,7 @@ import java.util.function.Predicate; public class FilterScheme { + private FilterScheme() {} public static Predicate filterIndicesByAllowList(List indexAllowlist, BiConsumer indexNameAcceptanceObserver) { return index -> { diff --git a/RFS/src/main/java/com/rfs/common/Logging.java b/RFS/src/main/java/com/rfs/common/Logging.java index ba7ef2ed3..f5201ba2d 100644 --- 
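The FileSystemRepo changes above also replace hand-rolled string concatenation with Path.resolve, which keeps separators platform-correct and avoids round-tripping through strings. A tiny illustration of the pattern (the repository root and ids are made up):

```java
import java.nio.file.Path;

public class ResolveDemo {
    public static void main(String[] args) {
        Path repoRoot = Path.of("/tmp/snapshot-repo");

        // Same result as concatenating "/indices/" + indexId + "/meta-" + fileId + ".dat",
        // but built segment by segment on the Path itself.
        Path indexMetadataFile = repoRoot.resolve("indices")
                                         .resolve("someIndexId")
                                         .resolve("meta-someFileId.dat");
        System.out.println(indexMetadataFile);
    }
}
```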
a/RFS/src/main/java/com/rfs/common/Logging.java +++ b/RFS/src/main/java/com/rfs/common/Logging.java @@ -8,6 +8,8 @@ import org.apache.logging.log4j.Level; public class Logging { + private Logging() {} + public static void setLevel(Level level) { LoggerContext ctx = (LoggerContext) LogManager.getContext(false); Configuration config = ctx.getConfiguration(); diff --git a/RFS/src/main/java/com/rfs/common/LuceneDocumentsReader.java b/RFS/src/main/java/com/rfs/common/LuceneDocumentsReader.java index 6d882ae79..f375daf09 100644 --- a/RFS/src/main/java/com/rfs/common/LuceneDocumentsReader.java +++ b/RFS/src/main/java/com/rfs/common/LuceneDocumentsReader.java @@ -2,12 +2,6 @@ import java.io.IOException; import java.nio.file.Path; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; - -import lombok.AllArgsConstructor; -import lombok.Getter; -import lombok.SneakyThrows; import lombok.extern.slf4j.Slf4j; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; @@ -57,7 +51,7 @@ protected IndexReader openIndexReader(Path indexDirectoryPath) throws IOExceptio protected Document getDocument(IndexReader reader, int docId) { try { Document document = reader.document(docId); - BytesRef source_bytes = document.getBinaryValue("_source"); + BytesRef sourceBytes = document.getBinaryValue("_source"); String id; try { id = Uid.decodeId(document.getBinaryValue("_id").bytes); @@ -68,7 +62,7 @@ protected Document getDocument(IndexReader reader, int docId) { log.error(errorMessage.toString()); return null; // Skip documents with missing id } - if (source_bytes == null || source_bytes.bytes.length == 0) { + if (sourceBytes == null || sourceBytes.bytes.length == 0) { log.warn("Document " + id + " is deleted or doesn't have the _source field enabled"); return null; // Skip these too } diff --git a/RFS/src/main/java/com/rfs/common/OpenSearchClient.java b/RFS/src/main/java/com/rfs/common/OpenSearchClient.java index e15890c31..ad020efee 100644 --- a/RFS/src/main/java/com/rfs/common/OpenSearchClient.java +++ b/RFS/src/main/java/com/rfs/common/OpenSearchClient.java @@ -2,8 +2,6 @@ import java.net.HttpURLConnection; import java.time.Duration; -import java.util.ArrayList; -import java.util.List; import java.util.Optional; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -13,9 +11,7 @@ import org.apache.logging.log4j.Logger; import com.fasterxml.jackson.databind.DeserializationFeature; -import com.fasterxml.jackson.databind.JavaType; import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; import reactor.core.publisher.Mono; diff --git a/RFS/src/main/java/com/rfs/common/PartSliceStream.java b/RFS/src/main/java/com/rfs/common/PartSliceStream.java index 230e5b249..8f5e2a3e1 100644 --- a/RFS/src/main/java/com/rfs/common/PartSliceStream.java +++ b/RFS/src/main/java/com/rfs/common/PartSliceStream.java @@ -3,6 +3,9 @@ import java.io.IOException; import java.io.InputStream; +import com.rfs.models.ShardFileInfo; +import com.rfs.models.ShardMetadata; + /** * Taken from Elasticsearch 6.8, combining the SlicedInputStream and PartSliceStream classes with our special sauce * See: https://github.com/elastic/elasticsearch/blob/6.8/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/SlicedInputStream.java @@ -11,14 +14,14 @@ public class PartSliceStream extends InputStream { private final SourceRepoAccessor repoAccessor; - private 
final ShardMetadata.FileInfo fileMetadata; + private final ShardFileInfo fileMetadata; private final String indexId; private final int shardId; private long slice = 0; private InputStream currentStream; private boolean initialized = false; - public PartSliceStream(SourceRepoAccessor repoAccessor, ShardMetadata.FileInfo fileMetadata, String indexId, int shardId) { + public PartSliceStream(SourceRepoAccessor repoAccessor, ShardFileInfo fileMetadata, String indexId, int shardId) { this.repoAccessor = repoAccessor; this.fileMetadata = fileMetadata; this.indexId = indexId; @@ -30,7 +33,7 @@ protected InputStream openSlice(long slice) { } private InputStream nextStream() throws IOException { - assert initialized == false || currentStream != null; + assert !initialized || currentStream != null; initialized = true; if (currentStream != null) { diff --git a/RFS/src/main/java/com/rfs/common/S3Repo.java b/RFS/src/main/java/com/rfs/common/S3Repo.java index 3085b3bdd..45471d6c9 100644 --- a/RFS/src/main/java/com/rfs/common/S3Repo.java +++ b/RFS/src/main/java/com/rfs/common/S3Repo.java @@ -7,6 +7,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import com.rfs.models.ShardFileInfo; +import com.rfs.models.ShardMetadata; + import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import software.amazon.awssdk.core.async.AsyncResponseTransformer; import software.amazon.awssdk.regions.Region; @@ -76,11 +79,11 @@ private void ensureFileExistsLocally(S3Uri s3Uri, Path localPath) { ensureS3LocalDirectoryExists(localPath.getParent()); if (doesFileExistLocally(localPath)) { - logger.debug("File already exists locally: " + localPath); + logger.debug("File already exists locally: {}", localPath); return; } - logger.info("Downloading file from S3: " + s3Uri.uri + " to " + localPath); + logger.info("Downloading file from S3: {} to {}", s3Uri.uri, localPath); GetObjectRequest getObjectRequest = GetObjectRequest.builder() .bucket(s3Uri.bucketName) .key(s3Uri.key) @@ -179,7 +182,7 @@ public Path getBlobFilePath(String indexId, int shardId, String blobName) { } @Override - public void prepBlobFiles(ShardMetadata.Data shardMetadata) { + public void prepBlobFiles(ShardMetadata shardMetadata) { S3TransferManager transferManager = S3TransferManager.builder().s3Client(s3Client).build(); Path shardDirPath = getShardDirPath(shardMetadata.getIndexId(), shardMetadata.getShardId()); @@ -187,7 +190,7 @@ public void prepBlobFiles(ShardMetadata.Data shardMetadata) { String blobFilesS3Prefix = s3RepoUri.key + "indices/" + shardMetadata.getIndexId() + "/" + shardMetadata.getShardId() + "/"; - logger.info("Downloading blob files from S3: s3://" + s3RepoUri.bucketName + "/" + blobFilesS3Prefix + " to " + shardDirPath); + logger.info("Downloading blob files from S3: s3://{}/{} to {}", s3RepoUri.bucketName, blobFilesS3Prefix, shardDirPath); DirectoryDownload directoryDownload = transferManager.downloadDirectory( DownloadDirectoryRequest.builder() .destination(shardDirPath) diff --git a/RFS/src/main/java/com/rfs/common/SnapshotCreator.java b/RFS/src/main/java/com/rfs/common/SnapshotCreator.java index 0f929c727..27ca09ef4 100644 --- a/RFS/src/main/java/com/rfs/common/SnapshotCreator.java +++ b/RFS/src/main/java/com/rfs/common/SnapshotCreator.java @@ -57,9 +57,9 @@ public void createSnapshot() { // Create the snapshot; idempotent operation try { client.createSnapshot(getRepoName(), snapshotName, body); - logger.info("Snapshot " + snapshotName + " creation initiated"); +
logger.info("Snapshot {} creation initiated", snapshotName); } catch (Exception e) { - logger.error("Snapshot " + snapshotName + " creation failed", e); + logger.error("Snapshot {} creation failed", snapshotName, e); throw new SnapshotCreationFailed(snapshotName); } } @@ -74,7 +74,7 @@ public boolean isSnapshotFinished() { } if (response.isEmpty()) { - logger.error("Snapshot " + snapshotName + " does not exist"); + logger.error("Snapshot {} does not exist", snapshotName); throw new SnapshotDoesNotExist(snapshotName); } @@ -88,7 +88,7 @@ public boolean isSnapshotFinished() { } else if (state.equals("IN_PROGRESS")) { return false; } else { - logger.error("Snapshot " + snapshotName + " has failed with state " + state); + logger.error("Snapshot {} has failed with state {}", snapshotName, state); throw new SnapshotCreationFailed(snapshotName); } } diff --git a/RFS/src/main/java/com/rfs/common/SnapshotRepo.java b/RFS/src/main/java/com/rfs/common/SnapshotRepo.java index f36f7561d..36f7f02e5 100644 --- a/RFS/src/main/java/com/rfs/common/SnapshotRepo.java +++ b/RFS/src/main/java/com/rfs/common/SnapshotRepo.java @@ -3,6 +3,7 @@ import java.util.List; public class SnapshotRepo { + private SnapshotRepo() {} /** * Defines the behavior required to surface a snapshot repo's metadata diff --git a/RFS/src/main/java/com/rfs/common/SnapshotShardUnpacker.java b/RFS/src/main/java/com/rfs/common/SnapshotShardUnpacker.java index 1c7255f5e..98c021fe4 100644 --- a/RFS/src/main/java/com/rfs/common/SnapshotShardUnpacker.java +++ b/RFS/src/main/java/com/rfs/common/SnapshotShardUnpacker.java @@ -15,12 +15,15 @@ import org.apache.lucene.store.NativeFSLockFactory; import org.apache.lucene.util.BytesRef; +import com.rfs.models.ShardFileInfo; +import com.rfs.models.ShardMetadata; + @RequiredArgsConstructor public class SnapshotShardUnpacker { private static final Logger logger = LogManager.getLogger(SnapshotShardUnpacker.class); private final SourceRepoAccessor repoAccessor; private final Path luceneFilesBasePath; - private final ShardMetadata.Data shardMetadata; + private final ShardMetadata shardMetadata; private final int bufferSize; @RequiredArgsConstructor @@ -29,7 +32,7 @@ public static class Factory { private final Path luceneFilesBasePath; private final int bufferSize; - public SnapshotShardUnpacker create(ShardMetadata.Data shardMetadata) { + public SnapshotShardUnpacker create(ShardMetadata shardMetadata) { return new SnapshotShardUnpacker(repoAccessor, luceneFilesBasePath, shardMetadata, bufferSize); } } @@ -45,25 +48,25 @@ public Path unpack() { // Create the directory for the shard's lucene files Path luceneIndexDir = Paths.get(luceneFilesBasePath + "/" + shardMetadata.getIndexName() + "/" + shardMetadata.getShardId()); Files.createDirectories(luceneIndexDir); - final FSDirectory primaryDirectory = FSDirectory.open(luceneIndexDir, lockFactory); - - for (ShardMetadata.FileInfo fileMetadata : shardMetadata.getFiles()) { - logger.info("Unpacking - Blob Name: " + fileMetadata.getName() + ", Lucene Name: " + fileMetadata.getPhysicalName()); - try (IndexOutput indexOutput = primaryDirectory.createOutput(fileMetadata.getPhysicalName(), IOContext.DEFAULT);){ - if (fileMetadata.getName().startsWith("v__")) { - final BytesRef hash = fileMetadata.getMetaHash(); - indexOutput.writeBytes(hash.bytes, hash.offset, hash.length); - } else { - try (InputStream stream = new PartSliceStream(repoAccessor, fileMetadata, shardMetadata.getIndexId(), shardMetadata.getShardId())) { - final byte[] buffer = new 
byte[Math.toIntExact(Math.min(bufferSize, fileMetadata.getLength()))]; - int length; - while ((length = stream.read(buffer)) > 0) { - indexOutput.writeBytes(buffer, 0, length); + try (FSDirectory primaryDirectory = FSDirectory.open(luceneIndexDir, lockFactory)) { + for (ShardFileInfo fileMetadata : shardMetadata.getFiles()) { + logger.info("Unpacking - Blob Name: {}, Lucene Name: {}", fileMetadata.getName(), fileMetadata.getPhysicalName()); + try (IndexOutput indexOutput = primaryDirectory.createOutput(fileMetadata.getPhysicalName(), IOContext.DEFAULT);){ + if (fileMetadata.getName().startsWith("v__")) { + final BytesRef hash = fileMetadata.getMetaHash(); + indexOutput.writeBytes(hash.bytes, hash.offset, hash.length); + } else { + try (InputStream stream = new PartSliceStream(repoAccessor, fileMetadata, shardMetadata.getIndexId(), shardMetadata.getShardId())) { + final byte[] buffer = new byte[Math.toIntExact(Math.min(bufferSize, fileMetadata.getLength()))]; + int length; + while ((length = stream.read(buffer)) > 0) { + indexOutput.writeBytes(buffer, 0, length); + } } } } } - } + } return luceneIndexDir; } catch (Exception e) { throw new CouldNotUnpackShard("Could not unpack shard: Index " + shardMetadata.getIndexId() + ", Shard " + shardMetadata.getShardId(), e); diff --git a/RFS/src/main/java/com/rfs/common/SourceRepo.java b/RFS/src/main/java/com/rfs/common/SourceRepo.java index a02a004e0..80240e147 100644 --- a/RFS/src/main/java/com/rfs/common/SourceRepo.java +++ b/RFS/src/main/java/com/rfs/common/SourceRepo.java @@ -2,6 +2,8 @@ import java.nio.file.Path; +import com.rfs.models.ShardMetadata; + public interface SourceRepo { public Path getRepoRootDir(); public Path getSnapshotRepoDataFilePath(); @@ -16,5 +18,5 @@ public interface SourceRepo { * Performs any work necessary to facilitate access to a given shard's blob files. Depending on the implementation, * may involve no work at all, bulk downloading objects from a remote source, or any other operations. 
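The SnapshotShardUnpacker change above wraps the Lucene FSDirectory in try-with-resources so the directory handle is released even if unpacking a file fails. A stripped-down sketch of the same resource-management shape (directory and file names are placeholders):

```java
import java.nio.file.Files;
import java.nio.file.Path;

import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;

public class UnpackDemo {
    public static void main(String[] args) throws Exception {
        Path luceneIndexDir = Path.of("/tmp/lucene-demo");
        Files.createDirectories(luceneIndexDir);

        // FSDirectory and IndexOutput are both AutoCloseable; nesting the blocks
        // guarantees the output is closed before the directory.
        try (FSDirectory dir = FSDirectory.open(luceneIndexDir)) {
            try (IndexOutput out = dir.createOutput("_demo.bin", IOContext.DEFAULT)) {
                out.writeBytes(new byte[] {1, 2, 3}, 0, 3);
            }
        }
    }
}
```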
*/ - public void prepBlobFiles(ShardMetadata.Data shardMetadata); + public void prepBlobFiles(ShardMetadata shardMetadata); } diff --git a/RFS/src/main/java/com/rfs/common/SourceRepoAccessor.java b/RFS/src/main/java/com/rfs/common/SourceRepoAccessor.java index c3a7d1e77..414c60f70 100644 --- a/RFS/src/main/java/com/rfs/common/SourceRepoAccessor.java +++ b/RFS/src/main/java/com/rfs/common/SourceRepoAccessor.java @@ -3,6 +3,8 @@ import java.io.InputStream; import java.nio.file.Path; +import com.rfs.models.ShardMetadata; + // TODO: find a better approach to this (see https://opensearch.atlassian.net/browse/MIGRATIONS-1786) public abstract class SourceRepoAccessor { private final SourceRepo repo; @@ -43,7 +45,7 @@ public InputStream getBlobFile(String indexId, int shardId, String blobName){ return load(repo.getBlobFilePath(indexId, shardId, blobName)); } - public void prepBlobFiles(ShardMetadata.Data shardMetadata){ + public void prepBlobFiles(ShardMetadata shardMetadata){ repo.prepBlobFiles(shardMetadata); } diff --git a/RFS/src/main/java/com/rfs/common/TryHandlePhaseFailure.java b/RFS/src/main/java/com/rfs/common/TryHandlePhaseFailure.java index f217de844..120b22bce 100644 --- a/RFS/src/main/java/com/rfs/common/TryHandlePhaseFailure.java +++ b/RFS/src/main/java/com/rfs/common/TryHandlePhaseFailure.java @@ -1,8 +1,6 @@ package com.rfs.common; import lombok.extern.slf4j.Slf4j; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; @Slf4j public class TryHandlePhaseFailure { diff --git a/RFS/src/main/java/com/rfs/common/Uid.java b/RFS/src/main/java/com/rfs/common/Uid.java index b7453b9b2..52aadd0eb 100644 --- a/RFS/src/main/java/com/rfs/common/Uid.java +++ b/RFS/src/main/java/com/rfs/common/Uid.java @@ -10,6 +10,8 @@ * See: https://github.com/elastic/elasticsearch/blob/6.8/server/src/main/java/org/elasticsearch/index/mapper/Uid.java#L32 */ public class Uid { + private Uid() {} + public static final int UTF8 = 0xff; public static final int NUMERIC = 0xfe; public static final int BASE64_ESCAPE = 0xfd; @@ -41,7 +43,7 @@ private static String decodeBase64Id(byte[] idBytes, int offset, int length) { assert Byte.toUnsignedInt(idBytes[offset]) <= BASE64_ESCAPE; if (Byte.toUnsignedInt(idBytes[offset]) == BASE64_ESCAPE) { idBytes = Arrays.copyOfRange(idBytes, offset + 1, offset + length); - } else if ((idBytes.length == length && offset == 0) == false) { // no need to copy if it's not a slice + } else if (!(idBytes.length == length && offset == 0)) { // no need to copy if it's not a slice idBytes = Arrays.copyOfRange(idBytes, offset, offset + length); } return Base64.getUrlEncoder().withoutPadding().encodeToString(idBytes); diff --git a/RFS/src/main/java/com/rfs/common/GlobalMetadata.java b/RFS/src/main/java/com/rfs/models/GlobalMetadata.java similarity index 91% rename from RFS/src/main/java/com/rfs/common/GlobalMetadata.java rename to RFS/src/main/java/com/rfs/models/GlobalMetadata.java index acfc72ea5..f44154042 100644 --- a/RFS/src/main/java/com/rfs/common/GlobalMetadata.java +++ b/RFS/src/main/java/com/rfs/models/GlobalMetadata.java @@ -1,4 +1,4 @@ -package com.rfs.common; +package com.rfs.models; import org.apache.lucene.codecs.CodecUtil; @@ -11,9 +11,19 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.dataformat.smile.SmileFactory; +import com.rfs.common.ByteArrayIndexInput; +import com.rfs.common.RfsException; +import com.rfs.common.SnapshotRepo; -public class GlobalMetadata { 
+public interface GlobalMetadata { + /** + * Defines the behavior expected of an object that will surface the global metadata of a snapshot + * See: https://github.com/elastic/elasticsearch/blob/v7.10.2/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java#L1622 + * See: https://github.com/elastic/elasticsearch/blob/v6.8.23/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java#L1214 + */ + public ObjectNode toObjectNode(); + /** * Defines the behavior required to read a snapshot's global metadata as JSON and convert it into a Data object */ @@ -44,7 +54,7 @@ private JsonNode getJsonNode(SnapshotRepo.Provider repoDataProvider, String snap } } - default GlobalMetadata.Data fromRepo(String snapshotName) { + default GlobalMetadata fromRepo(String snapshotName) { SnapshotRepo.Provider repoDataProvider = getRepoDataProvider(); SmileFactory smileFactory = getSmileFactory(); JsonNode root = getJsonNode(repoDataProvider, snapshotName, smileFactory); @@ -52,7 +62,7 @@ default GlobalMetadata.Data fromRepo(String snapshotName) { } // Version-specific implementation - public GlobalMetadata.Data fromJsonNode(JsonNode root); + public GlobalMetadata fromJsonNode(JsonNode root); // Version-specific implementation public SmileFactory getSmileFactory(); @@ -61,15 +71,6 @@ default GlobalMetadata.Data fromRepo(String snapshotName) { public SnapshotRepo.Provider getRepoDataProvider(); } - /** - * Defines the behavior expected of an object that will surface the global metadata of a snapshot - * See: https://github.com/elastic/elasticsearch/blob/v7.10.2/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java#L1622 - * See: https://github.com/elastic/elasticsearch/blob/v6.8.23/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java#L1214 - */ - public static interface Data { - public ObjectNode toObjectNode(); - } - public static class CantFindSnapshotInRepo extends RfsException { public CantFindSnapshotInRepo(String snapshotName) { super("Can't find snapshot in repo: " + snapshotName); @@ -81,5 +82,5 @@ public CantReadGlobalMetadataFromSnapshot(String snapshotName, Throwable cause) super("Can't read the global metadata from snapshot: " + snapshotName, cause); } } - + } diff --git a/RFS/src/main/java/com/rfs/common/IndexMetadata.java b/RFS/src/main/java/com/rfs/models/IndexMetadata.java similarity index 82% rename from RFS/src/main/java/com/rfs/common/IndexMetadata.java rename to RFS/src/main/java/com/rfs/models/IndexMetadata.java index eb062e06f..0e70962ae 100644 --- a/RFS/src/main/java/com/rfs/common/IndexMetadata.java +++ b/RFS/src/main/java/com/rfs/models/IndexMetadata.java @@ -1,6 +1,7 @@ -package com.rfs.common; +package com.rfs.models; import org.apache.lucene.codecs.CodecUtil; +import org.opensearch.migrations.transformation.entity.Index; import java.io.ByteArrayInputStream; import java.io.FileInputStream; @@ -11,8 +12,24 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.dataformat.smile.SmileFactory; +import com.rfs.common.ByteArrayIndexInput; +import com.rfs.common.RfsException; +import com.rfs.common.SnapshotRepo; -public class IndexMetadata { +public interface IndexMetadata extends Index { + + /* + * Defines the behavior expected of an object that will surface the metadata of an index stored in a snapshot + * See: https://github.com/elastic/elasticsearch/blob/v7.10.2/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java#L1475 + * See: 
https://github.com/elastic/elasticsearch/blob/v6.8.23/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java#L1284 + */ + public JsonNode getAliases(); + public String getId(); + public JsonNode getMappings(); + public String getName(); + public int getNumberOfShards(); + public JsonNode getSettings(); + public IndexMetadata deepCopy(); /** * Defines the behavior required to read a snapshot's index metadata as JSON and convert it into a Data object @@ -38,7 +55,7 @@ private JsonNode getJsonNode(String indexId, String indexFileId, SmileFactory sm } } - default IndexMetadata.Data fromRepo(String snapshotName, String indexName) { + default IndexMetadata fromRepo(String snapshotName, String indexName) { SmileFactory smileFactory = getSmileFactory(); String indexId = getRepoDataProvider().getIndexId(indexName); String indexFileId = getIndexFileId(snapshotName, indexName); @@ -47,7 +64,7 @@ default IndexMetadata.Data fromRepo(String snapshotName, String indexName) { } // Version-specific implementation - public IndexMetadata.Data fromJsonNode(JsonNode root, String indexId, String indexName); + public IndexMetadata fromJsonNode(JsonNode root, String indexId, String indexName); // Version-specific implementation public SmileFactory getSmileFactory(); @@ -58,20 +75,4 @@ default IndexMetadata.Data fromRepo(String snapshotName, String indexName) { // Get the underlying SnapshotRepo Provider public SnapshotRepo.Provider getRepoDataProvider(); } - - /** - * Defines the behavior expected of an object that will surface the metadata of an index stored in a snapshot - * See: https://github.com/elastic/elasticsearch/blob/v7.10.2/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java#L1475 - * See: https://github.com/elastic/elasticsearch/blob/v6.8.23/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java#L1284 - */ - public static interface Data { - public ObjectNode getAliases(); - public String getId(); - public ObjectNode getMappings(); - public String getName(); - public int getNumberOfShards(); - public ObjectNode getSettings(); - public ObjectNode toObjectNode(); - } - } diff --git a/RFS/src/main/java/com/rfs/models/ShardFileInfo.java b/RFS/src/main/java/com/rfs/models/ShardFileInfo.java new file mode 100644 index 000000000..87744249a --- /dev/null +++ b/RFS/src/main/java/com/rfs/models/ShardFileInfo.java @@ -0,0 +1,22 @@ +package com.rfs.models; + +import org.apache.lucene.util.BytesRef; + +/** + * Defines the behavior expected of an object that will surface the metadata of an file stored in a snapshot + * See: https://github.com/elastic/elasticsearch/blob/7.10/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java#L277 + * See: https://github.com/elastic/elasticsearch/blob/6.8/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java#L281 + */ +public interface ShardFileInfo { + + public String getName(); + public String getPhysicalName(); + public long getLength(); + public String getChecksum(); + public long getPartSize(); + public String getWrittenBy(); + public BytesRef getMetaHash(); + public long getNumberOfParts(); + public String partName(long part); + +} diff --git a/RFS/src/main/java/com/rfs/common/ShardMetadata.java b/RFS/src/main/java/com/rfs/models/ShardMetadata.java similarity index 57% rename from RFS/src/main/java/com/rfs/common/ShardMetadata.java rename to RFS/src/main/java/com/rfs/models/ShardMetadata.java index f4dcac6c1..24c19ba24 100644 
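With the moves above, the metadata types become top-level interfaces in com.rfs.models (GlobalMetadata, IndexMetadata, ShardMetadata, ShardFileInfo) instead of nested Data interfaces under com.rfs.common, so callers now pass IndexMetadata and ShardMetadata around directly. A small sketch of consuming code against the new types; ShardMetadataLookup is a made-up stand-in for the version-specific factories shown elsewhere in this diff:

```java
import java.util.ArrayList;
import java.util.List;

import com.rfs.models.IndexMetadata;
import com.rfs.models.ShardMetadata;

public class ModelsDemo {
    /** Hypothetical functional interface standing in for ShardMetadataFactory_ES_6_8 / _ES_7_10. */
    public interface ShardMetadataLookup {
        ShardMetadata fromRepo(String indexName, int shardId);
    }

    /** Walks every shard of the given indices using the new top-level model interfaces. */
    public static List<String> describeShards(List<IndexMetadata> indexMetadatas, ShardMetadataLookup shards) {
        List<String> lines = new ArrayList<>();
        for (IndexMetadata indexMetadata : indexMetadatas) {
            for (int shardId = 0; shardId < indexMetadata.getNumberOfShards(); shardId++) {
                ShardMetadata shard = shards.fromRepo(indexMetadata.getName(), shardId);
                lines.add(indexMetadata.getName() + "/" + shardId + ": " + shard.getTotalSizeBytes() + " bytes");
            }
        }
        return lines;
    }
}
```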
--- a/RFS/src/main/java/com/rfs/common/ShardMetadata.java +++ b/RFS/src/main/java/com/rfs/models/ShardMetadata.java @@ -1,7 +1,6 @@ -package com.rfs.common; +package com.rfs.models; import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.util.BytesRef; import java.io.ByteArrayInputStream; import java.io.FileInputStream; @@ -12,8 +11,27 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.dataformat.smile.SmileFactory; +import com.rfs.common.ByteArrayIndexInput; +import com.rfs.common.RfsException; +import com.rfs.common.SnapshotRepo; -public class ShardMetadata { +/** + * Defines the behavior expected of an object that will surface the metadata of an shard stored in a snapshot + * See: https://github.com/elastic/elasticsearch/blob/7.10/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java#L510 + * See: https://github.com/elastic/elasticsearch/blob/6.8/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java#L508 + */ +public interface ShardMetadata { + + public String getSnapshotName(); + public String getIndexName(); + public String getIndexId(); + public int getShardId(); + public int getIndexVersion(); + public long getStartTime(); + public long getTime(); + public int getNumberOfFiles(); + public long getTotalSizeBytes(); + public List getFiles(); /** * Defines the behavior required to read a snapshot's shard metadata as JSON and convert it into a Data object @@ -39,7 +57,7 @@ private JsonNode getJsonNode(String snapshotId, String indexId, int shardId, Smi } } - default ShardMetadata.Data fromRepo(String snapshotName, String indexName, int shardId) { + default ShardMetadata fromRepo(String snapshotName, String indexName, int shardId) { SmileFactory smileFactory = getSmileFactory(); String snapshotId = getRepoDataProvider().getSnapshotId(snapshotName); String indexId = getRepoDataProvider().getIndexId(indexName); @@ -48,7 +66,7 @@ default ShardMetadata.Data fromRepo(String snapshotName, String indexName, int s } // Version-specific implementation - public ShardMetadata.Data fromJsonNode(JsonNode root, String indexId, String indexName, int shardId); + public ShardMetadata fromJsonNode(JsonNode root, String indexId, String indexName, int shardId); // Version-specific implementation public SmileFactory getSmileFactory(); @@ -62,40 +80,4 @@ public CouldNotParseShardMetadata(String message, Throwable cause) { super(message, cause); } } - - /** - * Defines the behavior expected of an object that will surface the metadata of an shard stored in a snapshot - * See: https://github.com/elastic/elasticsearch/blob/7.10/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java#L510 - * See: https://github.com/elastic/elasticsearch/blob/6.8/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java#L508 - */ - public static interface Data { - public String getSnapshotName(); - public String getIndexName(); - public String getIndexId(); - public int getShardId(); - public int getIndexVersion(); - public long getStartTime(); - public long getTime(); - public int getNumberOfFiles(); - public long getTotalSizeBytes(); - public List getFiles(); - } - - /** - * Defines the behavior expected of an object that will surface the metadata of an file stored in a snapshot - * See: 
https://github.com/elastic/elasticsearch/blob/7.10/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java#L277 - * See: https://github.com/elastic/elasticsearch/blob/6.8/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java#L281 - */ - public static interface FileInfo { - public String getName(); - public String getPhysicalName(); - public long getLength(); - public String getChecksum(); - public long getPartSize(); - public String getWrittenBy(); - public BytesRef getMetaHash(); - public long getNumberOfParts(); - public String partName(long part); - } - } diff --git a/RFS/src/main/java/com/rfs/common/SnapshotMetadata.java b/RFS/src/main/java/com/rfs/models/SnapshotMetadata.java similarity index 64% rename from RFS/src/main/java/com/rfs/common/SnapshotMetadata.java rename to RFS/src/main/java/com/rfs/models/SnapshotMetadata.java index 898555e92..5440e8e41 100644 --- a/RFS/src/main/java/com/rfs/common/SnapshotMetadata.java +++ b/RFS/src/main/java/com/rfs/models/SnapshotMetadata.java @@ -1,4 +1,4 @@ -package com.rfs.common; +package com.rfs.models; import java.io.ByteArrayInputStream; import java.io.FileInputStream; @@ -11,12 +11,32 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.dataformat.smile.SmileFactory; +import com.rfs.common.ByteArrayIndexInput; +import com.rfs.common.SnapshotRepo; +import com.rfs.common.SourceRepo; - -public class SnapshotMetadata { +/** + * Defines the behavior expected of an object that will surface the metadata of a snapshot + * See: https://github.com/elastic/elasticsearch/blob/7.10/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java#L615 + * See: https://github.com/elastic/elasticsearch/blob/6.8/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java#L583 + */ +public interface SnapshotMetadata { // TODO: Turn into an ENUM when we know the other possible values public static final String SNAPSHOT_SUCCEEDED = "SUCCESS"; + public String getName(); + public String getUuid(); + public int getVersionId(); + public List getIndices(); + public String getState(); + public String getReason(); + public boolean isIncludeGlobalState(); + public long getStartTime(); + public long getEndTime(); + public int getTotalShards(); + public int getSuccessfulShards(); + public List getFailures(); + /** * Defines the behavior required to read a snapshot metadata as JSON and convert it into a Data object */ @@ -45,37 +65,17 @@ private JsonNode getJsonNode(SourceRepo repo, SnapshotRepo.Provider repoDataProv } } - default SnapshotMetadata.Data fromRepo(SourceRepo repo, SnapshotRepo.Provider repoDataProvider, String snapshotName) throws Exception { + default SnapshotMetadata fromRepo(SourceRepo repo, SnapshotRepo.Provider repoDataProvider, String snapshotName) throws Exception { SmileFactory smileFactory = getSmileFactory(); JsonNode root = getJsonNode(repo, repoDataProvider, snapshotName, smileFactory); return fromJsonNode(root); } - public SnapshotMetadata.Data fromJsonNode(JsonNode root) throws Exception; + public SnapshotMetadata fromJsonNode(JsonNode root) throws Exception; public SmileFactory getSmileFactory(); } - /** - * Defines the behavior expected of an object that will surface the metadata of a snapshot - * See: https://github.com/elastic/elasticsearch/blob/7.10/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java#L615 - * See: 
https://github.com/elastic/elasticsearch/blob/6.8/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java#L583 - */ - public static interface Data { - public String getName(); - public String getUuid(); - public int getVersionId(); - public List getIndices(); - public String getState(); - public String getReason(); - public boolean isIncludeGlobalState(); - public long getStartTime(); - public long getEndTime(); - public int getTotalShards(); - public int getSuccessfulShards(); - public List getFailures(); - - default boolean isSuccessful() { - return SNAPSHOT_SUCCEEDED.equals(getState()); - } + default boolean isSuccessful() { + return SNAPSHOT_SUCCEEDED.equals(getState()); } } diff --git a/RFS/src/main/java/com/rfs/transformers/Transformer.java b/RFS/src/main/java/com/rfs/transformers/Transformer.java index 69c0840fb..3ca631ad8 100644 --- a/RFS/src/main/java/com/rfs/transformers/Transformer.java +++ b/RFS/src/main/java/com/rfs/transformers/Transformer.java @@ -1,6 +1,7 @@ package com.rfs.transformers; -import com.fasterxml.jackson.databind.node.ObjectNode; +import com.rfs.models.GlobalMetadata; +import com.rfs.models.IndexMetadata; /** * Defines the behavior required to transform the Global and Index Metadata from one version of Elasticsearch/Opensearch @@ -10,10 +11,10 @@ public interface Transformer { /** * Takes the raw JSON representing the Global Metadata of one version and returns a new, transformed copy of the JSON */ - public ObjectNode transformGlobalMetadata(ObjectNode root); + public GlobalMetadata transformGlobalMetadata(GlobalMetadata globalData); /** * Takes the raw JSON representing the Index Metadata of one version and returns a new, transformed copy of the JSON */ - public ObjectNode transformIndexMetadata(ObjectNode root); + public IndexMetadata transformIndexMetadata(IndexMetadata indexData); } diff --git a/RFS/src/main/java/com/rfs/transformers/Transformer_ES_6_8_to_OS_2_11.java b/RFS/src/main/java/com/rfs/transformers/Transformer_ES_6_8_to_OS_2_11.java index e681d6003..7ff3f9744 100644 --- a/RFS/src/main/java/com/rfs/transformers/Transformer_ES_6_8_to_OS_2_11.java +++ b/RFS/src/main/java/com/rfs/transformers/Transformer_ES_6_8_to_OS_2_11.java @@ -1,14 +1,29 @@ package com.rfs.transformers; +import java.util.List; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; +import com.rfs.models.GlobalMetadata; +import com.rfs.models.IndexMetadata; +import com.rfs.version_os_2_11.GlobalMetadataData_OS_2_11; +import com.rfs.version_os_2_11.IndexMetadataData_OS_2_11; + +import org.opensearch.migrations.transformation.TransformationRule; +import org.opensearch.migrations.transformation.entity.Index; +import org.opensearch.migrations.transformation.rules.IndexMappingTypeRemoval; public class Transformer_ES_6_8_to_OS_2_11 implements Transformer { private static final Logger logger = LogManager.getLogger(Transformer_ES_6_8_to_OS_2_11.class); private static final ObjectMapper mapper = new ObjectMapper(); + + private final List> indexTransformations = List.of(new IndexMappingTypeRemoval()); + private final List> indexTemplateTransformations = List.of(new IndexMappingTypeRemoval()); + + private int awarenessAttributeDimensionality; public Transformer_ES_6_8_to_OS_2_11(int awarenessAttributeDimensionality) { @@ -16,23 +31,21 @@ public Transformer_ES_6_8_to_OS_2_11(int awarenessAttributeDimensionality) { } @Override - public ObjectNode 
transformGlobalMetadata(ObjectNode root) { + public GlobalMetadata transformGlobalMetadata(GlobalMetadata globalData) { + var root = globalData.toObjectNode(); ObjectNode newRoot = mapper.createObjectNode(); // Transform the original "templates", but put them into the legacy "templates" bucket on the target - if (root.get("templates") != null) { - ObjectNode templatesRoot = (ObjectNode) root.get("templates").deepCopy(); - templatesRoot.fieldNames().forEachRemaining(templateName -> { - ObjectNode template = (ObjectNode) templatesRoot.get(templateName); - logger.info("Transforming template: " + templateName); - logger.debug("Original template: " + template.toString()); - TransformFunctions.removeIntermediateMappingsLevels(template); - TransformFunctions.removeIntermediateIndexSettingsLevel(template); // run before fixNumberOfReplicas - TransformFunctions.fixReplicasForDimensionality(template, awarenessAttributeDimensionality); - logger.debug("Transformed template: " + template.toString()); - templatesRoot.set(templateName, template); + var originalTemplates = root.get("templates"); + if (originalTemplates != null) { + var templates = mapper.createObjectNode(); + originalTemplates.fieldNames().forEachRemaining(templateName -> { + var templateCopy = (ObjectNode) originalTemplates.get(templateName).deepCopy(); + var indexTemplate = (Index) () -> templateCopy; + transformIndex(indexTemplate, IndexType.Template); + templates.set(templateName, indexTemplate.rawJson()); }); - newRoot.set("templates", templatesRoot); + newRoot.set("templates", templates); } // Make empty index_templates @@ -47,21 +60,38 @@ public ObjectNode transformGlobalMetadata(ObjectNode root) { componentTemplatesRoot.set("component_template", componentTemplatesSubRoot); newRoot.set("component_template", componentTemplatesRoot); - return newRoot; + return new GlobalMetadataData_OS_2_11(newRoot); } @Override - public ObjectNode transformIndexMetadata(ObjectNode root){ - ObjectNode newRoot = root.deepCopy(); + public IndexMetadata transformIndexMetadata(IndexMetadata index) { + var copy = index.deepCopy(); + transformIndex(copy, IndexType.Concrete); + return new IndexMetadataData_OS_2_11(copy.rawJson(), copy.getId(), copy.getName()); + } - TransformFunctions.removeIntermediateMappingsLevels(newRoot); + private void transformIndex(Index index, IndexType type) { + logger.debug("Original Object: " + index.rawJson().toString()); + var newRoot = index.rawJson(); + + switch (type) { + case Concrete: + indexTransformations.forEach(transformer -> transformer.applyTransformation(index)); + break; + case Template: + indexTemplateTransformations.forEach(transformer -> transformer.applyTransformation(index)); + break; + } newRoot.set("settings", TransformFunctions.convertFlatSettingsToTree((ObjectNode) newRoot.get("settings"))); TransformFunctions.removeIntermediateIndexSettingsLevel(newRoot); // run before fixNumberOfReplicas TransformFunctions.fixReplicasForDimensionality(newRoot, awarenessAttributeDimensionality); - logger.debug("Original Object: " + root.toString()); logger.debug("Transformed Object: " + newRoot.toString()); - return newRoot; - } + } + + private enum IndexType { + Concrete, + Template; + } } diff --git a/RFS/src/main/java/com/rfs/transformers/Transformer_ES_7_10_OS_2_11.java b/RFS/src/main/java/com/rfs/transformers/Transformer_ES_7_10_OS_2_11.java index d46fffb08..282032330 100644 --- a/RFS/src/main/java/com/rfs/transformers/Transformer_ES_7_10_OS_2_11.java +++ 
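Since Transformer now accepts and returns the model interfaces, runner code no longer unwraps an ObjectNode before transforming. A sketch of the call pattern, assuming metadata objects already loaded from a snapshot; the transformAll helper is hypothetical, while the getTransformer arguments mirror the end-to-end test later in this diff:

    import com.rfs.common.ClusterVersion;
    import com.rfs.models.GlobalMetadata;
    import com.rfs.models.IndexMetadata;
    import com.rfs.transformers.TransformFunctions;
    import com.rfs.transformers.Transformer;

    public class TransformerUsageSketch {
        // Hypothetical helper: applies the ES 6.8 -> OS 2.11 transformer to already-loaded metadata
        static void transformAll(GlobalMetadata globalMetadata, IndexMetadata indexMetadata) {
            Transformer transformer =
                TransformFunctions.getTransformer(ClusterVersion.ES_6_8, ClusterVersion.OS_2_11, 1);

            // Both calls take and return model objects instead of raw ObjectNode trees
            GlobalMetadata transformedGlobal = transformer.transformGlobalMetadata(globalMetadata);
            IndexMetadata transformedIndex = transformer.transformIndexMetadata(indexMetadata);

            System.out.println(transformedGlobal.toObjectNode());
            System.out.println(transformedIndex.rawJson());
        }
    }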
b/RFS/src/main/java/com/rfs/transformers/Transformer_ES_7_10_OS_2_11.java @@ -3,8 +3,12 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; +import com.rfs.models.GlobalMetadata; +import com.rfs.models.IndexMetadata; +import com.rfs.version_os_2_11.GlobalMetadataData_OS_2_11; public class Transformer_ES_7_10_OS_2_11 implements Transformer { private static final Logger logger = LogManager.getLogger(Transformer_ES_7_10_OS_2_11.class); @@ -16,8 +20,8 @@ public Transformer_ES_7_10_OS_2_11(int awarenessAttributeDimensionality) { } @Override - public ObjectNode transformGlobalMetadata(ObjectNode root){ - ObjectNode newRoot = mapper.createObjectNode(); + public GlobalMetadata transformGlobalMetadata(GlobalMetadata metaData){ + ObjectNode root = metaData.toObjectNode().deepCopy(); // Transform the legacy templates if (root.get("templates") != null) { @@ -31,7 +35,7 @@ public ObjectNode transformGlobalMetadata(ObjectNode root){ logger.debug("Transformed template: " + template.toString()); templatesRoot.set(templateName, template); }); - newRoot.set("templates", templatesRoot); + root.set("templates", templatesRoot); } // Transform the index templates @@ -53,7 +57,7 @@ public ObjectNode transformGlobalMetadata(ObjectNode root){ logger.debug("Transformed template: " + template.toString()); indexTemplateValuesRoot.set(templateName, template); }); - newRoot.set("index_template", indexTemplatesRoot); + root.set("index_template", indexTemplatesRoot); } // Transform the component templates @@ -74,24 +78,25 @@ public ObjectNode transformGlobalMetadata(ObjectNode root){ logger.debug("Transformed template: " + template.toString()); componentTemplateValuesRoot.set(templateName, template); }); - newRoot.set("component_template", componentTemplatesRoot); + root.set("component_template", componentTemplatesRoot); } - return newRoot; + return new GlobalMetadataData_OS_2_11(root); } @Override - public ObjectNode transformIndexMetadata(ObjectNode root){ - ObjectNode newRoot = root.deepCopy(); - + public IndexMetadata transformIndexMetadata(IndexMetadata indexData){ + logger.debug("Original Object: " + indexData.rawJson().toString()); + var copy = indexData.deepCopy(); + var newRoot = copy.rawJson(); + TransformFunctions.removeIntermediateMappingsLevels(newRoot); newRoot.set("settings", TransformFunctions.convertFlatSettingsToTree((ObjectNode) newRoot.get("settings"))); TransformFunctions.removeIntermediateIndexSettingsLevel(newRoot); // run before fixNumberOfReplicas TransformFunctions.fixReplicasForDimensionality(newRoot, awarenessAttributeDimensionality); - logger.debug("Original Object: " + root.toString()); logger.debug("Transformed Object: " + newRoot.toString()); - return newRoot; + return indexData; } } diff --git a/RFS/src/main/java/com/rfs/version_es_6_8/GlobalMetadataData_ES_6_8.java b/RFS/src/main/java/com/rfs/version_es_6_8/GlobalMetadataData_ES_6_8.java index 993944819..e047a21d0 100644 --- a/RFS/src/main/java/com/rfs/version_es_6_8/GlobalMetadataData_ES_6_8.java +++ b/RFS/src/main/java/com/rfs/version_es_6_8/GlobalMetadataData_ES_6_8.java @@ -1,8 +1,9 @@ package com.rfs.version_es_6_8; import com.fasterxml.jackson.databind.node.ObjectNode; +import com.rfs.models.GlobalMetadata; -public class GlobalMetadataData_ES_6_8 implements com.rfs.common.GlobalMetadata.Data { +public class GlobalMetadataData_ES_6_8 implements 
GlobalMetadata { private final ObjectNode root; public GlobalMetadataData_ES_6_8(ObjectNode root) { @@ -17,5 +18,4 @@ public ObjectNode toObjectNode() { public ObjectNode getTemplates() { return (ObjectNode) root.get("templates"); } - } diff --git a/RFS/src/main/java/com/rfs/version_es_6_8/GlobalMetadataFactory_ES_6_8.java b/RFS/src/main/java/com/rfs/version_es_6_8/GlobalMetadataFactory_ES_6_8.java index 125208b2f..0cc06d401 100644 --- a/RFS/src/main/java/com/rfs/version_es_6_8/GlobalMetadataFactory_ES_6_8.java +++ b/RFS/src/main/java/com/rfs/version_es_6_8/GlobalMetadataFactory_ES_6_8.java @@ -3,10 +3,10 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.dataformat.smile.SmileFactory; -import com.rfs.common.GlobalMetadata; import com.rfs.common.SnapshotRepo; +import com.rfs.models.GlobalMetadata; -public class GlobalMetadataFactory_ES_6_8 implements com.rfs.common.GlobalMetadata.Factory{ +public class GlobalMetadataFactory_ES_6_8 implements GlobalMetadata.Factory{ private final SnapshotRepo.Provider repoDataProvider; public GlobalMetadataFactory_ES_6_8(SnapshotRepo.Provider repoDataProvider) { @@ -14,7 +14,7 @@ public GlobalMetadataFactory_ES_6_8(SnapshotRepo.Provider repoDataProvider) { } @Override - public GlobalMetadata.Data fromJsonNode(JsonNode root) { + public GlobalMetadata fromJsonNode(JsonNode root) { ObjectNode metadataRoot = (ObjectNode) root.get("meta-data"); return new GlobalMetadataData_ES_6_8(metadataRoot); } diff --git a/RFS/src/main/java/com/rfs/version_es_6_8/IndexMetadataData_ES_6_8.java b/RFS/src/main/java/com/rfs/version_es_6_8/IndexMetadataData_ES_6_8.java index b1a09c2d1..f284ca139 100644 --- a/RFS/src/main/java/com/rfs/version_es_6_8/IndexMetadataData_ES_6_8.java +++ b/RFS/src/main/java/com/rfs/version_es_6_8/IndexMetadataData_ES_6_8.java @@ -1,10 +1,12 @@ package com.rfs.version_es_6_8; +import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; +import com.rfs.models.IndexMetadata; import com.rfs.transformers.TransformFunctions; -public class IndexMetadataData_ES_6_8 implements com.rfs.common.IndexMetadata.Data { +public class IndexMetadataData_ES_6_8 implements IndexMetadata { private ObjectNode root; private ObjectNode mappings; private ObjectNode settings; @@ -30,7 +32,7 @@ public String getId() { } @Override - public ObjectNode getMappings() { + public JsonNode getMappings() { if (mappings != null) { return mappings; } @@ -68,7 +70,12 @@ public ObjectNode getSettings() { } @Override - public ObjectNode toObjectNode() { + public ObjectNode rawJson() { return root; } + + @Override + public IndexMetadata deepCopy() { + return new IndexMetadataData_ES_6_8(root.deepCopy(), indexId, indexName); + } } diff --git a/RFS/src/main/java/com/rfs/version_es_6_8/IndexMetadataFactory_ES_6_8.java b/RFS/src/main/java/com/rfs/version_es_6_8/IndexMetadataFactory_ES_6_8.java index 2bf52273e..fc2b5c62d 100644 --- a/RFS/src/main/java/com/rfs/version_es_6_8/IndexMetadataFactory_ES_6_8.java +++ b/RFS/src/main/java/com/rfs/version_es_6_8/IndexMetadataFactory_ES_6_8.java @@ -3,10 +3,10 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.dataformat.smile.SmileFactory; -import com.rfs.common.IndexMetadata; import com.rfs.common.SnapshotRepo; +import com.rfs.models.IndexMetadata; -public class IndexMetadataFactory_ES_6_8 
implements com.rfs.common.IndexMetadata.Factory { +public class IndexMetadataFactory_ES_6_8 implements IndexMetadata.Factory { private final SnapshotRepo.Provider repoDataProvider; public IndexMetadataFactory_ES_6_8(SnapshotRepo.Provider repoDataProvider) { @@ -14,7 +14,7 @@ public IndexMetadataFactory_ES_6_8(SnapshotRepo.Provider repoDataProvider) { } @Override - public IndexMetadata.Data fromJsonNode(JsonNode root, String indexId, String indexName) { + public IndexMetadata fromJsonNode(JsonNode root, String indexId, String indexName) { ObjectNode objectNodeRoot = (ObjectNode) root.get(indexName); return new IndexMetadataData_ES_6_8(objectNodeRoot, indexId, indexName); } diff --git a/RFS/src/main/java/com/rfs/version_es_6_8/ShardMetadataData_ES_6_8.java b/RFS/src/main/java/com/rfs/version_es_6_8/ShardMetadataData_ES_6_8.java index b528e95f9..200c24835 100644 --- a/RFS/src/main/java/com/rfs/version_es_6_8/ShardMetadataData_ES_6_8.java +++ b/RFS/src/main/java/com/rfs/version_es_6_8/ShardMetadataData_ES_6_8.java @@ -14,10 +14,11 @@ import com.fasterxml.jackson.databind.JsonDeserializer; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; -import com.rfs.common.ShardMetadata; +import com.rfs.models.ShardFileInfo; +import com.rfs.models.ShardMetadata; -public class ShardMetadataData_ES_6_8 implements com.rfs.common.ShardMetadata.Data { +public class ShardMetadataData_ES_6_8 implements ShardMetadata { private static final ObjectMapper objectMapper = new ObjectMapper(); private String snapshotName; @@ -106,8 +107,8 @@ public long getTotalSizeBytes() { } @Override - public List getFiles() { - List convertedFiles = new ArrayList<>(files); + public List getFiles() { + List convertedFiles = new ArrayList<>(files); return convertedFiles; } @@ -148,7 +149,7 @@ public DataRaw( } } - public static class FileInfo implements ShardMetadata.FileInfo { + public static class FileInfo implements ShardFileInfo { private String name; private String physicalName; private long length; diff --git a/RFS/src/main/java/com/rfs/version_es_6_8/ShardMetadataFactory_ES_6_8.java b/RFS/src/main/java/com/rfs/version_es_6_8/ShardMetadataFactory_ES_6_8.java index 30584ba79..5cbee31c8 100644 --- a/RFS/src/main/java/com/rfs/version_es_6_8/ShardMetadataFactory_ES_6_8.java +++ b/RFS/src/main/java/com/rfs/version_es_6_8/ShardMetadataFactory_ES_6_8.java @@ -7,15 +7,16 @@ import com.fasterxml.jackson.dataformat.smile.SmileFactory; import lombok.RequiredArgsConstructor; -import com.rfs.common.ShardMetadata; import com.rfs.common.SnapshotRepo; +import com.rfs.models.ShardFileInfo; +import com.rfs.models.ShardMetadata; @RequiredArgsConstructor public class ShardMetadataFactory_ES_6_8 implements ShardMetadata.Factory { protected final SnapshotRepo.Provider repoDataProvider; @Override - public ShardMetadata.Data fromJsonNode(JsonNode root, String indexId, String indexName, int shardId) { + public ShardMetadata fromJsonNode(JsonNode root, String indexId, String indexName, int shardId) { ObjectMapper objectMapper = new ObjectMapper(); SimpleModule module = new SimpleModule(); module.addDeserializer(ShardMetadataData_ES_6_8.FileInfoRaw.class, new ShardMetadataData_ES_6_8.FileInfoRawDeserializer()); diff --git a/RFS/src/main/java/com/rfs/version_es_6_8/SnapshotMetadataData_ES_6_8.java b/RFS/src/main/java/com/rfs/version_es_6_8/SnapshotMetadataData_ES_6_8.java index c9f813d18..5fbe0f9f3 100644 --- a/RFS/src/main/java/com/rfs/version_es_6_8/SnapshotMetadataData_ES_6_8.java +++ 
b/RFS/src/main/java/com/rfs/version_es_6_8/SnapshotMetadataData_ES_6_8.java @@ -3,11 +3,10 @@ import java.util.List; import com.fasterxml.jackson.annotation.JsonProperty; +import com.rfs.models.SnapshotMetadata; -import com.rfs.common.SnapshotMetadata; - -public class SnapshotMetadataData_ES_6_8 implements SnapshotMetadata.Data{ +public class SnapshotMetadataData_ES_6_8 implements SnapshotMetadata{ private String name; private String uuid; diff --git a/RFS/src/main/java/com/rfs/version_es_6_8/SnapshotMetadataFactory_ES_6_8.java b/RFS/src/main/java/com/rfs/version_es_6_8/SnapshotMetadataFactory_ES_6_8.java index baf1f2e22..0833f25b5 100644 --- a/RFS/src/main/java/com/rfs/version_es_6_8/SnapshotMetadataFactory_ES_6_8.java +++ b/RFS/src/main/java/com/rfs/version_es_6_8/SnapshotMetadataFactory_ES_6_8.java @@ -4,9 +4,9 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.dataformat.smile.SmileFactory; -import com.rfs.common.SnapshotMetadata; +import com.rfs.models.SnapshotMetadata; -public class SnapshotMetadataFactory_ES_6_8 implements com.rfs.common.SnapshotMetadata.Factory { +public class SnapshotMetadataFactory_ES_6_8 implements SnapshotMetadata.Factory { /** * A version of the Elasticsearch approach simplified by assuming JSON; see here [1] for more details. @@ -14,7 +14,7 @@ public class SnapshotMetadataFactory_ES_6_8 implements com.rfs.common.SnapshotMe * [1] https://github.com/elastic/elasticsearch/blob/6.8/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java#L583 */ @Override - public SnapshotMetadata.Data fromJsonNode(JsonNode root) throws Exception { + public SnapshotMetadata fromJsonNode(JsonNode root) throws Exception { ObjectMapper mapper = new ObjectMapper(); ObjectNode objectNodeRoot = (ObjectNode) root; SnapshotMetadataData_ES_6_8 snapshotMetadata = mapper.treeToValue(objectNodeRoot.get("snapshot"), SnapshotMetadataData_ES_6_8.class); diff --git a/RFS/src/main/java/com/rfs/version_es_7_10/GlobalMetadataData_ES_7_10.java b/RFS/src/main/java/com/rfs/version_es_7_10/GlobalMetadataData_ES_7_10.java index b05141568..e24764b15 100644 --- a/RFS/src/main/java/com/rfs/version_es_7_10/GlobalMetadataData_ES_7_10.java +++ b/RFS/src/main/java/com/rfs/version_es_7_10/GlobalMetadataData_ES_7_10.java @@ -1,8 +1,9 @@ package com.rfs.version_es_7_10; import com.fasterxml.jackson.databind.node.ObjectNode; +import com.rfs.models.GlobalMetadata; -public class GlobalMetadataData_ES_7_10 implements com.rfs.common.GlobalMetadata.Data { +public class GlobalMetadataData_ES_7_10 implements GlobalMetadata { private final ObjectNode root; public GlobalMetadataData_ES_7_10(ObjectNode root) { diff --git a/RFS/src/main/java/com/rfs/version_es_7_10/GlobalMetadataFactory_ES_7_10.java b/RFS/src/main/java/com/rfs/version_es_7_10/GlobalMetadataFactory_ES_7_10.java index 9c492a864..6d51a0230 100644 --- a/RFS/src/main/java/com/rfs/version_es_7_10/GlobalMetadataFactory_ES_7_10.java +++ b/RFS/src/main/java/com/rfs/version_es_7_10/GlobalMetadataFactory_ES_7_10.java @@ -3,10 +3,10 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.dataformat.smile.SmileFactory; -import com.rfs.common.GlobalMetadata; import com.rfs.common.SnapshotRepo; +import com.rfs.models.GlobalMetadata; -public class GlobalMetadataFactory_ES_7_10 implements com.rfs.common.GlobalMetadata.Factory{ +public class GlobalMetadataFactory_ES_7_10 implements 
GlobalMetadata.Factory{ private final SnapshotRepo.Provider repoDataProvider; public GlobalMetadataFactory_ES_7_10(SnapshotRepo.Provider repoDataProvider) { @@ -14,7 +14,7 @@ public GlobalMetadataFactory_ES_7_10(SnapshotRepo.Provider repoDataProvider) { } @Override - public GlobalMetadata.Data fromJsonNode(JsonNode root) { + public GlobalMetadata fromJsonNode(JsonNode root) { ObjectNode metadataRoot = (ObjectNode) root.get("meta-data"); return new GlobalMetadataData_ES_7_10(metadataRoot); } diff --git a/RFS/src/main/java/com/rfs/version_es_7_10/IndexMetadataData_ES_7_10.java b/RFS/src/main/java/com/rfs/version_es_7_10/IndexMetadataData_ES_7_10.java index 8f285df4d..1549cffbe 100644 --- a/RFS/src/main/java/com/rfs/version_es_7_10/IndexMetadataData_ES_7_10.java +++ b/RFS/src/main/java/com/rfs/version_es_7_10/IndexMetadataData_ES_7_10.java @@ -2,9 +2,10 @@ import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; +import com.rfs.models.IndexMetadata; import com.rfs.transformers.TransformFunctions; -public class IndexMetadataData_ES_7_10 implements com.rfs.common.IndexMetadata.Data { +public class IndexMetadataData_ES_7_10 implements IndexMetadata { private ObjectNode root; private ObjectNode mappings; private ObjectNode settings; @@ -68,7 +69,12 @@ public ObjectNode getSettings() { } @Override - public ObjectNode toObjectNode() { + public ObjectNode rawJson() { return root; } + + @Override + public IndexMetadata deepCopy() { + return new IndexMetadataData_ES_7_10(root.deepCopy(), indexId, indexName); + } } diff --git a/RFS/src/main/java/com/rfs/version_es_7_10/IndexMetadataFactory_ES_7_10.java b/RFS/src/main/java/com/rfs/version_es_7_10/IndexMetadataFactory_ES_7_10.java index 732e8e116..be8c0aeaf 100644 --- a/RFS/src/main/java/com/rfs/version_es_7_10/IndexMetadataFactory_ES_7_10.java +++ b/RFS/src/main/java/com/rfs/version_es_7_10/IndexMetadataFactory_ES_7_10.java @@ -3,10 +3,10 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.dataformat.smile.SmileFactory; -import com.rfs.common.IndexMetadata; import com.rfs.common.SnapshotRepo; +import com.rfs.models.IndexMetadata; -public class IndexMetadataFactory_ES_7_10 implements com.rfs.common.IndexMetadata.Factory { +public class IndexMetadataFactory_ES_7_10 implements IndexMetadata.Factory { private final SnapshotRepo.Provider repoDataProvider; public IndexMetadataFactory_ES_7_10(SnapshotRepo.Provider repoDataProvider) { @@ -14,7 +14,7 @@ public IndexMetadataFactory_ES_7_10(SnapshotRepo.Provider repoDataProvider) { } @Override - public IndexMetadata.Data fromJsonNode(JsonNode root, String indexId, String indexName) { + public IndexMetadata fromJsonNode(JsonNode root, String indexId, String indexName) { ObjectNode objectNodeRoot = (ObjectNode) root.get(indexName); return new IndexMetadataData_ES_7_10(objectNodeRoot, indexId, indexName); } diff --git a/RFS/src/main/java/com/rfs/version_es_7_10/ShardMetadataData_ES_7_10.java b/RFS/src/main/java/com/rfs/version_es_7_10/ShardMetadataData_ES_7_10.java index 7db630a26..3709c31e4 100644 --- a/RFS/src/main/java/com/rfs/version_es_7_10/ShardMetadataData_ES_7_10.java +++ b/RFS/src/main/java/com/rfs/version_es_7_10/ShardMetadataData_ES_7_10.java @@ -15,9 +15,10 @@ import com.fasterxml.jackson.databind.JsonDeserializer; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; -import com.rfs.common.ShardMetadata; +import 
com.rfs.models.ShardFileInfo; +import com.rfs.models.ShardMetadata; -public class ShardMetadataData_ES_7_10 implements ShardMetadata.Data { +public class ShardMetadataData_ES_7_10 implements ShardMetadata { private static final ObjectMapper objectMapper = new ObjectMapper(); static { @@ -110,8 +111,8 @@ public long getTotalSizeBytes() { } @Override - public List getFiles() { - List convertedFiles = new ArrayList<>(files); + public List getFiles() { + List convertedFiles = new ArrayList<>(files); return convertedFiles; } @@ -152,7 +153,7 @@ public DataRaw( } } - public static class FileInfo implements ShardMetadata.FileInfo { + public static class FileInfo implements ShardFileInfo { private String name; private String physicalName; private long length; diff --git a/RFS/src/main/java/com/rfs/version_es_7_10/ShardMetadataFactory_ES_7_10.java b/RFS/src/main/java/com/rfs/version_es_7_10/ShardMetadataFactory_ES_7_10.java index 4b95669e0..0bf6efa67 100644 --- a/RFS/src/main/java/com/rfs/version_es_7_10/ShardMetadataFactory_ES_7_10.java +++ b/RFS/src/main/java/com/rfs/version_es_7_10/ShardMetadataFactory_ES_7_10.java @@ -9,15 +9,15 @@ import lombok.RequiredArgsConstructor; -import com.rfs.common.ShardMetadata; import com.rfs.common.SnapshotRepo; +import com.rfs.models.ShardMetadata; @RequiredArgsConstructor public class ShardMetadataFactory_ES_7_10 implements ShardMetadata.Factory { protected final SnapshotRepo.Provider repoDataProvider; @Override - public ShardMetadata.Data fromJsonNode(JsonNode root, String indexId, String indexName, int shardId) { + public ShardMetadata fromJsonNode(JsonNode root, String indexId, String indexName, int shardId) { ObjectMapper objectMapper = new ObjectMapper(); objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); SimpleModule module = new SimpleModule(); diff --git a/RFS/src/main/java/com/rfs/version_es_7_10/SnapshotMetadataData_ES_7_10.java b/RFS/src/main/java/com/rfs/version_es_7_10/SnapshotMetadataData_ES_7_10.java index 1e2e8bdd6..0c993c217 100644 --- a/RFS/src/main/java/com/rfs/version_es_7_10/SnapshotMetadataData_ES_7_10.java +++ b/RFS/src/main/java/com/rfs/version_es_7_10/SnapshotMetadataData_ES_7_10.java @@ -3,11 +3,10 @@ import java.util.List; import com.fasterxml.jackson.annotation.JsonProperty; +import com.rfs.models.SnapshotMetadata; -import com.rfs.common.SnapshotMetadata; - -public class SnapshotMetadataData_ES_7_10 implements SnapshotMetadata.Data{ +public class SnapshotMetadataData_ES_7_10 implements SnapshotMetadata{ private String name; private String uuid; diff --git a/RFS/src/main/java/com/rfs/version_es_7_10/SnapshotMetadataFactory_ES_7_10.java b/RFS/src/main/java/com/rfs/version_es_7_10/SnapshotMetadataFactory_ES_7_10.java index ce35e36ed..499d4bcd9 100644 --- a/RFS/src/main/java/com/rfs/version_es_7_10/SnapshotMetadataFactory_ES_7_10.java +++ b/RFS/src/main/java/com/rfs/version_es_7_10/SnapshotMetadataFactory_ES_7_10.java @@ -5,9 +5,9 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.dataformat.smile.SmileFactory; -import com.rfs.common.SnapshotMetadata; +import com.rfs.models.SnapshotMetadata; -public class SnapshotMetadataFactory_ES_7_10 implements com.rfs.common.SnapshotMetadata.Factory { +public class SnapshotMetadataFactory_ES_7_10 implements SnapshotMetadata.Factory { /** * A version of the Elasticsearch approach simplified by assuming JSON; see here [1] for more details. 
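With ShardMetadata and ShardFileInfo promoted to top-level interfaces in com.rfs.models, shard-level callers (the unpacker, DocumentsRunner) work against the same flattened types. A small sketch of listing one shard's files; the provider argument and names are placeholders, and the element type of getFiles() is taken to be ShardFileInfo as the implementations above suggest:

    import com.rfs.common.SnapshotRepo;
    import com.rfs.models.ShardFileInfo;
    import com.rfs.models.ShardMetadata;
    import com.rfs.version_es_7_10.ShardMetadataFactory_ES_7_10;

    public class ShardListingSketch {
        // Prints a summary of the files backing a single shard within a snapshot
        static void printShardFiles(SnapshotRepo.Provider snapShotProvider,
                                    String snapshotName, String indexName, int shardId) {
            ShardMetadata shardMetadata =
                new ShardMetadataFactory_ES_7_10(snapShotProvider).fromRepo(snapshotName, indexName, shardId);

            System.out.println("Shard " + shardMetadata.getShardId()
                + " holds " + shardMetadata.getNumberOfFiles()
                + " files, " + shardMetadata.getTotalSizeBytes() + " bytes total");
            for (ShardFileInfo file : shardMetadata.getFiles()) {
                System.out.println("  " + file.getPhysicalName() + " (" + file.getLength() + " bytes)");
            }
        }
    }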
@@ -15,7 +15,7 @@ public class SnapshotMetadataFactory_ES_7_10 implements com.rfs.common.SnapshotM * [1] https://github.com/elastic/elasticsearch/blob/6.8/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java#L583 */ @Override - public SnapshotMetadata.Data fromJsonNode(JsonNode root) throws Exception { + public SnapshotMetadata fromJsonNode(JsonNode root) throws Exception { ObjectMapper mapper = new ObjectMapper(); mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); ObjectNode objectNodeRoot = (ObjectNode) root; diff --git a/RFS/src/main/java/com/rfs/version_os_2_11/GlobalMetadataCreator_OS_2_11.java b/RFS/src/main/java/com/rfs/version_os_2_11/GlobalMetadataCreator_OS_2_11.java index 960573bd3..cf8fe31ce 100644 --- a/RFS/src/main/java/com/rfs/version_os_2_11/GlobalMetadataCreator_OS_2_11.java +++ b/RFS/src/main/java/com/rfs/version_os_2_11/GlobalMetadataCreator_OS_2_11.java @@ -8,6 +8,7 @@ import com.fasterxml.jackson.databind.node.ObjectNode; import com.rfs.common.OpenSearchClient; +import com.rfs.models.GlobalMetadata; public class GlobalMetadataCreator_OS_2_11 { private static final Logger logger = LogManager.getLogger(GlobalMetadataCreator_OS_2_11.class); @@ -24,10 +25,10 @@ public GlobalMetadataCreator_OS_2_11(OpenSearchClient client, List legac this.indexTemplateAllowlist = indexTemplateAllowlist; } - public void create(ObjectNode root) { + public void create(GlobalMetadata root) { logger.info("Setting Global Metadata"); - GlobalMetadataData_OS_2_11 globalMetadata = new GlobalMetadataData_OS_2_11(root); + GlobalMetadataData_OS_2_11 globalMetadata = new GlobalMetadataData_OS_2_11(root.toObjectNode()); createLegacyTemplates(globalMetadata, client, legacyTemplateAllowlist); createComponentTemplates(globalMetadata, client, componentTemplateAllowlist); createIndexTemplates(globalMetadata, client, indexTemplateAllowlist); diff --git a/RFS/src/main/java/com/rfs/version_os_2_11/GlobalMetadataData_OS_2_11.java b/RFS/src/main/java/com/rfs/version_os_2_11/GlobalMetadataData_OS_2_11.java index 93eae298f..9b04c8b05 100644 --- a/RFS/src/main/java/com/rfs/version_os_2_11/GlobalMetadataData_OS_2_11.java +++ b/RFS/src/main/java/com/rfs/version_os_2_11/GlobalMetadataData_OS_2_11.java @@ -1,8 +1,9 @@ package com.rfs.version_os_2_11; import com.fasterxml.jackson.databind.node.ObjectNode; +import com.rfs.models.GlobalMetadata; -public class GlobalMetadataData_OS_2_11 implements com.rfs.common.GlobalMetadata.Data { +public class GlobalMetadataData_OS_2_11 implements GlobalMetadata { private final ObjectNode root; public GlobalMetadataData_OS_2_11(ObjectNode root) { diff --git a/RFS/src/main/java/com/rfs/version_os_2_11/IndexCreator_OS_2_11.java b/RFS/src/main/java/com/rfs/version_os_2_11/IndexCreator_OS_2_11.java index 2979a4a2d..2724d8321 100644 --- a/RFS/src/main/java/com/rfs/version_os_2_11/IndexCreator_OS_2_11.java +++ b/RFS/src/main/java/com/rfs/version_os_2_11/IndexCreator_OS_2_11.java @@ -5,6 +5,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; import com.rfs.common.OpenSearchClient; +import com.rfs.models.IndexMetadata; public class IndexCreator_OS_2_11 { private static final ObjectMapper mapper = new ObjectMapper(); @@ -14,8 +15,8 @@ public IndexCreator_OS_2_11 (OpenSearchClient client) { this.client = client; } - public Optional create(ObjectNode root, String indexName, String indexId) { - IndexMetadataData_OS_2_11 indexMetadata = new IndexMetadataData_OS_2_11(root, indexId, indexName); + public 
Optional create(IndexMetadata index, String indexName, String indexId) { + IndexMetadataData_OS_2_11 indexMetadata = new IndexMetadataData_OS_2_11(index.rawJson(), indexId, indexName); // Remove some settings which will cause errors if you try to pass them to the API ObjectNode settings = indexMetadata.getSettings(); @@ -28,7 +29,8 @@ public Optional create(ObjectNode root, String indexName, String ind // Assemble the request body ObjectNode body = mapper.createObjectNode(); body.set("aliases", indexMetadata.getAliases()); - body.set("mappings", indexMetadata.getMappings()); + // Working around for missing OS_1_3 definition + body.set("mappings", index.getMappings()); body.set("settings", settings); // Create the index; it's fine if it already exists diff --git a/RFS/src/main/java/com/rfs/version_os_2_11/IndexMetadataData_OS_2_11.java b/RFS/src/main/java/com/rfs/version_os_2_11/IndexMetadataData_OS_2_11.java index 81f5c4aaf..24df06868 100644 --- a/RFS/src/main/java/com/rfs/version_os_2_11/IndexMetadataData_OS_2_11.java +++ b/RFS/src/main/java/com/rfs/version_os_2_11/IndexMetadataData_OS_2_11.java @@ -1,10 +1,9 @@ package com.rfs.version_os_2_11; import com.fasterxml.jackson.databind.node.ObjectNode; +import com.rfs.models.IndexMetadata; -import com.rfs.common.IndexMetadata; - -public class IndexMetadataData_OS_2_11 implements IndexMetadata.Data { +public class IndexMetadataData_OS_2_11 implements IndexMetadata { private ObjectNode root; private String indexId; private String indexName; @@ -46,7 +45,11 @@ public ObjectNode getSettings() { } @Override - public ObjectNode toObjectNode() { + public ObjectNode rawJson() { return root; } + @Override + public IndexMetadata deepCopy() { + return new IndexMetadataData_OS_2_11(root.deepCopy(), indexId, indexName); + } } diff --git a/RFS/src/main/java/com/rfs/worker/DocumentsRunner.java b/RFS/src/main/java/com/rfs/worker/DocumentsRunner.java index db1d8c496..d30a47085 100644 --- a/RFS/src/main/java/com/rfs/worker/DocumentsRunner.java +++ b/RFS/src/main/java/com/rfs/worker/DocumentsRunner.java @@ -15,8 +15,8 @@ import com.rfs.common.DocumentReindexer; import com.rfs.common.LuceneDocumentsReader; -import com.rfs.common.ShardMetadata; import com.rfs.common.SnapshotShardUnpacker; +import com.rfs.models.ShardMetadata; import org.apache.lucene.document.Document; import reactor.core.publisher.Flux; @@ -26,7 +26,7 @@ public class DocumentsRunner { public static final String ALL_INDEX_MANIFEST = "all_index_manifest"; ScopedWorkCoordinator workCoordinator; - private final BiFunction shardMetadataFactory; + private final BiFunction shardMetadataFactory; private final SnapshotShardUnpacker.Factory unpackerFactory; private final Function readerFactory; private final DocumentReindexer reindexer; @@ -75,7 +75,7 @@ public ShardTooLargeException(long shardSizeBytes, long maxShardSize) { private void doDocumentsMigration(IndexAndShard indexAndShard) { log.info("Migrating docs for " + indexAndShard); - ShardMetadata.Data shardMetadata = shardMetadataFactory.apply(indexAndShard.indexName, indexAndShard.shard); + ShardMetadata shardMetadata = shardMetadataFactory.apply(indexAndShard.indexName, indexAndShard.shard); var unpacker = unpackerFactory.create(shardMetadata); var reader = readerFactory.apply(unpacker.unpack()); diff --git a/RFS/src/main/java/com/rfs/worker/IndexRunner.java b/RFS/src/main/java/com/rfs/worker/IndexRunner.java index e2f94db40..3af1eadcf 100644 --- a/RFS/src/main/java/com/rfs/worker/IndexRunner.java +++ 
b/RFS/src/main/java/com/rfs/worker/IndexRunner.java @@ -7,8 +7,8 @@ import lombok.AllArgsConstructor; import lombok.extern.slf4j.Slf4j; +import com.rfs.models.IndexMetadata; import com.rfs.common.FilterScheme; -import com.rfs.common.IndexMetadata; import com.rfs.transformers.Transformer; import com.rfs.version_os_2_11.IndexCreator_OS_2_11; @@ -35,8 +35,7 @@ public void migrateIndices() { .filter(FilterScheme.filterIndicesByAllowList(indexAllowlist, logger)) .peek(index -> { var indexMetadata = metadataFactory.fromRepo(snapshotName, index.getName()); - var root = indexMetadata.toObjectNode(); - var transformedRoot = transformer.transformIndexMetadata(root); + var transformedRoot = transformer.transformIndexMetadata(indexMetadata); var resultOp = indexCreator.create(transformedRoot, index.getName(), indexMetadata.getId()); resultOp.ifPresentOrElse(value -> log.info("Index " + index.getName() + " created successfully"), () -> log.info("Index " + index.getName() + " already existed; no work required") diff --git a/RFS/src/main/java/com/rfs/worker/MetadataRunner.java b/RFS/src/main/java/com/rfs/worker/MetadataRunner.java index 6f874538c..8a52a2b84 100644 --- a/RFS/src/main/java/com/rfs/worker/MetadataRunner.java +++ b/RFS/src/main/java/com/rfs/worker/MetadataRunner.java @@ -1,10 +1,9 @@ package com.rfs.worker; -import com.fasterxml.jackson.databind.node.ObjectNode; import lombok.AllArgsConstructor; import lombok.extern.slf4j.Slf4j; -import com.rfs.common.GlobalMetadata; +import com.rfs.models.GlobalMetadata; import com.rfs.transformers.Transformer; import com.rfs.version_os_2_11.GlobalMetadataCreator_OS_2_11; @@ -20,8 +19,7 @@ public class MetadataRunner { public void migrateMetadata() { log.info("Migrating the Templates..."); var globalMetadata = metadataFactory.fromRepo(snapshotName); - var root = globalMetadata.toObjectNode(); - var transformedRoot = transformer.transformGlobalMetadata(root); + var transformedRoot = transformer.transformGlobalMetadata(globalMetadata); metadataCreator.create(transformedRoot); log.info("Templates migration complete"); } diff --git a/RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java b/RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java index e820b3da8..149d0069f 100644 --- a/RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java +++ b/RFS/src/main/java/com/rfs/worker/ShardWorkPreparer.java @@ -3,7 +3,7 @@ import com.rfs.cms.IWorkCoordinator; import com.rfs.cms.ScopedWorkCoordinator; import com.rfs.common.FilterScheme; -import com.rfs.common.IndexMetadata; +import com.rfs.models.IndexMetadata; import com.rfs.common.SnapshotRepo; import lombok.Lombok; import lombok.SneakyThrows; @@ -73,7 +73,7 @@ private static void prepareShardWorkItems(IWorkCoordinator workCoordinator, Inde repoDataProvider.getIndicesInSnapshot(snapshotName).stream() .filter(FilterScheme.filterIndicesByAllowList(indexAllowlist, logger)) .peek(index -> { - IndexMetadata.Data indexMetadata = metadataFactory.fromRepo(snapshotName, index.getName()); + IndexMetadata indexMetadata = metadataFactory.fromRepo(snapshotName, index.getName()); log.info("Index " + indexMetadata.getName() + " has " + indexMetadata.getNumberOfShards() + " shards"); IntStream.range(0, indexMetadata.getNumberOfShards()).forEach(shardId -> { log.info("Creating Documents Work Item for index: " + indexMetadata.getName() + ", shard: " + shardId); diff --git a/RFS/src/test/java/com/rfs/framework/ClusterOperations.java b/RFS/src/test/java/com/rfs/framework/ClusterOperations.java index 030e54e39..1d27fb35b 100644 --- 
a/RFS/src/test/java/com/rfs/framework/ClusterOperations.java +++ b/RFS/src/test/java/com/rfs/framework/ClusterOperations.java @@ -1,12 +1,19 @@ package com.rfs.framework; import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; import org.apache.hc.client5.http.classic.methods.HttpPut; import org.apache.hc.client5.http.impl.classic.CloseableHttpClient; import org.apache.hc.client5.http.impl.classic.HttpClients; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.apache.hc.core5.http.io.entity.StringEntity; +import lombok.SneakyThrows; + import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Map; +import java.util.Map.Entry; import static org.hamcrest.CoreMatchers.anyOf; import static org.hamcrest.CoreMatchers.equalTo; @@ -25,12 +32,12 @@ public ClusterOperations(final String clusterUrl) { httpClient = HttpClients.createDefault(); } - public void createSnapshotRepository() throws IOException { + public void createSnapshotRepository(final String repoPath) throws IOException { // Create snapshot repository final var repositoryJson = "{\n" + " \"type\": \"fs\",\n" + " \"settings\": {\n" + - " \"location\": \"/usr/share/elasticsearch/snapshots\",\n" + + " \"location\": \"" + repoPath + "\",\n" + " \"compress\": false\n" + " }\n" + "}"; @@ -62,6 +69,16 @@ public void deleteDocument(final String index, final String docId) throws IOExce } } + @SneakyThrows + public Map.Entry get(final String path) { + final var getRequest = new HttpGet(clusterUrl + path); + + try (var response = httpClient.execute(getRequest)) { + var responseBody = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); + return Map.entry(response.getCode(), responseBody); + } + } + public void takeSnapshot(final String snapshotName, final String indexPattern) throws IOException { final var snapshotJson = "{\n" + " \"indices\": \"" + indexPattern + "\",\n" + @@ -77,4 +94,47 @@ public void takeSnapshot(final String snapshotName, final String indexPattern) t assertThat(response.getCode(), equalTo(200)); } } + + /** + * Creates a ES6 legacy template, intended for use on only ES 6 clusters + */ + @SneakyThrows + public void createES6LegacyTemplate(final String templateName, final String pattern) throws IOException { + final var templateJson = + "{\r\n" + // + " \"index_patterns\": [\r\n" + // + " \"" + pattern + "\"\r\n" + // + " ],\r\n" + // + " \"settings\": {\r\n" + // + " \"number_of_shards\": 1\r\n" + // + " },\r\n" + // + " \"aliases\": {\r\n" + // + " \"alias1\": {}\r\n" + // + " },\r\n" + // + " \"mappings\": {\r\n" + // + " \"_doc\": {\r\n" + // + " \"_source\": {\r\n" + // + " \"enabled\": true\r\n" + // + " },\r\n" + // + " \"properties\": {\r\n" + // + " \"host_name\": {\r\n" + // + " \"type\": \"keyword\"\r\n" + // + " },\r\n" + // + " \"created_at\": {\r\n" + // + " \"type\": \"date\",\r\n" + // + " \"format\": \"EEE MMM dd HH:mm:ss Z yyyy\"\r\n" + // + " }\r\n" + // + " }\r\n" + // + " }\r\n" + // + " }\r\n" + // + "}"; + + final var createRepoRequest = new HttpPut(clusterUrl + "/_template/" + templateName); + createRepoRequest.setEntity(new StringEntity(templateJson)); + createRepoRequest.setHeader("Content-Type", "application/json"); + + try (var response = httpClient.execute(createRepoRequest)) { + assertThat(EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8), response.getCode(), equalTo(200)); + } + } } diff --git 
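The reworked ClusterOperations test helper parameterizes the snapshot repository path and adds a simple get(path) accessor returning the status code and response body, which the end-to-end test below leans on for its assertions. A usage sketch, with the cluster URL as a placeholder:

    import com.rfs.framework.ClusterOperations;
    import com.rfs.framework.SearchClusterContainer;

    public class ClusterOperationsSketch {
        public static void main(String[] args) throws Exception {
            var operations = new ClusterOperations("http://localhost:9200");

            // Register an fs snapshot repository at the container's shared snapshot directory
            operations.createSnapshotRepository(SearchClusterContainer.CLUSTER_SNAPSHOT_DIR);

            // Create an ES 6 legacy template, then read it back; get(...) returns (status code, body)
            operations.createES6LegacyTemplate("my_template_foo", "bar*");
            var response = operations.get("/_template/my_template_foo");
            System.out.println(response.getKey() + ": " + response.getValue());
        }
    }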
a/RFS/src/test/java/com/rfs/framework/SimpleRestoreFromSnapshot.java b/RFS/src/test/java/com/rfs/framework/SimpleRestoreFromSnapshot.java index 9fd74a0e6..01130fc7e 100644 --- a/RFS/src/test/java/com/rfs/framework/SimpleRestoreFromSnapshot.java +++ b/RFS/src/test/java/com/rfs/framework/SimpleRestoreFromSnapshot.java @@ -4,8 +4,8 @@ import java.util.List; import com.rfs.common.ConnectionDetails; -import com.rfs.common.IndexMetadata; import com.rfs.common.OpenSearchClient; +import com.rfs.models.IndexMetadata; public interface SimpleRestoreFromSnapshot { @@ -31,8 +31,8 @@ public default void fullMigrationViaLocalSnapshot(final String targetClusterUrl) updateTargetCluster(indices, unpackedShardDataDir, targetClusterClient); } - public List extractSnapshotIndexData(final String localPath, final String snapshotName, final Path unpackedShardDataDir) throws Exception; + public List extractSnapshotIndexData(final String localPath, final String snapshotName, final Path unpackedShardDataDir) throws Exception; - public void updateTargetCluster(final List indices, final Path unpackedShardDataDir, final OpenSearchClient client) throws Exception; + public void updateTargetCluster(final List indices, final Path unpackedShardDataDir, final OpenSearchClient client) throws Exception; -} +} \ No newline at end of file diff --git a/RFS/src/test/java/com/rfs/framework/SimpleRestoreFromSnapshot_ES_7_10.java b/RFS/src/test/java/com/rfs/framework/SimpleRestoreFromSnapshot_ES_7_10.java index 1f4fbb421..235f35c09 100644 --- a/RFS/src/test/java/com/rfs/framework/SimpleRestoreFromSnapshot_ES_7_10.java +++ b/RFS/src/test/java/com/rfs/framework/SimpleRestoreFromSnapshot_ES_7_10.java @@ -11,11 +11,11 @@ import com.rfs.common.DefaultSourceRepoAccessor; import com.rfs.common.DocumentReindexer; import com.rfs.common.FileSystemRepo; -import com.rfs.common.IndexMetadata; import com.rfs.common.LuceneDocumentsReader; import com.rfs.common.OpenSearchClient; import com.rfs.common.SnapshotRepo; import com.rfs.common.SnapshotShardUnpacker; +import com.rfs.models.IndexMetadata; import com.rfs.version_es_7_10.IndexMetadataFactory_ES_7_10; import com.rfs.version_es_7_10.ShardMetadataFactory_ES_7_10; import com.rfs.version_es_7_10.SnapshotRepoProvider_ES_7_10; @@ -27,12 +27,12 @@ public class SimpleRestoreFromSnapshot_ES_7_10 implements SimpleRestoreFromSnaps private static final Logger logger = LogManager.getLogger(SimpleRestoreFromSnapshot_ES_7_10.class); - public List extractSnapshotIndexData(final String localPath, final String snapshotName, final Path unpackedShardDataDir) throws Exception { + public List extractSnapshotIndexData(final String localPath, final String snapshotName, final Path unpackedShardDataDir) throws Exception { IOUtils.rm(unpackedShardDataDir); final var repo = new FileSystemRepo(Path.of(localPath)); SnapshotRepo.Provider snapShotProvider = new SnapshotRepoProvider_ES_7_10(repo); - final List indices = snapShotProvider.getIndicesInSnapshot(snapshotName) + final List indices = snapShotProvider.getIndicesInSnapshot(snapshotName) .stream() .map(index -> { try { @@ -43,7 +43,7 @@ public List extractSnapshotIndexData(final String localPath, }) .collect(Collectors.toList()); - for (final IndexMetadata.Data index : indices) { + for (final IndexMetadata index : indices) { for (int shardId = 0; shardId < index.getNumberOfShards(); shardId++) { var shardMetadata = new ShardMetadataFactory_ES_7_10(snapShotProvider).fromRepo(snapshotName, index.getName(), shardId); DefaultSourceRepoAccessor repoAccessor = new 
DefaultSourceRepoAccessor(repo); @@ -54,8 +54,8 @@ public List extractSnapshotIndexData(final String localPath, return indices; } - public void updateTargetCluster(final List indices, final Path unpackedShardDataDir, final OpenSearchClient client) throws Exception { - for (final IndexMetadata.Data index : indices) { + public void updateTargetCluster(final List indices, final Path unpackedShardDataDir, final OpenSearchClient client) throws Exception { + for (final IndexMetadata index : indices) { for (int shardId = 0; shardId < index.getNumberOfShards(); shardId++) { final var documents = new LuceneDocumentsReader(unpackedShardDataDir.resolve(index.getName()).resolve(""+shardId)) diff --git a/RFS/src/test/java/com/rfs/integration/EndToEndTest.java b/RFS/src/test/java/com/rfs/integration/EndToEndTest.java index 4babec1ba..b4c1e3991 100644 --- a/RFS/src/test/java/com/rfs/integration/EndToEndTest.java +++ b/RFS/src/test/java/com/rfs/integration/EndToEndTest.java @@ -1,67 +1,117 @@ package com.rfs.integration; +import java.io.File; +import java.util.List; +import java.util.concurrent.CompletableFuture; + +import org.hamcrest.Matchers; import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ArgumentsSource; +import com.rfs.common.ClusterVersion; +import com.rfs.common.FileSystemRepo; +import com.rfs.common.FileSystemSnapshotCreator; +import com.rfs.common.OpenSearchClient; +import com.rfs.framework.ClusterOperations; import com.rfs.framework.SearchClusterContainer; import com.rfs.framework.SimpleRestoreFromSnapshot; - -import lombok.extern.slf4j.Slf4j; +import com.rfs.transformers.TransformFunctions; +import com.rfs.version_es_6_8.SnapshotRepoProvider_ES_6_8; +import com.rfs.version_es_6_8.GlobalMetadataFactory_ES_6_8; +import com.rfs.version_es_6_8.IndexMetadataFactory_ES_6_8; +import com.rfs.version_os_2_11.GlobalMetadataCreator_OS_2_11; +import com.rfs.version_os_2_11.IndexCreator_OS_2_11; +import com.rfs.worker.IndexRunner; +import com.rfs.worker.MetadataRunner; +import com.rfs.worker.SnapshotRunner; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; /** * Tests focused on setting up whole source clusters, performing a migration, and validation on the target cluster */ -@Slf4j public class EndToEndTest { - protected Object sourceCluster; - protected Object targetCluster; + @TempDir + private File localDirectory; + protected SimpleRestoreFromSnapshot simpleRfsInstance; @ParameterizedTest(name = "Target OpenSearch {0}") @ArgumentsSource(SupportedTargetCluster.class) - @Disabled public void migrateFrom_ES_v6_8(final SearchClusterContainer.Version targetVersion) throws Exception { - // Setup - // PSEUDO: Create a source cluster running ES 6.8 - // PSEUDO: Create 2 templates on the cluster, see https://www.elastic.co/guide/en/elasticsearch/reference/6.8/indices-templates.html - // - logs-* - // - data-rolling - // PSEUDO: Create 5 indices on the cluster - // - logs-01-2345 - // - logs-12-3456 - // - data-rolling - // - playground - // - playground2 - // PSEUDO: Add documents - // - 19x http-data docs into logs-01-2345 - // - 23x http-data docs into logs-12-3456 - // - 29x data-rolling - // - 5x geonames docs into playground - // - 7x geopoint into playground2 - - // PSEUDO: Create a target cluster running OS 2.X (Where x is the latest released version) - - // Action - // PSEUDO: Migrate from the snapshot - // 
simpleRfsInstance.fullMigrationViaLocalSnapshot(targetCluster.toString()); - // PSEUDO: Shutdown source cluster - - // Validation - - // PSEUDO: Verify creation of 2 index templates on the cluster - // PSEUDO: Verify creation of 5 indices on the cluster - // - logs-01-2345 - // - logs-12-3456 - // - data-rolling - // - playground - // - playground2 - // PSEUDO: Verify documents - - // PSEUDO: Additional validation: - if (SearchClusterContainer.OS_V2_14_0.equals(targetVersion)) { - // - Mapping type parameter is removed https://opensearch.org/docs/latest/breaking-changes/#remove-mapping-types-parameter + try (final var sourceCluster = new SearchClusterContainer(SearchClusterContainer.ES_V6_8_23); + final var targetCluster = new SearchClusterContainer(targetVersion)) { + // Setup + // Start the clusters for testing + var bothClustersStarted = CompletableFuture.allOf( + CompletableFuture.runAsync(() -> sourceCluster.start()), + CompletableFuture.runAsync(() -> targetCluster.start())); + bothClustersStarted.join(); + + // Setup + var sourceClusterOperations = new ClusterOperations(sourceCluster.getUrl()); + var templateName = "my_template_foo"; + sourceClusterOperations.createES6LegacyTemplate(templateName, "bar*"); + var indexName = "barstool"; + // Creates a document that uses the template + sourceClusterOperations.createDocument(indexName, "222", "{\"hi\":\"yay\"}"); + + // Take a snapshot + var snapshotName = "my_snap"; + var sourceClient = new OpenSearchClient(sourceCluster.getUrl(), null, null, true); + var snapshotCreator = new FileSystemSnapshotCreator(snapshotName, sourceClient, SearchClusterContainer.CLUSTER_SNAPSHOT_DIR); + SnapshotRunner.runAndWaitForCompletion(snapshotCreator); + sourceCluster.copySnapshotData(localDirectory.toString()); + + var sourceRepo = new FileSystemRepo(localDirectory.toPath()); + var targetClient = new OpenSearchClient(targetCluster.getUrl(), null, null, true); + + var repoDataProvider = new SnapshotRepoProvider_ES_6_8(sourceRepo); + var metadataFactory = new GlobalMetadataFactory_ES_6_8(repoDataProvider); + var metadataCreator = new GlobalMetadataCreator_OS_2_11(targetClient, null, null, null); + var transformer = TransformFunctions.getTransformer(ClusterVersion.ES_6_8, ClusterVersion.OS_2_11, 1); + // Action + // Migrate metadata + new MetadataRunner(snapshotName, metadataFactory, metadataCreator, transformer).migrateMetadata(); + + // Validation + var targetClusterOperations = new ClusterOperations(targetCluster.getUrl()); + var res = targetClusterOperations.get("/_template/" + templateName); + assertThat(res.getValue(), res.getKey(), equalTo(200)); + // Be sure that the mapping type on the template is an object + assertThat(res.getValue(), Matchers.containsString("mappings\":{")); + + res = targetClusterOperations.get("/" + indexName); + assertThat("Shouldn't exist yet, body:\n" + res.getValue(), res.getKey(), equalTo(404)); + + // Action + // Migrate indices + var indexMetadataFactory = new IndexMetadataFactory_ES_6_8(repoDataProvider); + var indexCreator = new IndexCreator_OS_2_11(targetClient); + new IndexRunner(snapshotName, indexMetadataFactory, indexCreator, transformer, List.of()).migrateIndices(); + + res = targetClusterOperations.get("/barstool"); + assertThat(res.getValue(), res.getKey(), equalTo(200)); + + // Action + // PSEUDO: Migrate documents + // PSEUDO: Verify creation of 2 index templates on the cluster + // PSEUDO: Verify creation of 5 indices on the cluster + // - logs-01-2345 + // - logs-12-3456 + // - data-rolling + // - playground 
+ // - playground2 + // PSEUDO: Verify documents + + // PSEUDO: Additional validation: + if (SearchClusterContainer.OS_V2_14_0.equals(targetVersion)) { + // - Mapping type parameter is removed https://opensearch.org/docs/latest/breaking-changes/#remove-mapping-types-parameter + } } } diff --git a/RFS/src/test/java/com/rfs/integration/SnapshotStateTest.java b/RFS/src/test/java/com/rfs/integration/SnapshotStateTest.java index c42656951..d2f31deb0 100644 --- a/RFS/src/test/java/com/rfs/integration/SnapshotStateTest.java +++ b/RFS/src/test/java/com/rfs/integration/SnapshotStateTest.java @@ -49,7 +49,7 @@ public void setUp() throws Exception { // Configure operations and rfs implementation operations = new ClusterOperations(cluster.getUrl()); - operations.createSnapshotRepository(); + operations.createSnapshotRepository(SearchClusterContainer.CLUSTER_SNAPSHOT_DIR); srfs = new SimpleRestoreFromSnapshot_ES_7_10(); } diff --git a/RFS/src/test/java/com/rfs/integration/SupportedTargetCluster.java b/RFS/src/test/java/com/rfs/integration/SupportedTargetCluster.java index b205aecbf..0681c5d6f 100644 --- a/RFS/src/test/java/com/rfs/integration/SupportedTargetCluster.java +++ b/RFS/src/test/java/com/rfs/integration/SupportedTargetCluster.java @@ -15,7 +15,6 @@ public class SupportedTargetCluster implements ArgumentsProvider { @Override public Stream provideArguments(final ExtensionContext context) { return Stream.of( - Arguments.of(SearchClusterContainer.OS_V1_3_16), Arguments.of(SearchClusterContainer.OS_V2_14_0) ); } diff --git a/RFS/src/testFixtures/java/com/rfs/framework/PreloadedDataContainerOrchestrator.java b/RFS/src/testFixtures/java/com/rfs/framework/PreloadedDataContainerOrchestrator.java index 1df70d311..304dd4824 100644 --- a/RFS/src/testFixtures/java/com/rfs/framework/PreloadedDataContainerOrchestrator.java +++ b/RFS/src/testFixtures/java/com/rfs/framework/PreloadedDataContainerOrchestrator.java @@ -111,7 +111,7 @@ private int getHashCodeOfImagesAndArgs(DockerClient dockerClient, boolean pullIf } private String getImageName() { - return PRELOADED_IMAGE_BASE_NAME + baseSourceVersion.prettyName; + return PRELOADED_IMAGE_BASE_NAME + baseSourceVersion.prettyName.replace(" ", "_").toLowerCase(); } private static DockerClient createDockerClient() { diff --git a/RFS/src/testFixtures/java/com/rfs/framework/SearchClusterContainer.java b/RFS/src/testFixtures/java/com/rfs/framework/SearchClusterContainer.java index ca8d8ba5e..1fff2673c 100644 --- a/RFS/src/testFixtures/java/com/rfs/framework/SearchClusterContainer.java +++ b/RFS/src/testFixtures/java/com/rfs/framework/SearchClusterContainer.java @@ -7,6 +7,7 @@ import com.google.common.collect.ImmutableMap; import lombok.EqualsAndHashCode; import lombok.Getter; +import lombok.ToString; import lombok.extern.slf4j.Slf4j; import org.testcontainers.containers.GenericContainer; import org.testcontainers.containers.wait.strategy.Wait; @@ -17,60 +18,52 @@ */ @Slf4j public class SearchClusterContainer extends GenericContainer { - public static final String CLUSTER_SNAPSHOT_DIR = "/usr/share/elasticsearch/snapshots"; + public static final String CLUSTER_SNAPSHOT_DIR = "/tmp/snapshots"; public static final Version ES_V7_10_2 = - new ElasticsearchVersion("docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2", "7.10.2"); + new ElasticsearchVersion("docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2", "ES 7.10.2"); public static final Version ES_V7_17 = - new ElasticsearchVersion("docker.elastic.co/elasticsearch/elasticsearch:7.17.22", "7.17.22"); - + new 
ElasticsearchVersion("docker.elastic.co/elasticsearch/elasticsearch:7.17.22", "ES 7.17.22"); + public static final Version ES_V6_8_23 = + new ElasticsearchVersion("docker.elastic.co/elasticsearch/elasticsearch:6.8.23", "ES 6.8.23"); + public static final Version OS_V1_3_16 = - new OpenSearchVersion("opensearchproject/opensearch:1.3.16", "1.3.16"); + new OpenSearchVersion("opensearchproject/opensearch:1.3.16", "OS 1.3.16"); public static final Version OS_V2_14_0 = - new OpenSearchVersion("opensearchproject/opensearch:2.14.0", "2.14.0"); - - protected static Map DEFAULT_ES_LAUNCH_ENV_VARIABLES = Map.of( - "discovery.type", "single-node", - "path.repo", CLUSTER_SNAPSHOT_DIR); - - protected static Map DEFAULT_OS_LAUNCH_ENV_VARIABLES = new ImmutableMap.Builder() - .putAll(DEFAULT_ES_LAUNCH_ENV_VARIABLES) - .put("plugins.security.disabled", "true") - .put("OPENSEARCH_INITIAL_ADMIN_PASSWORD", "SecurityIsDisabled123$%^") - .build(); + new OpenSearchVersion("opensearchproject/opensearch:2.14.0", "OS 2.14.0"); + + private enum INITIALIZATION_FLAVOR { + ELASTICSEARCH(Map.of( + "discovery.type", "single-node", + "path.repo", CLUSTER_SNAPSHOT_DIR)), + OPENSEARCH(new ImmutableMap.Builder() + .putAll(ELASTICSEARCH.getEnvVariables()) + .put("plugins.security.disabled", "true") + .put("OPENSEARCH_INITIAL_ADMIN_PASSWORD", "SecurityIsDisabled123$%^") + .build()); + + @Getter + public final Map envVariables; + INITIALIZATION_FLAVOR(Map envVariables) { + this.envVariables = envVariables; + } + } private final Version version; @SuppressWarnings("resource") public SearchClusterContainer(final Version version) { - this(version, getDefaultMap(version.initializationType)); - } - - private static Map getDefaultMap(INITIALIZATION_FLAVOR initializationType) { - switch (initializationType) { - case ELASTICSEARCH: - return DEFAULT_ES_LAUNCH_ENV_VARIABLES; - case OPENSEARCH: - return DEFAULT_OS_LAUNCH_ENV_VARIABLES; - default: - throw new IllegalArgumentException("Unknown initialization flavor: " + initializationType); - } - } - - public SearchClusterContainer(final Version version, Map environmentVariables) { super(DockerImageName.parse(version.imageName)); this.withExposedPorts(9200, 9300) - .withEnv(environmentVariables) + .withEnv(version.getInitializationType().getEnvVariables()) .waitingFor(Wait.forHttp("/") .forPort(9200) .forStatusCode(200) .withStartupTimeout(Duration.ofMinutes(1))); this.version = version; - } public void copySnapshotData(final String directory) { - log.info("Copy stuff was called"); try { // Execute command to list all files in the directory final var result = this.execInContainer("sh", "-c", "find " + CLUSTER_SNAPSHOT_DIR + " -type f"); @@ -95,7 +88,7 @@ public void copySnapshotData(final String directory) { } public void start() { - log.info("Starting ElasticsearchContainer version:" + version.prettyName); + log.info("Starting container version:" + version.prettyName); super.start(); } @@ -107,20 +100,17 @@ public String getUrl() { @Override public void close() { - log.info("Stopping ElasticsearchContainer version:" + version.prettyName); + log.info("Stopping container version:" + version.prettyName); log.debug("Instance logs:\n" + this.getLogs()); this.stop(); } - public enum INITIALIZATION_FLAVOR { - ELASTICSEARCH, - OPENSEARCH - } - @EqualsAndHashCode @Getter + @ToString(onlyExplicitlyIncluded = true, includeFieldNames = false) public static class Version { final String imageName; + @ToString.Include final String prettyName; final INITIALIZATION_FLAVOR initializationType; diff --git 
a/TrafficCapture/dockerSolution/src/main/docker/elasticsearchTestConsole/Pipfile.lock b/TrafficCapture/dockerSolution/src/main/docker/elasticsearchTestConsole/Pipfile.lock index c23f4db68..79b6c8d7f 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/elasticsearchTestConsole/Pipfile.lock +++ b/TrafficCapture/dockerSolution/src/main/docker/elasticsearchTestConsole/Pipfile.lock @@ -164,11 +164,12 @@ }, "certifi": { "hashes": [ - "sha256:3cd43f1c6fa7dedc5899d69d3ad0398fd018ad1a17fba83ddaf78aa46c747516", - "sha256:ddc6c8ce995e6987e7faf5e3f1b02b302836a0e5d98ece18392cb1a36c72ad56" + "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b", + "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90" ], + "index": "pypi", "markers": "python_version >= '3.6'", - "version": "==2024.6.2" + "version": "==2024.7.4" }, "charset-normalizer": { "hashes": [ diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/cli.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/cli.py index 62ea4e7d5..efe3d144a 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/cli.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/cli.py @@ -1,12 +1,12 @@ import json from pprint import pprint import click -import console_link.logic.clusters as logic_clusters -import console_link.logic.metrics as logic_metrics -import console_link.logic.backfill as logic_backfill -import console_link.logic.snapshot as logic_snapshot -import console_link.logic.metadata as logic_metadata -import console_link.logic.replay as logic_replay +import console_link.middleware.clusters as clusters_ +import console_link.middleware.metrics as metrics_ +import console_link.middleware.backfill as backfill_ +import console_link.middleware.snapshot as snapshot_ +import console_link.middleware.metadata as metadata_ +import console_link.middleware.replay as replay_ from console_link.models.utils import ExitCode from console_link.environment import Environment @@ -65,10 +65,10 @@ def cat_indices_cmd(ctx, refresh): click.echo( json.dumps( { - "source_cluster": logic_clusters.cat_indices( + "source_cluster": clusters_.cat_indices( ctx.env.source_cluster, as_json=True, refresh=refresh ), - "target_cluster": logic_clusters.cat_indices( + "target_cluster": clusters_.cat_indices( ctx.env.target_cluster, as_json=True, refresh=refresh ), } @@ -76,9 +76,9 @@ def cat_indices_cmd(ctx, refresh): ) return click.echo("SOURCE CLUSTER") - click.echo(logic_clusters.cat_indices(ctx.env.source_cluster, refresh=refresh)) + click.echo(clusters_.cat_indices(ctx.env.source_cluster, refresh=refresh)) click.echo("TARGET CLUSTER") - click.echo(logic_clusters.cat_indices(ctx.env.target_cluster, refresh=refresh)) + click.echo(clusters_.cat_indices(ctx.env.target_cluster, refresh=refresh)) @cluster_group.command(name="connection-check") @@ -86,16 +86,16 @@ def cat_indices_cmd(ctx, refresh): def connection_check_cmd(ctx): """Checks if a connection can be established to source and target clusters""" click.echo("SOURCE CLUSTER") - click.echo(logic_clusters.connection_check(ctx.env.source_cluster)) + click.echo(clusters_.connection_check(ctx.env.source_cluster)) click.echo("TARGET CLUSTER") - click.echo(logic_clusters.connection_check(ctx.env.target_cluster)) + click.echo(clusters_.connection_check(ctx.env.target_cluster)) 
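For orientation, a minimal sketch (not part of the diff) of driving the renamed middleware module directly. The module path, the Environment constructor, and the cat_indices/connection_check signatures follow the code above; the services.yaml path and the idea of calling it outside the CLI are assumptions.

import console_link.middleware.clusters as clusters_
from console_link.environment import Environment

env = Environment("services.yaml")  # assumed local config path
# Plain-text _cat/indices listing, as echoed by the CLI commands above
print(clusters_.cat_indices(env.source_cluster, refresh=False))
# Parsed variant used when the --json flag is set
print(clusters_.cat_indices(env.source_cluster, as_json=True, refresh=False))
# Connectivity probe mirrored by the connection-check command
print(clusters_.connection_check(env.target_cluster))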
@cluster_group.command(name="run-test-benchmarks") @click.pass_obj def run_test_benchmarks_cmd(ctx): """Run a series of OpenSearch Benchmark workloads against the source cluster""" - click.echo(logic_clusters.run_test_benchmarks(ctx.env.source_cluster)) + click.echo(clusters_.run_test_benchmarks(ctx.env.source_cluster)) @cluster_group.command(name="clear-indices") @@ -111,12 +111,12 @@ def clear_indices_cmd(ctx, acknowledge_risk, cluster): cluster_focus = ctx.env.source_cluster if cluster.lower() == 'source' else ctx.env.target_cluster if acknowledge_risk: click.echo("Performing clear indices operation...") - click.echo(logic_clusters.clear_indices(cluster_focus)) + click.echo(clusters_.clear_indices(cluster_focus)) else: if click.confirm(f'Clearing indices WILL result in the loss of all data on the {cluster.lower()} cluster. ' f'Are you sure you want to continue?'): click.echo(f"Performing clear indices operation on {cluster.lower()} cluster...") - click.echo(logic_clusters.clear_indices(cluster_focus)) + click.echo(clusters_.clear_indices(cluster_focus)) else: click.echo("Aborting command.") @@ -141,8 +141,8 @@ def snapshot_group(ctx): def create_snapshot_cmd(ctx, wait, max_snapshot_rate_mb_per_node): """Create a snapshot of the source cluster""" snapshot = ctx.env.snapshot - result = logic_snapshot.create(snapshot, wait=wait, - max_snapshot_rate_mb_per_node=max_snapshot_rate_mb_per_node) + result = snapshot_.create(snapshot, wait=wait, + max_snapshot_rate_mb_per_node=max_snapshot_rate_mb_per_node) click.echo(result.value) @@ -151,7 +151,7 @@ def create_snapshot_cmd(ctx, wait, max_snapshot_rate_mb_per_node): @click.pass_obj def status_snapshot_cmd(ctx, deep_check): """Check the status of the snapshot""" - result = logic_snapshot.status(ctx.env.snapshot, deep_check=deep_check) + result = snapshot_.status(ctx.env.snapshot, deep_check=deep_check) click.echo(result.value) # ##################### BACKFILL ################### @@ -171,7 +171,7 @@ def backfill_group(ctx): @backfill_group.command(name="describe") @click.pass_obj def describe_backfill_cmd(ctx): - click.echo(logic_backfill.describe(ctx.env.backfill, as_json=ctx.json)) + click.echo(backfill_.describe(ctx.env.backfill, as_json=ctx.json)) @backfill_group.command(name="create") @@ -180,9 +180,9 @@ def describe_backfill_cmd(ctx): help="Flag to only print populated pipeline config when executed") @click.pass_obj def create_backfill_cmd(ctx, pipeline_template_file, print_config_only): - exitcode, message = logic_backfill.create(ctx.env.backfill, - pipeline_template_path=pipeline_template_file, - print_config_only=print_config_only) + exitcode, message = backfill_.create(ctx.env.backfill, + pipeline_template_path=pipeline_template_file, + print_config_only=print_config_only) if exitcode != ExitCode.SUCCESS: raise click.ClickException(message) click.echo(message) @@ -192,7 +192,7 @@ def create_backfill_cmd(ctx, pipeline_template_file, print_config_only): @click.option('--pipeline-name', default=None, help='Optionally specify a pipeline name') @click.pass_obj def start_backfill_cmd(ctx, pipeline_name): - exitcode, message = logic_backfill.start(ctx.env.backfill, pipeline_name=pipeline_name) + exitcode, message = backfill_.start(ctx.env.backfill, pipeline_name=pipeline_name) if exitcode != ExitCode.SUCCESS: raise click.ClickException(message) click.echo(message) @@ -202,7 +202,7 @@ def start_backfill_cmd(ctx, pipeline_name): @click.option('--pipeline-name', default=None, help='Optionally specify a pipeline name') @click.pass_obj def 
stop_backfill_cmd(ctx, pipeline_name): - exitcode, message = logic_backfill.stop(ctx.env.backfill, pipeline_name=pipeline_name) + exitcode, message = backfill_.stop(ctx.env.backfill, pipeline_name=pipeline_name) if exitcode != ExitCode.SUCCESS: raise click.ClickException(message) click.echo(message) @@ -212,7 +212,7 @@ def stop_backfill_cmd(ctx, pipeline_name): @click.argument("units", type=int, required=True) @click.pass_obj def scale_backfill_cmd(ctx, units: int): - exitcode, message = logic_backfill.scale(ctx.env.backfill, units) + exitcode, message = backfill_.scale(ctx.env.backfill, units) if exitcode != ExitCode.SUCCESS: raise click.ClickException(message) click.echo(message) @@ -223,7 +223,7 @@ def scale_backfill_cmd(ctx, units: int): @click.pass_obj def status_backfill_cmd(ctx, deep_check): logger.info(f"Called `console backfill status`, with {deep_check=}") - exitcode, message = logic_backfill.status(ctx.env.backfill, deep_check=deep_check) + exitcode, message = backfill_.status(ctx.env.backfill, deep_check=deep_check) if exitcode != ExitCode.SUCCESS: raise click.ClickException(message) click.echo(message) @@ -242,13 +242,13 @@ def replay_group(ctx): @replay_group.command(name="describe") @click.pass_obj def describe_replay_cmd(ctx): - click.echo(logic_replay.describe(ctx.env.replay, as_json=ctx.json)) + click.echo(replay_.describe(ctx.env.replay, as_json=ctx.json)) @replay_group.command(name="start") @click.pass_obj def start_replay_cmd(ctx): - exitcode, message = logic_replay.start(ctx.env.replay) + exitcode, message = replay_.start(ctx.env.replay) if exitcode != ExitCode.SUCCESS: raise click.ClickException(message) click.echo(message) @@ -257,7 +257,7 @@ def start_replay_cmd(ctx): @replay_group.command(name="stop") @click.pass_obj def stop_replay_cmd(ctx): - exitcode, message = logic_replay.stop(ctx.env.replay) + exitcode, message = replay_.stop(ctx.env.replay) if exitcode != ExitCode.SUCCESS: raise click.ClickException(message) click.echo(message) @@ -267,7 +267,7 @@ def stop_replay_cmd(ctx): @click.argument("units", type=int, required=True) @click.pass_obj def scale_replay_cmd(ctx, units: int): - exitcode, message = logic_replay.scale(ctx.env.replay, units) + exitcode, message = replay_.scale(ctx.env.replay, units) if exitcode != ExitCode.SUCCESS: raise click.ClickException(message) click.echo(message) @@ -276,7 +276,7 @@ def scale_replay_cmd(ctx, units: int): @replay_group.command(name="status") @click.pass_obj def status_replay_cmd(ctx): - exitcode, message = logic_replay.status(ctx.env.replay) + exitcode, message = replay_.status(ctx.env.replay) if exitcode != ExitCode.SUCCESS: raise click.ClickException(message) click.echo(message) @@ -297,7 +297,7 @@ def metadata_group(ctx): @click.option("--detach", is_flag=True, help="Run metadata migration in detached mode") @click.pass_obj def migrate_metadata_cmd(ctx, detach): - exitcode, message = logic_metadata.migrate(ctx.env.metadata, detach) + exitcode, message = metadata_.migrate(ctx.env.metadata, detach) if exitcode != ExitCode.SUCCESS: raise click.ClickException(message) click.echo(message) @@ -332,7 +332,7 @@ def list_metrics_cmd(ctx): @click.option("--lookback", type=int, default=60, help="Lookback in minutes") @click.pass_obj def get_metrics_data_cmd(ctx, component, metric_name, statistic, lookback): - metric_data = logic_metrics.get_metric_data( + metric_data = metrics_.get_metric_data( ctx.env.metrics_source, component, metric_name, diff --git 
a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/environment.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/environment.py index c2185b1e5..021e2eabb 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/environment.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/environment.py @@ -1,15 +1,14 @@ import logging -from typing import Optional, Dict +from typing import Optional +from console_link.models.factories import get_replayer, get_backfill, get_kafka, get_snapshot, \ + get_metrics_source from console_link.models.cluster import Cluster from console_link.models.metrics_source import MetricsSource -from console_link.logic.metrics import get_metrics_source -from console_link.logic.backfill import get_backfill from console_link.models.backfill_base import Backfill -from console_link.models.snapshot import FileSystemSnapshot, Snapshot, S3Snapshot +from console_link.models.snapshot import Snapshot from console_link.models.replayer_base import Replayer -from console_link.models.replayer_ecs import ECSReplayer -from console_link.models.replayer_docker import DockerReplayer -from console_link.models.kafka import Kafka, MSK, StandardKafka +from console_link.models.kafka import Kafka + import yaml from cerberus import Validator @@ -18,24 +17,6 @@ logger = logging.getLogger(__name__) -def get_snapshot(config: Dict, source_cluster: Cluster): - if 'fs' in config: - return FileSystemSnapshot(config, source_cluster) - return S3Snapshot(config, source_cluster) - - -def get_replayer(config: Dict): - if 'ecs' in config: - return ECSReplayer(config) - return DockerReplayer(config) - - -def get_kafka(config: Dict): - if 'msk' in config: - return MSK(config) - return StandardKafka(config) - - SCHEMA = { "source_cluster": {"type": "dict", "required": False}, "target_cluster": {"type": "dict", "required": True}, diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/backfill.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/backfill.py deleted file mode 100644 index f395697f8..000000000 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/backfill.py +++ /dev/null @@ -1,136 +0,0 @@ -from enum import Enum -import json -import logging -from typing import Dict, Optional, Tuple -from console_link.models.utils import ExitCode -from console_link.models.backfill_osi import OpenSearchIngestionBackfill -from console_link.models.backfill_rfs import DockerRFSBackfill, ECSRFSBackfill -from console_link.models.cluster import Cluster -from console_link.models.backfill_base import Backfill -import yaml - - -logger = logging.getLogger(__name__) - - -BackfillType = Enum("BackfillType", - ["opensearch_ingestion", "reindex_from_snapshot"]) - - -class UnsupportedBackfillTypeError(Exception): - def __init__(self, supplied_backfill: str): - super().__init__("Unsupported backfill type", supplied_backfill) - - -def get_backfill(config: Dict, source_cluster: Optional[Cluster], target_cluster: Optional[Cluster]) -> Backfill: - if BackfillType.opensearch_ingestion.name in config: - if source_cluster is None: - raise ValueError("source_cluster must be provided for OpenSearch Ingestion backfill") - if target_cluster is None: - raise ValueError("target_cluster must be provided for 
OpenSearch Ingestion backfill") - logger.debug("Creating OpenSearch Ingestion backfill instance") - return OpenSearchIngestionBackfill(config=config, - source_cluster=source_cluster, - target_cluster=target_cluster) - elif BackfillType.reindex_from_snapshot.name in config: - if target_cluster is None: - raise ValueError("target_cluster must be provided for RFS backfill") - - if 'docker' in config[BackfillType.reindex_from_snapshot.name]: - logger.debug("Creating Docker RFS backfill instance") - return DockerRFSBackfill(config=config, - target_cluster=target_cluster) - elif 'ecs' in config[BackfillType.reindex_from_snapshot.name]: - logger.debug("Creating ECS RFS backfill instance") - return ECSRFSBackfill(config=config, - target_cluster=target_cluster) - - logger.error(f"An unsupported metrics source type was provided: {config.keys()}") - if len(config.keys()) > 1: - raise UnsupportedBackfillTypeError(', '.join(config.keys())) - raise UnsupportedBackfillTypeError(next(iter(config.keys()))) - - -def describe(backfill: Backfill, as_json=False) -> str: - response = backfill.describe() - if as_json: - return json.dumps(response) - return yaml.safe_dump(response) - - -def create(backfill: Backfill, *args, **kwargs) -> Tuple[ExitCode, str]: - logger.info(f"Creating backfill with {args=} and {kwargs=}") - try: - result = backfill.create(*args, **kwargs) - except NotImplementedError: - logger.error(f"Create is not implemented for backfill {type(backfill).__name__}") - return ExitCode.FAILURE, f"Create is not implemented for backfill {type(backfill).__name__}" - except Exception as e: - logger.error(f"Failed to create backfill: {e}") - return ExitCode.FAILURE, f"Failure when creating backfill: {type(e).__name__} {e}" - - if result.success: - return ExitCode.SUCCESS, "Backfill created successfully." + "\n" + result.display() - return ExitCode.FAILURE, "Backfill creation failed." + "\n" + result.display() - - -def start(backfill: Backfill, *args, **kwargs) -> Tuple[ExitCode, str]: - try: - result = backfill.start(*args, **kwargs) - except NotImplementedError: - logger.error(f"Start is not implemented for backfill {type(backfill).__name__}") - return ExitCode.FAILURE, f"Start is not implemented for backfill {type(backfill).__name__}" - except Exception as e: - logger.error(f"Failed to start backfill: {e}") - return ExitCode.FAILURE, f"Failure when starting backfill: {type(e).__name__} {e}" - - if result.success: - return ExitCode.SUCCESS, "Backfill started successfully." + "\n" + result.display() - return ExitCode.FAILURE, "Backfill start failed." + "\n" + result.display() - - -def stop(backfill: Backfill, *args, **kwargs) -> Tuple[ExitCode, str]: - logger.info("Stopping backfill") - try: - result = backfill.stop(*args, **kwargs) - except NotImplementedError: - logger.error(f"Stop is not implemented for backfill {type(backfill).__name__}") - return ExitCode.FAILURE, f"Stop is not implemented for backfill {type(backfill).__name__}" - except Exception as e: - logger.error(f"Failed to stop backfill: {e}") - return ExitCode.FAILURE, f"Failure when stopping backfill: {type(e).__name__} {e}" - if result.success: - return ExitCode.SUCCESS, "Backfill stopped successfully." + "\n" + result.display() - return ExitCode.FAILURE, "Backfill stop failed." 
+ "\n" + result.display() - - -def scale(backfill: Backfill, units: int, *args, **kwargs) -> Tuple[ExitCode, str]: - logger.info(f"Scaling backfill to {units} units") - try: - result = backfill.scale(units, *args, **kwargs) - except NotImplementedError: - logger.error(f"Scale is not implemented for backfill {type(backfill).__name__}") - return ExitCode.FAILURE, f"Scale is not implemented for backfill {type(backfill).__name__}" - except Exception as e: - logger.error(f"Failed to scale backfill: {e}") - return ExitCode.FAILURE, f"Failure when scaling backfill: {type(e).__name__} {e}" - if result.success: - return ExitCode.SUCCESS, "Backfill scaled successfully." + "\n" + result.display() - return ExitCode.FAILURE, "Backfill scale failed." + "\n" + result.display() - - -def status(backfill: Backfill, deep_check: bool, *args, **kwargs) -> Tuple[ExitCode, str]: - logger.info(f"Getting backfill status with {deep_check=}") - try: - status = backfill.get_status(deep_check, *args, **kwargs) - except NotImplementedError: - logger.error(f"Status is not implemented for backfill {type(backfill).__name__}") - return ExitCode.FAILURE, f"Status is not implemented for backfill: {type(backfill).__name__}" - except Exception as e: - logger.error(f"Failed to get status of backfill: {e}") - return ExitCode.FAILURE, f"Failure when getting status of backfill: {type(e).__name__} {e}" - if status.success: - return (ExitCode.SUCCESS, - f"{status.value[0]}\n{status.value[1]}" if not isinstance(status.value, str) else status.value) - return ExitCode.FAILURE, "Backfill status retrieval failed." + "\n" + \ - f"{status.value[0]}\n{status.value[1]}" if not isinstance(status.value, str) else status.value diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/replay.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/replay.py deleted file mode 100644 index 7a337e379..000000000 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/replay.py +++ /dev/null @@ -1,76 +0,0 @@ -import json -import logging -from typing import Tuple -from console_link.models.utils import ExitCode -from console_link.models.replayer_base import Replayer -import yaml - - -logger = logging.getLogger(__name__) - - -def describe(replayer: Replayer, as_json=False) -> str: - response = replayer.describe() - if as_json: - return json.dumps(response) - return yaml.safe_dump(response) - - -def start(replayer: Replayer, *args, **kwargs) -> Tuple[ExitCode, str]: - try: - result = replayer.start(*args, **kwargs) - except NotImplementedError: - logger.error(f"Start is not implemented for replayer {type(replayer).__name__}") - return ExitCode.FAILURE, f"Start is not implemented for replayer {type(replayer).__name__}" - except Exception as e: - logger.error(f"Failed to start replayer: {e}") - return ExitCode.FAILURE, f"Failure when starting replayer: {type(e).__name__} {e}" - - if result.success: - return ExitCode.SUCCESS, "Replayer started successfully." + "\n" + result.display() - return ExitCode.FAILURE, "Replayer start failed." 
+ "\n" + result.display() - - -def stop(replayer: Replayer, *args, **kwargs) -> Tuple[ExitCode, str]: - logger.info("Stopping replayer") - try: - result = replayer.stop(*args, **kwargs) - except NotImplementedError: - logger.error(f"Stop is not implemented for replayer {type(replayer).__name__}") - return ExitCode.FAILURE, f"Stop is not implemented for replayer {type(replayer).__name__}" - except Exception as e: - logger.error(f"Failed to stop replayer: {e}") - return ExitCode.FAILURE, f"Failure when stopping replayer: {type(e).__name__} {e}" - if result.success: - return ExitCode.SUCCESS, "Replayer stopped successfully." + "\n" + result.display() - return ExitCode.FAILURE, "Replayer stop failed." + "\n" + result.display() - - -def scale(replayer: Replayer, units: int, *args, **kwargs) -> Tuple[ExitCode, str]: - logger.info(f"Scaling replayer to {units} units") - try: - result = replayer.scale(units, *args, **kwargs) - except NotImplementedError: - logger.error(f"Scale is not implemented for replayer {type(replayer).__name__}") - return ExitCode.FAILURE, f"Scale is not implemented for replayer {type(replayer).__name__}" - except Exception as e: - logger.error(f"Failed to scale replayer: {e}") - return ExitCode.FAILURE, f"Failure when scaling replayer: {type(e).__name__} {e}" - if result.success: - return ExitCode.SUCCESS, "Replayer scaled successfully." + "\n" + result.display() - return ExitCode.FAILURE, "Replayer scale failed." + "\n" + result.display() - - -def status(replayer: Replayer, *args, **kwargs) -> Tuple[ExitCode, str]: - logger.info("Getting replayer status") - try: - result = replayer.get_status(*args, **kwargs) - except NotImplementedError: - logger.error(f"Status is not implemented for replayer {type(replayer).__name__}") - return ExitCode.FAILURE, f"Status is not implemented for replayer: {type(replayer).__name__}" - except Exception as e: - logger.error(f"Failed to get status of replayer: {e}") - return ExitCode.FAILURE, f"Failure when getting status of replayer: {type(e).__name__} {e}" - if result.success: - return ExitCode.SUCCESS, result.value[0].name - return ExitCode.FAILURE, "Replayer status retrieval failed." 
+ "\n" + result.value[1] diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/__init__.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/__init__.py similarity index 100% rename from TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/__init__.py rename to TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/__init__.py diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/backfill.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/backfill.py new file mode 100644 index 000000000..70832089f --- /dev/null +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/backfill.py @@ -0,0 +1,56 @@ +import logging +from typing import Dict, Tuple +from functools import partial + +from console_link.middleware.error_handler import handle_errors +from console_link.middleware.json_support import support_json_return +from console_link.models.backfill_base import Backfill, BackfillStatus +from console_link.models.command_result import CommandResult +from console_link.models.utils import ExitCode + + +logger = logging.getLogger(__name__) + + +@support_json_return() +def describe(backfill: Backfill, as_json=False) -> Tuple[ExitCode, Dict]: + response = backfill.describe() + return (ExitCode.SUCCESS, response) + + +handle_backfill_errors = partial(handle_errors, service_type="backfill") + + +@handle_backfill_errors(on_success=lambda result: (ExitCode.SUCCESS, + "Backfill created successfully." + "\n" + result)) +def create(backfill: Backfill, *args, **kwargs) -> CommandResult[str]: + logger.info(f"Creating backfill with {args=} and {kwargs=}") + return backfill.create(*args, **kwargs) + + +@handle_errors("backfill", + on_success=lambda result: (ExitCode.SUCCESS, "Backfill started successfully." + "\n" + result)) +def start(backfill: Backfill, *args, **kwargs) -> CommandResult[str]: + logger.info("Starting backfill") + return backfill.start(*args, **kwargs) + + +@handle_errors("backfill", + on_success=lambda result: (ExitCode.SUCCESS, "Backfill stopped successfully." 
+ "\n" + result)) +def stop(backfill: Backfill, *args, **kwargs) -> CommandResult[str]: + logger.info("Stopping backfill") + return backfill.stop(*args, **kwargs) + + +@handle_errors("backfill", + on_success=lambda status: (ExitCode.SUCCESS, f"{status[0]}\n{status[1]}")) +def status(backfill: Backfill, deep_check: bool, *args, **kwargs) -> CommandResult[Tuple[BackfillStatus, str]]: + logger.info(f"Getting backfill status with {deep_check=}") + return backfill.get_status(deep_check, *args, **kwargs) + + +@handle_errors("backfill", + on_success=lambda status: (ExitCode.SUCCESS, status)) +def scale(backfill: Backfill, units: int, *args, **kwargs) -> CommandResult[str]: + logger.info(f"Scaling backfill to {units} units") + return backfill.scale(units, *args, **kwargs) diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/clusters.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/clusters.py similarity index 100% rename from TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/clusters.py rename to TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/clusters.py diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/error_handler.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/error_handler.py new file mode 100644 index 000000000..f09658fb8 --- /dev/null +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/error_handler.py @@ -0,0 +1,34 @@ +import logging +from typing import Any, Callable, Tuple + +from console_link.models.cluster import Cluster +from console_link.models.kafka import Kafka +from console_link.models.metadata import Metadata +from console_link.models.metrics_source import MetricsSource +from console_link.models.replayer_base import Replayer +from console_link.models.snapshot import Snapshot +from console_link.models.backfill_base import Backfill +from console_link.models.utils import ExitCode + +logger = logging.getLogger(__name__) + +Service = Cluster | Backfill | Kafka | Metadata | MetricsSource | Replayer | Snapshot + + +def handle_errors(service_type: str, + on_success: Callable[[Any], Tuple[ExitCode, str]] = lambda status: (ExitCode.SUCCESS, status) + ) -> Callable[[Any], Tuple[ExitCode, str]]: + def decorator(func: Callable[[Any], Tuple[ExitCode, str]]) -> Callable[[Any], Tuple[ExitCode, str]]: + def wrapper(service: Service, *args, **kwargs) -> Tuple[ExitCode, str]: + try: + result = func(service, *args, **kwargs) + except NotImplementedError: + logger.error(f"{func.__name__} is not implemented for {service_type} {type(service).__name__}") + return (ExitCode.FAILURE, + f"{func.__name__} is not implemented for {service_type} {type(service).__name__}") + except Exception as e: + logger.error(f"Failed to {func.__name__} {service_type}: {e}") + return ExitCode.FAILURE, f"Failure on {func.__name__} for {service_type}: {type(e).__name__} {e}" + return on_success(result.value) + return wrapper + return decorator diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/json_support.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/json_support.py new file mode 100644 index 
000000000..5a2338af0 --- /dev/null +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/json_support.py @@ -0,0 +1,18 @@ +import json +from typing import Any, Callable, Dict, List, Tuple + +import yaml + +from console_link.models.utils import ExitCode + + +def support_json_return() -> Callable[[Tuple[ExitCode, Dict | List | str]], Tuple[ExitCode, str]]: + def decorator(func: Callable[[Tuple[ExitCode, Dict | List | str]], Tuple[ExitCode, str]]) \ + -> Callable[[Any], Tuple[ExitCode, str]]: + def wrapper(*args, as_json=False, **kwargs) -> Tuple[ExitCode, str]: + result = func(*args, **kwargs) + if as_json: + return (result[0], json.dumps(result[1])) + return (result[0], yaml.safe_dump(result[1])) + return wrapper + return decorator diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/kafka.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/kafka.py similarity index 100% rename from TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/kafka.py rename to TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/kafka.py diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/metadata.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/metadata.py similarity index 56% rename from TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/metadata.py rename to TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/metadata.py index 8db2289e7..131854236 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/metadata.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/metadata.py @@ -1,5 +1,6 @@ from typing import Tuple +from console_link.middleware.error_handler import handle_errors from console_link.models.metadata import Metadata from console_link.models.utils import ExitCode, generate_log_file_path import logging @@ -7,16 +8,10 @@ logger = logging.getLogger(__name__) +@handle_errors(service_type="metadata", on_success=lambda v: (ExitCode.SUCCESS, v)) def migrate(metadata: Metadata, detached: bool) -> Tuple[ExitCode, str]: logger.info("Migrating metadata") if detached: log_file = generate_log_file_path("metadata_migration") logger.info(f"Running in detached mode, writing logs to {log_file}") - try: - result = metadata.migrate(detached_log=log_file if detached else None) - except Exception as e: - logger.error(f"Failed to migrate metadata: {e}") - return ExitCode.FAILURE, f"Failure when migrating metadata: {e}" - if result.success: - return ExitCode.SUCCESS, result.value - return ExitCode.FAILURE, result.value + return metadata.migrate(detached_log=log_file if detached else None) diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/metrics.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/metrics.py similarity index 62% rename from TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/metrics.py rename to 
TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/metrics.py index 6d63d8830..e1f482908 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/metrics.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/metrics.py @@ -1,29 +1,11 @@ from typing import List, Tuple -from console_link.models.metrics_source import MetricsSource, Component, MetricStatistic, PrometheusMetricsSource, \ - CloudwatchMetricsSource +from console_link.models.metrics_source import MetricsSource, Component, MetricStatistic from datetime import datetime, timedelta import logging logger = logging.getLogger(__name__) -class UnsupportedMetricsSourceError(Exception): - def __init__(self, supplied_metrics_source: str): - super().__init__("Unsupported metrics source type", supplied_metrics_source) - - -def get_metrics_source(config): - if 'prometheus' in config: - return PrometheusMetricsSource(config) - elif 'cloudwatch' in config: - return CloudwatchMetricsSource(config) - else: - logger.error(f"An unsupported metrics source type was provided: {config.keys()}") - if len(config.keys()) > 1: - raise UnsupportedMetricsSourceError(', '.join(config.keys())) - raise UnsupportedMetricsSourceError(next(iter(config.keys()))) - - def get_metric_data(metrics_source: MetricsSource, component: str, metric_name: str, statistic: str, lookback: int) -> List[Tuple[str, float]]: logger.info(f"Called get_metric_data with {component=}, {metric_name=}," diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/replay.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/replay.py new file mode 100644 index 000000000..2cefe0d52 --- /dev/null +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/replay.py @@ -0,0 +1,46 @@ +import logging +from typing import Dict, Tuple + +from console_link.models.command_result import CommandResult +from console_link.middleware.error_handler import handle_errors +from console_link.middleware.json_support import support_json_return +from console_link.models.replayer_base import Replayer +from console_link.models.utils import ExitCode +from functools import partial + +logger = logging.getLogger(__name__) + + +@support_json_return() +def describe(replayer: Replayer, as_json=False) -> Tuple[ExitCode, Dict]: + return (ExitCode.SUCCESS, replayer.describe()) + + +handle_replay_errors = partial(handle_errors, service_type="replayer") + + +@handle_replay_errors(on_success=lambda result: (ExitCode.SUCCESS, + "Replayer started successfully." + "\n" + result)) +def start(replayer: Replayer, *args, **kwargs) -> CommandResult[str]: + logger.info("Starting replayer") + return replayer.start(*args, **kwargs) + + +@handle_replay_errors(on_success=lambda result: (ExitCode.SUCCESS, + "Replayer stopped successfully." + "\n" + result)) +def stop(replayer: Replayer, *args, **kwargs) -> CommandResult[str]: + logger.info("Stopping replayer") + return replayer.stop(*args, **kwargs) + + +@handle_replay_errors(on_success=lambda result: (ExitCode.SUCCESS, + "Replayer scaled successfully." 
+ "\n" + result)) +def scale(replayer: Replayer, units: int, *args, **kwargs) -> CommandResult[str]: + logger.info(f"Scaling replayer to {units} units") + return replayer.scale(units, *args, **kwargs) + + +@handle_replay_errors() +def status(replayer: Replayer, *args, **kwargs) -> CommandResult[str]: + logger.info("Getting replayer status") + return replayer.get_status(*args, **kwargs) diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/snapshot.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/snapshot.py similarity index 91% rename from TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/snapshot.py rename to TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/snapshot.py index a9310a002..975939b3e 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/logic/snapshot.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/middleware/snapshot.py @@ -11,7 +11,7 @@ def create(snapshot: Snapshot, *args, **kwargs) -> CommandResult: return snapshot.create(*args, **kwargs) except Exception as e: logger.error(f"Failure running create snapshot: {e}") - return CommandResult(status=False, message=f"Failure running create snapshot: {e}") + return CommandResult(success=False, value=f"Failure running create snapshot: {e}") def status(snapshot: Snapshot, *args, **kwargs) -> CommandResult: diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/backfill_base.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/backfill_base.py index f76315efc..0fe6b90fc 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/backfill_base.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/backfill_base.py @@ -1,5 +1,5 @@ from enum import Enum -from typing import Dict +from typing import Dict, Tuple from abc import ABC, abstractmethod from console_link.models.schema_tools import contains_one_of @@ -33,29 +33,29 @@ def __init__(self, config: Dict) -> None: raise ValueError("Invalid config file for backfill", v.errors) @abstractmethod - def create(self, *args, **kwargs) -> CommandResult: + def create(self, *args, **kwargs) -> CommandResult[str]: """If necessary, create/deploy the backfill mechanism iteslf. After create succesfully completes, the backfill should be ready to start.""" pass @abstractmethod - def start(self, *args, **kwargs) -> CommandResult: + def start(self, *args, **kwargs) -> CommandResult[str]: """Begin running the backfill. After running start, the user should be able to assume that--barring exceptions or failures--their data will begin moving to the target cluster.""" pass @abstractmethod - def stop(self, *args, **kwargs) -> CommandResult: + def stop(self, *args, **kwargs) -> CommandResult[str]: """Stop or pause the backfill. 
This does not make guarantees about resumeability.""" pass @abstractmethod - def get_status(self, *args, **kwargs) -> CommandResult: + def get_status(self, *args, **kwargs) -> CommandResult[Tuple[BackfillStatus, str]]: """Return a status""" pass @abstractmethod - def scale(self, units: int, *args, **kwargs) -> CommandResult: + def scale(self, units: int, *args, **kwargs) -> CommandResult[str]: pass def describe(self) -> Dict: diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/command_result.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/command_result.py index 622534a83..4ed26741f 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/command_result.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/command_result.py @@ -1,9 +1,13 @@ -from typing import NamedTuple, Any +from typing import Generic, TypeVar +from dataclasses import dataclass +T = TypeVar('T') -class CommandResult(NamedTuple): + +@dataclass +class CommandResult(Generic[T]): success: bool - value: Any + value: T | Exception | None def display(self) -> str: if self.value: diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/factories.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/factories.py new file mode 100644 index 000000000..57d2765b7 --- /dev/null +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/console_link/models/factories.py @@ -0,0 +1,111 @@ +from enum import Enum +from typing import Dict, Optional + +from console_link.models.replayer_docker import DockerReplayer +from console_link.models.metrics_source import CloudwatchMetricsSource, PrometheusMetricsSource +from console_link.models.backfill_base import Backfill +from console_link.models.backfill_osi import OpenSearchIngestionBackfill +from console_link.models.backfill_rfs import DockerRFSBackfill, ECSRFSBackfill +from console_link.models.cluster import Cluster +from console_link.models.kafka import MSK, StandardKafka +from console_link.models.replayer_ecs import ECSReplayer +from console_link.models.snapshot import FileSystemSnapshot, S3Snapshot +import logging + +logger = logging.getLogger(__name__) + + +class UnsupportedMetricsSourceError(Exception): + def __init__(self, supplied_metrics_source: str): + super().__init__("Unsupported metrics source type", supplied_metrics_source) + + +class UnsupportedReplayerError(Exception): + def __init__(self, supplied_replayer: str): + super().__init__("Unsupported replayer type", supplied_replayer) + + +class UnsupportedKafkaError(Exception): + def __init__(self, supplied_kafka: str): + super().__init__("Unsupported kafka type", supplied_kafka) + + +class UnsupportedSnapshotError(Exception): + def __init__(self, supplied_snapshot: str): + super().__init__("Unsupported snapshot type", supplied_snapshot) + + +BackfillType = Enum("BackfillType", + ["opensearch_ingestion", "reindex_from_snapshot"]) + + +class UnsupportedBackfillTypeError(Exception): + def __init__(self, supplied_backfill: str): + super().__init__("Unsupported backfill type", supplied_backfill) + + +def get_snapshot(config: Dict, source_cluster: Cluster): + if 'fs' in config: + return FileSystemSnapshot(config, source_cluster) + elif 's3' in config: + return S3Snapshot(config, 
source_cluster) + logger.error(f"An unsupported snapshot type was provided: {config.keys()}") + if len(config.keys()) > 1: + raise UnsupportedSnapshotError(', '.join(config.keys())) + raise UnsupportedSnapshotError(next(iter(config.keys()))) + + +def get_replayer(config: Dict): + if 'ecs' in config: + return ECSReplayer(config) + if 'docker' in config: + return DockerReplayer(config) + logger.error(f"An unsupported replayer type was provided: {config.keys()}") + raise UnsupportedReplayerError(next(iter(config.keys()))) + + +def get_kafka(config: Dict): + if 'msk' in config: + return MSK(config) + return StandardKafka(config) + + +def get_backfill(config: Dict, source_cluster: Optional[Cluster], target_cluster: Optional[Cluster]) -> Backfill: + if BackfillType.opensearch_ingestion.name in config: + if source_cluster is None: + raise ValueError("source_cluster must be provided for OpenSearch Ingestion backfill") + if target_cluster is None: + raise ValueError("target_cluster must be provided for OpenSearch Ingestion backfill") + logger.debug("Creating OpenSearch Ingestion backfill instance") + return OpenSearchIngestionBackfill(config=config, + source_cluster=source_cluster, + target_cluster=target_cluster) + elif BackfillType.reindex_from_snapshot.name in config: + if target_cluster is None: + raise ValueError("target_cluster must be provided for RFS backfill") + + if 'docker' in config[BackfillType.reindex_from_snapshot.name]: + logger.debug("Creating Docker RFS backfill instance") + return DockerRFSBackfill(config=config, + target_cluster=target_cluster) + elif 'ecs' in config[BackfillType.reindex_from_snapshot.name]: + logger.debug("Creating ECS RFS backfill instance") + return ECSRFSBackfill(config=config, + target_cluster=target_cluster) + + logger.error(f"An unsupported metrics source type was provided: {config.keys()}") + if len(config.keys()) > 1: + raise UnsupportedBackfillTypeError(', '.join(config.keys())) + raise UnsupportedBackfillTypeError(next(iter(config.keys()))) + + +def get_metrics_source(config): + if 'prometheus' in config: + return PrometheusMetricsSource(config) + elif 'cloudwatch' in config: + return CloudwatchMetricsSource(config) + else: + logger.error(f"An unsupported metrics source type was provided: {config.keys()}") + if len(config.keys()) > 1: + raise UnsupportedMetricsSourceError(', '.join(config.keys())) + raise UnsupportedMetricsSourceError(next(iter(config.keys()))) diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/data/services.yaml b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/data/services.yaml index 104999821..70e013339 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/data/services.yaml +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/data/services.yaml @@ -32,3 +32,6 @@ replay: cluster_name: "my-cluster" service_name: "my-service" scale: 2 +kafka: + broker_endpoints: "kafka1:9092,kafka2:9092,kafka3:9092" + standard: diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill.py index 623e84299..565c22017 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill.py +++ 
b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill.py @@ -4,12 +4,12 @@ import pytest import requests_mock -from console_link.logic.backfill import get_backfill, UnsupportedBackfillTypeError from console_link.models.backfill_base import Backfill, BackfillStatus from console_link.models.backfill_osi import OpenSearchIngestionBackfill from console_link.models.backfill_rfs import DockerRFSBackfill, ECSRFSBackfill from console_link.models.ecs_service import ECSService, InstanceStatuses - +from console_link.models.factories import (UnsupportedBackfillTypeError, + get_backfill) from tests.utils import create_valid_cluster TEST_DATA_DIRECTORY = pathlib.Path(__file__).parent / "data" @@ -245,6 +245,7 @@ def test_ecs_rfs_calculates_backfill_status_from_ecs_instance_statuses_running(e assert str(mocked_running_status) == value.value[1] +@pytest.mark.skip(reason="Need to implement mocking for multiple calls to endpoint") def test_ecs_rfs_get_status_deep_check(ecs_rfs_backfill, mocker): target = create_valid_cluster() mocked_instance_status = InstanceStatuses( diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill_logic.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill_logic.py index 17a31d15d..6657b816f 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill_logic.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill_logic.py @@ -2,10 +2,10 @@ import pytest -from console_link.logic.backfill import get_backfill, describe +from console_link.middleware.backfill import describe from console_link.models.backfill_osi import OpenSearchIngestionBackfill from console_link.models.backfill_rfs import DockerRFSBackfill, ECSRFSBackfill - +from console_link.models.factories import get_backfill from tests.utils import create_valid_cluster TEST_DATA_DIRECTORY = pathlib.Path(__file__).parent / "data" @@ -63,7 +63,8 @@ def osi_backfill() -> OpenSearchIngestionBackfill: def test_backfill_describe_includes_salient_details_docker_rfs(docker_rfs_backfill: DockerRFSBackfill): # I'm trying to be quite non-prescriptive about what should be included in describe # but at a minimum, the backfill strategy and deployment type need to be present. - description = describe(docker_rfs_backfill) + result = describe(docker_rfs_backfill) + description = result[1] assert "reindex_from_snapshot" in description assert "docker" in description @@ -74,7 +75,8 @@ def test_backfill_describe_includes_salient_details_docker_rfs(docker_rfs_backfi def test_backfill_describe_includes_salient_details_ecs_rfs(ecs_rfs_backfill: ECSRFSBackfill): # I'm trying to be quite non-prescriptive about what should be included in describe # but at a minimum, the backfill strategy and deployment type need to be present. 
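The reworked assertions below follow the new middleware contract, where describe() returns an (ExitCode, payload) pair instead of a bare string. A minimal sketch of that contract, assuming some_backfill is any configured Backfill instance such as the fixtures above:

from console_link.middleware.backfill import describe
from console_link.models.utils import ExitCode

# some_backfill: any configured Backfill instance (assumed)
exit_code, text = describe(some_backfill)                # YAML string by default
exit_code, blob = describe(some_backfill, as_json=True)  # JSON string via support_json_return
assert exit_code == ExitCode.SUCCESS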
- description = describe(ecs_rfs_backfill) + result = describe(ecs_rfs_backfill) + description = result[1] assert "reindex_from_snapshot" in description assert "ecs" in description assert ecs_rfs_backfill.ecs_config.get("service_name") in description @@ -86,7 +88,8 @@ def test_backfill_describe_includes_salient_details_ecs_rfs(ecs_rfs_backfill: EC def test_backfill_describe_includes_salient_details_osi(osi_backfill: OpenSearchIngestionBackfill): # I'm trying to be quite non-prescriptive about what should be included in describe # but at a minimum, the backfill strategy and deployment type need to be present. - description = describe(osi_backfill) + result = describe(osi_backfill) + description = result[1] assert "opensearch_ingestion" in description assert "unit-test-pipeline" in description assert "us-west-2" in description diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill_osi.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill_osi.py index 13f0f2e28..6e41a38b7 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill_osi.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_backfill_osi.py @@ -1,7 +1,9 @@ -import pytest # type: ignore import os -from tests.utils import create_valid_cluster + +import pytest # type: ignore + from console_link.models.backfill_osi import OpenSearchIngestionBackfill +from tests.utils import create_valid_cluster # Define a valid cluster configuration valid_osi_migration_config = { diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_cli.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_cli.py index f7ddc5fd0..3c3f64b28 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_cli.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_cli.py @@ -1,16 +1,20 @@ +import json import pathlib +import os +import pytest import requests_mock -from console_link.models.backfill_rfs import ECSRFSBackfill -from console_link.models.ecs_service import ECSService, InstanceStatuses -from console_link.models.command_result import CommandResult +from click.testing import CliRunner +import console_link.middleware as middleware from console_link.cli import cli from console_link.environment import Environment - -from click.testing import CliRunner -import pytest - +from console_link.models.backfill_rfs import ECSRFSBackfill +from console_link.models.cluster import Cluster +from console_link.models.command_result import CommandResult +from console_link.models.ecs_service import ECSService, InstanceStatuses +from console_link.models.kafka import StandardKafka +from console_link.models.replayer_ecs import ECSReplayer TEST_DATA_DIRECTORY = pathlib.Path(__file__).parent / "data" VALID_SERVICES_YAML = TEST_DATA_DIRECTORY / "services.yaml" @@ -29,6 +33,18 @@ def env(): return Environment(VALID_SERVICES_YAML) +@pytest.fixture(autouse=True) +def set_fake_aws_credentials(): + # These are example credentials from + # https://docs.aws.amazon.com/IAM/latest/UserGuide/security-creds.html#sec-access-keys-and-secret-access-keys + # They allow the boto client to be created for any AWS services, but functions must be intercepted + # before any real calls are made. 
+ os.environ['AWS_ACCESS_KEY_ID'] = 'AKIAIOSFODNN7EXAMPLE' + os.environ['AWS_SECRET_ACCESS_KEY'] = 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY' + +# Tests around the general CLI functionality + + def test_cli_without_valid_services_file_raises_error(runner): result = runner.invoke(cli, ['--config-file', '~/non-existent/file/services.yaml', 'clusters', 'cat-indices']) assert result.exit_code == 1 @@ -42,38 +58,60 @@ def test_cli_with_valid_services_file_does_not_raise_error(runner): assert result.exit_code == 0 -def test_cli_cluster_cat_indices(runner, env, mocker): - mock = mocker.patch('console_link.logic.clusters.cat_indices') +# The following tests are mostly smoke-tests with a goal of covering every CLI command and option. +# They generally mock functions either at the logic or the model layer, though occasionally going all the way to +# an external endpoint call. +# Standardizing these in the future would be great, but the priority right now is getting overall coverage, and +# testing that . + +def test_cli_cluster_cat_indices(runner, mocker): + middleware_mock = mocker.spy(middleware.clusters, 'cat_indices') + api_mock = mocker.patch.object(Cluster, 'call_api') result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'cat-indices'], catch_exceptions=True) # Should have been called two times. + middleware_mock.assert_called() + api_mock.assert_called() assert result.exit_code == 0 assert 'SOURCE CLUSTER' in result.output assert 'TARGET CLUSTER' in result.output + + +def test_cli_cluster_cat_indices_as_json(runner, mocker): + mock = mocker.patch('console_link.middleware.clusters.cat_indices', return_value={'index': 'data'}, autospec=True) + result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), '--json', 'clusters', 'cat-indices'], + catch_exceptions=True) + # Should have been called two times. mock.assert_called() + assert result.exit_code == 0 + assert json.loads(result.output).keys() == {'source_cluster', 'target_cluster'} -def test_cli_cluster_connection_check(runner, env, mocker): - mock = mocker.patch('console_link.logic.clusters.connection_check') +def test_cli_cluster_connection_check(runner, mocker): + middleware_mock = mocker.spy(middleware.clusters, 'connection_check') + api_mock = mocker.patch.object(Cluster, 'call_api') result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'connection-check'], catch_exceptions=True) - # Should have been called two times. assert result.exit_code == 0 assert 'SOURCE CLUSTER' in result.output assert 'TARGET CLUSTER' in result.output - mock.assert_called() + # Should have been called two times. 
+ middleware_mock.assert_called() + api_mock.assert_called() -def test_cli_cluster_run_test_benchmarks(runner, env, mocker): - mock = mocker.patch('console_link.logic.clusters.run_test_benchmarks') +def test_cli_cluster_run_test_benchmarks(runner, mocker): + middleware_mock = mocker.spy(middleware.clusters, 'run_test_benchmarks') + model_mock = mocker.patch.object(Cluster, 'execute_benchmark_workload') result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'run-test-benchmarks'], catch_exceptions=True) - mock.assert_called_once() + middleware_mock.assert_called_once() + model_mock.assert_called() assert result.exit_code == 0 -def test_cli_cluster_clear_indices(runner, env, mocker): - mock = mocker.patch('console_link.logic.clusters.clear_indices') +def test_cli_cluster_clear_indices(runner, mocker): + mock = mocker.patch('console_link.middleware.clusters.clear_indices') result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'clear-indices', '--cluster', 'source', '--acknowledge-risk'], @@ -82,8 +120,8 @@ def test_cli_cluster_clear_indices(runner, env, mocker): assert result.exit_code == 0 -def test_cli_cluster_clear_indices_no_acknowledge(runner, env, mocker): - mock = mocker.patch('console_link.logic.clusters.clear_indices') +def test_cli_cluster_clear_indices_no_acknowledge(runner, mocker): + mock = mocker.patch('console_link.middleware.clusters.clear_indices') runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'clear-indices', '--cluster', 'source'], @@ -91,24 +129,39 @@ def test_cli_cluster_clear_indices_no_acknowledge(runner, env, mocker): assert not mock.called -def test_cli_with_metrics_get_data(runner, env, mocker): - mock = mocker.patch('console_link.models.metrics_source.PrometheusMetricsSource.get_metrics') - result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'metrics', 'list'], - catch_exceptions=True) - mock.assert_called_once() - assert result.exit_code == 0 +source_cat_indices = """ +green open logs-221998 pKNVNlhcRuuUXlwPJag9Kg 5 0 1000 0 167.4kb 167.4kb +green open geonames DlT1Qp-7SqaARuECxTaYvw 5 0 1000 0 343.2kb 343.2kb +yellow open sg7-auditlog-2024.06.12 Ih0JZg_eQV6gNXRPsAf73w 1 1 128 0 55.7kb 55.7kb +""" +target_cat_indices = """ +green open logs-221998 x_gytR5_SCCwsSf0ydcFrw 5 0 1000 0 153.7kb 153.7kb +green open geonames Q96YGsvlQ-6hcZvMNzyyDg 5 0 1000 0 336.2kb 336.2kb +green open reindexed-logs 2queREGZRriWNZ9ukMvsuw 5 0 0 0 1kb 1kb +green open nyc_taxis j1HSbvtGRbG7H7SlJXrB0g 1 0 1000 0 159.3kb 159.3kb +""" -def test_cli_with_backfill_describe(runner, env, mocker): - mock = mocker.patch('console_link.logic.backfill.describe') - result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'backfill', 'describe'], - catch_exceptions=True) - mock.assert_called_once() +def test_cli_cat_indices_e2e(runner, env): + with requests_mock.Mocker() as rm: + rm.get(f"{env.source_cluster.endpoint}/_cat/indices/_all", + status_code=200, + text=source_cat_indices) + rm.get(f"{env.target_cluster.endpoint}/_cat/indices/_all", + status_code=200, + text=target_cat_indices) + result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'cat-indices'], + catch_exceptions=True) + assert result.exit_code == 0 + assert 'SOURCE CLUSTER' in result.output + assert 'TARGET CLUSTER' in result.output + assert source_cat_indices in result.output + assert target_cat_indices in result.output -def test_cli_snapshot_create(runner, env, mocker): - mock = 
mocker.patch('console_link.logic.snapshot.create') +def test_cli_snapshot_create(runner, mocker): + mock = mocker.patch('console_link.middleware.snapshot.create') # Set the mock return value mock.return_value = CommandResult(success=True, value="Snapshot created successfully.") @@ -124,8 +177,8 @@ def test_cli_snapshot_create(runner, env, mocker): mock.assert_called_once() -def test_cli_snapshot_status(runner, env, mocker): - mock = mocker.patch('console_link.logic.snapshot.status') +def test_cli_snapshot_status(runner, mocker): + mock = mocker.patch('console_link.middleware.snapshot.status') # Set the mock return value mock.return_value = CommandResult(success=True, value="Snapshot status: COMPLETED") @@ -140,45 +193,60 @@ def test_cli_snapshot_status(runner, env, mocker): mock.assert_called_once() -source_cat_indices = """ -green open logs-221998 pKNVNlhcRuuUXlwPJag9Kg 5 0 1000 0 167.4kb 167.4kb -green open geonames DlT1Qp-7SqaARuECxTaYvw 5 0 1000 0 343.2kb 343.2kb -yellow open sg7-auditlog-2024.06.12 Ih0JZg_eQV6gNXRPsAf73w 1 1 128 0 55.7kb 55.7kb -""" -target_cat_indices = """ -green open logs-221998 x_gytR5_SCCwsSf0ydcFrw 5 0 1000 0 153.7kb 153.7kb -green open geonames Q96YGsvlQ-6hcZvMNzyyDg 5 0 1000 0 336.2kb 336.2kb -green open reindexed-logs 2queREGZRriWNZ9ukMvsuw 5 0 0 0 1kb 1kb -green open nyc_taxis j1HSbvtGRbG7H7SlJXrB0g 1 0 1000 0 159.3kb 159.3kb -""" +def test_cli_with_backfill_describe(runner, mocker): + mock = mocker.patch('console_link.middleware.backfill.describe') + result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'backfill', 'describe'], + catch_exceptions=True) + mock.assert_called_once() + assert result.exit_code == 0 -def test_cli_cat_indices_e2e(runner, env): - with requests_mock.Mocker() as rm: - rm.get(f"{env.source_cluster.endpoint}/_cat/indices/_all", - status_code=200, - text=source_cat_indices) - rm.get(f"{env.target_cluster.endpoint}/_cat/indices/_all", - status_code=200, - text=target_cat_indices) - result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'clusters', 'cat-indices'], - catch_exceptions=True) +def test_cli_backfill_create_rfs(runner, mocker): + mock = mocker.patch.object(ECSRFSBackfill, 'create', autospec=True) + result = runner.invoke(cli, ['--config-file', str(TEST_DATA_DIRECTORY / "services_with_ecs_rfs.yaml"), + 'backfill', 'create'], + catch_exceptions=True) + mock.assert_called_once() + assert result.exit_code == 0 + +def test_cli_backfill_start(runner, mocker): + mock = mocker.patch.object(ECSRFSBackfill, 'start', autospec=True) + result = runner.invoke(cli, ['--config-file', str(TEST_DATA_DIRECTORY / "services_with_ecs_rfs.yaml"), + 'backfill', 'start'], + catch_exceptions=True) + mock.assert_called_once() assert result.exit_code == 0 - assert 'SOURCE CLUSTER' in result.output - assert 'TARGET CLUSTER' in result.output - assert source_cat_indices in result.output - assert target_cat_indices in result.output -def test_cli_metadata_migrate(runner, env, mocker): - mock = mocker.patch("subprocess.run") - result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'metadata', 'migrate'], +def test_cli_backfill_stop(runner, mocker): + mock = mocker.patch.object(ECSRFSBackfill, 'stop', autospec=True) + result = runner.invoke(cli, ['--config-file', str(TEST_DATA_DIRECTORY / "services_with_ecs_rfs.yaml"), + 'backfill', 'stop'], + catch_exceptions=True) + mock.assert_called_once() + assert result.exit_code == 0 + + +def test_cli_backfill_scale(runner, mocker): + mock = 
mocker.patch.object(ECSRFSBackfill, 'scale', autospec=True) + result = runner.invoke(cli, ['--config-file', str(TEST_DATA_DIRECTORY / "services_with_ecs_rfs.yaml"), + 'backfill', 'scale', '3'], catch_exceptions=True) mock.assert_called_once() assert result.exit_code == 0 +def test_cli_backfill_scale_with_no_units_fails(runner, mocker): + mock = mocker.patch.object(ECSRFSBackfill, 'scale', autospec=True) + result = runner.invoke(cli, ['--config-file', str(TEST_DATA_DIRECTORY / "services_with_ecs_rfs.yaml"), + 'backfill', 'scale'], + catch_exceptions=True) + mock.assert_not_called() + assert result.exit_code == 2 + print(result.output) + + def test_get_backfill_status_no_deep_check(runner, mocker): mocked_running_status = InstanceStatuses( desired=1, @@ -223,3 +291,107 @@ def test_get_backfill_status_with_deep_check(runner, mocker): mock_ecs_service_call.assert_called_once() mock_detailed_status_call.assert_called_once() + + +def test_replay_describe(runner, mocker): + mock = mocker.patch('console_link.middleware.replay.describe') + result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'replay', 'describe'], + catch_exceptions=True) + mock.assert_called_once() + assert result.exit_code == 0 + + +def test_replay_start(runner, mocker): + mock = mocker.patch.object(ECSReplayer, 'start', autospec=True) + result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'replay', 'start'], + catch_exceptions=True) + mock.assert_called_once() + assert result.exit_code == 0 + + +def test_replay_stop(runner, mocker): + mock = mocker.patch.object(ECSReplayer, 'stop', autospec=True) + result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'replay', 'stop'], + catch_exceptions=True) + mock.assert_called_once() + assert result.exit_code == 0 + + +def test_replay_scale(runner, mocker): + mock = mocker.patch.object(ECSReplayer, 'scale', autospec=True) + result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'replay', 'scale', '5'], + catch_exceptions=True) + mock.assert_called_once() + assert result.exit_code == 0 + + +def test_replay_scale_with_no_units_fails(runner, mocker): + mock = mocker.patch.object(ECSReplayer, 'scale', autospec=True) + result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'replay', 'scale'], + catch_exceptions=True) + mock.assert_not_called() + assert result.exit_code == 2 + + +def test_replay_status(runner, mocker): + mock = mocker.patch.object(ECSReplayer, 'get_status', autospec=True) + result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'replay', 'status'], + catch_exceptions=True) + mock.assert_called_once() + assert result.exit_code == 0 + + +def test_cli_metadata_migrate(runner, mocker): + mock = mocker.patch("subprocess.run") + result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'metadata', 'migrate'], + catch_exceptions=True) + mock.assert_called_once() + assert result.exit_code == 0 + + +def test_cli_with_metrics_get_data(runner, mocker): + mock = mocker.patch('console_link.models.metrics_source.PrometheusMetricsSource.get_metrics') + result = runner.invoke(cli, ['--config-file', str(VALID_SERVICES_YAML), 'metrics', 'list'], + catch_exceptions=True) + mock.assert_called_once() + assert result.exit_code == 0 + + +def test_cli_kafka_create_topic(runner, mocker): + # These commands _should_ go through the middleware layer but currently don't + # middleware_mock = mocker.spy(middleware.kafka, 'create_topic') + # middleware_mock.assert_called_once_with(env.kafka, 
'test') + + model_mock = mocker.patch.object(StandardKafka, 'create_topic') + result = runner.invoke(cli, ['-vv', '--config-file', str(VALID_SERVICES_YAML), 'kafka', 'create-topic', + '--topic-name', 'test'], + catch_exceptions=True) + model_mock.assert_called_once_with(topic_name='test') + assert result.exit_code == 0 + + +def test_cli_kafka_delete_topic(runner, mocker): + model_mock = mocker.patch.object(StandardKafka, 'delete_topic') + result = runner.invoke(cli, ['-vv', '--config-file', str(VALID_SERVICES_YAML), 'kafka', 'delete-topic', + '--topic-name', 'test', '--acknowledge-risk'], + catch_exceptions=True) + model_mock.assert_called_once_with(topic_name='test') + assert result.exit_code == 0 + + +def test_cli_kafka_describe_consumer_group(runner, mocker): + model_mock = mocker.patch.object(StandardKafka, 'describe_consumer_group') + result = runner.invoke(cli, ['-vv', '--config-file', str(VALID_SERVICES_YAML), 'kafka', 'describe-consumer-group', + '--group-name', 'test-group'], + catch_exceptions=True) + model_mock.assert_called_once_with(group_name='test-group') + assert result.exit_code == 0 + + +def test_cli_kafka_describe_topic(runner, mocker): + model_mock = mocker.patch.object(StandardKafka, 'describe_topic_records') + result = runner.invoke(cli, ['-vv', '--config-file', str(VALID_SERVICES_YAML), 'kafka', 'describe-topic-records', + '--topic-name', 'test'], + catch_exceptions=True) + model_mock.assert_called_once_with(topic_name='test') + assert result.exit_code == 0 diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_cluster.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_cluster.py index fddfc1022..60e22685f 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_cluster.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_cluster.py @@ -1,6 +1,8 @@ import pytest -from tests.utils import create_valid_cluster + +import console_link.middleware.clusters as clusters_ from console_link.models.cluster import AuthMethod, Cluster +from tests.utils import create_valid_cluster # Define a valid cluster configuration valid_cluster_config = { @@ -77,3 +79,23 @@ def test_valid_cluster_api_call_with_no_auth(requests_mock): response = cluster.call_api("/test_api") assert response.status_code == 200 assert response.json() == {'test': True} + + +def test_connection_check_with_exception(mocker): + cluster = create_valid_cluster() + api_mock = mocker.patch.object(Cluster, 'call_api', side_effect=Exception('Attempt to connect to cluster failed')) + + result = clusters_.connection_check(cluster) + api_mock.assert_called() + assert 'Attempt to connect to cluster failed' in result.connection_message + assert not result.connection_established + + +def test_connection_check_succesful(requests_mock): + cluster = create_valid_cluster() + requests_mock.get(f"{cluster.endpoint}/", json={'version': {'number': '2.15'}}) + + result = clusters_.connection_check(cluster) + assert result.connection_established + assert result.connection_message == 'Successfully connected!' 
+ assert result.cluster_version == '2.15' diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_ecs.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_ecs.py index 60fa533fd..4a6da8cd3 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_ecs.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_ecs.py @@ -1,13 +1,12 @@ import json import pathlib -import pytest - import botocore.session +import pytest from botocore.stub import Stubber -from console_link.models.utils import AWSAPIError from console_link.models.ecs_service import ECSService, InstanceStatuses +from console_link.models.utils import AWSAPIError TEST_DATA_DIRECTORY = pathlib.Path(__file__).parent / "data" AWS_REGION = "us-east-1" diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_environment.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_environment.py index 8bc8543db..769f59c2c 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_environment.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_environment.py @@ -1,10 +1,11 @@ import pathlib +import pytest + from console_link.environment import Environment from console_link.models.backfill_base import Backfill from console_link.models.cluster import Cluster from console_link.models.metrics_source import MetricsSource -import pytest TEST_DATA_DIRECTORY = pathlib.Path(__file__).parent / "data" VALID_SERVICES_YAML = TEST_DATA_DIRECTORY / "services.yaml" diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_metadata.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_metadata.py index c67cf5342..b412c1f0f 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_metadata.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_metadata.py @@ -1,4 +1,5 @@ import pytest + from console_link.models.cluster import AuthMethod from console_link.models.metadata import Metadata from console_link.models.snapshot import FileSystemSnapshot, S3Snapshot diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_metrics_source.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_metrics_source.py index f7f78b4bd..417c20102 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_metrics_source.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_metrics_source.py @@ -1,16 +1,18 @@ import json import pathlib -import requests -from console_link.models.metrics_source import MetricsSource, CloudwatchMetricsSource, PrometheusMetricsSource, \ - MetricStatistic, Component -from console_link.logic.metrics import get_metrics_source, UnsupportedMetricsSourceError +import botocore.session import pytest +import requests import requests_mock - -import botocore.session from botocore.stub import Stubber +from console_link.models.factories import (UnsupportedMetricsSourceError, + get_metrics_source) +from console_link.models.metrics_source import (CloudwatchMetricsSource, 
+ Component, MetricsSource, + MetricStatistic, + PrometheusMetricsSource) from console_link.models.utils import AWSAPIError TEST_DATA_DIRECTORY = pathlib.Path(__file__).parent / "data" @@ -114,7 +116,9 @@ def test_cloudwatch_metrics_get_metrics_error(cw_ms, cw_stubber): # This one doesn't serialize to json nicely because of the datetime objects import datetime + from dateutil.tz import tzutc # type: ignore + cw_get_metric_data = { 'Messages': [], 'MetricDataResults': [ diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_osi_utils.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_osi_utils.py index 4e59788b6..ed1e63a57 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_osi_utils.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_osi_utils.py @@ -1,15 +1,19 @@ +import os from pathlib import Path -from botocore.stub import Stubber, ANY -from moto import mock_aws + import botocore.session import pytest -import os - -from console_link.models.osi_utils import (construct_pipeline_config, create_pipeline_from_json, - create_pipeline_from_env, start_pipeline, stop_pipeline, delete_pipeline, - get_assume_role_session, InvalidAuthParameters, - OpenSearchIngestionMigrationProps) +from botocore.stub import ANY, Stubber +from console_link.models.osi_utils import (InvalidAuthParameters, + OpenSearchIngestionMigrationProps, + construct_pipeline_config, + create_pipeline_from_env, + create_pipeline_from_json, + get_assume_role_session, + start_pipeline, + stop_pipeline) from console_link.models.cluster import AuthMethod +from moto import mock_aws from tests.utils import create_valid_cluster PIPELINE_TEMPLATE_PATH = f"{Path(__file__).parents[3]}/osiPipelineTemplate.yaml" diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_replay.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_replay.py index bad6ec646..bfa374fe5 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_replay.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_replay.py @@ -1,11 +1,14 @@ +import json import pathlib import pytest +import yaml -from console_link.environment import get_replayer +import console_link.middleware.replay as replay_ +from console_link.models.ecs_service import ECSService +from console_link.models.factories import get_replayer from console_link.models.replayer_base import Replayer from console_link.models.replayer_ecs import ECSReplayer -from console_link.models.ecs_service import ECSService TEST_DATA_DIRECTORY = pathlib.Path(__file__).parent / "data" AWS_REGION = "us-east-1" @@ -85,3 +88,31 @@ def test_replayer_scale_sets_ecs_desired_count(mocker): assert isinstance(replayer, ECSReplayer) mock.assert_called_once_with(replayer.ecs_client, 5) + + +def test_replayer_describe_no_json(): + config = { + "ecs": { + "cluster_name": "migration-aws-integ-ecs-cluster", + "service_name": "migration-aws-integ-traffic-replayer-default" + }, + "scale": 3 + } + replayer = get_replayer(config) + success, output = replay_.describe(replayer, as_json=False) + assert success + assert output == yaml.safe_dump(config) + + +def test_replayer_describe_as_json(): + config = { + "ecs": { + "cluster_name": "migration-aws-integ-ecs-cluster", + 
"service_name": "migration-aws-integ-traffic-replayer-default" + }, + "scale": 3 + } + replayer = get_replayer(config) + success, output = replay_.describe(replayer, as_json=True) + assert success + assert json.loads(output) == config diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_snapshot.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_snapshot.py index 1b204c999..ea61f7510 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_snapshot.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/test_snapshot.py @@ -1,11 +1,15 @@ -from console_link.models.snapshot import S3Snapshot, FileSystemSnapshot, Snapshot -from console_link.environment import get_snapshot -from console_link.models.cluster import AuthMethod, Cluster, HttpMethod -from console_link.logic import snapshot as snapshot_logic -from tests.utils import create_valid_cluster -import pytest import unittest.mock as mock + +import pytest + +from console_link.middleware import snapshot as snapshot_ +from console_link.models.cluster import AuthMethod, Cluster, HttpMethod from console_link.models.command_result import CommandResult +from console_link.models.factories import (UnsupportedSnapshotError, + get_snapshot) +from console_link.models.snapshot import (FileSystemSnapshot, S3Snapshot, + Snapshot) +from tests.utils import create_valid_cluster @pytest.fixture @@ -82,7 +86,7 @@ def test_s3_snapshot_status_full(s3_snapshot, mock_cluster): } mock_cluster.call_api.return_value = mock_response - result = snapshot_logic.status(snapshot=s3_snapshot, deep_check=True) + result = snapshot_.status(snapshot=s3_snapshot, deep_check=True) assert isinstance(result, CommandResult) assert result.success @@ -165,9 +169,10 @@ def test_get_snapshot_fails_for_invalid_config(): }, } } - with pytest.raises(ValueError) as excinfo: + with pytest.raises(UnsupportedSnapshotError) as excinfo: get_snapshot(config["snapshot"], create_valid_cluster()) - assert "Invalid config file for snapshot" in str(excinfo.value.args[0]) + assert "Unsupported snapshot type" in excinfo.value.args[0] + assert "invalid" in excinfo.value.args[1] def test_get_snpashot_fails_for_config_with_fs_and_s3(): diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/utils.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/utils.py index 60add4f51..10ec62cda 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/utils.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/console_link/tests/utils.py @@ -1,5 +1,6 @@ from typing import Dict, Optional -from console_link.models.cluster import Cluster, AuthMethod + +from console_link.models.cluster import AuthMethod, Cluster def create_valid_cluster(endpoint: str = "https://opensearchtarget:9200", diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/backfill_tests.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/backfill_tests.py index 4cf1d0332..53c7bcd7d 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/backfill_tests.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/backfill_tests.py @@ -2,7 +2,7 @@ import pytest import 
unittest from http import HTTPStatus -from console_link.logic.clusters import run_test_benchmarks, connection_check, clear_indices, ConnectionResult +from console_link.middleware.clusters import run_test_benchmarks, connection_check, clear_indices, ConnectionResult from console_link.models.cluster import Cluster from console_link.models.backfill_base import Backfill from console_link.models.command_result import CommandResult diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/common_operations.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/common_operations.py index 39131095e..2f670d085 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/common_operations.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/common_operations.py @@ -7,7 +7,7 @@ from requests.exceptions import ConnectionError, SSLError from typing import Dict, List from unittest import TestCase -from console_link.logic.clusters import call_api +from console_link.middleware.clusters import call_api from console_link.models.cluster import HttpMethod, Cluster from console_link.models.replayer_base import Replayer, ReplayStatus diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/e2e_tests.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/e2e_tests.py index c851483b2..d11d022fe 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/e2e_tests.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/e2e_tests.py @@ -2,14 +2,14 @@ import pytest import unittest from http import HTTPStatus -from console_link.logic.clusters import run_test_benchmarks, connection_check, clear_indices, ConnectionResult +from console_link.middleware.clusters import run_test_benchmarks, connection_check, clear_indices, ConnectionResult from console_link.models.cluster import Cluster from console_link.models.backfill_base import Backfill from console_link.models.replayer_base import Replayer from console_link.models.kafka import Kafka from console_link.models.command_result import CommandResult from console_link.models.snapshot import Snapshot -from console_link.logic.kafka import delete_topic +from console_link.middleware.kafka import delete_topic from console_link.models.metadata import Metadata from console_link.cli import Context from common_operations import (create_index, create_document, check_doc_counts_match, wait_for_running_replayer, diff --git a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/metric_operations.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/metric_operations.py index 1befd12ca..833b8e999 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/metric_operations.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/metric_operations.py @@ -5,7 +5,7 @@ from typing import List, Dict, Tuple from unittest import TestCase from console_link.models.metrics_source import MetricsSource, CloudwatchMetricsSource -from console_link.logic.metrics import get_metric_data +from console_link.middleware.metrics import get_metric_data logger = logging.getLogger(__name__) diff --git 
a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/replayer_tests.py b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/replayer_tests.py index a78342dfa..ac5627427 100644 --- a/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/replayer_tests.py +++ b/TrafficCapture/dockerSolution/src/main/docker/migrationConsole/lib/integ_test/integ_test/replayer_tests.py @@ -9,9 +9,9 @@ from requests import Session from requests.adapters import HTTPAdapter from console_link.models.replayer_base import Replayer -from console_link.logic.kafka import delete_topic +from console_link.middleware.kafka import delete_topic from console_link.models.kafka import Kafka -from console_link.logic.clusters import connection_check, clear_indices, run_test_benchmarks, ConnectionResult +from console_link.middleware.clusters import connection_check, clear_indices, run_test_benchmarks, ConnectionResult from console_link.models.cluster import Cluster, AuthMethod from console_link.cli import Context diff --git a/coreUtilities/build.gradle b/coreUtilities/build.gradle index 357e9f61d..d877fbb42 100644 --- a/coreUtilities/build.gradle +++ b/coreUtilities/build.gradle @@ -20,6 +20,7 @@ plugins { id 'org.opensearch.migrations.java-library-conventions' id 'io.freefair.lombok' id 'java' + id 'jacoco' id 'java-test-fixtures' } @@ -51,6 +52,12 @@ dependencies { implementation group: 'io.opentelemetry', name: 'opentelemetry-exporter-otlp' implementation group: 'io.opentelemetry.semconv', name: 'opentelemetry-semconv' + implementation group: 'com.fasterxml.jackson.core', name: 'jackson-databind' + implementation group: 'com.fasterxml.jackson.core', name: 'jackson-core' + + testImplementation group: 'org.mockito', name: 'mockito-core' + testImplementation group: 'org.hamcrest', name: 'hamcrest' + testFixturesApi group: 'io.opentelemetry', name: 'opentelemetry-api' testFixturesApi group: 'io.opentelemetry', name: 'opentelemetry-sdk' testFixturesApi group: 'io.opentelemetry', name: 'opentelemetry-sdk-testing' @@ -59,6 +66,23 @@ dependencies { testFixturesImplementation group: 'org.slf4j', name: 'slf4j-api' } +// Utility task to allow copying required libraries into a 'dependencies' folder for security scanning +tasks.register('copyDependencies', Sync) { + duplicatesStrategy = DuplicatesStrategy.EXCLUDE + + from configurations.runtimeClasspath + into "${buildDir}/dependencies" +} + +jacocoTestReport { + reports { + xml.required = true + xml.destination file("${buildDir}/reports/jacoco/test/jacocoTestReport.xml") + html.required = true + html.destination file("${buildDir}/reports/jacoco/test/html") + } +} + tasks.named('test') { useJUnitPlatform() } diff --git a/settings.gradle b/settings.gradle index db34d00a7..525d740db 100644 --- a/settings.gradle +++ b/settings.gradle @@ -40,6 +40,7 @@ void addSubProjects(String path, File dir) { rootProject.name = 'OpensearchMigrations' include 'commonDependencyVersionConstraints' include 'coreUtilities' +include 'transformation' include 'testHelperFixtures' include 'RFS' include 'CreateSnapshot' diff --git a/transformation/build.gradle b/transformation/build.gradle new file mode 100644 index 000000000..1da66ca9a --- /dev/null +++ b/transformation/build.gradle @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + + +buildscript { + dependencies { + classpath 'org.junit.platform:junit-platform-gradle-plugin:1.0.1' + } +} + +plugins { + id 'org.opensearch.migrations.java-library-conventions' + id 'io.freefair.lombok' + id 'java' + id 'java-test-fixtures' +} + +java.sourceCompatibility = JavaVersion.VERSION_11 +java.targetCompatibility = JavaVersion.VERSION_11 + +repositories { + mavenCentral() +} + +dependencies { + api project(":commonDependencyVersionConstraints") + + implementation group: 'org.slf4j', name: 'slf4j-api' + + testImplementation group: 'org.apache.logging.log4j', name: 'log4j-api' + testImplementation group: 'org.apache.logging.log4j', name: 'log4j-core' + testImplementation group: 'org.apache.logging.log4j', name: 'log4j-slf4j2-impl' + + // Log4j + implementation group: 'org.apache.logging.log4j', name: 'log4j-api' + implementation group: 'org.apache.logging.log4j', name: 'log4j-core' + implementation group: 'org.apache.logging.log4j', name: 'log4j-slf4j2-impl' + + implementation group: 'com.fasterxml.jackson.core', name: 'jackson-databind' + implementation group: 'com.fasterxml.jackson.core', name: 'jackson-core' + + testImplementation group: 'org.mockito', name: 'mockito-core' + testImplementation group: 'org.hamcrest', name: 'hamcrest' +} + +tasks.named('test') { + useJUnitPlatform() +} diff --git a/transformation/src/main/java/src/org/opensearch/migrations/transformation/CanApplyResult.java b/transformation/src/main/java/src/org/opensearch/migrations/transformation/CanApplyResult.java new file mode 100644 index 000000000..f3656d206 --- /dev/null +++ b/transformation/src/main/java/src/org/opensearch/migrations/transformation/CanApplyResult.java @@ -0,0 +1,27 @@ +package org.opensearch.migrations.transformation; + +import lombok.Getter; +import lombok.RequiredArgsConstructor; + +/** + * The result after checking if a transformer can be applied to an entity + */ +public abstract class CanApplyResult { + public final static CanApplyResult YES = new Yes(); + public final static CanApplyResult NO = new No(); + + /** Yes, the transformation can be applied */ + public static final class Yes extends CanApplyResult { + } + + /** No, the transformation cannot be applied */ + public static final class No extends CanApplyResult { + } + + /** The transformation matches the scenario, but an issue would prevent it from being applied correctly */ + @RequiredArgsConstructor + @Getter + public static final class Unsupported extends CanApplyResult { + private final String reason; + } +} \ No newline at end of file diff --git a/transformation/src/main/java/src/org/opensearch/migrations/transformation/TransformationRule.java b/transformation/src/main/java/src/org/opensearch/migrations/transformation/TransformationRule.java new file mode 100644 index 000000000..f581a97c7 --- /dev/null +++ b/transformation/src/main/java/src/org/opensearch/migrations/transformation/TransformationRule.java @@ -0,0 +1,22 @@ +package org.opensearch.migrations.transformation; + +import org.opensearch.migrations.transformation.entity.Entity; + +/** + * Describes how an entity is transformed from one version to another.
+ */ +public interface TransformationRule<T extends Entity> { + /** + * Given an entity, determines whether a transformation can be run on it + * + * MUST ALWAYS BE READ ONLY + */ + CanApplyResult canApply(T entity); + + /** + * Apply a transformation on the entity + * @param entity The entity to be transformed in place + * @return true if the entity was updated, or false if no changes were made + */ + boolean applyTransformation(T entity); +} \ No newline at end of file diff --git a/transformation/src/main/java/src/org/opensearch/migrations/transformation/entity/Entity.java b/transformation/src/main/java/src/org/opensearch/migrations/transformation/entity/Entity.java new file mode 100644 index 000000000..542d3e965 --- /dev/null +++ b/transformation/src/main/java/src/org/opensearch/migrations/transformation/entity/Entity.java @@ -0,0 +1,19 @@ +package org.opensearch.migrations.transformation.entity; + +import com.fasterxml.jackson.databind.node.ObjectNode; + +/** + * Common interface for entities that are used to perform transformations + * + * Note on performance: ObjectNode requires a fully marshalled json; for + * json bodies under 1mb this will likely be fine. However, if dealing with larger + * objects, or if perf issues are encountered, consider adding an alternative + * path based on JsonParser. This might be needed for processing document bodies + * specifically. + */ +public interface Entity { + /** + * Gets the underlying entity as an ObjectNode; supports read and write operations + */ + ObjectNode rawJson(); +} diff --git a/transformation/src/main/java/src/org/opensearch/migrations/transformation/entity/Index.java b/transformation/src/main/java/src/org/opensearch/migrations/transformation/entity/Index.java new file mode 100644 index 000000000..f82985cdb --- /dev/null +++ b/transformation/src/main/java/src/org/opensearch/migrations/transformation/entity/Index.java @@ -0,0 +1,7 @@ +package org.opensearch.migrations.transformation.entity; + +/** + * Represents an Index object for transformation + */ +public interface Index extends Entity { +} \ No newline at end of file diff --git a/transformation/src/main/java/src/org/opensearch/migrations/transformation/rules/IndexMappingTypeRemoval.java b/transformation/src/main/java/src/org/opensearch/migrations/transformation/rules/IndexMappingTypeRemoval.java new file mode 100644 index 000000000..32478eb6a --- /dev/null +++ b/transformation/src/main/java/src/org/opensearch/migrations/transformation/rules/IndexMappingTypeRemoval.java @@ -0,0 +1,79 @@ +package org.opensearch.migrations.transformation.rules; + +import java.util.Map.Entry; + +import org.opensearch.migrations.transformation.CanApplyResult; +import org.opensearch.migrations.transformation.CanApplyResult.Unsupported; +import org.opensearch.migrations.transformation.TransformationRule; +import org.opensearch.migrations.transformation.entity.Index; + +import com.fasterxml.jackson.databind.node.ObjectNode; + +/** + * Supports transformation of the Index Mapping types that were changed from multiple types to a single type between ES 6 and ES 7 + * + * Example: + * Starting state (ES 6): + * { + * "mappings": [ + * { + * "foo": { + * "properties": { + * "field1": { "type": "text" }, + * "field2": { "type": "keyword" } + * } + * } + * } + * ] + * } + * + * Ending state (ES 7): + * { + * "mappings": { + * "properties": { + * "field1": { "type": "text" }, + * "field2": { "type": "keyword" } + * } + * } + * } + */ +public class IndexMappingTypeRemoval implements TransformationRule<Index> { + + @Override + public CanApplyResult 
canApply(final Index index) { + final var mappingNode = index.rawJson().get("mappings"); + + if (mappingNode == null || mappingNode.isObject()) { + return CanApplyResult.NO; + } + + // Detect unsupported multiple type mappings, eg: + // { "mappings": [{ "foo": {...}}, { "bar": {...} }] } + // { "mappings": [{ "foo": {...}, "bar": {...} }] } + if (mappingNode.size() > 1 || mappingNode.get(0).size() > 1) { + return new Unsupported("Multiple mapping types are not supported"); + } + + // There is a type under mappings, e.g. { "mappings": [{ "foo": {...} }] } + return CanApplyResult.YES; + } + + @Override + public boolean applyTransformation(final Index index) { + if (CanApplyResult.YES != canApply(index)) { + return false; + } + + final var mappingsNode = index.rawJson().get("mappings"); + final var mappingsInnerNode = (ObjectNode) mappingsNode.get(0); + + final var typeName = mappingsInnerNode.properties().stream().map(Entry::getKey).findFirst().orElseThrow(); + final var typeNode = mappingsInnerNode.get(typeName); + + mappingsInnerNode.remove(typeName); + typeNode.fields().forEachRemaining(node -> mappingsInnerNode.set(node.getKey(), node.getValue())); + index.rawJson().set("mappings", mappingsInnerNode); + + return true; + } +} diff --git a/transformation/src/test/java/org/opensearch/migrations/transformation/rules/IndexMappingTypeRemovalTest.java b/transformation/src/test/java/org/opensearch/migrations/transformation/rules/IndexMappingTypeRemovalTest.java new file mode 100644 index 000000000..97feff47c --- /dev/null +++ b/transformation/src/test/java/org/opensearch/migrations/transformation/rules/IndexMappingTypeRemovalTest.java @@ -0,0 +1,211 @@ +package org.opensearch.migrations.transformation.rules; + +import java.util.function.BiFunction; +import java.util.function.Function; + +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; +import org.opensearch.migrations.transformation.CanApplyResult; +import org.opensearch.migrations.transformation.CanApplyResult.Unsupported; +import org.opensearch.migrations.transformation.entity.Index; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; + +import lombok.extern.slf4j.Slf4j; + +import static org.mockito.Mockito.mock; + +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; + +@Slf4j +public class IndexMappingTypeRemovalTest { + + final ObjectMapper mapper = new ObjectMapper(); + + private final String defaultMappingProperties = // + "\"properties\": {\n" + // + " \"age\": {\n" + // + " \"type\": \"integer\"\n" + // + " }\n" + // + "}\n"; + + private final Function mappingObjectWithCustomType = typeName -> indexSettingJson( // + "\"mappings\": {\n" + // + " \"" + typeName + "\": {\n" + // + defaultMappingProperties + // + " }\n" + // + "},\n"); + + private final Function mappingWithType = typeName -> indexSettingJson( // + "\"mappings\": [{\n" + // + " \"" + typeName + "\": {\n" + // + defaultMappingProperties + // + " }\n" + // + "}],\n"); + + private final BiFunction mappingWithMutlipleTypes = (typeName1, typeName2) -> indexSettingJson( // + "\"mappings\": [{\n" + // + " \"" + typeName1 + "\": {\n" + // + defaultMappingProperties + // + " },\n" + // + " \"" + typeName2 + "\": {\n" + // + defaultMappingProperties + // + " }\n" + // + "}],\n"); + + private final BiFunction 
mutlipleMappingsWithSingleTypes = (typeName1, typeName2) -> indexSettingJson( // + "\"mappings\": [{\n" + // + " \"" + typeName1 + "\": {\n" + // + defaultMappingProperties + // + " }\n" + // + "},\n" + // + "{\n" + // + " \"" + typeName2 + "\": {\n" + // + defaultMappingProperties + // + " }\n" + // + "}],\n"); + + public ObjectNode indexSettingJson(final String mappingSection) { + try { + return (ObjectNode) mapper.readTree( + "{\n" + // + " \"settings\": {\n" + // + " \"index\": {\n" + // + " \"number_of_shards\": 2,\n" + // + " \"number_of_replicas\": 1\n" + // + " }\n" + // + " },\n" + // + mappingSection + + " \"aliases\": {\n" + // + " \"sample-alias1\": {}\n" + // + " }\n" + // + "}"); + } catch (final Exception e) { + throw new RuntimeException(e); + } + } + + private CanApplyResult canApply(final ObjectNode indexJson) { + var transformer = new IndexMappingTypeRemoval(); + var index = mock(Index.class); + Mockito.when(index.rawJson()).thenReturn(indexJson); + return transformer.canApply(index); + } + + private boolean applyTransformation(final ObjectNode indexJson) { + var transformer = new IndexMappingTypeRemoval(); + var index = mock(Index.class); + Mockito.when(index.rawJson()).thenReturn(indexJson); + + log.atInfo().setMessage("Original\n{}").addArgument(indexJson.toPrettyString()).log(); + var wasChanged = transformer.applyTransformation(index); + + log.atInfo().setMessage("After{}\n{}").addArgument(wasChanged ? " *Changed* ": "").addArgument(indexJson.toPrettyString()).log(); + return wasChanged; + } + + @Test + void testApplyTransformation_noMappingNode() { + // Setup + var originalJson = indexSettingJson(""); + var indexJson = originalJson.deepCopy(); + + // Action + var wasChanged = applyTransformation(indexJson); + assertThat(canApply(originalJson), equalTo(CanApplyResult.NO)); + + // Verification + assertThat(wasChanged, equalTo(false)); + assertThat(indexJson.toPrettyString(), equalTo(originalJson.toPrettyString())); + } + + @Test + void testApplyTransformation_mappingIsObjectNotArray() { + // Setup + var typeName = "foobar"; + var originalJson = mappingObjectWithCustomType.apply(typeName); + var indexJson = originalJson.deepCopy(); + + // Action + var wasChanged = applyTransformation(indexJson); + assertThat(canApply(originalJson), equalTo(CanApplyResult.NO)); + + // Verification + assertThat(wasChanged, equalTo(false)); + assertThat(indexJson.toPrettyString(), equalTo(originalJson.toPrettyString())); + assertThat(indexJson.toPrettyString(), containsString(typeName)); + } + + @Test + void testApplyTransformation_docType() { + // Setup + var typeName = "_doc"; + var originalJson = mappingWithType.apply(typeName); + var indexJson = originalJson.deepCopy(); + + // Action + var wasChanged = applyTransformation(indexJson); + assertThat(canApply(originalJson), equalTo(CanApplyResult.YES)); + + // Verification + assertThat(wasChanged, equalTo(true)); + assertThat(indexJson.toPrettyString(), not(equalTo(originalJson.toPrettyString()))); + assertThat(indexJson.toPrettyString(), not(containsString(typeName))); + } + + @Test + void testApplyTransformation_customTypes() { + // Setup + var typeName = "foobar"; + var originalJson = mappingWithType.apply(typeName); + var indexJson = originalJson.deepCopy(); + + // Action + var wasChanged = applyTransformation(indexJson); + assertThat(canApply(originalJson), equalTo(CanApplyResult.YES)); + + // Verification + assertThat(wasChanged, equalTo(true)); + assertThat(indexJson.toPrettyString(), not(equalTo(originalJson.toPrettyString()))); 
+ assertThat(indexJson.toPrettyString(), not(containsString(typeName))); + } + + @Test + void testApplyTransformation_twoCustomTypes() { + // Setup + var originalJson = mappingWithMutlipleTypes.apply("t1", "t2"); + var indexJson = originalJson.deepCopy(); + + // Action + var wasChanged = applyTransformation(indexJson); + var canApply = canApply(originalJson); + assertThat(canApply, instanceOf(Unsupported.class)); + assertThat(((Unsupported)canApply).getReason(),equalTo("Multiple mapping types are not supported")); + + // Verification + assertThat(wasChanged, equalTo(false)); + assertThat(originalJson.toPrettyString(), equalTo(indexJson.toPrettyString())); + } + + @Test + void testApplyTransformation_twoMappingEntries() { + // Setup + var originalJson = mutlipleMappingsWithSingleTypes.apply("t1", "t2"); + var indexJson = originalJson.deepCopy(); + + // Action + var wasChanged = applyTransformation(indexJson); + var canApply = canApply(originalJson); + assertThat(canApply, instanceOf(Unsupported.class)); + assertThat(((Unsupported)canApply).getReason(),equalTo("Multiple mapping types are not supported")); + + // Verification + assertThat(wasChanged, equalTo(false)); + assertThat(originalJson.toPrettyString(), equalTo(indexJson.toPrettyString())); + } +} diff --git a/transformation/src/test/resources/log4j2.xml b/transformation/src/test/resources/log4j2.xml new file mode 100644 index 000000000..99e1e6614 --- /dev/null +++ b/transformation/src/test/resources/log4j2.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + + + + +
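As a rough usage sketch (not part of this diff), the new TransformationRule API added above could be exercised along the following lines. The class names and behavior come from the files introduced in this patch; the example package, the lambda-based Index, and the sample JSON are assumptions made purely for illustration.

// Illustrative sketch only, assuming the transformation module classes added above.
package org.opensearch.migrations.transformation.examples;

import org.opensearch.migrations.transformation.CanApplyResult;
import org.opensearch.migrations.transformation.entity.Index;
import org.opensearch.migrations.transformation.rules.IndexMappingTypeRemoval;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class IndexMappingTypeRemovalExample {
    public static void main(String[] args) throws Exception {
        var mapper = new ObjectMapper();
        // An ES 6 style index definition with a single custom mapping type ("doc")
        var indexJson = (ObjectNode) mapper.readTree(
            "{\"mappings\":[{\"doc\":{\"properties\":{\"age\":{\"type\":\"integer\"}}}}]}");

        // Index only exposes rawJson(), so a lambda suffices for this sketch
        Index index = () -> indexJson;

        var rule = new IndexMappingTypeRemoval();
        if (rule.canApply(index) == CanApplyResult.YES) {
            // Mutates the ObjectNode in place, collapsing the type into "mappings"
            boolean changed = rule.applyTransformation(index);
            System.out.println("changed=" + changed + ": " + index.rawJson().toPrettyString());
        }
    }
}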