diff --git a/openmetadata-service/pom.xml b/openmetadata-service/pom.xml
index 58a1b7cd18e0..63cf94e9dc55 100644
--- a/openmetadata-service/pom.xml
+++ b/openmetadata-service/pom.xml
@@ -1017,6 +1017,11 @@
       <artifactId>owasp-java-html-sanitizer</artifactId>
       <version>${owasp-html-sanitizer.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-compress</artifactId>
+      <version>${commons-compress.version}</version>
+    </dependency>
diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/migration/api/MigrationTestCase.java b/openmetadata-service/src/main/java/org/openmetadata/service/migration/api/MigrationTestCase.java
new file mode 100644
index 000000000000..59999cc6ee00
--- /dev/null
+++ b/openmetadata-service/src/main/java/org/openmetadata/service/migration/api/MigrationTestCase.java
@@ -0,0 +1,10 @@
+package org.openmetadata.service.migration.api;
+
+import java.util.List;
+import org.jdbi.v3.core.Handle;
+
+public interface MigrationTestCase {
+  List<TestResult> validateBefore(Handle handle);
+
+  List<TestResult> validateAfter(Handle handle);
+}
diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/migration/api/MigrationWorkflow.java b/openmetadata-service/src/main/java/org/openmetadata/service/migration/api/MigrationWorkflow.java
index e9df40fff097..08189447eb2e 100644
--- a/openmetadata-service/src/main/java/org/openmetadata/service/migration/api/MigrationWorkflow.java
+++ b/openmetadata-service/src/main/java/org/openmetadata/service/migration/api/MigrationWorkflow.java
@@ -46,7 +46,7 @@ public class MigrationWorkflow {
   public static final String SUCCESS_MSG = "Success";
   public static final String FAILED_MSG = "Failed due to : ";
   public static final String CURRENT = "Current";
-  private List<MigrationProcess> migrations;
+  @Getter private List<MigrationProcess> migrations;
   private final String nativeSQLScriptRootPath;
   private final ConnectionType connectionType;
   private final String extensionSQLScriptRootPath;
diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/migration/api/TestResult.java b/openmetadata-service/src/main/java/org/openmetadata/service/migration/api/TestResult.java
new file mode 100644
index 000000000000..57b76ac9f301
--- /dev/null
+++ b/openmetadata-service/src/main/java/org/openmetadata/service/migration/api/TestResult.java
@@ -0,0 +1,11 @@
+package org.openmetadata.service.migration.api;
+
+public record TestResult(String name, boolean passed, String detail) {
+  public static TestResult pass(String name) {
+    return new TestResult(name, true, "");
+  }
+
+  public static TestResult fail(String name, String detail) {
+    return new TestResult(name, false, detail);
+  }
+}
diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/util/DatabaseBackupRestore.java b/openmetadata-service/src/main/java/org/openmetadata/service/util/DatabaseBackupRestore.java
new file mode 100644
index 000000000000..53bdb44b72e2
--- /dev/null
+++ b/openmetadata-service/src/main/java/org/openmetadata/service/util/DatabaseBackupRestore.java
@@ -0,0 +1,585 @@
+package org.openmetadata.service.util;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.FileInputStream;
+import 
java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.math.BigDecimal; +import java.nio.file.Files; +import java.nio.file.Path; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Base64; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; +import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; +import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream; +import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream; +import org.jdbi.v3.core.Handle; +import org.jdbi.v3.core.Jdbi; +import org.openmetadata.service.jdbi3.locator.ConnectionType; + +@Slf4j +public class DatabaseBackupRestore { + + public static final int DEFAULT_BATCH_SIZE = 1000; + private static final long MAX_METADATA_SIZE = 10 * 1024 * 1024; + private static final Pattern SAFE_IDENTIFIER = Pattern.compile("^[a-zA-Z_][a-zA-Z0-9_]*$"); + private static final ObjectMapper MAPPER = + new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); + + private final Jdbi jdbi; + private final ConnectionType connectionType; + private final String databaseName; + private final int batchSize; + + public DatabaseBackupRestore(Jdbi jdbi, ConnectionType connectionType, String databaseName) { + this(jdbi, connectionType, databaseName, DEFAULT_BATCH_SIZE); + } + + public DatabaseBackupRestore( + Jdbi jdbi, ConnectionType connectionType, String databaseName, int batchSize) { + this.jdbi = jdbi; + this.connectionType = connectionType; + this.databaseName = databaseName; + this.batchSize = batchSize; + } + + public List discoverTables(Handle handle) { + String sql; + if (connectionType == ConnectionType.MYSQL) { + sql = + "SELECT table_name FROM information_schema.tables " + + "WHERE table_type = 'BASE TABLE' AND table_schema = :db ORDER BY table_name"; + return handle.createQuery(sql).bind("db", databaseName).mapTo(String.class).list(); + } else { + sql = + "SELECT table_name FROM information_schema.tables " + + "WHERE table_type = 'BASE TABLE' AND table_schema = current_schema() " + + "ORDER BY table_name"; + return handle.createQuery(sql).mapTo(String.class).list(); + } + } + + public List discoverColumns(Handle handle, String tableName) { + String sql; + if (connectionType == ConnectionType.MYSQL) { + sql = + "SELECT column_name FROM information_schema.columns " + + "WHERE table_schema = :db AND table_name = :table " + + "AND (extra NOT LIKE '%GENERATED%' OR extra IS NULL) " + + "ORDER BY ordinal_position"; + return handle + .createQuery(sql) + .bind("db", databaseName) + .bind("table", tableName) + .mapTo(String.class) + .list(); + } else { + sql = + "SELECT column_name FROM information_schema.columns " + + "WHERE table_schema = current_schema() AND table_name = :table " + + "AND (is_generated = 'NEVER' OR is_generated IS NULL) " + + "AND (column_default NOT LIKE 'nextval%' OR column_default IS NULL) " + + "ORDER BY ordinal_position"; + return handle.createQuery(sql).bind("table", tableName).mapTo(String.class).list(); + } + } + + List discoverPrimaryKeyColumns(Handle handle, String tableName) { + String sql; + if (connectionType == ConnectionType.MYSQL) { + sql = + "SELECT kcu.column_name FROM information_schema.key_column_usage kcu 
" + + "WHERE kcu.table_schema = :db AND kcu.table_name = :table " + + "AND kcu.constraint_name = 'PRIMARY' " + + "ORDER BY kcu.ordinal_position"; + return handle + .createQuery(sql) + .bind("db", databaseName) + .bind("table", tableName) + .mapTo(String.class) + .list(); + } else { + sql = + "SELECT kcu.column_name " + + "FROM information_schema.table_constraints tc " + + "JOIN information_schema.key_column_usage kcu " + + "ON tc.constraint_name = kcu.constraint_name " + + "AND tc.table_schema = kcu.table_schema " + + "WHERE tc.table_schema = current_schema() AND tc.table_name = :table " + + "AND tc.constraint_type = 'PRIMARY KEY' " + + "ORDER BY kcu.ordinal_position"; + return handle.createQuery(sql).bind("table", tableName).mapTo(String.class).list(); + } + } + + Set discoverBinaryColumns(Handle handle, String tableName) { + String sql; + if (connectionType == ConnectionType.MYSQL) { + sql = + "SELECT column_name FROM information_schema.columns " + + "WHERE table_schema = :db AND table_name = :table " + + "AND data_type IN ('blob', 'tinyblob', 'mediumblob', 'longblob', 'binary', 'varbinary')"; + return new HashSet<>( + handle + .createQuery(sql) + .bind("db", databaseName) + .bind("table", tableName) + .mapTo(String.class) + .list()); + } else { + sql = + "SELECT column_name FROM information_schema.columns " + + "WHERE table_schema = current_schema() AND table_name = :table " + + "AND data_type = 'bytea'"; + return new HashSet<>( + handle.createQuery(sql).bind("table", tableName).mapTo(String.class).list()); + } + } + + public static String extractDatabaseName(String jdbcUrl) { + String url = jdbcUrl; + int questionMark = url.indexOf('?'); + if (questionMark > 0) { + url = url.substring(0, questionMark); + } + int lastSlash = url.lastIndexOf('/'); + if (lastSlash < 0 || lastSlash == url.length() - 1) { + throw new IllegalArgumentException("Cannot extract database name from JDBC URL: " + jdbcUrl); + } + String dbName = url.substring(lastSlash + 1); + if (dbName.isEmpty()) { + throw new IllegalArgumentException("Cannot extract database name from JDBC URL: " + jdbcUrl); + } + return dbName; + } + + public void backup(String backupPath) throws IOException { + LOG.info("Starting database backup to {}", backupPath); + try (FileOutputStream fos = new FileOutputStream(backupPath); + BufferedOutputStream bos = new BufferedOutputStream(fos); + GzipCompressorOutputStream gzos = new GzipCompressorOutputStream(bos); + TarArchiveOutputStream taos = new TarArchiveOutputStream(gzos)) { + + taos.setLongFileMode(TarArchiveOutputStream.LONGFILE_POSIX); + + ObjectNode metadata = MAPPER.createObjectNode(); + metadata.put("timestamp", Instant.now().toString()); + metadata.put("version", System.getProperty("project.version", "unknown")); + metadata.put("databaseType", connectionType.name()); + metadata.put("databaseName", databaseName); + ObjectNode tablesMetadata = MAPPER.createObjectNode(); + + jdbi.useHandle( + handle -> { + beginRepeatableReadTransaction(handle); + try { + List tables = discoverTables(handle); + LOG.info("Discovered {} tables", tables.size()); + + for (String tableName : tables) { + backupTable(handle, tableName, taos, tablesMetadata); + } + } finally { + commitTransaction(handle); + } + }); + + metadata.set("tables", tablesMetadata); + byte[] metadataBytes = MAPPER.writeValueAsBytes(metadata); + TarArchiveEntry metadataEntry = new TarArchiveEntry("metadata.json"); + metadataEntry.setSize(metadataBytes.length); + taos.putArchiveEntry(metadataEntry); + taos.write(metadataBytes); + 
taos.closeArchiveEntry(); + + LOG.info("Backup completed successfully"); + } + } + + private void beginRepeatableReadTransaction(Handle handle) { + if (connectionType == ConnectionType.MYSQL) { + handle.execute("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ"); + handle.execute("START TRANSACTION"); + } else { + handle.execute("BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ"); + } + } + + private void commitTransaction(Handle handle) { + handle.execute("COMMIT"); + } + + private void backupTable( + Handle handle, String tableName, TarArchiveOutputStream taos, ObjectNode tablesMetadata) + throws IOException { + List columns = discoverColumns(handle, tableName); + if (columns.isEmpty()) { + LOG.warn("No columns found for table {}, skipping", tableName); + return; + } + + String quotedColumns = quoteColumns(columns); + String quotedTable = quoteIdentifier(tableName); + + List pkColumns = discoverPrimaryKeyColumns(handle, tableName); + String orderByClause = buildOrderByClause(pkColumns, columns); + Set binaryColumns = discoverBinaryColumns(handle, tableName); + + Path tempFile = Files.createTempFile("backup_" + tableName + "_", ".json"); + int rowCount; + try { + rowCount = + writeTableToTempFile( + handle, quotedColumns, quotedTable, orderByClause, columns, tempFile); + addTempFileToTar(taos, tempFile, "tables/" + tableName + ".json"); + } finally { + Files.deleteIfExists(tempFile); + } + + ObjectNode tableInfo = MAPPER.createObjectNode(); + ArrayNode columnsArray = MAPPER.createArrayNode(); + columns.forEach(columnsArray::add); + tableInfo.set("columns", columnsArray); + ArrayNode binaryColumnsArray = MAPPER.createArrayNode(); + binaryColumns.forEach(binaryColumnsArray::add); + tableInfo.set("binaryColumns", binaryColumnsArray); + tableInfo.put("rowCount", rowCount); + tablesMetadata.set(tableName, tableInfo); + + LOG.info("Backed up table {} ({} rows, {} columns)", tableName, rowCount, columns.size()); + } + + private String buildOrderByClause(List pkColumns, List allColumns) { + List orderColumns = pkColumns.isEmpty() ? 
List.of(allColumns.get(0)) : pkColumns; + return " ORDER BY " + + orderColumns.stream().map(this::quoteIdentifier).collect(Collectors.joining(", ")); + } + + private int writeTableToTempFile( + Handle handle, + String quotedColumns, + String quotedTable, + String orderByClause, + List columns, + Path tempFile) + throws IOException { + int rowCount = 0; + try (OutputStream os = new BufferedOutputStream(new FileOutputStream(tempFile.toFile())); + JsonGenerator gen = new JsonFactory().createGenerator(os)) { + gen.setCodec(MAPPER); + gen.writeStartArray(); + + int offset = 0; + while (true) { + String sql = + String.format( + "SELECT %s FROM %s%s LIMIT %d OFFSET %d", + quotedColumns, quotedTable, orderByClause, batchSize, offset); + List> rows = handle.createQuery(sql).mapToMap().list(); + + for (Map row : rows) { + gen.writeStartObject(); + for (String col : columns) { + Object val = row.get(col); + if (val == null) { + gen.writeNullField(col); + } else if (val instanceof Number number) { + if (number instanceof Long l) { + gen.writeNumberField(col, l); + } else if (number instanceof Integer i) { + gen.writeNumberField(col, i); + } else if (number instanceof Double d) { + gen.writeNumberField(col, d); + } else if (number instanceof Float f) { + gen.writeNumberField(col, f); + } else if (number instanceof BigDecimal bd) { + gen.writeNumberField(col, bd); + } else { + gen.writeNumberField(col, number.longValue()); + } + } else if (val instanceof Boolean b) { + gen.writeBooleanField(col, b); + } else if (val instanceof byte[] bytes) { + gen.writeBinaryField(col, bytes); + } else { + gen.writeStringField(col, val.toString()); + } + } + gen.writeEndObject(); + rowCount++; + } + + if (rows.size() < batchSize) { + break; + } + offset += batchSize; + } + + gen.writeEndArray(); + } + return rowCount; + } + + private void addTempFileToTar(TarArchiveOutputStream taos, Path tempFile, String entryName) + throws IOException { + long fileSize = Files.size(tempFile); + TarArchiveEntry entry = new TarArchiveEntry(entryName); + entry.setSize(fileSize); + taos.putArchiveEntry(entry); + + try (FileInputStream fis = new FileInputStream(tempFile.toFile())) { + fis.transferTo(taos); + } + taos.closeArchiveEntry(); + } + + public void restore(String backupPath, boolean force) throws IOException { + LOG.info("Starting database restore from {}", backupPath); + + ObjectNode metadata = readBackupMetadata(backupPath); + String backupDbType = metadata.get("databaseType").asText(); + if (!backupDbType.equals(connectionType.name())) { + throw new IllegalStateException( + String.format( + "Backup database type '%s' does not match current connection type '%s'", + backupDbType, connectionType.name())); + } + + LOG.info( + "Backup info - version: {}, timestamp: {}, databaseType: {}", + metadata.get("version").asText(), + metadata.get("timestamp").asText(), + backupDbType); + + ObjectNode tablesMetadata = (ObjectNode) metadata.get("tables"); + + Set validTables = new HashSet<>(); + tablesMetadata.fieldNames().forEachRemaining(validTables::add); + + jdbi.useHandle( + handle -> { + disableForeignKeyChecks(handle); + try { + if (force) { + truncateAllTables(handle, tablesMetadata); + } else { + validateTablesEmpty(handle, tablesMetadata); + } + restoreTablesFromArchive(handle, backupPath, tablesMetadata, validTables); + LOG.info("Restore completed successfully"); + } finally { + enableForeignKeyChecks(handle); + } + }); + } + + public static ObjectNode readBackupMetadata(String backupPath) throws IOException { + try 
(FileInputStream fis = new FileInputStream(backupPath); + BufferedInputStream bis = new BufferedInputStream(fis); + GzipCompressorInputStream gzis = new GzipCompressorInputStream(bis); + TarArchiveInputStream tais = new TarArchiveInputStream(gzis)) { + + TarArchiveEntry entry; + while ((entry = tais.getNextEntry()) != null) { + if ("metadata.json".equals(entry.getName())) { + if (entry.getSize() > MAX_METADATA_SIZE) { + throw new IOException( + "metadata.json exceeds maximum allowed size of " + MAX_METADATA_SIZE + " bytes"); + } + byte[] content = tais.readNBytes((int) entry.getSize()); + return (ObjectNode) MAPPER.readTree(content); + } + } + } + throw new IOException("metadata.json not found in backup archive"); + } + + private void restoreTablesFromArchive( + Handle handle, String backupPath, ObjectNode tablesMetadata, Set validTables) + throws IOException { + try (FileInputStream fis = new FileInputStream(backupPath); + BufferedInputStream bis = new BufferedInputStream(fis); + GzipCompressorInputStream gzis = new GzipCompressorInputStream(bis); + TarArchiveInputStream tais = new TarArchiveInputStream(gzis)) { + + TarArchiveEntry entry; + while ((entry = tais.getNextEntry()) != null) { + String name = entry.getName(); + if (!name.startsWith("tables/") || !name.endsWith(".json")) { + continue; + } + + String tableName = name.substring("tables/".length(), name.length() - ".json".length()); + + if (!validTables.contains(tableName)) { + LOG.warn("Table {} from archive not in metadata, skipping", tableName); + continue; + } + + JsonNode tableMetaNode = tablesMetadata.get(tableName); + if (tableMetaNode == null) { + LOG.warn("No metadata found for table {}, skipping", tableName); + continue; + } + + List columns = new ArrayList<>(); + tableMetaNode.get("columns").forEach(col -> columns.add(col.asText())); + + Set binaryColumns = new HashSet<>(); + JsonNode binaryColumnsNode = tableMetaNode.get("binaryColumns"); + if (binaryColumnsNode != null) { + binaryColumnsNode.forEach(col -> binaryColumns.add(col.asText())); + } + + LOG.info("Restoring table {}", tableName); + int rowCount = insertRowsStreaming(handle, tableName, columns, binaryColumns, tais); + LOG.info("Restored table {} ({} rows)", tableName, rowCount); + } + } + } + + private void validateTablesEmpty(Handle handle, ObjectNode tablesMetadata) { + List nonEmptyTables = new ArrayList<>(); + tablesMetadata + .fieldNames() + .forEachRemaining( + tableName -> { + String sql = String.format("SELECT COUNT(*) FROM %s", quoteIdentifier(tableName)); + int count = handle.createQuery(sql).mapTo(Integer.class).one(); + if (count > 0) { + nonEmptyTables.add(tableName + " (" + count + " rows)"); + } + }); + + if (!nonEmptyTables.isEmpty()) { + throw new IllegalStateException( + "Cannot restore: the following tables are not empty. 
Use --force to truncate them: " + + String.join(", ", nonEmptyTables)); + } + } + + private void truncateAllTables(Handle handle, ObjectNode tablesMetadata) { + LOG.info("Truncating all target tables (force mode)"); + tablesMetadata + .fieldNames() + .forEachRemaining( + tableName -> { + String sql = String.format("TRUNCATE TABLE %s", quoteIdentifier(tableName)); + handle.execute(sql); + LOG.info("Truncated table {}", tableName); + }); + } + + int insertRowsStreaming( + Handle handle, + String tableName, + List columns, + Set binaryColumns, + TarArchiveInputStream tais) + throws IOException { + String quotedColumns = quoteColumns(columns); + String placeholders = columns.stream().map(c -> "?").collect(Collectors.joining(", ")); + String sql = + String.format( + "INSERT INTO %s (%s) VALUES (%s)", + quoteIdentifier(tableName), quotedColumns, placeholders); + + int totalRows = 0; + try (JsonParser parser = new JsonFactory().createParser(tais)) { + JsonToken token = parser.nextToken(); + if (token != JsonToken.START_ARRAY) { + return 0; + } + + var batch = handle.prepareBatch(sql); + int batchCount = 0; + + while (parser.nextToken() != JsonToken.END_ARRAY) { + ObjectNode row = MAPPER.readTree(parser); + for (int idx = 0; idx < columns.size(); idx++) { + String col = columns.get(idx); + JsonNode val = row.get(col); + if (val == null || val.isNull()) { + batch.bind(idx, (Object) null); + } else if (binaryColumns.contains(col)) { + batch.bind(idx, Base64.getDecoder().decode(val.asText())); + } else if (val.isNumber()) { + if (val.isLong() || val.isInt() || val.isBigInteger()) { + batch.bind(idx, val.longValue()); + } else { + batch.bind(idx, val.doubleValue()); + } + } else if (val.isBoolean()) { + batch.bind(idx, val.booleanValue()); + } else { + batch.bind(idx, val.asText()); + } + } + batch.add(); + batchCount++; + totalRows++; + + if (batchCount >= batchSize) { + batch.execute(); + batch = handle.prepareBatch(sql); + batchCount = 0; + } + } + + if (batchCount > 0) { + batch.execute(); + } + } + return totalRows; + } + + private void disableForeignKeyChecks(Handle handle) { + if (connectionType == ConnectionType.MYSQL) { + handle.execute("SET FOREIGN_KEY_CHECKS = 0"); + } else { + handle.execute("SET session_replication_role = 'replica'"); + } + } + + private void enableForeignKeyChecks(Handle handle) { + if (connectionType == ConnectionType.MYSQL) { + handle.execute("SET FOREIGN_KEY_CHECKS = 1"); + } else { + handle.execute("SET session_replication_role = 'origin'"); + } + } + + String quoteIdentifier(String identifier) { + if (!SAFE_IDENTIFIER.matcher(identifier).matches()) { + throw new IllegalArgumentException("Invalid SQL identifier: " + identifier); + } + if (connectionType == ConnectionType.MYSQL) { + return "`" + identifier + "`"; + } + return "\"" + identifier + "\""; + } + + String quoteColumns(List columns) { + return columns.stream().map(this::quoteIdentifier).collect(Collectors.joining(", ")); + } +} diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/util/MigrationTestRunner.java b/openmetadata-service/src/main/java/org/openmetadata/service/util/MigrationTestRunner.java new file mode 100644 index 000000000000..7d5db8028fc3 --- /dev/null +++ b/openmetadata-service/src/main/java/org/openmetadata/service/util/MigrationTestRunner.java @@ -0,0 +1,288 @@ +package org.openmetadata.service.util; + +import com.fasterxml.jackson.databind.node.ObjectNode; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; 
+import lombok.extern.slf4j.Slf4j; +import org.jdbi.v3.core.Handle; +import org.jdbi.v3.core.Jdbi; +import org.openmetadata.service.OpenMetadataApplicationConfig; +import org.openmetadata.service.jdbi3.MigrationDAO; +import org.openmetadata.service.jdbi3.locator.ConnectionType; +import org.openmetadata.service.migration.api.MigrationProcess; +import org.openmetadata.service.migration.api.MigrationTestCase; +import org.openmetadata.service.migration.api.MigrationWorkflow; +import org.openmetadata.service.migration.api.TestResult; +import org.openmetadata.service.migration.context.MigrationWorkflowContext; + +@Slf4j +public class MigrationTestRunner { + + record MigrationTestEntry( + String version, String testName, String phase, boolean passed, String detail) {} + + private final Jdbi jdbi; + private final ConnectionType connectionType; + private final OpenMetadataApplicationConfig config; + private final String nativeSQLScriptRootPath; + private final String extensionSQLScriptRootPath; + + public MigrationTestRunner( + Jdbi jdbi, + ConnectionType connectionType, + OpenMetadataApplicationConfig config, + String nativeSQLScriptRootPath, + String extensionSQLScriptRootPath) { + this.jdbi = jdbi; + this.connectionType = connectionType; + this.config = config; + this.nativeSQLScriptRootPath = nativeSQLScriptRootPath; + this.extensionSQLScriptRootPath = extensionSQLScriptRootPath; + } + + public int run(String backupPath) throws IOException { + return run(backupPath, DatabaseBackupRestore.DEFAULT_BATCH_SIZE); + } + + public int run(String backupPath, int batchSize) throws IOException { + DatabaseBackupRestore backupRestore = + new DatabaseBackupRestore( + jdbi, + connectionType, + DatabaseBackupRestore.extractDatabaseName(config.getDataSourceFactory().getUrl()), + batchSize); + backupRestore.restore(backupPath, true); + + ObjectNode metadata = DatabaseBackupRestore.readBackupMetadata(backupPath); + String backupTimestamp = metadata.has("timestamp") ? metadata.get("timestamp").asText() : "N/A"; + String backupDbType = + metadata.has("databaseType") ? metadata.get("databaseType").asText() : "N/A"; + + MigrationDAO migrationDAO = jdbi.onDemand(MigrationDAO.class); + List executedVersions; + try { + executedVersions = migrationDAO.getMigrationVersions(); + } catch (Exception e) { + executedVersions = Collections.emptyList(); + } + String sourceVersion = + executedVersions.stream().max(MigrationTestRunner::compareVersions).orElse("unknown"); + + MigrationWorkflow workflow = + new MigrationWorkflow( + jdbi, + nativeSQLScriptRootPath, + connectionType, + extensionSQLScriptRootPath, + config.getMigrationConfiguration().getFlywayPath(), + config, + true); + workflow.loadMigrations(); + + List migrations = workflow.getMigrations(); + String targetVersion = + migrations.isEmpty() ? 
sourceVersion : migrations.get(migrations.size() - 1).getVersion(); + + List entries = new ArrayList<>(); + + try (Handle handle = jdbi.open()) { + MigrationWorkflowContext context = new MigrationWorkflowContext(handle); + context.computeInitialContext(sourceVersion); + + for (MigrationProcess process : migrations) { + String version = process.getVersion(); + String versionPkg = versionToPackage(version); + MigrationTestCase testCase = loadTestCase(versionPkg); + + if (testCase != null) { + entries.addAll(runValidation(testCase::validateBefore, handle, version, "BEFORE")); + } + + boolean migrationFailed = false; + try { + process.initialize(handle, jdbi); + process.runSchemaChanges(true); + process.runDataMigration(); + process.runPostDDLScripts(true); + context.computeMigrationContext(process, true); + workflow.updateMigrationStepInDB(process, context); + } catch (Exception e) { + migrationFailed = true; + LOG.error("Migration {} failed", version, e); + entries.add( + new MigrationTestEntry(version, "migration execution", "RUN", false, e.getMessage())); + } + + if (testCase != null && !migrationFailed) { + entries.addAll(runValidation(testCase::validateAfter, handle, version, "AFTER")); + } else if (!migrationFailed) { + entries.add(new MigrationTestEntry(version, "(no tests)", "-", true, "")); + } + + if (migrationFailed) { + LOG.warn( + "Migration {} failed. The database is in a partially-migrated state. " + + "Re-restore the backup before retrying.", + version); + break; + } + } + } + + printSummary(entries, sourceVersion, targetVersion, backupDbType, backupTimestamp); + + long failCount = entries.stream().filter(e -> !e.passed() && !"-".equals(e.phase())).count(); + return failCount > 0 ? 1 : 0; + } + + @FunctionalInterface + private interface ValidationSupplier { + List run(Handle handle); + } + + private List runValidation( + ValidationSupplier supplier, Handle handle, String version, String phase) { + List entries = new ArrayList<>(); + try { + List results = supplier.run(handle); + for (TestResult result : results) { + entries.add( + new MigrationTestEntry( + version, result.name(), phase, result.passed(), result.detail())); + } + } catch (Exception e) { + entries.add( + new MigrationTestEntry(version, "validation error", phase, false, e.getMessage())); + } + return entries; + } + + private MigrationTestCase loadTestCase(String versionPkg) { + String className = + String.format("org.openmetadata.service.migration.tests.%s.MigrationTest", versionPkg); + try { + Class clazz = Class.forName(className); + return (MigrationTestCase) clazz.getDeclaredConstructor().newInstance(); + } catch (ClassNotFoundException e) { + return null; + } catch (Exception e) { + LOG.warn("Failed to instantiate test class {}", className, e); + return null; + } + } + + static String versionToPackage(String version) { + String base = version.contains("-") ? 
version.split("-")[0] : version; + String[] parts = base.split("\\."); + StringBuilder sb = new StringBuilder("v"); + for (int i = 0; i < parts.length; i++) { + if (i > 0) sb.append("_"); + sb.append(Integer.parseInt(parts[i])); + } + return sb.toString(); + } + + private void printSummary( + List entries, + String sourceVersion, + String targetVersion, + String dbType, + String backupTimestamp) { + int colVersion = 10; + int colTest = 30; + int colPhase = 8; + int colResult = 8; + + for (MigrationTestEntry entry : entries) { + colVersion = Math.max(colVersion, entry.version().length() + 2); + colTest = Math.max(colTest, entry.testName().length() + 2); + } + + String headerFmt = + " %-" + colVersion + "s| %-" + colTest + "s| %-" + colPhase + "s| %-" + colResult + "s"; + int totalWidth = colVersion + colTest + colPhase + colResult + 7; + String separator = "-".repeat(totalWidth); + String doubleSeparator = "=".repeat(totalWidth); + + LOG.info(doubleSeparator); + LOG.info(centerText("Migration Test Summary", totalWidth)); + LOG.info(doubleSeparator); + LOG.info(" Source version : {}", sourceVersion); + LOG.info(" Target version : {}", targetVersion); + LOG.info(" Database type : {}", dbType); + LOG.info(" Backup timestamp: {}", backupTimestamp); + LOG.info(separator); + LOG.info(String.format(headerFmt, "Migration", "Test", "Phase", "Result")); + LOG.info(separator); + + int passed = 0; + int failed = 0; + + for (MigrationTestEntry entry : entries) { + String result; + if ("-".equals(entry.phase())) { + result = "-"; + } else if (entry.passed()) { + result = "PASS"; + passed++; + } else { + result = "FAIL"; + failed++; + } + + LOG.info(String.format(headerFmt, entry.version(), entry.testName(), entry.phase(), result)); + + if (!entry.passed() && !entry.detail().isEmpty() && !"-".equals(entry.phase())) { + String detailFmt = + " %-" + + colVersion + + "s| %-" + + (colTest - 2) + + "s| %-" + + colPhase + + "s| %-" + + colResult + + "s"; + LOG.info(String.format(detailFmt, "", entry.detail(), "", "")); + } + } + + LOG.info(separator); + LOG.info(" Total: {} passed, {} failed", passed, failed); + LOG.info(doubleSeparator); + } + + private static String centerText(String text, int width) { + if (text.length() >= width) { + return text; + } + int padding = (width - text.length()) / 2; + return " ".repeat(padding) + text; + } + + private static int compareVersions(String v1, String v2) { + int[] parts1 = parseVersionParts(v1); + int[] parts2 = parseVersionParts(v2); + int length = Math.max(parts1.length, parts2.length); + for (int i = 0; i < length; i++) { + int p1 = i < parts1.length ? parts1[i] : 0; + int p2 = i < parts2.length ? parts2[i] : 0; + if (p1 != p2) { + return Integer.compare(p1, p2); + } + } + return 0; + } + + private static int[] parseVersionParts(String version) { + String base = version.contains("-") ? 
version.split("-")[0] : version; + String[] parts = base.split("\\."); + int[] numbers = new int[parts.length]; + for (int i = 0; i < parts.length; i++) { + numbers[i] = Integer.parseInt(parts[i]); + } + return numbers; + } +} diff --git a/openmetadata-service/src/main/java/org/openmetadata/service/util/OpenMetadataOperations.java b/openmetadata-service/src/main/java/org/openmetadata/service/util/OpenMetadataOperations.java index e3b9b68c021b..b52606b94770 100644 --- a/openmetadata-service/src/main/java/org/openmetadata/service/util/OpenMetadataOperations.java +++ b/openmetadata-service/src/main/java/org/openmetadata/service/util/OpenMetadataOperations.java @@ -173,7 +173,7 @@ public Integer call() { + "'drop-create', 'changelog', 'migrate', 'migrate-secrets', 'reindex', 'reembed', 'reindex-rdf', 'reindexdi', 'deploy-pipelines', " + "'dbServiceCleanup', 'relationshipCleanup', 'tagUsageCleanup', 'drop-indexes', 'remove-security-config', 'create-indexes', " + "'setOpenMetadataUrl', 'configureEmailSettings', 'get-security-config', 'update-security-config', 'install-app', 'delete-app', 'create-user', 'reset-password', " - + "'syncAlertOffset', 'analyze-tables', 'cleanup-flowable-history', 'regenerate-bot-tokens'"); + + "'syncAlertOffset', 'analyze-tables', 'cleanup-flowable-history', 'regenerate-bot-tokens', 'backup', 'restore', 'test-migration'"); LOG.info( "Use 'reindex --auto-tune' for automatic performance optimization based on cluster capabilities"); LOG.info( @@ -2610,6 +2610,116 @@ public Integer cleanupFlowableHistory( } } + @Command(name = "backup", description = "Backup the entire database to a .tar.gz archive.") + public Integer backup( + @Option( + names = {"--backup-path"}, + required = true, + description = "Path where the backup .tar.gz file will be created") + String backupPath, + @Option( + names = {"--batch-size"}, + defaultValue = "1000", + description = + "Number of rows to read/write per batch. Default: " + + DatabaseBackupRestore.DEFAULT_BATCH_SIZE) + int batchSize) { + try { + parseConfig(); + ConnectionType connType = ConnectionType.from(config.getDataSourceFactory().getDriverClass()); + DatasourceConfig.initialize(connType.label); + String databaseName = + DatabaseBackupRestore.extractDatabaseName(config.getDataSourceFactory().getUrl()); + DatabaseBackupRestore backupRestore = + new DatabaseBackupRestore(jdbi, connType, databaseName, batchSize); + backupRestore.backup(backupPath); + return 0; + } catch (Exception e) { + LOG.error("Backup failed", e); + return 1; + } + } + + @Command(name = "restore", description = "Restore the database from a .tar.gz backup archive.") + public Integer restore( + @Option( + names = {"--backup-path"}, + required = true, + description = "Path to the backup .tar.gz file to restore from") + String backupPath, + @Option( + names = {"--force"}, + defaultValue = "false", + description = + "Force restore by truncating existing tables. Without this flag, restore fails if tables have data.") + boolean force, + @Option( + names = {"--batch-size"}, + defaultValue = "1000", + description = + "Number of rows to insert per batch. 
Default: " + + DatabaseBackupRestore.DEFAULT_BATCH_SIZE) + int batchSize) { + try { + parseConfig(); + ConnectionType connType = ConnectionType.from(config.getDataSourceFactory().getDriverClass()); + DatasourceConfig.initialize(connType.label); + String databaseName = + DatabaseBackupRestore.extractDatabaseName(config.getDataSourceFactory().getUrl()); + DatabaseBackupRestore backupRestore = + new DatabaseBackupRestore(jdbi, connType, databaseName, batchSize); + backupRestore.restore(backupPath, force); + return 0; + } catch (Exception e) { + LOG.error("Restore failed", e); + return 1; + } + } + + @Command( + name = "test-migration", + description = + "Test database migrations by restoring a backup and running pending migrations with before/after validation.") + public Integer testMigration( + @Option( + names = {"--backup-path"}, + required = true, + description = + "Path to the backup .tar.gz file to restore and test migrations against") + String backupPath, + @Option( + names = {"--force"}, + defaultValue = "false", + description = + "Force execution. This command restores a backup (truncating all tables) before running migrations. Pass --force to confirm.") + boolean force, + @Option( + names = {"--batch-size"}, + defaultValue = "1000", + description = + "Number of rows per batch during restore. Default: " + + DatabaseBackupRestore.DEFAULT_BATCH_SIZE) + int batchSize) { + if (!force) { + LOG.error( + "test-migration restores a backup which truncates all existing tables. " + + "Pass --force to confirm you want to proceed."); + return 1; + } + try { + parseConfig(); + ConnectionType connType = ConnectionType.from(config.getDataSourceFactory().getDriverClass()); + DatasourceConfig.initialize(connType.label); + MigrationTestRunner runner = + new MigrationTestRunner( + jdbi, connType, config, nativeSQLScriptRootPath, extensionSQLScriptRootPath); + return runner.run(backupPath, batchSize); + } catch (Exception e) { + LOG.error("Migration test failed", e); + return 1; + } + } + private void analyzeEntityTable(String entity) { try { EntityRepository repository = Entity.getEntityRepository(entity); diff --git a/openmetadata-service/src/test/java/org/openmetadata/service/util/DatabaseBackupRestoreTest.java b/openmetadata-service/src/test/java/org/openmetadata/service/util/DatabaseBackupRestoreTest.java new file mode 100644 index 000000000000..8a6ee0a65f37 --- /dev/null +++ b/openmetadata-service/src/test/java/org/openmetadata/service/util/DatabaseBackupRestoreTest.java @@ -0,0 +1,214 @@ +package org.openmetadata.service.util; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import java.io.BufferedOutputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.List; +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; +import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.openmetadata.service.jdbi3.locator.ConnectionType; + +class DatabaseBackupRestoreTest { + + private static final 
ObjectMapper MAPPER = new ObjectMapper(); + + @Test + void testExtractDatabaseNameMySQL() { + assertEquals( + "openmetadata_db", + DatabaseBackupRestore.extractDatabaseName( + "jdbc:mysql://localhost:3306/openmetadata_db?useSSL=false")); + } + + @Test + void testExtractDatabaseNamePostgres() { + assertEquals( + "openmetadata_db", + DatabaseBackupRestore.extractDatabaseName( + "jdbc:postgresql://localhost:5432/openmetadata_db?sslmode=disable")); + } + + @Test + void testExtractDatabaseNameNoParams() { + assertEquals( + "mydb", DatabaseBackupRestore.extractDatabaseName("jdbc:mysql://localhost:3306/mydb")); + } + + @Test + void testExtractDatabaseNameEmptyThrows() { + assertThrows( + IllegalArgumentException.class, + () -> DatabaseBackupRestore.extractDatabaseName("jdbc:mysql://localhost:3306/")); + } + + @Test + void testQuoteIdentifierMySQL() { + DatabaseBackupRestore mysqlInstance = + new DatabaseBackupRestore(null, ConnectionType.MYSQL, "testdb"); + assertEquals("`foo`", mysqlInstance.quoteIdentifier("foo")); + } + + @Test + void testQuoteIdentifierPostgres() { + DatabaseBackupRestore pgInstance = + new DatabaseBackupRestore(null, ConnectionType.POSTGRES, "testdb"); + assertEquals("\"foo\"", pgInstance.quoteIdentifier("foo")); + } + + @Test + void testQuoteIdentifierMySQLRejectsInvalidIdentifier() { + DatabaseBackupRestore mysqlInstance = + new DatabaseBackupRestore(null, ConnectionType.MYSQL, "testdb"); + assertThrows(IllegalArgumentException.class, () -> mysqlInstance.quoteIdentifier("col`name")); + } + + @Test + void testQuoteIdentifierPostgresRejectsInvalidIdentifier() { + DatabaseBackupRestore pgInstance = + new DatabaseBackupRestore(null, ConnectionType.POSTGRES, "testdb"); + assertThrows(IllegalArgumentException.class, () -> pgInstance.quoteIdentifier("col\"name")); + } + + @Test + void testQuoteIdentifierRejectsSqlInjection() { + DatabaseBackupRestore mysqlInstance = + new DatabaseBackupRestore(null, ConnectionType.MYSQL, "testdb"); + assertThrows( + IllegalArgumentException.class, + () -> mysqlInstance.quoteIdentifier("foo; DROP TABLE users; --")); + } + + @Test + void testQuoteColumnsMySQL() { + DatabaseBackupRestore mysqlInstance = + new DatabaseBackupRestore(null, ConnectionType.MYSQL, "testdb"); + String result = mysqlInstance.quoteColumns(List.of("id", "name", "email")); + assertEquals("`id`, `name`, `email`", result); + } + + @Test + void testQuoteColumnsPostgres() { + DatabaseBackupRestore pgInstance = + new DatabaseBackupRestore(null, ConnectionType.POSTGRES, "testdb"); + String result = pgInstance.quoteColumns(List.of("id", "name", "email")); + assertEquals("\"id\", \"name\", \"email\"", result); + } + + @Test + void testQuoteColumnsSingleColumn() { + DatabaseBackupRestore mysqlInstance = + new DatabaseBackupRestore(null, ConnectionType.MYSQL, "testdb"); + assertEquals("`id`", mysqlInstance.quoteColumns(List.of("id"))); + } + + @Test + void testReadBackupMetadataMissingThrows(@TempDir Path tempDir) throws IOException { + Path archivePath = tempDir.resolve("no-metadata.tar.gz"); + try (FileOutputStream fos = new FileOutputStream(archivePath.toFile()); + BufferedOutputStream bos = new BufferedOutputStream(fos); + GzipCompressorOutputStream gzos = new GzipCompressorOutputStream(bos); + TarArchiveOutputStream taos = new TarArchiveOutputStream(gzos)) { + byte[] content = "some data".getBytes(StandardCharsets.UTF_8); + TarArchiveEntry entry = new TarArchiveEntry("tables/users.json"); + entry.setSize(content.length); + taos.putArchiveEntry(entry); + taos.write(content); + 
taos.closeArchiveEntry(); + } + + IOException ex = + assertThrows( + IOException.class, + () -> DatabaseBackupRestore.readBackupMetadata(archivePath.toString())); + assertTrue(ex.getMessage().contains("metadata.json not found")); + } + + @Test + void testReadBackupMetadataSuccess(@TempDir Path tempDir) throws IOException { + Path archivePath = tempDir.resolve("with-metadata.tar.gz"); + + ObjectNode metadata = MAPPER.createObjectNode(); + metadata.put("timestamp", "2026-01-15T10:30:00Z"); + metadata.put("version", "1.6.0"); + metadata.put("databaseType", "MYSQL"); + metadata.put("databaseName", "openmetadata_db"); + + byte[] metadataBytes = MAPPER.writeValueAsBytes(metadata); + + try (FileOutputStream fos = new FileOutputStream(archivePath.toFile()); + BufferedOutputStream bos = new BufferedOutputStream(fos); + GzipCompressorOutputStream gzos = new GzipCompressorOutputStream(bos); + TarArchiveOutputStream taos = new TarArchiveOutputStream(gzos)) { + TarArchiveEntry entry = new TarArchiveEntry("metadata.json"); + entry.setSize(metadataBytes.length); + taos.putArchiveEntry(entry); + taos.write(metadataBytes); + taos.closeArchiveEntry(); + } + + ObjectNode result = DatabaseBackupRestore.readBackupMetadata(archivePath.toString()); + assertNotNull(result); + assertEquals("2026-01-15T10:30:00Z", result.get("timestamp").asText()); + assertEquals("1.6.0", result.get("version").asText()); + assertEquals("MYSQL", result.get("databaseType").asText()); + assertEquals("openmetadata_db", result.get("databaseName").asText()); + } + + @Test + void testReadBackupMetadataRoundTrip(@TempDir Path tempDir) throws IOException { + Path archivePath = tempDir.resolve("round-trip.tar.gz"); + + ObjectNode tablesMetadata = MAPPER.createObjectNode(); + ObjectNode usersTable = MAPPER.createObjectNode(); + usersTable.putArray("columns").add("id").add("name").add("email"); + usersTable.putArray("binaryColumns"); + usersTable.put("rowCount", 42); + tablesMetadata.set("users", usersTable); + + ObjectNode metadata = MAPPER.createObjectNode(); + metadata.put("timestamp", "2026-03-19T08:00:00Z"); + metadata.put("version", "1.6.0"); + metadata.put("databaseType", "POSTGRES"); + metadata.put("databaseName", "om_db"); + metadata.set("tables", tablesMetadata); + + byte[] metadataBytes = MAPPER.writeValueAsBytes(metadata); + + try (FileOutputStream fos = new FileOutputStream(archivePath.toFile()); + BufferedOutputStream bos = new BufferedOutputStream(fos); + GzipCompressorOutputStream gzos = new GzipCompressorOutputStream(bos); + TarArchiveOutputStream taos = new TarArchiveOutputStream(gzos)) { + TarArchiveEntry entry = new TarArchiveEntry("metadata.json"); + entry.setSize(metadataBytes.length); + taos.putArchiveEntry(entry); + taos.write(metadataBytes); + taos.closeArchiveEntry(); + } + + ObjectNode result = DatabaseBackupRestore.readBackupMetadata(archivePath.toString()); + assertNotNull(result); + assertEquals("POSTGRES", result.get("databaseType").asText()); + assertEquals("om_db", result.get("databaseName").asText()); + + ObjectNode resultTables = (ObjectNode) result.get("tables"); + assertNotNull(resultTables); + assertNotNull(resultTables.get("users")); + assertEquals(42, resultTables.get("users").get("rowCount").asInt()); + assertEquals(3, resultTables.get("users").get("columns").size()); + assertEquals("id", resultTables.get("users").get("columns").get(0).asText()); + assertEquals("name", resultTables.get("users").get("columns").get(1).asText()); + assertEquals("email", 
resultTables.get("users").get("columns").get(2).asText()); + } +} diff --git a/openmetadata-service/src/test/java/org/openmetadata/service/util/MigrationTestRunnerTest.java b/openmetadata-service/src/test/java/org/openmetadata/service/util/MigrationTestRunnerTest.java new file mode 100644 index 000000000000..96eb25f8ed59 --- /dev/null +++ b/openmetadata-service/src/test/java/org/openmetadata/service/util/MigrationTestRunnerTest.java @@ -0,0 +1,62 @@ +package org.openmetadata.service.util; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import org.junit.jupiter.api.Test; + +class MigrationTestRunnerTest { + + @Test + void testVersionToPackageStandard() { + assertEquals("v1_12_0", MigrationTestRunner.versionToPackage("1.12.0")); + } + + @Test + void testVersionToPackageSingleDigit() { + assertEquals("v1_1_0", MigrationTestRunner.versionToPackage("1.1.0")); + } + + @Test + void testVersionToPackageWithPatch() { + assertEquals("v1_1_15", MigrationTestRunner.versionToPackage("1.1.15")); + } + + @Test + void testVersionToPackageWithExtension() { + assertEquals("v1_12_0", MigrationTestRunner.versionToPackage("1.12.0-collate")); + } + + @Test + void testVersionToPackageMajorOnly() { + assertEquals("v2_0_0", MigrationTestRunner.versionToPackage("2.0.0")); + } + + @Test + void testVersionToPackageTwoParts() { + assertEquals("v1_0", MigrationTestRunner.versionToPackage("1.0")); + } + + @Test + void testVersionToPackageSinglePart() { + assertEquals("v3", MigrationTestRunner.versionToPackage("3")); + } + + @Test + void testVersionToPackageInvalidNonNumericThrows() { + assertThrows(NumberFormatException.class, () -> MigrationTestRunner.versionToPackage("abc")); + } + + @Test + void testVersionToPackageWithExtensionTwoParts() { + assertEquals("v1_6", MigrationTestRunner.versionToPackage("1.6-SNAPSHOT")); + } + + @Test + void testVersionToPackageNoCollision() { + String v1 = MigrationTestRunner.versionToPackage("1.12.0"); + String v2 = MigrationTestRunner.versionToPackage("1.1.20"); + assertNotEquals(v1, v2); + } +} diff --git a/pom.xml b/pom.xml index 6701e6be2056..ca404a6fdd37 100644 --- a/pom.xml +++ b/pom.xml @@ -158,6 +158,7 @@ 0.26.0 0.64.8 20260101.1 + 1.27.1
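
Note on usage: the three new picocli commands (backup, restore, test-migration) all take --backup-path, an optional --batch-size that defaults to 1000, and --force where truncation is involved. The same DatabaseBackupRestore class can also be driven directly against a Jdbi instance; the following is a minimal sketch, assuming a locally reachable MySQL database with placeholder credentials (in the CLI commands the URL and credentials come from the parsed server config instead):

import org.jdbi.v3.core.Jdbi;
import org.openmetadata.service.jdbi3.locator.ConnectionType;
import org.openmetadata.service.util.DatabaseBackupRestore;

public class BackupRestoreExample {
  public static void main(String[] args) throws Exception {
    // Placeholder URL and credentials for illustration only.
    String jdbcUrl = "jdbc:mysql://localhost:3306/openmetadata_db?useSSL=false";
    Jdbi jdbi = Jdbi.create(jdbcUrl, "openmetadata_user", "openmetadata_password");

    // extractDatabaseName strips query parameters and returns the last path segment: "openmetadata_db".
    String databaseName = DatabaseBackupRestore.extractDatabaseName(jdbcUrl);
    DatabaseBackupRestore backupRestore =
        new DatabaseBackupRestore(jdbi, ConnectionType.MYSQL, databaseName, 500);

    // Writes metadata.json plus one tables/<name>.json entry per table into a gzip'd tar archive.
    backupRestore.backup("/tmp/openmetadata-backup.tar.gz");

    // force = true truncates the target tables first; force = false fails if any table has rows.
    backupRestore.restore("/tmp/openmetadata-backup.tar.gz", true);
  }
}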
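Because readBackupMetadata is public and static, an archive can be inspected without any database connection before deciding whether to restore it, for example to confirm that its databaseType matches the target server. A small sketch of such a pre-flight check (the printed labels are arbitrary; the field names come from the backup metadata written above):

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.openmetadata.service.util.DatabaseBackupRestore;

public class InspectBackup {
  public static void main(String[] args) throws Exception {
    ObjectNode metadata =
        DatabaseBackupRestore.readBackupMetadata("/tmp/openmetadata-backup.tar.gz");
    System.out.println("timestamp    : " + metadata.get("timestamp").asText());
    System.out.println("version      : " + metadata.get("version").asText());
    System.out.println("databaseType : " + metadata.get("databaseType").asText());
    System.out.println("databaseName : " + metadata.get("databaseName").asText());

    // Each per-table entry carries "columns", "binaryColumns", and "rowCount".
    JsonNode tables = metadata.get("tables");
    tables
        .fieldNames()
        .forEachRemaining(
            name ->
                System.out.println(
                    "  " + name + ": " + tables.get(name).get("rowCount").asInt() + " rows"));
  }
}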
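Migration test cases are discovered by naming convention: MigrationTestRunner.loadTestCase looks for a class named org.openmetadata.service.migration.tests.<versionPackage>.MigrationTest, where <versionPackage> is produced by versionToPackage (for example, 1.12.0 becomes v1_12_0). Below is a minimal sketch of such a test class; the interface contract and package convention come from this change, while the table and the specific assertions are hypothetical placeholders:

package org.openmetadata.service.migration.tests.v1_12_0;

import java.util.List;
import org.jdbi.v3.core.Handle;
import org.openmetadata.service.migration.api.MigrationTestCase;
import org.openmetadata.service.migration.api.TestResult;

public class MigrationTest implements MigrationTestCase {

  @Override
  public List<TestResult> validateBefore(Handle handle) {
    // Precondition on the restored backup: the (hypothetical) table must contain data,
    // otherwise the post-migration check below would pass vacuously.
    int rows =
        handle.createQuery("SELECT COUNT(*) FROM entity_relationship").mapTo(Integer.class).one();
    return rows > 0
        ? List.of(TestResult.pass("entity_relationship has data before migration"))
        : List.of(TestResult.fail("entity_relationship has data before migration", "table is empty"));
  }

  @Override
  public List<TestResult> validateAfter(Handle handle) {
    // Postcondition: the migration must not have produced rows with a NULL fromId
    // (a hypothetical invariant, used purely for illustration).
    int orphans =
        handle
            .createQuery("SELECT COUNT(*) FROM entity_relationship WHERE fromId IS NULL")
            .mapTo(Integer.class)
            .one();
    return orphans == 0
        ? List.of(TestResult.pass("no orphaned relationships after migration"))
        : List.of(TestResult.fail("no orphaned relationships after migration", orphans + " rows"));
  }
}

The runner calls validateBefore just before the matching MigrationProcess executes and validateAfter once its schema changes, data migration, and post-DDL scripts have completed. A failing validation shows up as FAIL in the summary table and makes the command exit non-zero, but only a failed migration execution stops the remaining migrations.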