Use of org.apache.iceberg.actions.BaseMigrateTableActionResult in project iceberg by apache.
The doExecute method of the class BaseMigrateTableSparkAction:
private MigrateTable.Result doExecute() {
  LOG.info("Starting the migration of {} to Iceberg", sourceTableIdent());

  // move the source table to a new name, halting all modifications and allowing us to stage
  // the creation of a new Iceberg table in its place
  renameAndBackupSourceTable();

  StagedSparkTable stagedTable = null;
  Table icebergTable;
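  // track whether the body below completed, so the finally block knows
  // whether the migration must be rolled back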
  boolean threw = true;
  try {
    LOG.info("Staging a new Iceberg table {}", destTableIdent());
    stagedTable = stageDestTable();
    icebergTable = stagedTable.table();

    LOG.info("Ensuring {} has a valid name mapping", destTableIdent());
    ensureNameMappingPresent(icebergTable);
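    // SparkTableUtil.importSparkTable expects a Spark v1 TableIdentifier,
    // so rebuild the backup identifier in that form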
    Some<String> backupNamespace = Some.apply(backupIdent.namespace()[0]);
    TableIdentifier v1BackupIdent = new TableIdentifier(backupIdent.name(), backupNamespace);
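    // metadata files produced by the import are staged under the new table's metadata location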
    String stagingLocation = getMetadataLocation(icebergTable);
    LOG.info("Generating Iceberg metadata for {} in {}", destTableIdent(), stagingLocation);
    SparkTableUtil.importSparkTable(spark(), v1BackupIdent, icebergTable, stagingLocation);

    LOG.info("Committing staged changes to {}", destTableIdent());
    stagedTable.commitStagedChanges();
    threw = false;
  } finally {
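    // on any failure, restore the original table under its old name and
    // abort the staged Iceberg table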
    if (threw) {
      LOG.error("Failed to perform the migration, aborting table creation and restoring the original table");

      restoreSourceTable();

      if (stagedTable != null) {
        try {
          stagedTable.abortStagedChanges();
        } catch (Exception abortException) {
          LOG.error("Cannot abort staged changes", abortException);
        }
      }
    }
  }
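  // the commit succeeded: report how many data files the new snapshot tracks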
  Snapshot snapshot = icebergTable.currentSnapshot();
  long migratedDataFilesCount = Long.parseLong(snapshot.summary().get(SnapshotSummary.TOTAL_DATA_FILES_PROP));
  LOG.info("Successfully loaded Iceberg metadata for {} files to {}", migratedDataFilesCount, destTableIdent());
  return new BaseMigrateTableActionResult(migratedDataFilesCount);
}
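For context, a minimal sketch of how this action is typically driven through the Spark actions API and how the resulting file count is read back. It assumes the Iceberg Spark runtime is on the classpath; the class name and the table identifier spark_catalog.db.source_table are placeholders.

import org.apache.iceberg.actions.MigrateTable;
import org.apache.iceberg.spark.actions.SparkActions;
import org.apache.spark.sql.SparkSession;

public class MigrateTableExample {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder()
        .appName("migrate-table-example")
        .getOrCreate();

    // Running the migrate action ends up in doExecute() above, which wraps the
    // migrated file count in a BaseMigrateTableActionResult.
    MigrateTable.Result result = SparkActions.get(spark)
        .migrateTable("spark_catalog.db.source_table") // placeholder identifier
        .execute();

    System.out.println("Migrated data files: " + result.migratedDataFilesCount());
    spark.stop();
  }
}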