Use of org.apache.hudi.table.action.rollback.BaseRollbackHelper in project hudi by apache.
From the class TestMarkerBasedRollbackStrategy, method testCopyOnWriteRollbackWithTestTable.
@Test
public void testCopyOnWriteRollbackWithTestTable() throws Exception {
  // given: wrote some base files and corresponding markers
  HoodieTestTable testTable = HoodieTestTable.of(metaClient);
  String f0 = testTable.addRequestedCommit("000")
      .getFileIdsWithBaseFilesInPartitions("partA").get("partA");
  String f1 = testTable.addCommit("001")
      .withBaseFilesInPartition("partA", f0)
      .getFileIdsWithBaseFilesInPartitions("partB").get("partB");
  String f2 = "f2";
  testTable.forCommit("001")
      .withMarkerFile("partA", f0, IOType.MERGE)
      .withMarkerFile("partB", f1, IOType.CREATE)
      .withMarkerFile("partA", f2, IOType.CREATE);

  // when
  HoodieTable hoodieTable = HoodieSparkTable.create(getConfig(), context, metaClient);
  List<HoodieRollbackRequest> rollbackRequests = new MarkerBasedRollbackStrategy(hoodieTable, context, getConfig(), "002")
      .getRollbackRequests(new HoodieInstant(HoodieInstant.State.INFLIGHT, HoodieTimeline.COMMIT_ACTION, "001"));
  List<HoodieRollbackStat> stats = new BaseRollbackHelper(hoodieTable.getMetaClient(), getConfig())
      .performRollback(context, new HoodieInstant(HoodieInstant.State.INFLIGHT, HoodieTimeline.COMMIT_ACTION, "001"), rollbackRequests);

  // then: ensure files are deleted correctly, non-existent files reported as failed deletes
  assertEquals(2, stats.size());
  FileStatus[] partAFiles = testTable.listAllFilesInPartition("partA");
  FileStatus[] partBFiles = testTable.listAllFilesInPartition("partB");
  assertEquals(0, partBFiles.length);
  assertEquals(1, partAFiles.length);
  assertEquals(2, stats.stream().mapToInt(r -> r.getSuccessDeleteFiles().size()).sum());
  assertEquals(1, stats.stream().mapToInt(r -> r.getFailedDeleteFiles().size()).sum());
}
Use of org.apache.hudi.table.action.rollback.BaseRollbackHelper in project hudi by apache.
From the class TestMarkerBasedRollbackStrategy, method testRun.
private List<HoodieRollbackStat> testRun(boolean useFileListingMetadata, HoodieWriteConfig writeConfig, SparkRDDWriteClient writeClient) {
  // Inserts
  String newCommitTime = "001";
  writeClient.startCommitWithTime(newCommitTime);
  List<HoodieRecord> records = dataGen.generateInserts(newCommitTime, 100);
  JavaRDD<WriteStatus> writeStatuses = writeClient.insert(jsc.parallelize(records, 1), newCommitTime);
  writeClient.commit(newCommitTime, writeStatuses);

  // Updates
  newCommitTime = "002";
  writeClient.startCommitWithTime(newCommitTime);
  records = dataGen.generateUniqueUpdates(newCommitTime, 50);
  writeStatuses = writeClient.upsert(jsc.parallelize(records, 1), newCommitTime);
  writeStatuses.collect();

  // Roll back the 2nd commit and return the stats so callers can verify them.
  HoodieTable hoodieTable = HoodieSparkTable.create(getConfig(), context, metaClient);
  List<HoodieRollbackRequest> rollbackRequests = new MarkerBasedRollbackStrategy(hoodieTable, context, getConfig(), "003")
      .getRollbackRequests(new HoodieInstant(HoodieInstant.State.INFLIGHT, HoodieTimeline.DELTA_COMMIT_ACTION, "002"));
  return new BaseRollbackHelper(hoodieTable.getMetaClient(), getConfig())
      .performRollback(context, new HoodieInstant(HoodieInstant.State.INFLIGHT, HoodieTimeline.DELTA_COMMIT_ACTION, "002"), rollbackRequests);
}
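Both examples follow the same flow: MarkerBasedRollbackStrategy resolves the marker files of a failed instant into rollback requests, and BaseRollbackHelper.performRollback executes them. The sketch below distills that flow using only the calls shown in the tests above; it assumes the same test fixtures (getConfig(), context, metaClient) and the Hudi version these tests were written against, since constructor signatures vary between releases, so treat it as illustrative rather than canonical.

// Sketch only: reuses the test fixtures getConfig(), context and metaClient from above.
HoodieTable table = HoodieSparkTable.create(getConfig(), context, metaClient);
HoodieInstant instantToRollback =
    new HoodieInstant(HoodieInstant.State.INFLIGHT, HoodieTimeline.COMMIT_ACTION, "001");

// 1. Turn the instant's marker files into concrete rollback requests.
List<HoodieRollbackRequest> requests =
    new MarkerBasedRollbackStrategy(table, context, getConfig(), "002")
        .getRollbackRequests(instantToRollback);

// 2. Execute the requests; each HoodieRollbackStat reports successful and failed deletes per partition.
List<HoodieRollbackStat> stats =
    new BaseRollbackHelper(table.getMetaClient(), getConfig())
        .performRollback(context, instantToRollback, requests);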