Use of org.apache.hudi.common.table.timeline.HoodieInstant in project hudi by apache.
In class TestCommitsCommand, method generateMixedData.
/**
 * Generates both replace commit and commit data.
 */
private LinkedHashMap<HoodieInstant, Integer[]> generateMixedData() throws Exception {
  // generate data and metadata
  LinkedHashMap<HoodieInstant, Integer[]> replaceCommitData = new LinkedHashMap<>();
  replaceCommitData.put(new HoodieInstant(false, HoodieTimeline.REPLACE_COMMIT_ACTION, "103"),
      new Integer[] {15, 10});
  LinkedHashMap<HoodieInstant, Integer[]> commitData = new LinkedHashMap<>();
  commitData.put(new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, "102"), new Integer[] {15, 10});
  commitData.put(new HoodieInstant(false, HoodieTimeline.COMMIT_ACTION, "101"), new Integer[] {20, 10});
  for (Map.Entry<HoodieInstant, Integer[]> entry : commitData.entrySet()) {
    String key = entry.getKey().getTimestamp();
    Integer[] value = entry.getValue();
    HoodieTestCommitMetadataGenerator.createCommitFileWithMetadata(
        tablePath1, key, hadoopConf(), Option.of(value[0]), Option.of(value[1]));
  }
  for (Map.Entry<HoodieInstant, Integer[]> entry : replaceCommitData.entrySet()) {
    String key = entry.getKey().getTimestamp();
    Integer[] value = entry.getValue();
    HoodieTestReplaceCommitMetadataGenerator.createReplaceCommitFileWithMetadata(
        tablePath1, key, Option.of(value[0]), Option.of(value[1]), metaClient);
  }
  metaClient = HoodieTableMetaClient.reload(HoodieCLI.getTableMetaClient());
  assertEquals(3, metaClient.reloadActiveTimeline().getCommitsTimeline().countInstants(),
      "There should be 3 commits");
  LinkedHashMap<HoodieInstant, Integer[]> data = replaceCommitData;
  data.putAll(commitData);
  return data;
}
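For orientation, here is a minimal sketch (not part of the Hudi test suite) of how the returned map could be consumed; it assumes only the HoodieInstant accessors getAction() and getTimestamp() seen in these examples:

LinkedHashMap<HoodieInstant, Integer[]> data = generateMixedData();
for (Map.Entry<HoodieInstant, Integer[]> entry : data.entrySet()) {
  HoodieInstant instant = entry.getKey();
  // getAction() distinguishes the replacecommit instant ("103") from the plain commits
  System.out.println(instant.getAction() + " @ " + instant.getTimestamp()
      + " -> " + Arrays.toString(entry.getValue()));
}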
Use of org.apache.hudi.common.table.timeline.HoodieInstant in project hudi by apache.
In class TestCompactionCommand, method generateCompactionInstances.
private void generateCompactionInstances() throws IOException {
  // create MOR table.
  new TableCommand().createTable(tablePath, tableName, HoodieTableType.MERGE_ON_READ.name(),
      "", TimelineLayoutVersion.VERSION_1, HoodieAvroPayload.class.getName());
  CompactionTestUtils.setupAndValidateCompactionOperations(HoodieCLI.getTableMetaClient(), true, 1, 2, 3, 4);
  HoodieActiveTimeline activeTimeline = HoodieCLI.getTableMetaClient().reloadActiveTimeline();
  // Transition the four inflight compaction instants to completed
  Arrays.asList("001", "003", "005", "007").forEach(timestamp ->
      activeTimeline.transitionCompactionInflightToComplete(
          new HoodieInstant(HoodieInstant.State.INFLIGHT, COMPACTION_ACTION, timestamp), Option.empty()));
  // Simulate a compaction commit in the metadata table timeline
  // so that archival in the data table can happen
  HoodieTestUtils.createCompactionCommitInMetadataTable(hadoopConf(),
      new HoodieWrapperFileSystem(FSUtils.getFs(tablePath, hadoopConf()), new NoOpConsistencyGuard()),
      tablePath, "007");
}
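As a follow-up sketch (not from the original test), the completed compaction instants could be inspected on a reloaded timeline; completed compactions surface as commit instants, so the commits-timeline accessors from the earlier example apply:

HoodieActiveTimeline timeline = HoodieCLI.getTableMetaClient().reloadActiveTimeline();
// print each completed instant with its state for a quick visual check
timeline.getCommitsTimeline().filterCompletedInstants().getInstants()
    .forEach(instant -> System.out.println(instant.getTimestamp() + " " + instant.getState()));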
Use of org.apache.hudi.common.table.timeline.HoodieInstant in project hudi by apache.
In class TestRollbacksCommand, method testShowRollback.
/**
 * Test case for command 'show rollback'.
 */
@Test
public void testShowRollback() throws IOException {
  // get instant
  HoodieActiveTimeline activeTimeline = new RollbacksCommand.RollbackTimeline(HoodieCLI.getTableMetaClient());
  Stream<HoodieInstant> rollback = activeTimeline.getRollbackTimeline().filterCompletedInstants().getInstants();
  HoodieInstant instant = rollback.findFirst().orElse(null);
  assertNotNull(instant, "The instant cannot be null.");
  CommandResult cr = shell().executeCommand("show rollback --instant " + instant.getTimestamp());
  assertTrue(cr.isSuccess());

  List<Comparable[]> rows = new ArrayList<>();
  // get metadata of instant
  HoodieRollbackMetadata metadata = TimelineMetadataUtils.deserializeAvroMetadata(
      activeTimeline.getInstantDetails(instant).get(), HoodieRollbackMetadata.class);
  // generate the expected result
  metadata.getPartitionMetadata().forEach((key, value) -> Stream
      .concat(value.getSuccessDeleteFiles().stream().map(f -> Pair.of(f, true)),
          value.getFailedDeleteFiles().stream().map(f -> Pair.of(f, false)))
      .forEach(fileWithDeleteStatus -> {
        Comparable[] row = new Comparable[5];
        row[0] = metadata.getStartRollbackTime();
        row[1] = metadata.getCommitsRollback().toString();
        row[2] = key;
        row[3] = fileWithDeleteStatus.getLeft();
        row[4] = fileWithDeleteStatus.getRight();
        rows.add(row);
      }));

  TableHeader header = new TableHeader()
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_INSTANT)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_ROLLBACK_INSTANT)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_PARTITION)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_DELETED_FILE)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_SUCCEEDED);
  String expected = HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows);
  expected = removeNonWordAndStripSpace(expected);
  String got = removeNonWordAndStripSpace(cr.getResult().toString());
  assertEquals(expected, got);
}
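The findFirst() lookup above takes whatever completed rollback happens to come first. A slightly more targeted variant (a sketch, reusing only compareTimestamps from the startCommit example below; the cutoff "105" is a made-up value) filters by timestamp first:

HoodieInstant target = activeTimeline.getRollbackTimeline()
    .filterCompletedInstants()
    .getInstants()
    // keep only rollbacks strictly earlier than the hypothetical cutoff "105"
    .filter(i -> HoodieTimeline.compareTimestamps(i.getTimestamp(), HoodieTimeline.LESSER_THAN, "105"))
    .findFirst()
    .orElse(null);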
Use of org.apache.hudi.common.table.timeline.HoodieInstant in project hudi by apache.
In class TestRollbacksCommand, method testShowRollbacks.
/**
 * Test case for command 'show rollbacks'.
 */
@Test
public void testShowRollbacks() {
  CommandResult cr = shell().executeCommand("show rollbacks");
  assertTrue(cr.isSuccess());

  // get rollback instants
  HoodieActiveTimeline activeTimeline = new RollbacksCommand.RollbackTimeline(HoodieCLI.getTableMetaClient());
  Stream<HoodieInstant> rollback = activeTimeline.getRollbackTimeline().filterCompletedInstants().getInstants();

  List<Comparable[]> rows = new ArrayList<>();
  rollback.sorted().forEach(instant -> {
    try {
      // get pair of rollback time and instant time
      HoodieRollbackMetadata metadata = TimelineMetadataUtils.deserializeAvroMetadata(
          activeTimeline.getInstantDetails(instant).get(), HoodieRollbackMetadata.class);
      metadata.getCommitsRollback().forEach(c -> {
        Comparable[] row = new Comparable[5];
        row[0] = metadata.getStartRollbackTime();
        row[1] = c;
        // expected values: 3 files deleted across 3 partitions
        row[2] = 3;
        row[3] = metadata.getTimeTakenInMillis();
        row[4] = 3;
        rows.add(row);
      });
    } catch (IOException e) {
      e.printStackTrace();
    }
  });

  TableHeader header = new TableHeader()
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_INSTANT)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_ROLLBACK_INSTANT)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_DELETED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TIME_TOKEN_MILLIS)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_PARTITIONS);
  String expected = HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows);
  expected = removeNonWordAndStripSpace(expected);
  String got = removeNonWordAndStripSpace(cr.getResult().toString());
  assertEquals(expected, got);
}
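The hard-coded 3s in row[2] and row[4] stand for the expected file-deletion and partition counts. As a sketch (assuming only the HoodieRollbackMetadata getters already used in testShowRollback), the same totals could be derived from the metadata instead of being hard-coded:

// count successfully deleted files across all partitions
int totalFilesDeleted = metadata.getPartitionMetadata().values().stream()
    .mapToInt(pm -> pm.getSuccessDeleteFiles().size())
    .sum();
// one entry per partition touched by the rollback
int totalPartitions = metadata.getPartitionMetadata().size();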
Use of org.apache.hudi.common.table.timeline.HoodieInstant in project hudi by apache.
In class BaseHoodieWriteClient, method startCommit.
private void startCommit(String instantTime, String actionType, HoodieTableMetaClient metaClient) {
  LOG.info("Generate a new instant time: " + instantTime + " action: " + actionType);
  // if there are pending compactions, their instant times must be earlier than this instant time
  metaClient.getActiveTimeline().filterPendingCompactionTimeline().lastInstant()
      .ifPresent(latestPending -> ValidationUtils.checkArgument(
          HoodieTimeline.compareTimestamps(latestPending.getTimestamp(), HoodieTimeline.LESSER_THAN, instantTime),
          "Latest pending compaction instant time must be earlier than this instant time. Latest Compaction :"
              + latestPending + ", Ingesting at " + instantTime));
  if (config.getFailedWritesCleanPolicy().isLazy()) {
    this.heartbeatClient.start(instantTime);
  }
  if (actionType.equals(HoodieTimeline.REPLACE_COMMIT_ACTION)) {
    metaClient.getActiveTimeline().createRequestedReplaceCommit(instantTime, actionType);
  } else {
    metaClient.getActiveTimeline().createNewInstant(
        new HoodieInstant(HoodieInstant.State.REQUESTED, actionType, instantTime));
  }
}
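To make the guard concrete, a tiny illustration (the timestamps are made up) of the compareTimestamps check that startCommit applies against the latest pending compaction:

// a pending compaction at "105" admits a new ingestion instant "106"
boolean ok = HoodieTimeline.compareTimestamps("105", HoodieTimeline.LESSER_THAN, "106");  // true
// but "104" would trip the ValidationUtils.checkArgument above
boolean bad = HoodieTimeline.compareTimestamps("105", HoodieTimeline.LESSER_THAN, "104"); // false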