Usage of org.apache.hudi.common.table.timeline.HoodieActiveTimeline in the Apache Hudi project: class ITTestClusteringCommand, method testClustering.
/**
 * Test case for command 'clustering run'.
 *
 * <p>Schedules a clustering plan, runs it via the CLI shell, and verifies the
 * pending replace-commit instant transitions to a completed instant.
 */
@Test
public void testClustering() throws IOException {
  // generate commits
  generateCommits();
  CommandResult cr1 = scheduleClustering();
  assertTrue(cr1.isSuccess());
  // get clustering instant
  HoodieActiveTimeline timeline = HoodieCLI.getTableMetaClient().getActiveTimeline();
  Option<String> instance = timeline.filterPendingReplaceTimeline().firstInstant().map(HoodieInstant::getTimestamp);
  assertTrue(instance.isPresent(), "Must have pending clustering.");
  // Unwrap the Option: formatting the Option itself with %s would produce
  // "Option{val=...}" instead of the instant timestamp, and List.contains(Option)
  // below could never match a String element.
  final String clusteringInstant = instance.get();
  CommandResult cr2 = getShell().executeCommand(
      String.format("clustering run --parallelism %s --clusteringInstant %s --sparkMaster %s", 2, clusteringInstant, "local"));
  assertAll("Command run failed",
      () -> assertTrue(cr2.isSuccess()),
      () -> assertTrue(cr2.getResult().toString().startsWith("Succeeded to run clustering for ")));
  // assert clustering complete: the instant must appear both in the completed
  // instants and in the completed replace timeline after a reload
  assertTrue(HoodieCLI.getTableMetaClient().getActiveTimeline().reload()
      .filterCompletedInstants().getInstants().map(HoodieInstant::getTimestamp)
      .collect(Collectors.toList()).contains(clusteringInstant), "Pending clustering must be completed");
  assertTrue(HoodieCLI.getTableMetaClient().getActiveTimeline().reload()
      .getCompletedReplaceTimeline().getInstants().map(HoodieInstant::getTimestamp)
      .collect(Collectors.toList()).contains(clusteringInstant), "Pending clustering must be completed");
}
Usage of org.apache.hudi.common.table.timeline.HoodieActiveTimeline in the Apache Hudi project: class ITTestCommitsCommand, method testRollbackCommit.
/**
 * Test case of 'commit rollback' command.
 *
 * <p>Creates three commits (100, 101, 102), rolls back the latest one via the
 * CLI, then rolls back the next. After each rollback the rollback timeline and
 * the remaining commit count are verified.
 */
@Test
public void testRollbackCommit() throws Exception {
  // Create some commits files and base files
  Map<String, String> partitionAndFileId = new HashMap<String, String>() {
    {
      put(DEFAULT_FIRST_PARTITION_PATH, "file-1");
      put(DEFAULT_SECOND_PARTITION_PATH, "file-2");
      put(DEFAULT_THIRD_PARTITION_PATH, "file-3");
    }
  };
  final String rollbackCommit = "102";
  HoodieTestTable.of(metaClient)
      .withPartitionMetaFiles(DEFAULT_PARTITION_PATHS)
      .addCommit("100").withBaseFilesInPartitions(partitionAndFileId)
      .addCommit("101").withBaseFilesInPartitions(partitionAndFileId)
      .addCommit(rollbackCommit).withBaseFilesInPartitions(partitionAndFileId);
  CommandResult cr = getShell().executeCommand(
      String.format("commit rollback --commit %s --sparkMaster %s --sparkMemory %s", rollbackCommit, "local", "4G"));
  assertTrue(cr.isSuccess());
  metaClient = HoodieTableMetaClient.reload(HoodieCLI.getTableMetaClient());
  HoodieActiveTimeline rollbackTimeline = new RollbacksCommand.RollbackTimeline(metaClient);
  // Rolling back "102" leaves one rollback instant and two commits (100, 101).
  assertEquals(1, rollbackTimeline.getRollbackTimeline().countInstants(), "There should have 1 rollback instant.");
  HoodieActiveTimeline timeline = metaClient.reloadActiveTimeline();
  assertEquals(2, timeline.getCommitsTimeline().countInstants(), "There should have 2 instants.");
  // rollback complete commit
  CommandResult cr2 = getShell().executeCommand(
      String.format("commit rollback --commit %s --sparkMaster %s --sparkMemory %s", "101", "local", "4G"));
  assertTrue(cr2.isSuccess());
  metaClient = HoodieTableMetaClient.reload(HoodieCLI.getTableMetaClient());
  HoodieActiveTimeline rollbackTimeline2 = new RollbacksCommand.RollbackTimeline(metaClient);
  // Message fixed to match the asserted count (was "2 rollback instant" vs expected 1).
  // NOTE(review): only 1 rollback instant remains here — presumably the first
  // rollback instant is archived/cleaned by the second rollback; verify intent.
  assertEquals(1, rollbackTimeline2.getRollbackTimeline().countInstants(), "There should have 1 rollback instant.");
  HoodieActiveTimeline timeline2 = metaClient.reloadActiveTimeline();
  // Message fixed to match the asserted count (was "1 instants" vs expected 2).
  assertEquals(2, timeline2.getCommitsTimeline().countInstants(), "There should have 2 instants.");
}
Usage of org.apache.hudi.common.table.timeline.HoodieActiveTimeline in the Apache Hudi project: class ITTestCompactionCommand, method testScheduleCompact.
/**
 * Test case for command 'compaction schedule'.
 *
 * <p>Seeds the table with commits, schedules a compaction through the CLI
 * shell, and checks that exactly one pending compaction instant was created.
 */
@Test
public void testScheduleCompact() throws IOException {
  // Seed the table with some commits first.
  generateCommits();
  String scheduleCmd = String.format(
      "compaction schedule --hoodieConfigs hoodie.compact.inline.max.delta.commits=1 --sparkMaster %s", "local");
  CommandResult result = getShell().executeCommand(scheduleCmd);
  assertAll("Command run failed",
      () -> assertTrue(result.isSuccess()),
      () -> assertTrue(result.getResult().toString().startsWith("Attempted to schedule compaction for")));
  // Exactly one compaction should now be sitting in the requested state.
  HoodieActiveTimeline activeTimeline = HoodieCLI.getTableMetaClient().getActiveTimeline();
  assertEquals(1, activeTimeline.filterPendingCompactionTimeline().countInstants());
}
Usage of org.apache.hudi.common.table.timeline.HoodieActiveTimeline in the Apache Hudi project: class TestCleansCommand, method getLatestCleanTimeTakenInMillis.
/**
 * Returns the time taken (in milliseconds) by the most recent completed clean
 * instant, or {@code -1L} when no completed clean exists on the timeline.
 */
private Long getLatestCleanTimeTakenInMillis() throws IOException {
  HoodieTimeline completedCleans =
      HoodieCLI.getTableMetaClient().getActiveTimeline().getCleanerTimeline().filterCompletedInstants();
  // Reverse order puts the newest instant first.
  HoodieInstant latestClean = completedCleans.getReverseOrderedInstants().findFirst().orElse(null);
  if (latestClean == null) {
    return -1L;
  }
  return TimelineMetadataUtils
      .deserializeHoodieCleanMetadata(completedCleans.getInstantDetails(latestClean).get())
      .getTimeTakenInMillis();
}
Usage of org.apache.hudi.common.table.timeline.HoodieActiveTimeline in the Apache Hudi project: class CleansCommand, method showCleans.
@CliCommand(value = "cleans show", help = "Show the cleans")
public String showCleans(
    @CliOption(key = { "limit" }, help = "Limit commits", unspecifiedDefaultValue = "-1") final Integer limit,
    @CliOption(key = { "sortBy" }, help = "Sorting Field", unspecifiedDefaultValue = "") final String sortByField,
    @CliOption(key = { "desc" }, help = "Ordering", unspecifiedDefaultValue = "false") final boolean descending,
    @CliOption(key = { "headeronly" }, help = "Print Header Only", unspecifiedDefaultValue = "false") final boolean headerOnly)
    throws IOException {
  // Collect completed clean instants, newest first.
  HoodieTimeline cleanerTimeline =
      HoodieCLI.getTableMetaClient().getActiveTimeline().getCleanerTimeline().filterCompletedInstants();
  List<HoodieInstant> completedCleans = cleanerTimeline.getReverseOrderedInstants().collect(Collectors.toList());
  // One table row per clean: timestamp, retention boundary, delete count, duration.
  List<Comparable[]> rows = new ArrayList<>();
  for (HoodieInstant cleanInstant : completedCleans) {
    HoodieCleanMetadata metadata =
        TimelineMetadataUtils.deserializeHoodieCleanMetadata(cleanerTimeline.getInstantDetails(cleanInstant).get());
    rows.add(new Comparable[] {
        cleanInstant.getTimestamp(),
        metadata.getEarliestCommitToRetain(),
        metadata.getTotalFilesDeleted(),
        metadata.getTimeTakenInMillis()});
  }
  TableHeader header = new TableHeader()
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_CLEAN_TIME)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_EARLIEST_COMMAND_RETAINED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_DELETED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_TIME_TAKEN);
  return HoodiePrintHelper.print(header, new HashMap<>(), sortByField, descending, limit, headerOnly, rows);
}
Aggregations