Use of org.apache.hudi.common.table.timeline.HoodieActiveTimeline in project hudi by apache: class CompactionCommand, method compactionsAll.
@CliCommand(value = "compactions show all", help = "Shows all compactions that are in active timeline")
public String compactionsAll(
    @CliOption(key = {"includeExtraMetadata"}, help = "Include extra metadata", unspecifiedDefaultValue = "false") final boolean includeExtraMetadata,
    @CliOption(key = {"limit"}, help = "Limit commits", unspecifiedDefaultValue = "-1") final Integer limit,
    @CliOption(key = {"sortBy"}, help = "Sorting Field", unspecifiedDefaultValue = "") final String sortByField,
    @CliOption(key = {"desc"}, help = "Ordering", unspecifiedDefaultValue = "false") final boolean descending,
    @CliOption(key = {"headeronly"}, help = "Print Header Only", unspecifiedDefaultValue = "false") final boolean headerOnly) {
  HoodieTableMetaClient client = checkAndGetMetaClient();
  HoodieActiveTimeline activeTimeline = client.getActiveTimeline();
  return printAllCompactions(activeTimeline,
      compactionPlanReader(this::readCompactionPlanForActiveTimeline, activeTimeline),
      includeExtraMetadata, sortByField, descending, limit, headerOnly);
}
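For context, a minimal sketch of inspecting the same active timeline outside the CLI command. The base path and the plain Hadoop Configuration are assumptions for illustration only; the pending-compaction filter mirrors what printAllCompactions iterates over.
// Sketch only: assumes org.apache.hadoop.conf.Configuration and a table at an illustrative base path.
HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder()
    .setConf(new Configuration())
    .setBasePath("/tmp/hoodie/sample-table")   // assumed path
    .build();
HoodieActiveTimeline activeTimeline = metaClient.getActiveTimeline();
// List pending compaction instants straight from the active timeline.
activeTimeline.filterPendingCompactionTimeline().getInstants()
    .forEach(instant -> System.out.println(instant.getTimestamp() + " " + instant.getState()));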
Use of org.apache.hudi.common.table.timeline.HoodieActiveTimeline in project hudi by apache: class CompactionCommand, method compactionShow.
@CliCommand(value = "compaction show", help = "Shows compaction details for a specific compaction instant")
public String compactionShow(
    @CliOption(key = "instant", mandatory = true, help = "Instant time of the compaction to show") final String compactionInstantTime,
    @CliOption(key = {"limit"}, help = "Limit commits", unspecifiedDefaultValue = "-1") final Integer limit,
    @CliOption(key = {"sortBy"}, help = "Sorting Field", unspecifiedDefaultValue = "") final String sortByField,
    @CliOption(key = {"desc"}, help = "Ordering", unspecifiedDefaultValue = "false") final boolean descending,
    @CliOption(key = {"headeronly"}, help = "Print Header Only", unspecifiedDefaultValue = "false") final boolean headerOnly) throws Exception {
  HoodieTableMetaClient client = checkAndGetMetaClient();
  HoodieActiveTimeline activeTimeline = client.getActiveTimeline();
  HoodieCompactionPlan compactionPlan = TimelineMetadataUtils.deserializeCompactionPlan(
      activeTimeline.readCompactionPlanAsBytes(
          HoodieTimeline.getCompactionRequestedInstant(compactionInstantTime)).get());
  return printCompaction(compactionPlan, sortByField, descending, limit, headerOnly);
}
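As a follow-up, a hedged sketch of what can be read off the deserialized plan. It reuses the compactionPlan variable from the method above and only relies on the Avro-generated getters of HoodieCompactionPlan and HoodieCompactionOperation.
// Sketch: walk the file groups selected by the plan (operations may be null on an empty plan).
if (compactionPlan.getOperations() != null) {
  compactionPlan.getOperations().forEach(op ->
      System.out.println(op.getPartitionPath() + "/" + op.getFileId()
          + " base instant " + op.getBaseInstantTime()
          + ", " + op.getDeltaFilePaths().size() + " log files"));
}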
Use of org.apache.hudi.common.table.timeline.HoodieActiveTimeline in project hudi by apache: class ExportCommand, method copyNonArchivedInstants.
private int copyNonArchivedInstants(List<HoodieInstant> instants, int limit, String localFolder) throws Exception {
  int copyCount = 0;
  if (instants.isEmpty()) {
    return limit;
  }
  final Logger LOG = LogManager.getLogger(ExportCommand.class);
  final HoodieTableMetaClient metaClient = HoodieCLI.getTableMetaClient();
  final HoodieActiveTimeline timeline = metaClient.getActiveTimeline();
  for (HoodieInstant instant : instants) {
    String localPath = localFolder + Path.SEPARATOR + instant.getFileName();
    byte[] data = null;
    switch (instant.getAction()) {
      case HoodieTimeline.CLEAN_ACTION: {
        HoodieCleanMetadata metadata = TimelineMetadataUtils.deserializeHoodieCleanMetadata(timeline.getInstantDetails(instant).get());
        data = HoodieAvroUtils.avroToJson(metadata, true);
        break;
      }
      case HoodieTimeline.DELTA_COMMIT_ACTION:
      case HoodieTimeline.COMMIT_ACTION:
      case HoodieTimeline.COMPACTION_ACTION: {
        // Already in JSON format
        data = timeline.getInstantDetails(instant).get();
        break;
      }
      case HoodieTimeline.ROLLBACK_ACTION: {
        HoodieRollbackMetadata metadata = TimelineMetadataUtils.deserializeHoodieRollbackMetadata(timeline.getInstantDetails(instant).get());
        data = HoodieAvroUtils.avroToJson(metadata, true);
        break;
      }
      case HoodieTimeline.SAVEPOINT_ACTION: {
        HoodieSavepointMetadata metadata = TimelineMetadataUtils.deserializeHoodieSavepointMetadata(timeline.getInstantDetails(instant).get());
        data = HoodieAvroUtils.avroToJson(metadata, true);
        break;
      }
      default: {
        throw new HoodieException("Unknown type of action " + instant.getAction());
      }
    }
    if (data != null) {
      writeToFile(localPath, data);
      copyCount = copyCount + 1;
    }
  }
  return copyCount;
}
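A hedged caller sketch follows. It assumes this method is reachable on the same command class, that the usual imports (java.util.ArrayList, java.util.List) are present, and that /tmp/hoodie-export is a writable local directory; instant selection is simplified for illustration.
// Hypothetical caller: export all completed commit instants from the active timeline.
HoodieActiveTimeline activeTimeline = HoodieCLI.getTableMetaClient().getActiveTimeline();
List<HoodieInstant> nonArchived = new ArrayList<>();
activeTimeline.getCommitsTimeline().filterCompletedInstants().getInstants().forEach(nonArchived::add);
int exported = copyNonArchivedInstants(nonArchived, 10, "/tmp/hoodie-export");  // assumed local folder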
Use of org.apache.hudi.common.table.timeline.HoodieActiveTimeline in project hudi by apache: class RollbacksCommand, method showRollbacks.
@CliCommand(value = "show rollbacks", help = "List all rollback instants")
public String showRollbacks(
    @CliOption(key = {"limit"}, help = "Limit #rows to be displayed", unspecifiedDefaultValue = "10") Integer limit,
    @CliOption(key = {"sortBy"}, help = "Sorting Field", unspecifiedDefaultValue = "") final String sortByField,
    @CliOption(key = {"desc"}, help = "Ordering", unspecifiedDefaultValue = "false") final boolean descending,
    @CliOption(key = {"headeronly"}, help = "Print Header Only", unspecifiedDefaultValue = "false") final boolean headerOnly) {
  HoodieActiveTimeline activeTimeline = new RollbackTimeline(HoodieCLI.getTableMetaClient());
  HoodieTimeline rollback = activeTimeline.getRollbackTimeline().filterCompletedInstants();
  final List<Comparable[]> rows = new ArrayList<>();
  rollback.getInstants().forEach(instant -> {
    try {
      HoodieRollbackMetadata metadata = TimelineMetadataUtils.deserializeAvroMetadata(
          activeTimeline.getInstantDetails(instant).get(), HoodieRollbackMetadata.class);
      metadata.getCommitsRollback().forEach(c -> {
        Comparable[] row = new Comparable[5];
        row[0] = metadata.getStartRollbackTime();
        row[1] = c;
        row[2] = metadata.getTotalFilesDeleted();
        row[3] = metadata.getTimeTakenInMillis();
        row[4] = metadata.getPartitionMetadata() != null ? metadata.getPartitionMetadata().size() : 0;
        rows.add(row);
      });
    } catch (IOException e) {
      e.printStackTrace();
    }
  });
  TableHeader header = new TableHeader()
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_INSTANT)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_ROLLBACK_INSTANT)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_DELETED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TIME_TOKEN_MILLIS)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_PARTITIONS);
  return HoodiePrintHelper.print(header, new HashMap<>(), sortByField, descending, limit, headerOnly, rows);
}
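A shorter hedged variant, assuming the RollbackTimeline wrapper from the command above is reachable: instead of building a full table, read only the latest completed rollback instant off the timeline.
// Sketch: report the most recent completed rollback, if any.
HoodieTimeline completedRollbacks = new RollbackTimeline(HoodieCLI.getTableMetaClient())
    .getRollbackTimeline().filterCompletedInstants();
if (completedRollbacks.lastInstant().isPresent()) {
  HoodieInstant latest = completedRollbacks.lastInstant().get();
  System.out.println("latest rollback: " + latest.getTimestamp()
      + " (" + completedRollbacks.countInstants() + " completed rollbacks in total)");
}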
Use of org.apache.hudi.common.table.timeline.HoodieActiveTimeline in project hudi by apache: class ITTestClusteringCommand, method testScheduleClustering.
/**
* Test case for command 'clustering schedule'.
*/
@Test
public void testScheduleClustering() throws IOException {
  // generate commits
  generateCommits();
  CommandResult cr = scheduleClustering();
  assertAll("Command run failed",
      () -> assertTrue(cr.isSuccess()),
      () -> assertTrue(cr.getResult().toString().startsWith("Succeeded to schedule clustering for")));
  // there is 1 requested clustering
  HoodieActiveTimeline timeline = HoodieCLI.getTableMetaClient().getActiveTimeline();
  assertEquals(1, timeline.filterPendingReplaceTimeline().countInstants());
}
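A hedged extension of this check, reusing the timeline variable from the test above: the scheduled clustering should surface as a replacecommit instant in REQUESTED state. Option here is org.apache.hudi.common.util.Option.
// Sketch: inspect the single pending clustering instant.
Option<HoodieInstant> pending = timeline.filterPendingReplaceTimeline().firstInstant();
assertTrue(pending.isPresent());
assertEquals(HoodieTimeline.REPLACE_COMMIT_ACTION, pending.get().getAction());
assertEquals(HoodieInstant.State.REQUESTED, pending.get().getState());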