Use of org.apache.hudi.cli.TableHeader in project hudi by apache: class TestFileSystemViewCommand, method testShowCommits.
/**
* Test case for 'show fsview all'.
*/
@Test
public void testShowCommits() {
  // Test default show fsview all
  CommandResult cr = shell().executeCommand("show fsview all");
  assertTrue(cr.isSuccess());

  // Get all file groups
  Stream<HoodieFileGroup> fileGroups = fsView.getAllFileGroups(partitionPath);
  List<Comparable[]> rows = new ArrayList<>();
  fileGroups.forEach(fg -> fg.getAllFileSlices().forEach(fs -> {
    int idx = 0;
    // For base file only Views, do not display any delta-file related columns
    Comparable[] row = new Comparable[8];
    row[idx++] = fg.getPartitionPath();
    row[idx++] = fg.getFileGroupId().getFileId();
    row[idx++] = fs.getBaseInstantTime();
    row[idx++] = fs.getBaseFile().isPresent() ? fs.getBaseFile().get().getPath() : "";
    row[idx++] = fs.getBaseFile().isPresent() ? fs.getBaseFile().get().getFileSize() : -1;
    row[idx++] = fs.getLogFiles().count();
    row[idx++] = fs.getLogFiles().mapToLong(HoodieLogFile::getFileSize).sum();
    row[idx++] = fs.getLogFiles().collect(Collectors.toList()).toString();
    rows.add(row);
  }));

  Function<Object, String> converterFunction =
      entry -> NumericUtils.humanReadableByteCount(Double.parseDouble(entry.toString()));
  Map<String, Function<Object, String>> fieldNameToConverterMap = new HashMap<>();
  fieldNameToConverterMap.put(HoodieTableHeaderFields.HEADER_TOTAL_DELTA_FILE_SIZE, converterFunction);
  fieldNameToConverterMap.put(HoodieTableHeaderFields.HEADER_DATA_FILE_SIZE, converterFunction);

  TableHeader header = new TableHeader()
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_PARTITION)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_FILE_ID)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_BASE_INSTANT)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_DATA_FILE)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_DATA_FILE_SIZE)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_NUM_DELTA_FILES)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_DELTA_FILE_SIZE)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_DELTA_FILES);

  String expected = HoodiePrintHelper.print(header, fieldNameToConverterMap, "", false, -1, false, rows);
  expected = removeNonWordAndStripSpace(expected);
  String got = removeNonWordAndStripSpace(cr.getResult().toString());
  assertEquals(expected, got);
}
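The assertion works by rebuilding the exact table the CLI prints: HoodiePrintHelper.print(...) takes the header, a map from column name to a per-cell converter, a sort column (empty string for none), a descending flag, a row limit (-1 for unlimited), a header-only flag, and the rows. Below is a minimal standalone sketch of the converter mechanism, assuming addTableHeaderField accepts arbitrary column-name strings; the "File"/"Size" names and the demo row are invented for illustration:

  List<Comparable[]> demoRows = new ArrayList<>();
  demoRows.add(new Comparable[] {"f1.parquet", 434370L});

  Map<String, Function<Object, String>> converters = new HashMap<>();
  // Only cells in the "Size" column pass through the converter.
  converters.put("Size",
      entry -> NumericUtils.humanReadableByteCount(Double.parseDouble(entry.toString())));

  TableHeader demoHeader = new TableHeader()
      .addTableHeaderField("File")
      .addTableHeaderField("Size");

  // Same print(...) arguments as the test: no sort column, ascending, no limit, rows included.
  String table = HoodiePrintHelper.print(demoHeader, converters, "", false, -1, false, demoRows);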
Use of org.apache.hudi.cli.TableHeader in project hudi by apache: class TestHoodieLogFileCommand, method testShowLogFileCommits.
/**
* Test case for 'show logfile metadata'.
*/
@Test
public void testShowLogFileCommits() throws JsonProcessingException {
  CommandResult cr = shell().executeCommand("show logfile metadata --logFilePathPattern " + partitionPath + "/*");
  assertTrue(cr.isSuccess());

  TableHeader header = new TableHeader()
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_INSTANT_TIME)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_RECORD_COUNT)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_BLOCK_TYPE)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_HEADER_METADATA)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_FOOTER_METADATA);

  // Construct the expected result; there is only one row.
  List<Comparable[]> rows = new ArrayList<>();
  ObjectMapper objectMapper = new ObjectMapper();
  String headerStr = objectMapper.writeValueAsString(dataBlock.getLogBlockHeader());
  String footerStr = objectMapper.writeValueAsString(dataBlock.getLogBlockFooter());
  Comparable[] output = new Comparable[] {INSTANT_TIME, 100, dataBlock.getBlockType(), headerStr, footerStr};
  rows.add(output);

  String expected = HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows);
  expected = removeNonWordAndStripSpace(expected);
  String got = removeNonWordAndStripSpace(cr.getResult().toString());
  assertEquals(expected, got);
}
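The last two columns hold the log block's header and footer metadata serialized to JSON with Jackson, which is why the method declares throws JsonProcessingException. The serialization step in isolation, with a plain map standing in for the real value of dataBlock.getLogBlockHeader() (the map contents below are invented):

  ObjectMapper mapper = new ObjectMapper();
  // Stand-in for the block metadata returned by dataBlock.getLogBlockHeader().
  Map<String, String> blockHeader = Collections.singletonMap("INSTANT_TIME", "100");
  String headerJson = mapper.writeValueAsString(blockHeader); // e.g. {"INSTANT_TIME":"100"}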
Use of org.apache.hudi.cli.TableHeader in project hudi by apache: class TestRollbacksCommand, method testShowRollback.
/**
* Test case for command 'show rollback'.
*/
@Test
public void testShowRollback() throws IOException {
  // Get the first completed rollback instant
  HoodieActiveTimeline activeTimeline = new RollbacksCommand.RollbackTimeline(HoodieCLI.getTableMetaClient());
  Stream<HoodieInstant> rollback = activeTimeline.getRollbackTimeline().filterCompletedInstants().getInstants();
  HoodieInstant instant = rollback.findFirst().orElse(null);
  assertNotNull(instant, "The instant can not be null.");

  CommandResult cr = shell().executeCommand("show rollback --instant " + instant.getTimestamp());
  assertTrue(cr.isSuccess());

  List<Comparable[]> rows = new ArrayList<>();
  // Get the metadata of the instant
  HoodieRollbackMetadata metadata = TimelineMetadataUtils.deserializeAvroMetadata(
      activeTimeline.getInstantDetails(instant).get(), HoodieRollbackMetadata.class);
  // Generate the expected result: one row per deleted (or failed-to-delete) file
  metadata.getPartitionMetadata().forEach((key, value) -> Stream.concat(
          value.getSuccessDeleteFiles().stream().map(f -> Pair.of(f, true)),
          value.getFailedDeleteFiles().stream().map(f -> Pair.of(f, false)))
      .forEach(fileWithDeleteStatus -> {
        Comparable[] row = new Comparable[5];
        row[0] = metadata.getStartRollbackTime();
        row[1] = metadata.getCommitsRollback().toString();
        row[2] = key;
        row[3] = fileWithDeleteStatus.getLeft();
        row[4] = fileWithDeleteStatus.getRight();
        rows.add(row);
      }));

  TableHeader header = new TableHeader()
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_INSTANT)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_ROLLBACK_INSTANT)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_PARTITION)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_DELETED_FILE)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_SUCCEEDED);
  String expected = HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows);
  expected = removeNonWordAndStripSpace(expected);
  String got = removeNonWordAndStripSpace(cr.getResult().toString());
  assertEquals(expected, got);
}
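Each partition in the rollback metadata contributes one row per touched file, and the success flag records which list the file came from. The Stream.concat/Pair idiom in isolation (file names invented):

  List<String> succeeded = Arrays.asList("f1.parquet");
  List<String> failed = Arrays.asList("f2.parquet");
  Stream.concat(
          succeeded.stream().map(f -> Pair.of(f, true)),
          failed.stream().map(f -> Pair.of(f, false)))
      .forEach(p -> System.out.println(p.getLeft() + " deleted=" + p.getRight()));

This keeps successful and failed deletions in a single pass over one combined stream rather than two separate loops building rows.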
Use of org.apache.hudi.cli.TableHeader in project hudi by apache: class TestRollbacksCommand, method testShowRollbacks.
/**
* Test case for command 'show rollbacks'.
*/
@Test
public void testShowRollbacks() {
  CommandResult cr = shell().executeCommand("show rollbacks");
  assertTrue(cr.isSuccess());

  // Get the completed rollback instants
  HoodieActiveTimeline activeTimeline = new RollbacksCommand.RollbackTimeline(HoodieCLI.getTableMetaClient());
  Stream<HoodieInstant> rollback = activeTimeline.getRollbackTimeline().filterCompletedInstants().getInstants();

  List<Comparable[]> rows = new ArrayList<>();
  rollback.sorted().forEach(instant -> {
    try {
      // Pair each rollback time with the instant time it rolled back
      HoodieRollbackMetadata metadata = TimelineMetadataUtils.deserializeAvroMetadata(
          activeTimeline.getInstantDetails(instant).get(), HoodieRollbackMetadata.class);
      metadata.getCommitsRollback().forEach(c -> {
        Comparable[] row = new Comparable[5];
        row[0] = metadata.getStartRollbackTime();
        row[1] = c;
        // Hard-coded expected values: 3 files deleted across 3 partitions
        row[2] = 3;
        row[3] = metadata.getTimeTakenInMillis();
        row[4] = 3;
        rows.add(row);
      });
    } catch (IOException e) {
      e.printStackTrace();
    }
  });

  TableHeader header = new TableHeader()
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_INSTANT)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_ROLLBACK_INSTANT)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_DELETED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TIME_TOKEN_MILLIS)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_PARTITIONS);
  String expected = HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows);
  expected = removeNonWordAndStripSpace(expected);
  String got = removeNonWordAndStripSpace(cr.getResult().toString());
  assertEquals(expected, got);
}
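Note the try/catch inside the lambda: deserializeAvroMetadata throws a checked IOException, which a forEach lambda cannot propagate, and printStackTrace quietly drops that instant's rows, so a deserialization failure would only surface later as a confusing table mismatch. One alternative (a sketch, not what the test does) is a drop-in replacement for the catch block that rethrows unchecked so the test fails at the real cause:

  } catch (IOException e) {
    // Fail loudly instead of printing and continuing with missing rows;
    // java.io.UncheckedIOException needs no extra dependency.
    throw new UncheckedIOException(e);
  }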
Use of org.apache.hudi.cli.TableHeader in project hudi by apache: class TestStatsCommand, method testFileSizeStats.
/**
* Test case for command 'stats filesizes'.
*/
@Test
public void testFileSizeStats() throws Exception {
  String commit1 = "100";
  String commit2 = "101";
  Map<String, Integer[]> data = new LinkedHashMap<>();
  data.put(commit1, new Integer[] {100, 120, 150});
  data.put(commit2, new Integer[] {200, 180, 250, 300});

  // Generate base files with the sizes above across three partitions
  String partition1 = HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH;
  String partition2 = HoodieTestDataGenerator.DEFAULT_SECOND_PARTITION_PATH;
  String partition3 = HoodieTestDataGenerator.DEFAULT_THIRD_PARTITION_PATH;
  HoodieTestTable testTable = HoodieTestTable.of(HoodieCLI.getTableMetaClient());
  Integer[] data1 = data.get(commit1);
  assertTrue(3 <= data1.length);
  testTable.addCommit(commit1)
      .withBaseFilesInPartition(partition1, data1[0])
      .withBaseFilesInPartition(partition2, data1[1])
      .withBaseFilesInPartition(partition3, data1[2]);
  Integer[] data2 = data.get(commit2);
  assertTrue(4 <= data2.length);
  testTable.addCommit(commit2)
      .withBaseFilesInPartition(partition1, data2[0])
      .withBaseFilesInPartition(partition2, data2[1], data2[2])
      .withBaseFilesInPartition(partition3, data2[3]);

  CommandResult cr = shell().executeCommand("stats filesizes");
  assertTrue(cr.isSuccess());

  // Rebuild the same histograms the command computes: one per commit plus a global one
  Histogram globalHistogram = new Histogram(new UniformReservoir(StatsCommand.MAX_FILES));
  HashMap<String, Histogram> commitHistoMap = new HashMap<>();
  data.forEach((k, v) -> {
    commitHistoMap.put(k, new Histogram(new UniformReservoir(StatsCommand.MAX_FILES)));
    for (int value : v) {
      commitHistoMap.get(k).update(value);
      globalHistogram.update(value);
    }
  });

  // Generate the expected rows
  List<Comparable[]> rows = new ArrayList<>();
  for (Map.Entry<String, Histogram> entry : commitHistoMap.entrySet()) {
    Snapshot s = entry.getValue().getSnapshot();
    rows.add(new StatsCommand().printFileSizeHistogram(entry.getKey(), s));
  }
  Snapshot s = globalHistogram.getSnapshot();
  rows.add(new StatsCommand().printFileSizeHistogram("ALL", s));

  TableHeader header = new TableHeader()
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_COMMIT_TIME)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_HISTOGRAM_MIN)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_HISTOGRAM_10TH)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_HISTOGRAM_50TH)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_HISTOGRAM_AVG)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_HISTOGRAM_95TH)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_HISTOGRAM_MAX)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_HISTOGRAM_NUM_FILES)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_HISTOGRAM_STD_DEV);
  String expect = HoodiePrintHelper.print(header, new StatsCommand().getFieldNameToConverterMap(), "", false, -1, false, rows);
  expect = removeNonWordAndStripSpace(expect);
  String got = removeNonWordAndStripSpace(cr.getResult().toString());
  assertEquals(expect, got);
}
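The expected numbers come from Dropwizard Metrics: each commit gets a Histogram backed by a UniformReservoir capped at StatsCommand.MAX_FILES samples, and a global histogram accumulates every file size. A standalone sketch of the Snapshot accessors that plausibly back the table's columns (the exact column mapping lives in StatsCommand.printFileSizeHistogram and is assumed here; the reservoir size 1000 is arbitrary):

  Histogram histogram = new Histogram(new UniformReservoir(1000));
  for (long size : new long[] {100, 120, 150}) {
    histogram.update(size);
  }
  Snapshot snap = histogram.getSnapshot();
  long min = snap.getMin();           // Min
  double p10 = snap.getValue(0.10);   // 10th percentile
  double p50 = snap.getMedian();      // 50th percentile
  double avg = snap.getMean();        // Avg
  double p95 = snap.getValue(0.95);   // 95th percentile
  long max = snap.getMax();           // Max
  int numFiles = snap.size();         // NumFiles
  double stdDev = snap.getStdDev();   // StdDev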