Usage of org.apache.hudi.cli.TableHeader in project hudi by apache.
From class TestCommitsCommand, method generateExpectData.
/**
 * Builds the table output expected from the 'commits show' command for the given
 * per-commit record counts, rendered through the same {@link HoodiePrintHelper} path
 * the CLI uses.
 *
 * @param records number of rows expected per commit key
 * @param data map of commit time to {inserted, updated} record counts (per partition)
 * @return the rendered expected table as a string
 * @throws IOException if listing the table's partition folders fails
 */
private String generateExpectData(int records, Map<String, Integer[]> data) throws IOException {
  FileSystem fileSystem = FileSystem.get(hadoopConf());
  List<String> partitionPaths = FSUtils.getAllPartitionFoldersThreeLevelsDown(fileSystem, tablePath1);
  int partitionCount = partitionPaths.size();
  // With a non-null pre-commit, files added is always 0 and files updated equals the
  // number of partitions.
  final int filesAdded = 0;
  final int filesUpdated = partitionCount;
  final int errorCount = 0;
  // Build the expected rows: one row per record for every commit key. Per-partition
  // values are scaled by the partition count because writes span all partitions.
  List<Comparable[]> expectedRows = new ArrayList<>();
  for (Map.Entry<String, Integer[]> entry : data.entrySet()) {
    String commitTime = entry.getKey();
    Integer[] counts = entry.getValue();
    for (int i = 0; i < records; i++) {
      expectedRows.add(new Comparable[] {commitTime,
          partitionCount * HoodieTestCommitMetadataGenerator.DEFAULT_TOTAL_WRITE_BYTES,
          filesAdded, filesUpdated, partitionCount,
          partitionCount * counts[0], partitionCount * counts[1], errorCount});
    }
  }
  // Render "total bytes written" in human-readable form, matching the CLI's converter.
  final Map<String, Function<Object, String>> converters = new HashMap<>();
  converters.put(HoodieTableHeaderFields.HEADER_TOTAL_BYTES_WRITTEN,
      cell -> NumericUtils.humanReadableByteCount(Double.valueOf(cell.toString())));
  // Column order must match the command's output exactly.
  final TableHeader header = new TableHeader()
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_COMMIT_TIME)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_BYTES_WRITTEN)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_ADDED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_UPDATED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_PARTITIONS_WRITTEN)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_RECORDS_WRITTEN)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_UPDATE_RECORDS_WRITTEN)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_ERRORS);
  return HoodiePrintHelper.print(header, converters, "", false, -1, false, expectedRows);
}
Usage of org.apache.hudi.cli.TableHeader in project hudi by apache.
From class TestCommitsCommand, method testShowCommitPartitions.
/**
 * Test case of 'commit showpartitions' command: runs the command against commit "101"
 * and checks its output matches a table rendered through the same print path.
 */
@Test
public void testShowCommitPartitions() throws Exception {
  Map<String, Integer[]> data = generateData();
  String commitInstant = "101";
  CommandResult cr = shell().executeCommand(String.format("commit showpartitions --commit %s", commitInstant));
  assertTrue(cr.isSuccess());
  Integer[] value = data.get(commitInstant);
  List<Comparable[]> rows = new ArrayList<>();
  // prevCommit is not null, so each partition reports 0 files added and 1 file updated.
  // Build expected rows with a plain loop rather than a side-effecting stream forEach.
  for (String partition : Arrays.asList(HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH,
      HoodieTestDataGenerator.DEFAULT_SECOND_PARTITION_PATH)) {
    rows.add(new Comparable[] {HoodieTimeline.COMMIT_ACTION, partition, 0, 1, 0, value[1],
        HoodieTestCommitMetadataGenerator.DEFAULT_TOTAL_WRITE_BYTES, 0});
  }
  // Render "total bytes written" in human-readable form, matching the CLI's converter.
  Map<String, Function<Object, String>> fieldNameToConverterMap = new HashMap<>();
  fieldNameToConverterMap.put(HoodieTableHeaderFields.HEADER_TOTAL_BYTES_WRITTEN,
      entry -> NumericUtils.humanReadableByteCount(Long.parseLong(entry.toString())));
  // Column order must match the command's output exactly.
  TableHeader header = new TableHeader()
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_ACTION)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_PARTITION_PATH)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_ADDED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_UPDATED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_RECORDS_INSERTED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_RECORDS_UPDATED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_BYTES_WRITTEN)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_ERRORS);
  String expected = HoodiePrintHelper.print(header, fieldNameToConverterMap, "", false, -1, false, rows);
  // Strip formatting noise before comparing, since column padding may differ.
  expected = removeNonWordAndStripSpace(expected);
  String got = removeNonWordAndStripSpace(cr.getResult().toString());
  assertEquals(expected, got);
}
Usage of org.apache.hudi.cli.TableHeader in project hudi by apache.
From class TestCommitsCommand, method testShowCommitFiles.
/**
 * Test case of 'commit showfiles' command: runs the command against commit "101"
 * and checks its output matches a table rendered through the same print path.
 */
@Test
public void testShowCommitFiles() throws Exception {
  Map<String, Integer[]> data = generateData();
  String commitInstant = "101";
  CommandResult cr = shell().executeCommand(String.format("commit showfiles --commit %s", commitInstant));
  assertTrue(cr.isSuccess());
  Integer[] value = data.get(commitInstant);
  List<Comparable[]> rows = new ArrayList<>();
  // Build expected rows with a plain loop rather than a side-effecting stream forEach.
  for (String partition : Arrays.asList(HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH,
      HoodieTestDataGenerator.DEFAULT_SECOND_PARTITION_PATH)) {
    rows.add(new Comparable[] {HoodieTimeline.COMMIT_ACTION, partition,
        HoodieTestCommitMetadataGenerator.DEFAULT_FILEID,
        HoodieTestCommitMetadataGenerator.DEFAULT_PRE_COMMIT,
        value[1], value[0],
        HoodieTestCommitMetadataGenerator.DEFAULT_TOTAL_WRITE_BYTES,
        // default 0 errors and blank file with 0 size
        0, 0});
  }
  // Column order must match the command's output exactly.
  TableHeader header = new TableHeader()
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_ACTION)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_PARTITION_PATH)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_FILE_ID)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_PREVIOUS_COMMIT)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_RECORDS_UPDATED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_RECORDS_WRITTEN)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_BYTES_WRITTEN)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_ERRORS)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_FILE_SIZE);
  String expected = HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows);
  // Strip formatting noise before comparing, since column padding may differ.
  expected = removeNonWordAndStripSpace(expected);
  String got = removeNonWordAndStripSpace(cr.getResult().toString());
  assertEquals(expected, got);
}
Usage of org.apache.hudi.cli.TableHeader in project hudi by apache.
From class TestCompactionCommand, method testCompactionsShowArchived.
/**
 * Test case for command 'compactions showarchived': archives generated compaction
 * instants, runs the command over the [001, 005] range, and compares against the
 * expected rendered table.
 */
@Test
public void testCompactionsShowArchived() throws IOException {
  generateCompactionInstances();
  generateArchive();
  CommandResult cr = shell().executeCommand("compactions showarchived --startTs 001 --endTs 005");
  // Fix: sibling tests assert command success before inspecting output; do the same here
  // so a failed command surfaces directly instead of as a confusing diff.
  assertTrue(cr.isSuccess());
  // Expected file counts per archived compaction instant.
  Map<String, Integer> fileMap = new HashMap<>();
  fileMap.put("001", 1);
  fileMap.put("003", 2);
  fileMap.put("005", 3);
  // The command lists instants newest-first, hence the reversed order here.
  List<Comparable[]> rows = Arrays.asList("005", "003", "001").stream()
      .map(instant -> new Comparable[] {instant, HoodieInstant.State.COMPLETED, fileMap.get(instant)})
      .collect(Collectors.toList());
  Map<String, Function<Object, String>> fieldNameToConverterMap = new HashMap<>();
  TableHeader header = new TableHeader()
      .addTableHeaderField("Compaction Instant Time")
      .addTableHeaderField("State")
      .addTableHeaderField("Total FileIds to be Compacted");
  String expected = HoodiePrintHelper.print(header, fieldNameToConverterMap, "", false, -1, false, rows);
  // Strip formatting noise before comparing, since column padding may differ.
  expected = removeNonWordAndStripSpace(expected);
  String got = removeNonWordAndStripSpace(cr.getResult().toString());
  assertEquals(expected, got);
}
Usage of org.apache.hudi.cli.TableHeader in project hudi by apache.
From class TestFileSystemViewCommand, method testShowLatestFileSlices.
/**
 * Test case for command 'show fsview latest': queries the latest file slices for a
 * partition and verifies the command output matches a table built from the same view.
 */
@Test
public void testShowLatestFileSlices() {
  // Test show with partition path '2016/03/15'
  CommandResult cr = shell().executeCommand("show fsview latest --partitionPath " + partitionPath);
  assertTrue(cr.isSuccess());
  List<Comparable[]> rows = new ArrayList<>();
  fsView.getLatestFileSlices(partitionPath).forEach(slice -> {
    // For base file only Views, do not display any delta-file related columns.
    // Cell order below must match the header field order exactly.
    List<Comparable> cells = new ArrayList<>();
    cells.add(partitionPath);
    cells.add(slice.getFileId());
    cells.add(slice.getBaseInstantTime());
    cells.add(slice.getBaseFile().isPresent() ? slice.getBaseFile().get().getPath() : "");
    // -1 marks "no base file"; also used below to skip ratio computation.
    long baseFileSize = slice.getBaseFile().isPresent() ? slice.getBaseFile().get().getFileSize() : -1;
    cells.add(baseFileSize);
    cells.add(slice.getLogFiles().count());
    cells.add(slice.getLogFiles().mapToLong(HoodieLogFile::getFileSize).sum());
    // Log files whose base commit equals the slice's base instant are scheduled for
    // compaction; the rest are unscheduled.
    long scheduledLogSize = slice.getLogFiles()
        .filter(logFile -> logFile.getBaseCommitTime().equals(slice.getBaseInstantTime()))
        .mapToLong(HoodieLogFile::getFileSize).sum();
    cells.add(scheduledLogSize);
    long unscheduledLogSize = slice.getLogFiles()
        .filter(logFile -> !logFile.getBaseCommitTime().equals(slice.getBaseInstantTime()))
        .mapToLong(HoodieLogFile::getFileSize).sum();
    cells.add(unscheduledLogSize);
    // Ratios are -1 when there is no base file to compare against.
    cells.add(baseFileSize > 0 ? scheduledLogSize / (baseFileSize * 1.0) : -1);
    cells.add(baseFileSize > 0 ? unscheduledLogSize / (baseFileSize * 1.0) : -1);
    cells.add(slice.getLogFiles()
        .filter(logFile -> logFile.getBaseCommitTime().equals(slice.getBaseInstantTime()))
        .collect(Collectors.toList()).toString());
    cells.add(slice.getLogFiles()
        .filter(logFile -> !logFile.getBaseCommitTime().equals(slice.getBaseInstantTime()))
        .collect(Collectors.toList()).toString());
    rows.add(cells.toArray(new Comparable[0]));
  });
  // All size columns share one human-readable-bytes converter.
  Function<Object, String> converterFunction =
      entry -> NumericUtils.humanReadableByteCount(Double.parseDouble(entry.toString()));
  Map<String, Function<Object, String>> fieldNameToConverterMap = new HashMap<>();
  fieldNameToConverterMap.put(HoodieTableHeaderFields.HEADER_DATA_FILE_SIZE, converterFunction);
  fieldNameToConverterMap.put(HoodieTableHeaderFields.HEADER_TOTAL_DELTA_SIZE, converterFunction);
  fieldNameToConverterMap.put(HoodieTableHeaderFields.HEADER_DELTA_SIZE_SCHEDULED, converterFunction);
  fieldNameToConverterMap.put(HoodieTableHeaderFields.HEADER_DELTA_SIZE_UNSCHEDULED, converterFunction);
  TableHeader header = new TableHeader()
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_PARTITION)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_FILE_ID)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_BASE_INSTANT)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_DATA_FILE)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_DATA_FILE_SIZE)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_NUM_DELTA_FILES)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_DELTA_SIZE)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_DELTA_SIZE_SCHEDULED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_DELTA_SIZE_UNSCHEDULED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_DELTA_BASE_SCHEDULED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_DELTA_BASE_UNSCHEDULED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_DELTA_FILES_SCHEDULED)
      .addTableHeaderField(HoodieTableHeaderFields.HEADER_DELTA_FILES_UNSCHEDULED);
  String expected = HoodiePrintHelper.print(header, fieldNameToConverterMap, "", false, -1, false, rows);
  // Strip formatting noise before comparing, since column padding may differ.
  expected = removeNonWordAndStripSpace(expected);
  String got = removeNonWordAndStripSpace(cr.getResult().toString());
  assertEquals(expected, got);
}
Aggregations