Example usage of org.apache.hudi.common.model.HoodieWriteStat in the Apache Hudi project:
class TestIncrementalFSViewSync, method addInstant.
/**
 * Creates a new (delta-)commit instant on the table's timeline and completes it with
 * commit metadata built from generated write stats.
 *
 * @param metaClient  meta client of the table whose active timeline is mutated
 * @param instant     timestamp of the instant to create
 * @param deltaCommit true to create a delta-commit, false for a regular commit
 * @param baseInstant base instant used when generating file names for the write stats
 * @return relative paths of the files recorded in the generated write stats
 * @throws IOException if the commit metadata cannot be serialized or persisted
 */
private List<String> addInstant(HoodieTableMetaClient metaClient, String instant, boolean deltaCommit, String baseInstant) throws IOException {
  List<Pair<String, HoodieWriteStat>> writeStats = generateDataForInstant(baseInstant, instant, deltaCommit);
  HoodieCommitMetadata metadata = new HoodieCommitMetadata();
  writeStats.forEach(e -> metadata.addWriteStat(e.getKey(), e.getValue()));
  // Inflight first, then transition to completed with the serialized metadata,
  // mirroring the normal two-phase commit flow on the timeline.
  HoodieInstant inflightInstant = new HoodieInstant(true, deltaCommit ? HoodieTimeline.DELTA_COMMIT_ACTION : HoodieTimeline.COMMIT_ACTION, instant);
  metaClient.getActiveTimeline().createNewInstant(inflightInstant);
  metaClient.getActiveTimeline().saveAsComplete(inflightInstant, Option.of(metadata.toJsonString().getBytes(StandardCharsets.UTF_8)));
  return writeStats.stream().map(e -> e.getValue().getPath()).collect(Collectors.toList());
}
Example usage of org.apache.hudi.common.model.HoodieWriteStat in the Apache Hudi project:
class TestIncrementalFSViewSync, method generateDataForInstant.
/**
 * Generates a (partition, write-stat) pair for every (partition, fileId) combination,
 * creating an empty marker file on disk for each one (a log file for delta-commits,
 * a base data file otherwise).
 *
 * @param baseInstant base instant a generated log file is anchored on
 * @param instant     instant used in generated file names (parsed as the log version)
 * @param deltaCommit true to create log files, false to create base data files
 * @param fileIds     file ids to generate one file per partition for
 * @return one pair per partition/fileId, with the stat's path set to "partition/fileName"
 */
private List<Pair<String, HoodieWriteStat>> generateDataForInstant(String baseInstant, String instant, boolean deltaCommit, List<String> fileIds) {
  return partitions.stream().flatMap(p -> fileIds.stream().map(f -> {
    try {
      File file = new File(basePath + "/" + p + "/" + (deltaCommit ? FSUtils.makeLogFileName(f, ".log", baseInstant, Integer.parseInt(instant), TEST_WRITE_TOKEN) : FSUtils.makeDataFileName(instant, TEST_WRITE_TOKEN, f)));
      // Ensure the partition directory exists, then create the file; a false return
      // from createNewFile (file already present) is acceptable for these tests.
      file.getParentFile().mkdirs();
      file.createNewFile();
      HoodieWriteStat w = new HoodieWriteStat();
      w.setFileId(f);
      w.setPath(String.format("%s/%s", p, file.getName()));
      return Pair.of(p, w);
    } catch (IOException e) {
      throw new HoodieException(e);
    }
  })).collect(Collectors.toList());
}
Example usage of org.apache.hudi.common.model.HoodieWriteStat in the Apache Hudi project:
class TestTimelineUtils, method getCommitMetadata.
/**
 * Builds serialized commit metadata containing {@code count} synthetic write stats
 * (one per file id 1..count) plus any extra metadata entries, encoded as UTF-8 JSON.
 *
 * @param basePath      table base path used to form each stat's partition path
 * @param partition     partition the stats are registered under
 * @param commitTs      commit timestamp embedded in each generated file path
 * @param count         number of write stats to generate
 * @param extraMetadata additional key/value metadata to attach to the commit
 * @return the commit metadata serialized to JSON as UTF-8 bytes
 * @throws IOException if JSON serialization fails
 */
private byte[] getCommitMetadata(String basePath, String partition, String commitTs, int count, Map<String, String> extraMetadata) throws IOException {
  HoodieCommitMetadata commitMetadata = new HoodieCommitMetadata();
  String extension = metaClient.getTableConfig().getBaseFileFormat().getFileExtension();
  for (int fileNum = 1; fileNum <= count; fileNum++) {
    HoodieWriteStat writeStat = new HoodieWriteStat();
    writeStat.setFileId(String.valueOf(fileNum));
    writeStat.setPartitionPath(Paths.get(basePath, partition).toString());
    writeStat.setPath(commitTs + "." + fileNum + extension);
    commitMetadata.addWriteStat(partition, writeStat);
  }
  extraMetadata.forEach(commitMetadata::addMetadata);
  return commitMetadata.toJsonString().getBytes(StandardCharsets.UTF_8);
}
Example usage of org.apache.hudi.common.model.HoodieWriteStat in the Apache Hudi project:
class HoodieTestTable, method generateHoodieWriteStatForPartition.
/**
 * Builds one write stat per (partition, fileId) entry in the given map.
 * Each pair's value is used as both the total bytes written and the file size.
 *
 * @param partitionToFileIdMap partition -> list of (fileId, fileSize) pairs
 * @param commitTime           commit time used to derive base file names (ignored for bootstrap)
 * @param bootstrap            true to use the fileId as the file name verbatim
 * @return write stats for every entry in the map
 */
public static List<HoodieWriteStat> generateHoodieWriteStatForPartition(Map<String, List<Pair<String, Integer>>> partitionToFileIdMap, String commitTime, boolean bootstrap) {
  List<HoodieWriteStat> stats = new ArrayList<>();
  partitionToFileIdMap.forEach((partition, fileInfos) ->
      fileInfos.forEach(fileInfo -> {
        // Bootstrap entries already carry their final file name; otherwise derive it.
        String fileName = bootstrap ? fileInfo.getKey() : FileCreateUtils.baseFileName(commitTime, fileInfo.getKey());
        HoodieWriteStat stat = new HoodieWriteStat();
        stat.setFileId(fileName);
        stat.setPartitionPath(partition);
        stat.setPath(partition + "/" + fileName);
        stat.setTotalWriteBytes(fileInfo.getValue());
        stat.setFileSizeInBytes(fileInfo.getValue());
        stats.add(stat);
      }));
  return stats;
}
Example usage of org.apache.hudi.common.model.HoodieWriteStat in the Apache Hudi project:
class HoodieTestTable, method generateHoodieWriteStatForPartitionLogFiles.
/**
 * Builds write stats for log files in each partition. Since a log file has a version
 * associated with it, the map values are pairs of (fileId, Integer[2]) where the array
 * holds the file version at index 0 and the file size at index 1.
 *
 * @param partitionToFileIdMap partition -> list of (fileId, [version, size]) pairs; may be null
 * @param commitTime           commit time used to derive log file names (ignored for bootstrap)
 * @param bootstrap            true to use the fileId as the file name verbatim
 * @return write stats for every entry, or an empty list when the map is null
 */
private static List<HoodieWriteStat> generateHoodieWriteStatForPartitionLogFiles(Map<String, List<Pair<String, Integer[]>>> partitionToFileIdMap, String commitTime, boolean bootstrap) {
  List<HoodieWriteStat> stats = new ArrayList<>();
  if (partitionToFileIdMap == null) {
    return stats;
  }
  partitionToFileIdMap.forEach((partition, fileInfos) ->
      fileInfos.forEach(fileInfo -> {
        Integer[] versionAndSize = fileInfo.getValue();
        // versionAndSize[0] = log file version, versionAndSize[1] = file size in bytes.
        String fileName = bootstrap ? fileInfo.getKey() : FileCreateUtils.logFileName(commitTime, fileInfo.getKey(), versionAndSize[0]);
        HoodieWriteStat stat = new HoodieWriteStat();
        stat.setFileId(fileName);
        stat.setPartitionPath(partition);
        stat.setPath(partition + "/" + fileName);
        stat.setTotalWriteBytes(versionAndSize[1]);
        stat.setFileSizeInBytes(versionAndSize[1]);
        stats.add(stat);
      }));
  return stats;
}
Aggregations