Use of org.apache.hudi.avro.model.HoodieFileStatus in project hudi by apache.
From the class TestBootstrapRegexModeSelector, method testModeSelector:
@Test
public void testModeSelector() {
List<String> partitionPaths = Arrays.asList("2020/05/01", "2020/05/02", "2020/05/10", "2020/05/11");
List<Pair<String, List<HoodieFileStatus>>> input = partitionPaths.stream().map(p -> Pair.<String, List<HoodieFileStatus>>of(p, new ArrayList<>())).collect(Collectors.toList());
String regex = "2020/05/1[0-9]";
BootstrapRegexModeSelector regexModeSelector = new BootstrapRegexModeSelector(getConfig(regex, BootstrapMode.FULL_RECORD));
Map<BootstrapMode, List<String>> result = regexModeSelector.select(input);
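// Partitions matching the regex ("2020/05/10", "2020/05/11") are assigned the configured FULL_RECORD mode; the remaining partitions fall back to METADATA_ONLY.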
assertTrue(result.get(BootstrapMode.METADATA_ONLY).contains("2020/05/01"));
assertTrue(result.get(BootstrapMode.METADATA_ONLY).contains("2020/05/02"));
assertTrue(result.get(BootstrapMode.FULL_RECORD).contains("2020/05/10"));
assertTrue(result.get(BootstrapMode.FULL_RECORD).contains("2020/05/11"));
assertEquals(2, result.get(BootstrapMode.METADATA_ONLY).size());
assertEquals(2, result.get(BootstrapMode.FULL_RECORD).size());
regexModeSelector = new BootstrapRegexModeSelector(getConfig(regex, BootstrapMode.METADATA_ONLY));
result = regexModeSelector.select(input);
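// With the configured mode flipped to METADATA_ONLY, the matching partitions and the fallback swap modes.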
assertTrue(result.get(BootstrapMode.FULL_RECORD).contains("2020/05/01"));
assertTrue(result.get(BootstrapMode.FULL_RECORD).contains("2020/05/02"));
assertTrue(result.get(BootstrapMode.METADATA_ONLY).contains("2020/05/10"));
assertTrue(result.get(BootstrapMode.METADATA_ONLY).contains("2020/05/11"));
assertEquals(2, result.get(BootstrapMode.METADATA_ONLY).size());
assertEquals(2, result.get(BootstrapMode.FULL_RECORD).size());
}
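The regex-to-mode grouping exercised by this test can be illustrated with a small standalone sketch; the class and method names below are hypothetical and only mirror the behavior the assertions describe, not Hudi's actual implementation.

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

public class RegexModeSelectionSketch {
  enum Mode { METADATA_ONLY, FULL_RECORD }

  // Partitions whose path matches the regex get the configured mode; all others get the opposite mode.
  static Map<Mode, List<String>> select(List<String> partitions, String regex, Mode modeForMatches) {
    Pattern pattern = Pattern.compile(regex);
    Mode fallback = (modeForMatches == Mode.FULL_RECORD) ? Mode.METADATA_ONLY : Mode.FULL_RECORD;
    return partitions.stream()
        .collect(Collectors.groupingBy(p -> pattern.matcher(p).matches() ? modeForMatches : fallback));
  }

  public static void main(String[] args) {
    List<String> partitions = Arrays.asList("2020/05/01", "2020/05/02", "2020/05/10", "2020/05/11");
    // Expected grouping (map iteration order may vary):
    // METADATA_ONLY -> [2020/05/01, 2020/05/02], FULL_RECORD -> [2020/05/10, 2020/05/11]
    System.out.println(select(partitions, "2020/05/1[0-9]", Mode.FULL_RECORD));
  }
}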
Use of org.apache.hudi.avro.model.HoodieFileStatus in project hudi by apache.
From the class FileStatusUtils, method fromFileStatus:
public static HoodieFileStatus fromFileStatus(FileStatus fileStatus) {
  if (null == fileStatus) {
    return null;
  }
  HoodieFileStatus fStatus = new HoodieFileStatus();
  try {
    fStatus.setPath(fromPath(fileStatus.getPath()));
    fStatus.setLength(fileStatus.getLen());
    fStatus.setIsDir(fileStatus.isDirectory());
    fStatus.setBlockReplication((int) fileStatus.getReplication());
    fStatus.setBlockSize(fileStatus.getBlockSize());
    fStatus.setModificationTime(fileStatus.getModificationTime());
    fStatus.setAccessTime(fileStatus.getModificationTime());
    fStatus.setSymlink(fileStatus.isSymlink() ? fromPath(fileStatus.getSymlink()) : null);
    safeReadAndSetMetadata(fStatus, fileStatus);
  } catch (IOException ioe) {
    throw new HoodieIOException(ioe.getMessage(), ioe);
  }
  return fStatus;
}
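A minimal usage sketch of this converter follows, assuming a Hadoop FileSystem handle and a listable source directory; the directory path is a placeholder, and the FileStatusUtils import shown is an assumption that may differ across Hudi versions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hudi.avro.model.HoodieFileStatus;
import org.apache.hudi.common.bootstrap.FileStatusUtils; // assumed package; adjust to your Hudi version

public class FileStatusConversionSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Placeholder directory; point this at an existing bootstrap source partition.
    for (FileStatus status : fs.listStatus(new Path("/tmp/bootstrap-source/2020/05/01"))) {
      HoodieFileStatus avroStatus = FileStatusUtils.fromFileStatus(status); // null-safe: returns null for null input
      System.out.println(avroStatus.getPath().getUri() + " -> " + avroStatus.getLength() + " bytes");
    }
  }
}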
Use of org.apache.hudi.avro.model.HoodieFileStatus in project hudi by apache.
From the class TestBootstrap, method generateInputBatch:
private static JavaRDD<HoodieRecord> generateInputBatch(JavaSparkContext jsc, List<Pair<String, List<HoodieFileStatus>>> partitionPaths, Schema writerSchema) {
  List<Pair<String, Path>> fullFilePathsWithPartition = partitionPaths.stream()
      .flatMap(p -> p.getValue().stream().map(x -> Pair.of(p.getKey(), FileStatusUtils.toPath(x.getPath()))))
      .collect(Collectors.toList());
  return jsc.parallelize(fullFilePathsWithPartition.stream().flatMap(p -> {
    try {
      Configuration conf = jsc.hadoopConfiguration();
      AvroReadSupport.setAvroReadSchema(conf, writerSchema);
      Iterator<GenericRecord> recIterator = new ParquetReaderIterator(AvroParquetReader.<GenericRecord>builder(p.getValue()).withConf(conf).build());
      return StreamSupport.stream(Spliterators.spliteratorUnknownSize(recIterator, 0), false).map(gr -> {
        try {
          String key = gr.get("_row_key").toString();
          String pPath = p.getKey();
          return new HoodieAvroRecord<>(new HoodieKey(key, pPath), new RawTripTestPayload(gr.toString(), key, pPath, HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA));
        } catch (IOException e) {
          throw new HoodieIOException(e.getMessage(), e);
        }
      });
    } catch (IOException ioe) {
      throw new HoodieIOException(ioe.getMessage(), ioe);
    }
  }).collect(Collectors.toList()));
}
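The iterator-to-stream conversion used above is plain JDK; here is a tiny self-contained illustration of the same Spliterators pattern (the sample data is made up).

import java.util.Arrays;
import java.util.Iterator;
import java.util.Spliterators;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

public class IteratorToStreamSketch {
  public static void main(String[] args) {
    Iterator<String> records = Arrays.asList("rec-1", "rec-2", "rec-3").iterator();
    // Wrap the iterator in a Spliterator of unknown size and expose it as a sequential Stream,
    // the same pattern used to stream GenericRecords out of the Parquet reader above.
    Stream<String> stream = StreamSupport.stream(Spliterators.spliteratorUnknownSize(records, 0), false);
    stream.map(String::toUpperCase).forEach(System.out::println);
  }
}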
Use of org.apache.hudi.avro.model.HoodieFileStatus in project hudi by apache.
From the class TestBootstrap, method checkBootstrapResults:
private void checkBootstrapResults(int totalRecords, Schema schema, String instant, boolean checkNumRawFiles, int expNumInstants, int numVersions, long expTimestamp, long expROTimestamp, boolean isDeltaCommit, List<String> instantsWithValidRecords, boolean validateRecordsForCommitTime) throws Exception {
metaClient.reloadActiveTimeline();
assertEquals(expNumInstants, metaClient.getCommitsTimeline().filterCompletedInstants().countInstants());
assertEquals(instant, metaClient.getActiveTimeline().getCommitsTimeline().filterCompletedInstants().lastInstant().get().getTimestamp());
Dataset<Row> bootstrapped = sqlContext.read().format("parquet").load(basePath);
Dataset<Row> original = sqlContext.read().format("parquet").load(bootstrapBasePath);
bootstrapped.registerTempTable("bootstrapped");
original.registerTempTable("original");
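// Temp views so the record-parity checks below can be written as SQL against both the original and bootstrapped data.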
if (checkNumRawFiles) {
List<HoodieFileStatus> files = BootstrapUtils.getAllLeafFoldersWithFiles(metaClient, metaClient.getFs(), bootstrapBasePath, context).stream().flatMap(x -> x.getValue().stream()).collect(Collectors.toList());
assertEquals(files.size() * numVersions, sqlContext.sql("select distinct _hoodie_file_name from bootstrapped").count());
}
if (!isDeltaCommit) {
String predicate = String.join(", ", instantsWithValidRecords.stream().map(p -> "\"" + p + "\"").collect(Collectors.toList()));
if (validateRecordsForCommitTime) {
assertEquals(totalRecords, sqlContext.sql("select * from bootstrapped where _hoodie_commit_time IN " + "(" + predicate + ")").count());
}
Dataset<Row> missingOriginal = sqlContext.sql("select a._row_key from original a where a._row_key not " + "in (select _hoodie_record_key from bootstrapped)");
assertEquals(0, missingOriginal.count());
Dataset<Row> missingBootstrapped = sqlContext.sql("select a._hoodie_record_key from bootstrapped a " + "where a._hoodie_record_key not in (select _row_key from original)");
assertEquals(0, missingBootstrapped.count());
// sqlContext.sql("select * from bootstrapped").show(10, false);
}
// RO Input Format Read
reloadInputFormats();
List<GenericRecord> records = HoodieMergeOnReadTestUtils.getRecordsUsingInputFormat(jsc.hadoopConfiguration(), FSUtils.getAllPartitionPaths(context, basePath, HoodieMetadataConfig.DEFAULT_METADATA_ENABLE_FOR_READERS, false).stream().map(f -> basePath + "/" + f).collect(Collectors.toList()), basePath, roJobConf, false, schema, TRIP_HIVE_COLUMN_TYPES, false, new ArrayList<>());
assertEquals(totalRecords, records.size());
Set<String> seenKeys = new HashSet<>();
for (GenericRecord r : records) {
assertEquals(r.get("_row_key").toString(), r.get("_hoodie_record_key").toString(), "Record :" + r);
assertEquals(expROTimestamp, ((LongWritable) r.get("timestamp")).get(), 0.1, "Record :" + r);
assertFalse(seenKeys.contains(r.get("_hoodie_record_key").toString()));
seenKeys.add(r.get("_hoodie_record_key").toString());
}
assertEquals(totalRecords, seenKeys.size());
// RT Input Format Read
reloadInputFormats();
seenKeys = new HashSet<>();
records = HoodieMergeOnReadTestUtils.getRecordsUsingInputFormat(jsc.hadoopConfiguration(), FSUtils.getAllPartitionPaths(context, basePath, HoodieMetadataConfig.DEFAULT_METADATA_ENABLE_FOR_READERS, false).stream().map(f -> basePath + "/" + f).collect(Collectors.toList()), basePath, rtJobConf, true, schema, TRIP_HIVE_COLUMN_TYPES, false, new ArrayList<>());
assertEquals(totalRecords, records.size());
for (GenericRecord r : records) {
assertEquals(r.get("_row_key").toString(), r.get("_hoodie_record_key").toString(), "Realtime Record :" + r);
assertEquals(expTimestamp, ((LongWritable) r.get("timestamp")).get(), 0.1, "Realtime Record :" + r);
assertFalse(seenKeys.contains(r.get("_hoodie_record_key").toString()));
seenKeys.add(r.get("_hoodie_record_key").toString());
}
assertEquals(totalRecords, seenKeys.size());
// RO Input Format Read - Project only Hoodie Columns
reloadInputFormats();
records = HoodieMergeOnReadTestUtils.getRecordsUsingInputFormat(jsc.hadoopConfiguration(), FSUtils.getAllPartitionPaths(context, basePath, HoodieMetadataConfig.DEFAULT_METADATA_ENABLE_FOR_READERS, false).stream().map(f -> basePath + "/" + f).collect(Collectors.toList()), basePath, roJobConf, false, schema, TRIP_HIVE_COLUMN_TYPES, true, HoodieRecord.HOODIE_META_COLUMNS);
assertEquals(totalRecords, records.size());
seenKeys = new HashSet<>();
for (GenericRecord r : records) {
assertFalse(seenKeys.contains(r.get("_hoodie_record_key").toString()));
seenKeys.add(r.get("_hoodie_record_key").toString());
}
assertEquals(totalRecords, seenKeys.size());
// RT Input Format Read - Project only Hoodie Columns
reloadInputFormats();
seenKeys = new HashSet<>();
records = HoodieMergeOnReadTestUtils.getRecordsUsingInputFormat(jsc.hadoopConfiguration(), FSUtils.getAllPartitionPaths(context, basePath, HoodieMetadataConfig.DEFAULT_METADATA_ENABLE_FOR_READERS, false).stream().map(f -> basePath + "/" + f).collect(Collectors.toList()), basePath, rtJobConf, true, schema, TRIP_HIVE_COLUMN_TYPES, true, HoodieRecord.HOODIE_META_COLUMNS);
assertEquals(totalRecords, records.size());
for (GenericRecord r : records) {
assertFalse(seenKeys.contains(r.get("_hoodie_record_key").toString()));
seenKeys.add(r.get("_hoodie_record_key").toString());
}
assertEquals(totalRecords, seenKeys.size());
// RO Input Format Read - Project only non-hoodie column
reloadInputFormats();
records = HoodieMergeOnReadTestUtils.getRecordsUsingInputFormat(jsc.hadoopConfiguration(), FSUtils.getAllPartitionPaths(context, basePath, HoodieMetadataConfig.DEFAULT_METADATA_ENABLE_FOR_READERS, false).stream().map(f -> basePath + "/" + f).collect(Collectors.toList()), basePath, roJobConf, false, schema, TRIP_HIVE_COLUMN_TYPES, true, Arrays.asList("_row_key"));
assertEquals(totalRecords, records.size());
seenKeys = new HashSet<>();
for (GenericRecord r : records) {
assertFalse(seenKeys.contains(r.get("_row_key").toString()));
seenKeys.add(r.get("_row_key").toString());
}
assertEquals(totalRecords, seenKeys.size());
// RT Input Format Read - Project only non-hoodie column
reloadInputFormats();
seenKeys = new HashSet<>();
records = HoodieMergeOnReadTestUtils.getRecordsUsingInputFormat(jsc.hadoopConfiguration(), FSUtils.getAllPartitionPaths(context, basePath, HoodieMetadataConfig.DEFAULT_METADATA_ENABLE_FOR_READERS, false).stream().map(f -> basePath + "/" + f).collect(Collectors.toList()), basePath, rtJobConf, true, schema, TRIP_HIVE_COLUMN_TYPES, true, Arrays.asList("_row_key"));
assertEquals(totalRecords, records.size());
for (GenericRecord r : records) {
assertFalse(seenKeys.contains(r.get("_row_key").toString()));
seenKeys.add(r.get("_row_key").toString());
}
assertEquals(totalRecords, seenKeys.size());
}
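The core of the validation above is the two-sided key-parity check between the original and bootstrapped data. A minimal standalone sketch of that idea follows, assuming a local SparkSession; the paths and the class name are illustrative placeholders.

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class KeyParityCheckSketch {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder().master("local[1]").appName("key-parity").getOrCreate();
    // Placeholder paths: point these at the bootstrap source and the resulting Hudi table.
    Dataset<Row> original = spark.read().parquet("/tmp/original");
    Dataset<Row> bootstrapped = spark.read().parquet("/tmp/bootstrapped");
    original.createOrReplaceTempView("original");
    bootstrapped.createOrReplaceTempView("bootstrapped");
    // Keys present in the source but missing from the bootstrapped table (expect 0), and vice versa.
    long missingFromBootstrapped = spark.sql(
        "select a._row_key from original a where a._row_key not in (select _hoodie_record_key from bootstrapped)").count();
    long missingFromOriginal = spark.sql(
        "select a._hoodie_record_key from bootstrapped a where a._hoodie_record_key not in (select _row_key from original)").count();
    System.out.println("missing: " + missingFromBootstrapped + " / " + missingFromOriginal);
    spark.stop();
  }
}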
Use of org.apache.hudi.avro.model.HoodieFileStatus in project hudi by apache.
From the class TestHoodieTableFileSystemView, method testViewForFileSlicesWithAsyncCompaction:
/**
* Helper method to test Views in the presence of concurrent compaction.
*
* @param skipCreatingDataFile if set, first File Slice will not have data-file set. This would simulate inserts going
* directly to log files
* @param isCompactionInFlight if set, compaction was inflight (running) when view was tested first time, otherwise
* compaction was in requested state
* @param expTotalFileSlices Total number of file-slices across file-groups in the partition path
* @param expTotalDataFiles Total number of data-files across file-groups in the partition path
* @param includeInvalidAndInflight Whether view includes inflight and invalid file-groups.
* @param testBootstrap enable Bootstrap and test
* @throws Exception -
*/
protected void testViewForFileSlicesWithAsyncCompaction(boolean skipCreatingDataFile, boolean isCompactionInFlight, int expTotalFileSlices, int expTotalDataFiles, boolean includeInvalidAndInflight, boolean testBootstrap) throws Exception {
if (testBootstrap) {
metaClient = HoodieTestUtils.init(tempDir.toAbsolutePath().toString(), getTableType(), BOOTSTRAP_SOURCE_PATH, testBootstrap);
}
String partitionPath = "2016/05/01";
new File(basePath + "/" + partitionPath).mkdirs();
String fileId = UUID.randomUUID().toString();
String srcName = "part_0000" + metaClient.getTableConfig().getBaseFileFormat().getFileExtension();
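// Synthetic metadata for the external (bootstrap source) file; it is registered in the bootstrap index below and later surfaced as the file-slice's external base file.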
HoodieFileStatus srcFileStatus = HoodieFileStatus.newBuilder()
    .setPath(HoodiePath.newBuilder().setUri(BOOTSTRAP_SOURCE_PATH + partitionPath + "/" + srcName).build())
    .setLength(256 * 1024 * 1024L)
    .setAccessTime(new Date().getTime())
    .setModificationTime(new Date().getTime() + 99999)
    .setBlockReplication(2)
    .setOwner("hudi")
    .setGroup("hudi")
    .setBlockSize(128 * 1024 * 1024L)
    .setPermission(HoodieFSPermission.newBuilder()
        .setUserAction(FsAction.ALL.name())
        .setGroupAction(FsAction.READ.name())
        .setOtherAction(FsAction.NONE.name())
        .setStickyBit(true)
        .build())
    .build();
// if skipCreatingDataFile, then instantTime1 below acts like delta-commit, otherwise it is base-commit
String instantTime1 = testBootstrap && !skipCreatingDataFile ? HoodieTimeline.METADATA_BOOTSTRAP_INSTANT_TS : "1";
String deltaInstantTime1 = "2";
String deltaInstantTime2 = "3";
String dataFileName = null;
if (!skipCreatingDataFile) {
dataFileName = FSUtils.makeDataFileName(instantTime1, TEST_WRITE_TOKEN, fileId);
new File(basePath + "/" + partitionPath + "/" + dataFileName).createNewFile();
}
String fileName1 = FSUtils.makeLogFileName(fileId, HoodieLogFile.DELTA_EXTENSION, instantTime1, 0, TEST_WRITE_TOKEN);
String fileName2 = FSUtils.makeLogFileName(fileId, HoodieLogFile.DELTA_EXTENSION, instantTime1, 1, TEST_WRITE_TOKEN);
new File(basePath + "/" + partitionPath + "/" + fileName1).createNewFile();
new File(basePath + "/" + partitionPath + "/" + fileName2).createNewFile();
HoodieActiveTimeline commitTimeline = metaClient.getActiveTimeline();
HoodieInstant instant1 = new HoodieInstant(true, HoodieTimeline.COMMIT_ACTION, instantTime1);
HoodieInstant deltaInstant2 = new HoodieInstant(true, HoodieTimeline.DELTA_COMMIT_ACTION, deltaInstantTime1);
HoodieInstant deltaInstant3 = new HoodieInstant(true, HoodieTimeline.DELTA_COMMIT_ACTION, deltaInstantTime2);
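// When bootstrapping with a base file, write a bootstrap index entry mapping the file group (fileId) in this partition back to the external source file.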
if (testBootstrap && !skipCreatingDataFile) {
try (IndexWriter writer = new HFileBootstrapIndex(metaClient).createWriter(BOOTSTRAP_SOURCE_PATH)) {
writer.begin();
BootstrapFileMapping mapping = new BootstrapFileMapping(BOOTSTRAP_SOURCE_PATH, partitionPath, partitionPath, srcFileStatus, fileId);
List<BootstrapFileMapping> b = new ArrayList<>();
b.add(mapping);
writer.appendNextPartition(partitionPath, b);
writer.finish();
}
}
saveAsComplete(commitTimeline, instant1, Option.empty());
saveAsComplete(commitTimeline, deltaInstant2, Option.empty());
saveAsComplete(commitTimeline, deltaInstant3, Option.empty());
refreshFsView();
List<FileSlice> fileSlices = rtView.getLatestFileSlices(partitionPath).collect(Collectors.toList());
assertEquals(1, fileSlices.size());
FileSlice fileSlice = fileSlices.get(0);
assertEquals(instantTime1, fileSlice.getBaseInstantTime());
if (!skipCreatingDataFile) {
assertTrue(fileSlice.getBaseFile().isPresent());
checkExternalFile(srcFileStatus, fileSlice.getBaseFile().get().getBootstrapBaseFile(), testBootstrap);
}
String compactionRequestedTime = "4";
String compactDataFileName = FSUtils.makeDataFileName(compactionRequestedTime, TEST_WRITE_TOKEN, fileId);
List<Pair<String, FileSlice>> partitionFileSlicesPairs = new ArrayList<>();
partitionFileSlicesPairs.add(Pair.of(partitionPath, fileSlices.get(0)));
HoodieCompactionPlan compactionPlan = CompactionUtils.buildFromFileSlices(partitionFileSlicesPairs, Option.empty(), Option.empty());
HoodieInstant compactionInstant;
if (isCompactionInFlight) {
// Create a Data-file but this should be skipped by view
new File(basePath + "/" + partitionPath + "/" + compactDataFileName).createNewFile();
compactionInstant = new HoodieInstant(State.INFLIGHT, HoodieTimeline.COMPACTION_ACTION, compactionRequestedTime);
HoodieInstant requested = HoodieTimeline.getCompactionRequestedInstant(compactionInstant.getTimestamp());
commitTimeline.saveToCompactionRequested(requested, TimelineMetadataUtils.serializeCompactionPlan(compactionPlan));
commitTimeline.transitionCompactionRequestedToInflight(requested);
} else {
compactionInstant = new HoodieInstant(State.REQUESTED, HoodieTimeline.COMPACTION_ACTION, compactionRequestedTime);
commitTimeline.saveToCompactionRequested(compactionInstant, TimelineMetadataUtils.serializeCompactionPlan(compactionPlan));
}
// View immediately after scheduling compaction
refreshFsView();
List<FileSlice> slices = rtView.getLatestFileSlices(partitionPath).collect(Collectors.toList());
assertEquals(1, slices.size(), "Expected latest file-slices");
assertEquals(compactionRequestedTime, slices.get(0).getBaseInstantTime(), "Base-Instant must be compaction Instant");
assertFalse(slices.get(0).getBaseFile().isPresent(), "Latest File Slice must not have data-file");
assertEquals(0, slices.get(0).getLogFiles().count(), "Latest File Slice must not have any log-files");
// Fake delta-ingestion after compaction-requested
String deltaInstantTime4 = "5";
String deltaInstantTime5 = "6";
List<String> allInstantTimes = Arrays.asList(instantTime1, deltaInstantTime1, deltaInstantTime2, compactionRequestedTime, deltaInstantTime4, deltaInstantTime5);
String fileName3 = FSUtils.makeLogFileName(fileId, HoodieLogFile.DELTA_EXTENSION, compactionRequestedTime, 0, TEST_WRITE_TOKEN);
String fileName4 = FSUtils.makeLogFileName(fileId, HoodieLogFile.DELTA_EXTENSION, compactionRequestedTime, 1, TEST_WRITE_TOKEN);
new File(basePath + "/" + partitionPath + "/" + fileName3).createNewFile();
new File(basePath + "/" + partitionPath + "/" + fileName4).createNewFile();
HoodieInstant deltaInstant4 = new HoodieInstant(true, HoodieTimeline.DELTA_COMMIT_ACTION, deltaInstantTime4);
HoodieInstant deltaInstant5 = new HoodieInstant(true, HoodieTimeline.DELTA_COMMIT_ACTION, deltaInstantTime5);
saveAsComplete(commitTimeline, deltaInstant4, Option.empty());
saveAsComplete(commitTimeline, deltaInstant5, Option.empty());
refreshFsView();
List<HoodieBaseFile> dataFiles = roView.getAllBaseFiles(partitionPath).collect(Collectors.toList());
if (skipCreatingDataFile) {
assertTrue(dataFiles.isEmpty(), "No data file expected");
} else {
assertEquals(1, dataFiles.size(), "One data-file is expected as there is only one file-group");
assertEquals(dataFileName, dataFiles.get(0).getFileName(), "Expect only valid data-file");
}
// Merge API Tests
List<FileSlice> fileSliceList = rtView.getLatestMergedFileSlicesBeforeOrOn(partitionPath, deltaInstantTime5).collect(Collectors.toList());
assertEquals(1, fileSliceList.size(), "Expect file-slice to be merged");
fileSlice = fileSliceList.get(0);
assertEquals(fileId, fileSlice.getFileId());
if (!skipCreatingDataFile) {
assertEquals(dataFileName, fileSlice.getBaseFile().get().getFileName(), "Data file must be present");
checkExternalFile(srcFileStatus, fileSlice.getBaseFile().get().getBootstrapBaseFile(), testBootstrap);
} else {
assertFalse(fileSlice.getBaseFile().isPresent(), "No data-file expected as it was not created");
}
assertEquals(instantTime1, fileSlice.getBaseInstantTime(), "Base Instant of penultimate file-slice must be base instant");
List<HoodieLogFile> logFiles = fileSlice.getLogFiles().collect(Collectors.toList());
assertEquals(4, logFiles.size(), "Log files must include those after compaction request");
assertEquals(fileName4, logFiles.get(0).getFileName(), "Log File Order check");
assertEquals(fileName3, logFiles.get(1).getFileName(), "Log File Order check");
assertEquals(fileName2, logFiles.get(2).getFileName(), "Log File Order check");
assertEquals(fileName1, logFiles.get(3).getFileName(), "Log File Order check");
fileSliceList = rtView.getLatestFileSlicesBeforeOrOn(partitionPath, deltaInstantTime5, true).collect(Collectors.toList());
assertEquals(1, fileSliceList.size(), "Expect only one file-id");
fileSlice = fileSliceList.get(0);
assertEquals(fileId, fileSlice.getFileId());
assertFalse(fileSlice.getBaseFile().isPresent(), "No data-file expected in latest file-slice");
assertEquals(compactionRequestedTime, fileSlice.getBaseInstantTime(), "Compaction requested instant must be base instant");
logFiles = fileSlice.getLogFiles().collect(Collectors.toList());
assertEquals(2, logFiles.size(), "Log files must include only those after compaction request");
assertEquals(fileName4, logFiles.get(0).getFileName(), "Log File Order check");
assertEquals(fileName3, logFiles.get(1).getFileName(), "Log File Order check");
// Data Files API tests
dataFiles = roView.getLatestBaseFiles().collect(Collectors.toList());
if (skipCreatingDataFile) {
assertEquals(0, dataFiles.size(), "Expect no data file to be returned");
} else {
assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
dataFiles.forEach(df -> assertEquals(df.getCommitTime(), instantTime1, "Expect data-file for instant 1 be returned"));
checkExternalFile(srcFileStatus, dataFiles.get(0).getBootstrapBaseFile(), testBootstrap);
}
dataFiles = roView.getLatestBaseFiles(partitionPath).collect(Collectors.toList());
if (skipCreatingDataFile) {
assertEquals(0, dataFiles.size(), "Expect no data file to be returned");
} else {
assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
dataFiles.forEach(df -> assertEquals(df.getCommitTime(), instantTime1, "Expect data-file for instant 1 be returned"));
checkExternalFile(srcFileStatus, dataFiles.get(0).getBootstrapBaseFile(), testBootstrap);
}
dataFiles = roView.getLatestBaseFilesBeforeOrOn(partitionPath, deltaInstantTime5).collect(Collectors.toList());
if (skipCreatingDataFile) {
assertEquals(0, dataFiles.size(), "Expect no data file to be returned");
} else {
assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
dataFiles.forEach(df -> assertEquals(df.getCommitTime(), instantTime1, "Expect data-file for instant 1 be returned"));
checkExternalFile(srcFileStatus, dataFiles.get(0).getBootstrapBaseFile(), testBootstrap);
}
dataFiles = roView.getLatestBaseFilesInRange(allInstantTimes).collect(Collectors.toList());
if (skipCreatingDataFile) {
assertEquals(0, dataFiles.size(), "Expect no data file to be returned");
} else {
assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
dataFiles.forEach(df -> assertEquals(df.getCommitTime(), instantTime1, "Expect data-file for instant 1 be returned"));
checkExternalFile(srcFileStatus, dataFiles.get(0).getBootstrapBaseFile(), testBootstrap);
}
// Inflight/Orphan File-groups needs to be in the view
// There is a data-file with this inflight file-id
final String inflightFileId1 = UUID.randomUUID().toString();
// There is a log-file with this inflight file-id
final String inflightFileId2 = UUID.randomUUID().toString();
// There is an orphan data file with this file-id
final String orphanFileId1 = UUID.randomUUID().toString();
// There is an orphan log data file with this file-id
final String orphanFileId2 = UUID.randomUUID().toString();
final String invalidInstantId = "INVALIDTIME";
String inflightDeltaInstantTime = "7";
String orphanDataFileName = FSUtils.makeDataFileName(invalidInstantId, TEST_WRITE_TOKEN, orphanFileId1);
new File(basePath + "/" + partitionPath + "/" + orphanDataFileName).createNewFile();
String orphanLogFileName = FSUtils.makeLogFileName(orphanFileId2, HoodieLogFile.DELTA_EXTENSION, invalidInstantId, 0, TEST_WRITE_TOKEN);
new File(basePath + "/" + partitionPath + "/" + orphanLogFileName).createNewFile();
String inflightDataFileName = FSUtils.makeDataFileName(inflightDeltaInstantTime, TEST_WRITE_TOKEN, inflightFileId1);
new File(basePath + "/" + partitionPath + "/" + inflightDataFileName).createNewFile();
String inflightLogFileName = FSUtils.makeLogFileName(inflightFileId2, HoodieLogFile.DELTA_EXTENSION, inflightDeltaInstantTime, 0, TEST_WRITE_TOKEN);
new File(basePath + "/" + partitionPath + "/" + inflightLogFileName).createNewFile();
// Mark instant as inflight
commitTimeline.createNewInstant(new HoodieInstant(State.REQUESTED, HoodieTimeline.DELTA_COMMIT_ACTION, inflightDeltaInstantTime));
commitTimeline.transitionRequestedToInflight(new HoodieInstant(State.REQUESTED, HoodieTimeline.DELTA_COMMIT_ACTION, inflightDeltaInstantTime), Option.empty());
refreshFsView();
List<FileSlice> allRawFileSlices = getAllRawFileSlices(partitionPath).collect(Collectors.toList());
dataFiles = allRawFileSlices.stream().flatMap(slice -> {
if (slice.getBaseFile().isPresent()) {
return Stream.of(slice.getBaseFile().get());
}
return Stream.empty();
}).collect(Collectors.toList());
if (includeInvalidAndInflight) {
assertEquals(2 + (isCompactionInFlight ? 1 : 0) + (skipCreatingDataFile ? 0 : 1), dataFiles.size(), "Inflight/Orphan data-file is also expected");
Set<String> fileNames = dataFiles.stream().map(HoodieBaseFile::getFileName).collect(Collectors.toSet());
assertTrue(fileNames.contains(orphanDataFileName), "Expect orphan data-file to be present");
assertTrue(fileNames.contains(inflightDataFileName), "Expect inflight data-file to be present");
if (!skipCreatingDataFile) {
assertTrue(fileNames.contains(dataFileName), "Expect old committed data-file");
}
if (isCompactionInFlight) {
assertTrue(fileNames.contains(compactDataFileName), "Expect inflight compacted data file to be present");
}
fileSliceList = getLatestRawFileSlices(partitionPath).collect(Collectors.toList());
assertEquals(includeInvalidAndInflight ? 5 : 1, fileSliceList.size(), "Expect both inflight and orphan file-slice to be included");
Map<String, FileSlice> fileSliceMap = fileSliceList.stream().collect(Collectors.toMap(FileSlice::getFileId, r -> r));
FileSlice orphanFileSliceWithDataFile = fileSliceMap.get(orphanFileId1);
FileSlice orphanFileSliceWithLogFile = fileSliceMap.get(orphanFileId2);
FileSlice inflightFileSliceWithDataFile = fileSliceMap.get(inflightFileId1);
FileSlice inflightFileSliceWithLogFile = fileSliceMap.get(inflightFileId2);
assertEquals(invalidInstantId, orphanFileSliceWithDataFile.getBaseInstantTime(), "Orphan File Slice with data-file check base-commit");
assertEquals(orphanDataFileName, orphanFileSliceWithDataFile.getBaseFile().get().getFileName(), "Orphan File Slice with data-file check data-file");
assertEquals(0, orphanFileSliceWithDataFile.getLogFiles().count(), "Orphan File Slice with data-file check data-file");
assertEquals(inflightDeltaInstantTime, inflightFileSliceWithDataFile.getBaseInstantTime(), "Inflight File Slice with data-file check base-commit");
assertEquals(inflightDataFileName, inflightFileSliceWithDataFile.getBaseFile().get().getFileName(), "Inflight File Slice with data-file check data-file");
assertEquals(0, inflightFileSliceWithDataFile.getLogFiles().count(), "Inflight File Slice with data-file check data-file");
assertEquals(invalidInstantId, orphanFileSliceWithLogFile.getBaseInstantTime(), "Orphan File Slice with log-file check base-commit");
assertFalse(orphanFileSliceWithLogFile.getBaseFile().isPresent(), "Orphan File Slice with log-file check data-file");
logFiles = orphanFileSliceWithLogFile.getLogFiles().collect(Collectors.toList());
assertEquals(1, logFiles.size(), "Orphan File Slice with log-file check data-file");
assertEquals(orphanLogFileName, logFiles.get(0).getFileName(), "Orphan File Slice with log-file check data-file");
assertEquals(inflightDeltaInstantTime, inflightFileSliceWithLogFile.getBaseInstantTime(), "Inflight File Slice with log-file check base-commit");
assertFalse(inflightFileSliceWithLogFile.getBaseFile().isPresent(), "Inflight File Slice with log-file check data-file");
logFiles = inflightFileSliceWithLogFile.getLogFiles().collect(Collectors.toList());
assertEquals(1, logFiles.size(), "Inflight File Slice with log-file check data-file");
assertEquals(inflightLogFileName, logFiles.get(0).getFileName(), "Inflight File Slice with log-file check data-file");
}
compactionInstant = new HoodieInstant(State.INFLIGHT, HoodieTimeline.COMPACTION_ACTION, compactionRequestedTime);
// Now simulate Compaction completing - Check the view
if (!isCompactionInFlight) {
// For inflight compaction, we already create a data-file to test concurrent inflight case.
// If we skipped creating data file corresponding to compaction commit, create it now
new File(basePath + "/" + partitionPath + "/" + compactDataFileName).createNewFile();
commitTimeline.createNewInstant(compactionInstant);
}
commitTimeline.saveAsComplete(compactionInstant, Option.empty());
refreshFsView();
// populate the cache
roView.getAllBaseFiles(partitionPath);
fileSliceList = rtView.getLatestFileSlices(partitionPath).collect(Collectors.toList());
LOG.info("FILESLICE LIST=" + fileSliceList);
dataFiles = fileSliceList.stream().map(FileSlice::getBaseFile).filter(Option::isPresent).map(Option::get).collect(Collectors.toList());
assertEquals(1, dataFiles.size(), "Expect only one data-files in latest view as there is only one file-group");
assertEquals(compactDataFileName, dataFiles.get(0).getFileName(), "Data Filename must match");
assertEquals(1, fileSliceList.size(), "Only one latest file-slice in the partition");
assertFalse(dataFiles.get(0).getBootstrapBaseFile().isPresent(), "No external data file must be present");
fileSlice = fileSliceList.get(0);
assertEquals(fileId, fileSlice.getFileId(), "Check file-Id is set correctly");
assertEquals(compactDataFileName, fileSlice.getBaseFile().get().getFileName(), "Check data-filename is set correctly");
assertEquals(compactionRequestedTime, fileSlice.getBaseInstantTime(), "Ensure base-instant is now compaction request instant");
logFiles = fileSlice.getLogFiles().collect(Collectors.toList());
assertEquals(2, logFiles.size(), "Only log-files after compaction request shows up");
assertEquals(fileName4, logFiles.get(0).getFileName(), "Log File Order check");
assertEquals(fileName3, logFiles.get(1).getFileName(), "Log File Order check");
// Data Files API tests
dataFiles = roView.getLatestBaseFiles().collect(Collectors.toList());
assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
assertFalse(dataFiles.get(0).getBootstrapBaseFile().isPresent(), "No external data file must be present");
dataFiles.forEach(df -> {
assertEquals(df.getCommitTime(), compactionRequestedTime, "Expect data-file created by compaction be returned");
assertFalse(df.getBootstrapBaseFile().isPresent(), "No external data file must be present");
});
dataFiles = roView.getLatestBaseFiles(partitionPath).collect(Collectors.toList());
assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
dataFiles.forEach(df -> {
assertEquals(df.getCommitTime(), compactionRequestedTime, "Expect data-file created by compaction be returned");
assertFalse(df.getBootstrapBaseFile().isPresent(), "No external data file must be present");
});
dataFiles = roView.getLatestBaseFilesBeforeOrOn(partitionPath, deltaInstantTime5).collect(Collectors.toList());
assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
dataFiles.forEach(df -> {
assertEquals(df.getCommitTime(), compactionRequestedTime, "Expect data-file created by compaction be returned");
assertFalse(df.getBootstrapBaseFile().isPresent(), "No external data file must be present");
});
dataFiles = roView.getLatestBaseFilesInRange(allInstantTimes).collect(Collectors.toList());
assertEquals(1, dataFiles.size(), "Expect only one data-file to be sent");
dataFiles.forEach(df -> {
assertEquals(df.getCommitTime(), compactionRequestedTime, "Expect data-file created by compaction be returned");
assertFalse(df.getBootstrapBaseFile().isPresent(), "No external data file must be present");
});
assertEquals(expTotalFileSlices, rtView.getAllFileSlices(partitionPath).count(), "Total number of file-slices in partitions matches expected");
assertEquals(expTotalDataFiles, roView.getAllBaseFiles(partitionPath).count(), "Total number of data-files in partitions matches expected");
// file-groups includes inflight/invalid file-ids
assertEquals(5, fsView.getAllFileGroups(partitionPath).count(), "Total number of file-groups in partitions matches expected");
}
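For completeness, a compact sketch of constructing the same Avro HoodieFileStatus outside the test follows, assuming the hudi-common Avro model classes are on the classpath; the URI and sizes are illustrative values, and it is assumed that the fields omitted here are nullable with defaults in the Avro schema.

import org.apache.hudi.avro.model.HoodieFileStatus;
import org.apache.hudi.avro.model.HoodiePath;

public class HoodieFileStatusBuilderSketch {
  public static void main(String[] args) {
    // Only a subset of the fields set in the test above; set the remaining fields
    // explicitly if your HoodieFileStatus schema version requires them.
    HoodieFileStatus status = HoodieFileStatus.newBuilder()
        .setPath(HoodiePath.newBuilder().setUri("file:///tmp/source/2016/05/01/part_0000.parquet").build())
        .setLength(256 * 1024 * 1024L)
        .setModificationTime(System.currentTimeMillis())
        .setBlockSize(128 * 1024 * 1024L)
        .build();
    System.out.println(status.getPath().getUri() + " (" + status.getLength() + " bytes)");
  }
}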