Use of org.apache.hudi.common.model.HoodieTableType in project hudi by apache.
From the class TestHoodieBackedMetadata, method testRollbackOperationsNonPartitioned.
@Test
public void testRollbackOperationsNonPartitioned() throws Exception {
  HoodieTableType tableType = COPY_ON_WRITE;
  init(tableType);
  doWriteInsertAndUpsertNonPartitioned(testTable);
  // trigger an upsert
  doWriteOperationNonPartitioned(testTable, "0000003", UPSERT);
  // trigger a commit and rollback
  doWriteOperationNonPartitioned(testTable, "0000004", UPSERT);
  doRollback(testTable, "0000004", "0000005");
  validateMetadata(testTable);
  // trigger a few upserts and validate
  for (int i = 6; i < 10; i++) {
    doWriteOperationNonPartitioned(testTable, "000000" + i, UPSERT);
  }
  validateMetadata(testTable);
}
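The rollback above pairs a target commit ("0000004") with a fresh rollback instant ("0000005"). A minimal, Hudi-free sketch of that timeline bookkeeping; every name here is hypothetical:

import java.util.TreeSet;

// Hypothetical timeline model: a rollback targets one commit and erases its effects,
// while the rollback action itself is recorded under its own instant time.
final class RollbackTimelineSketch {
  private final TreeSet<String> committed = new TreeSet<>();
  private final TreeSet<String> rollbacks = new TreeSet<>();

  void commit(String instantTime) {
    committed.add(instantTime);
  }

  void rollback(String targetInstant, String rollbackInstant) {
    committed.remove(targetInstant);
    rollbacks.add(rollbackInstant);
  }

  public static void main(String[] args) {
    RollbackTimelineSketch timeline = new RollbackTimelineSketch();
    timeline.commit("0000003");
    timeline.commit("0000004");
    timeline.rollback("0000004", "0000005");
    System.out.println(timeline.committed); // [0000003] -- 0000004 is gone
  }
}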
Use of org.apache.hudi.common.model.HoodieTableType in project hudi by apache.
From the class TestHoodieBackedMetadata, method testMetadataInsertUpsertCleanNonPartitioned.
@Test
public void testMetadataInsertUpsertCleanNonPartitioned() throws Exception {
  HoodieTableType tableType = COPY_ON_WRITE;
  init(tableType);
  doWriteOperationNonPartitioned(testTable, "0000001", INSERT);
  doWriteOperationNonPartitioned(testTable, "0000002", UPSERT);
  testTable.doCleanBasedOnCommits("0000003", Arrays.asList("0000001"));
  validateMetadata(testTable, emptyList(), true);
}
Use of org.apache.hudi.common.model.HoodieTableType in project hudi by apache.
From the class TestHoodieBackedMetadata, method testMetadataTableServices.
/**
 * Tests that table services in the data table won't trigger table services in the metadata table.
 *
 * @throws Exception
 */
@Test
public void testMetadataTableServices() throws Exception {
  HoodieTableType tableType = COPY_ON_WRITE;
  init(tableType, false);
  writeConfig = getWriteConfigBuilder(true, true, false)
      .withMetadataConfig(HoodieMetadataConfig.newBuilder()
          .enable(true)
          .enableFullScan(true)
          .enableMetrics(false)
          // after 3 delta commits for regular writer operations, compaction should kick in
          .withMaxNumDeltaCommitsBeforeCompaction(3)
          .build())
      .build();
  initWriteConfigAndMetatableWriter(writeConfig, true);
  doWriteOperation(testTable, "0000001", INSERT);
  doCleanAndValidate(testTable, "0000003", Arrays.asList("0000001"));
  HoodieTableMetadata tableMetadata = metadata(writeConfig, context);
  // since clean was the last commit, table services should not get triggered in the metadata table
  assertFalse(tableMetadata.getLatestCompactionTime().isPresent());
  doWriteOperation(testTable, "0000004", UPSERT);
  // this should have triggered compaction in the metadata table
  tableMetadata = metadata(writeConfig, context);
  assertTrue(tableMetadata.getLatestCompactionTime().isPresent());
  assertEquals("0000003001", tableMetadata.getLatestCompactionTime().get());
}
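Two conventions drive the assertions above: the metadata table compacts once the configured number of delta commits has accumulated, and the resulting compaction instant is a data-table instant time with a "001" suffix appended, which is where "0000003001" comes from. A minimal sketch of both rules, not the Hudi implementation:

import java.util.List;

final class MetadataCompactionSketch {
  // Mirrors withMaxNumDeltaCommitsBeforeCompaction(3): compact once enough
  // delta commits have landed since the last compaction.
  static boolean shouldCompact(List<String> deltaCommitsSinceLastCompaction, int maxNumDeltaCommits) {
    return deltaCommitsSinceLastCompaction.size() >= maxNumDeltaCommits;
  }

  // Metadata-table compaction instants suffix "001" onto a data-table instant time,
  // which is why the test expects "0000003001".
  static String compactionInstantTime(String dataTableInstant) {
    return dataTableInstant + "001";
  }

  public static void main(String[] args) {
    System.out.println(shouldCompact(List.of("0000001", "0000003", "0000004"), 3)); // true
    System.out.println(compactionInstantTime("0000003")); // 0000003001
  }
}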
Use of org.apache.hudi.common.model.HoodieTableType in project hudi by apache.
From the class TestHoodieBackedMetadata, method testVirtualKeysInBaseFiles.
/**
 * Tests that virtual key configs are honored in base files after compaction in the metadata table.
 *
 * @throws Exception
 */
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testVirtualKeysInBaseFiles(boolean populateMetaFields) throws Exception {
  HoodieTableType tableType = MERGE_ON_READ;
  init(tableType, false);
  writeConfig = getWriteConfigBuilder(true, true, false)
      .withMetadataConfig(HoodieMetadataConfig.newBuilder()
          .enable(true)
          .enableFullScan(true)
          .enableMetrics(false)
          .withPopulateMetaFields(populateMetaFields)
          .withMaxNumDeltaCommitsBeforeCompaction(2)
          .build())
      .build();
  initWriteConfigAndMetatableWriter(writeConfig, true);
  doWriteOperation(testTable, "0000001", INSERT);
  doClean(testTable, "0000003", Arrays.asList("0000001"));
  // this should have triggered compaction in the metadata table
  doWriteOperation(testTable, "0000004", UPSERT);
  HoodieTableMetadata tableMetadata = metadata(writeConfig, context);
  assertTrue(tableMetadata.getLatestCompactionTime().isPresent());
  assertEquals("0000003001", tableMetadata.getLatestCompactionTime().get());
  HoodieTableMetaClient metadataMetaClient = HoodieTableMetaClient.builder()
      .setConf(hadoopConf)
      .setBasePath(metadataTableBasePath)
      .build();
  HoodieWriteConfig metadataTableWriteConfig = getMetadataWriteConfig(writeConfig);
  metadataMetaClient.reloadActiveTimeline();
  HoodieTable table = HoodieSparkTable.create(metadataTableWriteConfig, context, metadataMetaClient);
  table.getHoodieView().sync();
  List<FileSlice> fileSlices = table.getSliceView().getLatestFileSlices("files").collect(Collectors.toList());
  HoodieBaseFile baseFile = fileSlices.get(0).getBaseFile().get();
  HoodieHFileReader hoodieHFileReader = new HoodieHFileReader(context.getHadoopConf().get(),
      new Path(baseFile.getPath()), new CacheConfig(context.getHadoopConf().get()));
  List<Pair<String, IndexedRecord>> records = hoodieHFileReader.readAllRecords();
  records.forEach(entry -> {
    if (populateMetaFields) {
      assertNotNull(((GenericRecord) entry.getSecond()).get(HoodieRecord.RECORD_KEY_METADATA_FIELD));
    } else {
      assertNull(((GenericRecord) entry.getSecond()).get(HoodieRecord.RECORD_KEY_METADATA_FIELD));
    }
  });
}
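The forEach above captures what virtual keys mean for base files: with populateMetaFields enabled, every record carries the _hoodie_record_key meta column (the value of HoodieRecord.RECORD_KEY_METADATA_FIELD); with virtual keys it is never written. A small Avro-only sketch of the same check; the helper class is hypothetical:

import org.apache.avro.generic.GenericRecord;

final class MetaFieldCheck {
  // Column written by Hudi when meta fields are populated.
  static final String RECORD_KEY_FIELD = "_hoodie_record_key";

  // True only when the schema declares the meta column and the record carries a value;
  // under virtual keys neither holds, so GenericRecord.get(...) yields null.
  static boolean hasMetaKey(GenericRecord record) {
    return record.getSchema().getField(RECORD_KEY_FIELD) != null
        && record.get(RECORD_KEY_FIELD) != null;
  }
}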
Use of org.apache.hudi.common.model.HoodieTableType in project hudi by apache.
From the class TestHoodieBackedTableMetadata, method testTableOperations.
@Test
public void testTableOperations() throws Exception {
  HoodieTableType tableType = HoodieTableType.COPY_ON_WRITE;
  init(tableType);
  doWriteInsertAndUpsert(testTable);
  // trigger an upsert
  doWriteOperation(testTable, "0000003");
  verifyBaseMetadataTable();
}
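verifyBaseMetadataTable reduces to one invariant: listings answered from the metadata table must agree with direct file-system listings. A minimal sketch of that comparison, assuming HoodieTableMetadata's listing methods from the Hudi version under test; the helper itself is hypothetical and skips the file-size reconciliation the real test helpers perform:

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hudi.metadata.HoodieTableMetadata;

final class MetadataListingCheck {
  // Hypothetical helper: compares file names per partition; ordering from either
  // source is not guaranteed, so both sides are sorted before comparison.
  static void assertListingsMatch(HoodieTableMetadata tableMetadata, FileSystem fs, String basePath)
      throws IOException {
    for (String partition : tableMetadata.getAllPartitionPaths()) {
      Path partitionPath = partition.isEmpty() ? new Path(basePath) : new Path(basePath, partition);
      String[] fromMetadata = Arrays.stream(tableMetadata.getAllFilesInPartition(partitionPath))
          .map(f -> f.getPath().getName()).sorted().toArray(String[]::new);
      String[] fromFs = Arrays.stream(fs.listStatus(partitionPath))
          .filter(FileStatus::isFile)
          .map(f -> f.getPath().getName()).sorted().toArray(String[]::new);
      if (!Arrays.equals(fromMetadata, fromFs)) {
        throw new AssertionError("Metadata listing diverges from file system in partition: " + partition);
      }
    }
  }
}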