Use of org.apache.hudi.common.engine.HoodieLocalEngineContext in project hudi by apache.
The class TestRemoteHoodieTableFileSystemView, method getFileSystemView:
protected SyncableFileSystemView getFileSystemView(HoodieTimeline timeline) {
  FileSystemViewStorageConfig sConf =
      FileSystemViewStorageConfig.newBuilder().withStorageType(FileSystemViewStorageType.SPILLABLE_DISK).build();
  HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder().build();
  HoodieCommonConfig commonConfig = HoodieCommonConfig.newBuilder().build();
  HoodieLocalEngineContext localEngineContext = new HoodieLocalEngineContext(metaClient.getHadoopConf());
  try {
    server = new TimelineService(localEngineContext, new Configuration(),
        TimelineService.Config.builder().serverPort(0).build(), FileSystem.get(new Configuration()),
        FileSystemViewManager.createViewManager(localEngineContext, metadataConfig, sConf, commonConfig));
    server.startService();
  } catch (Exception ex) {
    throw new RuntimeException(ex);
  }
  LOG.info("Connecting to Timeline Server :" + server.getServerPort());
  view = new RemoteHoodieTableFileSystemView("localhost", server.getServerPort(), metaClient);
  return view;
}
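A test that starts an embedded TimelineService as above usually also stops it once the test finishes. A minimal teardown sketch, assuming a JUnit 5 fixture and reusing the server field from the method above (the method name shutdownTimelineServer is illustrative, not taken from the Hudi tests):

@AfterEach
public void shutdownTimelineServer() {
  // Stop the embedded timeline server started in getFileSystemView and free its port.
  if (server != null) {
    server.close();
  }
}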
Use of org.apache.hudi.common.engine.HoodieLocalEngineContext in project hudi by apache.
The class TestFileSystemBackedTableMetadata, method testNonPartitionedTable:
/**
 * Test a non-partitioned Hudi table.
 * @throws Exception
 */
@Test
public void testNonPartitionedTable() throws Exception {
  // Generate 10 files under basepath
  hoodieTestTable.addCommit("100").withBaseFilesInPartition(DEFAULT_PARTITION, IntStream.range(0, 10).toArray());
  HoodieLocalEngineContext localEngineContext = new HoodieLocalEngineContext(metaClient.getHadoopConf());
  FileSystemBackedTableMetadata fileSystemBackedTableMetadata =
      new FileSystemBackedTableMetadata(localEngineContext, new SerializableConfiguration(metaClient.getHadoopConf()), basePath, false);
  Assertions.assertEquals(0, fileSystemBackedTableMetadata.getAllPartitionPaths().size());
  Assertions.assertEquals(10, fileSystemBackedTableMetadata.getAllFilesInPartition(new Path(basePath)).length);
  Assertions.assertEquals(10, fileSystemBackedTableMetadata.getAllFilesInPartitions(Collections.singletonList(basePath)).get(basePath).length);
}
Use of org.apache.hudi.common.engine.HoodieLocalEngineContext in project hudi by apache.
The class TestFileSystemBackedTableMetadata, method testDatePartitionedTableWithAssumeDateIsFalse:
/**
 * Test listing of partitions for date-based partitions with assumeDatePartitioning = false.
 * @throws Exception
 */
@Test
public void testDatePartitionedTableWithAssumeDateIsFalse() throws Exception {
  String instant = "100";
  hoodieTestTable = hoodieTestTable.addCommit(instant);
  // Generate 10 files under each partition
  DATE_PARTITIONS.stream().forEach(p -> {
    try {
      hoodieTestTable = hoodieTestTable.withPartitionMetaFiles(p).withBaseFilesInPartition(p, IntStream.range(0, 10).toArray());
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  });
  HoodieLocalEngineContext localEngineContext = new HoodieLocalEngineContext(metaClient.getHadoopConf());
  FileSystemBackedTableMetadata fileSystemBackedTableMetadata =
      new FileSystemBackedTableMetadata(localEngineContext, new SerializableConfiguration(metaClient.getHadoopConf()), basePath, false);
  Assertions.assertEquals(3, fileSystemBackedTableMetadata.getAllPartitionPaths().size());
  List<String> fullPartitionPaths = DATE_PARTITIONS.stream().map(p -> basePath + "/" + p).collect(Collectors.toList());
  Map<String, FileStatus[]> partitionToFilesMap = fileSystemBackedTableMetadata.getAllFilesInPartitions(fullPartitionPaths);
  for (String p : fullPartitionPaths) {
    Assertions.assertEquals(10, partitionToFilesMap.get(p).length);
  }
}
Use of org.apache.hudi.common.engine.HoodieLocalEngineContext in project hudi by apache.
The class TestFileSystemBackedTableMetadata, method testDatePartitionedTable:
/**
 * Test listing of partitions for date-based partitions.
 * @throws Exception
 */
@Test
public void testDatePartitionedTable() throws Exception {
  String instant = "100";
  hoodieTestTable = hoodieTestTable.addCommit(instant);
  // Generate 10 files under each partition
  DATE_PARTITIONS.stream().forEach(p -> {
    try {
      hoodieTestTable = hoodieTestTable.withBaseFilesInPartition(p, IntStream.range(0, 10).toArray());
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  });
  HoodieLocalEngineContext localEngineContext = new HoodieLocalEngineContext(metaClient.getHadoopConf());
  FileSystemBackedTableMetadata fileSystemBackedTableMetadata =
      new FileSystemBackedTableMetadata(localEngineContext, new SerializableConfiguration(metaClient.getHadoopConf()), basePath, true);
  Assertions.assertEquals(3, fileSystemBackedTableMetadata.getAllPartitionPaths().size());
  Assertions.assertEquals(10, fileSystemBackedTableMetadata.getAllFilesInPartition(new Path(basePath + "/" + DATE_PARTITIONS.get(0))).length);
  List<String> fullPartitionPaths = DATE_PARTITIONS.stream().map(p -> basePath + "/" + p).collect(Collectors.toList());
  Map<String, FileStatus[]> partitionToFilesMap = fileSystemBackedTableMetadata.getAllFilesInPartitions(fullPartitionPaths);
  for (String p : fullPartitionPaths) {
    Assertions.assertEquals(10, partitionToFilesMap.get(p).length);
  }
}
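The two date-partitioned tests differ only in the assumeDatePartitioning flag passed to FileSystemBackedTableMetadata. A hedged sketch of that contrast, reusing the fixture fields from the tests above (the variable names byMetaFiles and byDateLayout are illustrative):

// assumeDatePartitioning = false: partitions are discovered by listing under the base
// path and checking for partition metadata files, which is why the first test writes
// them explicitly via withPartitionMetaFiles(p).
FileSystemBackedTableMetadata byMetaFiles = new FileSystemBackedTableMetadata(
    localEngineContext, new SerializableConfiguration(metaClient.getHadoopConf()), basePath, false);

// assumeDatePartitioning = true: a date-style yyyy/mm/dd layout is assumed, so the
// partitions are found without partition metadata files, as in the second test.
FileSystemBackedTableMetadata byDateLayout = new FileSystemBackedTableMetadata(
    localEngineContext, new SerializableConfiguration(metaClient.getHadoopConf()), basePath, true);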
Use of org.apache.hudi.common.engine.HoodieLocalEngineContext in project hudi by apache.
The class TestFileSystemBackedTableMetadata, method testMultiLevelEmptyPartitionTable:
@Test
public void testMultiLevelEmptyPartitionTable() throws Exception {
  String instant = "100";
  hoodieTestTable = hoodieTestTable.addCommit(instant);
  // Create each multi-level partition with a partition metadata file only (no data files)
  MULTI_LEVEL_PARTITIONS.stream().forEach(p -> {
    try {
      hoodieTestTable = hoodieTestTable.withPartitionMetaFiles(p);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  });
  HoodieLocalEngineContext localEngineContext = new HoodieLocalEngineContext(metaClient.getHadoopConf());
  FileSystemBackedTableMetadata fileSystemBackedTableMetadata =
      new FileSystemBackedTableMetadata(localEngineContext, new SerializableConfiguration(metaClient.getHadoopConf()), basePath, false);
  Assertions.assertEquals(3, fileSystemBackedTableMetadata.getAllPartitionPaths().size());
  Assertions.assertEquals(0, fileSystemBackedTableMetadata.getAllFilesInPartition(new Path(basePath + "/" + MULTI_LEVEL_PARTITIONS.get(0))).length);
  List<String> fullPartitionPaths = MULTI_LEVEL_PARTITIONS.stream().map(p -> basePath + "/" + p).collect(Collectors.toList());
  Map<String, FileStatus[]> partitionToFilesMap = fileSystemBackedTableMetadata.getAllFilesInPartitions(fullPartitionPaths);
  for (String p : fullPartitionPaths) {
    Assertions.assertEquals(0, partitionToFilesMap.get(p).length);
  }
}
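Taken together, these snippets share one pattern: wrap a Hadoop Configuration in a HoodieLocalEngineContext and pass it to a file-system-backed metadata or view component. A minimal, self-contained sketch of that pattern outside a test fixture; the class name ListHudiPartitions and the basePath value are hypothetical examples:

import org.apache.hadoop.conf.Configuration;
import org.apache.hudi.common.config.SerializableConfiguration;
import org.apache.hudi.common.engine.HoodieLocalEngineContext;
import org.apache.hudi.metadata.FileSystemBackedTableMetadata;

import java.util.List;

public class ListHudiPartitions {
  public static void main(String[] args) throws Exception {
    String basePath = "/tmp/hudi_trips_table";  // hypothetical table location
    Configuration hadoopConf = new Configuration();

    // HoodieLocalEngineContext runs Hudi's engine-agnostic operations (such as
    // parallel file listing) in the local JVM, with no Spark or Flink runtime.
    HoodieLocalEngineContext engineContext = new HoodieLocalEngineContext(hadoopConf);

    // assumeDatePartitioning = false: rely on partition metadata files, as in the tests above.
    FileSystemBackedTableMetadata tableMetadata = new FileSystemBackedTableMetadata(
        engineContext, new SerializableConfiguration(hadoopConf), basePath, false);

    List<String> partitions = tableMetadata.getAllPartitionPaths();
    partitions.forEach(System.out::println);
  }
}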