Use of org.apache.hudi.common.config.HoodieMetadataConfig in project hudi by apache.
The class TestRemoteHoodieTableFileSystemView, method getFileSystemView:
protected SyncableFileSystemView getFileSystemView(HoodieTimeline timeline) {
  FileSystemViewStorageConfig sConf =
      FileSystemViewStorageConfig.newBuilder().withStorageType(FileSystemViewStorageType.SPILLABLE_DISK).build();
  HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder().build();
  HoodieCommonConfig commonConfig = HoodieCommonConfig.newBuilder().build();
  HoodieLocalEngineContext localEngineContext = new HoodieLocalEngineContext(metaClient.getHadoopConf());
  try {
    // serverPort(0) lets the embedded timeline server bind to any free port.
    server = new TimelineService(localEngineContext, new Configuration(),
        TimelineService.Config.builder().serverPort(0).build(), FileSystem.get(new Configuration()),
        FileSystemViewManager.createViewManager(localEngineContext, metadataConfig, sConf, commonConfig));
    server.startService();
  } catch (Exception ex) {
    throw new RuntimeException(ex);
  }
  LOG.info("Connecting to Timeline Server :" + server.getServerPort());
  view = new RemoteHoodieTableFileSystemView("localhost", server.getServerPort(), metaClient);
  return view;
}
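For context, the remote view returned by this helper answers the same queries as a local one, with each call served over HTTP by the embedded TimelineService. A minimal sketch of how a test might use it; the partition path "2016/03/15" is an illustrative placeholder, not one from the original test:

SyncableFileSystemView view = getFileSystemView(metaClient.getActiveTimeline());
view.sync();  // refresh the view from the server-side timeline
long baseFileCount = view.getLatestBaseFiles("2016/03/15").count();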
Use of org.apache.hudi.common.config.HoodieMetadataConfig in project hudi by apache.
The class HoodieBackedTableMetadataWriter, method enablePartitions:
/**
 * Enable metadata table partitions based on config.
 */
private void enablePartitions() {
  final HoodieMetadataConfig metadataConfig = dataWriteConfig.getMetadataConfig();
  boolean isBootstrapCompleted;
  Option<HoodieTableMetaClient> metaClient = Option.empty();
  try {
    // The metadata table is considered bootstrapped once its .hoodie meta-folder exists.
    isBootstrapCompleted = dataMetaClient.getFs().exists(
        new Path(metadataWriteConfig.getBasePath(), HoodieTableMetaClient.METAFOLDER_NAME));
    if (isBootstrapCompleted) {
      metaClient = Option.of(HoodieTableMetaClient.builder()
          .setConf(hadoopConf.get()).setBasePath(metadataWriteConfig.getBasePath()).build());
    }
  } catch (IOException e) {
    throw new HoodieException("Failed to enable metadata partitions!", e);
  }
  Option<HoodieTableFileSystemView> fsView = Option.ofNullable(
      metaClient.isPresent() ? HoodieTableMetadataUtil.getFileSystemView(metaClient.get()) : null);
  // The FILES partition is always enabled; the index partitions are opt-in.
  enablePartition(MetadataPartitionType.FILES, metadataConfig, metaClient, fsView, isBootstrapCompleted);
  if (metadataConfig.isBloomFilterIndexEnabled()) {
    enablePartition(MetadataPartitionType.BLOOM_FILTERS, metadataConfig, metaClient, fsView, isBootstrapCompleted);
  }
  if (metadataConfig.isColumnStatsIndexEnabled()) {
    enablePartition(MetadataPartitionType.COLUMN_STATS, metadataConfig, metaClient, fsView, isBootstrapCompleted);
  }
}
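For reference, the flags consulted above are set through the HoodieMetadataConfig builder on the write config. A minimal sketch of a config that enables all three partitions; the builder methods withMetadataIndexBloomFilter and withMetadataIndexColumnStats are assumed from the same Hudi release and may be named differently in others:

HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder()
    .enable(true)                          // bootstraps the FILES partition
    .withMetadataIndexBloomFilter(true)    // assumed setter behind isBloomFilterIndexEnabled()
    .withMetadataIndexColumnStats(true)    // assumed setter behind isColumnStatsIndexEnabled()
    .build();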
Use of org.apache.hudi.common.config.HoodieMetadataConfig in project hudi by apache.
The class MetadataCommand, method listPartitions:
@CliCommand(value = "metadata list-partitions", help = "List all partitions from metadata")
public String listPartitions(
    @CliOption(key = "sparkMaster", unspecifiedDefaultValue = SparkUtil.DEFAULT_SPARK_MASTER, help = "Spark master") final String master) throws IOException {
  HoodieCLI.getTableMetaClient();
  initJavaSparkContext(Option.of(master));
  // Read partition paths from the metadata table rather than listing the file system.
  HoodieMetadataConfig config = HoodieMetadataConfig.newBuilder().enable(true).build();
  HoodieBackedTableMetadata metadata =
      new HoodieBackedTableMetadata(new HoodieSparkEngineContext(jsc), config, HoodieCLI.basePath, "/tmp");
  if (!metadata.enabled()) {
    return "[ERROR] Metadata Table not enabled/initialized\n\n";
  }
  HoodieTimer timer = new HoodieTimer().startTimer();
  List<String> partitions = metadata.getAllPartitionPaths();
  LOG.debug("Took " + timer.endTimer() + " ms");
  final List<Comparable[]> rows = new ArrayList<>();
  partitions.stream().sorted(Comparator.reverseOrder()).forEach(p -> {
    Comparable[] row = new Comparable[1];
    row[0] = p;
    rows.add(row);
  });
  TableHeader header = new TableHeader().addTableHeaderField("partition");
  return HoodiePrintHelper.print(header, new HashMap<>(), "", false, Integer.MAX_VALUE, false, rows);
}
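The same listing can be done programmatically without a Spark session by swapping in the local engine context; a minimal sketch, where basePath is a placeholder for the table's base path:

HoodieMetadataConfig config = HoodieMetadataConfig.newBuilder().enable(true).build();
HoodieBackedTableMetadata metadata = new HoodieBackedTableMetadata(
    new HoodieLocalEngineContext(new Configuration()), config, basePath, "/tmp");
List<String> partitions = metadata.getAllPartitionPaths();  // throws IOException

From the Hudi CLI shell, the command itself is invoked as metadata list-partitions, optionally passing --sparkMaster to override the default master.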
Use of org.apache.hudi.common.config.HoodieMetadataConfig in project hudi by apache.
The class TimelineService, method buildFileSystemViewManager:
public static FileSystemViewManager buildFileSystemViewManager(Config config, SerializableConfiguration conf) {
  HoodieLocalEngineContext localEngineContext = new HoodieLocalEngineContext(conf.get());
  // Just use defaults for now
  HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder().build();
  HoodieCommonConfig commonConfig = HoodieCommonConfig.newBuilder().build();
  switch (config.viewStorageType) {
    case MEMORY:
      FileSystemViewStorageConfig.Builder inMemConfBuilder = FileSystemViewStorageConfig.newBuilder();
      inMemConfBuilder.withStorageType(FileSystemViewStorageType.MEMORY);
      return FileSystemViewManager.createViewManager(localEngineContext, metadataConfig, inMemConfBuilder.build(), commonConfig);
    case SPILLABLE_DISK: {
      FileSystemViewStorageConfig.Builder spillableConfBuilder = FileSystemViewStorageConfig.newBuilder();
      spillableConfBuilder.withStorageType(FileSystemViewStorageType.SPILLABLE_DISK)
          .withBaseStoreDir(config.baseStorePathForFileGroups)
          .withMaxMemoryForView(config.maxViewMemPerTableInMB * 1024 * 1024L)
          .withMemFractionForPendingCompaction(config.memFractionForCompactionPerTable);
      return FileSystemViewManager.createViewManager(localEngineContext, metadataConfig, spillableConfBuilder.build(), commonConfig);
    }
    case EMBEDDED_KV_STORE: {
      FileSystemViewStorageConfig.Builder rocksDBConfBuilder = FileSystemViewStorageConfig.newBuilder();
      rocksDBConfBuilder.withStorageType(FileSystemViewStorageType.EMBEDDED_KV_STORE).withRocksDBPath(config.rocksDBPath);
      return FileSystemViewManager.createViewManager(localEngineContext, metadataConfig, rocksDBConfBuilder.build(), commonConfig);
    }
    default:
      throw new IllegalArgumentException("Invalid view manager storage type :" + config.viewStorageType);
  }
}
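Callers that do not run a TimelineService can assemble the same spillable-disk manager directly from the builders used above; a minimal sketch, where /tmp/view-spill is an illustrative spill directory:

HoodieLocalEngineContext engineContext = new HoodieLocalEngineContext(new Configuration());
FileSystemViewStorageConfig viewConf = FileSystemViewStorageConfig.newBuilder()
    .withStorageType(FileSystemViewStorageType.SPILLABLE_DISK)
    .withBaseStoreDir("/tmp/view-spill")  // illustrative path
    .build();
FileSystemViewManager viewManager = FileSystemViewManager.createViewManager(
    engineContext,
    HoodieMetadataConfig.newBuilder().build(),  // metadata table defaults
    viewConf,
    HoodieCommonConfig.newBuilder().build());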
Use of org.apache.hudi.common.config.HoodieMetadataConfig in project hudi by apache.
The class TestTimelineServerBasedWriteMarkers, method setup:
@BeforeEach
public void setup() throws IOException {
  initPath();
  initMetaClient();
  this.jsc = new JavaSparkContext(HoodieClientTestUtils.getSparkConfForTest(TestTimelineServerBasedWriteMarkers.class.getName()));
  this.context = new HoodieSparkEngineContext(jsc);
  this.fs = FSUtils.getFs(metaClient.getBasePath(), metaClient.getHadoopConf());
  this.markerFolderPath = new Path(metaClient.getMarkerFolderPath("000"));
  FileSystemViewStorageConfig storageConf =
      FileSystemViewStorageConfig.newBuilder().withStorageType(FileSystemViewStorageType.SPILLABLE_DISK).build();
  HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder().build();
  HoodieLocalEngineContext localEngineContext = new HoodieLocalEngineContext(metaClient.getHadoopConf());
  try {
    // Marker requests must be enabled for timeline-server-based markers to work.
    timelineService = new TimelineService(localEngineContext, new Configuration(),
        TimelineService.Config.builder().serverPort(0).enableMarkerRequests(true).build(),
        FileSystem.get(new Configuration()),
        FileSystemViewManager.createViewManager(localEngineContext, metadataConfig, storageConf, HoodieCommonConfig.newBuilder().build()));
    timelineService.startService();
  } catch (Exception ex) {
    throw new RuntimeException(ex);
  }
  this.writeMarkers = new TimelineServerBasedWriteMarkers(metaClient.getBasePath(), markerFolderPath.toString(), "000", "localhost", timelineService.getServerPort(), 300);
}
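A matching @AfterEach usually releases the server and Spark context between tests; a minimal sketch, assuming TimelineService.close() stops the embedded server in this Hudi version:

@AfterEach
public void tearDown() {
  if (timelineService != null) {
    timelineService.close();  // assumed to stop the embedded timeline server
  }
  if (jsc != null) {
    jsc.stop();  // shut down the local Spark context
  }
}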