use of org.apache.hudi.exception.HoodieException in project hudi by apache.
the class FlinkOptions method allOptions.
/**
* Returns all the config options.
*/
public static List<ConfigOption<?>> allOptions() {
  Field[] declaredFields = FlinkOptions.class.getDeclaredFields();
  List<ConfigOption<?>> options = new ArrayList<>();
  for (Field field : declaredFields) {
    // Collect every static ConfigOption<?> field declared on FlinkOptions.
    if (java.lang.reflect.Modifier.isStatic(field.getModifiers())
        && field.getType().equals(ConfigOption.class)) {
      try {
        // For static fields the argument to Field#get is ignored.
        options.add((ConfigOption<?>) field.get(ConfigOption.class));
      } catch (IllegalAccessException e) {
        throw new HoodieException("Error while fetching static config option", e);
      }
    }
  }
  return options;
}
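For illustration only, a minimal usage sketch that is not part of the Hudi source: iterate the returned options and print each key via Flink's ConfigOption#key() (assumes org.apache.flink.configuration.ConfigOption and FlinkOptions are on the classpath).

List<ConfigOption<?>> options = FlinkOptions.allOptions();
for (ConfigOption<?> option : options) {
  // key() is the string name under which the option is configured
  System.out.println(option.key());
}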
use of org.apache.hudi.exception.HoodieException in project hudi by apache.
the class ClusteringTestUtils method createClusteringPlan.
public static HoodieClusteringPlan createClusteringPlan(HoodieTableMetaClient metaClient, String instantTime, String fileId) {
  try {
    String basePath = metaClient.getBasePath();
    String partition = DEFAULT_PARTITION_PATHS[0];
    createBaseFile(basePath, partition, instantTime, fileId, 1);
    FileSlice slice = new FileSlice(partition, instantTime, fileId);
    slice.setBaseFile(new CompactionTestUtils.DummyHoodieBaseFile(
        Paths.get(basePath, partition, baseFileName(instantTime, fileId)).toString()));
    List<FileSlice>[] fileSliceGroups = new List[] {Collections.singletonList(slice)};
    HoodieClusteringPlan clusteringPlan =
        ClusteringUtils.createClusteringPlan("strategy", new HashMap<>(), fileSliceGroups, Collections.emptyMap());
    return clusteringPlan;
  } catch (Exception e) {
    throw new HoodieException(e.getMessage(), e);
  }
}
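A hypothetical test-side usage, not taken from the source: "metaClient" is assumed to be a HoodieTableMetaClient already initialized over a temporary test table, and the instant time and file id are synthetic.

String instantTime = "001";
String fileId = java.util.UUID.randomUUID().toString();
HoodieClusteringPlan plan = ClusteringTestUtils.createClusteringPlan(metaClient, instantTime, fileId);
// The returned plan wraps a single input group containing the file slice created above.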
use of org.apache.hudi.exception.HoodieException in project hudi by apache.
the class HoodieBackedTableMetadataWriter method initializeFileGroups.
/**
 * Initialize file groups for a partition. For file listing, we just have one file group.
 *
 * All file groups for a given metadata partition have a fixed prefix as per {@link MetadataPartitionType#getFileIdPrefix()}.
 * Each file group is suffixed with a 4-digit counter that starts at 0000 and increments by 1.
 *
 * Let's say we configure 10 file groups for the record level index partition with the prefix "record-index-bucket-".
 * The file groups will be named:
 * record-index-bucket-0000, ..., record-index-bucket-0009
 */
private void initializeFileGroups(HoodieTableMetaClient dataMetaClient, MetadataPartitionType metadataPartition, String instantTime, int fileGroupCount) throws IOException {
  final HashMap<HeaderMetadataType, String> blockHeader = new HashMap<>();
  blockHeader.put(HeaderMetadataType.INSTANT_TIME, instantTime);
  // Archival of the data table has a dependency on compaction (base files) in the metadata table.
  // It is assumed that as of time Tx of the base instant (/compaction time) in the metadata table,
  // all commits in the data table are in sync with the metadata table. So we always start with a log file for any file group.
  final HoodieDeleteBlock block = new HoodieDeleteBlock(new HoodieKey[0], blockHeader);
  LOG.info(String.format("Creating %d file groups for partition %s with base fileId %s at instant time %s",
      fileGroupCount, metadataPartition.getPartitionPath(), metadataPartition.getFileIdPrefix(), instantTime));
  for (int i = 0; i < fileGroupCount; ++i) {
    final String fileGroupFileId = String.format("%s%04d", metadataPartition.getFileIdPrefix(), i);
    try {
      // Bootstrap each file group with an empty delete block so that it starts on a log file.
      HoodieLogFormat.Writer writer = HoodieLogFormat.newWriterBuilder()
          .onParentPath(FSUtils.getPartitionPath(metadataWriteConfig.getBasePath(), metadataPartition.getPartitionPath()))
          .withFileId(fileGroupFileId)
          .overBaseCommit(instantTime)
          .withLogVersion(HoodieLogFile.LOGFILE_BASE_VERSION)
          .withFileSize(0L)
          .withSizeThreshold(metadataWriteConfig.getLogFileMaxSize())
          .withFs(dataMetaClient.getFs())
          .withRolloverLogWriteToken(HoodieLogFormat.DEFAULT_WRITE_TOKEN)
          .withLogWriteToken(HoodieLogFormat.DEFAULT_WRITE_TOKEN)
          .withFileExtension(HoodieLogFile.DELTA_EXTENSION)
          .build();
      writer.appendBlock(block);
      writer.close();
    } catch (InterruptedException e) {
      throw new HoodieException("Failed to create fileGroup " + fileGroupFileId + " for partition " + metadataPartition.getPartitionPath(), e);
    }
  }
}
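A standalone sketch, not part of Hudi, of the naming scheme the Javadoc describes: a "record-index-bucket-" prefix with 10 file groups yields ids 0000 through 0009.

String fileIdPrefix = "record-index-bucket-"; // hypothetical prefix for the record level index partition
int fileGroupCount = 10;
for (int i = 0; i < fileGroupCount; i++) {
  // Same format as in the method above: prefix plus a zero-padded 4-digit counter
  String fileGroupFileId = String.format("%s%04d", fileIdPrefix, i);
  System.out.println(fileGroupFileId); // record-index-bucket-0000 ... record-index-bucket-0009
}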
use of org.apache.hudi.exception.HoodieException in project hudi by apache.
the class JmxReporterServer method start.
public void start() {
  ValidationUtils.checkArgument(reporter != null && connector != null, "reporter or connector cannot be null!");
  try {
    connector.start();
    reporter.start();
  } catch (Exception e) {
    throw new HoodieException("connector or reporter start failed", e);
  }
}
use of org.apache.hudi.exception.HoodieException in project hudi by apache.
the class MetricsReporterFactory method createReporter.
public static MetricsReporter createReporter(HoodieWriteConfig config, MetricRegistry registry) {
  String reporterClassName = config.getMetricReporterClassName();
  if (!StringUtils.isNullOrEmpty(reporterClassName)) {
    Object instance = ReflectionUtils.loadClass(
        reporterClassName, new Class<?>[] {Properties.class, MetricRegistry.class}, config.getProps(), registry);
    if (!(instance instanceof CustomizableMetricsReporter)) {
      throw new HoodieException(config.getMetricReporterClassName() + " is not a subclass of CustomizableMetricsReporter");
    }
    return (MetricsReporter) instance;
  }
  MetricsReporterType type = config.getMetricsReporterType();
  MetricsReporter reporter = null;
  switch (type) {
    case GRAPHITE:
      reporter = new MetricsGraphiteReporter(config, registry);
      break;
    case INMEMORY:
      reporter = new InMemoryMetricsReporter();
      break;
    case JMX:
      reporter = new JmxMetricsReporter(config, registry);
      break;
    case DATADOG:
      reporter = new DatadogMetricsReporter(config, registry);
      break;
    case PROMETHEUS_PUSHGATEWAY:
      reporter = new PushGatewayMetricsReporter(config, registry);
      break;
    case PROMETHEUS:
      reporter = new PrometheusReporter(config, registry);
      break;
    case CONSOLE:
      reporter = new ConsoleMetricsReporter(registry);
      break;
    case CLOUDWATCH:
      reporter = new CloudWatchMetricsReporter(config, registry);
      break;
    default:
      LOG.error("Reporter type[" + type + "] is not supported.");
      break;
  }
  return reporter;
}
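A hedged configuration sketch, not from the source: the property keys and builder calls below are assumptions based on Hudi's metrics configuration; check HoodieMetricsConfig and HoodieWriteConfig for the authoritative names.

Properties props = new Properties();
props.setProperty("hoodie.metrics.on", "true");             // assumed key enabling metrics
props.setProperty("hoodie.metrics.reporter.type", "CONSOLE"); // assumed key selecting the reporter type
// A custom implementation could instead be named via "hoodie.metrics.reporter.class";
// the factory above requires it to extend CustomizableMetricsReporter and expose a
// (Properties, MetricRegistry) constructor.

HoodieWriteConfig config = HoodieWriteConfig.newBuilder()
    .withPath("/tmp/hoodie_table")   // hypothetical base path
    .withProps(props)
    .build();
MetricsReporter reporter = MetricsReporterFactory.createReporter(config, new MetricRegistry());
reporter.start();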