Use of org.apache.druid.indexer.HadoopDruidDetermineConfigurationJob in project druid by druid-io: the run() method of the class CliInternalHadoopIndexer.
@Override
public void run()
{
  try {
    Injector injector = makeInjector();
    config = getHadoopDruidIndexerConfig();

    // Override the metadata storage type based on the HadoopIOConfig's metadataUpdateSpec.
    MetadataStorageUpdaterJobSpec metadataSpec = config.getSchema().getIOConfig().getMetadataUpdateSpec();
    Preconditions.checkNotNull(metadataSpec.getType(), "type in metadataUpdateSpec must not be null");
    injector.getInstance(Properties.class).setProperty("druid.metadata.storage.type", metadataSpec.getType());

    // If a datasource pathSpec is used, refresh its segment list from the metadata store.
    HadoopIngestionSpec.updateSegmentListIfDatasourcePathSpecIsUsed(
        config.getSchema(),
        HadoopDruidIndexerConfig.JSON_MAPPER,
        new MetadataStoreBasedUsedSegmentsRetriever(injector.getInstance(IndexerMetadataStorageCoordinator.class))
    );

    // Run the determine-configuration job first, then the indexer job itself.
    List<Jobby> jobs = new ArrayList<>();
    HadoopDruidIndexerJob indexerJob = new HadoopDruidIndexerJob(
        config,
        injector.getInstance(MetadataStorageUpdaterJobHandler.class)
    );
    jobs.add(new HadoopDruidDetermineConfigurationJob(config));
    jobs.add(indexerJob);
    boolean jobsSucceeded = JobHelper.runJobs(jobs);

    // Move published index files into their final locations and clean up intermediate output if the jobs succeeded.
    JobHelper.renameIndexFilesForSegments(config.getSchema(), indexerJob.getPublishedSegmentAndIndexZipFilePaths());
    JobHelper.maybeDeleteIntermediatePath(jobsSucceeded, config.getSchema());
  }
  catch (Exception e) {
    throw new RuntimeException(e);
  }
}
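Both HadoopDruidDetermineConfigurationJob and HadoopDruidIndexerJob implement Druid's Jobby interface, which is what allows JobHelper.runJobs(jobs) to drive them in sequence and report overall success. As a rough sketch (not taken from the Druid source), a custom step could be slotted into the same jobs list; the LoggingJobby class name and its body below are illustrative assumptions, only the Jobby interface and its boolean run() method are from the library.

import org.apache.druid.indexer.Jobby;

// Illustrative sketch: a minimal Jobby that could be added to the jobs list above.
// Jobby.run() returns true on success; JobHelper.runJobs(...) reports false if a job fails.
public class LoggingJobby implements Jobby
{
  private final String message;

  public LoggingJobby(String message)
  {
    this.message = message;
  }

  @Override
  public boolean run()
  {
    // A real job would submit Hadoop work here; this sketch just logs and succeeds.
    System.out.println("Running step: " + message);
    return true;
  }
}

For example, jobs.add(new LoggingJobby("before indexing")) before jobs.add(indexerJob) would run the extra step ahead of the indexer job.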