Use of org.apache.druid.indexing.common.config.TaskConfig in project druid by druid-io.
The class ShuffleDataSegmentPusherTest, method setup:
@Before
public void setup() throws IOException
{
  final WorkerConfig workerConfig = new WorkerConfig();
  // TaskConfig whose only storage location for intermediary segments is a temporary folder.
  final TaskConfig taskConfig = new TaskConfig(
      null, null, null, null, null, false, null, null,
      ImmutableList.of(new StorageLocationConfig(temporaryFolder.newFolder(), null, null)),
      false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient();
  if (LOCAL.equals(intermediateDataStore)) {
    intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  } else if (DEEPSTORE.equals(intermediateDataStore)) {
    localDeepStore = temporaryFolder.newFolder("localStorage");
    // Deep-storage variant: intermediary data goes through a local segment pusher.
    intermediaryDataManager = new DeepStorageIntermediaryDataManager(
        new LocalDataSegmentPusher(
            new LocalDataSegmentPusherConfig()
            {
              @Override
              public File getStorageDirectory()
              {
                return localDeepStore;
              }
            }
        )
    );
  }
  intermediaryDataManager.start();
  segmentPusher = new ShuffleDataSegmentPusher("supervisorTaskId", "subTaskId", intermediaryDataManager);
  // Jackson mapper wired with Guice so LocalLoadSpec instances can be deserialized in the test.
  final Injector injector = GuiceInjectors.makeStartupInjectorWithModules(
      ImmutableList.of(binder -> binder.bind(LocalDataSegmentPuller.class))
  );
  mapper = new DefaultObjectMapper();
  mapper.registerModule(new SimpleModule("loadSpecTest").registerSubtypes(LocalLoadSpec.class));
  mapper.setInjectableValues(new GuiceInjectableValues(injector));
  final GuiceAnnotationIntrospector guiceIntrospector = new GuiceAnnotationIntrospector();
  mapper.setAnnotationIntrospectors(
      new AnnotationIntrospectorPair(guiceIntrospector, mapper.getSerializationConfig().getAnnotationIntrospector()),
      new AnnotationIntrospectorPair(guiceIntrospector, mapper.getDeserializationConfig().getAnnotationIntrospector())
  );
}
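For context, a minimal sketch of how a test might exercise this setup. The helpers createSegmentFile() and newSegment() are hypothetical stand-ins for the fixture code, and the three-argument push call assumes the DataSegmentPusher interface that ShuffleDataSegmentPusher implements; the segment's shard spec would likely need to carry a bucket number so the intermediary data manager can place the partition.

@Test
public void testPush() throws IOException
{
  // Hypothetical fixture helpers, not part of the snippet above.
  final File segmentDir = createSegmentFile();
  final DataSegment segment = newSegment();
  // DataSegmentPusher#push(File, DataSegment, boolean useUniquePath)
  final DataSegment pushed = segmentPusher.push(segmentDir, segment, true);
  Assert.assertEquals(segment.getId(), pushed.getId());
}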
Use of org.apache.druid.indexing.common.config.TaskConfig in project druid by druid-io.
The class ShuffleResourceTest, method setup:
@Before
public void setup() throws IOException
{
  // Shorten the discovery/cleanup periods and the partition timeout so expiry is testable.
  final WorkerConfig workerConfig = new WorkerConfig()
  {
    @Override
    public long getIntermediaryPartitionDiscoveryPeriodSec()
    {
      return 1;
    }

    @Override
    public long getIntermediaryPartitionCleanupPeriodSec()
    {
      return 2;
    }

    @Override
    public Period getIntermediaryPartitionTimeout()
    {
      return new Period("PT2S");
    }
  };
  final TaskConfig taskConfig = new TaskConfig(
      null, null, null, null, null, false, null, null,
      ImmutableList.of(new StorageLocationConfig(tempDir.newFolder(), null, null)),
      false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  // Stub client that reports every queried task as SUCCESS so cleanup can proceed.
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient()
  {
    @Override
    public Map<String, TaskStatus> getTaskStatuses(Set<String> taskIds)
    {
      final Map<String, TaskStatus> result = new HashMap<>();
      for (String taskId : taskIds) {
        result.put(taskId, new TaskStatus(taskId, TaskState.SUCCESS, 10));
      }
      return result;
    }
  };
  intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  shuffleMetrics = new ShuffleMetrics();
  shuffleResource = new ShuffleResource(intermediaryDataManager, Optional.of(shuffleMetrics));
}
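A hedged sketch of how the metrics object wired above can be exercised; shuffleRequested and snapshotAndReset follow the ShuffleMetrics API as the shuffle resource uses it, and should be treated as assumptions about this Druid version.

// Record two shuffle downloads for one supervisor task, then snapshot.
shuffleMetrics.shuffleRequested("supervisorTaskId", 1024);
shuffleMetrics.shuffleRequested("supervisorTaskId", 512);
final Map<String, PerDatasourceShuffleMetrics> snapshot = shuffleMetrics.snapshotAndReset();
// snapshot.get("supervisorTaskId") should now report 2 requests and 1536 bytes,
// and the internal counters are reset for the next monitoring cycle.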
Use of org.apache.druid.indexing.common.config.TaskConfig in project druid by druid-io.
The class LocalIntermediaryDataManagerManualAddAndDeleteTest, method setup:
@Before
public void setup() throws IOException
{
  final WorkerConfig workerConfig = new WorkerConfig();
  intermediarySegmentsLocation = tempDir.newFolder();
  siblingLocation = tempDir.newFolder();
  // Storage location capped at 1200 bytes so the manager's size limit can be exercised.
  final TaskConfig taskConfig = new TaskConfig(
      null, null, null, null, null, false, null, null,
      ImmutableList.of(new StorageLocationConfig(intermediarySegmentsLocation, 1200L, null)),
      false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient();
  intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  intermediaryDataManager.start();
}
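The manual add-and-delete flow this setup supports looks roughly as follows; the addSegment and deletePartitions signatures are assumptions inferred from the test's name and the manager's role, and segment/segmentDir are hypothetical fixtures.

// Hypothetical sketch: hand a partition to the manager, then drop everything
// belonging to the supervisor task (method signatures are assumptions).
intermediaryDataManager.addSegment("supervisorTaskId", "subTaskId", segment, segmentDir);
intermediaryDataManager.deletePartitions("supervisorTaskId");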
Use of org.apache.druid.indexing.common.config.TaskConfig in project druid by druid-io.
The class ResetCluster, method deleteIndexerHadoopWorkingDir:
private void deleteIndexerHadoopWorkingDir(Injector injector)
{
  try {
    log.info("===========================================================================");
    log.info("Deleting hadoopWorkingPath.");
    log.info("===========================================================================");
    TaskConfig taskConfig = injector.getInstance(TaskConfig.class);
    // Run the cleaner inside a classloader built from the task's default Hadoop
    // coordinates so Hadoop dependencies stay off the caller's classpath.
    HadoopTask.invokeForeignLoader(
        "org.apache.druid.indexer.HadoopWorkingDirCleaner",
        new String[]{taskConfig.getHadoopWorkingPath()},
        HadoopTask.buildClassLoader(null, taskConfig.getDefaultHadoopCoordinates())
    );
  }
  catch (Exception ex) {
    log.error(ex, "Failed to cleanup indexer hadoop working directory.");
  }
}
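invokeForeignLoader resolves the named class inside the freshly built classloader and reflectively calls a static runTask method on it. A hedged sketch of what the cleaner on the far side of that call can look like; the runTask(String[]) contract is an assumption, while the Hadoop FileSystem calls are standard API:

// Hypothetical sketch of the foreign-loaded class. Uses org.apache.hadoop.fs.Path
// and org.apache.hadoop.conf.Configuration; invokeForeignLoader is assumed to
// reflectively invoke the static runTask(String[]) method.
public class HadoopWorkingDirCleaner
{
  public static String runTask(String[] args) throws Exception
  {
    final Path workingPath = new Path(args[0]);
    // Recursively delete the Hadoop working directory named by the first argument.
    workingPath.getFileSystem(new Configuration()).delete(workingPath, true);
    return null;
  }
}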