Use of org.apache.druid.indexing.worker.config.WorkerConfig in project druid by druid-io.
The class ForkingTaskRunnerTest, method testMaskedIterator.
@Test
public void testMaskedIterator()
{
  Pair<List<String>, String> originalAndExpectedCommand = new Pair<>(
      Lists.list(
          "java -cp",
          "/path/to/somewhere:some-jars.jar",
          // this should not be masked, but there is no way to know that it is not a property;
          // this is probably an unrealistic scenario anyway
          "/some===file",
          "/asecretFileNa=me",
          "-Dsome.property=random",
          "-Dsome.otherproperty = random=random",
          "-Dsome.somesecret = secretvalue",
          "-Dsome.somesecret=secretvalue",
          "-Dsome.somepassword = secret=value",
          "-Dsome.some=notasecret",
          "-Dsome.otherSecret= =asfdhkj352872598====fasdlkjfa="
      ),
      "java -cp /path/to/somewhere:some-jars.jar /some===file /asecretFileNa=<masked> -Dsome.property=random -Dsome.otherproperty = random=random "
      + "-Dsome.somesecret =<masked> -Dsome.somesecret=<masked> -Dsome.somepassword =<masked> -Dsome.some=notasecret -Dsome.otherSecret=<masked>"
  );
  StartupLoggingConfig startupLoggingConfig = new StartupLoggingConfig();
  ForkingTaskRunner forkingTaskRunner =
      new ForkingTaskRunner(new ForkingTaskRunnerConfig(), null, new WorkerConfig(), null, null, null, null, startupLoggingConfig);
  Assert.assertEquals(
      originalAndExpectedCommand.rhs,
      forkingTaskRunner.getMaskedCommand(startupLoggingConfig.getMaskProperties(), originalAndExpectedCommand.lhs)
  );
}
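The expectations above imply a key-based rule: a token is masked when the part before its first '=' contains a masking keyword, case-insensitively, while keywords in the value alone ("-Dsome.some=notasecret") do not trigger masking. A minimal standalone sketch of that idea follows; the keyword list is illustrative (the real keywords come from StartupLoggingConfig.getMaskProperties()), and maskCommand/maskToken are hypothetical helpers, not Druid's actual getMaskedCommand implementation.

import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.stream.Collectors;

public class CommandMaskerSketch
{
  // Illustrative keyword list; the real keywords come from
  // StartupLoggingConfig.getMaskProperties() and may differ.
  private static final List<String> MASK_KEYWORDS = Arrays.asList("secret", "password");

  // Join the masked tokens back into a single command line.
  static String maskCommand(List<String> command)
  {
    return command.stream()
                  .map(CommandMaskerSketch::maskToken)
                  .collect(Collectors.joining(" "));
  }

  // Mask everything after the first '=' in any token whose key part
  // contains a mask keyword, case-insensitively. Keys decide masking,
  // values do not.
  private static String maskToken(String token)
  {
    final int eq = token.indexOf('=');
    if (eq < 0) {
      return token;
    }
    final String key = token.substring(0, eq);
    final String lowerKey = key.toLowerCase(Locale.ROOT);
    for (String keyword : MASK_KEYWORDS) {
      if (lowerKey.contains(keyword)) {
        return key + "=<masked>";
      }
    }
    return token;
  }

  public static void main(String[] args)
  {
    System.out.println(maskCommand(Arrays.asList("-Dsome.somesecret=secretvalue", "-Dsome.some=notasecret")));
    // prints: -Dsome.somesecret=<masked> -Dsome.some=notasecret
  }
}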
Use of org.apache.druid.indexing.worker.config.WorkerConfig in project druid by druid-io.
The class ForkingTaskRunnerTest, method testTaskStatusWhenTaskProcessFails.
@Test
public void testTaskStatusWhenTaskProcessFails() throws ExecutionException, InterruptedException
{
  ForkingTaskRunner forkingTaskRunner = new ForkingTaskRunner(
      new ForkingTaskRunnerConfig(),
      new TaskConfig(
          null, null, null, null, ImmutableList.of(), false, new Period("PT0S"), new Period("PT10S"),
          ImmutableList.of(), false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
      ),
      new WorkerConfig(),
      new Properties(),
      new NoopTaskLogs(),
      new DefaultObjectMapper(),
      new DruidNode("middleManager", "host", false, 8091, null, true, false),
      new StartupLoggingConfig()
  )
  {
    @Override
    ProcessHolder runTaskProcess(List<String> command, File logFile, TaskLocation taskLocation)
    {
      ProcessHolder processHolder = Mockito.mock(ProcessHolder.class);
      Mockito.doNothing().when(processHolder).registerWithCloser(ArgumentMatchers.any());
      Mockito.doNothing().when(processHolder).shutdown();
      return processHolder;
    }

    @Override
    int waitForTaskProcessToComplete(Task task, ProcessHolder processHolder, File logFile, File reportsFile)
    {
      // Emulate task process failure
      return 1;
    }
  };

  final TaskStatus status = forkingTaskRunner.run(NoopTask.create()).get();
  Assert.assertEquals(TaskState.FAILED, status.getStatusCode());
  Assert.assertEquals(
      "Task execution process exited unsuccessfully with code[1]. See middleManager logs for more details.",
      status.getErrorMsg()
  );
}
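The anonymous subclass replaces real process forking with a Mockito stub, so the test exercises only the runner's exit-code handling. For readers unfamiliar with the doNothing().when(...) idiom used in runTaskProcess, here is a self-contained sketch against a hypothetical Shutdownable interface (not part of Druid):

import org.junit.Test;
import org.mockito.Mockito;

public class DoNothingStubbingExample
{
  // Hypothetical interface standing in for ProcessHolder.
  interface Shutdownable
  {
    void shutdown();
  }

  @Test
  public void stubbedShutdownIsANoOpButIsRecorded()
  {
    Shutdownable mock = Mockito.mock(Shutdownable.class);
    // For void methods, doNothing() makes the intent explicit; it is also
    // Mockito's default behavior for unstubbed void calls.
    Mockito.doNothing().when(mock).shutdown();

    mock.shutdown();

    // The interaction is recorded and can be verified after the fact.
    Mockito.verify(mock).shutdown();
  }
}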
Use of org.apache.druid.indexing.worker.config.WorkerConfig in project druid by druid-io.
The class LocalIntermediaryDataManagerAutoCleanupTest, method setup.
@Before
public void setup() throws IOException
{
  final WorkerConfig workerConfig = new WorkerConfig()
  {
    @Override
    public long getIntermediaryPartitionDiscoveryPeriodSec()
    {
      return 1;
    }

    @Override
    public long getIntermediaryPartitionCleanupPeriodSec()
    {
      return 2;
    }

    @Override
    public Period getIntermediaryPartitionTimeout()
    {
      return new Period("PT2S");
    }
  };
  final TaskConfig taskConfig = new TaskConfig(
      null, null, null, null, null, false, null, null,
      ImmutableList.of(new StorageLocationConfig(tempDir.newFolder(), null, null)),
      false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient()
  {
    @Override
    public Map<String, TaskStatus> getTaskStatuses(Set<String> taskIds)
    {
      final Map<String, TaskStatus> result = new HashMap<>();
      for (String taskId : taskIds) {
        result.put(taskId, new TaskStatus(taskId, TaskState.SUCCESS, 10));
      }
      return result;
    }
  };
  intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  intermediaryDataManager.start();
}
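With these overrides, discovery runs every second, cleanup every two seconds, and partitions of finished tasks expire after PT2S, so a test built on this setup can observe auto-cleanup within a few seconds of starting the manager. A generic polling helper such a test might use follows; waitUntil and the partition-existence check it would wrap are illustrative, not part of Druid's API.

import java.util.function.BooleanSupplier;

final class PollingSketch
{
  // Poll a condition until it holds or the deadline passes. A test would
  // call e.g. waitUntil(() -> !partitionDirExists(taskId), 5_000) after the
  // task is reported finished, where partitionDirExists is a hypothetical
  // check against the configured storage location.
  static boolean waitUntil(BooleanSupplier condition, long timeoutMillis) throws InterruptedException
  {
    final long deadline = System.currentTimeMillis() + timeoutMillis;
    while (System.currentTimeMillis() < deadline) {
      if (condition.getAsBoolean()) {
        return true;
      }
      Thread.sleep(100);
    }
    return condition.getAsBoolean();
  }
}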
Use of org.apache.druid.indexing.worker.config.WorkerConfig in project druid by druid-io.
The class ShuffleDataSegmentPusherTest, method setup.
@Before
public void setup() throws IOException
{
  final WorkerConfig workerConfig = new WorkerConfig();
  final TaskConfig taskConfig = new TaskConfig(
      null, null, null, null, null, false, null, null,
      ImmutableList.of(new StorageLocationConfig(temporaryFolder.newFolder(), null, null)),
      false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient();
  if (LOCAL.equals(intermediateDataStore)) {
    intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  } else if (DEEPSTORE.equals(intermediateDataStore)) {
    localDeepStore = temporaryFolder.newFolder("localStorage");
    intermediaryDataManager = new DeepStorageIntermediaryDataManager(
        new LocalDataSegmentPusher(
            new LocalDataSegmentPusherConfig()
            {
              @Override
              public File getStorageDirectory()
              {
                return localDeepStore;
              }
            }
        )
    );
  }
  intermediaryDataManager.start();
  segmentPusher = new ShuffleDataSegmentPusher("supervisorTaskId", "subTaskId", intermediaryDataManager);

  final Injector injector = GuiceInjectors.makeStartupInjectorWithModules(
      ImmutableList.of(binder -> binder.bind(LocalDataSegmentPuller.class))
  );
  mapper = new DefaultObjectMapper();
  mapper.registerModule(new SimpleModule("loadSpecTest").registerSubtypes(LocalLoadSpec.class));
  mapper.setInjectableValues(new GuiceInjectableValues(injector));
  final GuiceAnnotationIntrospector guiceIntrospector = new GuiceAnnotationIntrospector();
  mapper.setAnnotationIntrospectors(
      new AnnotationIntrospectorPair(guiceIntrospector, mapper.getSerializationConfig().getAnnotationIntrospector()),
      new AnnotationIntrospectorPair(guiceIntrospector, mapper.getDeserializationConfig().getAnnotationIntrospector())
  );
}
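The branch on intermediateDataStore suggests the test class runs once per storage flavor. A sketch of the JUnit 4 parameterization that would drive it, assuming LOCAL and DEEPSTORE are simple string constants (their actual values in the real test may differ):

import java.util.Collection;
import com.google.common.collect.ImmutableList;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

@RunWith(Parameterized.class)
public class ShuffleDataSegmentPusherTest
{
  // Assumed values; only the two-way branching above is known from the source.
  private static final String LOCAL = "local";
  private static final String DEEPSTORE = "deepstore";

  @Parameterized.Parameters(name = "intermediateDataStore = {0}")
  public static Collection<Object[]> data()
  {
    return ImmutableList.of(new Object[]{LOCAL}, new Object[]{DEEPSTORE});
  }

  private final String intermediateDataStore;

  public ShuffleDataSegmentPusherTest(String intermediateDataStore)
  {
    this.intermediateDataStore = intermediateDataStore;
  }

  // setup() as shown above, plus the fields it assigns ...
}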
Use of org.apache.druid.indexing.worker.config.WorkerConfig in project druid by druid-io.
The class ShuffleResourceTest, method setup.
@Before
public void setup() throws IOException
{
  final WorkerConfig workerConfig = new WorkerConfig()
  {
    @Override
    public long getIntermediaryPartitionDiscoveryPeriodSec()
    {
      return 1;
    }

    @Override
    public long getIntermediaryPartitionCleanupPeriodSec()
    {
      return 2;
    }

    @Override
    public Period getIntermediaryPartitionTimeout()
    {
      return new Period("PT2S");
    }
  };
  final TaskConfig taskConfig = new TaskConfig(
      null, null, null, null, null, false, null, null,
      ImmutableList.of(new StorageLocationConfig(tempDir.newFolder(), null, null)),
      false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient()
  {
    @Override
    public Map<String, TaskStatus> getTaskStatuses(Set<String> taskIds)
    {
      final Map<String, TaskStatus> result = new HashMap<>();
      for (String taskId : taskIds) {
        result.put(taskId, new TaskStatus(taskId, TaskState.SUCCESS, 10));
      }
      return result;
    }
  };
  intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  shuffleMetrics = new ShuffleMetrics();
  shuffleResource = new ShuffleResource(intermediaryDataManager, Optional.of(shuffleMetrics));
}
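A test over this fixture can then exercise the resource and check that per-supervisor metrics were recorded. A sketch of the metrics side, assuming ShuffleMetrics exposes shuffleRequested(...) and snapshotAndReset() accessors; treat the exact method and type names here as assumptions, not confirmed API:

@Test
public void testShuffleMetricsAreRecorded()
{
  // Record two hypothetical shuffle downloads for one supervisor task;
  // shuffleRequested(supervisorTaskId, bytes) is assumed, not confirmed.
  shuffleMetrics.shuffleRequested("supervisorTaskId", 1024);
  shuffleMetrics.shuffleRequested("supervisorTaskId", 512);

  final Map<String, PerDatasourceShuffleMetrics> snapshot = shuffleMetrics.snapshotAndReset();
  Assert.assertEquals(2, snapshot.get("supervisorTaskId").getShuffleRequests());
  Assert.assertEquals(1536, snapshot.get("supervisorTaskId").getShuffleBytes());
}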