Use of org.apache.druid.segment.loading.StorageLocationConfig in project druid by druid-io.
The setup method of the class LocalIntermediaryDataManagerAutoCleanupTest:
@Before
public void setup() throws IOException {
  final WorkerConfig workerConfig = new WorkerConfig() {
    @Override
    public long getIntermediaryPartitionDiscoveryPeriodSec() {
      return 1;
    }

    @Override
    public long getIntermediaryPartitionCleanupPeriodSec() {
      return 2;
    }

    @Override
    public Period getIntermediaryPartitionTimeout() {
      return new Period("PT2S");
    }
  };
  final TaskConfig taskConfig = new TaskConfig(
      null, null, null, null, null, false, null, null,
      ImmutableList.of(new StorageLocationConfig(tempDir.newFolder(), null, null)),
      false, false,
      TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient() {
    @Override
    public Map<String, TaskStatus> getTaskStatuses(Set<String> taskIds) {
      final Map<String, TaskStatus> result = new HashMap<>();
      for (String taskId : taskIds) {
        result.put(taskId, new TaskStatus(taskId, TaskState.SUCCESS, 10));
      }
      return result;
    }
  };
  intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  intermediaryDataManager.start();
}
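With this configuration, partitions are discovered every second, cleanup runs every two seconds, and the timeout is PT2S (the ISO-8601 notation for two seconds); because the stubbed client reports every task as SUCCESS, any partition the manager discovers becomes eligible for deletion. A test built on this setup typically just polls until the background thread removes the partition from disk. A minimal sketch of such a poll, where partitionDir is a hypothetical handle to the partition's on-disk location (the real test resolves it through LocalIntermediaryDataManager's own lookup methods):

import java.io.File;
import java.util.concurrent.TimeUnit;

// Poll until the cleanup thread deletes the partition directory, or give up
// after timeoutMillis. "partitionDir" is a hypothetical handle; resolve it
// however your test lays out intermediary segments on disk.
static boolean waitForCleanup(File partitionDir, long timeoutMillis) throws InterruptedException {
  final long deadline = System.currentTimeMillis() + timeoutMillis;
  while (partitionDir.exists()) {
    if (System.currentTimeMillis() > deadline) {
      return false; // cleanup did not happen in time
    }
    TimeUnit.MILLISECONDS.sleep(100);
  }
  return true; // directory is gone: the cleanup thread ran
}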
Use of org.apache.druid.segment.loading.StorageLocationConfig in project druid by druid-io.
The setup method of the class ShuffleDataSegmentPusherTest:
@Before
public void setup() throws IOException {
  final WorkerConfig workerConfig = new WorkerConfig();
  final TaskConfig taskConfig = new TaskConfig(
      null, null, null, null, null, false, null, null,
      ImmutableList.of(new StorageLocationConfig(temporaryFolder.newFolder(), null, null)),
      false, false,
      TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient();
  if (LOCAL.equals(intermediateDataStore)) {
    intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  } else if (DEEPSTORE.equals(intermediateDataStore)) {
    localDeepStore = temporaryFolder.newFolder("localStorage");
    intermediaryDataManager = new DeepStorageIntermediaryDataManager(
        new LocalDataSegmentPusher(
            new LocalDataSegmentPusherConfig() {
              @Override
              public File getStorageDirectory() {
                return localDeepStore;
              }
            }
        )
    );
  }
  intermediaryDataManager.start();
  segmentPusher = new ShuffleDataSegmentPusher("supervisorTaskId", "subTaskId", intermediaryDataManager);
  final Injector injector = GuiceInjectors.makeStartupInjectorWithModules(
      ImmutableList.of(binder -> binder.bind(LocalDataSegmentPuller.class))
  );
  mapper = new DefaultObjectMapper();
  mapper.registerModule(new SimpleModule("loadSpecTest").registerSubtypes(LocalLoadSpec.class));
  mapper.setInjectableValues(new GuiceInjectableValues(injector));
  final GuiceAnnotationIntrospector guiceIntrospector = new GuiceAnnotationIntrospector();
  mapper.setAnnotationIntrospectors(
      new AnnotationIntrospectorPair(guiceIntrospector, mapper.getSerializationConfig().getAnnotationIntrospector()),
      new AnnotationIntrospectorPair(guiceIntrospector, mapper.getDeserializationConfig().getAnnotationIntrospector())
  );
}
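Most of the mapper wiring exists so the test can deserialize the load spec the pusher writes: registerSubtypes teaches Jackson to resolve the "type" discriminator in load-spec JSON to LocalLoadSpec, while the Guice introspector lets injected values be filled in during deserialization. A self-contained sketch of the subtype mechanism alone, using toy Spec classes rather than Druid's (all names here are illustrative):

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.annotation.JsonTypeName;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;

public class SubtypeDemo {
  // The base type carries a "type" discriminator, like Druid's load specs.
  @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
  interface Spec {}

  @JsonTypeName("local")
  static class LocalSpec implements Spec {
    final String path;

    @JsonCreator
    LocalSpec(@JsonProperty("path") String path) {
      this.path = path;
    }
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    // registerSubtypes maps the "local" discriminator to LocalSpec, mirroring
    // registerSubtypes(LocalLoadSpec.class) in the setup above.
    mapper.registerModule(new SimpleModule("specDemo").registerSubtypes(LocalSpec.class));
    Spec spec = mapper.readValue("{\"type\":\"local\",\"path\":\"/tmp/segment\"}", Spec.class);
    System.out.println(((LocalSpec) spec).path); // /tmp/segment
  }
}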
Use of org.apache.druid.segment.loading.StorageLocationConfig in project druid by druid-io.
The setup method of the class ShuffleResourceTest:
@Before
public void setup() throws IOException {
  final WorkerConfig workerConfig = new WorkerConfig() {
    @Override
    public long getIntermediaryPartitionDiscoveryPeriodSec() {
      return 1;
    }

    @Override
    public long getIntermediaryPartitionCleanupPeriodSec() {
      return 2;
    }

    @Override
    public Period getIntermediaryPartitionTimeout() {
      return new Period("PT2S");
    }
  };
  final TaskConfig taskConfig = new TaskConfig(
      null, null, null, null, null, false, null, null,
      ImmutableList.of(new StorageLocationConfig(tempDir.newFolder(), null, null)),
      false, false,
      TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient() {
    @Override
    public Map<String, TaskStatus> getTaskStatuses(Set<String> taskIds) {
      final Map<String, TaskStatus> result = new HashMap<>();
      for (String taskId : taskIds) {
        result.put(taskId, new TaskStatus(taskId, TaskState.SUCCESS, 10));
      }
      return result;
    }
  };
  intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  shuffleMetrics = new ShuffleMetrics();
  shuffleResource = new ShuffleResource(intermediaryDataManager, Optional.of(shuffleMetrics));
}
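The metrics object is wrapped in an Optional, presumably so the resource also works when no shuffle monitor is installed: the resource can record a request only when metrics are present and skip the call otherwise. The pattern in isolation, with a plain counter standing in for ShuffleMetrics:

import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;

public class OptionalMetricsDemo {
  public static void main(String[] args) {
    // A toy counter stands in for ShuffleMetrics: record only when present.
    final AtomicLong bytesServed = new AtomicLong();
    final Optional<AtomicLong> metrics = Optional.of(bytesServed);
    metrics.ifPresent(m -> m.addAndGet(1024L)); // silently a no-op for Optional.empty()
    System.out.println(bytesServed.get()); // 1024
  }
}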
Use of org.apache.druid.segment.loading.StorageLocationConfig in project druid by druid-io.
The setup method of the class LocalIntermediaryDataManagerManualAddAndDeleteTest:
@Before
public void setup() throws IOException {
  final WorkerConfig workerConfig = new WorkerConfig();
  intermediarySegmentsLocation = tempDir.newFolder();
  siblingLocation = tempDir.newFolder();
  final TaskConfig taskConfig = new TaskConfig(
      null, null, null, null, null, false, null, null,
      ImmutableList.of(new StorageLocationConfig(intermediarySegmentsLocation, 1200L, null)),
      false, false,
      TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient();
  intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  intermediaryDataManager.start();
}
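The notable difference from the other setups is the 1200L second argument. Judging from its use across this page, StorageLocationConfig takes the location path, an optional byte capacity, and an optional free-space percentage; capping the intermediary location at 1,200 bytes lets a manual add-and-delete test exercise the out-of-capacity path with tiny files. A hedged construction sketch (argument meanings are inferred from usage here, not from the class's documentation, and the directory is hypothetical):

import java.io.File;
import org.apache.druid.segment.loading.StorageLocationConfig;

public class LocationDemo {
  public static void main(String[] args) {
    // Path, optional max bytes, optional free-space percent; both limits
    // are nullable, as the other setups on this page show.
    File location = new File("/tmp/intermediary"); // hypothetical directory
    StorageLocationConfig tiny = new StorageLocationConfig(location, 1200L, null);
    System.out.println(tiny);
  }
}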
Use of org.apache.druid.segment.loading.StorageLocationConfig in project druid by druid-io.
The testServerMaxSizePrecedence method of the class DruidServerConfigTest:
@Test
public void testServerMaxSizePrecedence() throws Exception {
  String serverConfigWithDefaultSizeStr =
      "{\"maxSize\":0,\"tier\":\"_default_tier\",\"priority\":0,"
      + "\"hiddenProperties\":[\"druid.metadata.storage.connector.password\","
      + "\"druid.s3.accessKey\",\"druid.s3.secretKey\"]}\n";
  String serverConfigWithNonDefaultSizeStr =
      "{\"maxSize\":123456,\"tier\":\"_default_tier\",\"priority\":0,"
      + "\"hiddenProperties\":[\"druid.metadata.storage.connector.password\","
      + "\"druid.s3.accessKey\",\"druid.s3.secretKey\"]}\n";
  final List<StorageLocationConfig> locations = new ArrayList<>();
  final StorageLocationConfig locationConfig1 = new StorageLocationConfig(testSegmentCacheDir1, 10000000000L, null);
  locations.add(locationConfig1);
  mapper.setInjectableValues(
      new InjectableValues.Std()
          .addValue(ObjectMapper.class, new DefaultObjectMapper())
          .addValue(SegmentLoaderConfig.class, new SegmentLoaderConfig().withLocations(locations))
  );
  DruidServerConfig serverConfigWithDefaultSize = mapper.readValue(
      mapper.writeValueAsString(mapper.readValue(serverConfigWithDefaultSizeStr, DruidServerConfig.class)),
      DruidServerConfig.class
  );
  DruidServerConfig serverConfigWithNonDefaultSize = mapper.readValue(
      mapper.writeValueAsString(mapper.readValue(serverConfigWithNonDefaultSizeStr, DruidServerConfig.class)),
      DruidServerConfig.class
  );
  Assert.assertEquals(10000000000L, serverConfigWithDefaultSize.getMaxSize());
  Assert.assertEquals(123456L, serverConfigWithNonDefaultSize.getMaxSize());
}
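The round trip demonstrates the precedence rule: when the JSON's maxSize is 0 (the default), DruidServerConfig falls back to the injected SegmentLoaderConfig's location capacity (10000000000 bytes here), while a non-default maxSize such as 123456 wins outright. The injection itself is plain Jackson: InjectableValues supplies constructor arguments that the JSON does not carry. A self-contained illustration with a toy config class (names are illustrative, not Druid's):

import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.InjectableValues;
import com.fasterxml.jackson.databind.ObjectMapper;

public class InjectDemo {
  static class ServerConfig {
    final long maxSize;
    final long fallbackSize;

    @JsonCreator
    ServerConfig(@JsonProperty("maxSize") long maxSize, @JacksonInject Long fallbackSize) {
      this.maxSize = maxSize;
      this.fallbackSize = fallbackSize;
    }

    long effectiveMaxSize() {
      // Mirror the precedence under test: 0 means "use the injected fallback".
      return maxSize == 0 ? fallbackSize : maxSize;
    }
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    // The injected Long stands in for SegmentLoaderConfig's summed capacity.
    mapper.setInjectableValues(new InjectableValues.Std().addValue(Long.class, 10000000000L));
    ServerConfig dflt = mapper.readValue("{\"maxSize\":0}", ServerConfig.class);
    ServerConfig explicit = mapper.readValue("{\"maxSize\":123456}", ServerConfig.class);
    System.out.println(dflt.effectiveMaxSize());     // 10000000000
    System.out.println(explicit.effectiveMaxSize()); // 123456
  }
}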