Use of org.apache.druid.client.indexing.IndexingServiceClient in project druid by druid-io.
From class CompactSegmentsTest, method testSerde:
@Test
public void testSerde() throws Exception {
  final TestDruidLeaderClient leaderClient = new TestDruidLeaderClient(JSON_MAPPER);
  final HttpIndexingServiceClient indexingServiceClient = new HttpIndexingServiceClient(JSON_MAPPER, leaderClient);
  // Register injectable values so deserialization can re-create the fields
  // that are not written to JSON
  JSON_MAPPER.setInjectableValues(
      new InjectableValues.Std()
          .addValue(DruidCoordinatorConfig.class, COORDINATOR_CONFIG)
          .addValue(ObjectMapper.class, JSON_MAPPER)
          .addValue(IndexingServiceClient.class, indexingServiceClient)
  );
  final CompactSegments compactSegments = new CompactSegments(COORDINATOR_CONFIG, JSON_MAPPER, indexingServiceClient);
  // Round-trip through JSON and verify the injected dependencies survive
  String compactSegmentString = JSON_MAPPER.writeValueAsString(compactSegments);
  CompactSegments serdeCompactSegments = JSON_MAPPER.readValue(compactSegmentString, CompactSegments.class);
  Assert.assertNotNull(serdeCompactSegments);
  Assert.assertEquals(COORDINATOR_CONFIG.getCompactionSkipLockedIntervals(), serdeCompactSegments.isSkipLockedIntervals());
  Assert.assertEquals(indexingServiceClient, serdeCompactSegments.getIndexingServiceClient());
}
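The round trip works only because of the InjectableValues registered first: CompactSegments does not write its coordinator config or indexing-service client to JSON, so Jackson has to supply them again at read time. A minimal sketch of that pattern, using hypothetical Holder/Service types rather than Druid's classes:

import com.fasterxml.jackson.annotation.JacksonInject;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.databind.InjectableValues;
import com.fasterxml.jackson.databind.ObjectMapper;

public class InjectionDemo {
  interface Service {}

  static class Holder {
    final Service service; // never written to JSON; re-injected on readValue()

    @JsonCreator
    Holder(@JacksonInject Service service) {
      this.service = service;
    }
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    Service service = new Service() {};
    // Register the live instance so deserialization can satisfy @JacksonInject
    mapper.setInjectableValues(new InjectableValues.Std().addValue(Service.class, service));
    Holder holder = mapper.readValue("{}", Holder.class);
    System.out.println(holder.service == service); // true
  }
}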
Use of org.apache.druid.client.indexing.IndexingServiceClient in project druid by druid-io.
From class CompactSegmentsTest, method testRunWithLockedIntervalsNoSkip:
@Test
public void testRunWithLockedIntervalsNoSkip() {
  // Disable skipping so compaction proceeds even on locked intervals
  Mockito.when(COORDINATOR_CONFIG.getCompactionSkipLockedIntervals()).thenReturn(false);
  final TestDruidLeaderClient leaderClient = new TestDruidLeaderClient(JSON_MAPPER);
  leaderClient.start();
  HttpIndexingServiceClient indexingServiceClient = new HttpIndexingServiceClient(JSON_MAPPER, leaderClient);

  // Lock the interval 2017/2018 for all three dataSources
  final String datasource0 = DATA_SOURCE_PREFIX + 0;
  leaderClient.lockedIntervals.computeIfAbsent(datasource0, k -> new ArrayList<>()).add(Intervals.of("2017/2018"));
  final String datasource1 = DATA_SOURCE_PREFIX + 1;
  leaderClient.lockedIntervals.computeIfAbsent(datasource1, k -> new ArrayList<>()).add(Intervals.of("2017/2018"));
  final String datasource2 = DATA_SOURCE_PREFIX + 2;
  leaderClient.lockedIntervals.computeIfAbsent(datasource2, k -> new ArrayList<>()).add(Intervals.of("2017/2018"));

  // Verify that no locked intervals are skipped
  CompactSegments compactSegments = new CompactSegments(COORDINATOR_CONFIG, JSON_MAPPER, indexingServiceClient);
  int maxTaskSlots = partitionsSpec instanceof SingleDimensionPartitionsSpec ? 5 : 3;
  final CoordinatorStats stats = doCompactSegments(compactSegments, createCompactionConfigs(1), maxTaskSlots);
  Assert.assertEquals(3, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
  Assert.assertEquals(3, leaderClient.submittedCompactionTasks.size());
  leaderClient.submittedCompactionTasks.forEach(
      task -> System.out.println(task.getDataSource() + " : " + task.getIoConfig().getInputSpec().getInterval())
  );

  // Verify that tasks are submitted for the latest interval of each dataSource
  final Map<String, Interval> datasourceToInterval = new HashMap<>();
  leaderClient.submittedCompactionTasks.forEach(
      task -> datasourceToInterval.put(task.getDataSource(), task.getIoConfig().getInputSpec().getInterval())
  );
  Assert.assertEquals(Intervals.of("2017-01-09T00:00:00Z/2017-01-09T12:00:00Z"), datasourceToInterval.get(datasource0));
  Assert.assertEquals(Intervals.of("2017-01-09T00:00:00Z/2017-01-09T12:00:00Z"), datasourceToInterval.get(datasource1));
  Assert.assertEquals(Intervals.of("2017-01-09T00:00:00Z/2017-01-09T12:00:00Z"), datasourceToInterval.get(datasource2));
}
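Intervals.of("2017/2018") parses an ISO-8601 interval string, which is why a single lock covers every segment the test compacts, including the 2017-01-09 half-day interval asserted above. A rough plain-Joda-Time equivalent (a sketch; Druid's helper also normalizes the chronology and tightens error handling):

import org.joda.time.Interval;
import org.joda.time.chrono.ISOChronology;

public class IntervalDemo {
  public static void main(String[] args) {
    // "2017/2018" covers 2017-01-01T00:00:00Z (inclusive) to 2018-01-01T00:00:00Z (exclusive)
    Interval yearOf2017 = new Interval("2017/2018", ISOChronology.getInstanceUTC());
    System.out.println(yearOf2017); // 2017-01-01T00:00:00.000Z/2018-01-01T00:00:00.000Z
  }
}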
Use of org.apache.druid.client.indexing.IndexingServiceClient in project druid by druid-io.
From class LocalIntermediaryDataManagerAutoCleanupTest, method setup:
@Before
public void setup() throws IOException {
  // Short discovery/cleanup periods and timeout so the auto-cleanup runs within the test
  final WorkerConfig workerConfig = new WorkerConfig() {
    @Override
    public long getIntermediaryPartitionDiscoveryPeriodSec() {
      return 1;
    }

    @Override
    public long getIntermediaryPartitionCleanupPeriodSec() {
      return 2;
    }

    @Override
    public Period getIntermediaryPartitionTimeout() {
      return new Period("PT2S");
    }
  };
  final TaskConfig taskConfig = new TaskConfig(
      null, null, null, null, null, false, null, null,
      ImmutableList.of(new StorageLocationConfig(tempDir.newFolder(), null, null)),
      false, false,
      TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  // Stub client that reports every task as successfully completed, so the
  // manager considers their intermediary data eligible for cleanup
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient() {
    @Override
    public Map<String, TaskStatus> getTaskStatuses(Set<String> taskIds) {
      final Map<String, TaskStatus> result = new HashMap<>();
      for (String taskId : taskIds) {
        result.put(taskId, new TaskStatus(taskId, TaskState.SUCCESS, 10));
      }
      return result;
    }
  };
  intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  intermediaryDataManager.start();
}
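Those overridden WorkerConfig periods drive LocalIntermediaryDataManager's background discovery/cleanup cycle, and the stubbed getTaskStatuses makes every task look finished so cleanup fires within the test's short timeout. A rough, self-contained sketch of such a cycle (plain Java with placeholder types, not Druid's actual implementation):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class CleanupLoopSketch {
  // Placeholder status type; the real loop consults Druid's TaskStatus
  enum Status { RUNNING, SUCCESS, FAILED }

  public static void main(String[] args) {
    Map<String, Status> overlord = new HashMap<>(); // stands in for the IndexingServiceClient
    overlord.put("task1", Status.SUCCESS);
    overlord.put("task2", Status.RUNNING);
    Set<String> tasksWithDataOnDisk = new HashSet<>(overlord.keySet());

    ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();
    exec.scheduleAtFixedRate(() -> {
      // Ask for the status of every task that still owns intermediary data,
      // then delete the data of tasks that have completed
      for (String taskId : new HashSet<>(tasksWithDataOnDisk)) {
        if (overlord.get(taskId) != Status.RUNNING) {
          tasksWithDataOnDisk.remove(taskId); // stands in for deleting partition files
          System.out.println("cleaned up " + taskId);
        }
      }
    }, 0, 2, TimeUnit.SECONDS); // 2s mirrors getIntermediaryPartitionCleanupPeriodSec() above
  }
}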
Use of org.apache.druid.client.indexing.IndexingServiceClient in project druid by druid-io.
From class ShuffleDataSegmentPusherTest, method setup:
@Before
public void setup() throws IOException {
  final WorkerConfig workerConfig = new WorkerConfig();
  final TaskConfig taskConfig = new TaskConfig(
      null, null, null, null, null, false, null, null,
      ImmutableList.of(new StorageLocationConfig(temporaryFolder.newFolder(), null, null)),
      false, false,
      TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient();
  if (LOCAL.equals(intermediateDataStore)) {
    intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  } else if (DEEPSTORE.equals(intermediateDataStore)) {
    localDeepStore = temporaryFolder.newFolder("localStorage");
    intermediaryDataManager = new DeepStorageIntermediaryDataManager(
        new LocalDataSegmentPusher(
            new LocalDataSegmentPusherConfig() {
              @Override
              public File getStorageDirectory() {
                return localDeepStore;
              }
            }
        )
    );
  }
  intermediaryDataManager.start();
  segmentPusher = new ShuffleDataSegmentPusher("supervisorTaskId", "subTaskId", intermediaryDataManager);

  // Configure an ObjectMapper that can deserialize LocalLoadSpec, with its
  // dependencies supplied through Guice rather than Jackson's own injection
  final Injector injector = GuiceInjectors.makeStartupInjectorWithModules(
      ImmutableList.of(binder -> binder.bind(LocalDataSegmentPuller.class))
  );
  mapper = new DefaultObjectMapper();
  mapper.registerModule(new SimpleModule("loadSpecTest").registerSubtypes(LocalLoadSpec.class));
  mapper.setInjectableValues(new GuiceInjectableValues(injector));
  final GuiceAnnotationIntrospector guiceIntrospector = new GuiceAnnotationIntrospector();
  mapper.setAnnotationIntrospectors(
      new AnnotationIntrospectorPair(guiceIntrospector, mapper.getSerializationConfig().getAnnotationIntrospector()),
      new AnnotationIntrospectorPair(guiceIntrospector, mapper.getDeserializationConfig().getAnnotationIntrospector())
  );
}
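The registerSubtypes call is what lets the mapper resolve a loadSpec whose type field is "local" to LocalLoadSpec, while GuiceInjectableValues supplies the bound LocalDataSegmentPuller during deserialization. A minimal sketch of the subtype half of that mechanism, with hypothetical LoadSpec/LocalSpec types in place of Druid's:

import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.annotation.JsonTypeName;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;

public class SubtypeDemo {
  @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
  interface LoadSpec {}

  @JsonTypeName("local")
  static class LocalSpec implements LoadSpec {
    @JsonProperty
    public String path;
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    // registerSubtypes maps the @JsonTypeName "local" to LocalSpec at read time
    mapper.registerModule(new SimpleModule("demo").registerSubtypes(LocalSpec.class));
    LoadSpec spec = mapper.readValue("{\"type\":\"local\",\"path\":\"/tmp/seg\"}", LoadSpec.class);
    System.out.println(((LocalSpec) spec).path); // /tmp/seg
  }
}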
Use of org.apache.druid.client.indexing.IndexingServiceClient in project druid by druid-io.
From class ShuffleResourceTest, method setup:
@Before
public void setup() throws IOException {
  // Same short periods as the auto-cleanup test: discover every second,
  // clean up every 2 seconds, expire partitions after 2 seconds
  final WorkerConfig workerConfig = new WorkerConfig() {
    @Override
    public long getIntermediaryPartitionDiscoveryPeriodSec() {
      return 1;
    }

    @Override
    public long getIntermediaryPartitionCleanupPeriodSec() {
      return 2;
    }

    @Override
    public Period getIntermediaryPartitionTimeout() {
      return new Period("PT2S");
    }
  };
  final TaskConfig taskConfig = new TaskConfig(
      null, null, null, null, null, false, null, null,
      ImmutableList.of(new StorageLocationConfig(tempDir.newFolder(), null, null)),
      false, false,
      TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name()
  );
  // Stub client that reports every task as successfully completed
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient() {
    @Override
    public Map<String, TaskStatus> getTaskStatuses(Set<String> taskIds) {
      final Map<String, TaskStatus> result = new HashMap<>();
      for (String taskId : taskIds) {
        result.put(taskId, new TaskStatus(taskId, TaskState.SUCCESS, 10));
      }
      return result;
    }
  };
  intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  shuffleMetrics = new ShuffleMetrics();
  shuffleResource = new ShuffleResource(intermediaryDataManager, Optional.of(shuffleMetrics));
}
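ShuffleResource takes its metrics as an Optional because shuffle metrics exist only when the corresponding monitor is enabled, so the resource must guard every recording call. A minimal sketch of that guard pattern with a hypothetical Metrics class (the names here are placeholders, not ShuffleMetrics' real API):

import java.util.Optional;

public class OptionalMetricsSketch {
  // Placeholder metrics type; the real resource receives Optional<ShuffleMetrics>
  static class Metrics {
    void recordShuffle(String supervisorTaskId, long bytes) {
      System.out.println(supervisorTaskId + " shuffled " + bytes + " bytes");
    }
  }

  public static void main(String[] args) {
    Optional<Metrics> metrics = Optional.of(new Metrics()); // Optional.empty() when monitoring is off
    // Record only when metrics were actually supplied
    metrics.ifPresent(m -> m.recordShuffle("supervisorTaskId", 1024L));
  }
}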