Use of org.apache.druid.client.indexing.IndexingServiceClient in project druid by druid-io.
From the class LocalIntermediaryDataManagerManualAddAndDeleteTest, method setup:
@Before
public void setup() throws IOException
{
  final WorkerConfig workerConfig = new WorkerConfig();
  intermediarySegmentsLocation = tempDir.newFolder();
  siblingLocation = tempDir.newFolder();
  // Single storage location for intermediary segments, capped at 1200 bytes.
  final TaskConfig taskConfig = new TaskConfig(null, null, null, null, null, false, null, null, ImmutableList.of(new StorageLocationConfig(intermediarySegmentsLocation, 1200L, null)), false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name());
  // No real Overlord is needed for this test, so a no-op client stands in.
  final IndexingServiceClient indexingServiceClient = new NoopIndexingServiceClient();
  intermediaryDataManager = new LocalIntermediaryDataManager(workerConfig, taskConfig, indexingServiceClient);
  intermediaryDataManager.start();
}
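Since setup() starts the manager before every test, a matching @After teardown keeps tests isolated. A minimal sketch, assuming LocalIntermediaryDataManager exposes a stop() counterpart to start() (an assumption based on the usual Druid lifecycle convention, not confirmed by the snippet above):

@After
public void teardown()
{
  // Assumption: stop() releases whatever resources start() acquired.
  intermediaryDataManager.stop();
}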
Use of org.apache.druid.client.indexing.IndexingServiceClient in project druid by druid-io.
From the class DataSourcesResourceTest, method testKillSegmentsInIntervalInDataSource:
@Test
public void testKillSegmentsInIntervalInDataSource()
{
  String interval = "2010-01-01_P1D";
  Interval theInterval = Intervals.of(interval.replace('_', '/'));
  // Record phase: exactly one kill call is expected on the strict mock.
  IndexingServiceClient indexingServiceClient = EasyMock.createStrictMock(IndexingServiceClient.class);
  indexingServiceClient.killUnusedSegments("api-issued", "datasource1", theInterval);
  EasyMock.expectLastCall().once();
  EasyMock.replay(indexingServiceClient, server);
  DataSourcesResource dataSourcesResource = new DataSourcesResource(inventoryView, null, null, indexingServiceClient, null, null);
  Response response = dataSourcesResource.killUnusedSegmentsInInterval("datasource1", interval);
  Assert.assertEquals(200, response.getStatus());
  Assert.assertNull(response.getEntity());
  EasyMock.verify(indexingServiceClient, server);
}
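The record/replay/verify cycle above is the standard EasyMock lifecycle: calls made before replay() are recorded as expectations, calls made after it are matched against the recording, and verify() fails the test if any expectation went unmet. A minimal self-contained sketch of the same pattern, built around a hypothetical Notifier interface (not part of Druid):

import org.easymock.EasyMock;
import org.junit.Test;

public class NotifierTest
{
  interface Notifier
  {
    void notify(String channel, String message);
  }

  @Test
  public void testNotifyCalledExactlyOnce()
  {
    // Record phase: a strict mock also enforces call order.
    Notifier notifier = EasyMock.createStrictMock(Notifier.class);
    notifier.notify("ops", "done");
    EasyMock.expectLastCall().once();

    // Replay phase: the mock now checks real calls against the recording.
    EasyMock.replay(notifier);
    notifier.notify("ops", "done");

    // Verify phase: fails if the expected call never happened.
    EasyMock.verify(notifier);
  }
}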
Use of org.apache.druid.client.indexing.IndexingServiceClient in project druid by druid-io.
From the class DataSourcesResourceTest, method testMarkAsUnusedAllSegmentsInDataSource:
@Test
public void testMarkAsUnusedAllSegmentsInDataSource()
{
  IndexingServiceClient indexingServiceClient = EasyMock.createStrictMock(IndexingServiceClient.class);
  EasyMock.replay(indexingServiceClient, server);
  DataSourcesResource dataSourcesResource = new DataSourcesResource(inventoryView, null, null, indexingServiceClient, null, null);
  try {
    // "???" is not a parseable interval, so either a 400 (Bad Request)
    // response or an IllegalArgumentException is expected.
    Response response = dataSourcesResource.markAsUnusedAllSegmentsOrKillUnusedSegmentsInInterval("datasource", "true", "???");
    Assert.assertEquals(400, response.getStatus());
    Assert.assertNotNull(response.getEntity());
    Assert.assertTrue(response.getEntity().toString().contains("java.lang.IllegalArgumentException"));
  }
  catch (IllegalArgumentException ignore) {
    // expected
  }
  EasyMock.verify(indexingServiceClient, server);
}
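The try/catch wrapped around the assertions tolerates two failure styles, since the bad interval string may be rejected either by the resource itself (as a 400 response) or deeper in the call stack (as a thrown IllegalArgumentException). A minimal standalone sketch of the same dual-outcome pattern, using a hypothetical parseOrThrow helper (not part of Druid):

import org.junit.Assert;
import org.junit.Test;

public class DualOutcomeTest
{
  // Hypothetical validation target that throws on garbage input.
  static int parseOrThrow(String input)
  {
    // NumberFormatException extends IllegalArgumentException.
    return Integer.parseInt(input.trim());
  }

  @Test
  public void testBadInputIsRejected()
  {
    try {
      int result = parseOrThrow("???");
      Assert.fail("expected rejection, got " + result);
    }
    catch (IllegalArgumentException expected) {
      // expected: this catch covers NumberFormatException too.
    }
  }
}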
Use of org.apache.druid.client.indexing.IndexingServiceClient in project druid by druid-io.
From the class CompactSegmentsTest, method testRunWithLockedIntervals:
@Test
public void testRunWithLockedIntervals()
{
  final TestDruidLeaderClient leaderClient = new TestDruidLeaderClient(JSON_MAPPER);
  leaderClient.start();
  HttpIndexingServiceClient indexingServiceClient = new HttpIndexingServiceClient(JSON_MAPPER, leaderClient);

  // Lock all intervals for dataSource_1 and dataSource_2
  final String datasource1 = DATA_SOURCE_PREFIX + 1;
  leaderClient.lockedIntervals.computeIfAbsent(datasource1, k -> new ArrayList<>()).add(Intervals.of("2017/2018"));
  final String datasource2 = DATA_SOURCE_PREFIX + 2;
  leaderClient.lockedIntervals.computeIfAbsent(datasource2, k -> new ArrayList<>()).add(Intervals.of("2017/2018"));

  // Lock all intervals but one for dataSource_0
  final String datasource0 = DATA_SOURCE_PREFIX + 0;
  leaderClient.lockedIntervals.computeIfAbsent(datasource0, k -> new ArrayList<>()).add(Intervals.of("2017-01-01T13:00:00Z/2017-02-01"));

  // Verify that locked intervals are skipped and only one compaction task
  // is submitted for dataSource_0
  CompactSegments compactSegments = new CompactSegments(COORDINATOR_CONFIG, JSON_MAPPER, indexingServiceClient);
  final CoordinatorStats stats = doCompactSegments(compactSegments, createCompactionConfigs(2), 4);
  Assert.assertEquals(1, stats.getGlobalStat(CompactSegments.COMPACTION_TASK_COUNT));
  Assert.assertEquals(1, leaderClient.submittedCompactionTasks.size());
  final ClientCompactionTaskQuery compactionTask = leaderClient.submittedCompactionTasks.get(0);
  Assert.assertEquals(datasource0, compactionTask.getDataSource());
  Assert.assertEquals(Intervals.of("2017-01-01T00:00:00/2017-01-01T12:00:00"), compactionTask.getIoConfig().getInputSpec().getInterval());
}
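The computeIfAbsent calls above are the standard idiom for building a multimap-style Map<String, List<V>> without pre-initializing each key. A minimal standalone sketch (plain strings stand in for Druid's Interval type):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class LockedIntervalsSketch
{
  public static void main(String[] args)
  {
    final Map<String, List<String>> lockedIntervals = new HashMap<>();

    // The first call creates the list for the key; later calls reuse it.
    lockedIntervals.computeIfAbsent("dataSource_1", k -> new ArrayList<>()).add("2017/2018");
    lockedIntervals.computeIfAbsent("dataSource_1", k -> new ArrayList<>()).add("2019/2020");

    // Prints: {dataSource_1=[2017/2018, 2019/2020]}
    System.out.println(lockedIntervals);
  }
}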