Use of org.apache.druid.server.coordination.DataSegmentChangeHandler in project druid by druid-io.
From the class LoadQueuePeonTest, method testMultipleLoadDropSegments:
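
The test enqueues five segment drops and five segment loads on a CuratorLoadQueuePeon and uses CountDownLatch signals, together with a PathChildrenCache watching the ZooKeeper load-queue path, to verify that every change request is written to ZooKeeper and that load requests are processed newest-interval-first.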
@Test
public void testMultipleLoadDropSegments() throws Exception {
  loadQueuePeon = new CuratorLoadQueuePeon(
      curator,
      LOAD_QUEUE_PATH,
      jsonMapper,
      Execs.scheduledSingleThreaded("test_load_queue_peon_scheduled-%d"),
      Execs.singleThreaded("test_load_queue_peon-%d"),
      new TestDruidCoordinatorConfig(
          null, null, null, null, null, null, null, null,
          null, null, null, null, null, null, null, null,
          10,
          Duration.millis(0)
      )
  );
  loadQueuePeon.start();
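
  // One latch per segment id: the *RequestSignals latches fire when a request
  // appears in ZooKeeper; the segmentLoaded/segmentDropped latches fire when
  // the peon runs the completion callback.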
  ConcurrentMap<SegmentId, CountDownLatch> loadRequestSignals = new ConcurrentHashMap<>(5);
  ConcurrentMap<SegmentId, CountDownLatch> dropRequestSignals = new ConcurrentHashMap<>(5);
  ConcurrentMap<SegmentId, CountDownLatch> segmentLoadedSignals = new ConcurrentHashMap<>(5);
  ConcurrentMap<SegmentId, CountDownLatch> segmentDroppedSignals = new ConcurrentHashMap<>(5);
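
  // Five daily segments to drop, listed newest-first.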
  final List<DataSegment> segmentToDrop = Lists.transform(
      ImmutableList.of(
          "2014-10-26T00:00:00Z/P1D",
          "2014-10-25T00:00:00Z/P1D",
          "2014-10-24T00:00:00Z/P1D",
          "2014-10-23T00:00:00Z/P1D",
          "2014-10-22T00:00:00Z/P1D"
      ),
      intervalStr -> dataSegmentWithInterval(intervalStr)
  );
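
  // Latches are created up front because Lists.transform returns a lazy view:
  // minting latches inside the transform function would create fresh ones on
  // every iteration over the list.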
  final CountDownLatch[] dropRequestLatches = new CountDownLatch[5];
  final CountDownLatch[] dropSegmentLatches = new CountDownLatch[5];
  for (int i = 0; i < 5; i++) {
    dropRequestLatches[i] = new CountDownLatch(1);
    dropSegmentLatches[i] = new CountDownLatch(1);
  }
  int i = 0;
  for (DataSegment s : segmentToDrop) {
    dropRequestSignals.put(s.getId(), dropRequestLatches[i]);
    segmentDroppedSignals.put(s.getId(), dropSegmentLatches[i++]);
  }
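
  // Five segments to load, deliberately listed out of interval order.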
  final List<DataSegment> segmentToLoad = Lists.transform(
      ImmutableList.of(
          "2014-10-27T00:00:00Z/P1D",
          "2014-10-29T00:00:00Z/P1M",
          "2014-10-31T00:00:00Z/P1D",
          "2014-10-30T00:00:00Z/P1D",
          "2014-10-28T00:00:00Z/P1D"
      ),
      intervalStr -> dataSegmentWithInterval(intervalStr)
  );
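
  // Pre-create the load-side latches the same way.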
  final CountDownLatch[] loadRequestLatches = new CountDownLatch[5];
  final CountDownLatch[] segmentLoadedLatches = new CountDownLatch[5];
  for (i = 0; i < 5; i++) {
    loadRequestLatches[i] = new CountDownLatch(1);
    segmentLoadedLatches[i] = new CountDownLatch(1);
  }
  i = 0;
  for (DataSegment s : segmentToLoad) {
    loadRequestSignals.put(s.getId(), loadRequestLatches[i]);
    segmentLoadedSignals.put(s.getId(), segmentLoadedLatches[i++]);
  }

  // The segment with the latest interval should be loaded first.
  final List<DataSegment> expectedLoadOrder = Lists.transform(
      ImmutableList.of(
          "2014-10-29T00:00:00Z/P1M",
          "2014-10-31T00:00:00Z/P1D",
          "2014-10-30T00:00:00Z/P1D",
          "2014-10-28T00:00:00Z/P1D",
          "2014-10-27T00:00:00Z/P1D"
      ),
      intervalStr -> dataSegmentWithInterval(intervalStr)
  );
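
  // Stand-in for a historical: the handler only signals that a request was
  // observed; the actual load/drop work is simulated further down.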
  final DataSegmentChangeHandler handler = new DataSegmentChangeHandler() {
    @Override
    public void addSegment(DataSegment segment, DataSegmentChangeCallback callback) {
      loadRequestSignals.get(segment.getId()).countDown();
    }

    @Override
    public void removeSegment(DataSegment segment, DataSegmentChangeCallback callback) {
      dropRequestSignals.get(segment.getId()).countDown();
    }
  };
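
  // Route every node created under the load-queue path through the handler.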
  loadQueueCache.getListenable().addListener((client, event) -> {
    if (event.getType() == PathChildrenCacheEvent.Type.CHILD_ADDED) {
      DataSegmentChangeRequest request = jsonMapper.readValue(
          event.getData().getData(),
          DataSegmentChangeRequest.class
      );
      request.go(handler, null);
    }
  });
  loadQueueCache.start();
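
  // Enqueue all drops, then all loads; each completion callback counts down
  // the corresponding latch.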
  for (final DataSegment segment : segmentToDrop) {
    loadQueuePeon.dropSegment(segment, () -> segmentDroppedSignals.get(segment.getId()).countDown());
  }
  for (final DataSegment segment : segmentToLoad) {
    loadQueuePeon.loadSegment(segment, () -> segmentLoadedSignals.get(segment.getId()).countDown());
  }
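
  // Only load requests count toward the queue size: each test segment created
  // by dataSegmentWithInterval reports a size of 1,200 bytes, so five loads
  // total 6,000.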
  Assert.assertEquals(6000, loadQueuePeon.getLoadQueueSize());
  Assert.assertEquals(5, loadQueuePeon.getSegmentsToLoad().size());
  Assert.assertEquals(5, loadQueuePeon.getSegmentsToDrop().size());
  Assert.assertEquals(0, loadQueuePeon.getTimedOutSegments().size());
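
  // Drops are verified first: each request must reach ZooKeeper, then deleting
  // the node acknowledges it the way a historical would.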
  for (DataSegment segment : segmentToDrop) {
    String dropRequestPath = ZKPaths.makePath(LOAD_QUEUE_PATH, segment.getId().toString());
    Assert.assertTrue(
        "Latch not counted down for " + segment.getId(),
        dropRequestSignals.get(segment.getId()).await(10, TimeUnit.SECONDS)
    );
    Assert.assertNotNull(
        "Path " + dropRequestPath + " doesn't exist",
        curator.checkExists().forPath(dropRequestPath)
    );
    Assert.assertEquals(
        segment,
        ((SegmentChangeRequestDrop) jsonMapper.readValue(
            curator.getData().decompressed().forPath(dropRequestPath),
            DataSegmentChangeRequest.class
        )).getSegment()
    );
    // Simulate completion of the drop request by the historical.
    curator.delete().guaranteed().forPath(dropRequestPath);
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentDroppedSignals.get(segment.getId())));
  }
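
  // Loads are verified in the expected priority order; deleting each node
  // simulates the historical completing the request.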
  for (DataSegment segment : expectedLoadOrder) {
    String loadRequestPath = ZKPaths.makePath(LOAD_QUEUE_PATH, segment.getId().toString());
    Assert.assertTrue(timing.forWaiting().awaitLatch(loadRequestSignals.get(segment.getId())));
    Assert.assertNotNull(curator.checkExists().forPath(loadRequestPath));
    Assert.assertEquals(
        segment,
        ((SegmentChangeRequestLoad) jsonMapper.readValue(
            curator.getData().decompressed().forPath(loadRequestPath),
            DataSegmentChangeRequest.class
        )).getSegment()
    );
    // Simulate completion of the load request by the historical.
    curator.delete().guaranteed().forPath(loadRequestPath);
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentLoadedSignals.get(segment.getId())));
  }
}
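
The dispatch inside the cache listener above works because each concrete DataSegmentChangeRequest routes itself to the matching DataSegmentChangeHandler method via go(). A minimal sketch of that round trip, assuming SegmentChangeRequestLoad's single-argument constructor; the roundTrip method name is illustrative, and jsonMapper, handler, and segment are set up as in the test:

private void roundTrip(ObjectMapper jsonMapper, DataSegmentChangeHandler handler, DataSegment segment) throws IOException {
  // Serialize against the base type, as the peon does when writing to
  // ZooKeeper; the type id is embedded in the JSON.
  DataSegmentChangeRequest out = new SegmentChangeRequestLoad(segment);
  byte[] payload = jsonMapper.writeValueAsBytes(out);
  // Jackson restores the concrete subtype from the embedded type id.
  DataSegmentChangeRequest in = jsonMapper.readValue(payload, DataSegmentChangeRequest.class);
  // Polymorphic dispatch: SegmentChangeRequestLoad invokes addSegment(),
  // SegmentChangeRequestDrop invokes removeSegment().
  in.go(handler, null);
}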