Use of org.apache.druid.guice.ServerTypeConfig in project druid by druid-io.
The class SegmentLoadDropHandlerTest, method setUp.
@Before
public void setUp() throws IOException {
  try {
    testStorageLocation = new TestStorageLocation(temporaryFolder);
    infoDir = testStorageLocation.getInfoDir();
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  locations = Collections.singletonList(testStorageLocation.toStorageLocationConfig());
  scheduledRunnable = new ArrayList<>();
  segmentCacheManager = new CacheTestSegmentCacheManager();
  segmentLoader = new CacheTestSegmentLoader();
  segmentManager = new SegmentManager(segmentLoader);
  segmentsAnnouncedByMe = new ConcurrentSkipListSet<>();
  announceCount = new AtomicInteger(0);
  announcer = new DataSegmentAnnouncer() {
    @Override
    public void announceSegment(DataSegment segment) {
      segmentsAnnouncedByMe.add(segment);
      announceCount.incrementAndGet();
    }

    @Override
    public void unannounceSegment(DataSegment segment) {
      segmentsAnnouncedByMe.remove(segment);
      announceCount.decrementAndGet();
    }

    @Override
    public void announceSegments(Iterable<DataSegment> segments) {
      for (DataSegment segment : segments) {
        segmentsAnnouncedByMe.add(segment);
      }
      announceCount.addAndGet(Iterables.size(segments));
    }

    @Override
    public void unannounceSegments(Iterable<DataSegment> segments) {
      for (DataSegment segment : segments) {
        segmentsAnnouncedByMe.remove(segment);
      }
      announceCount.addAndGet(-Iterables.size(segments));
    }
  };
  segmentLoaderConfig = new SegmentLoaderConfig() {
    @Override
    public File getInfoDir() {
      return testStorageLocation.getInfoDir();
    }

    @Override
    public int getNumLoadingThreads() {
      return 5;
    }

    @Override
    public int getAnnounceIntervalMillis() {
      return 50;
    }

    @Override
    public List<StorageLocationConfig> getLocations() {
      return locations;
    }

    @Override
    public int getDropSegmentDelayMillis() {
      return 0;
    }
  };
  noAnnouncerSegmentLoaderConfig = new SegmentLoaderConfig() {
    @Override
    public File getInfoDir() {
      return testStorageLocation.getInfoDir();
    }

    @Override
    public int getNumLoadingThreads() {
      return 5;
    }

    @Override
    public int getAnnounceIntervalMillis() {
      return 0;
    }

    @Override
    public List<StorageLocationConfig> getLocations() {
      return locations;
    }

    @Override
    public int getDropSegmentDelayMillis() {
      return 0;
    }
  };
  segmentLoaderConfigNoLocations = new SegmentLoaderConfig() {
    @Override
    public int getNumLoadingThreads() {
      return 5;
    }

    @Override
    public int getAnnounceIntervalMillis() {
      return 50;
    }

    @Override
    public int getDropSegmentDelayMillis() {
      return 0;
    }
  };
  scheduledExecutorFactory = new ScheduledExecutorFactory() {
    @Override
    public ScheduledExecutorService create(int corePoolSize, String nameFormat) {
      /*
        Override the normal behavior by adding each runnable to a list, so the test can make sure
        all scheduled runnables are executed by explicitly calling run() on each item in the list.
      */
      return new ScheduledThreadPoolExecutor(corePoolSize, Execs.makeThreadFactory(nameFormat)) {
        @Override
        public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
          scheduledRunnable.add(command);
          return null;
        }
      };
    }
  };
  segmentLoadDropHandler = new SegmentLoadDropHandler(
      jsonMapper,
      segmentLoaderConfig,
      announcer,
      Mockito.mock(DataSegmentServerAnnouncer.class),
      segmentManager,
      segmentCacheManager,
      scheduledExecutorFactory.create(5, "SegmentLoadDropHandlerTest-[%d]"),
      new ServerTypeConfig(ServerType.HISTORICAL)
  );
}
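Outside of tests, ServerTypeConfig is normally supplied through Guice rather than constructed inline as it is in this setUp method. Below is a minimal sketch of such a binding, assuming only the standard com.google.inject Module and Binder API; the module class name is made up for illustration and is not part of the test above.

import com.google.inject.Binder;
import com.google.inject.Module;
import org.apache.druid.guice.ServerTypeConfig;
import org.apache.druid.server.coordination.ServerType;

// Hypothetical module: binds the node's ServerType so that components such as
// SegmentLoadDropHandler can receive a ServerTypeConfig instance via injection.
public class HistoricalServerTypeModule implements Module {
  @Override
  public void configure(Binder binder) {
    binder.bind(ServerTypeConfig.class)
          .toInstance(new ServerTypeConfig(ServerType.HISTORICAL));
  }
}

With a binding along these lines in place, Guice can inject the ServerTypeConfig instead of each caller constructing it by hand as the test does.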
Use of org.apache.druid.guice.ServerTypeConfig in project druid by druid-io.
The class SegmentLoadDropHandlerTest, method testProcessBatchLoadDropLoadSequenceForSameSegment.
@Test(timeout = 60_000L)
public void testProcessBatchLoadDropLoadSequenceForSameSegment() throws Exception {
  final SegmentManager segmentManager = Mockito.mock(SegmentManager.class);
  Mockito.when(segmentManager.loadSegment(ArgumentMatchers.any(), ArgumentMatchers.anyBoolean(), ArgumentMatchers.any()))
         .thenReturn(true);
  Mockito.doNothing().when(segmentManager).dropSegment(ArgumentMatchers.any());
  final SegmentLoadDropHandler segmentLoadDropHandler = new SegmentLoadDropHandler(
      jsonMapper,
      noAnnouncerSegmentLoaderConfig,
      announcer,
      Mockito.mock(DataSegmentServerAnnouncer.class),
      segmentManager,
      segmentCacheManager,
      scheduledExecutorFactory.create(5, "SegmentLoadDropHandlerTest-[%d]"),
      new ServerTypeConfig(ServerType.HISTORICAL)
  );
  segmentLoadDropHandler.start();

  DataSegment segment1 = makeSegment("batchtest1", "1", Intervals.of("P1d/2011-04-01"));
  List<DataSegmentChangeRequest> batch = ImmutableList.of(new SegmentChangeRequestLoad(segment1));

  // load the segment
  ListenableFuture<List<DataSegmentChangeRequestAndStatus>> future = segmentLoadDropHandler.processBatch(batch);
  for (Runnable runnable : scheduledRunnable) {
    runnable.run();
  }
  List<DataSegmentChangeRequestAndStatus> result = future.get();
  Assert.assertEquals(STATE.SUCCESS, result.get(0).getStatus().getState());
  scheduledRunnable.clear();

  // drop the segment
  batch = ImmutableList.of(new SegmentChangeRequestDrop(segment1));
  future = segmentLoadDropHandler.processBatch(batch);
  for (Runnable runnable : scheduledRunnable) {
    runnable.run();
  }
  result = future.get();
  Assert.assertEquals(STATE.SUCCESS, result.get(0).getStatus().getState());
  scheduledRunnable.clear();

  // check invocations after a load-drop sequence
  Mockito.verify(segmentManager, Mockito.times(1)).loadSegment(ArgumentMatchers.any(), ArgumentMatchers.anyBoolean(), ArgumentMatchers.any());
  Mockito.verify(segmentManager, Mockito.times(1)).dropSegment(ArgumentMatchers.any());
  // try to reload the segment - this should be a no-op: since this may be the first client fetching
  // the result of that load request, the handler will forget the success of the load request afterwards
  batch = ImmutableList.of(new SegmentChangeRequestLoad(segment1));
  future = segmentLoadDropHandler.processBatch(batch);
  Assert.assertEquals(0, scheduledRunnable.size());
  result = future.get();
  Assert.assertEquals(STATE.SUCCESS, result.get(0).getStatus().getState());

  // check invocations - should stay the same
  Mockito.verify(segmentManager, Mockito.times(1)).loadSegment(ArgumentMatchers.any(), ArgumentMatchers.anyBoolean(), ArgumentMatchers.any());
  Mockito.verify(segmentManager, Mockito.times(1)).dropSegment(ArgumentMatchers.any());

  // try to reload the segment - this time the loader knows that this is a fresh request to load,
  // so the segment manager will be asked to load
  batch = ImmutableList.of(new SegmentChangeRequestLoad(segment1));
  future = segmentLoadDropHandler.processBatch(batch);
  for (Runnable runnable : scheduledRunnable) {
    runnable.run();
  }
  result = future.get();
  Assert.assertEquals(STATE.SUCCESS, result.get(0).getStatus().getState());
  scheduledRunnable.clear();

  // check invocations - the load segment counter should bump up
  Mockito.verify(segmentManager, Mockito.times(2)).loadSegment(ArgumentMatchers.any(), ArgumentMatchers.anyBoolean(), ArgumentMatchers.any());
  Mockito.verify(segmentManager, Mockito.times(1)).dropSegment(ArgumentMatchers.any());

  segmentLoadDropHandler.stop();
}
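The way this test drains scheduledRunnable is a general technique: capture everything handed to schedule() and run it by hand, so the test decides exactly when the handler's asynchronous work happens. A self-contained sketch of the same idea, independent of Druid; the class and field names here are purely illustrative.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Captures scheduled tasks instead of running them on a timer, so a test can
// drain them deterministically with runAll().
public class CapturingScheduler extends ScheduledThreadPoolExecutor {
  private final List<Runnable> captured = new ArrayList<>();

  public CapturingScheduler(int corePoolSize) {
    super(corePoolSize, Executors.defaultThreadFactory());
  }

  @Override
  public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
    captured.add(command);  // remember the task instead of scheduling it
    return null;            // the callers in the test ignore the returned future
  }

  public void runAll() {
    captured.forEach(Runnable::run);
    captured.clear();
  }
}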
Use of org.apache.druid.guice.ServerTypeConfig in project druid by druid-io.
The class SegmentLoadDropHandlerTest, method testStartStop.
@Test
public void testStartStop() throws Exception {
  SegmentLoadDropHandler handler = new SegmentLoadDropHandler(
      jsonMapper,
      new SegmentLoaderConfig() {
        @Override
        public File getInfoDir() {
          return infoDir;
        }

        @Override
        public int getNumLoadingThreads() {
          return 5;
        }

        @Override
        public List<StorageLocationConfig> getLocations() {
          return locations;
        }

        @Override
        public int getAnnounceIntervalMillis() {
          return 50;
        }
      },
      announcer,
      Mockito.mock(DataSegmentServerAnnouncer.class),
      segmentManager,
      segmentCacheManager,
      new ServerTypeConfig(ServerType.HISTORICAL)
  );

  Set<DataSegment> segments = new HashSet<>();
  for (int i = 0; i < COUNT; ++i) {
    segments.add(makeSegment("test" + i, "1", Intervals.of("P1d/2011-04-01")));
    segments.add(makeSegment("test" + i, "1", Intervals.of("P1d/2011-04-02")));
    segments.add(makeSegment("test" + i, "2", Intervals.of("P1d/2011-04-02")));
    segments.add(makeSegment("test_two" + i, "1", Intervals.of("P1d/2011-04-01")));
    segments.add(makeSegment("test_two" + i, "1", Intervals.of("P1d/2011-04-02")));
  }
  for (DataSegment segment : segments) {
    testStorageLocation.writeSegmentInfoToCache(segment);
  }
  testStorageLocation.checkInfoCache(segments);
  Assert.assertTrue(segmentManager.getDataSourceCounts().isEmpty());

  handler.start();
  Assert.assertTrue(!segmentManager.getDataSourceCounts().isEmpty());
  for (int i = 0; i < COUNT; ++i) {
    Assert.assertEquals(3L, segmentManager.getDataSourceCounts().get("test" + i).longValue());
    Assert.assertEquals(2L, segmentManager.getDataSourceCounts().get("test_two" + i).longValue());
  }
  Assert.assertEquals(5 * COUNT, announceCount.get());
  handler.stop();

  for (DataSegment segment : segments) {
    testStorageLocation.deleteSegmentInfoFromCache(segment);
  }
  Assert.assertEquals(0, infoDir.listFiles().length);
  Assert.assertTrue(infoDir.delete());
}
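The makeSegment helper referenced throughout these tests is not shown on this page. The following is only a rough sketch of what such a helper could look like, assuming DataSegment.builder() and its dataSource/interval/version/size setters; the real helper in the Druid test class may construct the segment differently, and the class name here is invented for illustration.

import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.timeline.DataSegment;
import org.joda.time.Interval;

// Hypothetical stand-in for the test's makeSegment helper: builds a small
// DataSegment with just enough metadata for load/drop bookkeeping.
public class SegmentFixtures {
  public static DataSegment makeSegment(String dataSource, String version, Interval interval) {
    return DataSegment.builder()
                      .dataSource(dataSource)
                      .interval(interval)
                      .version(version)
                      .size(123L)  // arbitrary size for test purposes
                      .build();
  }

  public static void main(String[] args) {
    DataSegment segment = makeSegment("batchtest1", "1", Intervals.of("P1d/2011-04-01"));
    System.out.println(segment.getId());
  }
}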