Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
From class BatchServerInventoryViewTest, method testRunWithFilterCallback.
@Test
public void testRunWithFilterCallback() throws Exception {
  final CountDownLatch removeCallbackLatch = new CountDownLatch(1);
  segmentAnnouncer.announceSegments(testSegments);
  waitForSync(filteredBatchServerInventoryView, testSegments);
  DruidServer server = Iterables.get(filteredBatchServerInventoryView.getInventory(), 0);
  Set<DataSegment> segments = Sets.newHashSet(server.iterateAllSegments());
  Assert.assertEquals(testSegments, segments);

  // The strict mock expects exactly one add and one remove for the single segment
  // that matches the filter registered below.
  ServerView.SegmentCallback callback = EasyMock.createStrictMock(ServerView.SegmentCallback.class);
  Comparator<DataSegment> dataSegmentComparator =
      Comparator.comparing(DataSegment::getInterval, Comparators.intervalsByStartThenEnd());
  EasyMock.expect(
      callback.segmentAdded(
          EasyMock.anyObject(),
          EasyMock.cmp(makeSegment(INITIAL_SEGMENTS + 2), dataSegmentComparator, LogicalOperator.EQUAL)
      )
  ).andReturn(ServerView.CallbackAction.CONTINUE).times(1);
  EasyMock.expect(
      callback.segmentRemoved(
          EasyMock.anyObject(),
          EasyMock.cmp(makeSegment(INITIAL_SEGMENTS + 2), dataSegmentComparator, LogicalOperator.EQUAL)
      )
  ).andAnswer(new IAnswer<ServerView.CallbackAction>() {
    @Override
    public ServerView.CallbackAction answer() {
      removeCallbackLatch.countDown();
      return ServerView.CallbackAction.CONTINUE;
    }
  }).times(1);
  EasyMock.replay(callback);

  // Register the callback with a filter that matches only the segment added below.
  filteredBatchServerInventoryView.registerSegmentCallback(
      Execs.directExecutor(),
      callback,
      new Predicate<Pair<DruidServerMetadata, DataSegment>>() {
        @Override
        public boolean apply(@Nullable Pair<DruidServerMetadata, DataSegment> input) {
          return input.rhs.getInterval().getStart().equals(SEGMENT_INTERVAL_START.plusDays(INITIAL_SEGMENTS + 2));
        }
      }
  );

  DataSegment segment2 = makeSegment(INITIAL_SEGMENTS + 2);
  segmentAnnouncer.announceSegment(segment2);
  testSegments.add(segment2);

  // This segment does not match the filter, so its add and remove must not reach the callback.
  DataSegment oldSegment = makeSegment(-1);
  segmentAnnouncer.announceSegment(oldSegment);
  testSegments.add(oldSegment);
  segmentAnnouncer.unannounceSegment(oldSegment);
  testSegments.remove(oldSegment);
  waitForSync(filteredBatchServerInventoryView, testSegments);

  segmentAnnouncer.unannounceSegment(segment2);
  testSegments.remove(segment2);
  waitForSync(filteredBatchServerInventoryView, testSegments);

  TIMING.forWaiting().awaitLatch(removeCallbackLatch);
  EasyMock.verify(callback);
}
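The filter predicate above receives both the server metadata and the segment (as input.lhs and input.rhs), though this test only inspects the segment interval. As a minimal sketch, the same filter can be written as a lambda, reusing the test's SEGMENT_INTERVAL_START and INITIAL_SEGMENTS constants:

// Equivalent lambda form of the anonymous Predicate used in the test above.
Predicate<Pair<DruidServerMetadata, DataSegment>> filter =
    input -> input.rhs.getInterval().getStart()
                  .equals(SEGMENT_INTERVAL_START.plusDays(INITIAL_SEGMENTS + 2));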
Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
From class BatchServerInventoryViewTest, method testSameTimeZnode.
@Test
public void testSameTimeZnode() throws Exception {
  final int numThreads = INITIAL_SEGMENTS / 10;
  final ListeningExecutorService executor =
      MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numThreads));
  segmentAnnouncer.announceSegments(testSegments);
  waitForSync(batchServerInventoryView, testSegments);
  DruidServer server = Iterables.get(batchServerInventoryView.getInventory(), 0);
  final Set<DataSegment> segments = Sets.newHashSet(server.iterateAllSegments());
  Assert.assertEquals(testSegments, segments);

  // Each thread builds its own announcer for the same server metadata and announces a
  // disjoint batch of segments; the latch lines all threads up so they announce at once.
  final CountDownLatch latch = new CountDownLatch(numThreads);
  final List<ListenableFuture<BatchDataSegmentAnnouncer>> futures = new ArrayList<>();
  for (int i = 0; i < numThreads; ++i) {
    final int ii = i;
    futures.add(executor.submit(new Callable<BatchDataSegmentAnnouncer>() {
      @Override
      public BatchDataSegmentAnnouncer call() {
        BatchDataSegmentAnnouncer segmentAnnouncer = new BatchDataSegmentAnnouncer(
            new DruidServerMetadata("id", "host", null, Long.MAX_VALUE, ServerType.HISTORICAL, "tier", 0),
            new BatchDataSegmentAnnouncerConfig() {
              @Override
              public int getSegmentsPerNode() {
                return 50;
              }
            },
            new ZkPathsConfig() {
              @Override
              public String getBase() {
                return TEST_BASE_PATH;
              }
            },
            announcer,
            jsonMapper
        );
        List<DataSegment> segments = new ArrayList<>();
        try {
          for (int j = 0; j < INITIAL_SEGMENTS / numThreads; ++j) {
            segments.add(makeSegment(INITIAL_SEGMENTS + ii + numThreads * j));
          }
          // Wait until every thread is ready, then announce concurrently.
          latch.countDown();
          latch.await();
          segmentAnnouncer.announceSegments(segments);
          testSegments.addAll(segments);
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
        return segmentAnnouncer;
      }
    }));
  }
  final List<BatchDataSegmentAnnouncer> announcers = Futures.allAsList(futures).get();
  Assert.assertEquals(INITIAL_SEGMENTS * 2, testSegments.size());
  waitForSync(batchServerInventoryView, testSegments);
  Assert.assertEquals(testSegments, Sets.newHashSet(server.iterateAllSegments()));

  // Unannounce the concurrently added segments; the makeSegment(100 + i) indices line up
  // with the segments created by the threads above, since INITIAL_SEGMENTS is 100 here.
  for (int i = 0; i < INITIAL_SEGMENTS; ++i) {
    final DataSegment segment = makeSegment(100 + i);
    segmentAnnouncer.unannounceSegment(segment);
    testSegments.remove(segment);
  }
  waitForSync(batchServerInventoryView, testSegments);
  Assert.assertEquals(testSegments, Sets.newHashSet(server.iterateAllSegments()));
}
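For readers tracing the DruidServerMetadata usage above, the positional constructor arguments map to the JSON fields exercised by the serde test below; an annotated sketch of the same call:

// Field names taken from the JSON produced in testDruidServerMetadataSerde below.
DruidServerMetadata metadata = new DruidServerMetadata(
    "id",                   // name
    "host",                 // host (plaintext host:port)
    null,                   // hostAndTlsPort (null when TLS is not used)
    Long.MAX_VALUE,         // maxSize
    ServerType.HISTORICAL,  // type
    "tier",                 // tier
    0                       // priority
);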
Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
From class ServersResourceTest, method testDruidServerMetadataSerde.
@Test
public void testDruidServerMetadataSerde() throws Exception {
  DruidServerMetadata metadata = new DruidServerMetadata("dummy", "host", null, 1234, ServerType.HISTORICAL, "tier", 1);
  String metadataJson = objectMapper.writeValueAsString(metadata);
  // Note that ServerType serializes to its lowercase form ("historical").
  String expected = "{\"name\":\"dummy\",\"host\":\"host\",\"hostAndTlsPort\":null,\"maxSize\":1234,\"type\":\"historical\",\"tier\":\"tier\",\"priority\":1}";
  Assert.assertEquals(expected, metadataJson);
  DruidServerMetadata deserializedMetadata = objectMapper.readValue(metadataJson, DruidServerMetadata.class);
  Assert.assertEquals(metadata, deserializedMetadata);

  // The legacy uppercase type ("HISTORICAL") deserializes as well.
  metadata = new DruidServerMetadata("host:123", "host:123", null, 0, ServerType.HISTORICAL, "t1", 0);
  Assert.assertEquals(
      metadata,
      objectMapper.readValue("{\"name\":\"host:123\",\"maxSize\":0,\"type\":\"HISTORICAL\",\"tier\":\"t1\",\"priority\":0,\"host\":\"host:123\"}", DruidServerMetadata.class)
  );

  // With hostAndTlsPort set, the metadata round-trips as well.
  metadata = new DruidServerMetadata("host:123", "host:123", "host:214", 0, ServerType.HISTORICAL, "t1", 0);
  Assert.assertEquals(
      metadata,
      objectMapper.readValue("{\"name\":\"host:123\",\"maxSize\":0,\"type\":\"HISTORICAL\",\"tier\":\"t1\",\"priority\":0,\"host\":\"host:123\",\"hostAndTlsPort\":\"host:214\"}", DruidServerMetadata.class)
  );
  Assert.assertEquals(metadata, objectMapper.readValue(objectMapper.writeValueAsString(metadata), DruidServerMetadata.class));
}
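Note the asymmetry this test pins down: ServerType serializes to lowercase ("historical"), while the last two readValue calls show that the legacy uppercase form ("HISTORICAL") still deserializes, so JSON written by older versions remains readable.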
Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
From class StorageNodeModuleTest, method testDruidServerMetadataIsInjectedAsSingleton.
@Test
public void testDruidServerMetadataIsInjectedAsSingleton() {
  DruidServerMetadata druidServerMetadata = injector.getInstance(DruidServerMetadata.class);
  Assert.assertNotNull(druidServerMetadata);
  // A second lookup must return the very same instance, proving singleton scope.
  DruidServerMetadata other = injector.getInstance(DruidServerMetadata.class);
  Assert.assertSame(druidServerMetadata, other);
}
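For context, this behavior comes from binding DruidServerMetadata in singleton scope. The sketch below is illustrative only, not the actual StorageNodeModule code; any Guice module that provides the type in @Singleton scope would satisfy the same assertions:

import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.google.inject.Singleton;
import org.apache.druid.server.coordination.DruidServerMetadata;
import org.apache.druid.server.coordination.ServerType;

// Illustrative module: a @Singleton @Provides method means every
// injector.getInstance(DruidServerMetadata.class) call returns the same
// instance, which is exactly what Assert.assertSame verifies above.
class ExampleMetadataModule extends AbstractModule {
  @Override
  protected void configure() {
  }

  @Provides
  @Singleton
  public DruidServerMetadata provideMetadata() {
    return new DruidServerMetadata("id", "host", null, 0, ServerType.HISTORICAL, "tier", 0);
  }
}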
Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
From class DruidSchemaConcurrencyTest, method testDruidSchemaRefreshAndInventoryViewAddSegmentAndBrokerServerViewGetTimeline.
/**
 * This test exercises the contention among three components: DruidSchema, InventoryView, and
 * BrokerServerView. It first triggers a refresh of DruidSchema. To mimic heavy work done while
 * holding {@link DruidSchema#lock}, {@link DruidSchema#buildDruidTable} is overridden to sleep
 * before doing the real work. While DruidSchema is refreshing, new segments are added to
 * InventoryView, which triggers updates of BrokerServerView. Finally, while BrokerServerView is
 * being updated, {@link BrokerServerView#getTimeline} is called continuously to mimic user query
 * processing. All these calls must return without heavy contention.
 */
@Test(timeout = 30000L)
public void testDruidSchemaRefreshAndInventoryViewAddSegmentAndBrokerServerViewGetTimeline()
    throws InterruptedException, ExecutionException, TimeoutException {
  schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  ) {
    @Override
    DruidTable buildDruidTable(final String dataSource) {
      doInLock(() -> {
        try {
          // Mimic some heavy work done under the DruidSchema lock.
          Thread.sleep(5000);
        } catch (InterruptedException e) {
          throw new RuntimeException(e);
        }
      });
      return super.buildDruidTable(dataSource);
    }
  };

  int numExistingSegments = 100;
  int numServers = 19;
  CountDownLatch segmentLoadLatch = new CountDownLatch(numExistingSegments);
  serverView.registerTimelineCallback(Execs.directExecutor(), new TimelineCallback() {
    @Override
    public CallbackAction timelineInitialized() {
      return CallbackAction.CONTINUE;
    }

    @Override
    public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
      segmentLoadLatch.countDown();
      return CallbackAction.CONTINUE;
    }

    @Override
    public CallbackAction segmentRemoved(DataSegment segment) {
      return CallbackAction.CONTINUE;
    }

    @Override
    public CallbackAction serverSegmentRemoved(DruidServerMetadata server, DataSegment segment) {
      return CallbackAction.CONTINUE;
    }
  });
  addSegmentsToCluster(0, numServers, numExistingSegments);
  // Wait for all segments to be loaded in BrokerServerView.
  Assert.assertTrue(segmentLoadLatch.await(5, TimeUnit.SECONDS));

  // Trigger a refresh of DruidSchema. This internally runs the heavy work mimicked
  // by the overridden buildDruidTable.
  Future refreshFuture = exec.submit(() -> {
    schema.refresh(
        walker.getSegments().stream().map(DataSegment::getId).collect(Collectors.toSet()),
        Sets.newHashSet(DATASOURCE)
    );
    return null;
  });

  // Trigger updates of BrokerServerView. These should be processed asynchronously.
  // Add completely new segments.
  addSegmentsToCluster(numExistingSegments, numServers, 50);
  // Add replicas of the first 30 segments.
  addReplicasToCluster(1, numServers, 30);
  // The first 30 segments keep their replicas; the other 20 segments are removed
  // from the cluster entirely.
  removeSegmentsFromCluster(numServers, 50);
  Assert.assertFalse(refreshFuture.isDone());

  for (int i = 0; i < 1000; i++) {
    boolean hasTimeline = exec
        .submit(() -> serverView.getTimeline(DataSourceAnalysis.forDataSource(new TableDataSource(DATASOURCE))).isPresent())
        .get(100, TimeUnit.MILLISECONDS);
    Assert.assertTrue(hasTimeline);
    // We want to call getTimeline while BrokerServerView is being updated;
    // a short sleep helps with the timing.
    Thread.sleep(2);
  }
  refreshFuture.get(10, TimeUnit.SECONDS);
}
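The key assertion is inside the loop: each getTimeline call must complete within 100 ms (the get(100, TimeUnit.MILLISECONDS) timeout) even though the concurrent refresh holds the DruidSchema lock for roughly five seconds per table, which is what demonstrates that timeline reads do not serialize behind the schema refresh.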