Use of org.apache.druid.client.BrokerInternalQueryConfig in project druid by druid-io.
The class DruidSchemaConcurrencyTest, method testDruidSchemaRefreshAndDruidSchemaGetSegmentMetadata.
/**
 * This tests the contention between two methods of DruidSchema: {@link DruidSchema#refresh} and
 * {@link DruidSchema#getSegmentMetadataSnapshot()}. It first triggers a refresh of DruidSchema. To mimic
 * heavy work done while holding {@link DruidSchema#lock}, {@link DruidSchema#buildDruidTable} is overridden
 * to sleep before doing the real work. While the refresh is in progress, getSegmentMetadataSnapshot() is
 * called continuously to mimic reading the segments table of SystemSchema. All these calls must return
 * without heavy contention.
 */
@Test(timeout = 30000L)
public void testDruidSchemaRefreshAndDruidSchemaGetSegmentMetadata()
    throws InterruptedException, ExecutionException, TimeoutException
{
  schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  )
  {
    @Override
    DruidTable buildDruidTable(final String dataSource)
    {
      doInLock(() -> {
        try {
          // Mimic some heavy work done while holding the lock in DruidSchema
          Thread.sleep(5000);
        }
        catch (InterruptedException e) {
          throw new RuntimeException(e);
        }
      });
      return super.buildDruidTable(dataSource);
    }
  };

  int numExistingSegments = 100;
  int numServers = 19;
  CountDownLatch segmentLoadLatch = new CountDownLatch(numExistingSegments);
  serverView.registerTimelineCallback(
      Execs.directExecutor(),
      new TimelineCallback()
      {
        @Override
        public CallbackAction timelineInitialized()
        {
          return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment)
        {
          segmentLoadLatch.countDown();
          return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction segmentRemoved(DataSegment segment)
        {
          return CallbackAction.CONTINUE;
        }

        @Override
        public CallbackAction serverSegmentRemoved(DruidServerMetadata server, DataSegment segment)
        {
          return CallbackAction.CONTINUE;
        }
      }
  );
  addSegmentsToCluster(0, numServers, numExistingSegments);
  // Wait for all segments to be loaded in BrokerServerView
  Assert.assertTrue(segmentLoadLatch.await(5, TimeUnit.SECONDS));

  // Trigger a refresh of DruidSchema. This will internally run the heavy work mimicked by the
  // overridden buildDruidTable.
  Future refreshFuture = exec.submit(() -> {
    schema.refresh(
        walker.getSegments().stream().map(DataSegment::getId).collect(Collectors.toSet()),
        Sets.newHashSet(DATASOURCE)
    );
    return null;
  });
  Assert.assertFalse(refreshFuture.isDone());

  for (int i = 0; i < 1000; i++) {
    Map<SegmentId, AvailableSegmentMetadata> segmentsMetadata =
        exec.submit(() -> schema.getSegmentMetadataSnapshot()).get(100, TimeUnit.MILLISECONDS);
    Assert.assertFalse(segmentsMetadata.isEmpty());
    // We want to call getSegmentMetadataSnapshot while the refresh is in progress. Sleep might help with timing.
    Thread.sleep(2);
  }
  refreshFuture.get(10, TimeUnit.SECONDS);
}
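Every example on this page passes new BrokerInternalQueryConfig() to the DruidSchema constructor, i.e. an empty query context for the segment metadata queries the broker issues internally. Below is a minimal sketch of populating the config instead of using the defaults; it assumes the class's Jackson-mapped context property, and the ObjectMapper here merely stands in for Druid's runtime-property binding (druid.broker.internal.query.config) that would normally populate it:

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.client.BrokerInternalQueryConfig;

public class BrokerInternalQueryConfigSketch
{
  public static void main(String[] args) throws Exception
  {
    // Deserialize the config the way Druid's property binding would,
    // here lowering the priority of broker-internal metadata queries.
    ObjectMapper mapper = new ObjectMapper();
    BrokerInternalQueryConfig config = mapper.readValue(
        "{\"context\": {\"priority\": -1}}",
        BrokerInternalQueryConfig.class
    );
    System.out.println(config.getContext()); // prints {priority=-1}
  }
}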
Use of org.apache.druid.client.BrokerInternalQueryConfig in project druid by druid-io.
The class DruidSchemaTest, method testServerSegmentRemovedCallbackRemoveUnknownSegment.
@Test
public void testServerSegmentRemovedCallbackRemoveUnknownSegment() throws InterruptedException
{
  String datasource = "serverSegmentRemoveTest";
  CountDownLatch removeServerSegmentLatch = new CountDownLatch(1);
  DruidSchema schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  )
  {
    @Override
    void removeServerSegment(final DruidServerMetadata server, final DataSegment segment)
    {
      super.removeServerSegment(server, segment);
      if (datasource.equals(segment.getDataSource())) {
        removeServerSegmentLatch.countDown();
      }
    }
  };
  serverView.addSegment(newSegment(datasource, 1), ServerType.BROKER);
  serverView.removeSegment(newSegment(datasource, 1), ServerType.HISTORICAL);
  Assert.assertTrue(removeServerSegmentLatch.await(1, TimeUnit.SECONDS));
  Assert.assertEquals(4, schema.getTotalSegments());
}
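The newSegment(datasource, partitionNum) helper used above and throughout the remaining tests is defined elsewhere in DruidSchemaTest and is not shown on this page. A minimal sketch of what such a helper could look like, assuming Druid's DataSegment.builder() API and a NumberedShardSpec; the interval, version, and size are arbitrary placeholder values:

import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.partition.NumberedShardSpec;

// Builds a small test segment for the given datasource and partition number.
private static DataSegment newSegment(String datasource, int partitionNum)
{
  return DataSegment.builder()
                    .dataSource(datasource)
                    .interval(Intervals.of("2012-01-01/2013-01-01"))
                    .version("1")
                    .shardSpec(new NumberedShardSpec(partitionNum, 0))
                    .size(100)
                    .build();
}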
Use of org.apache.druid.client.BrokerInternalQueryConfig in project druid by druid-io.
The class DruidSchemaTest, method testSegmentAddedCallbackAddExistingSegment.
@Test
public void testSegmentAddedCallbackAddExistingSegment() throws InterruptedException
{
  String datasource = "newSegmentAddTest";
  CountDownLatch addSegmentLatch = new CountDownLatch(2);
  DruidSchema schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  )
  {
    @Override
    protected void addSegment(final DruidServerMetadata server, final DataSegment segment)
    {
      super.addSegment(server, segment);
      if (datasource.equals(segment.getDataSource())) {
        addSegmentLatch.countDown();
      }
    }
  };
  DataSegment segment = newSegment(datasource, 1);
  serverView.addSegment(segment, ServerType.REALTIME);
  serverView.addSegment(segment, ServerType.HISTORICAL);
  Assert.assertTrue(addSegmentLatch.await(1, TimeUnit.SECONDS));
  Assert.assertEquals(5, schema.getTotalSegments());
  List<AvailableSegmentMetadata> metadatas = schema
      .getSegmentMetadataSnapshot()
      .values()
      .stream()
      .filter(metadata -> datasource.equals(metadata.getSegment().getDataSource()))
      .collect(Collectors.toList());
  Assert.assertEquals(1, metadatas.size());
  AvailableSegmentMetadata metadata = metadatas.get(0);
  // The realtime flag is unset as soon as any historical serves the segment
  Assert.assertEquals(0, metadata.isRealtime());
  Assert.assertEquals(0, metadata.getNumRows());
  Assert.assertEquals(2, metadata.getNumReplicas());
  Assert.assertTrue(schema.getSegmentsNeedingRefresh().contains(metadata.getSegment().getId()));
  Assert.assertFalse(schema.getMutableSegments().contains(metadata.getSegment().getId()));
}
Use of org.apache.druid.client.BrokerInternalQueryConfig in project druid by druid-io.
The class DruidSchemaTest, method testSegmentRemovedCallbackEmptyDataSourceAfterRemove.
@Test
public void testSegmentRemovedCallbackEmptyDataSourceAfterRemove() throws InterruptedException, IOException
{
  String datasource = "segmentRemoveTest";
  CountDownLatch addSegmentLatch = new CountDownLatch(1);
  CountDownLatch removeSegmentLatch = new CountDownLatch(1);
  DruidSchema schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  )
  {
    @Override
    protected void addSegment(final DruidServerMetadata server, final DataSegment segment)
    {
      super.addSegment(server, segment);
      if (datasource.equals(segment.getDataSource())) {
        addSegmentLatch.countDown();
      }
    }

    @Override
    void removeSegment(final DataSegment segment)
    {
      super.removeSegment(segment);
      if (datasource.equals(segment.getDataSource())) {
        removeSegmentLatch.countDown();
      }
    }
  };
  DataSegment segment = newSegment(datasource, 1);
  serverView.addSegment(segment, ServerType.REALTIME);
  Assert.assertTrue(addSegmentLatch.await(1, TimeUnit.SECONDS));
  schema.refresh(Sets.newHashSet(segment.getId()), Sets.newHashSet(datasource));
  serverView.removeSegment(segment, ServerType.REALTIME);
  Assert.assertTrue(removeSegmentLatch.await(1, TimeUnit.SECONDS));
  Assert.assertEquals(4, schema.getTotalSegments());
  List<AvailableSegmentMetadata> metadatas = schema
      .getSegmentMetadataSnapshot()
      .values()
      .stream()
      .filter(metadata -> datasource.equals(metadata.getSegment().getDataSource()))
      .collect(Collectors.toList());
  Assert.assertEquals(0, metadatas.size());
  Assert.assertFalse(schema.getSegmentsNeedingRefresh().contains(segment.getId()));
  Assert.assertFalse(schema.getMutableSegments().contains(segment.getId()));
  Assert.assertFalse(schema.getDataSourcesNeedingRebuild().contains(datasource));
  Assert.assertFalse(schema.getTableNames().contains(datasource));
}
Use of org.apache.druid.client.BrokerInternalQueryConfig in project druid by druid-io.
The class DruidSchemaTest, method testServerSegmentRemovedCallbackRemoveHistoricalSegment.
@Test
public void testServerSegmentRemovedCallbackRemoveHistoricalSegment() throws InterruptedException
{
  String datasource = "serverSegmentRemoveTest";
  CountDownLatch addSegmentLatch = new CountDownLatch(1);
  CountDownLatch removeServerSegmentLatch = new CountDownLatch(1);
  DruidSchema schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  )
  {
    @Override
    protected void addSegment(final DruidServerMetadata server, final DataSegment segment)
    {
      super.addSegment(server, segment);
      if (datasource.equals(segment.getDataSource())) {
        addSegmentLatch.countDown();
      }
    }

    @Override
    void removeServerSegment(final DruidServerMetadata server, final DataSegment segment)
    {
      super.removeServerSegment(server, segment);
      if (datasource.equals(segment.getDataSource())) {
        removeServerSegmentLatch.countDown();
      }
    }
  };
  DataSegment segment = newSegment(datasource, 1);
  serverView.addSegment(segment, ServerType.HISTORICAL);
  serverView.addSegment(segment, ServerType.BROKER);
  Assert.assertTrue(addSegmentLatch.await(1, TimeUnit.SECONDS));
  serverView.removeSegment(segment, ServerType.HISTORICAL);
  Assert.assertTrue(removeServerSegmentLatch.await(1, TimeUnit.SECONDS));
  Assert.assertEquals(5, schema.getTotalSegments());
  List<AvailableSegmentMetadata> metadatas = schema
      .getSegmentMetadataSnapshot()
      .values()
      .stream()
      .filter(metadata -> datasource.equals(metadata.getSegment().getDataSource()))
      .collect(Collectors.toList());
  Assert.assertEquals(1, metadatas.size());
  AvailableSegmentMetadata metadata = metadatas.get(0);
  Assert.assertEquals(0, metadata.isRealtime());
  Assert.assertEquals(0, metadata.getNumRows());
  // Brokers are not counted as replicas yet
  Assert.assertEquals(0, metadata.getNumReplicas());
}