Example use of org.apache.druid.server.coordination.DruidServerMetadata in the druid-io/druid project: class DruidSchemaTest, method testServerSegmentRemovedCallbackRemoveHistoricalSegment.
@Test
public void testServerSegmentRemovedCallbackRemoveHistoricalSegment() throws InterruptedException {
  final String dataSourceName = "serverSegmentRemoveTest";
  final CountDownLatch segmentAdded = new CountDownLatch(1);
  final CountDownLatch serverSegmentRemoved = new CountDownLatch(1);
  // Subclass DruidSchema so the latches fire once the callbacks run for our datasource.
  DruidSchema schema = new DruidSchema(
      CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
      serverView,
      segmentManager,
      new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
      PLANNER_CONFIG_DEFAULT,
      new NoopEscalator(),
      new BrokerInternalQueryConfig(),
      null
  ) {
    @Override
    protected void addSegment(final DruidServerMetadata server, final DataSegment segment) {
      super.addSegment(server, segment);
      if (dataSourceName.equals(segment.getDataSource())) {
        segmentAdded.countDown();
      }
    }

    @Override
    void removeServerSegment(final DruidServerMetadata server, final DataSegment segment) {
      super.removeServerSegment(server, segment);
      if (dataSourceName.equals(segment.getDataSource())) {
        serverSegmentRemoved.countDown();
      }
    }
  };

  // Announce the same segment from both a historical and a broker.
  final DataSegment segment = newSegment(dataSourceName, 1);
  serverView.addSegment(segment, ServerType.HISTORICAL);
  serverView.addSegment(segment, ServerType.BROKER);
  Assert.assertTrue(segmentAdded.await(1, TimeUnit.SECONDS));

  // Drop only the historical copy; the broker copy remains announced.
  serverView.removeSegment(segment, ServerType.HISTORICAL);
  Assert.assertTrue(serverSegmentRemoved.await(1, TimeUnit.SECONDS));
  Assert.assertEquals(5, schema.getTotalSegments());

  final List<AvailableSegmentMetadata> matching =
      schema.getSegmentMetadataSnapshot()
            .values()
            .stream()
            .filter(m -> dataSourceName.equals(m.getSegment().getDataSource()))
            .collect(Collectors.toList());
  Assert.assertEquals(1, matching.size());

  final AvailableSegmentMetadata remaining = matching.get(0);
  Assert.assertEquals(0, remaining.isRealtime());
  Assert.assertEquals(0, remaining.getNumRows());
  // brokers are not counted as replicas yet
  Assert.assertEquals(0, remaining.getNumReplicas());
}
Example use of org.apache.druid.server.coordination.DruidServerMetadata in the druid-io/druid project: class BatchDataSegmentAnnouncerTest, method setUp.
@Before
public void setUp() throws Exception {
  // Start a single-node ZK testing cluster and connect a Curator client to it.
  testingCluster = new TestingCluster(1);
  testingCluster.start();

  cf = CuratorFrameworkFactory.builder()
                              .connectString(testingCluster.getConnectString())
                              .retryPolicy(new ExponentialBackoffRetry(1, 10))
                              .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
                              .build();
  cf.start();
  cf.blockUntilConnected();
  cf.create().creatingParentsIfNeeded().forPath(TEST_BASE_PATH);

  jsonMapper = TestHelper.makeJsonMapper();

  announcer = new TestAnnouncer(cf, Execs.directExecutor());
  announcer.start();

  segmentReader = new SegmentReader(cf, jsonMapper);
  skipDimensionsAndMetrics = false;
  skipLoadSpec = false;

  // Config whose getters read the test's mutable fields, so individual tests can tweak behavior.
  final BatchDataSegmentAnnouncerConfig announcerConfig = new BatchDataSegmentAnnouncerConfig() {
    @Override
    public int getSegmentsPerNode() {
      return 50;
    }

    @Override
    public long getMaxBytesPerNode() {
      return maxBytesPerNode.get();
    }

    @Override
    public boolean isSkipDimensionsAndMetrics() {
      return skipDimensionsAndMetrics;
    }

    @Override
    public boolean isSkipLoadSpec() {
      return skipLoadSpec;
    }
  };

  // Root all announcements under the test's base path.
  final ZkPathsConfig zkPaths = new ZkPathsConfig() {
    @Override
    public String getBase() {
      return TEST_BASE_PATH;
    }
  };

  segmentAnnouncer = new BatchDataSegmentAnnouncer(
      new DruidServerMetadata("id", "host", null, Long.MAX_VALUE, ServerType.HISTORICAL, "tier", 0),
      announcerConfig,
      zkPaths,
      announcer,
      jsonMapper
  );

  testSegments = new HashSet<>();
  int i = 0;
  while (i < 100) {
    testSegments.add(makeSegment(i));
    i++;
  }

  exec = Execs.multiThreaded(NUM_THREADS, "BatchDataSegmentAnnouncerTest-%d");
}
Example use of org.apache.druid.server.coordination.DruidServerMetadata in the druid-io/druid project: class ServerHolderTest, method testEquals.
@Test
public void testEquals() {
  // Baseline holder: name1 / host1 / historical / tier1.
  final ServerHolder h1 = new ServerHolder(
      new ImmutableDruidServer(
          new DruidServerMetadata("name1", "host1", null, 100L, ServerType.HISTORICAL, "tier1", 0),
          0L,
          ImmutableMap.of("src1", DATA_SOURCES.get("src1")),
          1
      ),
      new LoadQueuePeonTester()
  );
  // Differs from h1 only in name, max size, and current size.
  final ServerHolder h2 = new ServerHolder(
      new ImmutableDruidServer(
          new DruidServerMetadata("name2", "host1", null, 200L, ServerType.HISTORICAL, "tier1", 0),
          100L,
          ImmutableMap.of("src1", DATA_SOURCES.get("src1")),
          1
      ),
      new LoadQueuePeonTester()
  );
  // Differs from h1 in host.
  final ServerHolder h3 = new ServerHolder(
      new ImmutableDruidServer(
          new DruidServerMetadata("name1", "host2", null, 200L, ServerType.HISTORICAL, "tier1", 0),
          100L,
          ImmutableMap.of("src1", DATA_SOURCES.get("src1")),
          1
      ),
      new LoadQueuePeonTester()
  );
  // Differs from h1 in tier.
  final ServerHolder h4 = new ServerHolder(
      new ImmutableDruidServer(
          new DruidServerMetadata("name1", "host1", null, 200L, ServerType.HISTORICAL, "tier2", 0),
          100L,
          ImmutableMap.of("src1", DATA_SOURCES.get("src1")),
          1
      ),
      new LoadQueuePeonTester()
  );
  // Differs from h1 in server type.
  final ServerHolder h5 = new ServerHolder(
      new ImmutableDruidServer(
          new DruidServerMetadata("name1", "host1", null, 100L, ServerType.REALTIME, "tier1", 0),
          0L,
          ImmutableMap.of("src1", DATA_SOURCES.get("src1")),
          1
      ),
      new LoadQueuePeonTester()
  );

  Assert.assertEquals(h1, h2);
  Assert.assertNotEquals(h1, h3);
  Assert.assertNotEquals(h1, h4);
  Assert.assertNotEquals(h1, h5);
}
Example use of org.apache.druid.server.coordination.DruidServerMetadata in the druid-io/druid project: class ServerHolderTest, method testIsServingSegment.
@Test
public void testIsServingSegment() {
  // Holder backed by a historical that serves the segments of "src1" only.
  final ServerHolder holder = new ServerHolder(
      new ImmutableDruidServer(
          new DruidServerMetadata("name1", "host1", null, 100L, ServerType.HISTORICAL, "tier1", 0),
          0L,
          ImmutableMap.of("src1", DATA_SOURCES.get("src1")),
          1
      ),
      new LoadQueuePeonTester()
  );

  // Both the DataSegment overload and the SegmentId overload must agree.
  Assert.assertTrue(holder.isServingSegment(SEGMENTS.get(0)));
  Assert.assertFalse(holder.isServingSegment(SEGMENTS.get(1)));
  Assert.assertTrue(holder.isServingSegment(SEGMENTS.get(0).getId()));
  Assert.assertFalse(holder.isServingSegment(SEGMENTS.get(1).getId()));
}
Example use of org.apache.druid.server.coordination.DruidServerMetadata in the druid-io/druid project: class ServerHolderTest, method testCompareTo.
@Test
public void testCompareTo() {
  // available size of 100 (max 100, used 0)
  final ServerHolder h1 = new ServerHolder(
      new ImmutableDruidServer(
          new DruidServerMetadata("name1", "host1", null, 100L, ServerType.HISTORICAL, "tier1", 0),
          0L,
          ImmutableMap.of("src1", DATA_SOURCES.get("src1")),
          1
      ),
      new LoadQueuePeonTester()
  );
  // available size of 100 (max 200, used 100)
  final ServerHolder h2 = new ServerHolder(
      new ImmutableDruidServer(
          new DruidServerMetadata("name1", "host1", null, 200L, ServerType.HISTORICAL, "tier1", 0),
          100L,
          ImmutableMap.of("src1", DATA_SOURCES.get("src1")),
          1
      ),
      new LoadQueuePeonTester()
  );
  // available size of 10 (max 1000, used 990)
  final ServerHolder h3 = new ServerHolder(
      new ImmutableDruidServer(
          new DruidServerMetadata("name1", "host1", null, 1000L, ServerType.HISTORICAL, "tier1", 0),
          990L,
          ImmutableMap.of("src1", DATA_SOURCES.get("src1")),
          1
      ),
      new LoadQueuePeonTester()
  );
  // available size of 50 (max 50, used 0)
  final ServerHolder h4 = new ServerHolder(
      new ImmutableDruidServer(
          new DruidServerMetadata("name1", "host1", null, 50L, ServerType.HISTORICAL, "tier1", 0),
          0L,
          ImmutableMap.of("src1", DATA_SOURCES.get("src1")),
          1
      ),
      new LoadQueuePeonTester()
  );

  // Equal available size compares as equal; smaller available size sorts first.
  Assert.assertEquals(0, h1.compareTo(h2));
  Assert.assertEquals(-1, h3.compareTo(h1));
  Assert.assertEquals(-1, h3.compareTo(h4));
}
Aggregations