Usage of org.apache.druid.server.coordination.DruidServerMetadata in the druid-io/druid project.
The testSerde method of the ImmutableSegmentLoadInfoTest class.
/**
 * Verifies that an {@link ImmutableSegmentLoadInfo} survives a Jackson round
 * trip: serialize to bytes, deserialize, and compare with the original.
 */
@Test
public void testSerde() throws IOException
{
  final DataSegment segment = new DataSegment(
      "test_ds",
      Intervals.of("2011-04-01/2011-04-02"),
      "v1",
      null,
      null,
      null,
      NoneShardSpec.instance(),
      0,
      0
  );
  final DruidServerMetadata serverMetadata =
      new DruidServerMetadata("a", "host", null, 10, ServerType.HISTORICAL, "tier", 1);
  final ImmutableSegmentLoadInfo expected =
      new ImmutableSegmentLoadInfo(segment, Sets.newHashSet(serverMetadata));

  final byte[] json = mapper.writeValueAsBytes(expected);
  final ImmutableSegmentLoadInfo actual = mapper.readValue(json, ImmutableSegmentLoadInfo.class);

  Assert.assertEquals(expected, actual);
}
Usage of org.apache.druid.server.coordination.DruidServerMetadata in the druid-io/druid project.
The addSegment method of the TestServerInventoryView class.
/**
 * Registers {@code segment} with the dummy server matching {@code serverType}
 * and notifies every registered segment and timeline callback, each on its own
 * executor.
 */
public void addSegment(DataSegment segment, ServerType serverType)
{
  final Pair<DruidServerMetadata, List<DataSegment>> serverAndSegments =
      getDummyServerAndSegmentsForType(serverType);
  final DruidServerMetadata server = serverAndSegments.lhs;
  serverAndSegments.rhs.add(segment);

  // Deliver the segmentAdded event asynchronously to both callback families.
  segmentCallbackExecs.forEach(
      pair -> pair.lhs.execute(() -> pair.rhs.segmentAdded(server, segment))
  );
  timelineCallbackExecs.forEach(
      pair -> pair.lhs.execute(() -> pair.rhs.segmentAdded(server, segment))
  );
}
Usage of org.apache.druid.server.coordination.DruidServerMetadata in the druid-io/druid project.
The removeSegment method of the TestServerInventoryView class.
/**
 * Removes {@code segment} from the dummy server matching {@code serverType}
 * and notifies callbacks. Timeline callbacks always get serverSegmentRemoved;
 * segmentRemoved fires only once no dummy server holds the segment any more.
 */
public void removeSegment(DataSegment segment, ServerType serverType)
{
  final Pair<DruidServerMetadata, List<DataSegment>> serverAndSegments =
      getDummyServerAndSegmentsForType(serverType);
  final DruidServerMetadata server = serverAndSegments.lhs;
  serverAndSegments.rhs.remove(segment);

  segmentCallbackExecs.forEach(
      pair -> pair.lhs.execute(() -> pair.rhs.segmentRemoved(server, segment))
  );
  timelineCallbackExecs.forEach(pair -> pair.lhs.execute(() -> {
    pair.rhs.serverSegmentRemoved(server, segment);
    // Fire segmentRemoved if all replicas have been removed.
    final boolean lastReplicaGone = !segments.contains(segment)
                                    && !brokerSegments.contains(segment)
                                    && !realtimeSegments.contains(segment);
    if (lastReplicaGone) {
      pair.rhs.segmentRemoved(segment);
    }
  }));
}
Usage of org.apache.druid.server.coordination.DruidServerMetadata in the druid-io/druid project.
The getDummyServerAndSegmentsForType method of the TestServerInventoryView class.
/**
 * Resolves a {@link ServerType} to the matching dummy server metadata and the
 * segment list backing it. BROKER and REALTIME map to their dedicated dummy
 * servers; every other type falls back to the default dummy server.
 */
private Pair<DruidServerMetadata, List<DataSegment>> getDummyServerAndSegmentsForType(ServerType serverType)
{
  if (serverType == ServerType.BROKER) {
    return new Pair<>(DUMMY_BROKER, brokerSegments);
  }
  if (serverType == ServerType.REALTIME) {
    return new Pair<>(DUMMY_SERVER_REALTIME, realtimeSegments);
  }
  return new Pair<>(DUMMY_SERVER, segments);
}
Usage of org.apache.druid.server.coordination.DruidServerMetadata in the druid-io/druid project.
The testSegmentRemovedCallbackNonEmptyDataSourceAfterRemove method of the DruidSchemaTest class.
// Verifies that removing one of two segments of a datasource leaves the
// datasource visible in the schema (with its remaining segment) while marking
// it as needing a rebuild.
@Test
public void testSegmentRemovedCallbackNonEmptyDataSourceAfterRemove() throws InterruptedException, IOException {
String datasource = "segmentRemoveTest";
// Latches count the async add/remove callbacks so the test can wait for them.
CountDownLatch addSegmentLatch = new CountDownLatch(2);
CountDownLatch removeSegmentLatch = new CountDownLatch(1);
// Anonymous subclass that counts down the latches when the callbacks fire
// for this test's datasource (other datasources are ignored).
DruidSchema schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {
@Override
protected void addSegment(final DruidServerMetadata server, final DataSegment segment) {
super.addSegment(server, segment);
if (datasource.equals(segment.getDataSource())) {
addSegmentLatch.countDown();
}
}
@Override
void removeSegment(final DataSegment segment) {
super.removeSegment(segment);
if (datasource.equals(segment.getDataSource())) {
removeSegmentLatch.countDown();
}
}
};
// Add two segments (one realtime, one historical) and wait for both callbacks.
List<DataSegment> segments = ImmutableList.of(newSegment(datasource, 1), newSegment(datasource, 2));
serverView.addSegment(segments.get(0), ServerType.REALTIME);
serverView.addSegment(segments.get(1), ServerType.HISTORICAL);
Assert.assertTrue(addSegmentLatch.await(1, TimeUnit.SECONDS));
schema.refresh(segments.stream().map(DataSegment::getId).collect(Collectors.toSet()), Sets.newHashSet(datasource));
// Remove only the realtime segment; the datasource still has one segment left.
serverView.removeSegment(segments.get(0), ServerType.REALTIME);
Assert.assertTrue(removeSegmentLatch.await(1, TimeUnit.SECONDS));
// NOTE(review): 5 presumably includes fixture segments created elsewhere in
// this test class plus the one remaining segment here — confirm against setup.
Assert.assertEquals(5, schema.getTotalSegments());
List<AvailableSegmentMetadata> metadatas = schema.getSegmentMetadataSnapshot().values().stream().filter(metadata -> datasource.equals(metadata.getSegment().getDataSource())).collect(Collectors.toList());
// Exactly one segment of this datasource remains in the snapshot.
Assert.assertEquals(1, metadatas.size());
// The removed segment must no longer be tracked as pending refresh or mutable.
Assert.assertFalse(schema.getSegmentsNeedingRefresh().contains(segments.get(0).getId()));
Assert.assertFalse(schema.getMutableSegments().contains(segments.get(0).getId()));
// Datasource is flagged for rebuild but remains a known table.
Assert.assertTrue(schema.getDataSourcesNeedingRebuild().contains(datasource));
Assert.assertTrue(schema.getTableNames().contains(datasource));
}
Aggregations