Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
Example: the class DruidSchemaTest, method testAvailableSegmentMetadataNumRows.
/**
 * Verifies that {@link AvailableSegmentMetadata#getNumRows()} is correct in the case
 * of multiple replicas, i.e. when {@link DruidSchema#addSegment(DruidServerMetadata, DataSegment)}
 * is called more than once for the same segment.
 */
@Test
public void testAvailableSegmentMetadataNumRows() {
  Map<SegmentId, AvailableSegmentMetadata> segmentsMetadata = schema.getSegmentMetadataSnapshot();
  final List<DataSegment> segments = segmentsMetadata.values()
      .stream()
      .map(AvailableSegmentMetadata::getSegment)
      .collect(Collectors.toList());
  Assert.assertEquals(4, segments.size());
  // find the only segment with datasource "foo2"
  final DataSegment existingSegment = segments.stream()
      .filter(segment -> segment.getDataSource().equals("foo2"))
      .findFirst()
      .orElse(null);
  Assert.assertNotNull(existingSegment);
  final AvailableSegmentMetadata existingMetadata = segmentsMetadata.get(existingSegment.getId());
  // update AvailableSegmentMetadata of existingSegment with numRows=5
  AvailableSegmentMetadata updatedMetadata = AvailableSegmentMetadata.from(existingMetadata).withNumRows(5).build();
  schema.setAvailableSegmentMetadata(existingSegment.getId(), updatedMetadata);
  // find a druidServer holding existingSegment
  final Pair<ImmutableDruidServer, DataSegment> pair = druidServers.stream()
      .flatMap(druidServer -> druidServer.iterateAllSegments()
          .stream()
          .filter(segment -> segment.getId().equals(existingSegment.getId()))
          .map(segment -> Pair.of(druidServer, segment)))
      .findAny()
      .orElse(null);
  Assert.assertNotNull(pair);
  final ImmutableDruidServer server = pair.lhs;
  Assert.assertNotNull(server);
  final DruidServerMetadata druidServerMetadata = server.getMetadata();
  // invoke DruidSchema#addSegment on existingSegment
  schema.addSegment(druidServerMetadata, existingSegment);
  segmentsMetadata = schema.getSegmentMetadataSnapshot();
  // get the only segment with datasource "foo2"
  final DataSegment currentSegment = segments.stream()
      .filter(segment -> segment.getDataSource().equals("foo2"))
      .findFirst()
      .orElse(null);
  // Guard the dereferences below: a missing segment/metadata should fail as an
  // assertion, not as a NullPointerException (the original checked only existingSegment).
  Assert.assertNotNull(currentSegment);
  final AvailableSegmentMetadata currentMetadata = segmentsMetadata.get(currentSegment.getId());
  Assert.assertNotNull(currentMetadata);
  Assert.assertEquals(updatedMetadata.getSegment().getId(), currentMetadata.getSegment().getId());
  Assert.assertEquals(updatedMetadata.getNumRows(), currentMetadata.getNumRows());
  // numreplicas do not change here since we addSegment with the same server which was serving existingSegment before
  Assert.assertEquals(updatedMetadata.getNumReplicas(), currentMetadata.getNumReplicas());
}
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
Example: the class FireHydrantTest, method testGetSegmentForQueryButNotAbleToAcquireReferences.
@Test
public void testGetSegmentForQueryButNotAbleToAcquireReferences() {
  final ReferenceCountingSegment baseSegmentReference = hydrant.getHydrantSegment();
  Assert.assertEquals(0, baseSegmentReference.getNumReferences());
  // Wrap the hydrant's segment in a SegmentReference that always refuses to hand out a reference,
  // so getSegmentForQuery cannot acquire the segment.
  final Optional<Pair<SegmentReference, Closeable>> acquired = hydrant.getSegmentForQuery(
      wrapped -> new SegmentReference() {
        @Override
        public Optional<Closeable> acquireReferences() {
          // Simulate a segment whose references can never be acquired.
          return Optional.empty();
        }

        @Override
        public SegmentId getId() {
          return incrementalIndexSegment.getId();
        }

        @Override
        public Interval getDataInterval() {
          return incrementalIndexSegment.getDataInterval();
        }

        @Nullable
        @Override
        public QueryableIndex asQueryableIndex() {
          return incrementalIndexSegment.asQueryableIndex();
        }

        @Override
        public StorageAdapter asStorageAdapter() {
          return incrementalIndexSegment.asStorageAdapter();
        }

        @Override
        public void close() {
          incrementalIndexSegment.close();
        }
      }
  );
  // Acquisition failed, so no (segment, closeable) pair is returned and no reference was leaked.
  Assert.assertFalse(acquired.isPresent());
  Assert.assertEquals(0, baseSegmentReference.getNumReferences());
}
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
Example: the class BalanceSegmentsTester, method moveSegment.
/**
 * Attempts to move the given segment to {@code toServer}: queues a load on the target
 * server's peon, marks the segment to drop on the source server's peon, and records the
 * in-flight move in {@code currentlyMovingSegments}.
 *
 * @return true if the move was initiated, false if the target already has (or is loading)
 *         the segment, lacks capacity, or the load request threw
 */
@Override
protected boolean moveSegment(final BalancerSegmentHolder segment, final ImmutableDruidServer toServer, final DruidCoordinatorRuntimeParams params) {
  final String toServerName = toServer.getName();
  final LoadQueuePeon toPeon = params.getLoadManagementPeons().get(toServerName);
  final String fromServerName = segment.getFromServer().getName();
  final DataSegment segmentToMove = segment.getSegment();
  final SegmentId segmentId = segmentToMove.getId();
  // Move only if the target is not already loading/holding the segment and has room for it.
  if (!toPeon.getSegmentsToLoad().contains(segmentToMove)
      && (toServer.getSegment(segmentId) == null)
      && new ServerHolder(toServer, toPeon).getAvailableSize() > segmentToMove.getSize()) {
    log.info("Moving [%s] from [%s] to [%s]", segmentId, fromServerName, toServerName);
    try {
      // Reuse the already-fetched target peon instead of a second identical map lookup.
      toPeon.loadSegment(segmentToMove, () -> {
      });
      final LoadQueuePeon dropPeon = params.getLoadManagementPeons().get(fromServerName);
      dropPeon.markSegmentToDrop(segmentToMove);
      currentlyMovingSegments.get("normal").put(segmentId, segment);
      return true;
    } catch (Exception e) {
      // Pass the format string and argument directly: pre-formatting via StringUtils.format
      // would hand the logger an already-expanded message, re-interpreting any '%' in
      // segmentId as a format specifier.
      log.info(e, "[%s] : Moving exception", segmentId);
    }
  }
  return false;
}
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
Example: the class CostBalancerStrategyTest, method setupDummyCluster.
/**
 * Create a dummy Druid cluster with {@code serverCount} servers holding {@code maxSegments}
 * segments each, plus one additional mocked server ("BEST_SERVER") holding only
 * (maxSegments - 2) segments. The cost balancer strategy should assign the next segment
 * to that server, since it has the fewest segments.
 *
 * @param serverCount number of fully-loaded servers to create
 * @param maxSegments number of segments held by each fully-loaded server
 * @return the holders for the full servers, followed by the under-filled "BEST_SERVER"
 */
public static List<ServerHolder> setupDummyCluster(int serverCount, int maxSegments) {
List<ServerHolder> serverHolderList = new ArrayList<>();
// Each of these servers holds maxSegments segments of the "DUMMY" datasource.
for (int i = 0; i < serverCount; i++) {
LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
List<DataSegment> segments = IntStream.range(0, maxSegments).mapToObj(j -> getSegment(j)).collect(Collectors.toList());
ImmutableDruidDataSource dataSource = new ImmutableDruidDataSource("DUMMY", Collections.emptyMap(), segments);
String serverName = "DruidServer_Name_" + i;
ServerHolder serverHolder = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata(serverName, "localhost", null, 10000000L, ServerType.HISTORICAL, "hot", 1), 3000L, ImmutableMap.of("DUMMY", dataSource), segments.size()), fromPeon);
serverHolderList.add(serverHolder);
}
// The best candidate for the next segment assignment holds only (maxSegments - 2) segments.
// It is an EasyMock mock so getSegment() can be stubbed per segment below.
LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
ImmutableDruidServer druidServer = EasyMock.createMock(ImmutableDruidServer.class);
EasyMock.expect(druidServer.getName()).andReturn("BEST_SERVER").anyTimes();
EasyMock.expect(druidServer.getCurrSize()).andReturn(3000L).anyTimes();
EasyMock.expect(druidServer.getMaxSize()).andReturn(10000000L).anyTimes();
// Default stub: any segment not explicitly registered below is reported as not present.
EasyMock.expect(druidServer.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
Map<SegmentId, DataSegment> segments = new HashMap<>();
for (int j = 0; j < (maxSegments - 2); j++) {
DataSegment segment = getSegment(j);
segments.put(segment.getId(), segment);
EasyMock.expect(druidServer.getSegment(segment.getId())).andReturn(segment).anyTimes();
}
ImmutableDruidServerTests.expectSegments(druidServer, segments.values());
EasyMock.replay(druidServer);
serverHolderList.add(new ServerHolder(druidServer, fromPeon));
return serverHolderList;
}
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
Example: the class HttpLoadQueuePeonTest, method testSimple.
@Test(timeout = 60_000L)
public void testSimple() throws Exception {
  final HttpLoadQueuePeon peon = new HttpLoadQueuePeon(
      "http://dummy:4000",
      ServerTestHelper.MAPPER,
      new TestHttpClient(),
      config,
      Executors.newScheduledThreadPool(2, Execs.makeThreadFactory("HttpLoadQueuePeonTest-%s")),
      Execs.singleThreaded("HttpLoadQueuePeonTest")
  );
  peon.start();
  // One latch per segment; each is counted down by the matching load/drop callback.
  final Map<SegmentId, CountDownLatch> latches = ImmutableMap.of(
      segment1.getId(), new CountDownLatch(1),
      segment2.getId(), new CountDownLatch(1),
      segment3.getId(), new CountDownLatch(1),
      segment4.getId(), new CountDownLatch(1)
  );
  peon.dropSegment(segment1, () -> latches.get(segment1.getId()).countDown());
  peon.loadSegment(segment2, () -> latches.get(segment2.getId()).countDown());
  peon.dropSegment(segment3, () -> latches.get(segment3.getId()).countDown());
  peon.loadSegment(segment4, () -> latches.get(segment4.getId()).countDown());
  // Wait for every callback to fire; ImmutableMap iterates in insertion order,
  // so this awaits segment1..segment4 exactly as the individual awaits would.
  for (CountDownLatch latch : latches.values()) {
    latch.await();
  }
  peon.stop();
}
Aggregations