Example of using org.apache.druid.timeline.SegmentId in the druid-io/druid project: the testMarkSegmentsAsUnusedException method of the DataSourcesResourceTest class.
@Test
public void testMarkSegmentsAsUnusedException() {
  // Datasource whose segments the request will attempt to mark as unused.
  final DruidDataSource dataSource1 = new DruidDataSource("datasource1", new HashMap<>());

  // Gather the ids of every test segment belonging to that datasource.
  final Set<SegmentId> segmentIds = new HashSet<>();
  for (DataSegment segment : dataSegmentList) {
    if (segment.getDataSource().equals(dataSource1.getName())) {
      segmentIds.add(segment.getId());
    }
  }

  // The resource resolves the datasource through the inventory view, then
  // delegates to the metadata manager — which we force to throw.
  EasyMock.expect(inventoryView.getInventory()).andReturn(ImmutableList.of(server)).once();
  EasyMock.expect(server.getDataSource("datasource1")).andReturn(dataSource1).once();
  EasyMock.expect(segmentsMetadataManager.markSegmentsAsUnused(segmentIds))
          .andThrow(new RuntimeException("Exception occurred"))
          .once();
  EasyMock.replay(segmentsMetadataManager, inventoryView, server);

  // Payload with a null interval and the segment ids rendered as strings.
  final Set<String> segmentIdStrings = new HashSet<>();
  for (SegmentId id : segmentIds) {
    segmentIdStrings.add(id.toString());
  }
  final DataSourcesResource.MarkDataSourceSegmentsPayload payload =
      new DataSourcesResource.MarkDataSourceSegmentsPayload(null, segmentIdStrings);

  DataSourcesResource dataSourcesResource =
      new DataSourcesResource(inventoryView, segmentsMetadataManager, null, null, null, null);
  Response response = dataSourcesResource.markSegmentsAsUnused("datasource1", payload);

  // The metadata-manager exception must surface as an HTTP 500 with a non-null body.
  Assert.assertEquals(500, response.getStatus());
  Assert.assertNotNull(response.getEntity());
  EasyMock.verify(segmentsMetadataManager, inventoryView, server);
}
Example of using org.apache.druid.timeline.SegmentId in the druid-io/druid project: the verifyRow method of the SystemSchemaTest class.
/**
 * Asserts that one row of the sys.segments table matches the expected field values.
 * Row columns are addressed positionally; the segment id string is re-parsed so the
 * datasource, interval, and version columns can be checked against it.
 *
 * NOTE(review): columns 13-15 are skipped (indices jump from 12 to 16) — presumably
 * they are not relevant to these tests; confirm against the sys.segments schema.
 */
private void verifyRow(Object[] row, String segmentId, long size, long partitionNum, long numReplicas, long numRows, long isPublished, long isAvailable, long isRealtime, long isOvershadowed, CompactionState compactionState) throws Exception {
Assert.assertEquals(segmentId, row[0].toString());
// Take the first of the possible parsings of the id string; assumes the test
// segment ids are unambiguous — TODO confirm.
SegmentId id = Iterables.get(SegmentId.iterateAllPossibleParsings(segmentId), 0);
Assert.assertEquals(id.getDataSource(), row[1]);
Assert.assertEquals(id.getIntervalStart().toString(), row[2]);
Assert.assertEquals(id.getIntervalEnd().toString(), row[3]);
Assert.assertEquals(size, row[4]);
Assert.assertEquals(id.getVersion(), row[5]);
Assert.assertEquals(partitionNum, row[6]);
Assert.assertEquals(numReplicas, row[7]);
Assert.assertEquals(numRows, row[8]);
Assert.assertEquals(isPublished, row[9]);
Assert.assertEquals(isAvailable, row[10]);
Assert.assertEquals(isRealtime, row[11]);
Assert.assertEquals(isOvershadowed, row[12]);
// The compaction-state column is serialized JSON; null means the segment has
// no last_compaction_state.
if (compactionState == null) {
Assert.assertNull(row[16]);
} else {
Assert.assertEquals(mapper.writeValueAsString(compactionState), row[16]);
}
}
Example of using org.apache.druid.timeline.SegmentId in the druid-io/druid project: the testDruidSchemaRefreshAndDruidSchemaGetSegmentMetadata method of the DruidSchemaConcurrencyTest class.
/**
 * This tests the contention between 2 methods of DruidSchema, {@link DruidSchema#refresh} and
 * {@link DruidSchema#getSegmentMetadataSnapshot()}. It first triggers refreshing DruidSchema.
 * To mimic some heavy work done with {@link DruidSchema#lock}, {@link DruidSchema#buildDruidTable} is overridden
 * to sleep before doing real work. While refreshing DruidSchema, getSegmentMetadataSnapshot() is continuously
 * called to mimic reading the segments table of SystemSchema. All these calls must return without heavy contention.
 */
@Test(timeout = 30000L)
public void testDruidSchemaRefreshAndDruidSchemaGetSegmentMetadata() throws InterruptedException, ExecutionException, TimeoutException {
  // Subclass DruidSchema so buildDruidTable sleeps while holding the schema lock,
  // simulating an expensive table-building pass during refresh.
  schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {
    @Override
    DruidTable buildDruidTable(final String dataSource) {
      doInLock(() -> {
        try {
          // Mimic some heavy work done in lock in DruidSchema
          Thread.sleep(5000);
        } catch (InterruptedException e) {
          // Restore interrupt status before rethrowing so callers can observe it.
          Thread.currentThread().interrupt();
          throw new RuntimeException(e);
        }
      });
      return super.buildDruidTable(dataSource);
    }
  };
  int numExistingSegments = 100;
  int numServers = 19;
  CountDownLatch segmentLoadLatch = new CountDownLatch(numExistingSegments);
  // Count down once per segment announcement so we know when the view is ready.
  serverView.registerTimelineCallback(Execs.directExecutor(), new TimelineCallback() {
    @Override
    public CallbackAction timelineInitialized() {
      return CallbackAction.CONTINUE;
    }
    @Override
    public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
      segmentLoadLatch.countDown();
      return CallbackAction.CONTINUE;
    }
    @Override
    public CallbackAction segmentRemoved(DataSegment segment) {
      return CallbackAction.CONTINUE;
    }
    @Override
    public CallbackAction serverSegmentRemoved(DruidServerMetadata server, DataSegment segment) {
      return CallbackAction.CONTINUE;
    }
  });
  addSegmentsToCluster(0, numServers, numExistingSegments);
  // Wait for all segments to be loaded in BrokerServerView
  Assert.assertTrue(segmentLoadLatch.await(5, TimeUnit.SECONDS));
  // Trigger refresh of DruidSchema. This will internally run the heavy work mimicked
  // by the overridden buildDruidTable. Use Future<?> rather than a raw Future.
  Future<?> refreshFuture = exec.submit(() -> {
    schema.refresh(walker.getSegments().stream().map(DataSegment::getId).collect(Collectors.toSet()), Sets.newHashSet(DATASOURCE));
    return null;
  });
  Assert.assertFalse(refreshFuture.isDone());
  for (int i = 0; i < 1000; i++) {
    // We want to call getSegmentMetadataSnapshot() while the refresh is in
    // progress. Sleep might help with timing.
    Map<SegmentId, AvailableSegmentMetadata> segmentsMetadata = exec.submit(() -> schema.getSegmentMetadataSnapshot()).get(100, TimeUnit.MILLISECONDS);
    Assert.assertFalse(segmentsMetadata.isEmpty());
    Thread.sleep(2);
  }
  refreshFuture.get(10, TimeUnit.SECONDS);
}
Example of using org.apache.druid.timeline.SegmentId in the druid-io/druid project: the testNullAvailableSegmentMetadata method of the DruidSchemaTest class.
@Test
public void testNullAvailableSegmentMetadata() throws IOException {
  // Snapshot the current metadata and materialize the backing segments.
  final Map<SegmentId, AvailableSegmentMetadata> metadataSnapshot = schema.getSegmentMetadataSnapshot();
  final List<DataSegment> segments = new ArrayList<>();
  for (AvailableSegmentMetadata metadata : metadataSnapshot.values()) {
    segments.add(metadata.getSegment());
  }
  Assert.assertEquals(4, segments.size());

  // remove one of the segments with datasource "foo"
  DataSegment segmentToRemove = null;
  for (DataSegment candidate : segments) {
    if ("foo".equals(candidate.getDataSource())) {
      segmentToRemove = candidate;
      break;
    }
  }
  Assert.assertNotNull(segmentToRemove);
  schema.removeSegment(segmentToRemove);

  // Refreshing the (now partially stale) id set can cause an NPE without the
  // segmentMetadata null check in DruidSchema#refreshSegmentsForDataSource.
  final Set<SegmentId> idsToRefresh = new HashSet<>();
  for (DataSegment segment : segments) {
    idsToRefresh.add(segment.getId());
  }
  schema.refreshSegments(idsToRefresh);
  Assert.assertEquals(3, schema.getSegmentMetadataSnapshot().size());
}
Example of using org.apache.druid.timeline.SegmentId in the druid-io/druid project: the testNullDatasource method of the DruidSchemaTest class.
@Test
public void testNullDatasource() throws IOException {
  // Snapshot the current metadata and materialize the backing segments.
  final Map<SegmentId, AvailableSegmentMetadata> metadataSnapshot = schema.getSegmentMetadataSnapshot();
  final List<DataSegment> segments = new ArrayList<>();
  for (AvailableSegmentMetadata metadata : metadataSnapshot.values()) {
    segments.add(metadata.getSegment());
  }
  Assert.assertEquals(4, segments.size());

  // segments contains two segments with datasource "foo" and one with datasource "foo2"
  // let's remove the only segment with datasource "foo2"
  DataSegment segmentToRemove = null;
  for (DataSegment candidate : segments) {
    if ("foo2".equals(candidate.getDataSource())) {
      segmentToRemove = candidate;
      break;
    }
  }
  Assert.assertNotNull(segmentToRemove);
  schema.removeSegment(segmentToRemove);

  // Refreshing the (now partially stale) id set can cause an NPE without the
  // segmentMetadata null check in DruidSchema#refreshSegmentsForDataSource.
  final Set<SegmentId> idsToRefresh = new HashSet<>();
  for (DataSegment segment : segments) {
    idsToRefresh.add(segment.getId());
  }
  schema.refreshSegments(idsToRefresh);
  Assert.assertEquals(3, schema.getSegmentMetadataSnapshot().size());
}
Aggregations