Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
In the class HashJoinSegmentTest, the setUp method:
@Before
public void setUp() throws IOException {
allReferencesAcquireCount = 0;
allReferencesCloseCount = 0;
referencedSegmentAcquireCount = 0;
referencedSegmentClosedCount = 0;
indexedTableJoinableReferenceAcquireCount = 0;
indexedTableJoinableReferenceCloseCount = 0;
j0Closed = false;
j1Closed = false;
baseSegment = new QueryableIndexSegment(JoinTestHelper.createFactIndexBuilder(temporaryFolder.newFolder()).buildMMappedIndex(), SegmentId.dummy("facts"));
List<JoinableClause> joinableClauses = ImmutableList.of(new JoinableClause("j0.", new IndexedTableJoinable(JoinTestHelper.createCountriesIndexedTable()) {
@Override
public Optional<Closeable> acquireReferences() {
if (!j0Closed) {
indexedTableJoinableReferenceAcquireCount++;
Closer closer = Closer.create();
closer.register(() -> indexedTableJoinableReferenceCloseCount++);
return Optional.of(closer);
}
return Optional.empty();
}
}, JoinType.LEFT, JoinConditionAnalysis.forExpression("1", "j0.", ExprMacroTable.nil())),
new JoinableClause("j1.", new IndexedTableJoinable(JoinTestHelper.createRegionsIndexedTable()) {
@Override
public Optional<Closeable> acquireReferences() {
if (!j1Closed) {
indexedTableJoinableReferenceAcquireCount++;
Closer closer = Closer.create();
closer.register(() -> indexedTableJoinableReferenceCloseCount++);
return Optional.of(closer);
}
return Optional.empty();
}
}, JoinType.LEFT, JoinConditionAnalysis.forExpression("1", "j1.", ExprMacroTable.nil())));
referencedSegment = ReferenceCountingSegment.wrapRootGenerationSegment(baseSegment);
SegmentReference testWrapper = new SegmentReference() {
@Override
public Optional<Closeable> acquireReferences() {
Closer closer = Closer.create();
return referencedSegment.acquireReferences().map(closeable -> {
referencedSegmentAcquireCount++;
closer.register(closeable);
closer.register(() -> referencedSegmentClosedCount++);
return closer;
});
}
@Override
public SegmentId getId() {
return referencedSegment.getId();
}
@Override
public Interval getDataInterval() {
return referencedSegment.getDataInterval();
}
@Nullable
@Override
public QueryableIndex asQueryableIndex() {
return referencedSegment.asQueryableIndex();
}
@Override
public StorageAdapter asStorageAdapter() {
return referencedSegment.asStorageAdapter();
}
@Override
public void close() {
referencedSegment.close();
}
};
hashJoinSegment = new HashJoinSegment(testWrapper, null, joinableClauses, null) {
@Override
public Optional<Closeable> acquireReferences() {
Closer closer = Closer.create();
return super.acquireReferences().map(closeable -> {
allReferencesAcquireCount++;
closer.register(closeable);
closer.register(() -> allReferencesCloseCount++);
return closer;
});
}
};
}
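The counters above exist so tests can verify reference bookkeeping on the join segment. As a rough illustration (not a test from the actual class; the expected counts are assumptions based on the wiring above: one base-segment reference plus one reference per joinable clause per acquisition), such a test might look like this:
@Test
public void testAcquireAndReleaseReferencesSketch() throws Exception {
// Illustrative sketch only; counts assume the wiring in setUp() above.
// Acquiring references on the join segment should acquire the base segment
// reference and one reference per joinable clause.
Optional<Closeable> closeable = hashJoinSegment.acquireReferences();
Assert.assertTrue(closeable.isPresent());
Assert.assertEquals(1, allReferencesAcquireCount);
Assert.assertEquals(1, referencedSegmentAcquireCount);
Assert.assertEquals(2, indexedTableJoinableReferenceAcquireCount);
// Closing the returned Closeable should fire every close callback registered in setUp().
closeable.get().close();
Assert.assertEquals(1, allReferencesCloseCount);
Assert.assertEquals(1, referencedSegmentClosedCount);
Assert.assertEquals(2, indexedTableJoinableReferenceCloseCount);
}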
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
In the class DruidSchemaTest, the testRunSegmentMetadataQueryWithContext method:
/**
* Ensure that the BrokerInternalQueryConfig context is honored for this internally generated SegmentMetadata Query
*/
@Test
public void testRunSegmentMetadataQueryWithContext() throws Exception {
Map<String, Object> queryContext = ImmutableMap.of("priority", 5);
String brokerInternalQueryConfigJson = "{\"context\": { \"priority\": 5} }";
TestHelper.makeJsonMapper();
BrokerInternalQueryConfig brokerInternalQueryConfig = MAPPER.readValue(
    MAPPER.writeValueAsString(MAPPER.readValue(brokerInternalQueryConfigJson, BrokerInternalQueryConfig.class)),
    BrokerInternalQueryConfig.class
);
DataSegment segment = newSegment("test", 0);
List<SegmentId> segmentIterable = ImmutableList.of(segment.getId());
// This is the query we expect the method under test to create; below we verify that the generated query matches it.
SegmentMetadataQuery expectedMetadataQuery = new SegmentMetadataQuery(
    new TableDataSource(segment.getDataSource()),
    new MultipleSpecificSegmentSpec(segmentIterable.stream().map(SegmentId::toDescriptor).collect(Collectors.toList())),
    new AllColumnIncluderator(),
    false,
    queryContext,
    EnumSet.noneOf(SegmentMetadataQuery.AnalysisType.class),
    false,
    false
);
QueryLifecycleFactory factoryMock = EasyMock.createMock(QueryLifecycleFactory.class);
QueryLifecycle lifecycleMock = EasyMock.createMock(QueryLifecycle.class);
// Need to create a schema for this test because the available schemas don't mock the QueryLifecycleFactory, which this test requires.
DruidSchema mySchema = new DruidSchema(factoryMock, serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(globalTableJoinable), ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), brokerInternalQueryConfig, null);
EasyMock.expect(factoryMock.factorize()).andReturn(lifecycleMock).once();
// This is the meat of the test, making sure that the query created by the method under test matches the expected query, specifically the operator-configured context
EasyMock.expect(lifecycleMock.runSimple(expectedMetadataQuery, AllowAllAuthenticator.ALLOW_ALL_RESULT, Access.OK)).andReturn(null);
EasyMock.replay(factoryMock, lifecycleMock);
mySchema.runSegmentMetadataQuery(segmentIterable);
EasyMock.verify(factoryMock, lifecycleMock);
}
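The BrokerInternalQueryConfig JSON above is what supplies the "priority" context entry that the expected query is built with. A hedged sketch of checking that directly (the getContext() accessor is an assumption here, not something shown in the listing):
// Hypothetical check: the deserialized broker config should carry the same
// context map used to build expectedMetadataQuery. getContext() is assumed.
BrokerInternalQueryConfig parsed = MAPPER.readValue(brokerInternalQueryConfigJson, BrokerInternalQueryConfig.class);
Assert.assertEquals(queryContext, parsed.getContext());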
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
In the class DruidSchemaTest, the testAvailableSegmentMetadataIsRealtime method:
@Test
public void testAvailableSegmentMetadataIsRealtime() {
Map<SegmentId, AvailableSegmentMetadata> segmentsMetadata = schema.getSegmentMetadataSnapshot();
final List<DataSegment> segments = segmentsMetadata.values().stream().map(AvailableSegmentMetadata::getSegment).collect(Collectors.toList());
// find the only realtime segment with datasource "foo3"
final DataSegment existingSegment = segments.stream().filter(segment -> segment.getDataSource().equals("foo3")).findFirst().orElse(null);
Assert.assertNotNull(existingSegment);
final AvailableSegmentMetadata metadata = segmentsMetadata.get(existingSegment.getId());
Assert.assertEquals(1L, metadata.isRealtime());
// get the historical server
final ImmutableDruidServer historicalServer = druidServers.stream().filter(s -> s.getType().equals(ServerType.HISTORICAL)).findAny().orElse(null);
Assert.assertNotNull(historicalServer);
final DruidServerMetadata historicalServerMetadata = historicalServer.getMetadata();
// add existingSegment to historical
schema.addSegment(historicalServerMetadata, existingSegment);
segmentsMetadata = schema.getSegmentMetadataSnapshot();
// get the segment with datasource "foo3"
DataSegment currentSegment = segments.stream().filter(segment -> segment.getDataSource().equals("foo3")).findFirst().orElse(null);
Assert.assertNotNull(currentSegment);
AvailableSegmentMetadata currentMetadata = segmentsMetadata.get(currentSegment.getId());
Assert.assertEquals(0L, currentMetadata.isRealtime());
ImmutableDruidServer realtimeServer = druidServers.stream().filter(s -> s.getType().equals(ServerType.REALTIME)).findAny().orElse(null);
Assert.assertNotNull(realtimeServer);
// drop existingSegment from realtime task
schema.removeServerSegment(realtimeServer.getMetadata(), existingSegment);
segmentsMetadata = schema.getSegmentMetadataSnapshot();
currentSegment = segments.stream().filter(segment -> segment.getDataSource().equals("foo3")).findFirst().orElse(null);
Assert.assertNotNull(currentSegment);
currentMetadata = segmentsMetadata.get(currentSegment.getId());
Assert.assertEquals(0L, currentMetadata.isRealtime());
}
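The assertions above encode a simple rule: AvailableSegmentMetadata reports a segment as realtime (1) only while no historical serves it; once a historical picks it up, the flag drops to 0 and stays there even after the realtime copy is removed. A hypothetical helper (not part of DruidSchema) stating that rule:
// Hypothetical helper, for illustration only: the realtime flag as this test observes it.
static long realtimeFlag(Collection<ServerType> serversServingSegment) {
// Served by at least one historical => no longer realtime.
return serversServingSegment.contains(ServerType.HISTORICAL) ? 0L : 1L;
}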
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
In the class DruidSchemaTest, the setUp method:
@Before
public void setUp() throws Exception {
final File tmpDir = temporaryFolder.newFolder();
final QueryableIndex index1 = IndexBuilder.create()
    .tmpDir(new File(tmpDir, "1"))
    .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
    .schema(new IncrementalIndexSchema.Builder()
        .withMetrics(new CountAggregatorFactory("cnt"), new DoubleSumAggregatorFactory("m1", "m1"), new HyperUniquesAggregatorFactory("unique_dim1", "dim1"))
        .withRollup(false)
        .build())
    .rows(ROWS1)
    .buildMMappedIndex();
final QueryableIndex index2 = IndexBuilder.create()
    .tmpDir(new File(tmpDir, "2"))
    .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
    .schema(new IncrementalIndexSchema.Builder()
        .withMetrics(new LongSumAggregatorFactory("m1", "m1"))
        .withRollup(false)
        .build())
    .rows(ROWS2)
    .buildMMappedIndex();
walker = new SpecificSegmentsQuerySegmentWalker(conglomerate)
    .add(DataSegment.builder().dataSource(CalciteTests.DATASOURCE1).interval(Intervals.of("2000/P1Y")).version("1").shardSpec(new LinearShardSpec(0)).size(0).build(), index1)
    .add(DataSegment.builder().dataSource(CalciteTests.DATASOURCE1).interval(Intervals.of("2001/P1Y")).version("1").shardSpec(new LinearShardSpec(0)).size(0).build(), index2)
    .add(DataSegment.builder().dataSource(CalciteTests.DATASOURCE2).interval(index2.getDataInterval()).version("1").shardSpec(new LinearShardSpec(0)).size(0).build(), index2);
final DataSegment segment1 = new DataSegment("foo3", Intervals.of("2012/2013"), "version3", null, ImmutableList.of("dim1", "dim2"), ImmutableList.of("met1", "met2"), new NumberedShardSpec(2, 3), null, 1, 100L, PruneSpecsHolder.DEFAULT);
final List<DataSegment> realtimeSegments = ImmutableList.of(segment1);
serverView = new TestServerInventoryView(walker.getSegments(), realtimeSegments);
druidServers = serverView.getDruidServers();
schema = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(globalTableJoinable), ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {
@Override
protected DruidTable buildDruidTable(String dataSource) {
DruidTable table = super.buildDruidTable(dataSource);
buildTableLatch.countDown();
return table;
}
@Override
void markDataSourceAsNeedRebuild(String datasource) {
super.markDataSourceAsNeedRebuild(datasource);
markDataSourceLatch.countDown();
}
};
schema2 = new DruidSchema(CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate), serverView, segmentManager, new MapJoinableFactory(ImmutableSet.of(globalTableJoinable), ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)), PLANNER_CONFIG_DEFAULT, new NoopEscalator(), new BrokerInternalQueryConfig(), null) {
boolean throwException = true;
@Override
protected DruidTable buildDruidTable(String dataSource) {
DruidTable table = super.buildDruidTable(dataSource);
buildTableLatch.countDown();
return table;
}
@Override
protected Set<SegmentId> refreshSegments(final Set<SegmentId> segments) throws IOException {
if (throwException) {
throwException = false;
throw new RuntimeException("Query[xxxx] url[http://xxxx:8083/druid/v2/] timed out.");
} else {
return super.refreshSegments(segments);
}
}
@Override
void markDataSourceAsNeedRebuild(String datasource) {
super.markDataSourceAsNeedRebuild(datasource);
markDataSourceLatch.countDown();
}
};
schema.start();
schema.awaitInitialization();
}
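The overrides above only count down buildTableLatch and markDataSourceLatch; tests then block on those latches to know when a table has been rebuilt or a datasource marked for rebuild. A minimal sketch of that usage (field names taken from this setUp; the timeout value is arbitrary):
// Illustrative wait, as a test-body fragment: block until the schema has rebuilt a table
// and has marked a datasource for rebuild, failing after an arbitrary timeout.
Assert.assertTrue("buildDruidTable was never called", buildTableLatch.await(10, TimeUnit.SECONDS));
Assert.assertTrue("markDataSourceAsNeedRebuild was never called", markDataSourceLatch.await(10, TimeUnit.SECONDS));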
Use of org.apache.druid.timeline.SegmentId in project druid by druid-io.
In the class LoadQueuePeonTest, the testMultipleLoadDropSegments method:
@Test
public void testMultipleLoadDropSegments() throws Exception {
loadQueuePeon = new CuratorLoadQueuePeon(
    curator,
    LOAD_QUEUE_PATH,
    jsonMapper,
    Execs.scheduledSingleThreaded("test_load_queue_peon_scheduled-%d"),
    Execs.singleThreaded("test_load_queue_peon-%d"),
    new TestDruidCoordinatorConfig(null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, 10, Duration.millis(0))
);
loadQueuePeon.start();
ConcurrentMap<SegmentId, CountDownLatch> loadRequestSignals = new ConcurrentHashMap<>(5);
ConcurrentMap<SegmentId, CountDownLatch> dropRequestSignals = new ConcurrentHashMap<>(5);
ConcurrentMap<SegmentId, CountDownLatch> segmentLoadedSignals = new ConcurrentHashMap<>(5);
ConcurrentMap<SegmentId, CountDownLatch> segmentDroppedSignals = new ConcurrentHashMap<>(5);
final List<DataSegment> segmentToDrop = Lists.transform(ImmutableList.of("2014-10-26T00:00:00Z/P1D", "2014-10-25T00:00:00Z/P1D", "2014-10-24T00:00:00Z/P1D", "2014-10-23T00:00:00Z/P1D", "2014-10-22T00:00:00Z/P1D"), new Function<String, DataSegment>() {
@Override
public DataSegment apply(String intervalStr) {
DataSegment dataSegment = dataSegmentWithInterval(intervalStr);
return dataSegment;
}
});
final CountDownLatch[] dropRequestLatches = new CountDownLatch[5];
final CountDownLatch[] dropSegmentLatches = new CountDownLatch[5];
for (int i = 0; i < 5; i++) {
dropRequestLatches[i] = new CountDownLatch(1);
dropSegmentLatches[i] = new CountDownLatch(1);
}
int i = 0;
for (DataSegment s : segmentToDrop) {
dropRequestSignals.put(s.getId(), dropRequestLatches[i]);
segmentDroppedSignals.put(s.getId(), dropSegmentLatches[i++]);
}
final List<DataSegment> segmentToLoad = Lists.transform(ImmutableList.of("2014-10-27T00:00:00Z/P1D", "2014-10-29T00:00:00Z/P1M", "2014-10-31T00:00:00Z/P1D", "2014-10-30T00:00:00Z/P1D", "2014-10-28T00:00:00Z/P1D"), new Function<String, DataSegment>() {
@Override
public DataSegment apply(String intervalStr) {
// Load latches are registered below, mirroring the drop path above; Lists.transform
// is lazy, so registering them here would re-create latches on every iteration.
return dataSegmentWithInterval(intervalStr);
}
});
final CountDownLatch[] loadRequestLatches = new CountDownLatch[5];
final CountDownLatch[] segmentLoadedLatches = new CountDownLatch[5];
for (i = 0; i < 5; i++) {
loadRequestLatches[i] = new CountDownLatch(1);
segmentLoadedLatches[i] = new CountDownLatch(1);
}
i = 0;
for (DataSegment s : segmentToLoad) {
loadRequestSignals.put(s.getId(), loadRequestLatches[i]);
segmentLoadedSignals.put(s.getId(), segmentLoadedLatches[i++]);
}
// segment with latest interval should be loaded first
final List<DataSegment> expectedLoadOrder = Lists.transform(ImmutableList.of("2014-10-29T00:00:00Z/P1M", "2014-10-31T00:00:00Z/P1D", "2014-10-30T00:00:00Z/P1D", "2014-10-28T00:00:00Z/P1D", "2014-10-27T00:00:00Z/P1D"), intervalStr -> dataSegmentWithInterval(intervalStr));
final DataSegmentChangeHandler handler = new DataSegmentChangeHandler() {
@Override
public void addSegment(DataSegment segment, DataSegmentChangeCallback callback) {
loadRequestSignals.get(segment.getId()).countDown();
}
@Override
public void removeSegment(DataSegment segment, DataSegmentChangeCallback callback) {
dropRequestSignals.get(segment.getId()).countDown();
}
};
loadQueueCache.getListenable().addListener((client, event) -> {
if (event.getType() == PathChildrenCacheEvent.Type.CHILD_ADDED) {
DataSegmentChangeRequest request = jsonMapper.readValue(event.getData().getData(), DataSegmentChangeRequest.class);
request.go(handler, null);
}
});
loadQueueCache.start();
for (final DataSegment segment : segmentToDrop) {
loadQueuePeon.dropSegment(segment, () -> segmentDroppedSignals.get(segment.getId()).countDown());
}
for (final DataSegment segment : segmentToLoad) {
loadQueuePeon.loadSegment(segment, () -> segmentLoadedSignals.get(segment.getId()).countDown());
}
Assert.assertEquals(6000, loadQueuePeon.getLoadQueueSize());
Assert.assertEquals(5, loadQueuePeon.getSegmentsToLoad().size());
Assert.assertEquals(5, loadQueuePeon.getSegmentsToDrop().size());
Assert.assertEquals(0, loadQueuePeon.getTimedOutSegments().size());
for (DataSegment segment : segmentToDrop) {
String dropRequestPath = ZKPaths.makePath(LOAD_QUEUE_PATH, segment.getId().toString());
Assert.assertTrue("Latch not counted down for " + dropRequestSignals.get(segment.getId()), dropRequestSignals.get(segment.getId()).await(10, TimeUnit.SECONDS));
Assert.assertNotNull("Path " + dropRequestPath + " doesn't exist", curator.checkExists().forPath(dropRequestPath));
Assert.assertEquals(segment, ((SegmentChangeRequestDrop) jsonMapper.readValue(curator.getData().decompressed().forPath(dropRequestPath), DataSegmentChangeRequest.class)).getSegment());
// simulate completion of drop request by historical
curator.delete().guaranteed().forPath(dropRequestPath);
Assert.assertTrue(timing.forWaiting().awaitLatch(segmentDroppedSignals.get(segment.getId())));
}
for (DataSegment segment : expectedLoadOrder) {
String loadRequestPath = ZKPaths.makePath(LOAD_QUEUE_PATH, segment.getId().toString());
Assert.assertTrue(timing.forWaiting().awaitLatch(loadRequestSignals.get(segment.getId())));
Assert.assertNotNull(curator.checkExists().forPath(loadRequestPath));
Assert.assertEquals(segment, ((SegmentChangeRequestLoad) jsonMapper.readValue(curator.getData().decompressed().forPath(loadRequestPath), DataSegmentChangeRequest.class)).getSegment());
// simulate completion of load request by historical
curator.delete().guaranteed().forPath(loadRequestPath);
Assert.assertTrue(timing.forWaiting().awaitLatch(segmentLoadedSignals.get(segment.getId())));
}
}
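Once every simulated drop and load has been acknowledged, the peon's internal queues should drain; completion callbacks are asynchronous, so a check like the following may need to wait or poll briefly. A hedged sketch of such follow-up assertions (not shown in the listing above):
// Illustrative follow-up: after all change requests are acknowledged, nothing
// should remain queued. May require a short wait since completion is asynchronous.
Assert.assertEquals(0, loadQueuePeon.getSegmentsToLoad().size());
Assert.assertEquals(0, loadQueuePeon.getSegmentsToDrop().size());
Assert.assertEquals(0, loadQueuePeon.getLoadQueueSize());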