Use of io.druid.query.SegmentDescriptor in project druid by druid-io.
From the class CoordinatorBasedSegmentHandoffNotifierTest, the method testHandoffChecksForPartitionNumber:
@Test
public void testHandoffChecksForPartitionNumber()
{
  Interval interval = new Interval("2011-04-01/2011-04-02");
  // Partition 1 is loaded on a historical and the descriptor asks for partition 1 -> handed off.
  Assert.assertTrue(
      CoordinatorBasedSegmentHandoffNotifier.isHandOffComplete(
          Lists.newArrayList(
              new ImmutableSegmentLoadInfo(
                  createSegment(interval, "v1", 1),
                  Sets.newHashSet(createHistoricalServerMetadata("a"))
              )
          ),
          new SegmentDescriptor(interval, "v1", 1)
      )
  );
  // The descriptor asks for partition 2, which is not loaded -> not handed off.
  Assert.assertFalse(
      CoordinatorBasedSegmentHandoffNotifier.isHandOffComplete(
          Lists.newArrayList(
              new ImmutableSegmentLoadInfo(
                  createSegment(interval, "v1", 1),
                  Sets.newHashSet(createHistoricalServerMetadata("a"))
              )
          ),
          new SegmentDescriptor(interval, "v1", 2)
      )
  );
}
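The helpers createSegment and createHistoricalServerMetadata are defined elsewhere in CoordinatorBasedSegmentHandoffNotifierTest. A minimal sketch of what they plausibly look like, assuming the same DataSegment and NumberedShardSpec constructors used in the next snippet; the total partition count (100) and the server host, size, tier, and priority values are assumptions:

private DataSegment createSegment(Interval interval, String version, int partitionNum)
{
  // Assumed field values; only the datasource, interval, version, and partition matter here.
  return new DataSegment(
      "test_ds",
      interval,
      version,
      null,
      null,
      null,
      new NumberedShardSpec(partitionNum, 100),
      0,
      0
  );
}

private DruidServerMetadata createHistoricalServerMetadata(String name)
{
  // Assumed constructor arguments; the significant part is the "historical" type,
  // since handoff only counts segments served by historical nodes.
  return new DruidServerMetadata(name, name, 10000, "historical", "tier", 0);
}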
Use of io.druid.query.SegmentDescriptor in project druid by druid-io.
From the class CoordinatorBasedSegmentHandoffNotifierTest, the method testHandoffCallbackNotCalled:
@Test
public void testHandoffCallbackNotCalled() throws IOException, InterruptedException
{
  Interval interval = new Interval("2011-04-01/2011-04-02");
  SegmentDescriptor descriptor = new SegmentDescriptor(interval, "v1", 2);
  DataSegment segment = new DataSegment(
      "test_ds",
      interval,
      "v1",
      null,
      null,
      null,
      new NumberedShardSpec(2, 3),
      0,
      0
  );
  CoordinatorClient coordinatorClient = EasyMock.createMock(CoordinatorClient.class);
  // The coordinator reports the segment as served only by a realtime node,
  // so handoff to a historical has not happened yet.
  EasyMock.expect(coordinatorClient.fetchServerView("test_ds", interval, true))
          .andReturn(
              Lists.newArrayList(
                  new ImmutableSegmentLoadInfo(
                      segment,
                      Sets.newHashSet(createRealtimeServerMetadata("a1"))
                  )
              )
          )
          .anyTimes();
  EasyMock.replay(coordinatorClient);
  CoordinatorBasedSegmentHandoffNotifier notifier = new CoordinatorBasedSegmentHandoffNotifier(
      "test_ds",
      coordinatorClient,
      notifierConfig
  );
  final AtomicBoolean callbackCalled = new AtomicBoolean(false);
  notifier.registerSegmentHandoffCallback(
      descriptor,
      MoreExecutors.sameThreadExecutor(),
      new Runnable()
      {
        @Override
        public void run()
        {
          callbackCalled.set(true);
        }
      }
  );
  notifier.checkForSegmentHandoffs();
  // The callback should still be registered, and it must not have fired.
  Assert.assertEquals(1, notifier.getHandOffCallbacks().size());
  Assert.assertTrue(notifier.getHandOffCallbacks().containsKey(descriptor));
  Assert.assertFalse(callbackCalled.get());
  EasyMock.verify(coordinatorClient);
}
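For contrast, a hedged sketch of the opposite scenario (not one of the snippets on this page): if the coordinator reported the segment on a historical server instead, checkForSegmentHandoffs should fire the callback and drop the registration.

// Hypothetical positive case, mirroring the setup above.
EasyMock.expect(coordinatorClient.fetchServerView("test_ds", interval, true))
        .andReturn(
            Lists.newArrayList(
                new ImmutableSegmentLoadInfo(
                    segment,
                    Sets.newHashSet(createHistoricalServerMetadata("a1"))
                )
            )
        )
        .anyTimes();
// ... replay the mock and register the callback as above, then:
notifier.checkForSegmentHandoffs();
Assert.assertTrue(callbackCalled.get());
Assert.assertEquals(0, notifier.getHandOffCallbacks().size());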
Use of io.druid.query.SegmentDescriptor in project druid by druid-io.
From the class CoordinatorBasedSegmentHandoffNotifierTest, the method testHandoffChecksForVersion:
@Test
public void testHandoffChecksForVersion()
{
  Interval interval = new Interval("2011-04-01/2011-04-02");
  // The loaded version v1 is older than the requested v2 -> not handed off.
  Assert.assertFalse(
      CoordinatorBasedSegmentHandoffNotifier.isHandOffComplete(
          Lists.newArrayList(
              new ImmutableSegmentLoadInfo(
                  createSegment(interval, "v1", 2),
                  Sets.newHashSet(createHistoricalServerMetadata("a"))
              )
          ),
          new SegmentDescriptor(interval, "v2", 2)
      )
  );
  // The loaded version v2 is newer than the requested v1 -> handed off.
  Assert.assertTrue(
      CoordinatorBasedSegmentHandoffNotifier.isHandOffComplete(
          Lists.newArrayList(
              new ImmutableSegmentLoadInfo(
                  createSegment(interval, "v2", 2),
                  Sets.newHashSet(createHistoricalServerMetadata("a"))
              )
          ),
          new SegmentDescriptor(interval, "v1", 2)
      )
  );
  // Versions match exactly -> handed off.
  Assert.assertTrue(
      CoordinatorBasedSegmentHandoffNotifier.isHandOffComplete(
          Lists.newArrayList(
              new ImmutableSegmentLoadInfo(
                  createSegment(interval, "v1", 2),
                  Sets.newHashSet(createHistoricalServerMetadata("a"))
              )
          ),
          new SegmentDescriptor(interval, "v1", 2)
      )
  );
}
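Taken together, the partition and version tests pin down the predicate that isHandOffComplete applies to each loaded segment. An illustrative restatement of that predicate (not the actual Druid implementation; the method name is an assumption):

// Illustrative only: a loaded segment satisfies a descriptor when it covers the
// descriptor's interval, carries the same partition number, and is at least as new.
static boolean satisfies(DataSegment segment, SegmentDescriptor descriptor)
{
  return segment.getInterval().contains(descriptor.getInterval())
         && segment.getShardSpec().getPartitionNum() == descriptor.getPartitionNumber()
         && segment.getVersion().compareTo(descriptor.getVersion()) >= 0;
}

On top of this, isHandOffComplete also requires that at least one serving node is a historical, which is what the realtime-server test above exercises.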
Use of io.druid.query.SegmentDescriptor in project druid by druid-io.
From the class RealtimePlumberSchoolTest, the method setUp:
@Before
public void setUp() throws Exception
{
  tmpDir = Files.createTempDir();
  ObjectMapper jsonMapper = new DefaultObjectMapper();
  schema = new DataSchema(
      "test",
      jsonMapper.convertValue(
          new StringInputRowParser(
              new JSONParseSpec(
                  new TimestampSpec("timestamp", "auto", null),
                  new DimensionsSpec(null, null, null),
                  null,
                  null
              ),
              null
          ),
          Map.class
      ),
      new AggregatorFactory[]{new CountAggregatorFactory("rows")},
      new UniformGranularitySpec(Granularities.HOUR, Granularities.NONE, null),
      jsonMapper
  );
  schema2 = new DataSchema(
      "test",
      jsonMapper.convertValue(
          new StringInputRowParser(
              new JSONParseSpec(
                  new TimestampSpec("timestamp", "auto", null),
                  new DimensionsSpec(null, null, null),
                  null,
                  null
              ),
              null
          ),
          Map.class
      ),
      new AggregatorFactory[]{new CountAggregatorFactory("rows")},
      new UniformGranularitySpec(Granularities.YEAR, Granularities.NONE, null),
      jsonMapper
  );
  announcer = EasyMock.createMock(DataSegmentAnnouncer.class);
  announcer.announceSegment(EasyMock.<DataSegment>anyObject());
  EasyMock.expectLastCall().anyTimes();
  segmentPublisher = EasyMock.createNiceMock(SegmentPublisher.class);
  dataSegmentPusher = EasyMock.createNiceMock(DataSegmentPusher.class);
  handoffNotifierFactory = EasyMock.createNiceMock(SegmentHandoffNotifierFactory.class);
  handoffNotifier = EasyMock.createNiceMock(SegmentHandoffNotifier.class);
  EasyMock.expect(handoffNotifierFactory.createSegmentHandoffNotifier(EasyMock.anyString()))
          .andReturn(handoffNotifier)
          .anyTimes();
  EasyMock.expect(
      handoffNotifier.registerSegmentHandoffCallback(
          EasyMock.<SegmentDescriptor>anyObject(),
          EasyMock.<Executor>anyObject(),
          EasyMock.<Runnable>anyObject()
      )
  ).andReturn(true).anyTimes();
  emitter = EasyMock.createMock(ServiceEmitter.class);
  EasyMock.replay(announcer, segmentPublisher, dataSegmentPusher, handoffNotifierFactory, handoffNotifier, emitter);
  tuningConfig = new RealtimeTuningConfig(
      1,
      null,
      null,
      null,
      new IntervalStartVersioningPolicy(),
      rejectionPolicy,
      null,
      null,
      null,
      buildV9Directly,
      0,
      0,
      false,
      null
  );
  realtimePlumberSchool = new RealtimePlumberSchool(
      emitter,
      new DefaultQueryRunnerFactoryConglomerate(Maps.<Class<? extends Query>, QueryRunnerFactory>newHashMap()),
      dataSegmentPusher,
      announcer,
      segmentPublisher,
      handoffNotifierFactory,
      MoreExecutors.sameThreadExecutor(),
      TestHelper.getTestIndexMerger(),
      TestHelper.getTestIndexMergerV9(),
      TestHelper.getTestIndexIO(),
      MapCache.create(0),
      FireDepartmentTest.NO_CACHE_CONFIG,
      TestHelper.getObjectMapper()
  );
  metrics = new FireDepartmentMetrics();
  plumber = (RealtimePlumber) realtimePlumberSchool.findPlumber(schema, tuningConfig, metrics);
}
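The fields initialized here (schema, tuningConfig, plumber, and the mocks) are shared by the test methods that follow in RealtimePlumberSchoolTest. As a rough usage sketch only, assuming the Plumber interface and the Committers helper of this Druid version rather than quoting an actual test body, a test would drive the plumber like this:

// Hedged sketch: index one mocked row, force a persist, then shut down.
plumber.startJob();
final InputRow row = EasyMock.createNiceMock(InputRow.class);
EasyMock.expect(row.getTimestampFromEpoch()).andReturn(0L).anyTimes();
EasyMock.expect(row.getDimensions()).andReturn(new ArrayList<String>()).anyTimes();
EasyMock.replay(row);
plumber.add(row, Committers.supplierOf(Committers.nil()));
plumber.persist(Committers.nil());
plumber.finishJob();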
Use of io.druid.query.SegmentDescriptor in project druid by druid-io.
From the class KafkaIndexTaskTest, the method testRunConflictingWithoutTransactions:
@Test(timeout = 60_000L)
public void testRunConflictingWithoutTransactions() throws Exception {
  final KafkaIndexTask task1 = createTask(
      null,
      new KafkaIOConfig(
          "sequence0",
          new KafkaPartitions("topic0", ImmutableMap.of(0, 2L)),
          new KafkaPartitions("topic0", ImmutableMap.of(0, 5L)),
          kafkaServer.consumerProperties(),
          false,
          false,
          null
      ),
      null,
      null
  );
  final KafkaIndexTask task2 = createTask(
      null,
      new KafkaIOConfig(
          "sequence1",
          new KafkaPartitions("topic0", ImmutableMap.of(0, 3L)),
          new KafkaPartitions("topic0", ImmutableMap.of(0, 8L)),
          kafkaServer.consumerProperties(),
          false,
          false,
          null
      ),
      null,
      null
  );
  // Insert data
  try (final KafkaProducer<byte[], byte[]> kafkaProducer = kafkaServer.newProducer()) {
    for (ProducerRecord<byte[], byte[]> record : RECORDS) {
      kafkaProducer.send(record).get();
    }
  }
  // Run first task
  final ListenableFuture<TaskStatus> future1 = runTask(task1);
  Assert.assertEquals(TaskStatus.Status.SUCCESS, future1.get().getStatusCode());
  // Check published segments & metadata
  SegmentDescriptor desc1 = SD(task1, "2010/P1D", 0);
  SegmentDescriptor desc2 = SD(task1, "2011/P1D", 0);
  Assert.assertEquals(ImmutableSet.of(desc1, desc2), publishedDescriptors());
  Assert.assertNull(metadataStorageCoordinator.getDataSourceMetadata(DATA_SCHEMA.getDataSource()));
  // Run second task
  final ListenableFuture<TaskStatus> future2 = runTask(task2);
  Assert.assertEquals(TaskStatus.Status.SUCCESS, future2.get().getStatusCode());
  // Check metrics
  Assert.assertEquals(3, task1.getFireDepartmentMetrics().processed());
  Assert.assertEquals(0, task1.getFireDepartmentMetrics().unparseable());
  Assert.assertEquals(0, task1.getFireDepartmentMetrics().thrownAway());
  Assert.assertEquals(3, task2.getFireDepartmentMetrics().processed());
  Assert.assertEquals(2, task2.getFireDepartmentMetrics().unparseable());
  Assert.assertEquals(0, task2.getFireDepartmentMetrics().thrownAway());
  // Check published segments & metadata
  SegmentDescriptor desc3 = SD(task2, "2011/P1D", 1);
  SegmentDescriptor desc4 = SD(task2, "2013/P1D", 0);
  Assert.assertEquals(ImmutableSet.of(desc1, desc2, desc3, desc4), publishedDescriptors());
  Assert.assertNull(metadataStorageCoordinator.getDataSourceMetadata(DATA_SCHEMA.getDataSource()));
  // Check segments in deep storage
  Assert.assertEquals(ImmutableList.of("c"), readSegmentDim1(desc1));
  Assert.assertEquals(ImmutableList.of("d", "e"), readSegmentDim1(desc2));
  Assert.assertEquals(ImmutableList.of("d", "e"), readSegmentDim1(desc3));
  Assert.assertEquals(ImmutableList.of("f"), readSegmentDim1(desc4));
}
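The SD helper used above abbreviates building a SegmentDescriptor for a task. A plausible reconstruction follows; the real helper lives in KafkaIndexTaskTest, and the getLock helper and the way the version is resolved are assumptions:

private SegmentDescriptor SD(final Task task, final String intervalString, final int partitionNum)
{
  final Interval interval = new Interval(intervalString);
  // Assumed: the segment version comes from the task's lock on that interval.
  return new SegmentDescriptor(interval, getLock(task, interval).getVersion(), partitionNum);
}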