Use of io.druid.timeline.DataSegment in project druid by druid-io.
From the class DataSegmentTest, method testIdentifierWithZeroPartition:
@Test
public void testIdentifierWithZeroPartition() {
  final DataSegment segment = DataSegment.builder()
      .dataSource("foo")
      .interval(new Interval("2012-01-01/2012-01-02"))
      .version(new DateTime("2012-01-01T11:22:33.444Z").toString())
      .shardSpec(new SingleDimensionShardSpec("bar", null, "abc", 0))
      .build();
  // A partition number of 0 is not appended to the identifier.
  Assert.assertEquals(
      "foo_2012-01-01T00:00:00.000Z_2012-01-02T00:00:00.000Z_2012-01-01T11:22:33.444Z",
      segment.getIdentifier()
  );
}
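For contrast, a minimal sketch of the complementary case, which is not part of this excerpt: the expectation here assumes that a nonzero partition number is appended to the identifier as a "_&lt;partitionNum&gt;" suffix, consistent with the zero-partition test above.

@Test
public void testIdentifierWithNonzeroPartition() {
  final DataSegment segment = DataSegment.builder()
      .dataSource("foo")
      .interval(new Interval("2012-01-01/2012-01-02"))
      .version(new DateTime("2012-01-01T11:22:33.444Z").toString())
      .shardSpec(new SingleDimensionShardSpec("bar", null, "abc", 7))  // partitionNum 7
      .build();
  // Assumed behavior: nonzero partition numbers show up as a trailing "_7".
  Assert.assertEquals(
      "foo_2012-01-01T00:00:00.000Z_2012-01-02T00:00:00.000Z_2012-01-01T11:22:33.444Z_7",
      segment.getIdentifier()
  );
}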
Use of io.druid.timeline.DataSegment in project druid by druid-io.
From the class DataSegmentTest, method testV1SerializationNullMetrics, which checks that unset dimensions and metrics deserialize as empty lists:
@Test
public void testV1SerializationNullMetrics() throws Exception {
  final DataSegment segment = DataSegment.builder()
      .dataSource("foo")
      .interval(new Interval("2012-01-01/2012-01-02"))
      .version(new DateTime("2012-01-01T11:22:33.444Z").toString())
      .build();
  // Round-trip through JSON: absent dimensions/metrics come back as empty lists, not null.
  final DataSegment segment2 = mapper.readValue(mapper.writeValueAsString(segment), DataSegment.class);
  Assert.assertEquals("empty dimensions", ImmutableList.of(), segment2.getDimensions());
  Assert.assertEquals("empty metrics", ImmutableList.of(), segment2.getMetrics());
}
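The mapper used above is a field on the test class that this excerpt does not show. A minimal sketch of its likely initialization, assuming the field is simply Druid's preconfigured Jackson mapper (the field name mapper is taken from the test):

// Assumed test-class field backing the serialization round trip above.
private final ObjectMapper mapper = new DefaultObjectMapper();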
Use of io.druid.timeline.DataSegment in project druid by druid-io.
From the class DataSegmentTest, method testIdentifier:
@Test
public void testIdentifier() {
  final DataSegment segment = DataSegment.builder()
      .dataSource("foo")
      .interval(new Interval("2012-01-01/2012-01-02"))
      .version(new DateTime("2012-01-01T11:22:33.444Z").toString())
      .shardSpec(NoneShardSpec.instance())
      .build();
  // Identifier format: dataSource_intervalStart_intervalEnd_version.
  Assert.assertEquals(
      "foo_2012-01-01T00:00:00.000Z_2012-01-02T00:00:00.000Z_2012-01-01T11:22:33.444Z",
      segment.getIdentifier()
  );
}
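Taken together with testIdentifierWithZeroPartition above, these tests pin down that a NoneShardSpec segment and a partition-0 segment share the same identifier. A minimal sketch making that equivalence explicit; the two segment variables are hypothetical names standing in for the segments built in the two tests:

final String expected = "foo_2012-01-01T00:00:00.000Z_2012-01-02T00:00:00.000Z_2012-01-01T11:22:33.444Z";
// Hypothetical: both segments from the two tests above yield the same identifier.
Assert.assertEquals(expected, segmentWithNoneShardSpec.getIdentifier());
Assert.assertEquals(expected, segmentWithZeroPartition.getIdentifier());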
Use of io.druid.timeline.DataSegment in project druid by druid-io.
From the class DirectDruidClientTest, method testCancel, which verifies that cancelling a query issues an HTTP DELETE and that consuming the cancelled result sequence fails:
@Test
public void testCancel() throws Exception {
  HttpClient httpClient = EasyMock.createStrictMock(HttpClient.class);
  Capture<Request> capturedRequest = EasyMock.newCapture();
  ListenableFuture<Object> cancelledFuture = Futures.immediateCancelledFuture();
  SettableFuture<Object> cancellationFuture = SettableFuture.create();
  // First go() returns an already-cancelled future; the second is the follow-up
  // cancellation (DELETE) request, completed manually below.
  EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
          .andReturn(cancelledFuture).once();
  EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
          .andReturn(cancellationFuture).once();
  EasyMock.replay(httpClient);
  final ServerSelector serverSelector = new ServerSelector(
      new DataSegment(
          "test", new Interval("2013-01-01/2013-01-02"), new DateTime("2013-01-01").toString(),
          Maps.<String, Object>newHashMap(), Lists.<String>newArrayList(), Lists.<String>newArrayList(),
          NoneShardSpec.instance(), 0, 0L
      ),
      new HighestPriorityTierSelectorStrategy(new ConnectionCountServerSelectorStrategy())
  );
  DirectDruidClient client1 = new DirectDruidClient(
      new ReflectionQueryToolChestWarehouse(), QueryRunnerTestHelper.NOOP_QUERYWATCHER,
      new DefaultObjectMapper(), httpClient, "foo", new NoopServiceEmitter()
  );
  QueryableDruidServer queryableDruidServer1 = new QueryableDruidServer(
      new DruidServer("test1", "localhost", 0, "historical", DruidServer.DEFAULT_TIER, 0), client1
  );
  serverSelector.addServerAndUpdateSegment(queryableDruidServer1, serverSelector.getSegment());
  TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
  HashMap<String, List> context = Maps.newHashMap();
  cancellationFuture.set(new StatusResponseHolder(HttpResponseStatus.OK, new StringBuilder("cancelled")));
  Sequence results = client1.run(query, context);
  // Cancellation is delivered as an HTTP DELETE, and the connection is released.
  Assert.assertEquals(HttpMethod.DELETE, capturedRequest.getValue().getMethod());
  Assert.assertEquals(0, client1.getNumOpenConnections());
  // Consuming the cancelled sequence must raise QueryInterruptedException.
  QueryInterruptedException exception = null;
  try {
    Sequences.toList(results, Lists.newArrayList());
  } catch (QueryInterruptedException e) {
    exception = e;
  }
  Assert.assertNotNull(exception);
  EasyMock.verify(httpClient);
}
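A minimal sketch, not part of the test, of how calling code might surface the cancellation to users; log is a hypothetical logger:

try {
  Sequences.toList(results, Lists.newArrayList());
} catch (QueryInterruptedException e) {
  // Hypothetical caller-side handling: a cancelled query surfaces as
  // QueryInterruptedException when its result sequence is consumed.
  log.warn("Query was cancelled: " + e.getMessage());
}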
Use of io.druid.timeline.DataSegment in project druid by druid-io.
From the class BatchServerInventoryViewTest, method setUp, which wires an in-process ZooKeeper cluster, a batch segment announcer, and both an unfiltered and a filtered inventory view:
@Before
public void setUp() throws Exception {
  // Spin up an in-process ZooKeeper cluster and connect a Curator client to it.
  testingCluster = new TestingCluster(1);
  testingCluster.start();
  cf = CuratorFrameworkFactory.builder()
      .connectString(testingCluster.getConnectString())
      .retryPolicy(new ExponentialBackoffRetry(1, 10))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(true))
      .build();
  cf.start();
  cf.blockUntilConnected();
  cf.create().creatingParentsIfNeeded().forPath(testBasePath);
  jsonMapper = new DefaultObjectMapper();

  // Announce segments in batches of at most 50 per znode under the test base path.
  announcer = new Announcer(cf, MoreExecutors.sameThreadExecutor());
  announcer.start();
  segmentAnnouncer = new BatchDataSegmentAnnouncer(
      new DruidServerMetadata("id", "host", Long.MAX_VALUE, "type", "tier", 0),
      new BatchDataSegmentAnnouncerConfig() {
        @Override
        public int getSegmentsPerNode() {
          return 50;
        }
      },
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return testBasePath;
        }
      },
      announcer,
      jsonMapper
  );
  segmentAnnouncer.start();

  testSegments = Sets.newConcurrentHashSet();
  for (int i = 0; i < INITIAL_SEGMENTS; i++) {
    testSegments.add(makeSegment(i));
  }

  // An unfiltered view that accepts every (server, segment) pair.
  batchServerInventoryView = new BatchServerInventoryView(
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return testBasePath;
        }
      },
      cf,
      jsonMapper,
      Predicates.<Pair<DruidServerMetadata, DataSegment>>alwaysTrue()
  );
  batchServerInventoryView.start();
  inventoryUpdateCounter.set(0);

  // A filtered view that only keeps segments starting before the cutoff, and
  // counts inventory updates so the tests can assert on update activity.
  filteredBatchServerInventoryView = new BatchServerInventoryView(
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return testBasePath;
        }
      },
      cf,
      jsonMapper,
      new Predicate<Pair<DruidServerMetadata, DataSegment>>() {
        @Override
        public boolean apply(@Nullable Pair<DruidServerMetadata, DataSegment> input) {
          return input.rhs.getInterval().getStart().isBefore(SEGMENT_INTERVAL_START.plusDays(INITIAL_SEGMENTS));
        }
      }
  ) {
    @Override
    protected DruidServer addInnerInventory(DruidServer container, String inventoryKey, Set<DataSegment> inventory) {
      DruidServer server = super.addInnerInventory(container, inventoryKey, inventory);
      inventoryUpdateCounter.incrementAndGet();
      return server;
    }
  };
  filteredBatchServerInventoryView.start();
}
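setUp calls a makeSegment helper that this excerpt does not show. A plausible sketch, assuming one day-long segment per offset so that the filtered view's cutoff predicate above is meaningful; the builder values are assumptions, only SEGMENT_INTERVAL_START and INITIAL_SEGMENTS appear in the original:

// Hypothetical helper: the i-th segment covers the i-th day after SEGMENT_INTERVAL_START.
private DataSegment makeSegment(int offset) {
  return DataSegment.builder()
      .dataSource("foo")
      .interval(new Interval(
          SEGMENT_INTERVAL_START.plusDays(offset),
          SEGMENT_INTERVAL_START.plusDays(offset + 1)
      ))
      .version(new DateTime().toString())
      .build();
}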