Use of org.joda.time.Interval in project druid by druid-io.
The class DirectDruidClientTest, method testCancel.
@Test
public void testCancel() throws Exception {
  HttpClient httpClient = EasyMock.createStrictMock(HttpClient.class);
  Capture<Request> capturedRequest = EasyMock.newCapture();
  ListenableFuture<Object> cancelledFuture = Futures.immediateCancelledFuture();
  SettableFuture<Object> cancellationFuture = SettableFuture.create();
  EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
          .andReturn(cancelledFuture)
          .once();
  EasyMock.expect(httpClient.go(EasyMock.capture(capturedRequest), EasyMock.<HttpResponseHandler>anyObject()))
          .andReturn(cancellationFuture)
          .once();
  EasyMock.replay(httpClient);
  final ServerSelector serverSelector = new ServerSelector(
      new DataSegment(
          "test",
          new Interval("2013-01-01/2013-01-02"),
          new DateTime("2013-01-01").toString(),
          Maps.<String, Object>newHashMap(),
          Lists.<String>newArrayList(),
          Lists.<String>newArrayList(),
          NoneShardSpec.instance(),
          0,
          0L
      ),
      new HighestPriorityTierSelectorStrategy(new ConnectionCountServerSelectorStrategy())
  );
  DirectDruidClient client1 = new DirectDruidClient(
      new ReflectionQueryToolChestWarehouse(),
      QueryRunnerTestHelper.NOOP_QUERYWATCHER,
      new DefaultObjectMapper(),
      httpClient,
      "foo",
      new NoopServiceEmitter()
  );
  QueryableDruidServer queryableDruidServer1 = new QueryableDruidServer(
      new DruidServer("test1", "localhost", 0, "historical", DruidServer.DEFAULT_TIER, 0),
      client1
  );
  serverSelector.addServerAndUpdateSegment(queryableDruidServer1, serverSelector.getSegment());
  TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
  HashMap<String, List> context = Maps.newHashMap();
  cancellationFuture.set(new StatusResponseHolder(HttpResponseStatus.OK, new StringBuilder("cancelled")));
  Sequence results = client1.run(query, context);
  Assert.assertEquals(HttpMethod.DELETE, capturedRequest.getValue().getMethod());
  Assert.assertEquals(0, client1.getNumOpenConnections());
  QueryInterruptedException exception = null;
  try {
    Sequences.toList(results, Lists.newArrayList());
  } catch (QueryInterruptedException e) {
    exception = e;
  }
  Assert.assertNotNull(exception);
  EasyMock.verify(httpClient);
}
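The segment in this test identifies its time chunk with a Joda-Time Interval parsed from an ISO-8601 string, and the segment version is derived from an ISO instant. A minimal, self-contained sketch of just that Interval handling (the class name and printed values are illustrative, and the printed time zone depends on the JVM default):

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class SegmentIntervalSketch {
  public static void main(String[] args) {
    // Same ISO-8601 interval string the test passes to the DataSegment constructor.
    Interval chunk = new Interval("2013-01-01/2013-01-02");
    // The test derives the segment version from this instant via toString().
    DateTime version = new DateTime("2013-01-01");

    System.out.println(chunk.getStart());        // 2013-01-01T00:00:00.000...
    System.out.println(chunk.getEnd());          // 2013-01-02T00:00:00.000...
    System.out.println(chunk.contains(version)); // true: the version instant falls inside the chunk
  }
}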
Use of org.joda.time.Interval in project druid by druid-io.
The class IndexerSQLMetadataStorageCoordinatorTest, method testUnUsedOverlapLow.
@Test
public void testUnUsedOverlapLow() throws IOException {
  coordinator.announceHistoricalSegments(SEGMENTS);
  unUseSegment();
  Assert.assertTrue(
      coordinator.getUnusedSegmentsForInterval(
          defaultSegment.getDataSource(),
          new Interval(defaultSegment.getInterval().getStart().minus(1), defaultSegment.getInterval().getStart().plus(1))
      ).isEmpty()
  );
}
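The query interval here is built from the segment's own start instant, one millisecond on either side, so it only clips the low boundary of the segment. A minimal Joda-Time sketch of that boundary arithmetic (the segment interval below is a stand-in for defaultSegment.getInterval(); the empty result asserted above is consistent with the coordinator returning only segments that lie wholly inside the requested interval):

import org.joda.time.Interval;

public class OverlapLowSketch {
  public static void main(String[] args) {
    // Stand-in for defaultSegment.getInterval(); the real test uses whatever interval its fixture defines.
    Interval segment = new Interval("2015-01-01/2015-01-02");

    // The interval built in the test: one millisecond on either side of the segment start.
    Interval query = new Interval(segment.getStart().minus(1), segment.getStart().plus(1));

    System.out.println(query.overlaps(segment)); // true: they share the segment's first millisecond
    System.out.println(query.contains(segment)); // false: the segment is not wholly inside the query interval
  }
}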
Use of org.joda.time.Interval in project druid by druid-io.
The class SQLMetadataRuleManagerTest, method testRuleInsert.
@Test
public void testRuleInsert() {
  List<Rule> rules = Arrays.<Rule>asList(
      new IntervalLoadRule(
          new Interval("2015-01-01/2015-02-01"),
          ImmutableMap.<String, Integer>of(DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_NUM_REPLICANTS)
      )
  );
  AuditInfo auditInfo = new AuditInfo("test_author", "test_comment", "127.0.0.1");
  ruleManager.overrideRule("test_dataSource", rules, auditInfo);
  // The new rule should be reflected in the in-memory rules map immediately after being set by the user
  Map<String, List<Rule>> allRules = ruleManager.getAllRules();
  Assert.assertEquals(1, allRules.size());
  Assert.assertEquals(1, allRules.get("test_dataSource").size());
  Assert.assertEquals(rules.get(0), allRules.get("test_dataSource").get(0));
}
Use of org.joda.time.Interval in project druid by druid-io.
The class SQLMetadataRuleManagerTest, method testAuditEntryCreated.
@Test
public void testAuditEntryCreated() throws Exception {
  List<Rule> rules = Arrays.<Rule>asList(
      new IntervalLoadRule(
          new Interval("2015-01-01/2015-02-01"),
          ImmutableMap.<String, Integer>of(DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_NUM_REPLICANTS)
      )
  );
  AuditInfo auditInfo = new AuditInfo("test_author", "test_comment", "127.0.0.1");
  ruleManager.overrideRule("test_dataSource", rules, auditInfo);
  // fetch rules from metadata storage
  ruleManager.poll();
  Assert.assertEquals(rules, ruleManager.getRules("test_dataSource"));
  // verify audit entry is created
  List<AuditEntry> auditEntries = auditManager.fetchAuditHistory("test_dataSource", "rules", null);
  Assert.assertEquals(1, auditEntries.size());
  AuditEntry entry = auditEntries.get(0);
  Assert.assertEquals(rules, mapper.readValue(entry.getPayload(), new TypeReference<List<Rule>>() {}));
  Assert.assertEquals(auditInfo, entry.getAuditInfo());
  Assert.assertEquals("test_dataSource", entry.getKey());
}
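The assertion above round-trips the stored payload back into a List<Rule> through an anonymous TypeReference subclass, which is how Jackson recovers the generic element type that erasure would otherwise lose. A minimal, self-contained sketch of that pattern using plain interval strings instead of real Rule objects (the payload is illustrative, not an actual rule serialization):

import java.util.List;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public class TypeReferenceSketch {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();

    // The anonymous TypeReference subclass carries List<String> into readValue;
    // passing List.class alone would drop the element type.
    String payload = "[\"2015-01-01/2015-02-01\"]";
    List<String> intervals = mapper.readValue(payload, new TypeReference<List<String>>() {});

    System.out.println(intervals.get(0)); // 2015-01-01/2015-02-01
  }
}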
Use of org.joda.time.Interval in project druid by druid-io.
The class SQLMetadataRuleManagerTest, method testFetchAuditEntriesForAllDataSources.
@Test
public void testFetchAuditEntriesForAllDataSources() throws Exception {
  List<Rule> rules = Arrays.<Rule>asList(
      new IntervalLoadRule(
          new Interval("2015-01-01/2015-02-01"),
          ImmutableMap.<String, Integer>of(DruidServer.DEFAULT_TIER, DruidServer.DEFAULT_NUM_REPLICANTS)
      )
  );
  AuditInfo auditInfo = new AuditInfo("test_author", "test_comment", "127.0.0.1");
  ruleManager.overrideRule("test_dataSource", rules, auditInfo);
  ruleManager.overrideRule("test_dataSource2", rules, auditInfo);
  // fetch rules from metadata storage
  ruleManager.poll();
  Assert.assertEquals(rules, ruleManager.getRules("test_dataSource"));
  Assert.assertEquals(rules, ruleManager.getRules("test_dataSource2"));
  // test fetch audit entries
  List<AuditEntry> auditEntries = auditManager.fetchAuditHistory("rules", null);
  Assert.assertEquals(2, auditEntries.size());
  for (AuditEntry entry : auditEntries) {
    Assert.assertEquals(rules, mapper.readValue(entry.getPayload(), new TypeReference<List<Rule>>() {}));
    Assert.assertEquals(auditInfo, entry.getAuditInfo());
  }
}