Use of io.druid.client.DruidDataSource in project druid by druid-io.
From the class DruidCoordinatorTest, method testCoordinatorRun:
@Test(timeout = 60_000L)
public void testCoordinatorRun() throws Exception
{
  String dataSource = "dataSource1";
  String tier = "hot";

  // Set up MetadataRuleManager
  Rule foreverLoadRule = new ForeverLoadRule(ImmutableMap.of(tier, 2));
  EasyMock.expect(metadataRuleManager.getRulesWithDefault(EasyMock.anyString()))
          .andReturn(ImmutableList.of(foreverLoadRule))
          .atLeastOnce();
  metadataRuleManager.stop();
  EasyMock.expectLastCall().once();
  EasyMock.replay(metadataRuleManager);

  // Set up MetadataSegmentManager
  DruidDataSource[] druidDataSources = {
      new DruidDataSource(dataSource, Collections.<String, String>emptyMap())
  };
  final DataSegment dataSegment = new DataSegment(
      dataSource,
      new Interval("2010-01-01/P1D"),
      "v1",
      null,
      null,
      null,
      null,
      0x9,
      0
  );
  druidDataSources[0].addSegment("0", dataSegment);
  EasyMock.expect(databaseSegmentManager.isStarted()).andReturn(true).anyTimes();
  EasyMock.expect(databaseSegmentManager.getInventory())
          .andReturn(ImmutableList.of(druidDataSources[0]))
          .atLeastOnce();
  EasyMock.replay(databaseSegmentManager);
  ImmutableDruidDataSource immutableDruidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
  EasyMock.expect(immutableDruidDataSource.getSegments()).andReturn(ImmutableSet.of(dataSegment)).atLeastOnce();
  EasyMock.replay(immutableDruidDataSource);

  // Set up ServerInventoryView
  druidServer = new DruidServer("server1", "localhost", 5L, "historical", tier, 0);
  loadManagementPeons.put("server1", loadQueuePeon);
  EasyMock.expect(serverInventoryView.getInventory()).andReturn(ImmutableList.of(druidServer)).atLeastOnce();
  EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
  EasyMock.replay(serverInventoryView);

  coordinator.start();
  // Wait for this coordinator to become leader
  leaderAnnouncerLatch.await();
  // This coordinator should be leader by now
  Assert.assertTrue(coordinator.isLeader());
  Assert.assertEquals(druidNode.getHostAndPort(), coordinator.getCurrentLeader());

  final CountDownLatch assignSegmentLatch = new CountDownLatch(1);
  pathChildrenCache.getListenable().addListener(
      new PathChildrenCacheListener()
      {
        @Override
        public void childEvent(CuratorFramework curatorFramework, PathChildrenCacheEvent pathChildrenCacheEvent) throws Exception
        {
          if (pathChildrenCacheEvent.getType().equals(PathChildrenCacheEvent.Type.CHILD_ADDED)) {
            // The coordinator should try to assign the segment to the historical druidServer;
            // simulate the historical loading the segment.
            druidServer.addDataSegment(dataSegment.getIdentifier(), dataSegment);
            assignSegmentLatch.countDown();
          }
        }
      }
  );
  pathChildrenCache.start();
  assignSegmentLatch.await();
  Assert.assertEquals(ImmutableMap.of(dataSource, 100.0), coordinator.getLoadStatus());

  curator.delete().guaranteed().forPath(ZKPaths.makePath(LOADPATH, dataSegment.getIdentifier()));
  // Wait for the coordinator thread to run so that the replication status is updated
  while (coordinator.getSegmentAvailability().snapshot().get(dataSource) != 0) {
    Thread.sleep(50);
  }
  Map segmentAvailability = coordinator.getSegmentAvailability().snapshot();
  Assert.assertEquals(1, segmentAvailability.size());
  Assert.assertEquals(0L, segmentAvailability.get(dataSource));
  while (coordinator.getLoadPendingDatasources().get(dataSource).get() > 0) {
    Thread.sleep(50);
  }

  // Wait for the historical's data to be updated
  long startMillis = System.currentTimeMillis();
  long coordinatorRunPeriodMillis = druidCoordinatorConfig.getCoordinatorPeriod().getMillis();
  while (System.currentTimeMillis() - startMillis < coordinatorRunPeriodMillis) {
    Thread.sleep(100);
  }

  Map<String, CountingMap<String>> replicationStatus = coordinator.getReplicationStatus();
  Assert.assertNotNull(replicationStatus);
  Assert.assertEquals(1, replicationStatus.entrySet().size());
  CountingMap<String> dataSourceMap = replicationStatus.get(tier);
  Assert.assertNotNull(dataSourceMap);
  Assert.assertEquals(1, dataSourceMap.size());
  Assert.assertNotNull(dataSourceMap.get(dataSource));
  // The load rule asks for 2 replicas, so 1 replica should still be pending
  while (dataSourceMap.get(dataSource).get() != 1L) {
    Thread.sleep(50);
  }

  coordinator.stop();
  leaderUnannouncerLatch.await();
  Assert.assertFalse(coordinator.isLeader());
  Assert.assertNull(coordinator.getCurrentLeader());
  EasyMock.verify(serverInventoryView);
  EasyMock.verify(metadataRuleManager);
}
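The DruidDataSource wiring in this test reduces to three steps: construct the datasource, construct a segment, and register the segment under a partition key. Below is a minimal sketch of just that pattern, using the same values as the test; reading the four nulls as loadSpec, dimensions, metrics, and shardSpec is an assumption from the constructor's argument order, not something the snippet states.

DruidDataSource druidDataSource = new DruidDataSource("dataSource1", Collections.<String, String>emptyMap());
DataSegment dataSegment = new DataSegment(
    "dataSource1",                    // datasource this segment belongs to
    new Interval("2010-01-01/P1D"),   // interval the segment covers
    "v1",                             // version string
    null, null, null, null,           // assumed: loadSpec, dimensions, metrics, shardSpec
    0x9,                              // binary version
    0                                 // size in bytes
);
druidDataSource.addSegment("0", dataSegment);  // "0" is the partition key used by the test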
Use of io.druid.client.DruidDataSource in project druid by druid-io.
From the class DatasourcesResourceTest, method setUp:
@Before
public void setUp()
{
  request = EasyMock.createStrictMock(HttpServletRequest.class);
  inventoryView = EasyMock.createStrictMock(CoordinatorServerView.class);
  server = EasyMock.createStrictMock(DruidServer.class);

  dataSegmentList = new ArrayList<>();
  dataSegmentList.add(new DataSegment("datasource1", new Interval("2010-01-01/P1D"), null, null, null, null, null, 0x9, 10));
  dataSegmentList.add(new DataSegment("datasource1", new Interval("2010-01-22/P1D"), null, null, null, null, null, 0x9, 20));
  dataSegmentList.add(new DataSegment("datasource2", new Interval("2010-01-01/P1D"), null, null, null, null, null, 0x9, 30));

  listDataSources = new ArrayList<>();
  listDataSources.add(new DruidDataSource("datasource1", new HashMap()).addSegment("part1", dataSegmentList.get(0)));
  listDataSources.add(new DruidDataSource("datasource2", new HashMap()).addSegment("part1", dataSegmentList.get(1)));
}
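The chained construction above works because DruidDataSource.addSegment returns the datasource itself, so the call can be passed straight to listDataSources.add. For comparison, an equivalent unchained form of the first fixture:

DruidDataSource ds = new DruidDataSource("datasource1", new HashMap());
ds.addSegment("part1", dataSegmentList.get(0));  // same effect as the chained call
listDataSources.add(ds);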
Use of io.druid.client.DruidDataSource in project druid by druid-io.
From the class DatasourcesResourceTest, method testGetFullQueryableDataSources:
@Test
public void testGetFullQueryableDataSources() throws Exception
{
  EasyMock.expect(server.getDataSources())
          .andReturn(ImmutableList.of(listDataSources.get(0), listDataSources.get(1)))
          .atLeastOnce();
  EasyMock.expect(inventoryView.getInventory()).andReturn(ImmutableList.of(server)).atLeastOnce();
  EasyMock.replay(inventoryView, server);

  DatasourcesResource datasourcesResource = new DatasourcesResource(inventoryView, null, null, new AuthConfig());

  // "full" returns the DruidDataSource objects themselves
  Response response = datasourcesResource.getQueryableDataSources("full", null, request);
  Set<DruidDataSource> result = (Set<DruidDataSource>) response.getEntity();
  DruidDataSource[] resultantDruidDataSources = new DruidDataSource[result.size()];
  result.toArray(resultantDruidDataSources);
  Assert.assertEquals(200, response.getStatus());
  Assert.assertEquals(2, resultantDruidDataSources.length);
  Assert.assertArrayEquals(listDataSources.toArray(), resultantDruidDataSources);

  // Without "full", only the datasource names are returned
  response = datasourcesResource.getQueryableDataSources(null, null, request);
  List<String> result1 = (List<String>) response.getEntity();
  Assert.assertEquals(200, response.getStatus());
  Assert.assertEquals(2, result1.size());
  Assert.assertTrue(result1.contains("datasource1"));
  Assert.assertTrue(result1.contains("datasource2"));

  EasyMock.verify(inventoryView, server);
}
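Two response shapes are exercised here: requesting "full" yields a Set of DruidDataSource objects, while omitting the flag yields only a List of datasource names. A minimal sketch of consuming both shapes, assuming the same resource wiring as the test:

// "full" gives the datasource objects
Response full = datasourcesResource.getQueryableDataSources("full", null, request);
Set<DruidDataSource> fullResult = (Set<DruidDataSource>) full.getEntity();

// no flag gives just the names
Response names = datasourcesResource.getQueryableDataSources(null, null, request);
List<String> nameResult = (List<String>) names.getEntity();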
Use of io.druid.client.DruidDataSource in project druid by druid-io.
From the class DruidSchema, method start:
@LifecycleStart
public void start()
{
  cacheExec.submit(
      new Runnable()
      {
        @Override
        public void run()
        {
          try {
            while (!Thread.currentThread().isInterrupted()) {
              final Set<String> dataSources = Sets.newHashSet();
              try {
                synchronized (lock) {
                  final long nextRefresh = new DateTime(lastRefresh).plus(config.getMetadataRefreshPeriod()).getMillis();
                  while (!(isServerViewInitialized &&
                           !dataSourcesNeedingRefresh.isEmpty() &&
                           (refreshImmediately || nextRefresh < System.currentTimeMillis()))) {
                    lock.wait(Math.max(1, nextRefresh - System.currentTimeMillis()));
                  }
                  dataSources.addAll(dataSourcesNeedingRefresh);
                  dataSourcesNeedingRefresh.clear();
                  lastRefresh = System.currentTimeMillis();
                  refreshImmediately = false;
                }

                // Refresh dataSources.
                for (final String dataSource : dataSources) {
                  log.debug("Refreshing metadata for dataSource[%s].", dataSource);
                  final long startTime = System.currentTimeMillis();
                  final DruidTable druidTable = computeTable(dataSource);
                  if (druidTable == null) {
                    if (tables.remove(dataSource) != null) {
                      log.info("Removed dataSource[%s] from the list of active dataSources.", dataSource);
                    }
                  } else {
                    tables.put(dataSource, druidTable);
                    log.info("Refreshed metadata for dataSource[%s] in %,dms.", dataSource, System.currentTimeMillis() - startTime);
                  }
                }

                initializationLatch.countDown();
              }
              catch (InterruptedException e) {
                // Fall through.
                throw e;
              }
              catch (Exception e) {
                log.warn(e, "Metadata refresh failed for dataSources[%s], trying again soon.", Joiner.on(", ").join(dataSources));
                synchronized (lock) {
                  // Add dataSources back to the refresh list.
                  dataSourcesNeedingRefresh.addAll(dataSources);
                  lock.notifyAll();
                }
              }
            }
          }
          catch (InterruptedException e) {
            // Just exit.
          }
          catch (Throwable e) {
            // Throwables that fall out to here (not caught by an inner try/catch) are potentially gnarly, like
            // OOMEs. Anyway, let's just emit an alert and stop refreshing metadata.
            log.makeAlert(e, "Metadata refresh failed permanently").emit();
            throw e;
          }
          finally {
            log.info("Metadata refresh stopped.");
          }
        }
      }
  );

  serverView.registerSegmentCallback(
      MoreExecutors.sameThreadExecutor(),
      new ServerView.SegmentCallback()
      {
        @Override
        public ServerView.CallbackAction segmentViewInitialized()
        {
          synchronized (lock) {
            isServerViewInitialized = true;
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment)
        {
          synchronized (lock) {
            dataSourcesNeedingRefresh.add(segment.getDataSource());
            if (!tables.containsKey(segment.getDataSource())) {
              refreshImmediately = true;
            }
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment)
        {
          synchronized (lock) {
            dataSourcesNeedingRefresh.add(segment.getDataSource());
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }
      }
  );

  serverView.registerServerCallback(
      MoreExecutors.sameThreadExecutor(),
      new ServerView.ServerCallback()
      {
        @Override
        public ServerView.CallbackAction serverRemoved(DruidServer server)
        {
          final List<String> dataSourceNames = Lists.newArrayList();
          for (DruidDataSource druidDataSource : server.getDataSources()) {
            dataSourceNames.add(druidDataSource.getName());
          }
          synchronized (lock) {
            dataSourcesNeedingRefresh.addAll(dataSourceNames);
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }
      }
  );
}
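The refresh thread above is a guarded wait: it sleeps on the shared lock until the server view is initialized and work is queued (or the refresh period elapses), and the registered callbacks wake it with notifyAll. A standalone sketch of the same pattern, with illustrative names that are not Druid's own:

class RefreshGate
{
  private final Object lock = new Object();
  private boolean workQueued = false;

  // Distills the condition loop in DruidSchema.start(): sleep on the lock
  // until work is queued or the deadline passes; the while loop also guards
  // against spurious wakeups.
  void awaitWork(long deadlineMillis) throws InterruptedException
  {
    synchronized (lock) {
      while (!workQueued && System.currentTimeMillis() < deadlineMillis) {
        lock.wait(Math.max(1, deadlineMillis - System.currentTimeMillis()));
      }
      workQueued = false;
    }
  }

  // Analogue of the segmentAdded/segmentRemoved callbacks waking the refresh thread.
  void signalWork()
  {
    synchronized (lock) {
      workQueued = true;
      lock.notifyAll();
    }
  }
}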
Use of io.druid.client.DruidDataSource in project druid by druid-io.
From the class SQLMetadataSegmentManager, method removeSegment:
@Override
public boolean removeSegment(String ds, final String segmentID)
{
  try {
    // Soft-delete the segment in the metadata store by flipping its "used" flag.
    connector.getDBI().withHandle(
        new HandleCallback<Void>()
        {
          @Override
          public Void withHandle(Handle handle) throws Exception
          {
            handle.createStatement(
                String.format("UPDATE %s SET used=false WHERE id = :segmentID", getSegmentsTable())
            ).bind("segmentID", segmentID).execute();
            return null;
          }
        }
    );

    // Mirror the change in the in-memory view of the datasources.
    ConcurrentHashMap<String, DruidDataSource> dataSourceMap = dataSources.get();
    if (!dataSourceMap.containsKey(ds)) {
      log.warn("Cannot find datasource %s", ds);
      return false;
    }
    DruidDataSource dataSource = dataSourceMap.get(ds);
    dataSource.removePartition(segmentID);
    if (dataSource.isEmpty()) {
      dataSourceMap.remove(ds);
    }
  }
  catch (Exception e) {
    log.error(e, e.toString());
    return false;
  }
  return true;
}
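Note that removeSegment is a soft delete: the row stays in the segments table with used=false, and only the in-memory DruidDataSource drops the partition. A minimal usage sketch; the segment identifier below is purely illustrative:

// Hypothetical identifier, shown only for shape; real identifiers are
// derived from datasource, interval, and version.
boolean removed = manager.removeSegment(
    "dataSource1",
    "dataSource1_2010-01-01T00:00:00.000Z_2010-01-02T00:00:00.000Z_v1"
);
if (!removed) {
  // Either the datasource was not in the in-memory map or the update threw; see logs.
}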