Use of io.druid.client.DruidDataSource in project druid by druid-io.
From the class InventoryViewUtils, method getSecuredDataSources:
public static Set<DruidDataSource> getSecuredDataSources(
    InventoryView inventoryView,
    final AuthorizationInfo authorizationInfo
)
{
  if (authorizationInfo == null) {
    throw new ISE("Invalid to call a secured method with null AuthorizationInfo!!");
  } else {
    // Cache access decisions so each (resource, action) pair is authorized only once.
    final Map<Pair<Resource, Action>, Access> resourceAccessMap = new HashMap<>();
    return ImmutableSet.copyOf(
        Iterables.filter(
            getDataSources(inventoryView),
            new Predicate<DruidDataSource>()
            {
              @Override
              public boolean apply(DruidDataSource input)
              {
                Resource resource = new Resource(input.getName(), ResourceType.DATASOURCE);
                Action action = Action.READ;
                Pair<Resource, Action> key = new Pair<>(resource, action);
                if (resourceAccessMap.containsKey(key)) {
                  return resourceAccessMap.get(key).isAllowed();
                } else {
                  Access access = authorizationInfo.isAuthorized(key.lhs, key.rhs);
                  resourceAccessMap.put(key, access);
                  return access.isAllowed();
                }
              }
            }
        )
    );
  }
}
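
A minimal sketch of a caller, assuming an InventoryView and an AuthorizationInfo are already in hand (both variable names here are hypothetical):

// Hypothetical caller: narrow the cluster's data sources down to those
// the current user may READ, then print their names.
Set<DruidDataSource> readable = InventoryViewUtils.getSecuredDataSources(inventoryView, authorizationInfo);
for (DruidDataSource dataSource : readable) {
  System.out.println(dataSource.getName());
}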
Use of io.druid.client.DruidDataSource in project druid by druid-io.
From the class MetadataResource, method getDatabaseSegmentDataSourceSegments:
@GET
@Path("/datasources/{dataSourceName}/segments")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(DatasourceResourceFilter.class)
public Response getDatabaseSegmentDataSourceSegments(
    @PathParam("dataSourceName") String dataSourceName,
    @QueryParam("full") String full
)
{
  DruidDataSource dataSource = metadataSegmentManager.getInventoryValue(dataSourceName);
  if (dataSource == null) {
    return Response.status(Response.Status.NOT_FOUND).build();
  }
  Response.ResponseBuilder builder = Response.status(Response.Status.OK);
  // With ?full, return the complete DataSegment objects; otherwise just their identifiers.
  if (full != null) {
    return builder.entity(dataSource.getSegments()).build();
  }
  return builder.entity(
      Iterables.transform(
          dataSource.getSegments(),
          new Function<DataSegment, String>()
          {
            @Override
            public String apply(DataSegment segment)
            {
              return segment.getIdentifier();
            }
          }
      )
  ).build();
}
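
For illustration, a small standalone client that hits this endpoint, assuming the resource is mounted under /druid/coordinator/v1/metadata, a coordinator is listening on localhost:8081, and a data source named "wikipedia" exists (host, port, and data source name are all invented here):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class SegmentsClientSketch
{
  public static void main(String[] args) throws Exception
  {
    // ?full returns complete DataSegment JSON objects; drop it to get identifier strings.
    URL url = new URL("http://localhost:8081/druid/coordinator/v1/metadata/datasources/wikipedia/segments?full");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = reader.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}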
Use of io.druid.client.DruidDataSource in project druid by druid-io.
From the class TiersResource, method getTierDatasources:
@GET
@Path("/{tierName}")
@Produces(MediaType.APPLICATION_JSON)
public Response getTierDatasources(
    @PathParam("tierName") String tierName,
    @QueryParam("simple") String simple
)
{
  if (simple != null) {
    // With ?simple, aggregate per-datasource, per-interval segment size and count.
    Table<String, Interval, Map<String, Object>> retVal = HashBasedTable.create();
    for (DruidServer druidServer : serverInventoryView.getInventory()) {
      if (druidServer.getTier().equalsIgnoreCase(tierName)) {
        for (DataSegment dataSegment : druidServer.getSegments().values()) {
          Map<String, Object> properties = retVal.get(dataSegment.getDataSource(), dataSegment.getInterval());
          if (properties == null) {
            properties = Maps.newHashMap();
            retVal.put(dataSegment.getDataSource(), dataSegment.getInterval(), properties);
          }
          properties.put("size", MapUtils.getLong(properties, "size", 0L) + dataSegment.getSize());
          properties.put("count", MapUtils.getInt(properties, "count", 0) + 1);
        }
      }
    }
    return Response.ok(retVal.rowMap()).build();
  }
  // Otherwise, return just the set of data source names hosted on this tier.
  Set<String> retVal = Sets.newHashSet();
  for (DruidServer druidServer : serverInventoryView.getInventory()) {
    if (druidServer.getTier().equalsIgnoreCase(tierName)) {
      retVal.addAll(
          Lists.newArrayList(
              Iterables.transform(
                  druidServer.getDataSources(),
                  new Function<DruidDataSource, String>()
                  {
                    @Override
                    public String apply(DruidDataSource input)
                    {
                      return input.getName();
                    }
                  }
              )
          )
      );
    }
  }
  return Response.ok(retVal).build();
}
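
The ?simple rollup relies on Guava's HashBasedTable, which keys each value by a (row, column) pair. A self-contained sketch of the same pattern, with invented data source, interval, and sizes (string intervals stand in for Joda Intervals):

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

import java.util.HashMap;
import java.util.Map;

public class TierRollupSketch
{
  public static void main(String[] args)
  {
    // (dataSource, interval) -> {"size": ..., "count": ...}, mirroring getTierDatasources.
    Table<String, String, Map<String, Object>> table = HashBasedTable.create();
    long[] segmentSizes = {100L, 250L};  // two invented segments falling into the same cell
    for (long segmentSize : segmentSizes) {
      Map<String, Object> properties = table.get("wikipedia", "2010-01-01/P1D");
      if (properties == null) {
        properties = new HashMap<>();
        table.put("wikipedia", "2010-01-01/P1D", properties);
      }
      long size = (Long) properties.getOrDefault("size", 0L);
      int count = (Integer) properties.getOrDefault("count", 0);
      properties.put("size", size + segmentSize);
      properties.put("count", count + 1);
    }
    // rowMap() nests rows over columns: {dataSource -> {interval -> properties}},
    // which is the shape the endpoint returns, e.g.
    // {wikipedia={2010-01-01/P1D={size=350, count=2}}} (map ordering may vary).
    System.out.println(table.rowMap());
  }
}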
Use of io.druid.client.DruidDataSource in project druid by druid-io.
From the class DruidCoordinatorTest, method testCoordinatorRun:
@Test(timeout = 60_000L)
public void testCoordinatorRun() throws Exception
{
  String dataSource = "dataSource1";
  String tier = "hot";

  // Set up MetadataRuleManager
  Rule foreverLoadRule = new ForeverLoadRule(ImmutableMap.of(tier, 2));
  EasyMock.expect(metadataRuleManager.getRulesWithDefault(EasyMock.anyString()))
          .andReturn(ImmutableList.of(foreverLoadRule))
          .atLeastOnce();
  metadataRuleManager.stop();
  EasyMock.expectLastCall().once();
  EasyMock.replay(metadataRuleManager);

  // Set up MetadataSegmentManager
  DruidDataSource[] druidDataSources = { new DruidDataSource(dataSource, Collections.<String, String>emptyMap()) };
  final DataSegment dataSegment = new DataSegment(dataSource, new Interval("2010-01-01/P1D"), "v1", null, null, null, null, 0x9, 0);
  druidDataSources[0].addSegment("0", dataSegment);
  EasyMock.expect(databaseSegmentManager.isStarted()).andReturn(true).anyTimes();
  EasyMock.expect(databaseSegmentManager.getInventory()).andReturn(ImmutableList.of(druidDataSources[0])).atLeastOnce();
  EasyMock.replay(databaseSegmentManager);
  ImmutableDruidDataSource immutableDruidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
  EasyMock.expect(immutableDruidDataSource.getSegments()).andReturn(ImmutableSet.of(dataSegment)).atLeastOnce();
  EasyMock.replay(immutableDruidDataSource);

  // Set up ServerInventoryView
  druidServer = new DruidServer("server1", "localhost", 5L, "historical", tier, 0);
  loadManagementPeons.put("server1", loadQueuePeon);
  EasyMock.expect(serverInventoryView.getInventory()).andReturn(ImmutableList.of(druidServer)).atLeastOnce();
  EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
  EasyMock.replay(serverInventoryView);

  coordinator.start();
  // Wait for this coordinator to become leader
  leaderAnnouncerLatch.await();
  // This coordinator should be leader by now
  Assert.assertTrue(coordinator.isLeader());
  Assert.assertEquals(druidNode.getHostAndPort(), coordinator.getCurrentLeader());

  final CountDownLatch assignSegmentLatch = new CountDownLatch(1);
  pathChildrenCache.getListenable().addListener(
      new PathChildrenCacheListener()
      {
        @Override
        public void childEvent(CuratorFramework curatorFramework, PathChildrenCacheEvent pathChildrenCacheEvent) throws Exception
        {
          if (pathChildrenCacheEvent.getType().equals(PathChildrenCacheEvent.Type.CHILD_ADDED)) {
            // The coordinator should try to assign the segment to the historical druidServer;
            // simulate the historical loading that segment.
            druidServer.addDataSegment(dataSegment.getIdentifier(), dataSegment);
            assignSegmentLatch.countDown();
          }
        }
      }
  );
  pathChildrenCache.start();
  assignSegmentLatch.await();
  Assert.assertEquals(ImmutableMap.of(dataSource, 100.0), coordinator.getLoadStatus());

  curator.delete().guaranteed().forPath(ZKPaths.makePath(LOADPATH, dataSegment.getIdentifier()));
  // Wait for the coordinator thread to run so that the replication status is updated
  while (coordinator.getSegmentAvailability().snapshot().get(dataSource) != 0) {
    Thread.sleep(50);
  }
  Map segmentAvailability = coordinator.getSegmentAvailability().snapshot();
  Assert.assertEquals(1, segmentAvailability.size());
  Assert.assertEquals(0L, segmentAvailability.get(dataSource));
  while (coordinator.getLoadPendingDatasources().get(dataSource).get() > 0) {
    Thread.sleep(50);
  }

  // Wait for the historical's data to be updated
  long startMillis = System.currentTimeMillis();
  long coordinatorRunPeriodMillis = druidCoordinatorConfig.getCoordinatorPeriod().getMillis();
  while (System.currentTimeMillis() - startMillis < coordinatorRunPeriodMillis) {
    Thread.sleep(100);
  }

  Map<String, CountingMap<String>> replicationStatus = coordinator.getReplicationStatus();
  Assert.assertNotNull(replicationStatus);
  Assert.assertEquals(1, replicationStatus.entrySet().size());
  CountingMap<String> dataSourceMap = replicationStatus.get(tier);
  Assert.assertNotNull(dataSourceMap);
  Assert.assertEquals(1, dataSourceMap.size());
  Assert.assertNotNull(dataSourceMap.get(dataSource));
  // The load rule asks for 2 replicas, so 1 replica should still be pending
  while (dataSourceMap.get(dataSource).get() != 1L) {
    Thread.sleep(50);
  }

  coordinator.stop();
  leaderUnannouncerLatch.await();
  Assert.assertFalse(coordinator.isLeader());
  Assert.assertNull(coordinator.getCurrentLeader());
  EasyMock.verify(serverInventoryView);
  EasyMock.verify(metadataRuleManager);
}
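
The test leans on EasyMock's record/replay/verify lifecycle for each collaborator. A minimal standalone sketch of that pattern, using a hypothetical Inventory interface in place of MetadataSegmentManager and friends:

import org.easymock.EasyMock;

public class EasyMockLifecycleSketch
{
  // Hypothetical collaborator, standing in for the managers mocked above.
  interface Inventory
  {
    boolean isStarted();
  }

  public static void main(String[] args)
  {
    Inventory inventory = EasyMock.createStrictMock(Inventory.class);
    // Record phase: declare the calls the test expects and their return values.
    EasyMock.expect(inventory.isStarted()).andReturn(true).anyTimes();
    // Replay phase: switch the mock from recording to playback.
    EasyMock.replay(inventory);
    // Exercise the code under test (here, just call the mock directly).
    System.out.println(inventory.isStarted());  // true
    // Verify phase: fail if any expectation with a required count was not met.
    EasyMock.verify(inventory);
  }
}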
Use of io.druid.client.DruidDataSource in project druid by druid-io.
From the class DatasourcesResourceTest, method setUp:
@Before
public void setUp()
{
  request = EasyMock.createStrictMock(HttpServletRequest.class);
  inventoryView = EasyMock.createStrictMock(CoordinatorServerView.class);
  server = EasyMock.createStrictMock(DruidServer.class);
  dataSegmentList = new ArrayList<>();
  dataSegmentList.add(new DataSegment("datasource1", new Interval("2010-01-01/P1D"), null, null, null, null, null, 0x9, 10));
  dataSegmentList.add(new DataSegment("datasource1", new Interval("2010-01-22/P1D"), null, null, null, null, null, 0x9, 20));
  dataSegmentList.add(new DataSegment("datasource2", new Interval("2010-01-01/P1D"), null, null, null, null, null, 0x9, 30));
  listDataSources = new ArrayList<>();
  listDataSources.add(new DruidDataSource("datasource1", new HashMap()).addSegment("part1", dataSegmentList.get(0)));
  listDataSources.add(new DruidDataSource("datasource2", new HashMap()).addSegment("part1", dataSegmentList.get(1)));
}
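
As the chained calls in setUp imply, DruidDataSource.addSegment returns the data source itself, so construction and population can be fluent. A short sketch under that assumption (names reused from the test above):

// Fluent construct-and-add; each addSegment returns the same DruidDataSource.
DruidDataSource dataSource = new DruidDataSource("datasource1", new HashMap())
    .addSegment("part1", dataSegmentList.get(0))
    .addSegment("part2", dataSegmentList.get(1));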