Use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
In class BatchServerInventoryViewTest, method setUp:
@Before
public void setUp() throws Exception
{
  testingCluster = new TestingCluster(1);
  testingCluster.start();
  cf = CuratorFrameworkFactory.builder()
      .connectString(testingCluster.getConnectString())
      .retryPolicy(new ExponentialBackoffRetry(1, 10))
      .compressionProvider(new PotentiallyGzippedCompressionProvider(true))
      .build();
  cf.start();
  cf.blockUntilConnected();
  cf.create().creatingParentsIfNeeded().forPath(testBasePath);
  jsonMapper = new DefaultObjectMapper();
  announcer = new Announcer(cf, MoreExecutors.sameThreadExecutor());
  announcer.start();
  segmentAnnouncer = new BatchDataSegmentAnnouncer(
      new DruidServerMetadata("id", "host", Long.MAX_VALUE, "type", "tier", 0),
      new BatchDataSegmentAnnouncerConfig()
      {
        @Override
        public int getSegmentsPerNode()
        {
          return 50;
        }
      },
      new ZkPathsConfig()
      {
        @Override
        public String getBase()
        {
          return testBasePath;
        }
      },
      announcer,
      jsonMapper
  );
  segmentAnnouncer.start();
  testSegments = Sets.newConcurrentHashSet();
  for (int i = 0; i < INITIAL_SEGMENTS; i++) {
    testSegments.add(makeSegment(i));
  }
  batchServerInventoryView = new BatchServerInventoryView(
      new ZkPathsConfig()
      {
        @Override
        public String getBase()
        {
          return testBasePath;
        }
      },
      cf,
      jsonMapper,
      Predicates.<Pair<DruidServerMetadata, DataSegment>>alwaysTrue()
  );
  batchServerInventoryView.start();
  inventoryUpdateCounter.set(0);
  filteredBatchServerInventoryView = new BatchServerInventoryView(
      new ZkPathsConfig()
      {
        @Override
        public String getBase()
        {
          return testBasePath;
        }
      },
      cf,
      jsonMapper,
      new Predicate<Pair<DruidServerMetadata, DataSegment>>()
      {
        @Override
        public boolean apply(@Nullable Pair<DruidServerMetadata, DataSegment> input)
        {
          return input.rhs.getInterval().getStart().isBefore(SEGMENT_INTERVAL_START.plusDays(INITIAL_SEGMENTS));
        }
      }
  )
  {
    @Override
    protected DruidServer addInnerInventory(DruidServer container, String inventoryKey, Set<DataSegment> inventory)
    {
      DruidServer server = super.addInnerInventory(container, inventoryKey, inventory);
      inventoryUpdateCounter.incrementAndGet();
      return server;
    }
  };
  filteredBatchServerInventoryView.start();
}
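A minimal sketch of how a test might exercise the view wired up above. waitForSync is a hypothetical helper that polls until the view's inventory matches the expected segment set; the announceSegments call uses the batch announcer started in setUp:

@Test
public void testSegmentSync() throws Exception
{
  // Announce the segments created in setUp and wait for the unfiltered
  // view to reflect them. waitForSync is an assumed polling helper.
  segmentAnnouncer.announceSegments(testSegments);
  waitForSync(batchServerInventoryView, testSegments);
  DruidServer server = Iterables.getOnlyElement(batchServerInventoryView.getInventory());
  Assert.assertEquals(testSegments, Sets.newHashSet(server.getSegments().values()));
}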
Use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
In class DruidCoordinatorTest, method testMoveSegment:
@Test
public void testMoveSegment() throws Exception
{
  loadQueuePeon = EasyMock.createNiceMock(LoadQueuePeon.class);
  EasyMock.expect(loadQueuePeon.getLoadQueueSize()).andReturn(1L);
  EasyMock.replay(loadQueuePeon);
  segment = EasyMock.createNiceMock(DataSegment.class);
  EasyMock.replay(segment);
  scheduledExecutorFactory = EasyMock.createNiceMock(ScheduledExecutorFactory.class);
  EasyMock.replay(scheduledExecutorFactory);
  EasyMock.replay(metadataRuleManager);
  EasyMock.expect(druidServer.toImmutableDruidServer()).andReturn(
      new ImmutableDruidServer(
          new DruidServerMetadata("from", null, 5L, null, null, 0),
          1L,
          null,
          ImmutableMap.of("dummySegment", segment)
      )
  ).atLeastOnce();
  EasyMock.replay(druidServer);
  druidServer2 = EasyMock.createMock(DruidServer.class);
  EasyMock.expect(druidServer2.toImmutableDruidServer()).andReturn(
      new ImmutableDruidServer(
          new DruidServerMetadata("to", null, 5L, null, null, 0),
          1L,
          null,
          ImmutableMap.of("dummySegment2", segment)
      )
  ).atLeastOnce();
  EasyMock.replay(druidServer2);
  loadManagementPeons.put("from", loadQueuePeon);
  loadManagementPeons.put("to", loadQueuePeon);
  EasyMock.expect(serverInventoryView.getInventoryManagerConfig()).andReturn(
      new InventoryManagerConfig()
      {
        @Override
        public String getContainerPath()
        {
          return "";
        }

        @Override
        public String getInventoryPath()
        {
          return "";
        }
      }
  );
  EasyMock.replay(serverInventoryView);
  coordinator.moveSegment(
      druidServer.toImmutableDruidServer(),
      druidServer2.toImmutableDruidServer(),
      "dummySegment",
      null
  );
  EasyMock.verify(druidServer);
  EasyMock.verify(druidServer2);
  EasyMock.verify(loadQueuePeon);
  EasyMock.verify(serverInventoryView);
  EasyMock.verify(metadataRuleManager);
}
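Because the peons are nice mocks, the test only verifies that moveSegment runs and reads the load queue size. A stricter variant could assert that the destination peon actually receives a load request; a sketch, assuming LoadQueuePeon.loadSegment(DataSegment, LoadPeonCallback) as in this version of Druid, and noting that the expectation is only satisfied if the coordinator's internal checks pass:

// Hypothetical stricter mock for the destination peon; would replace the
// shared nice mock registered under "to" above.
LoadQueuePeon toPeon = EasyMock.createMock(LoadQueuePeon.class);
toPeon.loadSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
EasyMock.expectLastCall().once();
EasyMock.replay(toPeon);
loadManagementPeons.put("to", toPeon);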
Use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
In class CostBalancerStrategyTest, method setupDummyCluster:
/**
 * Create a dummy cluster with serverCount servers holding maxSegments segments each,
 * and one additional server holding (maxSegments - 2) segments. The cost balancer
 * strategy should assign the next segment to the server with fewer segments.
 */
public static List<ServerHolder> setupDummyCluster(int serverCount, int maxSegments)
{
  List<ServerHolder> serverHolderList = Lists.newArrayList();
  // Each of these servers gets maxSegments segments.
  for (int i = 0; i < serverCount; i++) {
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    Map<String, DataSegment> segments = Maps.newHashMap();
    for (int j = 0; j < maxSegments; j++) {
      DataSegment segment = getSegment(j);
      segments.put(segment.getIdentifier(), segment);
    }
    serverHolderList.add(
        new ServerHolder(
            new ImmutableDruidServer(
                new DruidServerMetadata("DruidServer_Name_" + i, "localhost", 10000000L, "hot", "hot", 1),
                3000L,
                ImmutableMap.of("DUMMY", EasyMock.createMock(ImmutableDruidDataSource.class)),
                ImmutableMap.copyOf(segments)
            ),
            fromPeon
        )
    );
  }
  // The best candidate for the next assignment holds two fewer segments.
  LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
  ImmutableDruidServer druidServer = EasyMock.createMock(ImmutableDruidServer.class);
  EasyMock.expect(druidServer.getName()).andReturn("BEST_SERVER").anyTimes();
  EasyMock.expect(druidServer.getCurrSize()).andReturn(3000L).anyTimes();
  EasyMock.expect(druidServer.getMaxSize()).andReturn(10000000L).anyTimes();
  EasyMock.expect(druidServer.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
  Map<String, DataSegment> segments = Maps.newHashMap();
  for (int j = 0; j < (maxSegments - 2); j++) {
    DataSegment segment = getSegment(j);
    segments.put(segment.getIdentifier(), segment);
    EasyMock.expect(druidServer.getSegment(segment.getIdentifier())).andReturn(segment).anyTimes();
  }
  EasyMock.expect(druidServer.getSegments()).andReturn(segments).anyTimes();
  EasyMock.replay(druidServer);
  serverHolderList.add(new ServerHolder(druidServer, fromPeon));
  return serverHolderList;
}
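A minimal sketch of a test built on this helper, assuming CostBalancerStrategy takes a ListeningExecutorService and exposes findNewSegmentHomeReplicator as in this version of Druid, and that getSegment(...) produces the candidate segment:

@Test
public void testFindsLeastLoadedServer()
{
  List<ServerHolder> serverHolderList = setupDummyCluster(10, 20);
  DataSegment segment = getSegment(1000);
  BalancerStrategy strategy =
      new CostBalancerStrategy(MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4)));
  // The server with fewer segments should win on cost.
  ServerHolder holder = strategy.findNewSegmentHomeReplicator(segment, serverHolderList);
  Assert.assertNotNull(holder);
  Assert.assertEquals("BEST_SERVER", holder.getServer().getName());
}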
Use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
In class DiskNormalizedCostBalancerStrategyTest, method setupDummyCluster:
/**
 * Create a dummy cluster with serverCount servers holding maxSegments segments each,
 * and one additional server holding the same number of segments but ten times the
 * disk capacity. The disk-normalized cost balancer strategy should assign the next
 * segment to the server with more free disk space.
 */
public static List<ServerHolder> setupDummyCluster(int serverCount, int maxSegments)
{
  List<ServerHolder> serverHolderList = Lists.newArrayList();
  // Each of these servers gets maxSegments segments.
  for (int i = 0; i < serverCount; i++) {
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    Map<String, DataSegment> segments = Maps.newHashMap();
    for (int j = 0; j < maxSegments; j++) {
      DataSegment segment = getSegment(j);
      segments.put(segment.getIdentifier(), segment);
    }
    serverHolderList.add(
        new ServerHolder(
            new ImmutableDruidServer(
                new DruidServerMetadata("DruidServer_Name_" + i, "localhost", 10000000L, "hot", "hot", 1),
                3000L,
                ImmutableMap.of("DUMMY", EasyMock.createMock(ImmutableDruidDataSource.class)),
                ImmutableMap.copyOf(segments)
            ),
            fromPeon
        )
    );
  }
  // The best candidate for the next assignment holds the same segments but has
  // ten times the max size.
  LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
  ImmutableDruidServer druidServer = EasyMock.createMock(ImmutableDruidServer.class);
  EasyMock.expect(druidServer.getName()).andReturn("BEST_SERVER").anyTimes();
  EasyMock.expect(druidServer.getCurrSize()).andReturn(3000L).anyTimes();
  EasyMock.expect(druidServer.getMaxSize()).andReturn(100000000L).anyTimes();
  EasyMock.expect(druidServer.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
  Map<String, DataSegment> segments = Maps.newHashMap();
  for (int j = 0; j < maxSegments; j++) {
    DataSegment segment = getSegment(j);
    segments.put(segment.getIdentifier(), segment);
    EasyMock.expect(druidServer.getSegment(segment.getIdentifier())).andReturn(segment).anyTimes();
  }
  EasyMock.expect(druidServer.getSegments()).andReturn(segments).anyTimes();
  EasyMock.replay(druidServer);
  serverHolderList.add(new ServerHolder(druidServer, fromPeon));
  return serverHolderList;
}
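A matching sketch for the disk-normalized variant, under the same assumptions about the strategy API; here BEST_SERVER should win on disk headroom rather than segment count:

@Test
public void testFindsServerWithMoreDisk()
{
  List<ServerHolder> serverHolderList = setupDummyCluster(10, 20);
  DataSegment segment = getSegment(1000);
  BalancerStrategy strategy =
      new DiskNormalizedCostBalancerStrategy(MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1)));
  // All servers hold the same number of segments, so the larger disk decides.
  ServerHolder holder = strategy.findNewSegmentHomeReplicator(segment, serverHolderList);
  Assert.assertNotNull(holder);
  Assert.assertEquals("BEST_SERVER", holder.getServer().getName());
}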
Use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
In class DruidSchema, method start:
@LifecycleStart
public void start()
{
  cacheExec.submit(
      new Runnable()
      {
        @Override
        public void run()
        {
          try {
            while (!Thread.currentThread().isInterrupted()) {
              final Set<String> dataSources = Sets.newHashSet();
              try {
                synchronized (lock) {
                  final long nextRefresh = new DateTime(lastRefresh).plus(config.getMetadataRefreshPeriod()).getMillis();
                  while (!(isServerViewInitialized
                           && !dataSourcesNeedingRefresh.isEmpty()
                           && (refreshImmediately || nextRefresh < System.currentTimeMillis()))) {
                    lock.wait(Math.max(1, nextRefresh - System.currentTimeMillis()));
                  }
                  dataSources.addAll(dataSourcesNeedingRefresh);
                  dataSourcesNeedingRefresh.clear();
                  lastRefresh = System.currentTimeMillis();
                  refreshImmediately = false;
                }
                // Refresh dataSources.
                for (final String dataSource : dataSources) {
                  log.debug("Refreshing metadata for dataSource[%s].", dataSource);
                  final long startTime = System.currentTimeMillis();
                  final DruidTable druidTable = computeTable(dataSource);
                  if (druidTable == null) {
                    if (tables.remove(dataSource) != null) {
                      log.info("Removed dataSource[%s] from the list of active dataSources.", dataSource);
                    }
                  } else {
                    tables.put(dataSource, druidTable);
                    log.info("Refreshed metadata for dataSource[%s] in %,dms.", dataSource, System.currentTimeMillis() - startTime);
                  }
                }
                initializationLatch.countDown();
              }
              catch (InterruptedException e) {
                // Fall through.
                throw e;
              }
              catch (Exception e) {
                log.warn(e, "Metadata refresh failed for dataSources[%s], trying again soon.", Joiner.on(", ").join(dataSources));
                synchronized (lock) {
                  // Add dataSources back to the refresh list.
                  dataSourcesNeedingRefresh.addAll(dataSources);
                  lock.notifyAll();
                }
              }
            }
          }
          catch (InterruptedException e) {
            // Just exit.
          }
          catch (Throwable e) {
            // Throwables that fall out to here (not caught by an inner try/catch) are potentially gnarly, like
            // OOMEs. Anyway, let's just emit an alert and stop refreshing metadata.
            log.makeAlert(e, "Metadata refresh failed permanently").emit();
            throw e;
          }
          finally {
            log.info("Metadata refresh stopped.");
          }
        }
      }
  );
  serverView.registerSegmentCallback(
      MoreExecutors.sameThreadExecutor(),
      new ServerView.SegmentCallback()
      {
        @Override
        public ServerView.CallbackAction segmentViewInitialized()
        {
          synchronized (lock) {
            isServerViewInitialized = true;
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment)
        {
          synchronized (lock) {
            dataSourcesNeedingRefresh.add(segment.getDataSource());
            if (!tables.containsKey(segment.getDataSource())) {
              refreshImmediately = true;
            }
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment)
        {
          synchronized (lock) {
            dataSourcesNeedingRefresh.add(segment.getDataSource());
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }
      }
  );
  serverView.registerServerCallback(
      MoreExecutors.sameThreadExecutor(),
      new ServerView.ServerCallback()
      {
        @Override
        public ServerView.CallbackAction serverRemoved(DruidServer server)
        {
          final List<String> dataSourceNames = Lists.newArrayList();
          for (DruidDataSource druidDataSource : server.getDataSources()) {
            dataSourceNames.add(druidDataSource.getName());
          }
          synchronized (lock) {
            dataSourcesNeedingRefresh.addAll(dataSourceNames);
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }
      }
  );
}
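The initializationLatch counted down after the first successful refresh gives callers a way to block until the schema is usable. A minimal sketch of such an accessor, assuming DruidSchema keeps the latch as a field (the method name is illustrative):

// Blocks until the refresh thread has completed at least one pass and
// counted down initializationLatch.
public void awaitInitialization() throws InterruptedException
{
  initializationLatch.await();
}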