use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
the class DiskNormalizedCostBalancerStrategyTest method setupDummyCluster.
/**
 * Create a Druid cluster with serverCount servers holding maxSegments segments each, plus one
 * server with the same number of segments but a far larger max size. The disk-normalized cost
 * balancer strategy should assign the next segment to the server with the most free disk space.
 */
public static List<ServerHolder> setupDummyCluster(int serverCount, int maxSegments)
{
  List<ServerHolder> serverHolderList = Lists.newArrayList();
  // Create serverCount servers, each holding maxSegments segments.
  for (int i = 0; i < serverCount; i++) {
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    Map<String, DataSegment> segments = Maps.newHashMap();
    for (int j = 0; j < maxSegments; j++) {
      DataSegment segment = getSegment(j);
      segments.put(segment.getIdentifier(), segment);
    }
    serverHolderList.add(
        new ServerHolder(
            new ImmutableDruidServer(
                new DruidServerMetadata("DruidServer_Name_" + i, "localhost", 10000000L, "hot", "hot", 1),
                3000L,
                ImmutableMap.of("DUMMY", EasyMock.createMock(ImmutableDruidDataSource.class)),
                ImmutableMap.copyOf(segments)
            ),
            fromPeon
        )
    );
  }
  // The best server for the next segment assignment has a much greater max size.
  LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
  ImmutableDruidServer druidServer = EasyMock.createMock(ImmutableDruidServer.class);
  EasyMock.expect(druidServer.getName()).andReturn("BEST_SERVER").anyTimes();
  EasyMock.expect(druidServer.getCurrSize()).andReturn(3000L).anyTimes();
  EasyMock.expect(druidServer.getMaxSize()).andReturn(100000000L).anyTimes();
  EasyMock.expect(druidServer.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
  Map<String, DataSegment> segments = Maps.newHashMap();
  for (int j = 0; j < maxSegments; j++) {
    DataSegment segment = getSegment(j);
    segments.put(segment.getIdentifier(), segment);
    EasyMock.expect(druidServer.getSegment(segment.getIdentifier())).andReturn(segment).anyTimes();
  }
  EasyMock.expect(druidServer.getSegments()).andReturn(segments).anyTimes();
  EasyMock.replay(druidServer);
  serverHolderList.add(new ServerHolder(druidServer, fromPeon));
  return serverHolderList;
}
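For context, the surrounding test hands this cluster to a DiskNormalizedCostBalancerStrategy and asserts that the large-disk server wins. A minimal sketch of such a call, assuming the BalancerStrategy API of this Druid version (findNewSegmentHomeReplicator) and a hypothetical proposal segment:

// Sketch, not the verbatim test body; getSegment(...) is the helper used above.
List<ServerHolder> serverHolderList = setupDummyCluster(5, 20);
DataSegment proposalSegment = getSegment(1000); // hypothetical new segment to place

BalancerStrategy strategy = new DiskNormalizedCostBalancerStrategy(
    MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1))
);
ServerHolder chosen = strategy.findNewSegmentHomeReplicator(proposalSegment, serverHolderList);

// Disk-normalized cost should favor the server with the most free disk.
Assert.assertEquals("BEST_SERVER", chosen.getServer().getName());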
use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
the class DruidSchema method start.
@LifecycleStart
public void start()
{
  cacheExec.submit(
      new Runnable()
      {
        @Override
        public void run()
        {
          try {
            while (!Thread.currentThread().isInterrupted()) {
              final Set<String> dataSources = Sets.newHashSet();
              try {
                synchronized (lock) {
                  final long nextRefresh = new DateTime(lastRefresh)
                      .plus(config.getMetadataRefreshPeriod())
                      .getMillis();
                  // Guarded wait: proceed only once the server view is initialized, there is
                  // something to refresh, and either an immediate refresh was requested or the
                  // refresh period has elapsed.
                  while (!(isServerViewInitialized
                           && !dataSourcesNeedingRefresh.isEmpty()
                           && (refreshImmediately || nextRefresh < System.currentTimeMillis()))) {
                    lock.wait(Math.max(1, nextRefresh - System.currentTimeMillis()));
                  }
                  dataSources.addAll(dataSourcesNeedingRefresh);
                  dataSourcesNeedingRefresh.clear();
                  lastRefresh = System.currentTimeMillis();
                  refreshImmediately = false;
                }
                // Refresh dataSources.
                for (final String dataSource : dataSources) {
                  log.debug("Refreshing metadata for dataSource[%s].", dataSource);
                  final long startTime = System.currentTimeMillis();
                  final DruidTable druidTable = computeTable(dataSource);
                  if (druidTable == null) {
                    if (tables.remove(dataSource) != null) {
                      log.info("Removed dataSource[%s] from the list of active dataSources.", dataSource);
                    }
                  } else {
                    tables.put(dataSource, druidTable);
                    log.info(
                        "Refreshed metadata for dataSource[%s] in %,dms.",
                        dataSource,
                        System.currentTimeMillis() - startTime
                    );
                  }
                }
                initializationLatch.countDown();
              }
              catch (InterruptedException e) {
                // Fall through.
                throw e;
              }
              catch (Exception e) {
                log.warn(
                    e,
                    "Metadata refresh failed for dataSources[%s], trying again soon.",
                    Joiner.on(", ").join(dataSources)
                );
                synchronized (lock) {
                  // Add dataSources back to the refresh list.
                  dataSourcesNeedingRefresh.addAll(dataSources);
                  lock.notifyAll();
                }
              }
            }
          }
          catch (InterruptedException e) {
            // Just exit.
          }
          catch (Throwable e) {
            // Throwables that fall out to here (not caught by an inner try/catch) are potentially
            // gnarly, like OOMEs. Anyway, let's just emit an alert and stop refreshing metadata.
            log.makeAlert(e, "Metadata refresh failed permanently").emit();
            throw e;
          }
          finally {
            log.info("Metadata refresh stopped.");
          }
        }
      }
  );
  serverView.registerSegmentCallback(
      MoreExecutors.sameThreadExecutor(),
      new ServerView.SegmentCallback()
      {
        @Override
        public ServerView.CallbackAction segmentViewInitialized()
        {
          synchronized (lock) {
            isServerViewInitialized = true;
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment)
        {
          synchronized (lock) {
            dataSourcesNeedingRefresh.add(segment.getDataSource());
            if (!tables.containsKey(segment.getDataSource())) {
              refreshImmediately = true;
            }
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }

        @Override
        public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment)
        {
          synchronized (lock) {
            dataSourcesNeedingRefresh.add(segment.getDataSource());
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }
      }
  );
  serverView.registerServerCallback(
      MoreExecutors.sameThreadExecutor(),
      new ServerView.ServerCallback()
      {
        @Override
        public ServerView.CallbackAction serverRemoved(DruidServer server)
        {
          final List<String> dataSourceNames = Lists.newArrayList();
          for (DruidDataSource druidDataSource : server.getDataSources()) {
            dataSourceNames.add(druidDataSource.getName());
          }
          synchronized (lock) {
            dataSourcesNeedingRefresh.addAll(dataSourceNames);
            lock.notifyAll();
          }
          return ServerView.CallbackAction.CONTINUE;
        }
      }
  );
}
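The refresh loop above is a classic guarded wait: the callbacks record work under the shared lock and call notifyAll(), while the refresh thread sleeps on lock.wait(...) until the guard holds. A stripped-down sketch of the same pattern, using hypothetical names (pending, refreshNow, REFRESH_PERIOD_MS), showing why the guard is re-checked in a while loop:

import java.util.HashSet;
import java.util.Set;

// Stripped-down sketch of DruidSchema's guarded-wait refresh pattern; all names
// here are hypothetical, not Druid API.
class RefreshLoop
{
  private static final long REFRESH_PERIOD_MS = 60_000;

  private final Object lock = new Object();
  private final Set<String> pending = new HashSet<>();
  private boolean refreshNow = false;
  private long lastRefresh = 0;

  // Called from callbacks: record work, then wake the refresh thread.
  void markDirty(String dataSource, boolean immediate)
  {
    synchronized (lock) {
      pending.add(dataSource);
      refreshNow |= immediate;
      lock.notifyAll();
    }
  }

  void runLoop() throws InterruptedException
  {
    while (!Thread.currentThread().isInterrupted()) {
      final Set<String> toRefresh = new HashSet<>();
      synchronized (lock) {
        final long nextRefresh = lastRefresh + REFRESH_PERIOD_MS;
        // Re-check the guard after every wakeup: wait() can return spuriously,
        // and another wakeup may not yet satisfy the full condition.
        while (!(!pending.isEmpty() && (refreshNow || nextRefresh < System.currentTimeMillis()))) {
          lock.wait(Math.max(1, nextRefresh - System.currentTimeMillis()));
        }
        toRefresh.addAll(pending);
        pending.clear();
        lastRefresh = System.currentTimeMillis();
        refreshNow = false;
      }
      // ... refresh each dataSource in toRefresh outside the lock ...
    }
  }
}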
use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
the class TestServerInventoryView method registerSegmentCallback.
@Override
public void registerSegmentCallback(Executor exec, final SegmentCallback callback)
{
  final DruidServerMetadata dummyServer = new DruidServerMetadata("dummy", "dummy", 0, "dummy", "dummy", 0);
  for (final DataSegment segment : segments) {
    exec.execute(
        new Runnable()
        {
          @Override
          public void run()
          {
            callback.segmentAdded(dummyServer, segment);
          }
        }
    );
  }
  exec.execute(
      new Runnable()
      {
        @Override
        public void run()
        {
          callback.segmentViewInitialized();
        }
      }
  );
}
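A hedged sketch of consuming this dummy view from a test: with a same-thread executor the callbacks fire synchronously, so a latch on segmentViewInitialized() unblocks immediately. The callback signatures match ServerView.SegmentCallback as used in the DruidSchema snippet above; the inventoryView variable is assumed to be an instance of this class:

// Hypothetical test wiring around TestServerInventoryView.
final CountDownLatch initialized = new CountDownLatch(1);
final List<DataSegment> seen = new CopyOnWriteArrayList<>();

inventoryView.registerSegmentCallback(
    MoreExecutors.sameThreadExecutor(),
    new ServerView.SegmentCallback()
    {
      @Override
      public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment)
      {
        seen.add(segment);
        return ServerView.CallbackAction.CONTINUE;
      }

      @Override
      public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment)
      {
        return ServerView.CallbackAction.CONTINUE;
      }

      @Override
      public ServerView.CallbackAction segmentViewInitialized()
      {
        initialized.countDown();
        return ServerView.CallbackAction.CONTINUE;
      }
    }
);
initialized.await(); // returns immediately with sameThreadExecutor()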
use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
the class ServerViewUtil method getTargetLocations.
public static List<LocatedSegmentDescriptor> getTargetLocations(
    TimelineServerView serverView,
    DataSource datasource,
    List<Interval> intervals,
    int numCandidates
)
{
  TimelineLookup<String, ServerSelector> timeline = serverView.getTimeline(datasource);
  if (timeline == null) {
    return Collections.emptyList();
  }
  List<LocatedSegmentDescriptor> located = Lists.newArrayList();
  for (Interval interval : intervals) {
    for (TimelineObjectHolder<String, ServerSelector> holder : timeline.lookup(interval)) {
      for (PartitionChunk<ServerSelector> chunk : holder.getObject()) {
        ServerSelector selector = chunk.getObject();
        final SegmentDescriptor descriptor = new SegmentDescriptor(
            holder.getInterval(),
            holder.getVersion(),
            chunk.getChunkNumber()
        );
        long size = selector.getSegment().getSize();
        List<DruidServerMetadata> candidates = selector.getCandidates(numCandidates);
        located.add(new LocatedSegmentDescriptor(descriptor, size, candidates));
      }
    }
  }
  return located;
}
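A hedged usage sketch: asking for up to three candidate servers for every segment overlapping a one-day interval. The datasource name and interval are made up for illustration:

// Hypothetical call site; "wikipedia" and the interval are illustrative only.
List<LocatedSegmentDescriptor> locations = ServerViewUtil.getTargetLocations(
    serverView,
    new TableDataSource("wikipedia"),
    ImmutableList.of(new Interval("2017-01-01/2017-01-02")),
    3
);
for (LocatedSegmentDescriptor location : locations) {
  // Each entry pairs one SegmentDescriptor with up to numCandidates servers.
  System.out.println(location);
}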
use of io.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
the class BatchDataSegmentAnnouncerTest method setUp.
@Before
public void setUp() throws Exception
{
  testingCluster = new TestingCluster(1);
  testingCluster.start();
  cf = CuratorFrameworkFactory.builder()
                              .connectString(testingCluster.getConnectString())
                              .retryPolicy(new ExponentialBackoffRetry(1, 10))
                              .compressionProvider(new PotentiallyGzippedCompressionProvider(false))
                              .build();
  cf.start();
  cf.blockUntilConnected();
  cf.create().creatingParentsIfNeeded().forPath(testBasePath);
  jsonMapper = new DefaultObjectMapper();
  announcer = new Announcer(cf, MoreExecutors.sameThreadExecutor());
  announcer.start();
  segmentReader = new SegmentReader(cf, jsonMapper);
  skipDimensionsAndMetrics = false;
  skipLoadSpec = false;
  segmentAnnouncer = new BatchDataSegmentAnnouncer(
      new DruidServerMetadata("id", "host", Long.MAX_VALUE, "type", "tier", 0),
      new BatchDataSegmentAnnouncerConfig()
      {
        @Override
        public int getSegmentsPerNode()
        {
          return 50;
        }

        @Override
        public long getMaxBytesPerNode()
        {
          return maxBytesPerNode.get();
        }

        @Override
        public boolean isSkipDimensionsAndMetrics()
        {
          return skipDimensionsAndMetrics;
        }

        @Override
        public boolean isSkipLoadSpec()
        {
          return skipLoadSpec;
        }
      },
      new ZkPathsConfig()
      {
        @Override
        public String getBase()
        {
          return testBasePath;
        }
      },
      announcer,
      jsonMapper
  );
  segmentAnnouncer.start();
  testSegments = Sets.newHashSet();
  for (int i = 0; i < 100; i++) {
    testSegments.add(makeSegment(i));
  }
}
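A typical test over this fixture announces a segment and verifies the batch znode under testBasePath. A minimal sketch, assuming announceSegment/unannounceSegment from the DataSegmentAnnouncer interface; the exact znode layout read back through the test's SegmentReader helper is an assumption:

// Sketch of a test body over this fixture. announceSegment/unannounceSegment
// belong to the DataSegmentAnnouncer interface; how the batch znodes are laid
// out and read back via segmentReader is an assumption about this test.
DataSegment firstSegment = makeSegment(0);
segmentAnnouncer.announceSegment(firstSegment);
// ... assert via segmentReader that a batch znode under testBasePath now contains firstSegment ...
segmentAnnouncer.unannounceSegment(firstSegment);
// ... assert the znode no longer lists firstSegment ...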