Use of org.apache.druid.client.ImmutableDruidDataSource in project druid by druid-io.
From the class DruidCoordinatorTest, method testMoveSegment:
@Test
public void testMoveSegment() {
  final DataSegment segment = EasyMock.createNiceMock(DataSegment.class);
  EasyMock.expect(segment.getId()).andReturn(SegmentId.dummy("dummySegment"));
  EasyMock.expect(segment.getDataSource()).andReturn("dummyDataSource");
  EasyMock.replay(segment);

  loadQueuePeon = EasyMock.createNiceMock(LoadQueuePeon.class);
  EasyMock.expect(loadQueuePeon.getLoadQueueSize()).andReturn(1L);
  loadQueuePeon.markSegmentToDrop(segment);
  EasyMock.expectLastCall().once();

  Capture<LoadPeonCallback> loadCallbackCapture = Capture.newInstance();
  Capture<LoadPeonCallback> dropCallbackCapture = Capture.newInstance();
  loadQueuePeon.loadSegment(EasyMock.anyObject(DataSegment.class), EasyMock.capture(loadCallbackCapture));
  EasyMock.expectLastCall().once();
  loadQueuePeon.dropSegment(EasyMock.anyObject(DataSegment.class), EasyMock.capture(dropCallbackCapture));
  EasyMock.expectLastCall().once();
  loadQueuePeon.unmarkSegmentToDrop(segment);
  EasyMock.expectLastCall().once();
  EasyMock.expect(loadQueuePeon.getSegmentsToDrop()).andReturn(new HashSet<>()).once();
  EasyMock.replay(loadQueuePeon);

  ImmutableDruidDataSource druidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
  EasyMock.expect(druidDataSource.getSegment(EasyMock.anyObject(SegmentId.class))).andReturn(segment);
  EasyMock.replay(druidDataSource);
  EasyMock.expect(segmentsMetadataManager.getImmutableDataSourceWithUsedSegments(EasyMock.anyString()))
          .andReturn(druidDataSource);
  EasyMock.replay(segmentsMetadataManager);
  EasyMock.expect(dataSourcesSnapshot.getDataSource(EasyMock.anyString())).andReturn(druidDataSource).anyTimes();
  EasyMock.replay(dataSourcesSnapshot);

  scheduledExecutorFactory = EasyMock.createNiceMock(ScheduledExecutorFactory.class);
  EasyMock.replay(scheduledExecutorFactory);
  EasyMock.replay(metadataRuleManager);

  ImmutableDruidDataSource dataSource = EasyMock.createMock(ImmutableDruidDataSource.class);
  EasyMock.expect(dataSource.getSegments()).andReturn(Collections.singletonList(segment)).anyTimes();
  EasyMock.replay(dataSource);

  EasyMock.expect(druidServer.toImmutableDruidServer()).andReturn(
      new ImmutableDruidServer(
          new DruidServerMetadata("from", null, null, 5L, ServerType.HISTORICAL, null, 0),
          1L,
          ImmutableMap.of("dummyDataSource", dataSource),
          1
      )
  ).atLeastOnce();
  EasyMock.replay(druidServer);

  DruidServer druidServer2 = EasyMock.createMock(DruidServer.class);
  EasyMock.expect(druidServer2.toImmutableDruidServer()).andReturn(
      new ImmutableDruidServer(
          new DruidServerMetadata("to", null, null, 5L, ServerType.HISTORICAL, null, 0),
          1L,
          ImmutableMap.of("dummyDataSource", dataSource),
          1
      )
  ).atLeastOnce();
  EasyMock.replay(druidServer2);

  loadManagementPeons.put("from", loadQueuePeon);
  loadManagementPeons.put("to", loadQueuePeon);
  EasyMock.expect(serverInventoryView.isSegmentLoadedByServer("to", segment)).andReturn(true).once();
  EasyMock.replay(serverInventoryView);

  mockCoordinatorRuntimeParams();

  coordinator.moveSegment(
      coordinatorRuntimeParams,
      druidServer.toImmutableDruidServer(),
      druidServer2.toImmutableDruidServer(),
      segment,
      null
  );

  LoadPeonCallback loadCallback = loadCallbackCapture.getValue();
  loadCallback.execute();
  LoadPeonCallback dropCallback = dropCallbackCapture.getValue();
  dropCallback.execute();

  EasyMock.verify(druidServer, druidServer2, loadQueuePeon, serverInventoryView, metadataRuleManager);
  EasyMock.verify(coordinatorRuntimeParams);
}
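The pivot of this test is EasyMock's Capture: the mocked LoadQueuePeon records the LoadPeonCallback instances that moveSegment passes to loadSegment and dropSegment, and the test then invokes them by hand to drive the load-then-drop handshake deterministically. Here is a minimal, self-contained sketch of that pattern, with a hypothetical QueuePeon interface standing in for LoadQueuePeon:

import org.easymock.Capture;
import org.easymock.EasyMock;

public class CaptureSketch {
  // Hypothetical stand-in for LoadQueuePeon: a collaborator that takes a completion callback.
  interface QueuePeon {
    void loadSegment(String segmentId, Runnable callback);
  }

  public static void main(String[] args) {
    QueuePeon peon = EasyMock.createNiceMock(QueuePeon.class);
    Capture<Runnable> callbackCapture = Capture.newInstance();

    // Record phase: capture whatever callback the code under test passes in.
    peon.loadSegment(EasyMock.anyString(), EasyMock.capture(callbackCapture));
    EasyMock.expectLastCall().once();
    EasyMock.replay(peon);

    // The "code under test" hands the peon a callback for async completion...
    peon.loadSegment("seg-1", () -> System.out.println("load confirmed"));

    // ...and the test fires it manually, simulating the asynchronous event.
    callbackCapture.getValue().run();
    EasyMock.verify(peon);
  }
}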
Use of org.apache.druid.client.ImmutableDruidDataSource in project druid by druid-io.
From the class CuratorDruidCoordinatorTest, method testMoveSegment:
@Test
public void testMoveSegment() throws Exception {
  segmentViewInitLatch = new CountDownLatch(1);
  segmentAddedLatch = new CountDownLatch(4);
  segmentRemovedLatch = new CountDownLatch(0);
  CountDownLatch destCountdown = new CountDownLatch(1);
  CountDownLatch srcCountdown = new CountDownLatch(1);
  setupView();

  DruidServer source = new DruidServer("localhost:1", "localhost:1", null, 10000000L, ServerType.HISTORICAL, "default_tier", 0);
  DruidServer dest = new DruidServer("localhost:2", "localhost:2", null, 10000000L, ServerType.HISTORICAL, "default_tier", 0);
  setupZNodeForServer(source, zkPathsConfig, jsonMapper);
  setupZNodeForServer(dest, zkPathsConfig, jsonMapper);

  final List<DataSegment> sourceSegments = Lists.transform(
      ImmutableList.of(
          Pair.of("2011-04-01/2011-04-03", "v1"),
          Pair.of("2011-04-03/2011-04-06", "v1"),
          Pair.of("2011-04-06/2011-04-09", "v1")
      ),
      input -> dataSegmentWithIntervalAndVersion(input.lhs, input.rhs)
  );
  final List<DataSegment> destinationSegments = Lists.transform(
      ImmutableList.of(Pair.of("2011-03-31/2011-04-01", "v1")),
      input -> dataSegmentWithIntervalAndVersion(input.lhs, input.rhs)
  );
  DataSegment segmentToMove = sourceSegments.get(2);

  List<String> sourceSegKeys = new ArrayList<>();
  for (DataSegment segment : sourceSegments) {
    sourceSegKeys.add(announceBatchSegmentsForServer(source, ImmutableSet.of(segment), zkPathsConfig, jsonMapper));
  }
  for (DataSegment segment : destinationSegments) {
    announceBatchSegmentsForServer(dest, ImmutableSet.of(segment), zkPathsConfig, jsonMapper);
  }
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));

  // These child watchers simulate the actions of historicals: announcing a segment on noticing a load queue
  // entry for the destination, and unannouncing from the source server on noticing a drop request.
  sourceLoadQueueChildrenCache.getListenable().addListener(
      (CuratorFramework curatorFramework, PathChildrenCacheEvent event) -> {
        if (event.getType().equals(PathChildrenCacheEvent.Type.INITIALIZED)) {
          srcCountdown.countDown();
        } else if (CuratorUtils.isChildAdded(event)) {
          // Simulate source server dropping segment
          unannounceSegmentFromBatchForServer(source, segmentToMove, sourceSegKeys.get(2), zkPathsConfig);
        }
      }
  );
  destinationLoadQueueChildrenCache.getListenable().addListener(
      (CuratorFramework curatorFramework, PathChildrenCacheEvent event) -> {
        if (event.getType().equals(PathChildrenCacheEvent.Type.INITIALIZED)) {
          destCountdown.countDown();
        } else if (CuratorUtils.isChildAdded(event)) {
          // Simulate destination server loading segment
          announceBatchSegmentsForServer(dest, ImmutableSet.of(segmentToMove), zkPathsConfig, jsonMapper);
        }
      }
  );
  sourceLoadQueueChildrenCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
  destinationLoadQueueChildrenCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
  Assert.assertTrue(timing.forWaiting().awaitLatch(srcCountdown));
  Assert.assertTrue(timing.forWaiting().awaitLatch(destCountdown));

  loadManagementPeons.put("localhost:1", sourceLoadQueuePeon);
  loadManagementPeons.put("localhost:2", destinationLoadQueuePeon);
  segmentRemovedLatch = new CountDownLatch(1);
  segmentAddedLatch = new CountDownLatch(1);

  ImmutableDruidDataSource druidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
  EasyMock.expect(druidDataSource.getSegment(EasyMock.anyObject(SegmentId.class))).andReturn(sourceSegments.get(2));
  EasyMock.replay(druidDataSource);
  EasyMock.expect(segmentsMetadataManager.getImmutableDataSourceWithUsedSegments(EasyMock.anyString()))
          .andReturn(druidDataSource);
  EasyMock.expect(coordinatorRuntimeParams.getDataSourcesSnapshot()).andReturn(dataSourcesSnapshot).anyTimes();
  EasyMock.replay(segmentsMetadataManager, coordinatorRuntimeParams);
  EasyMock.expect(dataSourcesSnapshot.getDataSource(EasyMock.anyString())).andReturn(druidDataSource).anyTimes();
  EasyMock.replay(dataSourcesSnapshot);

  coordinator.moveSegment(
      coordinatorRuntimeParams,
      source.toImmutableDruidServer(),
      dest.toImmutableDruidServer(),
      sourceSegments.get(2),
      null
  );

  // wait for destination server to load segment
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
  // remove load queue key from destination server to trigger adding drop to load queue
  curator.delete().guaranteed().forPath(ZKPaths.makePath(DESTINATION_LOAD_PATH, segmentToMove.getId().toString()));
  // wait for drop
  Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
  // clean up drop from load queue
  curator.delete().guaranteed().forPath(ZKPaths.makePath(SOURCE_LOAD_PATH, segmentToMove.getId().toString()));

  List<DruidServer> servers = new ArrayList<>(serverView.getInventory());
  Assert.assertEquals(2, servers.get(0).getTotalSegments());
  Assert.assertEquals(2, servers.get(1).getTotalSegments());
}
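The two listeners above give the test a deterministic handshake with real ZooKeeper state: a CountDownLatch is released once the PathChildrenCache has replayed its existing children, and CHILD_ADDED events stand in for historicals reacting to load-queue entries. A minimal sketch of that handshake, assuming an already-started CuratorFramework client and an arbitrary /queue path:

import java.util.concurrent.CountDownLatch;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.PathChildrenCache;
import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent;

public class LoadQueueWatchSketch {
  // curator is assumed to be an already-started CuratorFramework client.
  public static void watchQueue(CuratorFramework curator) throws Exception {
    CountDownLatch initialized = new CountDownLatch(1);
    PathChildrenCache cache = new PathChildrenCache(curator, "/queue", true);
    cache.getListenable().addListener((client, event) -> {
      if (event.getType() == PathChildrenCacheEvent.Type.INITIALIZED) {
        initialized.countDown(); // existing children have been replayed
      } else if (event.getType() == PathChildrenCacheEvent.Type.CHILD_ADDED) {
        // React to a new queue entry, as the simulated historicals do above.
        System.out.println("child added: " + event.getData().getPath());
      }
    });
    // POST_INITIALIZED_EVENT makes Curator emit INITIALIZED once the startup replay is done.
    cache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT);
    initialized.await();
  }
}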
Use of org.apache.druid.client.ImmutableDruidDataSource in project druid by druid-io.
From the class DruidCoordinator, method moveSegment:
public void moveSegment(
    DruidCoordinatorRuntimeParams params,
    ImmutableDruidServer fromServer,
    ImmutableDruidServer toServer,
    DataSegment segment,
    final LoadPeonCallback callback
) {
  if (segment == null) {
    log.makeAlert(new IAE("Can not move null DataSegment"), "Exception moving null segment").emit();
    if (callback != null) {
      callback.execute();
    }
    throw new ISE("Cannot move null DataSegment");
  }
  SegmentId segmentId = segment.getId();
  try {
    if (fromServer.getMetadata().equals(toServer.getMetadata())) {
      throw new IAE("Cannot move [%s] to and from the same server [%s]", segmentId, fromServer.getName());
    }
    ImmutableDruidDataSource dataSource = params.getDataSourcesSnapshot().getDataSource(segment.getDataSource());
    if (dataSource == null) {
      throw new IAE("Unable to find dataSource for segment [%s] in metadata", segmentId);
    }
    // Get segment information from SegmentsMetadataManager instead of from fromServer.
    // This is useful when the SegmentsMetadataManager and fromServer DataSegments differ for the same
    // identifier (say, the loadSpec differs because of deep storage migration).
    final DataSegment segmentToLoad = dataSource.getSegment(segment.getId());
    if (segmentToLoad == null) {
      throw new IAE("No segment metadata found for segment Id [%s]", segment.getId());
    }
    final LoadQueuePeon loadPeon = loadManagementPeons.get(toServer.getName());
    if (loadPeon == null) {
      throw new IAE("LoadQueuePeon hasn't been created yet for path [%s]", toServer.getName());
    }
    final LoadQueuePeon dropPeon = loadManagementPeons.get(fromServer.getName());
    if (dropPeon == null) {
      throw new IAE("LoadQueuePeon hasn't been created yet for path [%s]", fromServer.getName());
    }
    final ServerHolder toHolder = new ServerHolder(toServer, loadPeon);
    if (toHolder.getAvailableSize() < segmentToLoad.getSize()) {
      throw new IAE(
          "Not enough capacity on server [%s] for segment [%s]. Required: %,d, available: %,d.",
          toServer.getName(),
          segmentToLoad,
          segmentToLoad.getSize(),
          toHolder.getAvailableSize()
      );
    }
    final String toLoadQueueSegPath = ZKPaths.makePath(zkPaths.getLoadQueuePath(), toServer.getName(), segmentId.toString());
    final LoadPeonCallback loadPeonCallback = () -> {
      dropPeon.unmarkSegmentToDrop(segmentToLoad);
      if (callback != null) {
        callback.execute();
      }
    };
    // Mark the segment to drop before it is actually loaded on the server,
    // so that DruidBalancerStrategy can account for this information immediately.
    dropPeon.markSegmentToDrop(segmentToLoad);
    try {
      loadPeon.loadSegment(
          segmentToLoad,
          () -> {
            try {
              if (serverInventoryView.isSegmentLoadedByServer(toServer.getName(), segment)
                  && (curator == null || curator.checkExists().forPath(toLoadQueueSegPath) == null)
                  && !dropPeon.getSegmentsToDrop().contains(segment)) {
                dropPeon.dropSegment(segment, loadPeonCallback);
              } else {
                loadPeonCallback.execute();
              }
            } catch (Exception e) {
              throw new RuntimeException(e);
            }
          }
      );
    } catch (Exception e) {
      dropPeon.unmarkSegmentToDrop(segmentToLoad);
      throw new RuntimeException(e);
    }
  } catch (Exception e) {
    log.makeAlert(e, "Exception moving segment %s", segmentId).emit();
    if (callback != null) {
      callback.execute();
    }
  }
}
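The subtle part of moveSegment is the mark/unmark bracket around the asynchronous load: the segment is marked as to-drop up front so the balancer strategy accounts for it immediately, and it is unmarked exactly once, either through loadPeonCallback when the load completes or in the catch block when submission fails. Reduced to its skeleton, with a generic CompletableFuture standing in for the peons (a sketch of the pattern, not Druid code):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;

public class MarkUnmarkSketch {
  private final AtomicBoolean markedToDrop = new AtomicBoolean(false);

  // Bracket an async load with mark/unmark so the flag is always cleared exactly once.
  public void move(Runnable asyncLoad) {
    markedToDrop.set(true); // visible to observers (the balancer) before the load starts
    try {
      CompletableFuture.runAsync(asyncLoad)
                       .whenComplete((ok, err) -> markedToDrop.set(false)); // completion path
    } catch (RuntimeException e) {
      markedToDrop.set(false); // submission itself failed: undo the mark here
      throw e;
    }
  }
}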
Use of org.apache.druid.client.ImmutableDruidDataSource in project druid by druid-io.
From the class BalancerStrategyBenchmark, method setup:
@Setup(Level.Trial)
public void setup() {
  switch (mode) {
    case "50percentOfSegmentsToConsiderPerMove":
      percentOfSegmentsToConsider = 50;
      useBatchedSegmentSampler = false;
      break;
    case "useBatchedSegmentSampler":
      reservoirSize = maxSegmentsToMove;
      useBatchedSegmentSampler = true;
      break;
    default:
  }

  List<List<DataSegment>> segmentList = new ArrayList<>(NUMBER_OF_SERVERS);
  IntStream.range(0, NUMBER_OF_SERVERS).forEach(i -> segmentList.add(new ArrayList<>()));
  for (int i = 0; i < numberOfSegments; i++) {
    segmentList.get(RANDOM.nextInt(NUMBER_OF_SERVERS)).add(
        new DataSegment("test", TEST_SEGMENT_INTERVAL, String.valueOf(i),
                        Collections.emptyMap(), Collections.emptyList(), Collections.emptyList(),
                        null, 0, 10L)
    );
  }
  for (List<DataSegment> segments : segmentList) {
    serverHolders.add(
        new ServerHolder(
            new ImmutableDruidServer(
                new DruidServerMetadata("id", "host", null, 10000000L, ServerType.HISTORICAL, "hot", 1),
                3000L,
                ImmutableMap.of("test", new ImmutableDruidDataSource("test", Collections.emptyMap(), segments)),
                segments.size()
            ),
            new LoadQueuePeonTester()
        )
    );
  }
}
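The useBatchedSegmentSampler branch sets reservoirSize = maxSegmentsToMove because the batched sampler draws its move candidates with reservoir sampling: one pass over all segments instead of a percentage-based scan per move. For reference, a generic single-pass reservoir sample of size k (a sketch of the algorithm, not Druid's ReservoirSegmentSampler itself):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

public class ReservoirSketch {
  // Algorithm R: after one pass, each of the n items is kept with probability k/n.
  static <T> List<T> sample(Iterable<T> items, int k) {
    List<T> reservoir = new ArrayList<>(k);
    int seen = 0;
    for (T item : items) {
      seen++;
      if (reservoir.size() < k) {
        reservoir.add(item); // fill the reservoir with the first k items
      } else {
        int j = ThreadLocalRandom.current().nextInt(seen); // uniform in [0, seen)
        if (j < k) {
          reservoir.set(j, item); // replace a random slot with decreasing probability
        }
      }
    }
    return reservoir;
  }
}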
Use of org.apache.druid.client.ImmutableDruidDataSource in project druid by druid-io.
From the class DataSourcesSnapshotBenchmark, method setUp:
@Setup
public void setUp() {
  long start = System.currentTimeMillis();
  Map<String, ImmutableDruidDataSource> dataSources = new HashMap<>();
  for (int i = 0; i < numDataSources; i++) {
    String dataSource = StringUtils.format("ds-%d", i);
    List<DataSegment> segments = new ArrayList<>();
    for (int j = 0; j < numSegmentPerDataSource; j++) {
      segments.add(
          new DataSegment(dataSource, TEST_SEGMENT_INTERVAL, String.valueOf(j),
                          Collections.emptyMap(), Collections.emptyList(), Collections.emptyList(),
                          NoneShardSpec.instance(), 0, 10L)
      );
    }
    dataSources.put(dataSource, new ImmutableDruidDataSource(dataSource, Collections.emptyMap(), segments));
  }
  snapshot = new DataSourcesSnapshot(dataSources);
  System.out.println("Setup Time " + (System.currentTimeMillis() - start) + " ms");
}
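With the snapshot built once in setUp, a benchmark body only needs to exercise lookups against it. A hypothetical JMH method for the class above (lookupDataSource and the "ds-0" key are illustrative, not from the source; getDataSource matches its use in DruidCoordinator.moveSegment earlier):

// Requires org.openjdk.jmh.annotations.Benchmark and org.openjdk.jmh.infra.Blackhole.
@Benchmark
public void lookupDataSource(Blackhole blackhole) {
  // snapshot is the field populated in setUp(); Blackhole prevents dead-code elimination.
  blackhole.consume(snapshot.getDataSource("ds-0"));
}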