
Example 21 with ImmutableDruidServer

use of org.apache.druid.client.ImmutableDruidServer in project druid by druid-io.

the class ServerHolderTest method testCompareTo.

@Test
public void testCompareTo() {
    // available size of 100
    final ServerHolder h1 = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("name1", "host1", null, 100L, ServerType.HISTORICAL, "tier1", 0), 0L, ImmutableMap.of("src1", DATA_SOURCES.get("src1")), 1), new LoadQueuePeonTester());
    // available size of 100
    final ServerHolder h2 = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("name1", "host1", null, 200L, ServerType.HISTORICAL, "tier1", 0), 100L, ImmutableMap.of("src1", DATA_SOURCES.get("src1")), 1), new LoadQueuePeonTester());
    // available size of 10
    final ServerHolder h3 = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("name1", "host1", null, 1000L, ServerType.HISTORICAL, "tier1", 0), 990L, ImmutableMap.of("src1", DATA_SOURCES.get("src1")), 1), new LoadQueuePeonTester());
    // available size of 50
    final ServerHolder h4 = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("name1", "host1", null, 50L, ServerType.HISTORICAL, "tier1", 0), 0L, ImmutableMap.of("src1", DATA_SOURCES.get("src1")), 1), new LoadQueuePeonTester());
    Assert.assertEquals(0, h1.compareTo(h2));
    Assert.assertEquals(-1, h3.compareTo(h1));
    Assert.assertEquals(-1, h3.compareTo(h4));
}
Also used : DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) Test(org.junit.Test)
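The inline comments above derive each holder's available size as the server's max size minus its current size. A minimal sketch of that arithmetic and of the ordering the three assertions imply (the local variables below are illustrative only, not ServerHolder internals):

// Illustrative only: availableSize = maxSize - currSize for each holder above.
long available1 = 100L - 0L;     // h1 -> 100
long available2 = 200L - 100L;   // h2 -> 100
long available3 = 1000L - 990L;  // h3 -> 10
long available4 = 50L - 0L;      // h4 -> 50
// Matches the assertions: equal available sizes compare as 0,
// and the holder with less available space sorts first.
Assert.assertEquals(0, Long.compare(available1, available2));
Assert.assertTrue(Long.compare(available3, available1) < 0);
Assert.assertTrue(Long.compare(available3, available4) < 0);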

Example 22 with ImmutableDruidServer

use of org.apache.druid.client.ImmutableDruidServer in project druid by druid-io.

the class CostBalancerStrategyTest method setupDummyCluster.

/**
 * Create a Druid cluster with serverCount servers holding maxSegments segments each, plus one server
 * holding maxSegments - 2 segments (98 when maxSegments is 100).
 * The cost balancer strategy should assign the next segment to the server with fewer segments.
 */
public static List<ServerHolder> setupDummyCluster(int serverCount, int maxSegments) {
    List<ServerHolder> serverHolderList = new ArrayList<>();
    // Each server holds maxSegments segments
    for (int i = 0; i < serverCount; i++) {
        LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
        List<DataSegment> segments = IntStream.range(0, maxSegments).mapToObj(j -> getSegment(j)).collect(Collectors.toList());
        ImmutableDruidDataSource dataSource = new ImmutableDruidDataSource("DUMMY", Collections.emptyMap(), segments);
        String serverName = "DruidServer_Name_" + i;
        ServerHolder serverHolder = new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata(serverName, "localhost", null, 10000000L, ServerType.HISTORICAL, "hot", 1), 3000L, ImmutableMap.of("DUMMY", dataSource), segments.size()), fromPeon);
        serverHolderList.add(serverHolder);
    }
    // The best candidate for the next segment assignment holds only maxSegments - 2 segments
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    ImmutableDruidServer druidServer = EasyMock.createMock(ImmutableDruidServer.class);
    EasyMock.expect(druidServer.getName()).andReturn("BEST_SERVER").anyTimes();
    EasyMock.expect(druidServer.getCurrSize()).andReturn(3000L).anyTimes();
    EasyMock.expect(druidServer.getMaxSize()).andReturn(10000000L).anyTimes();
    EasyMock.expect(druidServer.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
    Map<SegmentId, DataSegment> segments = new HashMap<>();
    for (int j = 0; j < (maxSegments - 2); j++) {
        DataSegment segment = getSegment(j);
        segments.put(segment.getId(), segment);
        EasyMock.expect(druidServer.getSegment(segment.getId())).andReturn(segment).anyTimes();
    }
    ImmutableDruidServerTests.expectSegments(druidServer, segments.values());
    EasyMock.replay(druidServer);
    serverHolderList.add(new ServerHolder(druidServer, fromPeon));
    return serverHolderList;
}
Also used : IntStream(java.util.stream.IntStream) MoreExecutors(com.google.common.util.concurrent.MoreExecutors) Intervals(org.apache.druid.java.util.common.Intervals) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Interval(org.joda.time.Interval) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) Map(java.util.Map) ServerType(org.apache.druid.server.coordination.ServerType) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) DateTimes(org.apache.druid.java.util.common.DateTimes) ImmutableDruidServerTests(org.apache.druid.client.ImmutableDruidServerTests) ImmutableMap(com.google.common.collect.ImmutableMap) DateTime(org.joda.time.DateTime) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Test(org.junit.Test) EasyMock(org.easymock.EasyMock) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) List(java.util.List) DataSegment(org.apache.druid.timeline.DataSegment) SegmentId(org.apache.druid.timeline.SegmentId) Assert(org.junit.Assert) Collections(java.util.Collections)
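A hedged sketch of how a test could consume this dummy cluster: setupDummyCluster and getSegment come from the class above, CostBalancerStrategy and findNewSegmentHomeReplicator are the Druid balancer APIs this test class exercises, and ListeningExecutorService is com.google.common.util.concurrent.ListeningExecutorService; the test body itself is an assumption, not the real Druid test.

@Test
public void testFindNewSegmentHomeToBestServer() {
    // Sketch only: the under-filled mocked "BEST_SERVER" should win the next assignment.
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    try {
        List<ServerHolder> serverHolderList = setupDummyCluster(5, 100);
        // getSegment(1000) is just a segment none of the dummy servers already holds.
        DataSegment proposedSegment = getSegment(1000);
        BalancerStrategy strategy = new CostBalancerStrategy(exec);
        ServerHolder chosen = strategy.findNewSegmentHomeReplicator(proposedSegment, serverHolderList);
        Assert.assertNotNull(chosen);
        Assert.assertEquals("BEST_SERVER", chosen.getServer().getName());
    } finally {
        exec.shutdownNow();
    }
}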

Example 23 with ImmutableDruidServer

use of org.apache.druid.client.ImmutableDruidServer in project druid by druid-io.

the class DiskNormalizedCostBalancerStrategyTest method setupDummyCluster.

/**
 * Create a Druid cluster with serverCount servers holding maxSegments segments each, plus one server
 * with the same number of segments but a ten times larger max size.
 * The disk-normalized cost balancer strategy should assign the next segment to the server with more free disk space.
 */
public static List<ServerHolder> setupDummyCluster(int serverCount, int maxSegments) {
    List<ServerHolder> serverHolderList = new ArrayList<>();
    // Each server holds maxSegments segments
    for (int i = 0; i < serverCount; i++) {
        LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
        List<DataSegment> segments = IntStream.range(0, maxSegments).mapToObj(j -> getSegment(j)).collect(Collectors.toList());
        ImmutableDruidDataSource dataSource = new ImmutableDruidDataSource("DUMMY", Collections.emptyMap(), segments);
        serverHolderList.add(new ServerHolder(new ImmutableDruidServer(new DruidServerMetadata("DruidServer_Name_" + i, "localhost", null, 10000000L, ServerType.HISTORICAL, "hot", 1), 3000L, ImmutableMap.of("DUMMY", dataSource), segments.size()), fromPeon));
    }
    // The best candidate for the next segment assignment has a much larger max size
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    ImmutableDruidServer druidServer = EasyMock.createMock(ImmutableDruidServer.class);
    EasyMock.expect(druidServer.getName()).andReturn("BEST_SERVER").anyTimes();
    EasyMock.expect(druidServer.getCurrSize()).andReturn(3000L).anyTimes();
    EasyMock.expect(druidServer.getMaxSize()).andReturn(100000000L).anyTimes();
    EasyMock.expect(druidServer.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
    List<DataSegment> segments = new ArrayList<>();
    for (int j = 0; j < maxSegments; j++) {
        DataSegment segment = getSegment(j);
        segments.add(segment);
        EasyMock.expect(druidServer.getSegment(segment.getId())).andReturn(segment).anyTimes();
    }
    ImmutableDruidServerTests.expectSegments(druidServer, segments);
    EasyMock.replay(druidServer);
    serverHolderList.add(new ServerHolder(druidServer, fromPeon));
    return serverHolderList;
}
Also used : IntStream(java.util.stream.IntStream) MoreExecutors(com.google.common.util.concurrent.MoreExecutors) ImmutableDruidServerTests(org.apache.druid.client.ImmutableDruidServerTests) ImmutableMap(com.google.common.collect.ImmutableMap) Intervals(org.apache.druid.java.util.common.Intervals) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Test(org.junit.Test) EasyMock(org.easymock.EasyMock) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) ArrayList(java.util.ArrayList) Interval(org.joda.time.Interval) List(java.util.List) DruidServerMetadata(org.apache.druid.server.coordination.DruidServerMetadata) ServerType(org.apache.druid.server.coordination.ServerType) DataSegment(org.apache.druid.timeline.DataSegment) Assert(org.junit.Assert) ImmutableDruidDataSource(org.apache.druid.client.ImmutableDruidDataSource) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) Collections(java.util.Collections)
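Under the same assumptions as the sketch after Example 22, the disk-normalized variant could be exercised the same way; here the mocked server should win because its max size, and therefore its free disk, is ten times larger. Again a hedged sketch, not the actual Druid test body.

ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
try {
    List<ServerHolder> serverHolderList = setupDummyCluster(5, 100);
    BalancerStrategy strategy = new DiskNormalizedCostBalancerStrategy(exec);
    ServerHolder chosen = strategy.findNewSegmentHomeReplicator(getSegment(1000), serverHolderList);
    // The strategy should prefer the server with the most headroom, i.e. the mocked "BEST_SERVER".
    Assert.assertEquals("BEST_SERVER", chosen.getServer().getName());
} finally {
    exec.shutdownNow();
}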

Aggregations

ImmutableDruidServer (org.apache.druid.client.ImmutableDruidServer) 23
DataSegment (org.apache.druid.timeline.DataSegment) 17
Test (org.junit.Test) 13
DruidServerMetadata (org.apache.druid.server.coordination.DruidServerMetadata) 9
LoadQueuePeon (org.apache.druid.server.coordinator.LoadQueuePeon) 8
List (java.util.List) 7
ImmutableDruidDataSource (org.apache.druid.client.ImmutableDruidDataSource) 7
ServerHolder (org.apache.druid.server.coordinator.ServerHolder) 7
SegmentId (org.apache.druid.timeline.SegmentId) 7
DruidServer (org.apache.druid.client.DruidServer) 6
CoordinatorStats (org.apache.druid.server.coordinator.CoordinatorStats) 6
ArrayList (java.util.ArrayList) 5
Collectors (java.util.stream.Collectors) 5
DruidCluster (org.apache.druid.server.coordinator.DruidCluster) 5
ImmutableMap (com.google.common.collect.ImmutableMap) 4
Map (java.util.Map) 4
HashMap (java.util.HashMap) 3
Intervals (org.apache.druid.java.util.common.Intervals) 3
ServerType (org.apache.druid.server.coordination.ServerType) 3
EasyMock (org.easymock.EasyMock) 3