Example 21 with ListeningExecutorService

Use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io: class DruidCoordinatorRuleRunnerTest, method testRunTwoTiersTierDoesNotExist.

@Test
public void testRunTwoTiersTierDoesNotExist() throws Exception {
    mockCoordinator();
    mockPeon.loadSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).atLeastOnce();
    EasyMock.replay(mockPeon);
    emitter.emit(EasyMock.<ServiceEventBuilder>anyObject());
    EasyMock.expectLastCall().times(12);
    EasyMock.replay(emitter);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(
        Lists.<Rule>newArrayList(
            new IntervalLoadRule(
                new Interval("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"),
                ImmutableMap.<String, Integer>of("hot", 1)
            ),
            new IntervalLoadRule(
                new Interval("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"),
                ImmutableMap.<String, Integer>of("normal", 1)
            )
        )
    ).atLeastOnce();
    EasyMock.replay(databaseRuleManager);
    DruidCluster druidCluster = new DruidCluster(
        ImmutableMap.of(
            "normal",
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(
                Arrays.asList(
                    new ServerHolder(
                        new DruidServer("serverNorm", "hostNorm", 1000, "historical", "normal", 0).toImmutableDruidServer(),
                        mockPeon
                    )
                )
            )
        )
    );
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    DruidCoordinatorRuntimeParams params = new DruidCoordinatorRuntimeParams.Builder()
        .withEmitter(emitter)
        .withDruidCluster(druidCluster)
        .withAvailableSegments(availableSegments)
        .withDatabaseRuleManager(databaseRuleManager)
        .withSegmentReplicantLookup(SegmentReplicantLookup.make(new DruidCluster()))
        .withBalancerStrategy(balancerStrategy)
        .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
        .build();
    ruleRunner.run(params);
    exec.shutdown();
    EasyMock.verify(emitter);
    EasyMock.verify(mockPeon);
}
Also used: IntervalLoadRule(io.druid.server.coordinator.rules.IntervalLoadRule) ServiceEventBuilder(com.metamx.emitter.service.ServiceEventBuilder) DruidServer(io.druid.client.DruidServer) DataSegment(io.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) ForeverLoadRule(io.druid.server.coordinator.rules.ForeverLoadRule) Rule(io.druid.server.coordinator.rules.Rule) IntervalDropRule(io.druid.server.coordinator.rules.IntervalDropRule) Interval(org.joda.time.Interval) Test(org.junit.Test)
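
All of the coordinator tests in this group follow the same executor lifecycle: wrap a single-threaded pool with MoreExecutors.listeningDecorator, hand the resulting ListeningExecutorService to CostBalancerStrategyFactory, run the coordinator logic, and shut the executor down. Below is a minimal standalone sketch of that wrapping step; the class name and task body are hypothetical placeholders, not Druid code.

import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ListeningDecoratorSketch {
    public static void main(String[] args) {
        ExecutorService delegate = Executors.newFixedThreadPool(1);
        // listeningDecorator wraps a plain ExecutorService so that submit()
        // returns ListenableFuture instead of a bare Future.
        ListeningExecutorService exec = MoreExecutors.listeningDecorator(delegate);
        try {
            exec.submit(() -> System.out.println("task ran")).addListener(
                () -> System.out.println("completion listener fired"),
                // directExecutor() runs the listener on the completing thread
                // (older Guava releases call this sameThreadExecutor()).
                MoreExecutors.directExecutor()
            );
        } finally {
            // Shutting down the decorator shuts down the delegate pool as well,
            // which is why the tests above only call exec.shutdown().
            exec.shutdown();
        }
    }
}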

Example 22 with ListeningExecutorService

Use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io: class DruidCoordinatorRuleRunnerTest, method testDropReplicantThrottle.

@Test
public void testDropReplicantThrottle() throws Exception {
    mockCoordinator();
    mockPeon.dropSegment(EasyMock.<DataSegment>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.expect(mockPeon.getSegmentsToLoad()).andReturn(Sets.<DataSegment>newHashSet()).atLeastOnce();
    EasyMock.expect(mockPeon.getLoadQueueSize()).andReturn(0L).atLeastOnce();
    EasyMock.replay(mockPeon);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(
        Lists.<Rule>newArrayList(
            new IntervalLoadRule(
                new Interval("2012-01-01T00:00:00.000Z/2013-01-02T00:00:00.000Z"),
                ImmutableMap.<String, Integer>of("normal", 1)
            )
        )
    ).atLeastOnce();
    EasyMock.replay(databaseRuleManager);
    DataSegment overFlowSegment = new DataSegment(
        "test",
        new Interval("2012-02-01/2012-02-02"),
        new DateTime().toString(),
        Maps.<String, Object>newHashMap(),
        Lists.<String>newArrayList(),
        Lists.<String>newArrayList(),
        NoneShardSpec.instance(),
        1,
        0
    );
    List<DataSegment> longerAvailableSegments = Lists.newArrayList(availableSegments);
    longerAvailableSegments.add(overFlowSegment);
    DruidServer server1 = new DruidServer("serverNorm1", "hostNorm1", 1000, "historical", "normal", 0);
    for (DataSegment availableSegment : longerAvailableSegments) {
        server1.addDataSegment(availableSegment.getIdentifier(), availableSegment);
    }
    DruidServer server2 = new DruidServer("serverNorm2", "hostNorm2", 1000, "historical", "normal", 0);
    for (DataSegment availableSegment : longerAvailableSegments) {
        server2.addDataSegment(availableSegment.getIdentifier(), availableSegment);
    }
    DruidCluster druidCluster = new DruidCluster(
        ImmutableMap.of(
            "normal",
            MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create(
                Arrays.asList(
                    new ServerHolder(server1.toImmutableDruidServer(), mockPeon),
                    new ServerHolder(server2.toImmutableDruidServer(), mockPeon)
                )
            )
        )
    );
    SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(druidCluster);
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    DruidCoordinatorRuntimeParams params = new DruidCoordinatorRuntimeParams.Builder()
        .withDruidCluster(druidCluster)
        .withDynamicConfigs(new CoordinatorDynamicConfig.Builder().withMillisToWaitBeforeDeleting(0L).build())
        .withAvailableSegments(longerAvailableSegments)
        .withDatabaseRuleManager(databaseRuleManager)
        .withSegmentReplicantLookup(segmentReplicantLookup)
        .withBalancerStrategy(balancerStrategy)
        .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
        .build();
    DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
    CoordinatorStats stats = afterParams.getCoordinatorStats();
    // There is no throttling on drop
    Assert.assertEquals(25L, stats.getPerTierStats().get("droppedCount").get("normal").get());
    EasyMock.verify(mockPeon);
    exec.shutdown();
}
Also used: IntervalLoadRule(io.druid.server.coordinator.rules.IntervalLoadRule) ServiceEventBuilder(com.metamx.emitter.service.ServiceEventBuilder) DruidServer(io.druid.client.DruidServer) DataSegment(io.druid.timeline.DataSegment) DateTime(org.joda.time.DateTime) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) ForeverLoadRule(io.druid.server.coordinator.rules.ForeverLoadRule) Rule(io.druid.server.coordinator.rules.Rule) IntervalDropRule(io.druid.server.coordinator.rules.IntervalDropRule) Interval(org.joda.time.Interval) Test(org.junit.Test)

Example 23 with ListeningExecutorService

Use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io: class DruidCoordinatorBalancerTest, method testMoveToEmptyServerBalancer.

@Test
public void testMoveToEmptyServerBalancer() throws IOException {
    EasyMock.expect(druidServer1.getName()).andReturn("from").atLeastOnce();
    EasyMock.expect(druidServer1.getCurrSize()).andReturn(30L).atLeastOnce();
    EasyMock.expect(druidServer1.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer1.getSegments()).andReturn(segments).anyTimes();
    EasyMock.expect(druidServer1.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer1);
    EasyMock.expect(druidServer2.getName()).andReturn("to").atLeastOnce();
    EasyMock.expect(druidServer2.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(druidServer2.getCurrSize()).andReturn(0L).atLeastOnce();
    EasyMock.expect(druidServer2.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer2.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    EasyMock.expect(druidServer2.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer2);
    EasyMock.replay(druidServer3);
    EasyMock.replay(druidServer4);
    // Mock stuff that the coordinator needs
    coordinator.moveSegment(EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<String>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    LoadQueuePeonTester toPeon = new LoadQueuePeonTester();
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams.newBuilder()
        .withDruidCluster(
            new DruidCluster(
                ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of(
                    "normal",
                    MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator).create(
                        Arrays.asList(
                            new ServerHolder(druidServer1, fromPeon),
                            new ServerHolder(druidServer2, toPeon)
                        )
                    )
                )
            )
        )
        .withLoadManagementPeons(ImmutableMap.<String, LoadQueuePeon>of("from", fromPeon, "to", toPeon))
        .withAvailableSegments(segments.values())
        .withDynamicConfigs(new CoordinatorDynamicConfig.Builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).build())
        .withBalancerStrategy(balancerStrategy)
        .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
        .build();
    params = new DruidCoordinatorBalancerTester(coordinator).run(params);
    Assert.assertTrue(params.getCoordinatorStats().getPerTierStats().get("movedCount").get("normal").get() > 0);
    Assert.assertTrue(params.getCoordinatorStats().getPerTierStats().get("movedCount").get("normal").get() < segments.size());
    exec.shutdown();
}
Also used: HashMap(java.util.HashMap) DateTime(org.joda.time.DateTime) MinMaxPriorityQueue(com.google.common.collect.MinMaxPriorityQueue) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Test(org.junit.Test)
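
What the ListeningExecutorService buys these balancer tests over a plain ExecutorService is that submitted work (here, the cost computations run by the strategy from CostBalancerStrategyFactory) comes back as a ListenableFuture, to which completion callbacks can be attached. A hedged, standalone sketch of that callback pattern follows; the class name and the integer task are hypothetical stand-ins, not Druid code.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

public class ListenableFutureCallbackSketch {
    public static void main(String[] args) {
        ListeningExecutorService exec =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
        // submit() on a ListeningExecutorService returns ListenableFuture<T>.
        ListenableFuture<Integer> result = exec.submit(() -> 40 + 2);
        Futures.addCallback(
            result,
            new FutureCallback<Integer>() {
                @Override
                public void onSuccess(Integer value) {
                    System.out.println("computed " + value);
                }

                @Override
                public void onFailure(Throwable t) {
                    t.printStackTrace();
                }
            },
            // Run the callback on the completing thread; fine for cheap callbacks.
            MoreExecutors.directExecutor()
        );
        exec.shutdown();
    }
}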

Example 24 with ListeningExecutorService

Use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io: class DruidCoordinatorBalancerTest, method testRun1.

@Test
public void testRun1() throws IOException {
    // Mock some servers of different usages
    EasyMock.expect(druidServer1.getName()).andReturn("from").atLeastOnce();
    EasyMock.expect(druidServer1.getCurrSize()).andReturn(30L).atLeastOnce();
    EasyMock.expect(druidServer1.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer1.getSegments()).andReturn(segments).anyTimes();
    EasyMock.expect(druidServer1.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer1);
    EasyMock.expect(druidServer2.getName()).andReturn("to").atLeastOnce();
    EasyMock.expect(druidServer2.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(druidServer2.getCurrSize()).andReturn(0L).atLeastOnce();
    EasyMock.expect(druidServer2.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer2.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    EasyMock.expect(druidServer2.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer2);
    EasyMock.replay(druidServer3);
    EasyMock.replay(druidServer4);
    // Mock stuff that the coordinator needs
    coordinator.moveSegment(EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<String>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    LoadQueuePeonTester toPeon = new LoadQueuePeonTester();
    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams.newBuilder()
        .withDruidCluster(
            new DruidCluster(
                ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of(
                    "normal",
                    MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator).create(
                        Arrays.asList(
                            new ServerHolder(druidServer1, fromPeon),
                            new ServerHolder(druidServer2, toPeon)
                        )
                    )
                )
            )
        )
        .withLoadManagementPeons(ImmutableMap.<String, LoadQueuePeon>of("from", fromPeon, "to", toPeon))
        .withAvailableSegments(segments.values())
        .withDynamicConfigs(new CoordinatorDynamicConfig.Builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).build())
        .withBalancerStrategy(balancerStrategy)
        .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
        .build();
    params = new DruidCoordinatorBalancerTester(coordinator).run(params);
    Assert.assertTrue(params.getCoordinatorStats().getPerTierStats().get("movedCount").get("normal").get() > 0);
    exec.shutdown();
}
Also used: HashMap(java.util.HashMap) DateTime(org.joda.time.DateTime) MinMaxPriorityQueue(com.google.common.collect.MinMaxPriorityQueue) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) Test(org.junit.Test)

Example 25 with ListeningExecutorService

Use of com.google.common.util.concurrent.ListeningExecutorService in project druid by druid-io: class CompressionStrategyTest, method testKnownSizeConcurrency.

@Test(timeout = 60000)
public void testKnownSizeConcurrency() throws Exception {
    final int numThreads = 20;
    ListeningExecutorService threadPoolExecutor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(numThreads));
    List<ListenableFuture<?>> results = new ArrayList<>();
    for (int i = 0; i < numThreads; ++i) {
        results.add(threadPoolExecutor.submit(new Runnable() {

            @Override
            public void run() {
                ByteBuffer compressed = ByteBuffer.wrap(compressionStrategy.getCompressor().compress(originalData));
                ByteBuffer output = ByteBuffer.allocate(originalData.length);
                // TODO: Lambdas would be nice here whenever we use Java 8
                compressionStrategy.getDecompressor().decompress(compressed, compressed.array().length, output, originalData.length);
                byte[] checkArray = new byte[DATA_SIZE];
                output.get(checkArray);
                Assert.assertArrayEquals("Uncompressed data does not match", originalData, checkArray);
            }
        }));
    }
    Futures.allAsList(results).get();
}
Also used: ArrayList(java.util.ArrayList) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
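
The TODO in the worker body points at an obvious cleanup once Java 8 is available: the anonymous Runnable collapses to a lambda and the join step stays the same. A hedged sketch of the equivalent submission loop follows; compressionStrategy, originalData, DATA_SIZE, numThreads, and threadPoolExecutor are the test's own fixtures, assumed in scope.

    List<ListenableFuture<?>> results = new ArrayList<>();
    for (int i = 0; i < numThreads; ++i) {
        results.add(threadPoolExecutor.submit(() -> {
            ByteBuffer compressed = ByteBuffer.wrap(compressionStrategy.getCompressor().compress(originalData));
            ByteBuffer output = ByteBuffer.allocate(originalData.length);
            compressionStrategy.getDecompressor().decompress(compressed, compressed.array().length, output, originalData.length);
            byte[] checkArray = new byte[DATA_SIZE];
            output.get(checkArray);
            Assert.assertArrayEquals("Uncompressed data does not match", originalData, checkArray);
        }));
    }
    // allAsList fails fast: if any worker throws, get() rethrows that first
    // failure wrapped in an ExecutionException, failing the test within the timeout.
    Futures.allAsList(results).get();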

Aggregations

ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService): 201 usages
Test (org.junit.Test): 115 usages
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 75 usages
ArrayList (java.util.ArrayList): 43 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 29 usages
ExecutorService (java.util.concurrent.ExecutorService): 28 usages
IOException (java.io.IOException): 25 usages
ExecutionException (java.util.concurrent.ExecutionException): 25 usages
Interval (org.joda.time.Interval): 25 usages
DateTime (org.joda.time.DateTime): 23 usages
List (java.util.List): 21 usages
Callable (java.util.concurrent.Callable): 20 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 20 usages
DruidServer (io.druid.client.DruidServer): 18 usages
DataSegment (io.druid.timeline.DataSegment): 18 usages
DruidServer (org.apache.druid.client.DruidServer): 17 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 16 usages
File (java.io.File): 16 usages
Map (java.util.Map): 16 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 15 usages