Search in sources:

Example 41 with Pair

use of io.druid.java.util.common.Pair in project druid by druid-io.

In the class StringDimensionHandlerTest, the method getAdapters:

/**
 * Builds two single-row on-heap incremental indexes over the same dimension list and wraps
 * each in an {@link IncrementalIndexAdapter} so tests can compare them.
 *
 * @param dims   dimension names shared by both indexes
 * @param event1 row ingested into the first index at the start of TEST_INTERVAL
 * @param event2 row ingested into the second index 3 ms after the start of TEST_INTERVAL
 * @return a pair of adapters over the two freshly built indexes
 * @throws Exception if ingesting a row into an index fails
 */
private static Pair<IncrementalIndexAdapter, IncrementalIndexAdapter> getAdapters(List<String> dims, Map<String, Object> event1, Map<String, Object> event2) throws Exception {
    // The two indexes are configured identically; only the ingested row and its timestamp differ,
    // so the duplicated construction code is factored into a helper.
    IncrementalIndexAdapter adapter1 = makeSingleRowAdapter(dims, event1, TEST_INTERVAL.getStartMillis());
    IncrementalIndexAdapter adapter2 = makeSingleRowAdapter(dims, event2, TEST_INTERVAL.getStartMillis() + 3);
    return new Pair<>(adapter1, adapter2);
}

/**
 * Creates an OnheapIncrementalIndex with the standard test configuration (count aggregator,
 * NONE granularity, rollup enabled), adds a single row at {@code timestamp}, and adapts it.
 */
private static IncrementalIndexAdapter makeSingleRowAdapter(List<String> dims, Map<String, Object> event, long timestamp) throws Exception {
    IncrementalIndex index = new OnheapIncrementalIndex(TEST_INTERVAL.getStartMillis(), Granularities.NONE, true, new DimensionsSpec(DimensionsSpec.getDefaultSchemas(dims), null, null), new AggregatorFactory[] { new CountAggregatorFactory("count") }, 1000);
    index.add(new MapBasedInputRow(timestamp, dims, event));
    return new IncrementalIndexAdapter(TEST_INTERVAL, index, INDEX_SPEC.getBitmapSerdeFactory().getBitmapFactory());
}
Also used : IncrementalIndexAdapter(io.druid.segment.incremental.IncrementalIndexAdapter) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) IncrementalIndex(io.druid.segment.incremental.IncrementalIndex) OnheapIncrementalIndex(io.druid.segment.incremental.OnheapIncrementalIndex) OnheapIncrementalIndex(io.druid.segment.incremental.OnheapIncrementalIndex) DimensionsSpec(io.druid.data.input.impl.DimensionsSpec) MapBasedInputRow(io.druid.data.input.MapBasedInputRow) Pair(io.druid.java.util.common.Pair)

Example 42 with Pair

use of io.druid.java.util.common.Pair in project druid by druid-io.

In the class VarianceAggregatorCollectorTest, the method testVariance:

/**
 * Verifies that VarianceAggregatorCollector matches directly computed population and sample
 * variances, and that randomly partitioning the input across several collectors (and across
 * several buffer aggregators) and then merging them yields the same variances.
 *
 * Fix over the original: Assert.assertEquals takes (expected, actual, delta) — the original
 * passed the arguments in (actual, expected) order, which does not change pass/fail but
 * produces misleading failure messages.
 */
@Test
public void testVariance() {
    // NOTE(review): unseeded Random means the partitioning differs between runs; the assertions
    // should hold for any partitioning, but consider seeding for reproducibility.
    Random random = new Random();
    for (float[] values : Arrays.asList(market_upfront, market_total_market)) {
        // Reference values computed directly from the definition of variance.
        double sum = 0;
        for (float f : values) {
            sum += f;
        }
        final double mean = sum / values.length;
        double temp = 0;
        for (float f : values) {
            temp += Math.pow(f - mean, 2);
        }
        final double variance_pop = temp / values.length;
        final double variance_sample = temp / (values.length - 1);
        // Single collector fed with every value must match the reference values.
        VarianceAggregatorCollector holder = new VarianceAggregatorCollector();
        for (float f : values) {
            holder.add(f);
        }
        Assert.assertEquals(variance_pop, holder.getVariance(true), 0.001);
        Assert.assertEquals(variance_sample, holder.getVariance(false), 0.001);
        for (int mergeOn : new int[] { 2, 3, 5, 9 }) {
            // holders1: plain collectors merged via combineValues.
            // holders2: buffer aggregators merged via an ObjectVarianceAggregator.
            List<VarianceAggregatorCollector> holders1 = Lists.newArrayListWithCapacity(mergeOn);
            List<Pair<VarianceBufferAggregator, ByteBuffer>> holders2 = Lists.newArrayListWithCapacity(mergeOn);
            FloatHandOver valueHandOver = new FloatHandOver();
            for (int i = 0; i < mergeOn; i++) {
                holders1.add(new VarianceAggregatorCollector());
                holders2.add(Pair.<VarianceBufferAggregator, ByteBuffer>of(new VarianceBufferAggregator.FloatVarianceAggregator("XX", valueHandOver), ByteBuffer.allocate(VarianceAggregatorCollector.getMaxIntermediateSize())));
            }
            // Scatter each value to a randomly chosen partition in both representations.
            for (float f : values) {
                valueHandOver.v = f;
                int index = random.nextInt(mergeOn);
                holders1.get(index).add(f);
                holders2.get(index).lhs.aggregate(holders2.get(index).rhs, 0);
            }
            // Merge the plain collectors pairwise into the first one.
            VarianceAggregatorCollector holder1 = holders1.get(0);
            for (int i = 1; i < mergeOn; i++) {
                holder1 = (VarianceAggregatorCollector) VarianceAggregatorCollector.combineValues(holder1, holders1.get(i));
            }
            // Merge the buffer-aggregator partials through an object aggregator.
            ObjectHandOver collectHandOver = new ObjectHandOver();
            ByteBuffer buffer = ByteBuffer.allocate(VarianceAggregatorCollector.getMaxIntermediateSize());
            VarianceBufferAggregator.ObjectVarianceAggregator merger = new VarianceBufferAggregator.ObjectVarianceAggregator("xxx", collectHandOver);
            for (int i = 0; i < mergeOn; i++) {
                collectHandOver.v = holders2.get(i).lhs.get(holders2.get(i).rhs, 0);
                merger.aggregate(buffer, 0);
            }
            VarianceAggregatorCollector holder2 = (VarianceAggregatorCollector) merger.get(buffer, 0);
            // Merged results use a looser tolerance than the single-collector case above.
            Assert.assertEquals(variance_pop, holder2.getVariance(true), 0.01);
            Assert.assertEquals(variance_sample, holder2.getVariance(false), 0.01);
        }
    }
}
Also used : ByteBuffer(java.nio.ByteBuffer) Random(java.util.Random) Pair(io.druid.java.util.common.Pair) Test(org.junit.Test)

Example 43 with Pair

use of io.druid.java.util.common.Pair in project druid by druid-io.

In the class TieredBrokerHostSelector, the method select:

/**
 * Chooses the broker service name (and its discovery selector) that should handle the query.
 * Configured strategies are consulted first; if none picks a broker, the load rules for the
 * query's first datasource are scanned to find the applicable tier, falling back to the
 * default broker service when nothing matches.
 */
public Pair<String, ServerDiscoverySelector> select(final Query<T> query) {
    // Until both this selector and the rule manager have started, only the default lookup is safe.
    synchronized (lock) {
        if (!ruleManager.isStarted() || !started) {
            return getDefaultLookup();
        }
    }
    String serviceName = null;
    // Give each configured strategy a chance to pick a broker explicitly.
    for (TieredBrokerSelectorStrategy strategy : strategies) {
        final Optional<String> candidate = strategy.getBrokerServiceName(tierConfig, query);
        if (candidate.isPresent()) {
            serviceName = candidate.get();
            break;
        }
    }
    if (serviceName == null) {
        // For union queries the tier is selected based on the rules for the first dataSource.
        final List<Rule> rules = ruleManager.getRulesWithDefault(Iterables.getFirst(query.getDataSource().getNames(), null));
        final DateTime referenceTime = new DateTime();
        // Find the rule that can apply to the entire set of intervals: for each interval,
        // advance to the first applicable LoadRule positioned past the furthest one seen so far.
        int furthestRulePosition = -1;
        LoadRule chosenRule = null;
        for (Interval interval : query.getIntervals()) {
            int position = 0;
            for (Rule rule : rules) {
                if (rule instanceof LoadRule && position > furthestRulePosition && rule.appliesTo(interval, referenceTime)) {
                    furthestRulePosition = position;
                    chosenRule = (LoadRule) rule;
                    break;
                }
                position++;
            }
        }
        if (chosenRule == null) {
            return getDefaultLookup();
        }
        // Within the chosen rule, take the broker mapped from the highest-priority matching tier.
        for (Map.Entry<String, String> tierAndBroker : tierConfig.getTierToBrokerMap().entrySet()) {
            if (chosenRule.getTieredReplicants().containsKey(tierAndBroker.getKey())) {
                serviceName = tierAndBroker.getValue();
                break;
            }
        }
    }
    if (serviceName == null) {
        log.error("WTF?! No brokerServiceName found for datasource[%s], intervals[%s]. Using default[%s].", query.getDataSource(), query.getIntervals(), tierConfig.getDefaultBrokerServiceName());
        serviceName = tierConfig.getDefaultBrokerServiceName();
    }
    ServerDiscoverySelector selector = selectorMap.get(serviceName);
    if (selector == null) {
        log.error("WTF?! No selector found for brokerServiceName[%s]. Using default selector for[%s]", serviceName, tierConfig.getDefaultBrokerServiceName());
        selector = selectorMap.get(tierConfig.getDefaultBrokerServiceName());
    }
    return new Pair<>(serviceName, selector);
}
Also used : DateTime(org.joda.time.DateTime) ServerDiscoverySelector(io.druid.curator.discovery.ServerDiscoverySelector) LoadRule(io.druid.server.coordinator.rules.LoadRule) LoadRule(io.druid.server.coordinator.rules.LoadRule) Rule(io.druid.server.coordinator.rules.Rule) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Map(java.util.Map) Interval(org.joda.time.Interval) Pair(io.druid.java.util.common.Pair)

Example 44 with Pair

use of io.druid.java.util.common.Pair in project druid by druid-io.

In the class BrokerServerViewTest, the method assertValues:

/**
 * Asserts that each actual TimelineObjectHolder matches, in order, the expected
 * (interval, (version, (server, segment))) tuple at the same position.
 */
private void assertValues(List<Pair<Interval, Pair<String, Pair<DruidServer, DataSegment>>>> expected, List<TimelineObjectHolder> actual) {
    Assert.assertEquals(expected.size(), actual.size());
    int position = 0;
    for (Pair<Interval, Pair<String, Pair<DruidServer, DataSegment>>> want : expected) {
        final TimelineObjectHolder<String, ServerSelector> got = actual.get(position++);
        Assert.assertEquals(want.lhs, got.getInterval());
        Assert.assertEquals(want.rhs.lhs, got.getVersion());
        // Each holder is expected to contain exactly one complete partition chunk.
        final PartitionHolder<ServerSelector> partitions = got.getObject();
        Assert.assertTrue(partitions.isComplete());
        Assert.assertEquals(1, Iterables.size(partitions));
        final ServerSelector selector = ((SingleElementPartitionChunk<ServerSelector>) partitions.iterator().next()).getObject();
        Assert.assertFalse(selector.isEmpty());
        Assert.assertEquals(want.rhs.rhs.lhs, selector.pick().getServer());
        Assert.assertEquals(want.rhs.rhs.rhs, selector.getSegment());
    }
}
Also used : ServerSelector(io.druid.client.selector.ServerSelector) DataSegment(io.druid.timeline.DataSegment) SingleElementPartitionChunk(io.druid.timeline.partition.SingleElementPartitionChunk) Interval(org.joda.time.Interval) Pair(io.druid.java.util.common.Pair)

Example 45 with Pair

use of io.druid.java.util.common.Pair in project druid by druid-io.

In the class BrokerServerViewTest, the method testMultipleServerAddedRemovedSegment:

/**
 * Announces five segments (one per server) over overlapping intervals and versions, then
 * verifies the broker's timeline view after the initial announcements, after removing the
 * broad v2 segment, and finally after removing everything. Latch-driven: each phase waits
 * on a CountDownLatch before asserting, so the ordering of the steps below is significant.
 */
@Test
public void testMultipleServerAddedRemovedSegment() throws Exception {
    segmentViewInitLatch = new CountDownLatch(1);
    // Five segments will be announced, one per server.
    segmentAddedLatch = new CountDownLatch(5);
    // temporarily set latch count to 1
    segmentRemovedLatch = new CountDownLatch(1);
    setupViews();
    // NOTE(review): "locahost:0" looks like a typo for "localhost:0"; it only serves as a
    // server identifier in this test, but confirm it is intentional.
    final List<DruidServer> druidServers = Lists.transform(ImmutableList.<String>of("locahost:0", "localhost:1", "localhost:2", "localhost:3", "localhost:4"), new Function<String, DruidServer>() {

        @Override
        public DruidServer apply(String input) {
            return new DruidServer(input, input, 10000000L, "historical", "default_tier", 0);
        }
    });
    for (DruidServer druidServer : druidServers) {
        setupZNodeForServer(druidServer, zkPathsConfig, jsonMapper);
    }
    // Segment i is announced by server i; v2 spans the whole window and shadows parts of v1.
    final List<DataSegment> segments = Lists.transform(ImmutableList.<Pair<String, String>>of(Pair.of("2011-04-01/2011-04-03", "v1"), Pair.of("2011-04-03/2011-04-06", "v1"), Pair.of("2011-04-01/2011-04-09", "v2"), Pair.of("2011-04-06/2011-04-09", "v3"), Pair.of("2011-04-01/2011-04-02", "v3")), new Function<Pair<String, String>, DataSegment>() {

        @Override
        public DataSegment apply(Pair<String, String> input) {
            return dataSegmentWithIntervalAndVersion(input.lhs, input.rhs);
        }
    });
    for (int i = 0; i < 5; ++i) {
        announceSegmentForServer(druidServers.get(i), segments.get(i), zkPathsConfig, jsonMapper);
    }
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentAddedLatch));
    // With all five announced, v3 and v2 segments shadow the older v1 ones.
    TimelineLookup timeline = brokerServerView.getTimeline(new TableDataSource("test_broker_server_view"));
    assertValues(Arrays.asList(createExpected("2011-04-01/2011-04-02", "v3", druidServers.get(4), segments.get(4)), createExpected("2011-04-02/2011-04-06", "v2", druidServers.get(2), segments.get(2)), createExpected("2011-04-06/2011-04-09", "v3", druidServers.get(3), segments.get(3))), (List<TimelineObjectHolder>) timeline.lookup(new Interval("2011-04-01/2011-04-09")));
    // unannounce the segment created by dataSegmentWithIntervalAndVersion("2011-04-01/2011-04-09", "v2")
    unannounceSegmentForServer(druidServers.get(2), segments.get(2), zkPathsConfig);
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
    // renew segmentRemovedLatch since we still have 4 segments to unannounce
    segmentRemovedLatch = new CountDownLatch(4);
    // With v2 gone, the previously shadowed v1 segments become visible again.
    timeline = brokerServerView.getTimeline(new TableDataSource("test_broker_server_view"));
    assertValues(Arrays.asList(createExpected("2011-04-01/2011-04-02", "v3", druidServers.get(4), segments.get(4)), createExpected("2011-04-02/2011-04-03", "v1", druidServers.get(0), segments.get(0)), createExpected("2011-04-03/2011-04-06", "v1", druidServers.get(1), segments.get(1)), createExpected("2011-04-06/2011-04-09", "v3", druidServers.get(3), segments.get(3))), (List<TimelineObjectHolder>) timeline.lookup(new Interval("2011-04-01/2011-04-09")));
    // unannounce all the segments
    for (int i = 0; i < 5; ++i) {
        // skip the one that was previously unannounced
        if (i != 2) {
            unannounceSegmentForServer(druidServers.get(i), segments.get(i), zkPathsConfig);
        }
    }
    Assert.assertTrue(timing.forWaiting().awaitLatch(segmentRemovedLatch));
    // The timeline must be empty once every segment has been removed.
    Assert.assertEquals(0, ((List<TimelineObjectHolder>) timeline.lookup(new Interval("2011-04-01/2011-04-09"))).size());
}
Also used : CountDownLatch(java.util.concurrent.CountDownLatch) DataSegment(io.druid.timeline.DataSegment) TimelineObjectHolder(io.druid.timeline.TimelineObjectHolder) TableDataSource(io.druid.query.TableDataSource) TimelineLookup(io.druid.timeline.TimelineLookup) Pair(io.druid.java.util.common.Pair) Interval(org.joda.time.Interval) Test(org.junit.Test)

Aggregations

Pair (io.druid.java.util.common.Pair)62 Test (org.junit.Test)26 Interval (org.joda.time.Interval)15 DataSegment (io.druid.timeline.DataSegment)11 Map (java.util.Map)11 ByteBuffer (java.nio.ByteBuffer)10 HashMap (java.util.HashMap)9 SerializablePair (io.druid.collections.SerializablePair)8 SegmentDescriptor (io.druid.query.SegmentDescriptor)8 List (java.util.List)8 ImmutableMap (com.google.common.collect.ImmutableMap)7 Executor (java.util.concurrent.Executor)7 DateTime (org.joda.time.DateTime)7 Function (com.google.common.base.Function)6 ListenableFuture (com.google.common.util.concurrent.ListenableFuture)6 MapBasedInputRow (io.druid.data.input.MapBasedInputRow)6 TaskStatus (io.druid.indexing.common.TaskStatus)6 ISE (io.druid.java.util.common.ISE)6 Access (io.druid.server.security.Access)6 Action (io.druid.server.security.Action)6