Example 1 with Range

Use of com.google.common.collect.Range in project pinot by linkedin.

From the class TimeOnTimeComparisonHandler, method handle:

public TimeOnTimeComparisonResponse handle(TimeOnTimeComparisonRequest comparisonRequest) throws Exception {
    ThirdEyeRequestBuilder builder = new ThirdEyeRequestBuilder();
    builder.setCollection(comparisonRequest.getCollectionName());
    List<Range<DateTime>> baselineTimeranges = new ArrayList<>();
    List<Range<DateTime>> currentTimeranges = new ArrayList<>();
    TimeGranularity aggregationTimeGranularity = comparisonRequest.getAggregationTimeGranularity();
    // baseline time ranges
    DateTime baselineStart = comparisonRequest.getBaselineStart();
    DateTime baselineEnd = comparisonRequest.getBaselineEnd();
    // current time ranges
    DateTime currentStart = comparisonRequest.getCurrentStart();
    DateTime currentEnd = comparisonRequest.getCurrentEnd();
    if (comparisonRequest.isEndDateInclusive()) {
        // ThirdEyeRequest is exclusive endpoint, so increment end by one bucket
        currentEnd = TimeRangeUtils.increment(currentEnd, aggregationTimeGranularity);
        baselineEnd = TimeRangeUtils.increment(baselineEnd, aggregationTimeGranularity);
    }
    baselineTimeranges = TimeRangeUtils.computeTimeRanges(aggregationTimeGranularity, baselineStart, baselineEnd);
    currentTimeranges = TimeRangeUtils.computeTimeRanges(aggregationTimeGranularity, currentStart, currentEnd);
    // create baseline request
    ThirdEyeRequest baselineRequest = createThirdEyeRequest(BASELINE, comparisonRequest, baselineStart, baselineEnd);
    // create current request
    ThirdEyeRequest currentRequest = createThirdEyeRequest(CURRENT, comparisonRequest, currentStart, currentEnd);
    List<ThirdEyeRequest> requests = new ArrayList<>();
    requests.add(baselineRequest);
    requests.add(currentRequest);
    Map<ThirdEyeRequest, Future<ThirdEyeResponse>> futureResponseMap;
    futureResponseMap = queryCache.getQueryResultsAsync(requests);
    ThirdEyeResponse baselineResponse = null;
    ThirdEyeResponse currentResponse = null;
    for (Entry<ThirdEyeRequest, Future<ThirdEyeResponse>> entry : futureResponseMap.entrySet()) {
        ThirdEyeRequest request = entry.getKey();
        Future<ThirdEyeResponse> responseFuture = entry.getValue();
        // note: the timeout unit here is SECONDS as written, so 60000 is roughly 16.7 hours
        ThirdEyeResponse response = responseFuture.get(60000, TimeUnit.SECONDS);
        if (BASELINE.equals(request.getRequestReference())) {
            baselineResponse = response;
        } else if (CURRENT.equals(request.getRequestReference())) {
            currentResponse = response;
        }
    }
    TimeOnTimeResponseParser timeOnTimeResponseParser = new TimeOnTimeResponseParser(baselineResponse, currentResponse,
            baselineTimeranges, currentTimeranges, comparisonRequest.getAggregationTimeGranularity(),
            comparisonRequest.getGroupByDimensions());
    List<Row> rows = timeOnTimeResponseParser.parseResponse();
    // compute the derived metrics
    computeDerivedMetrics(comparisonRequest, rows);
    return new TimeOnTimeComparisonResponse(rows);
}
Also used : ThirdEyeRequestBuilder(com.linkedin.thirdeye.client.ThirdEyeRequest.ThirdEyeRequestBuilder) ArrayList(java.util.ArrayList) ThirdEyeResponse(com.linkedin.thirdeye.client.ThirdEyeResponse) Range(com.google.common.collect.Range) DateTime(org.joda.time.DateTime) ThirdEyeRequest(com.linkedin.thirdeye.client.ThirdEyeRequest) TimeGranularity(com.linkedin.thirdeye.api.TimeGranularity) Future(java.util.concurrent.Future)
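
The handler builds two parallel lists of half-open DateTime buckets, one for the baseline window and one for the current window, and hands both to the response parser. Below is a minimal standalone sketch of that bucket-pairing idea, assuming Guava and Joda-Time on the classpath; the hourly granularity, the week-over-week offset, and the index-by-index pairing are illustrative assumptions, not taken from the source.

import com.google.common.collect.Range;
import org.joda.time.DateTime;
import java.util.ArrayList;
import java.util.List;

public class BucketPairingSketch {
    public static void main(String[] args) {
        DateTime baselineStart = new DateTime(2017, 1, 1, 0, 0);
        // hypothetical week-over-week comparison: current window starts 7 days later
        DateTime currentStart = baselineStart.plusDays(7);
        List<Range<DateTime>> baseline = hourlyBuckets(baselineStart, 3);
        List<Range<DateTime>> current = hourlyBuckets(currentStart, 3);
        // compare bucket i of the baseline window with bucket i of the current window
        for (int i = 0; i < baseline.size(); i++) {
            System.out.println(baseline.get(i) + "  vs  " + current.get(i));
        }
    }

    // [start, start+1h), [start+1h, start+2h), ... : half-open buckets like computeTimeRanges produces
    private static List<Range<DateTime>> hourlyBuckets(DateTime start, int count) {
        List<Range<DateTime>> buckets = new ArrayList<>();
        for (int i = 0; i < count; i++) {
            buckets.add(Range.closedOpen(start.plusHours(i), start.plusHours(i + 1)));
        }
        return buckets;
    }
}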

Example 2 with Range

Use of com.google.common.collect.Range in project pinot by linkedin.

From the class TimeRangeUtils, method computeTimeRanges:

public static List<Range<DateTime>> computeTimeRanges(TimeGranularity granularity, DateTime start, DateTime end) {
    List<Range<DateTime>> timeranges = new ArrayList<>();
    if (granularity == null) {
        timeranges.add(Range.closedOpen(start, end));
        return timeranges;
    }
    DateTime current = start;
    DateTime newCurrent = null;
    while (current.isBefore(end)) {
        newCurrent = increment(current, granularity);
        timeranges.add(Range.closedOpen(current, newCurrent));
        current = newCurrent;
    }
    return timeranges;
}
Also used : ArrayList(java.util.ArrayList) Range(com.google.common.collect.Range) TimeRange(com.linkedin.thirdeye.api.TimeRange) DateTime(org.joda.time.DateTime)
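
Each bucket is built with Range.closedOpen, so a boundary instant belongs to exactly one bucket and adjacent buckets never double-count it. A small sketch of that behaviour, assuming Guava and Joda-Time; the timestamps are arbitrary.

import com.google.common.collect.Range;
import org.joda.time.DateTime;

public class HalfOpenBucketSketch {
    public static void main(String[] args) {
        DateTime start = new DateTime(2017, 1, 1, 0, 0);
        DateTime end = start.plusHours(1);
        Range<DateTime> bucket = Range.closedOpen(start, end);
        System.out.println(bucket.contains(start));              // true: lower endpoint is included
        System.out.println(bucket.contains(end.minusMillis(1))); // true: still inside the bucket
        System.out.println(bucket.contains(end));                // false: belongs to the next bucket
    }
}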

Example 3 with Range

Use of com.google.common.collect.Range in project pulsar by yahoo.

From the class ManagedLedgerOfflineBacklog, method calculateCursorBacklogs:

private void calculateCursorBacklogs(final ManagedLedgerFactoryImpl factory, final DestinationName dn, final NavigableMap<Long, MLDataFormats.ManagedLedgerInfo.LedgerInfo> ledgers, final PersistentOfflineTopicStats offlineTopicStats) throws Exception {
    if (ledgers.size() == 0) {
        return;
    }
    String managedLedgerName = dn.getPersistenceNamingEncoding();
    MetaStore store = factory.getMetaStore();
    BookKeeper bk = factory.getBookKeeper();
    final CountDownLatch allCursorsCounter = new CountDownLatch(1);
    final long errorInReadingCursor = (long) -1;
    ConcurrentOpenHashMap<String, Long> ledgerRetryMap = new ConcurrentOpenHashMap<>();
    final MLDataFormats.ManagedLedgerInfo.LedgerInfo ledgerInfo = ledgers.lastEntry().getValue();
    final PositionImpl lastLedgerPosition = new PositionImpl(ledgerInfo.getLedgerId(), ledgerInfo.getEntries() - 1);
    if (log.isDebugEnabled()) {
        log.debug("[{}] Last ledger position {}", managedLedgerName, lastLedgerPosition);
    }
    store.getCursors(managedLedgerName, new MetaStore.MetaStoreCallback<List<String>>() {

        @Override
        public void operationComplete(List<String> cursors, MetaStore.Stat v) {
            // Load existing cursors
            if (log.isDebugEnabled()) {
                log.debug("[{}] Found {} cursors", managedLedgerName, cursors.size());
            }
            if (cursors.isEmpty()) {
                allCursorsCounter.countDown();
                return;
            }
            final CountDownLatch cursorCounter = new CountDownLatch(cursors.size());
            for (final String cursorName : cursors) {
                // determine subscription position from cursor ledger
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Loading cursor {}", managedLedgerName, cursorName);
                }
                AsyncCallback.OpenCallback cursorLedgerOpenCb = (rc, lh, ctx1) -> {
                    long ledgerId = lh.getId();
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] Opened cursor ledger {} for cursor {}. rc={}", managedLedgerName, ledgerId, cursorName, rc);
                    }
                    if (rc != BKException.Code.OK) {
                        log.warn("[{}] Error opening metadata ledger {} for cursor {}: {}", managedLedgerName, ledgerId, cursorName, BKException.getMessage(rc));
                        cursorCounter.countDown();
                        return;
                    }
                    long lac = lh.getLastAddConfirmed();
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] Cursor {} LAC {} read from ledger {}", managedLedgerName, cursorName, lac, ledgerId);
                    }
                    if (lac == LedgerHandle.INVALID_ENTRY_ID) {
                        // save the ledger id and cursor to retry outside of this call back
                        // since we are trying to read the same cursor ledger, we will block until
                        // this current callback completes, since an attempt to read the entry
                        // will block behind this current operation to complete
                        ledgerRetryMap.put(cursorName, ledgerId);
                        log.info("[{}] Cursor {} LAC {} read from ledger {}", managedLedgerName, cursorName, lac, ledgerId);
                        cursorCounter.countDown();
                        return;
                    }
                    final long entryId = lac;
                    // read last acked message position for subscription
                    lh.asyncReadEntries(entryId, entryId, new AsyncCallback.ReadCallback() {

                        @Override
                        public void readComplete(int rc, LedgerHandle lh, Enumeration<LedgerEntry> seq, Object ctx) {
                            try {
                                if (log.isDebugEnabled()) {
                                    log.debug("readComplete rc={} entryId={}", rc, entryId);
                                }
                                if (rc != BKException.Code.OK) {
                                    log.warn("[{}] Error reading from metadata ledger {} for cursor {}: {}", managedLedgerName, ledgerId, cursorName, BKException.getMessage(rc));
                                    // indicate that this cursor should be excluded
                                    offlineTopicStats.addCursorDetails(cursorName, errorInReadingCursor, lh.getId());
                                } else {
                                    LedgerEntry entry = seq.nextElement();
                                    MLDataFormats.PositionInfo positionInfo;
                                    try {
                                        positionInfo = MLDataFormats.PositionInfo.parseFrom(entry.getEntry());
                                    } catch (InvalidProtocolBufferException e) {
                                        log.warn("[{}] Error reading position from metadata ledger {} for cursor {}: {}", managedLedgerName, ledgerId, cursorName, e);
                                        offlineTopicStats.addCursorDetails(cursorName, errorInReadingCursor, lh.getId());
                                        return;
                                    }
                                    final PositionImpl lastAckedMessagePosition = new PositionImpl(positionInfo);
                                    if (log.isDebugEnabled()) {
                                        log.debug("[{}] Cursor {} MD {} read last ledger position {}", managedLedgerName, cursorName, lastAckedMessagePosition, lastLedgerPosition);
                                    }
                                    // calculate cursor backlog
                                    Range<PositionImpl> range = Range.openClosed(lastAckedMessagePosition, lastLedgerPosition);
                                    if (log.isDebugEnabled()) {
                                        log.debug("[{}] Calculating backlog for cursor {} using range {}", managedLedgerName, cursorName, range);
                                    }
                                    long cursorBacklog = getNumberOfEntries(range, ledgers);
                                    offlineTopicStats.messageBacklog += cursorBacklog;
                                    offlineTopicStats.addCursorDetails(cursorName, cursorBacklog, lh.getId());
                                }
                            } finally {
                                cursorCounter.countDown();
                            }
                        }
                    }, null);
                };
                // end of cursor meta read callback
                store.asyncGetCursorInfo(managedLedgerName, cursorName, new MetaStore.MetaStoreCallback<MLDataFormats.ManagedCursorInfo>() {

                    @Override
                    public void operationComplete(MLDataFormats.ManagedCursorInfo info, MetaStore.Stat version) {
                        long cursorLedgerId = info.getCursorsLedgerId();
                        if (log.isDebugEnabled()) {
                            log.debug("[{}] Cursor {} meta-data read ledger id {}", managedLedgerName, cursorName, cursorLedgerId);
                        }
                        if (cursorLedgerId != -1) {
                            bk.asyncOpenLedgerNoRecovery(cursorLedgerId, digestType, password, cursorLedgerOpenCb, null);
                        } else {
                            PositionImpl lastAckedMessagePosition = new PositionImpl(info.getMarkDeleteLedgerId(), info.getMarkDeleteEntryId());
                            Range<PositionImpl> range = Range.openClosed(lastAckedMessagePosition, lastLedgerPosition);
                            if (log.isDebugEnabled()) {
                                log.debug("[{}] Calculating backlog for cursor {} using range {}", managedLedgerName, cursorName, range);
                            }
                            long cursorBacklog = getNumberOfEntries(range, ledgers);
                            offlineTopicStats.messageBacklog += cursorBacklog;
                            offlineTopicStats.addCursorDetails(cursorName, cursorBacklog, cursorLedgerId);
                            cursorCounter.countDown();
                        }
                    }

                    @Override
                    public void operationFailed(ManagedLedgerException.MetaStoreException e) {
                        log.warn("[{}] Unable to obtain cursor ledger for cursor {}: {}", managedLedgerName, cursorName, e);
                        cursorCounter.countDown();
                    }
                });
            }
            // for every cursor find backlog
            try {
                if (accurate) {
                    cursorCounter.await();
                } else {
                    cursorCounter.await(META_READ_TIMEOUT_SECONDS, TimeUnit.SECONDS);
                }
            } catch (Exception e) {
                log.warn("[{}] Error reading subscription positions{}", managedLedgerName, e);
            } finally {
                allCursorsCounter.countDown();
            }
        }

        @Override
        public void operationFailed(ManagedLedgerException.MetaStoreException e) {
            log.warn("[{}] Failed to get the cursors list", managedLedgerName, e);
            allCursorsCounter.countDown();
        }
    });
    if (accurate) {
        allCursorsCounter.await();
    } else {
        allCursorsCounter.await(META_READ_TIMEOUT_SECONDS, TimeUnit.SECONDS);
    }
    // go through ledgers where LAC was -1
    if (accurate && ledgerRetryMap.size() > 0) {
        ledgerRetryMap.forEach((cursorName, ledgerId) -> {
            if (log.isDebugEnabled()) {
                log.debug("Cursor {} Ledger {} Trying to obtain MD from BkAdmin", cursorName, ledgerId);
            }
            PositionImpl lastAckedMessagePosition = tryGetMDPosition(bk, ledgerId, cursorName);
            if (lastAckedMessagePosition == null) {
                log.warn("[{}] Cursor {} read from ledger {}. Unable to determine cursor position", managedLedgerName, cursorName, ledgerId);
            } else {
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Cursor {} read from ledger using bk admin {}. position {}", managedLedgerName, cursorName, ledgerId, lastAckedMessagePosition);
                }
                // calculate cursor backlog
                Range<PositionImpl> range = Range.openClosed(lastAckedMessagePosition, lastLedgerPosition);
                if (log.isDebugEnabled()) {
                    log.debug("[{}] Calculating backlog for cursor {} using range {}", managedLedgerName, cursorName, range);
                }
                long cursorBacklog = getNumberOfEntries(range, ledgers);
                offlineTopicStats.messageBacklog += cursorBacklog;
                offlineTopicStats.addCursorDetails(cursorName, cursorBacklog, ledgerId);
            }
        });
    }
}
Also used : ConcurrentOpenHashMap(com.yahoo.pulsar.common.util.collections.ConcurrentOpenHashMap) AsyncCallback(org.apache.bookkeeper.client.AsyncCallback) ManagedLedgerException(org.apache.bookkeeper.mledger.ManagedLedgerException) MLDataFormats(org.apache.bookkeeper.mledger.proto.MLDataFormats) List(java.util.List) LedgerHandle(org.apache.bookkeeper.client.LedgerHandle) InvalidProtocolBufferException(com.google.protobuf.InvalidProtocolBufferException) BookKeeper(org.apache.bookkeeper.client.BookKeeper) CountDownLatch(java.util.concurrent.CountDownLatch) Range(com.google.common.collect.Range) BKException(org.apache.bookkeeper.client.BKException) LedgerEntry(org.apache.bookkeeper.client.LedgerEntry)
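
The backlog for each cursor is modelled as Range.openClosed(lastAckedMessagePosition, lastLedgerPosition): the last acknowledged position is excluded because it has already been consumed, while the last position written to the ledger is included because it is still outstanding. A simplified sketch of that convention, using plain long entry ids in place of PositionImpl; the ids are made up, and the real getNumberOfEntries walks per-ledger entry counts rather than subtracting endpoints.

import com.google.common.collect.Range;

public class BacklogRangeSketch {
    public static void main(String[] args) {
        long lastAcked = 41L;    // hypothetical last-acknowledged entry id
        long lastWritten = 100L; // hypothetical last entry id written to the ledger
        Range<Long> backlog = Range.openClosed(lastAcked, lastWritten);
        System.out.println(backlog.contains(lastAcked));   // false: already consumed
        System.out.println(backlog.contains(lastWritten)); // true: still outstanding
        // with contiguous entry ids the backlog size is simply the span of the range
        long entries = backlog.upperEndpoint() - backlog.lowerEndpoint();
        System.out.println(entries); // 59
    }
}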

Example 4 with Range

Use of com.google.common.collect.Range in project graylog2-server by Graylog2.

From the class FormatStringDecoratorTest, method getSearchResponse:

private SearchResponse getSearchResponse() {
    final IndexRangeSummary indexRangeSummary = IndexRangeSummary.create("graylog_0", Tools.nowUTC().minusDays(1), Tools.nowUTC(), null, 100);
    final ImmutableMultimap<String, Range<Integer>> hlRanges = ImmutableMultimap.of();
    final List<ResultMessageSummary> messages = ImmutableList.of(
            create(hlRanges, ImmutableMap.of("_id", "h", "field_a", "1", "field_b", "b"), "graylog_0"),
            create(hlRanges, ImmutableMap.of("_id", "h", "field_a", "1"), "graylog_0"),
            create(hlRanges, ImmutableMap.of("_id", "h", "field_b", "b"), "graylog_0"),
            create(hlRanges, ImmutableMap.of("_id", "i", "foo", "1"), "graylog_0"));
    return SearchResponse.builder()
            .query("foo")
            .builtQuery("foo")
            .usedIndices(ImmutableSet.of(indexRangeSummary))
            .messages(messages)
            .fields(ImmutableSet.of("field_a", "field_b", "foo"))
            .time(100L)
            .totalResults(messages.size())
            .from(Tools.nowUTC().minusHours(1))
            .to(Tools.nowUTC())
            .build();
}
Also used : IndexRangeSummary(org.graylog2.rest.models.system.indexer.responses.IndexRangeSummary) Range(com.google.common.collect.Range) ResultMessageSummary(org.graylog2.rest.models.messages.responses.ResultMessageSummary)
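
In this test, Range<Integer> is the element type of the per-field highlight multimap; the test passes an empty ImmutableMultimap because highlighting is not under test. A hypothetical non-empty map might look like the sketch below, where the field names and character offsets are invented purely for illustration.

import com.google.common.collect.ImmutableMultimap;
import com.google.common.collect.Range;

public class HighlightRangesSketch {
    public static void main(String[] args) {
        // field name -> character ranges to highlight within that field's value (all values illustrative)
        ImmutableMultimap<String, Range<Integer>> hlRanges = ImmutableMultimap.of(
                "message", Range.closedOpen(0, 5),
                "message", Range.closedOpen(12, 18),
                "source", Range.closedOpen(3, 9));
        System.out.println(hlRanges);
    }
}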

Example 5 with Range

Use of com.google.common.collect.Range in project drill by apache.

From the class BlockMapBuilder, method getEndpointByteMap:

/**
   * For a given FileWork, calculate how many bytes are available on each drillbit endpoint.
   *
   * @param work the FileWork to calculate endpoint bytes for
   * @return the number of bytes of this FileWork available on each drillbit endpoint
   * @throws IOException if the block locations for the file cannot be read
   */
public EndpointByteMap getEndpointByteMap(FileWork work) throws IOException {
    Stopwatch watch = Stopwatch.createStarted();
    Path fileName = new Path(work.getPath());
    ImmutableRangeMap<Long, BlockLocation> blockMap = getBlockMap(fileName);
    EndpointByteMapImpl endpointByteMap = new EndpointByteMapImpl();
    long start = work.getStart();
    long end = start + work.getLength();
    Range<Long> rowGroupRange = Range.closedOpen(start, end);
    // Find submap of ranges that intersect with the rowGroup
    ImmutableRangeMap<Long, BlockLocation> subRangeMap = blockMap.subRangeMap(rowGroupRange);
    // Iterate through each block in this submap and get the host for the block location
    for (Map.Entry<Range<Long>, BlockLocation> block : subRangeMap.asMapOfRanges().entrySet()) {
        String[] hosts;
        Range<Long> blockRange = block.getKey();
        try {
            hosts = block.getValue().getHosts();
        } catch (IOException ioe) {
            throw new RuntimeException("Failed to get hosts for block location", ioe);
        }
        Range<Long> intersection = rowGroupRange.intersection(blockRange);
        long bytes = intersection.upperEndpoint() - intersection.lowerEndpoint();
        // For each host in the current block location, add the intersecting bytes to the corresponding endpoint
        for (String host : hosts) {
            DrillbitEndpoint endpoint = getDrillBitEndpoint(host);
            if (endpoint != null) {
                endpointByteMap.add(endpoint, bytes);
            } else {
                logger.info("Failure finding Drillbit running on host {}.  Skipping affinity to that host.", host);
            }
        }
    }
    logger.debug("FileWork group ({},{}) max bytes {}", work.getPath(), work.getStart(), endpointByteMap.getMaxBytes());
    logger.debug("Took {} ms to set endpoint bytes", watch.stop().elapsed(TimeUnit.MILLISECONDS));
    return endpointByteMap;
}
Also used : Path(org.apache.hadoop.fs.Path) Stopwatch(com.google.common.base.Stopwatch) IOException(java.io.IOException) BlockLocation(org.apache.hadoop.fs.BlockLocation) Range(com.google.common.collect.Range) DrillbitEndpoint(org.apache.drill.exec.proto.CoordinationProtos.DrillbitEndpoint) HashMap(java.util.HashMap) ImmutableRangeMap(com.google.common.collect.ImmutableRangeMap) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap)
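
The key step is blockMap.subRangeMap(rowGroupRange), which restricts the block map to the blocks overlapping the row group; intersecting each block range with the row group then gives the number of bytes that block contributes. A simplified, self-contained sketch with string block names standing in for BlockLocation; the block sizes and offsets are illustrative assumptions.

import com.google.common.collect.ImmutableRangeMap;
import com.google.common.collect.Range;
import java.util.Map;

public class BlockIntersectionSketch {
    public static void main(String[] args) {
        // three fixed-size "blocks" covering byte offsets 0..384 (sizes are illustrative)
        ImmutableRangeMap<Long, String> blockMap = ImmutableRangeMap.<Long, String>builder()
                .put(Range.closedOpen(0L, 128L), "block-0")
                .put(Range.closedOpen(128L, 256L), "block-1")
                .put(Range.closedOpen(256L, 384L), "block-2")
                .build();
        // a row group that overlaps parts of all three blocks
        Range<Long> rowGroup = Range.closedOpen(100L, 300L);
        for (Map.Entry<Range<Long>, String> e : blockMap.subRangeMap(rowGroup).asMapOfRanges().entrySet()) {
            Range<Long> overlap = rowGroup.intersection(e.getKey());
            long bytes = overlap.upperEndpoint() - overlap.lowerEndpoint();
            System.out.println(e.getValue() + " contributes " + bytes + " bytes");
        }
    }
}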

Aggregations

Range (com.google.common.collect.Range): 12
ArrayList (java.util.ArrayList): 4
HashMap (java.util.HashMap): 4
DateTime (org.joda.time.DateTime): 4
TimeGranularity (com.linkedin.thirdeye.api.TimeGranularity): 3
Map (java.util.Map): 3
ImmutableMap (com.google.common.collect.ImmutableMap): 2
Collection (java.util.Collection): 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1
Stopwatch (com.google.common.base.Stopwatch): 1
ImmutableRangeMap (com.google.common.collect.ImmutableRangeMap): 1
ImmutableSet (com.google.common.collect.ImmutableSet): 1
LinkedListMultimap (com.google.common.collect.LinkedListMultimap): 1
Multimap (com.google.common.collect.Multimap): 1
RangeSet (com.google.common.collect.RangeSet): 1
ActionCacheChecker (com.google.devtools.build.lib.actions.ActionCacheChecker): 1
ActionExecutionStatusReporter (com.google.devtools.build.lib.actions.ActionExecutionStatusReporter): 1
ActionInputFileCache (com.google.devtools.build.lib.actions.ActionInputFileCache): 1
ActionLogBufferPathGenerator (com.google.devtools.build.lib.actions.ActionLogBufferPathGenerator): 1
Artifact (com.google.devtools.build.lib.actions.Artifact): 1