
Example 66 with ISE

use of org.apache.druid.java.util.common.ISE in project druid by druid-io.

the class BaseFilterTest method selectCountUsingVectorizedFilteredAggregator.

private long selectCountUsingVectorizedFilteredAggregator(final DimFilter dimFilter) {
    Preconditions.checkState(makeFilter(dimFilter).canVectorizeMatcher(adapter), "Cannot vectorize filter: %s", dimFilter);
    try (final VectorCursor cursor = makeVectorCursor(null)) {
        final FilteredAggregatorFactory aggregatorFactory = new FilteredAggregatorFactory(
            new CountAggregatorFactory("count"),
            maybeOptimize(dimFilter)
        );
        final VectorAggregator aggregator = aggregatorFactory.factorizeVector(cursor.getColumnSelectorFactory());
        final ByteBuffer buf = ByteBuffer.allocate(aggregatorFactory.getMaxIntermediateSizeWithNulls() * 2);
        // Use two slots: one for each form of aggregate.
        aggregator.init(buf, 0);
        aggregator.init(buf, aggregatorFactory.getMaxIntermediateSizeWithNulls());
    for (; !cursor.isDone(); cursor.advance()) {
        // First form: aggregate the whole vector into the slot at offset 0.
        aggregator.aggregate(buf, 0, 0, cursor.getCurrentVectorSize());
        // Second form: aggregate the same rows into the second slot via explicit
        // per-row positions and row indexes.
        final int[] positions = new int[cursor.getCurrentVectorSize()];
        Arrays.fill(positions, aggregatorFactory.getMaxIntermediateSizeWithNulls());
        final int[] allRows = new int[cursor.getCurrentVectorSize()];
        for (int i = 0; i < allRows.length; i++) {
            allRows[i] = i;
        }
        aggregator.aggregate(buf, cursor.getCurrentVectorSize(), positions, allRows, 0);
    }
        final long val1 = (long) aggregator.get(buf, 0);
        final long val2 = (long) aggregator.get(buf, aggregatorFactory.getMaxIntermediateSizeWithNulls());
        if (val1 != val2) {
            throw new ISE("Oh no, val1[%d] != val2[%d]", val1, val2);
        }
        return val1;
    }
}
Also used : FilteredAggregatorFactory(org.apache.druid.query.aggregation.FilteredAggregatorFactory) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) VectorAggregator(org.apache.druid.query.aggregation.VectorAggregator) ISE(org.apache.druid.java.util.common.ISE) VectorCursor(org.apache.druid.segment.vector.VectorCursor) ByteBuffer(java.nio.ByteBuffer)
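
The point of this helper is the cross-check at the end: the same filtered count is computed through two aggregation paths (contiguous offsets vs. explicit position/row arrays), and any disagreement is surfaced with an ISE. A minimal sketch of that pattern, assuming only that ISE fills its %d/%s placeholders from the trailing arguments; checkConsistent is a hypothetical helper, not Druid code:

import org.apache.druid.java.util.common.ISE;

// Sketch: compute one result two ways and fail loudly if they diverge.
final class ConsistencyCheck {
    static long checkConsistent(long val1, long val2) {
        if (val1 != val2) {
            // ISE formats the message from its arguments, like String.format.
            throw new ISE("val1[%d] != val2[%d]", val1, val2);
        }
        return val1;
    }
}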

Example 67 with ISE

use of org.apache.druid.java.util.common.ISE in project druid by druid-io.

the class IndexerSQLMetadataStorageCoordinator method announceHistoricalSegmentBatch.

/**
 * Attempts to insert a batch of segments into the database. Segments that already exist are skipped;
 * this check is imperfect, however, and callers must be prepared to retry their entire transaction on exceptions.
 *
 * @return the set of segments actually inserted
 */
private Set<DataSegment> announceHistoricalSegmentBatch(final Handle handle, final Set<DataSegment> segments, final Set<DataSegment> usedSegments) throws IOException {
    final Set<DataSegment> toInsertSegments = new HashSet<>();
    try {
        Set<String> existedSegments = segmentExistsBatch(handle, segments);
        log.info("Found these segments already exist in DB: %s", existedSegments);
        for (DataSegment segment : segments) {
            if (!existedSegments.contains(segment.getId().toString())) {
                toInsertSegments.add(segment);
            }
        }
        // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
        // Avoiding ON DUPLICATE KEY since it's not portable.
        // Avoiding try/catch since it may cause inadvertent transaction-splitting.
        final List<List<DataSegment>> partitionedSegments = Lists.partition(new ArrayList<>(toInsertSegments), MAX_NUM_SEGMENTS_TO_ANNOUNCE_AT_ONCE);
        PreparedBatch preparedBatch = handle.prepareBatch(
            StringUtils.format(
                "INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, partitioned, version, used, payload) "
                + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)",
                dbTables.getSegmentsTable(),
                connector.getQuoteString()
            )
        );
        for (List<DataSegment> partition : partitionedSegments) {
            for (DataSegment segment : partition) {
                preparedBatch.add()
                             .bind("id", segment.getId().toString())
                             .bind("dataSource", segment.getDataSource())
                             .bind("created_date", DateTimes.nowUtc().toString())
                             .bind("start", segment.getInterval().getStart().toString())
                             .bind("end", segment.getInterval().getEnd().toString())
                             .bind("partitioned", !(segment.getShardSpec() instanceof NoneShardSpec))
                             .bind("version", segment.getVersion())
                             .bind("used", usedSegments.contains(segment))
                             .bind("payload", jsonMapper.writeValueAsBytes(segment));
            }
            final int[] affectedRows = preparedBatch.execute();
            final boolean succeeded = Arrays.stream(affectedRows).allMatch(eachAffectedRows -> eachAffectedRows == 1);
            if (succeeded) {
                log.infoSegments(partition, "Published segments to DB");
            } else {
                final List<DataSegment> failedToPublish = IntStream.range(0, partition.size())
                                                                   .filter(i -> affectedRows[i] != 1)
                                                                   .mapToObj(partition::get)
                                                                   .collect(Collectors.toList());
                throw new ISE("Failed to publish segments to DB: %s", SegmentUtils.commaSeparatedIdentifiers(failedToPublish));
            }
        }
    } catch (Exception e) {
        log.errorSegments(segments, "Exception inserting segments");
        throw e;
    }
    return toInsertSegments;
}
Also used : NoneShardSpec(org.apache.druid.timeline.partition.NoneShardSpec) DataSegment(org.apache.druid.timeline.DataSegment) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) IOException(java.io.IOException) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) List(java.util.List) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) PreparedBatch(org.skife.jdbi.v2.PreparedBatch) ISE(org.apache.druid.java.util.common.ISE) HashSet(java.util.HashSet)
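
The SELECT-then-INSERT idiom above trades atomicity for portability, which is why the comments insist that callers retry the whole transaction on conflicts. A minimal sketch of the same idiom in plain JDBC, with an illustrative table name and columns rather than the actual Druid schema:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

// Sketch: check for the row first, then insert; the pair is racy, so the
// caller is expected to retry the surrounding transaction on failure.
final class IdempotentInsert {
    static boolean insertIfAbsent(Connection conn, String id, byte[] payload) throws SQLException {
        try (PreparedStatement select = conn.prepareStatement("SELECT id FROM segments WHERE id = ?")) {
            select.setString(1, id);
            try (ResultSet rs = select.executeQuery()) {
                if (rs.next()) {
                    return false; // already present; nothing to insert
                }
            }
        }
        try (PreparedStatement insert = conn.prepareStatement("INSERT INTO segments (id, payload) VALUES (?, ?)")) {
            insert.setString(1, id);
            insert.setBytes(2, payload);
            return insert.executeUpdate() == 1;
        }
    }
}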

Example 68 with ISE

use of org.apache.druid.java.util.common.ISE in project druid by druid-io.

the class CuratorDruidNodeDiscoveryProvider method stop.

@LifecycleStop
public void stop() throws IOException {
    if (!lifecycleLock.canStop()) {
        throw new ISE("can't stop.");
    }
    log.debug("Stopping.");
    Closer closer = Closer.create();
    closer.registerAll(nodeRoleWatchers.values());
    closer.registerAll(nodeDiscoverers);
    CloseableUtils.closeAll(closer, listenerExecutor::shutdownNow);
}
Also used : Closer(org.apache.druid.java.util.common.io.Closer) ISE(org.apache.druid.java.util.common.ISE) LifecycleStop(org.apache.druid.java.util.common.lifecycle.LifecycleStop)
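
The stop method is a compact guarded-shutdown pattern: refuse to stop unless startup actually succeeded, then funnel every resource through one Closer so each close() runs even if an earlier one throws. A minimal sketch, using a plain boolean as a stand-in for Druid's LifecycleLock:

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.io.Closer;

// Sketch: guard the stop, then close all resources through a single Closer.
final class GuardedStop {
    private volatile boolean started = true;
    private final List<Closeable> resources = new ArrayList<>();

    void stop() throws IOException {
        if (!started) {
            throw new ISE("can't stop.");
        }
        final Closer closer = Closer.create();
        for (Closeable resource : resources) {
            closer.register(resource);
        }
        // close() runs every registered close(), propagating the first failure.
        closer.close();
    }
}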

Example 69 with ISE

use of org.apache.druid.java.util.common.ISE in project druid by druid-io.

the class DruidLeaderClient method findCurrentLeader.

public String findCurrentLeader() {
    Preconditions.checkState(lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS));
    final StringFullResponseHolder responseHolder;
    try {
        responseHolder = go(makeRequest(HttpMethod.GET, leaderRequestPath));
    } catch (Exception ex) {
        throw new ISE(ex, "Couldn't find leader.");
    }
    if (responseHolder.getStatus().getCode() == 200) {
        String leaderUrl = responseHolder.getContent();
        // Verify that the returned content is a valid URL before caching it.
        try {
            URL validatedUrl = new URL(leaderUrl);
            currentKnownLeader.set(leaderUrl);
            // Return the validated URL's string form rather than the raw content.
            return validatedUrl.toString();
        } catch (MalformedURLException ex) {
            log.error(ex, "Received malformed leader url[%s].", leaderUrl);
        }
    }
    throw new ISE("Couldn't find leader, failed response status is [%s] and content [%s].", responseHolder.getStatus().getCode(), responseHolder.getContent());
}
Also used : StringFullResponseHolder(org.apache.druid.java.util.http.client.response.StringFullResponseHolder) MalformedURLException(java.net.MalformedURLException) ISE(org.apache.druid.java.util.common.ISE) MalformedURLException(java.net.MalformedURLException) ChannelException(org.jboss.netty.channel.ChannelException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) URL(java.net.URL)
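
Note the two ISE constructors in play: new ISE(ex, "...") wraps a cause, while the final throw formats a plain message. A minimal sketch of the validate-then-return step, simplified to throw immediately on a malformed URL where the original logs and falls through:

import java.net.MalformedURLException;
import java.net.URL;
import org.apache.druid.java.util.common.ISE;

// Sketch: validate a leader URL before trusting it.
final class LeaderUrlValidator {
    static String validate(String leaderUrl) {
        try {
            // new URL(...) throws MalformedURLException for an invalid URL.
            return new URL(leaderUrl).toString();
        } catch (MalformedURLException ex) {
            throw new ISE(ex, "Received malformed leader url[%s].", leaderUrl);
        }
    }
}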

Example 70 with ISE

use of org.apache.druid.java.util.common.ISE in project druid by druid-io.

the class StreamAppenderator method push.

@Override
public ListenableFuture<SegmentsAndCommitMetadata> push(final Collection<SegmentIdWithShardSpec> identifiers, @Nullable final Committer committer, final boolean useUniquePath) {
    final Map<SegmentIdWithShardSpec, Sink> theSinks = new HashMap<>();
    AtomicLong pushedHydrantsCount = new AtomicLong();
    for (final SegmentIdWithShardSpec identifier : identifiers) {
        final Sink sink = sinks.get(identifier);
        if (sink == null) {
            throw new ISE("No sink for identifier: %s", identifier);
        }
        theSinks.put(identifier, sink);
        if (sink.finishWriting()) {
            totalRows.addAndGet(-sink.getNumRows());
        }
        // count hydrants for stats:
        pushedHydrantsCount.addAndGet(Iterables.size(sink));
    }
    return Futures.transform(
        // Persist all pending data first; the commit metadata it returns feeds
        // the push step below.
        persistAll(committer),
        (Function<Object, SegmentsAndCommitMetadata>) commitMetadata -> {
        final List<DataSegment> dataSegments = new ArrayList<>();
        log.info("Preparing to push (stats): processed rows: [%d], sinks: [%d], fireHydrants (across sinks): [%d]", rowIngestionMeters.getProcessed(), theSinks.size(), pushedHydrantsCount.get());
        log.debug("Building and pushing segments: %s", theSinks.keySet().stream().map(SegmentIdWithShardSpec::toString).collect(Collectors.joining(", ")));
        for (Map.Entry<SegmentIdWithShardSpec, Sink> entry : theSinks.entrySet()) {
            if (droppingSinks.contains(entry.getKey())) {
                log.warn("Skipping push of currently-dropping sink[%s]", entry.getKey());
                continue;
            }
            final DataSegment dataSegment = mergeAndPush(entry.getKey(), entry.getValue(), useUniquePath);
            if (dataSegment != null) {
                dataSegments.add(dataSegment);
            } else {
                log.warn("mergeAndPush[%s] returned null, skipping.", entry.getKey());
            }
        }
        log.info("Push complete...");
        return new SegmentsAndCommitMetadata(dataSegments, commitMetadata);
    }, pushExecutor);
}
Also used : DataSegmentAnnouncer(org.apache.druid.server.coordination.DataSegmentAnnouncer) Arrays(java.util.Arrays) FireDepartmentMetrics(org.apache.druid.segment.realtime.FireDepartmentMetrics) Pair(org.apache.druid.java.util.common.Pair) FileLock(java.nio.channels.FileLock) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) QueryRunner(org.apache.druid.query.QueryRunner) IAE(org.apache.druid.java.util.common.IAE) FileUtils(org.apache.druid.java.util.common.FileUtils) DateTimes(org.apache.druid.java.util.common.DateTimes) Function(com.google.common.base.Function) Execs(org.apache.druid.java.util.common.concurrent.Execs) Closer(org.apache.druid.java.util.common.io.Closer) Collection(java.util.Collection) QueryableIndex(org.apache.druid.segment.QueryableIndex) StandardOpenOption(java.nio.file.StandardOpenOption) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) StringUtils(org.apache.druid.java.util.common.StringUtils) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) Collectors(java.util.stream.Collectors) Sets(com.google.common.collect.Sets) InputRow(org.apache.druid.data.input.InputRow) IndexSizeExceededException(org.apache.druid.segment.incremental.IndexSizeExceededException) List(java.util.List) DataSegment(org.apache.druid.timeline.DataSegment) MutableLong(org.apache.commons.lang.mutable.MutableLong) QueryableIndexSegment(org.apache.druid.segment.QueryableIndexSegment) Joiner(com.google.common.base.Joiner) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) DataSegmentPusher(org.apache.druid.segment.loading.DataSegmentPusher) Iterables(com.google.common.collect.Iterables) MoreExecutors(com.google.common.util.concurrent.MoreExecutors) ParseExceptionHandler(org.apache.druid.segment.incremental.ParseExceptionHandler) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) Stopwatch(com.google.common.base.Stopwatch) Supplier(com.google.common.base.Supplier) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) Callable(java.util.concurrent.Callable) RowIngestionMeters(org.apache.druid.segment.incremental.RowIngestionMeters) ArrayList(java.util.ArrayList) ConcurrentMap(java.util.concurrent.ConcurrentMap) BaseProgressIndicator(org.apache.druid.segment.BaseProgressIndicator) Interval(org.joda.time.Interval) Lists(com.google.common.collect.Lists) ImmutableList(com.google.common.collect.ImmutableList) Query(org.apache.druid.query.Query) Sink(org.apache.druid.segment.realtime.plumber.Sink) RetryUtils(org.apache.druid.java.util.common.RetryUtils) QuerySegmentWalker(org.apache.druid.query.QuerySegmentWalker) Nullable(javax.annotation.Nullable) EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) ReentrantLock(java.util.concurrent.locks.ReentrantLock) RE(org.apache.druid.java.util.common.RE) IndexMerger(org.apache.druid.segment.IndexMerger) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) FireHydrant(org.apache.druid.segment.realtime.FireHydrant) IOException(java.io.IOException) Ints(com.google.common.primitives.Ints) ReferenceCountingSegment(org.apache.druid.segment.ReferenceCountingSegment) FutureCallback(com.google.common.util.concurrent.FutureCallback) File(java.io.File) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) AtomicLong(java.util.concurrent.atomic.AtomicLong) Futures(com.google.common.util.concurrent.Futures) 
Lock(java.util.concurrent.locks.Lock) Closeable(java.io.Closeable) Committer(org.apache.druid.data.input.Committer) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) SegmentDescriptor(org.apache.druid.query.SegmentDescriptor) Cache(org.apache.druid.client.cache.Cache) IndexIO(org.apache.druid.segment.IndexIO) IncrementalIndexAddResult(org.apache.druid.segment.incremental.IncrementalIndexAddResult) DataSchema(org.apache.druid.segment.indexing.DataSchema) FileChannel(java.nio.channels.FileChannel)
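
The push itself is chained off persistAll(committer) with Futures.transform, so the merge-and-push function runs on pushExecutor only once persistence completes. A minimal sketch of that chaining, with placeholder futures standing in for persistAll(...) and mergeAndPush(...):

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

// Sketch: run a second step on a given executor after the first future completes.
final class ChainedPush {
    public static void main(String[] args) {
        final ListeningExecutorService exec =
            MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        final ListenableFuture<String> persisted = exec.submit(() -> "commitMetadata");
        final ListenableFuture<String> pushed =
            Futures.transform(persisted, metadata -> "pushed with " + metadata, exec);
        System.out.println(Futures.getUnchecked(pushed));
        exec.shutdown();
    }
}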

Aggregations

ISE (org.apache.druid.java.util.common.ISE) 354
IOException (java.io.IOException) 95
ArrayList (java.util.ArrayList) 90
Map (java.util.Map) 68
List (java.util.List) 60
File (java.io.File) 48
Interval (org.joda.time.Interval) 48
DataSegment (org.apache.druid.timeline.DataSegment) 44
HashMap (java.util.HashMap) 43
Nullable (javax.annotation.Nullable) 43
URL (java.net.URL) 36
StatusResponseHolder (org.apache.druid.java.util.http.client.response.StatusResponseHolder) 33
Request (org.apache.druid.java.util.http.client.Request) 30
ExecutionException (java.util.concurrent.ExecutionException) 29
ImmutableMap (com.google.common.collect.ImmutableMap) 28
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap) 28
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 27
Collectors (java.util.stream.Collectors) 27
IAE (org.apache.druid.java.util.common.IAE) 27
ImmutableList (com.google.common.collect.ImmutableList) 26