
Example 41 with ISE

Use of org.apache.druid.java.util.common.ISE in project druid by druid-io.

From the class TestHttpClient, method go().

@Override
public <Intermediate, Final> ListenableFuture<Final> go(Request request, HttpResponseHandler<Intermediate, Final> handler, Duration readTimeout) {
    try {
        // Deserialize the Query that the caller wrote into the request body.
        final Query query = objectMapper.readValue(request.getContent().array(), Query.class);
        // Look up the in-memory test server registered for this URL.
        final QueryRunner queryRunner = servers.get(request.getUrl()).getQueryRunner();
        if (queryRunner == null) {
            throw new ISE("Can't find queryRunner for url[%s]", request.getUrl());
        }
        // Run the query locally and serialize the resulting Sequence as the response body.
        final ResponseContext responseContext = ResponseContext.createEmpty();
        final Sequence sequence = queryRunner.run(QueryPlus.wrap(query), responseContext);
        final byte[] serializedContent;
        try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
            objectMapper.writeValue(baos, sequence);
            serializedContent = baos.toByteArray();
        }
        // As QueryResource does, send the response context back in an HTTP header, subject to a length limit.
        final ResponseContext.SerializationResult serializationResult = responseContext.serializeWith(objectMapper, RESPONSE_CTX_HEADER_LEN_LIMIT);
        final HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
        response.headers().add(QueryResource.HEADER_RESPONSE_CONTEXT, serializationResult.getResult());
        response.setContent(HeapChannelBufferFactory.getInstance().getBuffer(serializedContent, 0, serializedContent.length));
        // Drive the caller's response handler synchronously and hand back an already-completed future.
        final ClientResponse<Intermediate> intermClientResponse = handler.handleResponse(response, NOOP_TRAFFIC_COP);
        final ClientResponse<Final> finalClientResponse = handler.done(intermClientResponse);
        return Futures.immediateFuture(finalClientResponse.getObj());
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
Also used : Query(org.apache.druid.query.Query) DefaultHttpResponse(org.jboss.netty.handler.codec.http.DefaultHttpResponse) HttpResponse(org.jboss.netty.handler.codec.http.HttpResponse) Sequence(org.apache.druid.java.util.common.guava.Sequence) ByteArrayOutputStream(java.io.ByteArrayOutputStream) IOException(java.io.IOException) QueryRunner(org.apache.druid.query.QueryRunner) ReportTimelineMissingSegmentQueryRunner(org.apache.druid.query.ReportTimelineMissingSegmentQueryRunner) ResponseContext(org.apache.druid.query.context.ResponseContext) ISE(org.apache.druid.java.util.common.ISE)
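The core ISE usage here is the lookup-or-throw on the servers map. Below is a minimal standalone sketch of that pattern, not taken from the Druid source; the QueryRunnerLookup class and getOrThrow method are made-up names, and the only assumed dependency is the Druid java-util artifact that provides ISE.

import java.util.Map;

import org.apache.druid.java.util.common.ISE;

public class QueryRunnerLookup {

    /**
     * Returns the value registered under the given url, or throws ISE when nothing is registered.
     */
    public static <T> T getOrThrow(Map<String, T> servers, String url) {
        final T runner = servers.get(url);
        if (runner == null) {
            // ISE takes a format string plus arguments, like StringUtils.format, and extends IllegalStateException.
            throw new ISE("Can't find queryRunner for url[%s]", url);
        }
        return runner;
    }
}

Because ISE extends IllegalStateException, callers that only know java.lang can still catch it; the formatted message keeps the offending URL in the stack trace.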

Example 42 with ISE

Use of org.apache.druid.java.util.common.ISE in project druid by druid-io.

From the class BytesBoundedLinkedQueueTest, method testPoll().

@Test
public void testPoll() throws InterruptedException {
    final BlockingQueue q = getQueue(10);
    long startTime = System.nanoTime();
    Assert.assertNull(q.poll(delayMS, TimeUnit.MILLISECONDS));
    Assert.assertTrue(TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime) >= delayMS);
    TestObject obj = new TestObject(2);
    Assert.assertTrue(q.offer(obj, delayMS, TimeUnit.MILLISECONDS));
    Assert.assertSame(obj, q.poll(delayMS, TimeUnit.MILLISECONDS));
    Thread.currentThread().interrupt();
    try {
        q.poll(delayMS, TimeUnit.MILLISECONDS);
        throw new ISE("FAIL");
    } catch (InterruptedException success) {
        // expected: poll() must throw when the calling thread carries an interrupt.
    }
    Assert.assertFalse(Thread.interrupted());
}
Also used : BlockingQueue(java.util.concurrent.BlockingQueue) ISE(org.apache.druid.java.util.common.ISE) Test(org.junit.Test)
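The ISE in this test is a failure sentinel: if poll() returns instead of throwing InterruptedException, the test itself throws. A minimal sketch of the same idiom follows; the ExpectedFailureTest class and validate() method are hypothetical and not part of the Druid source.

import org.apache.druid.java.util.common.ISE;
import org.junit.Test;

public class ExpectedFailureTest {

    private void validate(int value) {
        if (value < 0) {
            throw new IllegalArgumentException("negative value");
        }
    }

    @Test
    public void testNegativeValueRejected() {
        try {
            validate(-1);
            // Reaching this line means the expected exception never fired; the ISE fails the test loudly.
            throw new ISE("FAIL");
        } catch (IllegalArgumentException success) {
            // expected
        }
    }
}

JUnit's Assert.assertThrows expresses the same intent more concisely, but the try/throw-ISE/catch form lets the test keep running further assertions afterwards, as testPoll does with Assert.assertFalse(Thread.interrupted()).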

Example 43 with ISE

Use of org.apache.druid.java.util.common.ISE in project druid by druid-io.

From the class DruidQuery, method computeSorting().

@Nonnull
private static Sorting computeSorting(final PartialDruidQuery partialQuery, final PlannerContext plannerContext, final RowSignature rowSignature, @Nullable final VirtualColumnRegistry virtualColumnRegistry) {
    final Sort sort = Preconditions.checkNotNull(partialQuery.getSort(), "sort");
    final Project sortProject = partialQuery.getSortProject();
    // Extract limit and offset.
    final OffsetLimit offsetLimit = OffsetLimit.fromSort(sort);
    // Extract orderBy column specs.
    final List<OrderByColumnSpec> orderBys = new ArrayList<>(sort.getChildExps().size());
    for (int sortKey = 0; sortKey < sort.getChildExps().size(); sortKey++) {
        final RexNode sortExpression = sort.getChildExps().get(sortKey);
        final RelFieldCollation collation = sort.getCollation().getFieldCollations().get(sortKey);
        final OrderByColumnSpec.Direction direction;
        final StringComparator comparator;
        if (collation.getDirection() == RelFieldCollation.Direction.ASCENDING) {
            direction = OrderByColumnSpec.Direction.ASCENDING;
        } else if (collation.getDirection() == RelFieldCollation.Direction.DESCENDING) {
            direction = OrderByColumnSpec.Direction.DESCENDING;
        } else {
            throw new ISE("Don't know what to do with direction[%s]", collation.getDirection());
        }
        final SqlTypeName sortExpressionType = sortExpression.getType().getSqlTypeName();
        if (SqlTypeName.NUMERIC_TYPES.contains(sortExpressionType) || SqlTypeName.TIMESTAMP == sortExpressionType || SqlTypeName.DATE == sortExpressionType) {
            comparator = StringComparators.NUMERIC;
        } else {
            comparator = StringComparators.LEXICOGRAPHIC;
        }
        if (sortExpression.isA(SqlKind.INPUT_REF)) {
            final RexInputRef ref = (RexInputRef) sortExpression;
            final String fieldName = rowSignature.getColumnName(ref.getIndex());
            orderBys.add(new OrderByColumnSpec(fieldName, direction, comparator));
        } else {
            // We don't support sorting by anything other than refs which actually appear in the query result.
            throw new CannotBuildQueryException(sort, sortExpression);
        }
    }
    // Extract any post-sort Projection.
    final Projection projection;
    if (sortProject == null) {
        projection = null;
    } else if (partialQuery.getAggregate() == null) {
        if (virtualColumnRegistry == null) {
            throw new ISE("Must provide 'virtualColumnRegistry' for pre-aggregation Projection!");
        }
        projection = Projection.preAggregation(sortProject, plannerContext, rowSignature, virtualColumnRegistry);
    } else {
        projection = Projection.postAggregation(sortProject, plannerContext, rowSignature, "s");
    }
    return Sorting.create(orderBys, offsetLimit, projection);
}
Also used : OffsetLimit(org.apache.druid.sql.calcite.planner.OffsetLimit) SqlTypeName(org.apache.calcite.sql.type.SqlTypeName) IntArrayList(it.unimi.dsi.fastutil.ints.IntArrayList) ArrayList(java.util.ArrayList) StringComparator(org.apache.druid.query.ordering.StringComparator) OrderByColumnSpec(org.apache.druid.query.groupby.orderby.OrderByColumnSpec) Project(org.apache.calcite.rel.core.Project) RelFieldCollation(org.apache.calcite.rel.RelFieldCollation) Sort(org.apache.calcite.rel.core.Sort) RexInputRef(org.apache.calcite.rex.RexInputRef) ISE(org.apache.druid.java.util.common.ISE) RexNode(org.apache.calcite.rex.RexNode) Nonnull(javax.annotation.Nonnull)
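Here ISE guards the exhaustive mapping from Calcite's RelFieldCollation.Direction to Druid's OrderByColumnSpec.Direction. A minimal sketch of that map-or-throw pattern, using two hypothetical stand-in enums rather than the real Calcite and Druid types:

import org.apache.druid.java.util.common.ISE;

public class DirectionMapper {

    // Stand-ins for RelFieldCollation.Direction and OrderByColumnSpec.Direction, respectively.
    enum CalciteDirection { ASCENDING, DESCENDING, CLUSTERED }
    enum DruidDirection { ASCENDING, DESCENDING }

    static DruidDirection toDruidDirection(CalciteDirection direction) {
        switch (direction) {
            case ASCENDING:
                return DruidDirection.ASCENDING;
            case DESCENDING:
                return DruidDirection.DESCENDING;
            default:
                // Any direction we did not plan for is a planner bug; surface it as an ISE rather than guessing.
                throw new ISE("Don't know what to do with direction[%s]", direction);
        }
    }
}

Throwing on the default branch keeps the mapping honest if the upstream enum ever grows a new value.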

Example 44 with ISE

Use of org.apache.druid.java.util.common.ISE in project druid by druid-io.

From the class DruidSchema, method refreshSegmentsForDataSource().

/**
 * Attempt to refresh "segmentSignatures" for a set of segments for a particular dataSource. Returns the set of
 * segments actually refreshed, which may be a subset of the asked-for set.
 */
private Set<SegmentId> refreshSegmentsForDataSource(final String dataSource, final Set<SegmentId> segments) throws IOException {
    if (!segments.stream().allMatch(segmentId -> segmentId.getDataSource().equals(dataSource))) {
        // Sanity check. We definitely expect this to pass.
        throw new ISE("'segments' must all match 'dataSource'!");
    }
    log.debug("Refreshing metadata for dataSource[%s].", dataSource);
    final long startTime = System.currentTimeMillis();
    // Segment id string -> SegmentId object.
    final Map<String, SegmentId> segmentIdMap = Maps.uniqueIndex(segments, SegmentId::toString);
    final Set<SegmentId> retVal = new HashSet<>();
    final Sequence<SegmentAnalysis> sequence = runSegmentMetadataQuery(Iterables.limit(segments, MAX_SEGMENTS_PER_QUERY));
    Yielder<SegmentAnalysis> yielder = Yielders.each(sequence);
    try {
        while (!yielder.isDone()) {
            final SegmentAnalysis analysis = yielder.get();
            final SegmentId segmentId = segmentIdMap.get(analysis.getId());
            if (segmentId == null) {
                log.warn("Got analysis for segment[%s] we didn't ask for, ignoring.", analysis.getId());
            } else {
                final RowSignature rowSignature = analysisToRowSignature(analysis);
                log.debug("Segment[%s] has signature[%s].", segmentId, rowSignature);
                segmentMetadataInfo.compute(dataSource, (datasourceKey, dataSourceSegments) -> {
                    if (dataSourceSegments == null) {
                        // Datasource may have been removed or become unavailable while this refresh was ongoing.
                        log.warn("No segment map found with datasource[%s], skipping refresh of segment[%s]", datasourceKey, segmentId);
                        return null;
                    } else {
                        dataSourceSegments.compute(segmentId, (segmentIdKey, segmentMetadata) -> {
                            if (segmentMetadata == null) {
                                log.warn("No segment[%s] found, skipping refresh", segmentId);
                                return null;
                            } else {
                                final AvailableSegmentMetadata updatedSegmentMetadata = AvailableSegmentMetadata.from(segmentMetadata).withRowSignature(rowSignature).withNumRows(analysis.getNumRows()).build();
                                retVal.add(segmentId);
                                return updatedSegmentMetadata;
                            }
                        });
                        if (dataSourceSegments.isEmpty()) {
                            return null;
                        } else {
                            return dataSourceSegments;
                        }
                    }
                });
            }
            yielder = yielder.next(null);
        }
    } finally {
        yielder.close();
    }
    log.debug("Refreshed metadata for dataSource[%s] in %,d ms (%d segments queried, %d segments left).", dataSource, System.currentTimeMillis() - startTime, retVal.size(), segments.size() - retVal.size());
    return retVal;
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) Map(java.util.Map) Maps(com.google.common.collect.Maps) Iterables(com.google.common.collect.Iterables) IOException(java.io.IOException) Sequence(org.apache.druid.java.util.common.guava.Sequence) Yielder(org.apache.druid.java.util.common.guava.Yielder) Yielders(org.apache.druid.java.util.common.guava.Yielders) SegmentAnalysis(org.apache.druid.query.metadata.metadata.SegmentAnalysis) SegmentId(org.apache.druid.timeline.SegmentId) RowSignature(org.apache.druid.segment.column.RowSignature) ISE(org.apache.druid.java.util.common.ISE)
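The ISE here is a defensive precondition: every requested segment must belong to the dataSource being refreshed. A standalone sketch of that check, using plain Strings in place of SegmentId; SegmentChecks and checkAllMatchDataSource are illustrative names, not Druid API.

import java.util.Set;

import org.apache.druid.java.util.common.ISE;

public class SegmentChecks {

    /**
     * Sanity check mirroring refreshSegmentsForDataSource: every segment's dataSource
     * must equal the dataSource being refreshed.
     */
    static void checkAllMatchDataSource(String dataSource, Set<String> segmentDataSources) {
        if (!segmentDataSources.stream().allMatch(dataSource::equals)) {
            // Callers are never expected to mix dataSources in one refresh, so this indicates a programming error.
            throw new ISE("'segments' must all match 'dataSource'!");
        }
    }
}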

Example 45 with ISE

Use of org.apache.druid.java.util.common.ISE in project druid by druid-io.

From the class DruidSchema, method buildDruidTable().

@VisibleForTesting
DruidTable buildDruidTable(final String dataSource) {
    ConcurrentSkipListMap<SegmentId, AvailableSegmentMetadata> segmentsMap = segmentMetadataInfo.get(dataSource);
    final Map<String, ColumnType> columnTypes = new TreeMap<>();
    if (segmentsMap != null) {
        for (AvailableSegmentMetadata availableSegmentMetadata : segmentsMap.values()) {
            final RowSignature rowSignature = availableSegmentMetadata.getRowSignature();
            if (rowSignature != null) {
                for (String column : rowSignature.getColumnNames()) {
                    // Newer column types should override older ones.
                    final ColumnType columnType = rowSignature.getColumnType(column).orElseThrow(() -> new ISE("Encountered null type for column[%s]", column));
                    columnTypes.putIfAbsent(column, columnType);
                }
            }
        }
    }
    final RowSignature.Builder builder = RowSignature.builder();
    columnTypes.forEach(builder::add);
    final TableDataSource tableDataSource;
    // to be a GlobalTableDataSource instead of a TableDataSource, it must appear on all servers (inferred by existing
    // in the segment cache, which in this case belongs to the broker meaning only broadcast segments live here)
    // to be joinable, it must be possibly joinable according to the factory. we only consider broadcast datasources
    // at this time, and isGlobal is currently strongly coupled with joinable, so only make a global table datasource
    // if also joinable
    final GlobalTableDataSource maybeGlobal = new GlobalTableDataSource(dataSource);
    final boolean isJoinable = joinableFactory.isDirectlyJoinable(maybeGlobal);
    final boolean isBroadcast = segmentManager.getDataSourceNames().contains(dataSource);
    if (isBroadcast && isJoinable) {
        tableDataSource = maybeGlobal;
    } else {
        tableDataSource = new TableDataSource(dataSource);
    }
    return new DruidTable(tableDataSource, builder.build(), null, isJoinable, isBroadcast);
}
Also used : ColumnType(org.apache.druid.segment.column.ColumnType) SegmentId(org.apache.druid.timeline.SegmentId) GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) DruidTable(org.apache.druid.sql.calcite.table.DruidTable) TreeMap(java.util.TreeMap) TableDataSource(org.apache.druid.query.TableDataSource) ISE(org.apache.druid.java.util.common.ISE) RowSignature(org.apache.druid.segment.column.RowSignature) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
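In this example ISE converts an empty Optional from rowSignature.getColumnType(column) into a hard failure. A minimal sketch of that orElseThrow idiom with a hypothetical map of column types; ColumnTypes and requireColumnType are illustrative names.

import java.util.Map;
import java.util.Optional;

import org.apache.druid.java.util.common.ISE;

public class ColumnTypes {

    /**
     * Looks up a column's type and fails with ISE when it is missing, mirroring the orElseThrow call above.
     */
    static String requireColumnType(Map<String, String> typesByColumn, String column) {
        return Optional.ofNullable(typesByColumn.get(column))
                       .orElseThrow(() -> new ISE("Encountered null type for column[%s]", column));
    }
}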

Aggregations

ISE (org.apache.druid.java.util.common.ISE): 354
IOException (java.io.IOException): 95
ArrayList (java.util.ArrayList): 90
Map (java.util.Map): 68
List (java.util.List): 60
File (java.io.File): 48
Interval (org.joda.time.Interval): 48
DataSegment (org.apache.druid.timeline.DataSegment): 44
HashMap (java.util.HashMap): 43
Nullable (javax.annotation.Nullable): 43
URL (java.net.URL): 36
StatusResponseHolder (org.apache.druid.java.util.http.client.response.StatusResponseHolder): 33
Request (org.apache.druid.java.util.http.client.Request): 30
ExecutionException (java.util.concurrent.ExecutionException): 29
ImmutableMap (com.google.common.collect.ImmutableMap): 28
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 28
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 27
Collectors (java.util.stream.Collectors): 27
IAE (org.apache.druid.java.util.common.IAE): 27
ImmutableList (com.google.common.collect.ImmutableList): 26