
Example 41 with DataContext

use of org.apache.calcite.DataContext in project druid by apache.

In class DruidPlanner, method planWithBindableConvention:

/**
 * Construct a {@link PlannerResult} for a fall-back 'bindable' rel, for things that are not directly translatable
 * to native Druid queries such as system tables and just a general purpose (but definitely not optimized) fall-back.
 *
 * See {@link #planWithDruidConvention} which will handle things which are directly translatable
 * to native Druid queries.
 */
private PlannerResult planWithBindableConvention(final RelRoot root, @Nullable final SqlExplain explain) throws RelConversionException {
    BindableRel bindableRel = (BindableRel) planner.transform(
        Rules.BINDABLE_CONVENTION_RULES,
        planner.getEmptyTraitSet().replace(BindableConvention.INSTANCE).plus(root.collation),
        root.rel
    );
    if (!root.isRefTrivial()) {
        // Add a projection on top to accommodate root.fields.
        final List<RexNode> projects = new ArrayList<>();
        final RexBuilder rexBuilder = bindableRel.getCluster().getRexBuilder();
        for (int field : Pair.left(root.fields)) {
            projects.add(rexBuilder.makeInputRef(bindableRel, field));
        }
        bindableRel = new Bindables.BindableProject(bindableRel.getCluster(), bindableRel.getTraitSet(), bindableRel, projects, root.validatedRowType);
    }
    if (explain != null) {
        return planExplanation(bindableRel, explain, false);
    } else {
        final BindableRel theRel = bindableRel;
        final DataContext dataContext = plannerContext.createDataContext(
            (JavaTypeFactory) planner.getTypeFactory(),
            plannerContext.getParameters()
        );
        final Supplier<Sequence<Object[]>> resultsSupplier = () -> {
            final Enumerable<?> enumerable = theRel.bind(dataContext);
            final Enumerator<?> enumerator = enumerable.enumerator();
            return Sequences.withBaggage(new BaseSequence<>(new BaseSequence.IteratorMaker<Object[], EnumeratorIterator<Object[]>>() {

                @Override
                public EnumeratorIterator<Object[]> make() {
                    return new EnumeratorIterator<>(new Iterator<Object[]>() {

                        @Override
                        public boolean hasNext() {
                            return enumerator.moveNext();
                        }

                        @Override
                        public Object[] next() {
                            return (Object[]) enumerator.current();
                        }
                    });
                }

                @Override
                public void cleanup(EnumeratorIterator<Object[]> iterFromMake) {
                }
            }), enumerator::close);
        };
        return new PlannerResult(resultsSupplier, root.validatedRowType);
    }
}
Also used : ArrayList(java.util.ArrayList) BindableRel(org.apache.calcite.interpreter.BindableRel) Bindables(org.apache.calcite.interpreter.Bindables) Sequence(org.apache.druid.java.util.common.guava.Sequence) BaseSequence(org.apache.druid.java.util.common.guava.BaseSequence) DataContext(org.apache.calcite.DataContext) Enumerator(org.apache.calcite.linq4j.Enumerator) Iterator(java.util.Iterator) RexBuilder(org.apache.calcite.rex.RexBuilder) Enumerable(org.apache.calcite.linq4j.Enumerable) RexNode(org.apache.calcite.rex.RexNode)
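The part of this example worth calling out is the bridge from Calcite's pull-based Enumerator to Druid's Sequence: hasNext() delegates to moveNext(), next() to current(), and Sequences.withBaggage guarantees enumerator.close() runs once the sequence is finished with. Below is a minimal, self-contained sketch of that same Enumerator-to-Iterator adaptation using only Calcite's linq4j classes; it is illustrative only (the class name and sample rows are made up), not Druid code.

import java.util.Arrays;
import java.util.Iterator;
import org.apache.calcite.linq4j.Enumerable;
import org.apache.calcite.linq4j.Enumerator;
import org.apache.calcite.linq4j.Linq4j;

public class EnumeratorBridgeSketch {

    public static void main(String[] args) {
        // Stand-in for theRel.bind(dataContext): any Enumerable of Object[] rows.
        Enumerable<Object[]> enumerable = Linq4j.asEnumerable(Arrays.<Object[]>asList(new Object[] { "a", 1 }, new Object[] { "b", 2 }));
        Enumerator<Object[]> enumerator = enumerable.enumerator();
        try {
            Iterator<Object[]> iterator = new Iterator<Object[]>() {

                @Override
                public boolean hasNext() {
                    // Rows are pulled lazily, exactly as in the supplier above.
                    return enumerator.moveNext();
                }

                @Override
                public Object[] next() {
                    // current() is only valid after a successful moveNext().
                    return enumerator.current();
                }
            };
            while (iterator.hasNext()) {
                System.out.println(Arrays.toString(iterator.next()));
            }
        } finally {
            // In the Druid code this close() is attached via Sequences.withBaggage.
            enumerator.close();
        }
    }
}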

Example 42 with DataContext

use of org.apache.calcite.DataContext in project druid by apache.

In class PlannerContext, method createDataContext:

public DataContext createDataContext(final JavaTypeFactory typeFactory, List<TypedValue> parameters) {
    class DruidDataContext implements DataContext {

        private final Map<String, Object> base_context = ImmutableMap.of(
            DataContext.Variable.UTC_TIMESTAMP.camelName, localNow.getMillis(),
            DataContext.Variable.CURRENT_TIMESTAMP.camelName, localNow.getMillis(),
            DataContext.Variable.LOCAL_TIMESTAMP.camelName,
            new Interval(new DateTime("1970-01-01T00:00:00.000", localNow.getZone()), localNow).toDurationMillis(),
            DataContext.Variable.TIME_ZONE.camelName, localNow.getZone().toTimeZone().clone()
        );

        private final Map<String, Object> context;

        DruidDataContext() {
            ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
            builder.putAll(base_context);
            int i = 0;
            for (TypedValue parameter : parameters) {
                builder.put("?" + i, parameter.value);
                i++;
            }
            if (authenticationResult != null) {
                builder.put(DATA_CTX_AUTHENTICATION_RESULT, authenticationResult);
            }
            context = builder.build();
        }

        @Override
        public SchemaPlus getRootSchema() {
            throw new UnsupportedOperationException();
        }

        @Override
        public JavaTypeFactory getTypeFactory() {
            return typeFactory;
        }

        @Override
        public QueryProvider getQueryProvider() {
            throw new UnsupportedOperationException();
        }

        @Override
        public Object get(final String name) {
            return context.get(name);
        }
    }
    return new DruidDataContext();
}
Also used : DataContext(org.apache.calcite.DataContext) HashMap(java.util.HashMap) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) DateTime(org.joda.time.DateTime) Interval(org.joda.time.Interval) TypedValue(org.apache.calcite.avatica.remote.TypedValue)
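To see why the map is keyed this way: Calcite code that receives this DataContext reads values back with get(), using the Variable enum's camelName for the standard variables and "?0", "?1", ... for the dynamic parameters bound above. A short illustrative consumer follows (the class and method names are made up, not Druid code):

import org.apache.calcite.DataContext;

public class DataContextReadSketch {

    static void readValues(DataContext dataContext) {
        // Standard Calcite variables are looked up by their camelName keys, matching base_context above.
        Object currentTimestamp = dataContext.get(DataContext.Variable.CURRENT_TIMESTAMP.camelName);
        Object timeZone = dataContext.get(DataContext.Variable.TIME_ZONE.camelName);
        // Dynamic parameters bound from TypedValue objects are keyed "?0", "?1", ...
        Object firstParameter = dataContext.get("?0");
        System.out.println(currentTimestamp + " / " + timeZone + " / " + firstParameter);
    }
}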

Example 43 with DataContext

use of org.apache.calcite.DataContext in project druid by apache.

In class SystemSchemaTest, method testSegmentsTable:

@Test
public void testSegmentsTable() throws Exception {
    final SegmentsTable segmentsTable = new SegmentsTable(druidSchema, metadataView, new ObjectMapper(), authMapper);
    final Set<SegmentWithOvershadowedStatus> publishedSegments = new HashSet<>(Arrays.asList(
        new SegmentWithOvershadowedStatus(publishedCompactedSegment1, true),
        new SegmentWithOvershadowedStatus(publishedCompactedSegment2, false),
        new SegmentWithOvershadowedStatus(publishedUncompactedSegment3, false),
        new SegmentWithOvershadowedStatus(segment1, true),
        new SegmentWithOvershadowedStatus(segment2, false)
    ));
    EasyMock.expect(metadataView.getPublishedSegments()).andReturn(publishedSegments.iterator()).once();
    EasyMock.replay(client, request, responseHolder, responseHandler, metadataView);
    DataContext dataContext = createDataContext(Users.SUPER);
    final List<Object[]> rows = segmentsTable.scan(dataContext).toList();
    rows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[0]).compareTo(row2[0]));
    // total segments = 8
    // segments test1, test2  are published and available
    // segment test3 is served by historical but unpublished or unused
    // segments test4, test5 are not published but available (realtime segments)
    // segment test2 is both published and served by a realtime server.
    Assert.assertEquals(8, rows.size());
    verifyRow(
        rows.get(0),
        "test1_2010-01-01T00:00:00.000Z_2011-01-01T00:00:00.000Z_version1",
        100L,
        0L,   // partition_num
        1L,   // num_replicas
        3L,   // numRows
        1L,   // is_published
        1L,   // is_available
        0L,   // is_realtime
        1L,   // is_overshadowed
        null  // is_compacted
    );
    verifyRow(
        rows.get(1),
        "test2_2011-01-01T00:00:00.000Z_2012-01-01T00:00:00.000Z_version2",
        100L,
        0L,   // partition_num
        2L,   // num_replicas: segment test2 is served by historical and realtime servers
        3L,   // numRows
        1L,   // is_published
        1L,   // is_available
        0L,   // is_realtime
        0L,   // is_overshadowed
        null  // is_compacted
    );
    // segment test3 is unpublished and has a NumberedShardSpec with partitionNum = 2
    verifyRow(
        rows.get(2),
        "test3_2012-01-01T00:00:00.000Z_2013-01-01T00:00:00.000Z_version3_2",
        100L,
        2L,   // partition_num
        1L,   // num_replicas
        2L,   // numRows
        0L,   // is_published
        1L,   // is_available
        0L,   // is_realtime
        0L,   // is_overshadowed
        null  // is_compacted
    );
    verifyRow(
        rows.get(3),
        "test4_2014-01-01T00:00:00.000Z_2015-01-01T00:00:00.000Z_version4",
        100L,
        0L,   // partition_num
        1L,   // num_replicas
        0L,   // numRows
        0L,   // is_published
        1L,   // is_available
        1L,   // is_realtime
        0L,   // is_overshadowed
        null  // is_compacted
    );
    verifyRow(
        rows.get(4),
        "test5_2015-01-01T00:00:00.000Z_2016-01-01T00:00:00.000Z_version5",
        100L,
        0L,   // partition_num
        1L,   // num_replicas
        0L,   // numRows
        0L,   // is_published
        1L,   // is_available
        1L,   // is_realtime
        0L,   // is_overshadowed
        null  // is_compacted
    );
    // wikipedia segments are published and unavailable, num_replicas is 0
    // wikipedia segments 1 and 2 are compacted while 3 is not
    verifyRow(
        rows.get(5),
        "wikipedia1_2007-01-01T00:00:00.000Z_2008-01-01T00:00:00.000Z_version1",
        53000L,
        0L,   // partition_num
        0L,   // num_replicas
        0L,   // numRows
        1L,   // is_published
        0L,   // is_available
        0L,   // is_realtime
        1L,   // is_overshadowed
        expectedCompactionState  // is_compacted
    );
    verifyRow(
        rows.get(6),
        "wikipedia2_2008-01-01T00:00:00.000Z_2009-01-01T00:00:00.000Z_version2",
        83000L,
        0L,   // partition_num
        0L,   // num_replicas
        0L,   // numRows
        1L,   // is_published
        0L,   // is_available
        0L,   // is_realtime
        0L,   // is_overshadowed
        expectedCompactionState  // is_compacted
    );
    verifyRow(
        rows.get(7),
        "wikipedia3_2009-01-01T00:00:00.000Z_2010-01-01T00:00:00.000Z_version3",
        47000L,
        0L,   // partition_num
        0L,   // num_replicas
        0L,   // numRows
        1L,   // is_published
        0L,   // is_available
        0L,   // is_realtime
        0L,   // is_overshadowed
        null  // is_compacted
    );
    // Verify value types.
    verifyTypes(rows, SystemSchema.SEGMENTS_SIGNATURE);
}
Also used : DataContext(org.apache.calcite.DataContext) SegmentsTable(org.apache.druid.sql.calcite.schema.SystemSchema.SegmentsTable) SegmentWithOvershadowedStatus(org.apache.druid.timeline.SegmentWithOvershadowedStatus) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) HashSet(java.util.HashSet) Test(org.junit.Test)
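SegmentsTable is a Calcite ScannableTable, which is why the test hands it a DataContext: scan(DataContext) is the hook through which per-query state, such as the authentication result stored by createDataContext, reaches the table. A minimal sketch of a context-aware ScannableTable is shown below; it is illustrative only, and the "authenticationResult" key and class name are assumptions rather than Druid's actual constants.

import java.util.Collections;
import org.apache.calcite.DataContext;
import org.apache.calcite.linq4j.Enumerable;
import org.apache.calcite.linq4j.Linq4j;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.schema.ScannableTable;
import org.apache.calcite.schema.impl.AbstractTable;
import org.apache.calcite.sql.type.SqlTypeName;

public class ContextAwareTableSketch extends AbstractTable implements ScannableTable {

    @Override
    public RelDataType getRowType(RelDataTypeFactory typeFactory) {
        return typeFactory.builder().add("who", SqlTypeName.VARCHAR).build();
    }

    @Override
    public Enumerable<Object[]> scan(DataContext root) {
        // Per-query state travels through the DataContext; "authenticationResult" is an illustrative key.
        Object auth = root.get("authenticationResult");
        return Linq4j.asEnumerable(Collections.<Object[]>singletonList(new Object[] { String.valueOf(auth) }));
    }
}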

Example 44 with DataContext

use of org.apache.calcite.DataContext in project druid by apache.

In class SystemSchemaTest, method testSupervisorTable:

@Test
public void testSupervisorTable() throws Exception {
    SystemSchema.SupervisorsTable supervisorTable = EasyMock.createMockBuilder(SystemSchema.SupervisorsTable.class).withConstructor(client, mapper, authMapper).createMock();
    EasyMock.replay(supervisorTable);
    EasyMock.expect(client.makeRequest(HttpMethod.GET, "/druid/indexer/v1/supervisor?system")).andReturn(request).anyTimes();
    HttpResponse httpResp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
    InputStreamFullResponseHolder responseHolder = new InputStreamFullResponseHolder(httpResp);
    EasyMock.expect(client.go(EasyMock.eq(request), EasyMock.anyObject(InputStreamFullResponseHandler.class))).andReturn(responseHolder).once();
    EasyMock.expect(responseHandler.getStatus()).andReturn(httpResp.getStatus().getCode()).anyTimes();
    EasyMock.expect(request.getUrl()).andReturn(new URL("http://test-host:1234/druid/indexer/v1/supervisor?system")).anyTimes();
    String json = "[{\n"
        + "\t\"id\": \"wikipedia\",\n"
        + "\t\"state\": \"UNHEALTHY_SUPERVISOR\",\n"
        + "\t\"detailedState\": \"UNABLE_TO_CONNECT_TO_STREAM\",\n"
        + "\t\"healthy\": false,\n"
        + "\t\"specString\": \"{\\\"type\\\":\\\"kafka\\\",\\\"dataSchema\\\":{\\\"dataSource\\\":\\\"wikipedia\\\"}"
        + ",\\\"context\\\":null,\\\"suspended\\\":false}\",\n"
        + "\t\"type\": \"kafka\",\n"
        + "\t\"source\": \"wikipedia\",\n"
        + "\t\"suspended\": false\n"
        + "}]";
    byte[] bytesToWrite = json.getBytes(StandardCharsets.UTF_8);
    responseHolder.addChunk(bytesToWrite);
    responseHolder.done();
    EasyMock.replay(client, request, responseHandler);
    DataContext dataContext = createDataContext(Users.SUPER);
    final List<Object[]> rows = supervisorTable.scan(dataContext).toList();
    Object[] row0 = rows.get(0);
    Assert.assertEquals("wikipedia", row0[0].toString());
    Assert.assertEquals("UNHEALTHY_SUPERVISOR", row0[1].toString());
    Assert.assertEquals("UNABLE_TO_CONNECT_TO_STREAM", row0[2].toString());
    Assert.assertEquals(0L, row0[3]);
    Assert.assertEquals("kafka", row0[4].toString());
    Assert.assertEquals("wikipedia", row0[5].toString());
    Assert.assertEquals(0L, row0[6]);
    Assert.assertEquals("{\"type\":\"kafka\",\"dataSchema\":{\"dataSource\":\"wikipedia\"},\"context\":null,\"suspended\":false}", row0[7].toString());
    // Verify value types.
    verifyTypes(rows, SystemSchema.SUPERVISOR_SIGNATURE);
}
Also used : DataContext(org.apache.calcite.DataContext) InputStreamFullResponseHolder(org.apache.druid.java.util.http.client.response.InputStreamFullResponseHolder) DefaultHttpResponse(org.jboss.netty.handler.codec.http.DefaultHttpResponse) HttpResponse(org.jboss.netty.handler.codec.http.HttpResponse) URL(java.net.URL) Test(org.junit.Test)
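For readability, the escaped Java string above decodes to the following supervisor payload; note that the specString field itself still contains escaped JSON, which is exactly what the final assertion checks against row0[7]:

[{
    "id": "wikipedia",
    "state": "UNHEALTHY_SUPERVISOR",
    "detailedState": "UNABLE_TO_CONNECT_TO_STREAM",
    "healthy": false,
    "specString": "{\"type\":\"kafka\",\"dataSchema\":{\"dataSource\":\"wikipedia\"},\"context\":null,\"suspended\":false}",
    "type": "kafka",
    "source": "wikipedia",
    "suspended": false
}]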

Example 45 with DataContext

use of org.apache.calcite.DataContext in project druid by apache.

In class SystemSchemaTest, method testServersTable:

@Test
public void testServersTable() {
    SystemSchema.ServersTable serversTable = EasyMock
        .createMockBuilder(SystemSchema.ServersTable.class)
        .withConstructor(druidNodeDiscoveryProvider, serverInventoryView, authMapper, overlordClient, coordinatorClient)
        .createMock();
    EasyMock.replay(serversTable);
    final DruidNodeDiscovery coordinatorNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class);
    final DruidNodeDiscovery overlordNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class);
    final DruidNodeDiscovery brokerNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class);
    final DruidNodeDiscovery routerNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class);
    final DruidNodeDiscovery historicalNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class);
    final DruidNodeDiscovery mmNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class);
    final DruidNodeDiscovery peonNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class);
    final DruidNodeDiscovery indexerNodeDiscovery = EasyMock.createMock(DruidNodeDiscovery.class);
    EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.COORDINATOR)).andReturn(coordinatorNodeDiscovery).once();
    EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.OVERLORD)).andReturn(overlordNodeDiscovery).once();
    EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.BROKER)).andReturn(brokerNodeDiscovery).once();
    EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.ROUTER)).andReturn(routerNodeDiscovery).once();
    EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.HISTORICAL)).andReturn(historicalNodeDiscovery).once();
    EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.MIDDLE_MANAGER)).andReturn(mmNodeDiscovery).once();
    EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.INDEXER)).andReturn(indexerNodeDiscovery).once();
    EasyMock.expect(druidNodeDiscoveryProvider.getForNodeRole(NodeRole.PEON)).andReturn(peonNodeDiscovery).once();
    EasyMock.expect(coordinatorNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of(coordinator, coordinator2)).once();
    EasyMock.expect(overlordNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of(overlord, overlord2)).once();
    EasyMock.expect(brokerNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of(broker1, broker2, brokerWithBroadcastSegments)).once();
    EasyMock.expect(routerNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of(router)).once();
    EasyMock.expect(historicalNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of(historical1, historical2, lameHistorical)).once();
    EasyMock.expect(mmNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of(middleManager)).once();
    EasyMock.expect(peonNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of(peon1, peon2)).once();
    EasyMock.expect(indexerNodeDiscovery.getAllNodes()).andReturn(ImmutableList.of(indexer)).once();
    EasyMock.expect(coordinatorClient.findCurrentLeader()).andReturn(coordinator.getDruidNode().getHostAndPortToUse()).once();
    EasyMock.expect(overlordClient.findCurrentLeader()).andReturn(overlord.getDruidNode().getHostAndPortToUse()).once();
    final List<DruidServer> servers = new ArrayList<>();
    servers.add(mockDataServer(historical1.getDruidNode().getHostAndPortToUse(), 200L, 1000L, "tier"));
    servers.add(mockDataServer(historical2.getDruidNode().getHostAndPortToUse(), 400L, 1000L, "tier"));
    servers.add(mockDataServer(peon1.getDruidNode().getHostAndPortToUse(), 0L, 1000L, "tier"));
    servers.add(mockDataServer(peon2.getDruidNode().getHostAndPortToUse(), 0L, 1000L, "tier"));
    servers.add(mockDataServer(broker1.getDruidNode().getHostAndPortToUse(), 0L, 1000L, "tier"));
    servers.add(mockDataServer(broker2.getDruidNode().getHostAndPortToUse(), 0L, 1000L, "tier"));
    servers.add(mockDataServer(indexer.getDruidNode().getHostAndPortToUse(), 0L, 1000L, "tier"));
    servers.add(mockDataServer(brokerWithBroadcastSegments.getDruidNode().getHostAndPortToUse(), 0L, 1000L, "tier"));
    EasyMock.expect(serverInventoryView.getInventoryValue(lameHistorical.getDruidNode().getHostAndPortToUse())).andReturn(null).once();
    EasyMock.replay(druidNodeDiscoveryProvider, serverInventoryView, coordinatorClient, overlordClient);
    EasyMock.replay(servers.toArray(new Object[0]));
    EasyMock.replay(coordinatorNodeDiscovery, overlordNodeDiscovery, brokerNodeDiscovery, routerNodeDiscovery, historicalNodeDiscovery, mmNodeDiscovery, peonNodeDiscovery, indexerNodeDiscovery);
    DataContext dataContext = createDataContext(Users.SUPER);
    final List<Object[]> rows = serversTable.scan(dataContext).toList();
    rows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[0]).compareTo(row2[0]));
    final List<Object[]> expectedRows = new ArrayList<>();
    final Long nonLeader = NullHandling.defaultLongValue();
    expectedRows.add(createExpectedRow("brokerHost:8082", "brokerHost", 8082, -1, NodeRole.BROKER, null, 0L, 0L, nonLeader));
    expectedRows.add(createExpectedRow("brokerHostWithBroadcastSegments:8282", "brokerHostWithBroadcastSegments", 8082, 8282, NodeRole.BROKER, "tier", 0L, 1000L, nonLeader));
    expectedRows.add(createExpectedRow("histHost:8083", "histHost", 8083, -1, NodeRole.HISTORICAL, "tier", 400L, 1000L, nonLeader));
    expectedRows.add(createExpectedRow("indexerHost:8091", "indexerHost", 8091, -1, NodeRole.INDEXER, "tier", 0L, 1000L, nonLeader));
    expectedRows.add(createExpectedRow("lameHost:8083", "lameHost", 8083, -1, NodeRole.HISTORICAL, "tier", 0L, 1000L, nonLeader));
    expectedRows.add(createExpectedRow("localhost:8080", "localhost", 8080, -1, NodeRole.PEON, "tier", 0L, 1000L, nonLeader));
    expectedRows.add(createExpectedRow("localhost:8081", "localhost", 8081, -1, NodeRole.COORDINATOR, null, 0L, 0L, 1L));
    expectedRows.add(createExpectedRow("localhost:8082", "localhost", 8082, -1, NodeRole.BROKER, null, 0L, 0L, nonLeader));
    expectedRows.add(createExpectedRow("localhost:8083", "localhost", 8083, -1, NodeRole.HISTORICAL, "tier", 200L, 1000L, nonLeader));
    expectedRows.add(createExpectedRow("localhost:8090", "localhost", 8090, -1, NodeRole.OVERLORD, null, 0L, 0L, 1L));
    expectedRows.add(createExpectedRow("localhost:8181", "localhost", 8181, -1, NodeRole.COORDINATOR, null, 0L, 0L, 0L));
    expectedRows.add(createExpectedRow("localhost:8190", "localhost", 8190, -1, NodeRole.OVERLORD, null, 0L, 0L, 0L));
    expectedRows.add(createExpectedRow("localhost:8888", "localhost", 8888, -1, NodeRole.ROUTER, null, 0L, 0L, nonLeader));
    expectedRows.add(createExpectedRow("mmHost:8091", "mmHost", 8091, -1, NodeRole.MIDDLE_MANAGER, null, 0L, 0L, nonLeader));
    expectedRows.add(createExpectedRow("peonHost:8080", "peonHost", 8080, -1, NodeRole.PEON, "tier", 0L, 1000L, nonLeader));
    Assert.assertEquals(expectedRows.size(), rows.size());
    for (int i = 0; i < rows.size(); i++) {
        Assert.assertArrayEquals(expectedRows.get(i), rows.get(i));
    }
    // Verify value types.
    verifyTypes(rows, SystemSchema.SERVERS_SIGNATURE);
}
Also used : DataContext(org.apache.calcite.DataContext) DruidNodeDiscovery(org.apache.druid.discovery.DruidNodeDiscovery) ArrayList(java.util.ArrayList) ImmutableDruidServer(org.apache.druid.client.ImmutableDruidServer) DruidServer(org.apache.druid.client.DruidServer) Test(org.junit.Test)
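The nonLeader value above comes from NullHandling.defaultLongValue(), so the expected leader column is null under Druid's SQL-compatible null handling and 0L under the legacy default-value mode. A tiny illustrative sketch (assuming druid-core's NullHandling is on the classpath; the class name is made up):

import org.apache.druid.common.config.NullHandling;

public class NullHandlingSketch {

    public static void main(String[] args) {
        // Test-only initialization; production code gets NullHandling configured via injection.
        NullHandling.initializeForTests();
        // null in SQL-compatible mode, 0L in legacy default-value mode.
        Long nonLeader = NullHandling.defaultLongValue();
        System.out.println(nonLeader);
    }
}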

Aggregations

DataContext (org.apache.calcite.DataContext): 47
Test (org.junit.Test): 13
ArrayList (java.util.ArrayList): 10
RexNode (org.apache.calcite.rex.RexNode): 8
Nullable (org.checkerframework.checker.nullness.qual.Nullable): 8
HashMap (java.util.HashMap): 7
RelDataType (org.apache.calcite.rel.type.RelDataType): 7
RelDataTypeFactory (org.apache.calcite.rel.type.RelDataTypeFactory): 7
Values (org.apache.storm.tuple.Values): 7
Context (org.apache.calcite.interpreter.Context): 6
StormContext (org.apache.calcite.interpreter.StormContext): 6
SQLException (java.sql.SQLException): 5
AvaticaStatement (org.apache.calcite.avatica.AvaticaStatement): 5
TypedValue (org.apache.calcite.avatica.remote.TypedValue): 5
Enumerable (org.apache.calcite.linq4j.Enumerable): 5
ScannableTable (org.apache.calcite.schema.ScannableTable): 5
URL (java.net.URL): 4
JavaTypeFactory (org.apache.calcite.adapter.java.JavaTypeFactory): 4
Enumerator (org.apache.calcite.linq4j.Enumerator): 4
RexBuilder (org.apache.calcite.rex.RexBuilder): 4