Use of org.apache.calcite.DataContext in project druid by druid-io.
From the class SystemSchemaTest, method testSegmentsTable:
@Test
public void testSegmentsTable() throws Exception {
final SegmentsTable segmentsTable = new SegmentsTable(druidSchema, metadataView, new ObjectMapper(), authMapper);
final Set<SegmentWithOvershadowedStatus> publishedSegments = new HashSet<>(Arrays.asList(
    new SegmentWithOvershadowedStatus(publishedCompactedSegment1, true),
    new SegmentWithOvershadowedStatus(publishedCompactedSegment2, false),
    new SegmentWithOvershadowedStatus(publishedUncompactedSegment3, false),
    new SegmentWithOvershadowedStatus(segment1, true),
    new SegmentWithOvershadowedStatus(segment2, false)
));
EasyMock.expect(metadataView.getPublishedSegments()).andReturn(publishedSegments.iterator()).once();
EasyMock.replay(client, request, responseHolder, responseHandler, metadataView);
DataContext dataContext = createDataContext(Users.SUPER);
final List<Object[]> rows = segmentsTable.scan(dataContext).toList();
rows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[0]).compareTo(row2[0]));
// total segments = 8
// segments test1, test2 are published and available
// segment test3 is served by historical but unpublished or unused
// segments test4, test5 are not published but available (realtime segments)
// segment test2 is both published and served by a realtime server.
Assert.assertEquals(8, rows.size());
verifyRow(rows.get(0), "test1_2010-01-01T00:00:00.000Z_2011-01-01T00:00:00.000Z_version1",
    100L, // size
    0L, // partition_num
    1L, // num_replicas
    3L, // numRows
    1L, // is_published
    1L, // is_available
    0L, // is_realtime
    1L, // is_overshadowed
    null); // is_compacted (compaction state)
// segment test2 is served by both a historical and a realtime server, so num_replicas is 2
verifyRow(rows.get(1), "test2_2011-01-01T00:00:00.000Z_2012-01-01T00:00:00.000Z_version2",
    100L, // size
    0L, // partition_num
    2L, // num_replicas
    3L, // numRows
    1L, // is_published
    1L, // is_available
    0L, // is_realtime
    0L, // is_overshadowed
    null); // is_compacted (compaction state)
// segment test3 is unpublished and has a NumberedShardSpec with partitionNum = 2
verifyRow(rows.get(2), "test3_2012-01-01T00:00:00.000Z_2013-01-01T00:00:00.000Z_version3_2",
    100L, // size
    2L, // partition_num
    1L, // num_replicas
    2L, // numRows
    0L, // is_published
    1L, // is_available
    0L, // is_realtime
    0L, // is_overshadowed
    null); // is_compacted (compaction state)
verifyRow(rows.get(3), "test4_2014-01-01T00:00:00.000Z_2015-01-01T00:00:00.000Z_version4",
    100L, // size
    0L, // partition_num
    1L, // num_replicas
    0L, // numRows
    0L, // is_published
    1L, // is_available
    1L, // is_realtime
    0L, // is_overshadowed
    null); // is_compacted (compaction state)
verifyRow(rows.get(4), "test5_2015-01-01T00:00:00.000Z_2016-01-01T00:00:00.000Z_version5",
    100L, // size
    0L, // partition_num
    1L, // num_replicas
    0L, // numRows
    0L, // is_published
    1L, // is_available
    1L, // is_realtime
    0L, // is_overshadowed
    null); // is_compacted (compaction state)
// the wikipedia segments are published but unavailable, so num_replicas is 0
// wikipedia segments 1 and 2 carry a compaction state while wikipedia3 does not
verifyRow(rows.get(5), "wikipedia1_2007-01-01T00:00:00.000Z_2008-01-01T00:00:00.000Z_version1",
    53000L, // size
    0L, // partition_num
    0L, // num_replicas
    0L, // numRows
    1L, // is_published
    0L, // is_available
    0L, // is_realtime
    1L, // is_overshadowed
    expectedCompactionState); // is_compacted (compaction state)
verifyRow(rows.get(6), "wikipedia2_2008-01-01T00:00:00.000Z_2009-01-01T00:00:00.000Z_version2",
    83000L, // size
    0L, // partition_num
    0L, // num_replicas
    0L, // numRows
    1L, // is_published
    0L, // is_available
    0L, // is_realtime
    0L, // is_overshadowed
    expectedCompactionState); // is_compacted (compaction state)
verifyRow(rows.get(7), "wikipedia3_2009-01-01T00:00:00.000Z_2010-01-01T00:00:00.000Z_version3",
    47000L, // size
    0L, // partition_num
    0L, // num_replicas
    0L, // numRows
    1L, // is_published
    0L, // is_available
    0L, // is_realtime
    0L, // is_overshadowed
    null); // is_compacted (compaction state)
// Verify value types.
verifyTypes(rows, SystemSchema.SEGMENTS_SIGNATURE);
}
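The createDataContext helper used above sits outside this excerpt. Below is a minimal sketch of what it plausibly returns, assuming the system tables only consult DataContext#get to obtain an AuthenticationResult for the requesting user; the lookup behavior and the AuthenticationResult constructor arguments here are assumptions, not the project's actual values.
private DataContext createDataContext(final String user)
{
  return new DataContext()
  {
    @Override
    public SchemaPlus getRootSchema()
    {
      // not needed for scanning the system tables in this sketch
      return null;
    }

    @Override
    public JavaTypeFactory getTypeFactory()
    {
      return null;
    }

    @Override
    public QueryProvider getQueryProvider()
    {
      return null;
    }

    @Override
    public Object get(final String name)
    {
      // assumption: the tables look the authentication result up by a well-known key,
      // so returning it unconditionally is sufficient for a test sketch
      return new AuthenticationResult(user, "allowAll", null, null);
    }
  };
}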
Use of org.apache.calcite.DataContext in project druid by druid-io.
From the class SystemSchemaTest, method testServerSegmentsTable:
@Test
public void testServerSegmentsTable() {
SystemSchema.ServerSegmentsTable serverSegmentsTable = EasyMock.createMockBuilder(SystemSchema.ServerSegmentsTable.class).withConstructor(serverView, authMapper).createMock();
EasyMock.replay(serverSegmentsTable);
EasyMock.expect(serverView.getDruidServers()).andReturn(immutableDruidServers).once();
EasyMock.replay(serverView);
DataContext dataContext = createDataContext(Users.SUPER);
// server_segments table is the join of the servers and segments tables
// it will have 5 rows, as follows:
// localhost:0000 | test1_2010-01-01T00:00:00.000Z_2011-01-01T00:00:00.000Z_version1 (segment1)
// localhost:0000 | test2_2011-01-01T00:00:00.000Z_2012-01-01T00:00:00.000Z_version2 (segment2)
// server2:1234   | test3_2012-01-01T00:00:00.000Z_2013-01-01T00:00:00.000Z_version3_2 (segment3)
// server2:1234   | test4_2014-01-01T00:00:00.000Z_2015-01-01T00:00:00.000Z_version4 (segment4)
// server2:1234   | test5_2015-01-01T00:00:00.000Z_2016-01-01T00:00:00.000Z_version5 (segment5)
final List<Object[]> rows = serverSegmentsTable.scan(dataContext).toList();
Assert.assertEquals(5, rows.size());
Object[] row0 = rows.get(0);
Assert.assertEquals("localhost:0000", row0[0]);
Assert.assertEquals("test1_2010-01-01T00:00:00.000Z_2011-01-01T00:00:00.000Z_version1", row0[1].toString());
Object[] row1 = rows.get(1);
Assert.assertEquals("localhost:0000", row1[0]);
Assert.assertEquals("test2_2011-01-01T00:00:00.000Z_2012-01-01T00:00:00.000Z_version2", row1[1].toString());
Object[] row2 = rows.get(2);
Assert.assertEquals("server2:1234", row2[0]);
Assert.assertEquals("test3_2012-01-01T00:00:00.000Z_2013-01-01T00:00:00.000Z_version3_2", row2[1].toString());
Object[] row3 = rows.get(3);
Assert.assertEquals("server2:1234", row3[0]);
Assert.assertEquals("test4_2014-01-01T00:00:00.000Z_2015-01-01T00:00:00.000Z_version4", row3[1].toString());
Object[] row4 = rows.get(4);
Assert.assertEquals("server2:1234", row4[0]);
Assert.assertEquals("test5_2015-01-01T00:00:00.000Z_2016-01-01T00:00:00.000Z_version5", row4[1].toString());
// Verify value types.
verifyTypes(rows, SystemSchema.SERVER_SEGMENTS_SIGNATURE);
}
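verifyTypes is also defined outside this excerpt; at minimum it must confirm that every scanned row lines up with the declared signature. A much-simplified sketch of that structural check, assuming RowSignature exposes size() (the real helper presumably also validates each value's runtime class against its column type):
private static void verifyRowShape(final List<Object[]> rows, final RowSignature signature)
{
  for (Object[] row : rows) {
    // every scanned row must carry exactly one value per column declared in the signature
    Assert.assertEquals(signature.size(), row.length);
  }
}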
Use of org.apache.calcite.DataContext in project druid by druid-io.
From the class SystemSchemaTest, method testSupervisorTable:
@Test
public void testSupervisorTable() throws Exception {
SystemSchema.SupervisorsTable supervisorTable = EasyMock.createMockBuilder(SystemSchema.SupervisorsTable.class).withConstructor(client, mapper, authMapper).createMock();
EasyMock.replay(supervisorTable);
EasyMock.expect(client.makeRequest(HttpMethod.GET, "/druid/indexer/v1/supervisor?system")).andReturn(request).anyTimes();
HttpResponse httpResp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
InputStreamFullResponseHolder responseHolder = new InputStreamFullResponseHolder(httpResp);
EasyMock.expect(client.go(EasyMock.eq(request), EasyMock.anyObject(InputStreamFullResponseHandler.class))).andReturn(responseHolder).once();
EasyMock.expect(responseHandler.getStatus()).andReturn(httpResp.getStatus().getCode()).anyTimes();
EasyMock.expect(request.getUrl()).andReturn(new URL("http://test-host:1234/druid/indexer/v1/supervisor?system")).anyTimes();
String json = "[{\n"
    + "\t\"id\": \"wikipedia\",\n"
    + "\t\"state\": \"UNHEALTHY_SUPERVISOR\",\n"
    + "\t\"detailedState\": \"UNABLE_TO_CONNECT_TO_STREAM\",\n"
    + "\t\"healthy\": false,\n"
    + "\t\"specString\": \"{\\\"type\\\":\\\"kafka\\\",\\\"dataSchema\\\":{\\\"dataSource\\\":\\\"wikipedia\\\"}"
    + ",\\\"context\\\":null,\\\"suspended\\\":false}\",\n"
    + "\t\"type\": \"kafka\",\n"
    + "\t\"source\": \"wikipedia\",\n"
    + "\t\"suspended\": false\n"
    + "}]";
byte[] bytesToWrite = json.getBytes(StandardCharsets.UTF_8);
responseHolder.addChunk(bytesToWrite);
responseHolder.done();
EasyMock.replay(client, request, responseHandler);
DataContext dataContext = createDataContext(Users.SUPER);
final List<Object[]> rows = supervisorTable.scan(dataContext).toList();
Object[] row0 = rows.get(0);
Assert.assertEquals("wikipedia", row0[0].toString());
Assert.assertEquals("UNHEALTHY_SUPERVISOR", row0[1].toString());
Assert.assertEquals("UNABLE_TO_CONNECT_TO_STREAM", row0[2].toString());
Assert.assertEquals(0L, row0[3]);
Assert.assertEquals("kafka", row0[4].toString());
Assert.assertEquals("wikipedia", row0[5].toString());
Assert.assertEquals(0L, row0[6]);
Assert.assertEquals("{\"type\":\"kafka\",\"dataSchema\":{\"dataSource\":\"wikipedia\"},\"context\":null,\"suspended\":false}", row0[7].toString());
// Verify value types.
verifyTypes(rows, SystemSchema.SUPERVISOR_SIGNATURE);
}
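The \\\" sequences in the mocked payload above exist because the supervisor's specString field is itself a JSON document: embedding it as a JSON string value escapes its inner quotes, and the table surfaces the unescaped spec, which is what row0[7] is compared against. A small self-contained illustration of that round trip using plain Jackson (the method name here is arbitrary):
static void illustrateSpecStringEscaping() throws Exception
{
  final ObjectMapper mapper = new ObjectMapper();
  final String spec = "{\"type\":\"kafka\",\"dataSchema\":{\"dataSource\":\"wikipedia\"},\"context\":null,\"suspended\":false}";
  // serializing the spec as a JSON string value escapes the embedded quotes,
  // producing exactly the \" form seen in the mocked HTTP response body
  final String escaped = mapper.writeValueAsString(spec);
  // reading it back yields the unescaped spec that the assertion on row0[7] expects
  Assert.assertEquals(spec, mapper.readValue(escaped, String.class));
}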
Use of org.apache.calcite.DataContext in project druid by druid-io.
From the class SystemSchemaTest, method testTasksTable:
@Test
public void testTasksTable() throws Exception {
SystemSchema.TasksTable tasksTable = EasyMock.createMockBuilder(SystemSchema.TasksTable.class).withConstructor(client, mapper, authMapper).createMock();
EasyMock.replay(tasksTable);
EasyMock.expect(client.makeRequest(HttpMethod.GET, "/druid/indexer/v1/tasks")).andReturn(request).anyTimes();
HttpResponse httpResp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
InputStreamFullResponseHolder responseHolder = new InputStreamFullResponseHolder(httpResp);
EasyMock.expect(client.go(EasyMock.eq(request), EasyMock.anyObject(InputStreamFullResponseHandler.class))).andReturn(responseHolder).once();
EasyMock.expect(request.getUrl()).andReturn(new URL("http://test-host:1234/druid/indexer/v1/tasks")).anyTimes();
String json = "[{\n"
    + "\t\"id\": \"index_wikipedia_2018-09-20T22:33:44.911Z\",\n"
    + "\t\"groupId\": \"group_index_wikipedia_2018-09-20T22:33:44.911Z\",\n"
    + "\t\"type\": \"index\",\n"
    + "\t\"createdTime\": \"2018-09-20T22:33:44.922Z\",\n"
    + "\t\"queueInsertionTime\": \"1970-01-01T00:00:00.000Z\",\n"
    + "\t\"statusCode\": \"FAILED\",\n"
    + "\t\"runnerStatusCode\": \"NONE\",\n"
    + "\t\"duration\": -1,\n"
    + "\t\"location\": {\n"
    + "\t\t\"host\": \"testHost\",\n"
    + "\t\t\"port\": 1234,\n"
    + "\t\t\"tlsPort\": -1\n"
    + "\t},\n"
    + "\t\"dataSource\": \"wikipedia\",\n"
    + "\t\"errorMsg\": null\n"
    + "}, {\n"
    + "\t\"id\": \"index_wikipedia_2018-09-21T18:38:47.773Z\",\n"
    + "\t\"groupId\": \"group_index_wikipedia_2018-09-21T18:38:47.773Z\",\n"
    + "\t\"type\": \"index\",\n"
    + "\t\"createdTime\": \"2018-09-21T18:38:47.873Z\",\n"
    + "\t\"queueInsertionTime\": \"2018-09-21T18:38:47.910Z\",\n"
    + "\t\"statusCode\": \"RUNNING\",\n"
    + "\t\"runnerStatusCode\": \"RUNNING\",\n"
    + "\t\"duration\": null,\n"
    + "\t\"location\": {\n"
    + "\t\t\"host\": \"192.168.1.6\",\n"
    + "\t\t\"port\": 8100,\n"
    + "\t\t\"tlsPort\": -1\n"
    + "\t},\n"
    + "\t\"dataSource\": \"wikipedia\",\n"
    + "\t\"errorMsg\": null\n"
    + "}]";
byte[] bytesToWrite = json.getBytes(StandardCharsets.UTF_8);
responseHolder.addChunk(bytesToWrite);
responseHolder.done();
EasyMock.replay(client, request, responseHandler);
DataContext dataContext = createDataContext(Users.SUPER);
final List<Object[]> rows = tasksTable.scan(dataContext).toList();
Object[] row0 = rows.get(0);
Assert.assertEquals("index_wikipedia_2018-09-20T22:33:44.911Z", row0[0].toString());
Assert.assertEquals("group_index_wikipedia_2018-09-20T22:33:44.911Z", row0[1].toString());
Assert.assertEquals("index", row0[2].toString());
Assert.assertEquals("wikipedia", row0[3].toString());
Assert.assertEquals("2018-09-20T22:33:44.922Z", row0[4].toString());
Assert.assertEquals("1970-01-01T00:00:00.000Z", row0[5].toString());
Assert.assertEquals("FAILED", row0[6].toString());
Assert.assertEquals("NONE", row0[7].toString());
Assert.assertEquals(-1L, row0[8]);
Assert.assertEquals("testHost:1234", row0[9]);
Assert.assertEquals("testHost", row0[10]);
Assert.assertEquals(1234L, row0[11]);
Assert.assertEquals(-1L, row0[12]);
Assert.assertEquals(null, row0[13]);
Object[] row1 = rows.get(1);
Assert.assertEquals("index_wikipedia_2018-09-21T18:38:47.773Z", row1[0].toString());
Assert.assertEquals("group_index_wikipedia_2018-09-21T18:38:47.773Z", row1[1].toString());
Assert.assertEquals("index", row1[2].toString());
Assert.assertEquals("wikipedia", row1[3].toString());
Assert.assertEquals("2018-09-21T18:38:47.873Z", row1[4].toString());
Assert.assertEquals("2018-09-21T18:38:47.910Z", row1[5].toString());
Assert.assertEquals("RUNNING", row1[6].toString());
Assert.assertEquals("RUNNING", row1[7].toString());
Assert.assertEquals(0L, row1[8]);
Assert.assertEquals("192.168.1.6:8100", row1[9]);
Assert.assertEquals("192.168.1.6", row1[10]);
Assert.assertEquals(8100L, row1[11]);
Assert.assertEquals(-1L, row1[12]);
Assert.assertEquals(null, row1[13]);
// Verify value types.
verifyTypes(rows, SystemSchema.TASKS_SIGNATURE);
}
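The mocked task payload drives all of the row assertions above: the nested location object becomes the host, port, and "host:port" location columns, and the second task's null duration surfaces as 0L. A quick independent sanity check of that payload with plain Jackson (a sketch; the helper name is arbitrary, and the column mapping itself is inferred from the assertions above, not reproduced here):
@SuppressWarnings("unchecked")
static void sanityCheckTaskJson(final String json) throws Exception
{
  final ObjectMapper mapper = new ObjectMapper();
  final List<Map<String, Object>> tasks =
      mapper.readValue(json, new TypeReference<List<Map<String, Object>>>() {});
  Assert.assertEquals(2, tasks.size());
  // the first task's nested location is what the host/port/location columns are built from
  final Map<String, Object> location = (Map<String, Object>) tasks.get(0).get("location");
  Assert.assertEquals("testHost", location.get("host"));
  Assert.assertEquals(1234, ((Number) location.get("port")).intValue());
  // duration is -1 for the failed task and null for the running one; the scan above
  // surfaces the null as 0L (see the row1[8] assertion)
  Assert.assertNull(tasks.get(1).get("duration"));
}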
Use of org.apache.calcite.DataContext in project druid by druid-io.
From the class DruidPlanner, method planWithBindableConvention:
/**
* Construct a {@link PlannerResult} for a fall-back 'bindable' rel, used for things that are not directly
* translatable to native Druid queries (such as system tables) and as a general-purpose, but definitely not
* optimized, fall-back.
*
* See {@link #planWithDruidConvention}, which handles queries that are directly translatable to native
* Druid queries.
*/
private PlannerResult planWithBindableConvention(final RelRoot root, @Nullable final SqlExplain explain) throws RelConversionException {
BindableRel bindableRel = (BindableRel) planner.transform(
    Rules.BINDABLE_CONVENTION_RULES,
    planner.getEmptyTraitSet().replace(BindableConvention.INSTANCE).plus(root.collation),
    root.rel
);
if (!root.isRefTrivial()) {
// Add a projection on top to accommodate root.fields.
final List<RexNode> projects = new ArrayList<>();
final RexBuilder rexBuilder = bindableRel.getCluster().getRexBuilder();
for (int field : Pair.left(root.fields)) {
projects.add(rexBuilder.makeInputRef(bindableRel, field));
}
bindableRel = new Bindables.BindableProject(
    bindableRel.getCluster(),
    bindableRel.getTraitSet(),
    bindableRel,
    projects,
    root.validatedRowType
);
}
if (explain != null) {
return planExplanation(bindableRel, explain, false);
} else {
final BindableRel theRel = bindableRel;
final DataContext dataContext = plannerContext.createDataContext((JavaTypeFactory) planner.getTypeFactory(), plannerContext.getParameters());
final Supplier<Sequence<Object[]>> resultsSupplier = () -> {
final Enumerable<?> enumerable = theRel.bind(dataContext);
final Enumerator<?> enumerator = enumerable.enumerator();
return Sequences.withBaggage(new BaseSequence<>(new BaseSequence.IteratorMaker<Object[], EnumeratorIterator<Object[]>>() {
@Override
public EnumeratorIterator<Object[]> make() {
return new EnumeratorIterator<>(new Iterator<Object[]>() {
@Override
public boolean hasNext() {
return enumerator.moveNext();
}
@Override
public Object[] next() {
return (Object[]) enumerator.current();
}
});
}
@Override
public void cleanup(EnumeratorIterator<Object[]> iterFromMake) {
}
}), enumerator::close);
};
return new PlannerResult(resultsSupplier, root.validatedRowType);
}
}
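EnumeratorIterator is referenced above but defined elsewhere in DruidPlanner; it is presumably nothing more than a thin delegating wrapper that gives the sequence machinery an Iterator view over the bindable enumerator. A sketch of that shape (field and parameter names here are guesses):
private static class EnumeratorIterator<T> implements Iterator<T>
{
  private final Iterator<T> it;

  EnumeratorIterator(final Iterator<T> it)
  {
    this.it = it;
  }

  @Override
  public boolean hasNext()
  {
    // delegate straight to the wrapped iterator built over Enumerator#moveNext
    return it.hasNext();
  }

  @Override
  public T next()
  {
    return it.next();
  }
}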