Search in sources :

Example 1 with SegmentsTable

Use of org.apache.druid.sql.calcite.schema.SystemSchema.SegmentsTable in the druid project by druid-io.

From the class SystemSchemaTest, method testSegmentsTable.

@Test
public void testSegmentsTable() throws Exception {
    final SegmentsTable segmentsTable = new SegmentsTable(druidSchema, metadataView, new ObjectMapper(), authMapper);
    final Set<SegmentWithOvershadowedStatus> publishedSegments = new HashSet<>(Arrays.asList(new SegmentWithOvershadowedStatus(publishedCompactedSegment1, true), new SegmentWithOvershadowedStatus(publishedCompactedSegment2, false), new SegmentWithOvershadowedStatus(publishedUncompactedSegment3, false), new SegmentWithOvershadowedStatus(segment1, true), new SegmentWithOvershadowedStatus(segment2, false)));
    EasyMock.expect(metadataView.getPublishedSegments()).andReturn(publishedSegments.iterator()).once();
    EasyMock.replay(client, request, responseHolder, responseHandler, metadataView);
    DataContext dataContext = createDataContext(Users.SUPER);
    final List<Object[]> rows = segmentsTable.scan(dataContext).toList();
    rows.sort((Object[] row1, Object[] row2) -> ((Comparable) row1[0]).compareTo(row2[0]));
    // total segments = 8
    // segments test1, test2  are published and available
    // segment test3 is served by historical but unpublished or unused
    // segments test4, test5 are not published but available (realtime segments)
    // segment test2 is both published and served by a realtime server.
    Assert.assertEquals(8, rows.size());
    verifyRow(rows.get(0), "test1_2010-01-01T00:00:00.000Z_2011-01-01T00:00:00.000Z_version1", 100L, // partition_num
    0L, // num_replicas
    1L, // numRows
    3L, // is_published
    1L, // is_available
    1L, // is_realtime
    0L, // is_overshadowed
    1L, // is_compacted
    null);
    verifyRow(rows.get(1), "test2_2011-01-01T00:00:00.000Z_2012-01-01T00:00:00.000Z_version2", 100L, // partition_num
    0L, // x§segment test2 is served by historical and realtime servers
    2L, // numRows
    3L, // is_published
    1L, // is_available
    1L, // is_realtime
    0L, // is_overshadowed,
    0L, // is_compacted
    null);
    // segment test3 is unpublished and has a NumberedShardSpec with partitionNum = 2
    verifyRow(rows.get(2), "test3_2012-01-01T00:00:00.000Z_2013-01-01T00:00:00.000Z_version3_2", 100L, // partition_num
    2L, // num_replicas
    1L, // numRows
    2L, // is_published
    0L, // is_available
    1L, // is_realtime
    0L, // is_overshadowed
    0L, // is_compacted
    null);
    verifyRow(rows.get(3), "test4_2014-01-01T00:00:00.000Z_2015-01-01T00:00:00.000Z_version4", 100L, // partition_num
    0L, // num_replicas
    1L, // numRows
    0L, // is_published
    0L, // is_available
    1L, // is_realtime
    1L, // is_overshadowed
    0L, // is_compacted
    null);
    verifyRow(rows.get(4), "test5_2015-01-01T00:00:00.000Z_2016-01-01T00:00:00.000Z_version5", 100L, // partition_num
    0L, // num_replicas
    1L, // numRows
    0L, // is_published
    0L, // is_available
    1L, // is_realtime
    1L, // is_overshadowed
    0L, // is_compacted
    null);
    // wikipedia segments are published and unavailable, num_replicas is 0
    // wikipedia segment 1 and 2 are compacted while 3 are not compacted
    verifyRow(rows.get(5), "wikipedia1_2007-01-01T00:00:00.000Z_2008-01-01T00:00:00.000Z_version1", 53000L, // partition_num
    0L, // num_replicas
    0L, // numRows
    0L, // is_published
    1L, // is_available
    0L, // is_realtime
    0L, // is_overshadowed
    1L, // is_compacted
    expectedCompactionState);
    verifyRow(rows.get(6), "wikipedia2_2008-01-01T00:00:00.000Z_2009-01-01T00:00:00.000Z_version2", 83000L, // partition_num
    0L, // num_replicas
    0L, // numRows
    0L, // is_published
    1L, // is_available
    0L, // is_realtime
    0L, // is_overshadowed
    0L, // is_compacted
    expectedCompactionState);
    verifyRow(rows.get(7), "wikipedia3_2009-01-01T00:00:00.000Z_2010-01-01T00:00:00.000Z_version3", 47000L, // partition_num
    0L, // num_replicas
    0L, // numRows
    0L, // is_published
    1L, // is_available
    0L, // is_realtime
    0L, // is_overshadowed
    0L, // is_compacted
    null);
    // Verify value types.
    verifyTypes(rows, SystemSchema.SEGMENTS_SIGNATURE);
}
Also used : DataContext(org.apache.calcite.DataContext) SegmentsTable(org.apache.druid.sql.calcite.schema.SystemSchema.SegmentsTable) SegmentWithOvershadowedStatus(org.apache.druid.timeline.SegmentWithOvershadowedStatus) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 2 with SegmentsTable

Use of org.apache.druid.sql.calcite.schema.SystemSchema.SegmentsTable in the druid project by druid-io.

From the class SystemSchemaTest, method testGetTableMap.

@Test
public void testGetTableMap() {
    Assert.assertEquals(ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors"), schema.getTableNames());
    final Map<String, Table> tableMap = schema.getTableMap();
    Assert.assertEquals(ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors"), tableMap.keySet());
    final SystemSchema.SegmentsTable segmentsTable = (SystemSchema.SegmentsTable) schema.getTableMap().get("segments");
    final RelDataType rowType = segmentsTable.getRowType(new JavaTypeFactoryImpl());
    final List<RelDataTypeField> fields = rowType.getFieldList();
    Assert.assertEquals(17, fields.size());
    final SystemSchema.TasksTable tasksTable = (SystemSchema.TasksTable) schema.getTableMap().get("tasks");
    final RelDataType sysRowType = tasksTable.getRowType(new JavaTypeFactoryImpl());
    final List<RelDataTypeField> sysFields = sysRowType.getFieldList();
    Assert.assertEquals(14, sysFields.size());
    Assert.assertEquals("task_id", sysFields.get(0).getName());
    Assert.assertEquals(SqlTypeName.VARCHAR, sysFields.get(0).getType().getSqlTypeName());
    final SystemSchema.ServersTable serversTable = (SystemSchema.ServersTable) schema.getTableMap().get("servers");
    final RelDataType serverRowType = serversTable.getRowType(new JavaTypeFactoryImpl());
    final List<RelDataTypeField> serverFields = serverRowType.getFieldList();
    Assert.assertEquals(9, serverFields.size());
    Assert.assertEquals("server", serverFields.get(0).getName());
    Assert.assertEquals(SqlTypeName.VARCHAR, serverFields.get(0).getType().getSqlTypeName());
}
Also used : SegmentsTable(org.apache.druid.sql.calcite.schema.SystemSchema.SegmentsTable) Table(org.apache.calcite.schema.Table) SegmentsTable(org.apache.druid.sql.calcite.schema.SystemSchema.SegmentsTable) RelDataType(org.apache.calcite.rel.type.RelDataType) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) SegmentsTable(org.apache.druid.sql.calcite.schema.SystemSchema.SegmentsTable) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) Test(org.junit.Test)

Aggregations

SegmentsTable (org.apache.druid.sql.calcite.schema.SystemSchema.SegmentsTable)2 Test (org.junit.Test)2 ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper)1 HashSet (java.util.HashSet)1 DataContext (org.apache.calcite.DataContext)1 JavaTypeFactoryImpl (org.apache.calcite.jdbc.JavaTypeFactoryImpl)1 RelDataType (org.apache.calcite.rel.type.RelDataType)1 RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField)1 Table (org.apache.calcite.schema.Table)1 SegmentWithOvershadowedStatus (org.apache.druid.timeline.SegmentWithOvershadowedStatus)1