use of org.apache.drill.exec.record.metadata.TupleMetadata in project drill by apache.
the class ParquetTableMetadataUtils method getRowGroupMetadata.
/**
* Returns {@link RowGroupMetadata} instance converted from specified parquet {@code rowGroupMetadata}.
*
* @param tableMetadata table metadata which contains row group metadata to convert
* @param rowGroupMetadata row group metadata to convert
* @param rgIndexInFile index of current row group within the file
* @param location location of file with current row group
* @return {@link RowGroupMetadata} instance converted from specified parquet {@code rowGroupMetadata}
*/
public static RowGroupMetadata getRowGroupMetadata(MetadataBase.ParquetTableMetadataBase tableMetadata,
    MetadataBase.RowGroupMetadata rowGroupMetadata, int rgIndexInFile, Path location) {
  Map<SchemaPath, ColumnStatistics<?>> columnsStatistics = getRowGroupColumnStatistics(tableMetadata, rowGroupMetadata);
  List<StatisticsHolder<?>> rowGroupStatistics = new ArrayList<>();
  rowGroupStatistics.add(new StatisticsHolder<>(rowGroupMetadata.getRowCount(), TableStatisticsKind.ROW_COUNT));
  rowGroupStatistics.add(new StatisticsHolder<>(rowGroupMetadata.getStart(), new BaseStatisticsKind<>(ExactStatisticsConstants.START, true)));
  rowGroupStatistics.add(new StatisticsHolder<>(rowGroupMetadata.getLength(), new BaseStatisticsKind<>(ExactStatisticsConstants.LENGTH, true)));
  Map<SchemaPath, TypeProtos.MajorType> columns = getRowGroupFields(tableMetadata, rowGroupMetadata);
  Map<SchemaPath, TypeProtos.MajorType> intermediateColumns = getIntermediateFields(tableMetadata, rowGroupMetadata);
  TupleMetadata schema = new TupleSchema();
  columns.forEach((schemaPath, majorType) -> SchemaPathUtils.addColumnMetadata(schema, schemaPath, majorType, intermediateColumns));
  MetadataInfo metadataInfo = MetadataInfo.builder().type(MetadataType.ROW_GROUP).build();
  return RowGroupMetadata.builder()
      .tableInfo(TableInfo.UNKNOWN_TABLE_INFO)
      .metadataInfo(metadataInfo)
      .schema(schema)
      .columnsStatistics(columnsStatistics)
      .metadataStatistics(rowGroupStatistics)
      .hostAffinity(rowGroupMetadata.getHostAffinity())
      .rowGroupIndex(rgIndexInFile)
      .path(location)
      .build();
}
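A hypothetical caller sketch, not taken from the Drill source shown above: converting each row group of a single Parquet file's metadata with the method above. The fileMetadata and location variables and the getRowGroups() accessor are assumptions used only for illustration.
int rgIndex = 0;
for (MetadataBase.RowGroupMetadata parquetRowGroup : fileMetadata.getRowGroups()) {   // assumed accessor
  RowGroupMetadata rowGroup = ParquetTableMetadataUtils.getRowGroupMetadata(
      tableMetadata, parquetRowGroup, rgIndex++, location);
  // rowGroup now carries the converted schema, column statistics, row count,
  // start/length and host affinity for this row group.
}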
use of org.apache.drill.exec.record.metadata.TupleMetadata in project drill by apache.
the class TestResultSetLoaderRepeatedList method test3DEarlySchema.
// Adapted from TestRepeatedListAccessors.testSchema3DWriterReader
// That test exercises the low-level schema and writer mechanisms.
// Here we simply ensure that the 3D case continues to work when
// wrapped in the Result Set Loader
@Test
public void test3DEarlySchema() {
final TupleMetadata schema = new SchemaBuilder()
    .add("id", MinorType.INT)
    .addArray("cube", MinorType.VARCHAR, 3)
    .buildSchema();
final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder().readerSchema(schema).build();
final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
rsLoader.startBatch();
final RowSetLoader writer = rsLoader.writer();
writer
    .addRow(1, objArray(objArray(strArray("a", "b"), strArray("c")), objArray(strArray("d", "e", "f"), null), null, objArray()))
    .addRow(2, null)
    .addRow(3, objArray())
    .addRow(4, objArray(objArray()))
    .addRow(5, singleObjArray(objArray(strArray("g", "h"), strArray("i"))));
final SingleRowSet expected = fixture.rowSetBuilder(schema)
    .addRow(1, objArray(objArray(strArray("a", "b"), strArray("c")), objArray(strArray("d", "e", "f"), strArray()), objArray(), objArray()))
    .addRow(2, objArray())
    .addRow(3, objArray())
    .addRow(4, objArray(objArray()))
    .addRow(5, singleObjArray(objArray(strArray("g", "h"), strArray("i"))))
    .build();
RowSetUtilities.verify(expected, fixture.wrap(rsLoader.harvest()));
}
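For orientation, a minimal read-back sketch of the harvested 3-D result. It reuses the nested reader pattern from test2DOverflow below (RowSetReader, ArrayReader, ScalarReader) and would take the place of the RowSetUtilities.verify call above; treat it as illustrative rather than part of the original test.
// Illustrative sketch: walk the three levels of the "cube" column with nested readers.
final RowSet result = fixture.wrap(rsLoader.harvest());
final RowSetReader rowReader = result.reader();
final ArrayReader outer = rowReader.array("cube");
final ArrayReader inner = outer.array();
final ScalarReader leaf = inner.scalar();
while (rowReader.next()) {
  while (outer.next()) {
    while (inner.next()) {
      leaf.getString();   // one VARCHAR element of the innermost array
    }
  }
}
result.clear();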
use of org.apache.drill.exec.record.metadata.TupleMetadata in project drill by apache.
the class TestResultSetLoaderRepeatedList method test2DEarlySchema.
@Test
public void test2DEarlySchema() {
final TupleMetadata schema = new SchemaBuilder()
    .add("id", MinorType.INT)
    .addRepeatedList("list2")
      .addArray(MinorType.VARCHAR)
      .resumeSchema()
    .buildSchema();
final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder().readerSchema(schema).build();
final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
do2DTest(schema, rsLoader);
rsLoader.close();
}
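The do2DTest helper is not included in this snippet. Below is a rough sketch of what such a 2-D write-and-verify pass might look like, assuming the same writer and row-set comparison helpers used in the other tests here; the row values are invented for illustration.
// Hypothetical sketch of a do2DTest-style body; values are illustrative only.
rsLoader.startBatch();
final RowSetLoader writer = rsLoader.writer();
writer
    .addRow(1, objArray(strArray("a", "b"), strArray("c")))
    .addRow(2, objArray())
    .addRow(3, null);
final SingleRowSet expected = fixture.rowSetBuilder(schema)
    .addRow(1, objArray(strArray("a", "b"), strArray("c")))
    .addRow(2, objArray())
    .addRow(3, objArray())   // assumes a null outer list reads back as empty, as in test3DEarlySchema
    .build();
RowSetUtilities.verify(expected, fixture.wrap(rsLoader.harvest()));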
use of org.apache.drill.exec.record.metadata.TupleMetadata in project drill by apache.
the class TestResultSetLoaderRepeatedList method test2DOverflow.
@Test
public void test2DOverflow() {
final TupleMetadata schema = new SchemaBuilder()
    .add("id", MinorType.INT)
    .addRepeatedList("list2")
      .addArray(MinorType.VARCHAR)
      .resumeSchema()
    .buildSchema();
final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
    .rowCountLimit(ValueVector.MAX_ROW_COUNT)
    .readerSchema(schema)
    .build();
final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
final RowSetLoader writer = rsLoader.writer();
// Fill the batch with enough data to cause overflow.
// The data must be large enough to cause overflow before 64K rows;
// make the strings a bit bigger so overflow happens early.
final int outerSize = 7;
final int innerSize = 5;
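// Each row writes outerSize * innerSize strings of exactly strLength bytes (the
// %06d suffix below restores the 6 bytes trimmed from the fill value), so the
// VARCHAR data buffer reaches MAX_BUFFER_SIZE well before MAX_ROW_COUNT rows.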
final int strLength = ValueVector.MAX_BUFFER_SIZE / ValueVector.MAX_ROW_COUNT / outerSize / innerSize + 20;
final byte[] value = new byte[strLength - 6];
Arrays.fill(value, (byte) 'X');
final String strValue = new String(value, Charsets.UTF_8);
int rowCount = 0;
int elementCount = 0;
final ArrayWriter outerWriter = writer.array(1);
final ArrayWriter innerWriter = outerWriter.array();
final ScalarWriter elementWriter = innerWriter.scalar();
rsLoader.startBatch();
while (!writer.isFull()) {
writer.start();
writer.scalar(0).setInt(rowCount);
for (int j = 0; j < outerSize; j++) {
for (int k = 0; k < innerSize; k++) {
elementWriter.setString(String.format("%s%06d", strValue, elementCount));
elementCount++;
}
outerWriter.save();
}
writer.save();
rowCount++;
}
// Number of rows should be driven by vector size.
// Our row count should include the overflow row
final int expectedCount = ValueVector.MAX_BUFFER_SIZE / (strLength * innerSize * outerSize);
assertEquals(expectedCount + 1, rowCount);
// Loader's row count should include only "visible" rows
assertEquals(expectedCount, writer.rowCount());
// Total count should include invisible and look-ahead rows.
assertEquals(expectedCount + 1, rsLoader.totalRowCount());
// Result should exclude the overflow row
RowSet result = fixture.wrap(rsLoader.harvest());
assertEquals(expectedCount, result.rowCount());
// Verify the data.
RowSetReader reader = result.reader();
ArrayReader outerReader = reader.array(1);
ArrayReader innerReader = outerReader.array();
ScalarReader strReader = innerReader.scalar();
int readRowCount = 0;
int readElementCount = 0;
while (reader.next()) {
assertEquals(readRowCount, reader.scalar(0).getInt());
for (int i = 0; i < outerSize; i++) {
assertTrue(outerReader.next());
for (int j = 0; j < innerSize; j++) {
assertTrue(innerReader.next());
assertEquals(String.format("%s%06d", strValue, readElementCount), strReader.getString());
readElementCount++;
}
assertFalse(innerReader.next());
}
assertFalse(outerReader.next());
readRowCount++;
}
assertEquals(readRowCount, result.rowCount());
result.clear();
// Write a few more rows to verify the overflow row.
rsLoader.startBatch();
for (int i = 0; i < 1000; i++) {
writer.start();
writer.scalar(0).setInt(rowCount);
for (int j = 0; j < outerSize; j++) {
for (int k = 0; k < innerSize; k++) {
elementWriter.setString(String.format("%s%06d", strValue, elementCount));
elementCount++;
}
outerWriter.save();
}
writer.save();
rowCount++;
}
result = fixture.wrap(rsLoader.harvest());
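// Expect 1001 rows: the 1000 rows written above plus the overflow row carried
// over from the previous batch.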
assertEquals(1001, result.rowCount());
final int startCount = readRowCount;
reader = result.reader();
outerReader = reader.array(1);
innerReader = outerReader.array();
strReader = innerReader.scalar();
while (reader.next()) {
assertEquals(readRowCount, reader.scalar(0).getInt());
for (int i = 0; i < outerSize; i++) {
assertTrue(outerReader.next());
for (int j = 0; j < innerSize; j++) {
assertTrue(innerReader.next());
assertEquals(String.format("%s%06d", strValue, readElementCount), strReader.getString());
readElementCount++;
}
assertFalse(innerReader.next());
}
assertFalse(outerReader.next());
readRowCount++;
}
assertEquals(readRowCount - startCount, result.rowCount());
result.clear();
rsLoader.close();
}
use of org.apache.drill.exec.record.metadata.TupleMetadata in project drill by apache.
the class TestResultSetLoaderUnions method testVariantListDynamic.
/**
* Test a variant list created dynamically at load time.
* The list starts with no type, at which time it can hold
* only null values. Then we add a Varchar, and finally an
* Int.
* <p>
* This test is superficial. There are many odd cases to consider.
* <ul>
* <li>Write nulls to a list with no type. (This test ensures that
* adding a (nullable) scalar "does the right thing.")</li>
* <li>Add a map to the list. Maps carry no "bits" vector, so null
* list entries to that point are lost. (For maps, we could go straight
* to a union, with just a map, to preserve the null states. This whole
* area is a huge mess...)</li>
* <li>Do the type transitions when writing to a row. (The tests here
* do the transition between rows.)</li>
* </ul>
*
* The reason for the sparse coverage is that Drill barely supports lists
* and unions; most code is just plain broken. Our goal here is not to fix
* all those problems, just to leave things no more broken than before.
*/
@Test
public void testVariantListDynamic() {
final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
final RowSetLoader writer = rsLoader.writer();
// Can write a batch as if this was a repeated Varchar, except
// that any value can also be null.
rsLoader.startBatch();
writer.addColumn(MaterializedField.create("id", Types.required(MinorType.INT)));
writer.addColumn(MaterializedField.create("list", Types.optional(MinorType.LIST)));
// Sanity check: should be an array of variants because we said the
// types within the list are expandable (which is the default.)
final ArrayWriter arrWriter = writer.array("list");
assertEquals(ObjectType.VARIANT, arrWriter.entryType());
final VariantWriter variant = arrWriter.variant();
// We need to verify that the internal state is what we expect, so
// the next assertion peeks inside the private bits of the union
// writer. No client code should ever need to do this, of course.
assertTrue(((UnionWriterImpl) variant).shim() instanceof EmptyListShim);
// No types, so all we can do is add a null list, or a list of nulls.
writer.addRow(1, null).addRow(2, variantArray()).addRow(3, variantArray(null, null));
// Add a String. Now we can create a list of strings and/or nulls.
variant.addMember(MinorType.VARCHAR);
assertTrue(variant.hasType(MinorType.VARCHAR));
// Sanity check: sniff inside to ensure that the list contains a single
// type.
assertTrue(((UnionWriterImpl) variant).shim() instanceof SimpleListShim);
assertTrue(((ListWriterImpl) arrWriter).vector().getDataVector() instanceof NullableVarCharVector);
writer.addRow(4, variantArray("fred", null, "barney"));
// Add an integer. The list vector should be promoted to union.
// Now we can add both types.
variant.addMember(MinorType.INT);
// Sanity check: sniff inside to ensure promotion to union occurred
assertTrue(((UnionWriterImpl) variant).shim() instanceof UnionVectorShim);
assertTrue(((ListWriterImpl) arrWriter).vector().getDataVector() instanceof UnionVector);
writer.addRow(5, variantArray("wilma", null, 30));
// Verify
final RowSet result = fixture.wrap(rsLoader.harvest());
final TupleMetadata schema = new SchemaBuilder()
    .add("id", MinorType.INT)
    .addList("list")
      .addType(MinorType.VARCHAR)
      .addType(MinorType.INT)
      .resumeSchema()
    .buildSchema();
final SingleRowSet expected = fixture.rowSetBuilder(schema)
    .addRow(1, null)
    .addRow(2, variantArray())
    .addRow(3, variantArray(null, null))
    .addRow(4, variantArray("fred", null, "barney"))
    .addRow(5, variantArray("wilma", null, 30))
    .build();
RowSetUtilities.verify(expected, result);
}
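A hedged read-back sketch for the list-of-variant column. The ArrayReader.variant() and VariantReader accessors are assumptions based on the reader API used in the other tests above, and the walk would have to run before RowSetUtilities.verify releases the underlying row sets.
// Illustrative only: inspect the runtime type of each list entry after the promotions above.
final RowSetReader rowReader = result.reader();
final ArrayReader listReader = rowReader.array("list");
final VariantReader entry = listReader.variant();   // assumed accessor
while (rowReader.next()) {
  while (listReader.next()) {
    if (!entry.isNull()) {
      MinorType entryType = entry.dataType();   // VARCHAR or INT depending on the entry
    }
  }
}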