Example usage of org.apache.drill.exec.record.metadata.ColumnMetadata in the Drill project (fork: axbaretto), taken from class TestTupleSchema, method testRequiredVariableWidthColumn.
@Test
public void testRequiredVariableWidthColumn() {
  // Build a required (non-nullable) VARCHAR column and derive its metadata.
  MaterializedField varcharField =
      SchemaBuilder.columnSchema("c", MinorType.VARCHAR, DataMode.REQUIRED);
  ColumnMetadata metadata = MetadataUtils.fromField(varcharField);

  // A required VARCHAR is a plain (primitive) variable-width column:
  // not nullable, not an array, not a map, not a union/variant.
  assertEquals(ColumnMetadata.StructureType.PRIMITIVE, metadata.structureType());
  assertNull(metadata.mapSchema());
  assertFalse(metadata.isNullable());
  assertFalse(metadata.isArray());
  assertTrue(metadata.isVariableWidth());
  assertFalse(metadata.isMap());
  assertFalse(metadata.isVariant());

  // A different precision is a different type.
  MaterializedField widthTenField = new ColumnBuilder("c", MinorType.VARCHAR)
      .setMode(DataMode.REQUIRED)
      .setPrecision(10)
      .build();
  ColumnMetadata widthTenMetadata = MetadataUtils.fromField(widthTenField);
  assertFalse(metadata.isEquivalent(widthTenMetadata));

  // Without a precision, the default expected width applies; the hint
  // can be overridden explicitly.
  assertEquals(50, metadata.expectedWidth());
  metadata.setExpectedWidth(10);
  assertEquals(10, metadata.expectedWidth());

  // A non-array column pins its expected element count at one; attempts
  // to change it are ignored.
  assertEquals(1, metadata.expectedElementCount());
  metadata.setExpectedElementCount(2);
  assertEquals(1, metadata.expectedElementCount());

  // If precision is provided, then that is the default width.
  metadata = MetadataUtils.fromField(widthTenField);
  assertEquals(10, metadata.expectedWidth());
}
Example usage of org.apache.drill.exec.record.metadata.ColumnMetadata in the Drill project (fork: axbaretto), taken from class TestTupleSchema, method testMapColumn.
/**
 * Tests a map column. Maps can only be required or repeated, not nullable.
 * (But, the columns in the map can be nullable.)
 */
@Test
public void testMapColumn() {
  MaterializedField mapField =
      SchemaBuilder.columnSchema("m", MinorType.MAP, DataMode.REQUIRED);
  ColumnMetadata metadata = MetadataUtils.fromField(mapField);

  // A map column materializes as MapColumnMetadata carrying an (initially
  // empty) tuple schema whose parent is the map column itself.
  assertTrue(metadata instanceof MapColumnMetadata);
  assertNotNull(metadata.mapSchema());
  assertEquals(0, metadata.mapSchema().size());
  assertSame(metadata, metadata.mapSchema().parent());

  // A free-standing map is not yet attached to an enclosing tuple.
  MapColumnMetadata mapMetadata = (MapColumnMetadata) metadata;
  assertNull(mapMetadata.parentTuple());

  // Structurally a map is a TUPLE: neither nullable, array, variable-width
  // nor a variant.
  assertEquals(ColumnMetadata.StructureType.TUPLE, metadata.structureType());
  assertFalse(metadata.isNullable());
  assertFalse(metadata.isArray());
  assertFalse(metadata.isVariableWidth());
  assertTrue(metadata.isMap());
  assertFalse(metadata.isVariant());

  // Width is meaningless for a map; attempts to set it are ignored.
  assertEquals(0, metadata.expectedWidth());
  metadata.setExpectedWidth(10);
  assertEquals(0, metadata.expectedWidth());

  // Likewise, a non-repeated map pins its element count at one.
  assertEquals(1, metadata.expectedElementCount());
  metadata.setExpectedElementCount(2);
  assertEquals(1, metadata.expectedElementCount());
}
Example usage of org.apache.drill.exec.record.metadata.ColumnMetadata in the Drill project (fork: axbaretto), taken from class TestTupleSchema, method testNestedSchema.
@Test
public void testNestedSchema() {
  // Build a single LIST column whose members form a union of:
  // BIGINT | VARCHAR | map{a: INT, b: VARCHAR} | list(FLOAT8 | DECIMAL18).
  TupleMetadata schema = new SchemaBuilder()
      .addList("list")
        .addType(MinorType.BIGINT)
        .addType(MinorType.VARCHAR)
        .addMap()
          .add("a", MinorType.INT)
          .add("b", MinorType.VARCHAR)
          .resumeUnion()
        .addList()
          .addType(MinorType.FLOAT8)
          .addType(MinorType.DECIMAL18)
          .buildNested()
        .resumeSchema()
      .buildSchema();

  // Exactly one top-level column, and it is a variant (union) list.
  assertEquals(1, schema.size());
  ColumnMetadata listColumn = schema.metadata(0);
  assertTrue(listColumn.isVariant());

  // The union has four members, including a map and a nested list.
  VariantMetadata unionSchema = listColumn.variantSchema();
  assertNotNull(unionSchema);
  assertEquals(4, unionSchema.size());
  assertTrue(unionSchema.hasType(MinorType.MAP));
  assertTrue(unionSchema.hasType(MinorType.LIST));

  // The map member carries its own two-column tuple schema.
  ColumnMetadata mapMember = unionSchema.member(MinorType.MAP);
  TupleMetadata mapTuple = mapMember.mapSchema();
  assertEquals(2, mapTuple.size());

  // The nested list member is itself a two-type union.
  ColumnMetadata nestedListMember = unionSchema.member(MinorType.LIST);
  VariantMetadata nestedUnion = nestedListMember.variantSchema();
  assertEquals(2, nestedUnion.size());
  assertTrue(nestedUnion.hasType(MinorType.FLOAT8));
  assertTrue(nestedUnion.hasType(MinorType.DECIMAL18));
}
Example usage of org.apache.drill.exec.record.metadata.ColumnMetadata in the Drill project (fork: axbaretto), taken from class TestTupleSchema, method testRepeatedFixedWidthColumn.
@Test
public void testRepeatedFixedWidthColumn() {
  // A repeated INT: an array of a fixed-width scalar type.
  MaterializedField intArrayField =
      SchemaBuilder.columnSchema("c", MinorType.INT, DataMode.REPEATED);
  ColumnMetadata metadata = MetadataUtils.fromField(intArrayField);

  // Repeated mode means "array", which is itself non-nullable.
  assertFalse(metadata.isNullable());
  assertTrue(metadata.isArray());
  assertFalse(metadata.isVariableWidth());
  assertFalse(metadata.isMap());
  assertFalse(metadata.isVariant());

  // Fixed-width columns report the type's own width (4 bytes for INT);
  // attempts to override the width are ignored.
  assertEquals(4, metadata.expectedWidth());
  metadata.setExpectedWidth(10);
  assertEquals(4, metadata.expectedWidth());

  // Arrays accept a per-value element-count hint, defaulting to
  // DEFAULT_ARRAY_SIZE; a hint of zero is clamped up to one.
  assertEquals(ColumnMetadata.DEFAULT_ARRAY_SIZE, metadata.expectedElementCount());
  metadata.setExpectedElementCount(2);
  assertEquals(2, metadata.expectedElementCount());
  metadata.setExpectedElementCount(0);
  assertEquals(1, metadata.expectedElementCount());
}
Example usage of org.apache.drill.exec.record.metadata.ColumnMetadata in the Drill project (fork: axbaretto), taken from class TestResultSetLoaderProtocol, method testCaseInsensitiveSchema.
/**
 * Schemas are case insensitive by default. Verify that
 * the schema mechanism works, with emphasis on the
 * case insensitive case.
 * <p>
 * The tests here and elsewhere build columns from a
 * <tt>MaterializedField</tt>. Doing so is rather old-school;
 * better to use the newer <tt>ColumnMetadata</tt> which provides
 * additional information. The code here simply uses the <tt>MaterializedField</tt>
 * to create a <tt>ColumnMetadata</tt> implicitly.
 */
@Test
public void testCaseInsensitiveSchema() {
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
RowSetLoader rootWriter = rsLoader.writer();
TupleMetadata schema = rootWriter.schema();
// A brand-new loader starts at schema version 0 with an empty schema.
assertEquals(0, rsLoader.schemaVersion());
// No columns defined in schema
assertNull(schema.metadata("a"));
// Lookups by index on the empty schema fail fast.
try {
schema.column(0);
fail();
} catch (IndexOutOfBoundsException e) {
// Expected
}
// The writer likewise rejects lookups by name or index before any
// column has been defined.
try {
rootWriter.column("a");
fail();
} catch (UndefinedColumnException e) {
// Expected
}
try {
rootWriter.column(0);
fail();
} catch (IndexOutOfBoundsException e) {
// Expected
}
// Define a column
assertEquals(0, rsLoader.schemaVersion());
MaterializedField colSchema = SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.REQUIRED);
rootWriter.addColumn(colSchema);
// Adding a column bumps the schema version.
assertEquals(1, rsLoader.schemaVersion());
// Can now be found, case insensitive
assertTrue(colSchema.isEquivalent(schema.column(0)));
ColumnMetadata colMetadata = schema.metadata(0);
// Name lookups in either case resolve to the same metadata object.
assertSame(colMetadata, schema.metadata("a"));
assertSame(colMetadata, schema.metadata("A"));
assertNotNull(rootWriter.column(0));
assertNotNull(rootWriter.column("a"));
assertNotNull(rootWriter.column("A"));
assertEquals(1, schema.size());
assertEquals(0, schema.index("a"));
assertEquals(0, schema.index("A"));
// Re-adding the identical column is rejected as a duplicate.
try {
rootWriter.addColumn(colSchema);
fail();
} catch (IllegalArgumentException e) {
// Expected
}
// Because the schema is case insensitive, a name differing only in
// case is also a duplicate.
try {
MaterializedField testCol = SchemaBuilder.columnSchema("A", MinorType.VARCHAR, DataMode.REQUIRED);
rootWriter.addColumn(testCol);
fail();
} catch (IllegalArgumentException e) {
// Expected
assertTrue(e.getMessage().contains("Duplicate"));
}
// Can still add required fields while writing the first row.
rsLoader.startBatch();
rootWriter.start();
rootWriter.scalar(0).setString("foo");
MaterializedField col2 = SchemaBuilder.columnSchema("b", MinorType.VARCHAR, DataMode.REQUIRED);
rootWriter.addColumn(col2);
assertTrue(col2.isEquivalent(schema.column(1)));
ColumnMetadata col2Metadata = schema.metadata(1);
assertSame(col2Metadata, schema.metadata("b"));
assertSame(col2Metadata, schema.metadata("B"));
assertEquals(2, schema.size());
assertEquals(1, schema.index("b"));
assertEquals(1, schema.index("B"));
rootWriter.scalar(1).setString("second");
// After first row, can add an optional or repeated.
// Also allows a required field: values will be back-filled.
rootWriter.save();
rootWriter.start();
rootWriter.scalar(0).setString("bar");
rootWriter.scalar(1).setString("");
// Required column "c" added mid-batch: the already-saved first row is
// back-filled with an empty value (see expected rows below).
MaterializedField col3 = SchemaBuilder.columnSchema("c", MinorType.VARCHAR, DataMode.REQUIRED);
rootWriter.addColumn(col3);
assertTrue(col3.isEquivalent(schema.column(2)));
ColumnMetadata col3Metadata = schema.metadata(2);
assertSame(col3Metadata, schema.metadata("c"));
assertSame(col3Metadata, schema.metadata("C"));
assertEquals(3, schema.size());
assertEquals(2, schema.index("c"));
assertEquals(2, schema.index("C"));
rootWriter.scalar("c").setString("c.2");
// Optional column "d": prior row back-fills as null.
MaterializedField col4 = SchemaBuilder.columnSchema("d", MinorType.VARCHAR, DataMode.OPTIONAL);
rootWriter.addColumn(col4);
assertTrue(col4.isEquivalent(schema.column(3)));
ColumnMetadata col4Metadata = schema.metadata(3);
assertSame(col4Metadata, schema.metadata("d"));
assertSame(col4Metadata, schema.metadata("D"));
assertEquals(4, schema.size());
assertEquals(3, schema.index("d"));
assertEquals(3, schema.index("D"));
rootWriter.scalar("d").setString("d.2");
// Repeated column "e": prior row back-fills as an empty array.
MaterializedField col5 = SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.REPEATED);
rootWriter.addColumn(col5);
assertTrue(col5.isEquivalent(schema.column(4)));
ColumnMetadata col5Metadata = schema.metadata(4);
assertSame(col5Metadata, schema.metadata("e"));
assertSame(col5Metadata, schema.metadata("E"));
assertEquals(5, schema.size());
assertEquals(4, schema.index("e"));
assertEquals(4, schema.index("E"));
rootWriter.array(4).set("e1", "e2", "e3");
rootWriter.save();
// Verify. No reason to expect problems, but might as well check.
RowSet result = fixture.wrap(rsLoader.harvest());
assertEquals(5, rsLoader.schemaVersion());
SingleRowSet expected = fixture.rowSetBuilder(result.batchSchema()).addRow("foo", "second", "", null, strArray()).addRow("bar", "", "c.2", "d.2", strArray("e1", "e2", "e3")).build();
new RowSetComparison(expected).verifyAndClearAll(result);
// Handy way to test that close works to abort an in-flight batch
// and clean up.
rsLoader.close();
}
Aggregations