Usage of org.apache.drill.exec.vector.accessor.ObjectReader in the Apache Drill project: class MetadataControllerBatch, method getIncomingLocations.
/**
 * Recursively collects the set of file-system locations referenced by the
 * metadata row currently positioned under {@code reader}.
 * <p>
 * The row's {@code metadataType} column selects the strategy:
 * <ul>
 *   <li>SEGMENT / PARTITION — read the {@code locations} array column if it is
 *       present in the schema; otherwise recurse into the collected underlying
 *       metadata map array.</li>
 *   <li>FILE — read the single {@code location} column.</li>
 *   <li>Any other type contributes no locations.</li>
 * </ul>
 *
 * @param reader tuple reader positioned on a metadata row
 * @return set of child locations for the current row; possibly empty, never null
 */
private Set<Path> getIncomingLocations(TupleReader reader) {
  Set<Path> childLocations = new HashSet<>();
  ObjectReader metadataColumnReader = reader.column(MetastoreAnalyzeConstants.METADATA_TYPE);
  Preconditions.checkNotNull(metadataColumnReader, "metadataType column wasn't found");
  MetadataType metadataType = MetadataType.valueOf(metadataColumnReader.scalar().getString());
  switch (metadataType) {
    case SEGMENT:
    case PARTITION: {
      ObjectReader locationsReader = reader.column(MetastoreAnalyzeConstants.LOCATIONS_FIELD);
      // populate list of file locations from "locations" field if it is present in the schema
      if (locationsReader != null && locationsReader.type() == ObjectType.ARRAY) {
        ArrayReader array = locationsReader.array();
        while (array.next()) {
          childLocations.add(new Path(array.scalar().getString()));
        }
        break;
      }
      // in the opposite case, populate list of file locations using underlying metadata
      ObjectReader underlyingMetadataReader = reader.column(MetastoreAnalyzeConstants.COLLECTED_MAP_FIELD);
      if (underlyingMetadataReader != null) {
        // current row contains information about underlying metadata
        ArrayReader array = underlyingMetadataReader.array();
        array.rewind();
        while (array.next()) {
          childLocations.addAll(getIncomingLocations(array.tuple()));
        }
      }
      break;
    }
    case FILE: {
      // Mirror the null check done for the metadataType column above: a missing
      // "location" column previously surfaced as a bare NPE with no message.
      ObjectReader locationReader = reader.column(MetastoreAnalyzeConstants.LOCATION_FIELD);
      Preconditions.checkNotNull(locationReader, "location column wasn't found");
      childLocations.add(new Path(locationReader.scalar().getString()));
      // Explicit break: the original fell through into "default" (harmless only
      // because "default" was empty, but fragile against future case additions).
      break;
    }
    default:
      break;
  }
  return childLocations;
}
Usage of org.apache.drill.exec.vector.accessor.ObjectReader in the Apache Drill project: class JsonWriter, method writeArray.
/**
 * Serializes every element of the given array as a JSON array.
 * The entry reader is obtained once and is repositioned implicitly
 * by each {@code reader.next()} call.
 */
private void writeArray(ArrayReader reader) throws IOException {
  gen.writeStartArray();
  // "entry" is an ObjectReader bound to the array's current element.
  for (ObjectReader entry = reader.entry(); reader.next(); ) {
    writeValue(entry);
  }
  gen.writeEndArray();
}
Usage of org.apache.drill.exec.vector.accessor.ObjectReader in the Apache Drill project: class AbstractArrayWriter, method copy.
@Override
public void copy(ColumnReader from) {
  // Inefficient initial implementation: copies the source array one
  // element at a time through the element writer.
  final ArrayReader sourceArray = (ArrayReader) from;
  final ObjectReader element = sourceArray.entry();
  while (sourceArray.next()) {
    elementObjWriter.writer().copy(element.reader());
    save();
  }
}
Usage of org.apache.drill.exec.vector.accessor.ObjectReader in the Apache Drill project: class AbstractObjectWriter, method copy.
@Override
public void copy(ColumnReader from) {
  // Unwrap the object reader and delegate directly to the underlying writer.
  writer().copy(((ObjectReader) from).reader());
}
Usage of org.apache.drill.exec.vector.accessor.ObjectReader in the Apache Drill project: class TestVariantAccessors, method testUnionWithMap.
/**
 * Test a variant (AKA "union vector") at the top level which
 * includes a map.
 * <p>
 * Writes five rows — VARCHAR, MAP, null, MAP (with a null member),
 * VARCHAR — then reads them back and verifies that type, nullability,
 * and values round-trip for each row.
 */
@Test
public void testUnionWithMap() {
// Union column "u" holds either a VARCHAR or a map {a: INT?, b: VARCHAR?}.
final TupleMetadata schema = new SchemaBuilder().addUnion("u").addType(MinorType.VARCHAR).addMap().addNullable("a", MinorType.INT).addNullable("b", MinorType.VARCHAR).resumeUnion().resumeSchema().buildSchema();
SingleRowSet result;
// Write values
{
final ExtendableRowSet rs = fixture.rowSet(schema);
final RowSetWriter writer = rs.writer();
// Sanity check of writer structure
final ObjectWriter wo = writer.column(0);
assertEquals(ObjectType.VARIANT, wo.type());
final VariantWriter vw = wo.variant();
assertTrue(vw.hasType(MinorType.VARCHAR));
// member() and scalar() must hand back the same underlying writer.
final ObjectWriter strObj = vw.member(MinorType.VARCHAR);
final ScalarWriter strWriter = strObj.scalar();
assertSame(strWriter, vw.scalar(MinorType.VARCHAR));
assertTrue(vw.hasType(MinorType.MAP));
final ObjectWriter mapObj = vw.member(MinorType.MAP);
final TupleWriter mWriter = mapObj.tuple();
assertSame(mWriter, vw.tuple());
final ScalarWriter aWriter = mWriter.scalar("a");
final ScalarWriter bWriter = mWriter.scalar("b");
// First row: string "first"
// setType() selects which union member the row value is written to.
vw.setType(MinorType.VARCHAR);
strWriter.setString("first");
writer.save();
// Second row: a map
vw.setType(MinorType.MAP);
aWriter.setInt(20);
bWriter.setString("fred");
writer.save();
// Third row: null
vw.setNull();
writer.save();
// Fourth row: map with a null string
vw.setType(MinorType.MAP);
aWriter.setInt(40);
bWriter.setNull();
writer.save();
// Fifth row: string "last"
vw.setType(MinorType.VARCHAR);
strWriter.setString("last");
writer.save();
result = writer.done();
assertEquals(5, result.rowCount());
}
// Read the values.
{
final RowSetReader reader = result.reader();
// Sanity check of structure
final ObjectReader ro = reader.column(0);
assertEquals(ObjectType.VARIANT, ro.type());
final VariantReader vr = ro.variant();
assertTrue(vr.hasType(MinorType.VARCHAR));
// Mirror of the writer-side check: member() and scalar() agree.
final ObjectReader strObj = vr.member(MinorType.VARCHAR);
final ScalarReader strReader = strObj.scalar();
assertSame(strReader, vr.scalar(MinorType.VARCHAR));
assertTrue(vr.hasType(MinorType.MAP));
final ObjectReader mapObj = vr.member(MinorType.MAP);
final TupleReader mReader = mapObj.tuple();
assertSame(mReader, vr.tuple());
final ScalarReader aReader = mReader.scalar("a");
final ScalarReader bReader = mReader.scalar("b");
// First row: string "first"
// The inactive union member (the map) reads as null for this row.
assertTrue(reader.next());
assertFalse(vr.isNull());
assertEquals(MinorType.VARCHAR, vr.dataType());
assertFalse(strReader.isNull());
assertTrue(mReader.isNull());
assertEquals("first", strReader.getString());
// Second row: a map
assertTrue(reader.next());
assertFalse(vr.isNull());
assertEquals(MinorType.MAP, vr.dataType());
assertTrue(strReader.isNull());
assertFalse(mReader.isNull());
assertFalse(aReader.isNull());
assertEquals(20, aReader.getInt());
assertFalse(bReader.isNull());
assertEquals("fred", bReader.getString());
// Third row: null — every member reads as null when the variant is null.
assertTrue(reader.next());
assertTrue(vr.isNull());
assertTrue(strReader.isNull());
assertTrue(mReader.isNull());
assertTrue(aReader.isNull());
assertTrue(bReader.isNull());
// Fourth row: map with a null string
assertTrue(reader.next());
assertEquals(MinorType.MAP, vr.dataType());
assertEquals(40, aReader.getInt());
assertTrue(bReader.isNull());
// Fifth row: string "last"
assertTrue(reader.next());
assertEquals(MinorType.VARCHAR, vr.dataType());
assertEquals("last", strReader.getString());
assertFalse(reader.next());
}
// Release the underlying vectors.
result.clear();
}
Aggregations