Use of org.apache.drill.exec.proto.UserBitShared.SerializedField in project drill by apache.
The class RepeatedMapVector, method getMetadata.
@Override
public SerializedField getMetadata() {
  SerializedField.Builder builder = getField().getAsBuilder()
      .setBufferLength(getBufferSize())
      .setValueCount(accessor.getValueCount());
  builder.addChild(offsets.getMetadata());
  for (final ValueVector child : getChildren()) {
    builder.addChild(child.getMetadata());
  }
  return builder.build();
}
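The builder yields a small tree: the repeated map's own field carries the total buffer length and value count, the offsets vector is serialized as the first child, and each map member follows it. As a minimal sketch, assuming `vector` is an already-populated RepeatedMapVector, the tree can be inspected through the generated proto getters:

// Sketch only: `vector` is assumed to be a populated RepeatedMapVector.
SerializedField meta = vector.getMetadata();
System.out.println("total bytes: " + meta.getBufferLength()
    + ", values: " + meta.getValueCount());
// Child 0 is the offsets vector; the remaining children are the map's members.
for (SerializedField child : meta.getChildList()) {
  System.out.println("  " + child.getNamePart().getName()
      + ": " + child.getBufferLength() + " bytes");
}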
Use of org.apache.drill.exec.proto.UserBitShared.SerializedField in project drill by apache.
The class RepeatedMapVector, method load.
@Override
public void load(SerializedField metadata, DrillBuf buffer) {
  final List<SerializedField> children = metadata.getChildList();
  // The offsets vector is always serialized as the first child.
  final SerializedField offsetField = children.get(0);
  offsets.load(offsetField, buffer);
  int bufOffset = offsetField.getBufferLength();
  for (int i = 1; i < children.size(); i++) {
    final SerializedField child = children.get(i);
    final MaterializedField fieldDef = MaterializedField.create(child);
    ValueVector vector = getChild(fieldDef.getLastName());
    if (vector == null) {
      // If we arrive here, we didn't have a matching vector.
      vector = BasicTypeHelper.getNewVector(fieldDef, allocator);
      putChild(fieldDef.getLastName(), vector);
    }
    final int vectorLength = child.getBufferLength();
    vector.load(child, buffer.slice(bufOffset, vectorLength));
    bufOffset += vectorLength;
  }
  assert bufOffset == buffer.capacity();
}
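load() expects the children's bytes packed back to back in the incoming buffer, offsets first. A hedged sketch of the same offset arithmetic, useful when debugging a serialized map (assumes `metadata` is the SerializedField of a repeated map):

// Sketch only: walk the child metadata and report where each child's
// bytes live inside the packed buffer, mirroring load()'s bufOffset math.
int bufOffset = 0;
for (SerializedField child : metadata.getChildList()) {
  System.out.printf("%s occupies bytes [%d, %d)%n",
      child.getNamePart().getName(), bufOffset, bufOffset + child.getBufferLength());
  bufOffset += child.getBufferLength();
}
// bufOffset should now equal the parent's getBufferLength().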
Use of org.apache.drill.exec.proto.UserBitShared.SerializedField in project drill by axbaretto.
The class RecordBatchLoader, method load.
/**
 * Load a record batch from a single buffer.
 *
 * @param def
 *          The definition for the record batch.
 * @param buf
 *          The buffer that holds the data associated with the record batch.
 * @return Whether the schema changed since the previous load.
 * @throws SchemaChangeException
 *           TODO: Clean: DRILL-2933 load(...) never actually throws SchemaChangeException.
 */
@SuppressWarnings("resource")
public boolean load(RecordBatchDef def, DrillBuf buf) throws SchemaChangeException {
  if (logger.isTraceEnabled()) {
    logger.trace("Loading record batch with def {} and data {}", def, buf);
    logger.trace("Load, ThreadID: {}\n{}", Thread.currentThread().getId(), new StackTrace());
  }
  container.zeroVectors();
  valueCount = def.getRecordCount();
  boolean schemaChanged = schema == null;

  // Load vectors from the batch buffer, while tracking added and/or removed
  // vectors (relative to the previous call) in order to determine whether
  // the schema has changed since the previous call.

  // Set up to recognize previous fields that no longer exist.
  final Map<String, ValueVector> oldFields = CaseInsensitiveMap.newHashMap();
  for (final VectorWrapper<?> wrapper : container) {
    final ValueVector vector = wrapper.getValueVector();
    oldFields.put(vector.getField().getName(), vector);
  }

  final VectorContainer newVectors = new VectorContainer();
  try {
    final List<SerializedField> fields = def.getFieldList();
    int bufOffset = 0;
    for (final SerializedField field : fields) {
      final MaterializedField fieldDef = MaterializedField.create(field);
      ValueVector vector = oldFields.remove(fieldDef.getName());
      if (vector == null) {
        // Field did not exist previously--is schema change.
        schemaChanged = true;
        vector = TypeHelper.getNewVector(fieldDef, allocator);
      } else if (!vector.getField().getType().equals(fieldDef.getType())) {
        // Field had a different type before--is schema change.
        // Clear the previous vector.
        vector.clear();
        schemaChanged = true;
        vector = TypeHelper.getNewVector(fieldDef, allocator);
      // If the field is a map, check if the map schema changed.
      } else if (vector.getField().getType().getMinorType() == MinorType.MAP
          && !isSameSchema(vector.getField().getChildren(), field.getChildList())) {
        // The map schema changed. Discard the old map and create a new one.
        schemaChanged = true;
        vector.clear();
        vector = TypeHelper.getNewVector(fieldDef, allocator);
      }

      // Load the vector.
      if (buf == null) {
        // Schema only; no data to load.
      } else if (field.getValueCount() == 0) {
        AllocationHelper.allocate(vector, 0, 0, 0);
      } else {
        vector.load(field, buf.slice(bufOffset, field.getBufferLength()));
      }
      bufOffset += field.getBufferLength();
      newVectors.add(vector);
    }

    // Rebuild the schema.
    final SchemaBuilder builder = BatchSchema.newBuilder();
    for (final VectorWrapper<?> v : newVectors) {
      builder.addField(v.getField());
    }
    builder.setSelectionVectorMode(BatchSchema.SelectionVectorMode.NONE);
    schema = builder.build();
    newVectors.buildSchema(BatchSchema.SelectionVectorMode.NONE);
    container = newVectors;
  } catch (final Throwable cause) {
    // Clean up the new vectors before rethrowing so the caller sees the actual cause.
    for (final VectorWrapper<?> wrapper : newVectors) {
      wrapper.getValueVector().clear();
    }
    throw cause;
  } finally {
    if (!oldFields.isEmpty()) {
      schemaChanged = true;
      for (final ValueVector vector : oldFields.values()) {
        vector.clear();
      }
    }
  }
  return schemaChanged;
}
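A minimal usage sketch of the receiving side, assuming `allocator` is a live BufferAllocator and `def`/`buf` arrived from an upstream fragment; process() is a hypothetical consumer, not a Drill API:

RecordBatchLoader loader = new RecordBatchLoader(allocator);
// load() declares SchemaChangeException, though per DRILL-2933 it never actually throws it.
boolean schemaChanged = loader.load(def, buf);  // rebuilds the loader's internal container
if (schemaChanged) {
  // Downstream operators must re-resolve their field references.
  System.out.println("new schema: " + loader.getSchema());
}
for (VectorWrapper<?> wrapper : loader) {       // RecordBatchLoader is a VectorAccessible
  process(wrapper.getValueVector(), loader.getRecordCount());
}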
Use of org.apache.drill.exec.proto.UserBitShared.SerializedField in project drill by axbaretto.
The class WritableBatch, method getBatchNoHV.
public static WritableBatch getBatchNoHV(int recordCount, Iterable<ValueVector> vectors, boolean isSV2) {
  List<DrillBuf> buffers = Lists.newArrayList();
  List<SerializedField> metadata = Lists.newArrayList();
  for (ValueVector vv : vectors) {
    metadata.add(vv.getMetadata());
    // Don't try to get the buffers if we don't have any records; the buffers may be dead.
    if (recordCount == 0) {
      vv.clear();
      continue;
    }
    for (DrillBuf b : vv.getBuffers(true)) {
      buffers.add(b);
    }
    // Remove vv's access to the buffers.
    vv.clear();
  }
  RecordBatchDef batchDef = RecordBatchDef.newBuilder()
      .addAllField(metadata)
      .setRecordCount(recordCount)
      .setCarriesTwoByteSelectionVector(isSV2)
      .build();
  return new WritableBatch(batchDef, buffers);
}
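A hedged sketch of packaging vectors for the wire with this method. Note that getBatchNoHV() clears the source vectors, so the batch takes sole ownership of the buffers; `rowCount`, `intVector`, and `varCharVector` are assumed to exist and be populated:

List<ValueVector> vectors = new ArrayList<>();
vectors.add(intVector);
vectors.add(varCharVector);
WritableBatch batch = WritableBatch.getBatchNoHV(rowCount, vectors, false);
RecordBatchDef batchDef = batch.getDef();  // per-field SerializedField metadata
DrillBuf[] buffers = batch.getBuffers();   // the data buffers, ready to be written out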
Use of org.apache.drill.exec.proto.UserBitShared.SerializedField in project drill by axbaretto.
The class WritableBatch, method reconstructContainer.
public void reconstructContainer(BufferAllocator allocator, VectorContainer container) {
  Preconditions.checkState(!cleared,
      "Attempted to reconstruct a container from a WritableBatch after it had been cleared");
  if (buffers.length > 0) { /* If we have DrillBufs associated with value vectors */
    int len = 0;
    for (DrillBuf b : buffers) {
      len += b.capacity();
    }
    @SuppressWarnings("resource")
    DrillBuf newBuf = allocator.buffer(len);
    try {
      // Copy data from each buffer into the compound buffer.
      int offset = 0;
      for (DrillBuf buf : buffers) {
        newBuf.setBytes(offset, buf);
        offset += buf.capacity();
        buf.release();
      }
      List<SerializedField> fields = def.getFieldList();
      int bufferOffset = 0;
      // For each value vector, slice the appropriate size from the compound
      // buffer and load it into the value vector.
      int vectorIndex = 0;
      for (VectorWrapper<?> vv : container) {
        SerializedField fmd = fields.get(vectorIndex);
        @SuppressWarnings("resource")
        ValueVector v = vv.getValueVector();
        @SuppressWarnings("resource")
        DrillBuf bb = newBuf.slice(bufferOffset, fmd.getBufferLength());
        v.load(fmd, bb);
        vectorIndex++;
        bufferOffset += fmd.getBufferLength();
      }
    } finally {
      // Any vectors that loaded material from newBuf slices above will retain those.
      newBuf.release(1);
    }
  }
  SelectionVectorMode svMode;
  if (def.hasCarriesTwoByteSelectionVector() && def.getCarriesTwoByteSelectionVector()) {
    svMode = SelectionVectorMode.TWO_BYTE;
  } else {
    svMode = SelectionVectorMode.NONE;
  }
  container.buildSchema(svMode);
  // Set the record count in each value vector.
  for (VectorWrapper<?> v : container) {
    ValueVector.Mutator m = v.getValueVector().getMutator();
    m.setValueCount(def.getRecordCount());
  }
}
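Putting the two WritableBatch methods together, a hedged round-trip sketch. reconstructContainer() loads into vectors that already exist in the target container, so the container must first be populated with empty vectors matching the batch definition; `allocator` and a `batch` produced by getBatchNoHV() are assumed:

VectorContainer rebuilt = new VectorContainer();
for (SerializedField field : batch.getDef().getFieldList()) {
  MaterializedField fieldDef = MaterializedField.create(field);
  rebuilt.add(TypeHelper.getNewVector(fieldDef, allocator));
}
batch.reconstructContainer(allocator, rebuilt);
// `rebuilt` now holds loaded vectors, each with its value count set to the
// batch's record count.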