Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.
The class SingleBatchSorterTemplate, method setup.
@Override
public void setup(FragmentExecContext context, SelectionVector2 vector2, VectorAccessible incoming) throws SchemaChangeException {
  Preconditions.checkNotNull(vector2);
  this.vector2 = vector2;
  try {
    doSetup(context, incoming, null);
  } catch (IllegalStateException e) {
    throw new SchemaChangeException(e);
  }
}
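For context, here is a minimal sketch of how calling code might drive this template. The names sorter, sv2, and incomingBatch are hypothetical, but the pattern is the one the snippet supports: a failure inside the generated doSetup surfaces to the operator as a SchemaChangeException.

  // Hypothetical caller (names for illustration only).
  try {
    sorter.setup(context, sv2, incomingBatch);  // may throw SchemaChangeException
    sorter.sort(sv2);                           // sort rows through the selection vector
  } catch (SchemaChangeException e) {
    // How to report the failure is up to the operator; rethrowing as a
    // runtime error is one option.
    throw new IllegalStateException("Sort setup failed", e);
  }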
Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.
The class PojoRecordReader, method setup.
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  operatorContext = context;
  try {
    Field[] fields = pojoClass.getDeclaredFields();
    List<PojoWriter> writers = Lists.newArrayList();
    for (int i = 0; i < fields.length; i++) {
      Field f = fields[i];
      if (Modifier.isStatic(f.getModifiers())) {
        continue;
      }
      Class<?> type = f.getType();
      PojoWriter w = null;
      if (type == int.class) {
        w = new IntWriter(f);
      } else if (type == Integer.class) {
        w = new NIntWriter(f);
      } else if (type == Long.class) {
        w = new NBigIntWriter(f);
      } else if (type == Boolean.class) {
        w = new NBooleanWriter(f);
      } else if (type == double.class) {
        w = new DoubleWriter(f);
      } else if (type == Double.class) {
        w = new NDoubleWriter(f);
      } else if (type.isEnum()) {
        w = new EnumWriter(f, output.getManagedBuffer());
      } else if (type == boolean.class) {
        w = new BitWriter(f);
      } else if (type == long.class) {
        w = new LongWriter(f);
      } else if (type == String.class) {
        w = new StringWriter(f, output.getManagedBuffer());
      } else if (type == Timestamp.class) {
        w = new NTimeStampWriter(f);
      } else {
        throw new ExecutionSetupException(String.format("PojoRecordReader doesn't yet support conversions from type [%s].", type));
      }
      writers.add(w);
      w.init(output);
    }
    this.writers = writers.toArray(new PojoWriter[writers.size()]);
  } catch (SchemaChangeException e) {
    throw new ExecutionSetupException("Failure while setting up schema for PojoRecordReader.", e);
  }
  currentIterator = pojoObjects.iterator();
}
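To make the supported type mapping concrete, here is a hedged example of a POJO this reader could handle. The class and its fields are hypothetical; the comments note which writer the loop above would pick for each field.

  import java.sql.Timestamp;

  // Hypothetical POJO; every field type appears in the if/else chain above.
  public class Employee {
    public int id;               // IntWriter (primitive int)
    public Integer deptId;       // NIntWriter (nullable Integer)
    public Long salary;          // NBigIntWriter (nullable Long)
    public boolean active;       // BitWriter (primitive boolean)
    public double rating;        // DoubleWriter (primitive double)
    public String name;          // StringWriter (uses a managed buffer)
    public Timestamp hiredDate;  // NTimeStampWriter (java.sql.Timestamp)
    public static int COUNTER;   // static field--skipped by the Modifier.isStatic check
  }

A field of any type outside this set (a float, say, or a nested object) would cause setup to fail with the ExecutionSetupException built in the final else branch.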
Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.
The class DrillCursor, method nextRowInternally.
/**
* ...
* <p>
* Is to be called (once) from {@link #loadInitialSchema} for
* {@link DrillResultSetImpl#execute()}, and then (repeatedly) from
* {@link #next()} for {@link AvaticaResultSet#next()}.
* </p>
*
* @return whether cursor is positioned at a row (false when after end of
* results)
*/
private boolean nextRowInternally() throws SQLException {
  if (currentRecordNumber + 1 < currentBatchHolder.getRecordCount()) {
    // Have next row in current batch--just advance index and report "at a row."
    currentRecordNumber++;
    return true;
  } else {
    try {
      QueryDataBatch qrb = resultsListener.getNext();
      // Skip any spurious empty batches (batches with zero rows or null data),
      // other than the first batch (which carries the (initial) schema but no
      // rows).
      if (afterFirstBatch) {
        while (qrb != null && (qrb.getHeader().getRowCount() == 0 || qrb.getData() == null)) {
          // Empty message--dispose of it and try to get another.
          logger.warn("Spurious batch read: {}", qrb);
          qrb.release();
          qrb = resultsListener.getNext();
        }
      }
      afterFirstBatch = true;
      if (qrb == null) {
        // End of batches--clean up, set state to done, report after last row.
        // (The batch holder was loaded, so it must be cleared.)
        currentBatchHolder.clear();
        afterLastRow = true;
        return false;
      } else {
        // Got next (or first) batch--reset record offset to beginning;
        // assimilate schema if changed; set up return value for first call
        // to next().
        currentRecordNumber = 0;
        final boolean schemaChanged;
        try {
          schemaChanged = currentBatchHolder.load(qrb.getHeader().getDef(), qrb.getData());
        } finally {
          qrb.release();
        }
        schema = currentBatchHolder.getSchema();
        if (schemaChanged) {
          updateColumns();
        }
        if (returnTrueForNextCallToNext && currentBatchHolder.getRecordCount() == 0) {
          returnTrueForNextCallToNext = false;
        }
        return true;
      }
    } catch (UserException e) {
      // Normally expected case--a server-side error (e.g., a syntax error in
      // the SQL statement). Construct the SQLException with the message text
      // from the server exception.
      throw new SQLException(e.getMessage(), e);
    } catch (InterruptedException e) {
      // Not normally expected--Drill doesn't interrupt in this area--but a
      // JDBC client certainly could.
      throw new SQLException("Interrupted.", e);
    } catch (SchemaChangeException e) {
      // RecordBatchLoader.load(...) no longer actually throws
      // SchemaChangeException, so this catch clause should be reviewed/cleaned.
      throw new SQLException("Unexpected SchemaChangeException from RecordBatchLoader.load(...)", e);
    } catch (RuntimeException e) {
      throw new SQLException("Unexpected RuntimeException: " + e.toString(), e);
    }
  }
}
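Since nextRowInternally() is what (indirectly) backs AvaticaResultSet.next(), plain JDBC code exercises every branch above. A hedged sketch follows; the connection URL and query are placeholders, and the enclosing method is assumed to declare throws SQLException.

  import java.sql.*;

  // Each rs.next() call lands in nextRowInternally(); it returns false once
  // the batches are exhausted, and server-side errors arrive as SQLException.
  try (Connection conn = DriverManager.getConnection("jdbc:drill:zk=local");
       Statement stmt = conn.createStatement();
       ResultSet rs = stmt.executeQuery("SELECT name FROM cp.`employee.json` LIMIT 5")) {
    while (rs.next()) {
      System.out.println(rs.getString("name"));
    }
  }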
Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.
The class QueryBuilder, method rowSet.
/**
* Run the query and return the first result set as a
* {@link DirectRowSet} object that can be inspected directly
* by the code using a {@link RowSetReader}.
* <p>
* A possible enhancement is to provide a way to read a series of
* result batches as row sets.
* @return a row set that represents the first batch returned from
* the query
* @throws RpcException if anything goes wrong
*/
public DirectRowSet rowSet() throws RpcException {
  // Ignore all but the first non-empty batch.
  QueryDataBatch dataBatch = null;
  for (QueryDataBatch batch : results()) {
    if (dataBatch == null && batch.getHeader().getRowCount() != 0) {
      dataBatch = batch;
    } else {
      batch.release();
    }
  }
  if (dataBatch == null) {
    return null;
  }
  // Unload the batch and convert to a row set.
  final RecordBatchLoader loader = new RecordBatchLoader(client.allocator());
  try {
    loader.load(dataBatch.getHeader().getDef(), dataBatch.getData());
    dataBatch.release();
    VectorContainer container = loader.getContainer();
    container.setRecordCount(loader.getRecordCount());
    return new DirectRowSet(client.allocator(), container);
  } catch (SchemaChangeException e) {
    throw new IllegalStateException(e);
  }
}
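Typical test usage, sketched with hedged names: client is assumed to be the test framework's client fixture exposing queryBuilder(), and the RowSetReader mentioned in the javadoc does the inspection.

  // Hedged sketch; queryBuilder() and the SQL text are placeholders.
  DirectRowSet rowSet = client.queryBuilder()
      .sql("SELECT id, name FROM cp.`employee.json` LIMIT 10")
      .rowSet();
  try {
    RowSetReader reader = rowSet.reader();
    while (reader.next()) {
      // Inspect column values directly, without a JDBC round trip.
    }
  } finally {
    rowSet.clear();  // release the vectors owned by the row set
  }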
Use of org.apache.drill.exec.exception.SchemaChangeException in project drill by apache.
The class TestOutputMutator, method addField.
@SuppressWarnings("unchecked")
@Override
public <T extends ValueVector> T addField(MaterializedField field, Class<T> clazz) throws SchemaChangeException {
  ValueVector v = TypeHelper.getNewVector(field, allocator);
  if (!clazz.isAssignableFrom(v.getClass())) {
    throw new SchemaChangeException(String.format("The class that was provided %s does not correspond to the expected vector type of %s.", clazz.getSimpleName(), v.getClass().getSimpleName()));
  }
  addField(v);
  return (T) v;
}
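A hedged example of the contract this enforces, using helper calls assumed from Drill's test code (MaterializedField.create and Types.required): requesting a vector class that doesn't match what TypeHelper produces for the field type fails with SchemaChangeException.

  MaterializedField field = MaterializedField.create("age", Types.required(MinorType.INT));

  // Matching request: TypeHelper produces an IntVector for a required INT.
  IntVector ages = mutator.addField(field, IntVector.class);

  // Mismatched request: would throw SchemaChangeException, since VarCharVector
  // is not assignable from the IntVector that TypeHelper creates.
  // VarCharVector bad = mutator.addField(field, VarCharVector.class);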