Example usage of org.apache.drill.common.exceptions.UserException in the Apache Drill project.
From class TestUnnestWithLateralCorrectness, method testUnnestNonArrayColumn.
@Test
public void testUnnestNonArrayColumn() {
// Rows of (rowNumber, unnestColumn). Autoboxing replaces the
// Integer(int) constructor, which is deprecated for removal.
Object[][] data = { { 1, 3 }, { 6, 10 } };
// Create input schema: the unnest column is a scalar INT, not an
// array, which is the invalid condition under test.
TupleMetadata incomingSchema = new SchemaBuilder().add("rowNumber", TypeProtos.MinorType.INT).add("unnestColumn", TypeProtos.MinorType.INT).buildSchema();
TupleMetadata[] incomingSchemas = { incomingSchema, incomingSchema };
// We expect an exception, so there is no baseline output.
Integer[][][] baseline = {};
RecordBatch.IterOutcome[] iterOutcomes = { RecordBatch.IterOutcome.OK_NEW_SCHEMA, RecordBatch.IterOutcome.OK };
try {
testUnnest(incomingSchemas, iterOutcomes, data, baseline, false);
// Original version fell through silently here; an exception is
// required for this test to be meaningful, so fail explicitly.
// (fail() throws AssertionError, which the catch blocks below
// do not intercept.)
fail("Expected UserException or UnsupportedOperationException");
} catch (UserException | UnsupportedOperationException e) {
// succeeded
return;
} catch (Exception e) {
fail("Failed due to exception: " + e.getMessage());
}
}
Example usage of org.apache.drill.common.exceptions.UserException in the Apache Drill project.
From class TestProjectionFilter, method testSchemaFilter.
@Test
public void testSchemaFilter() {
// Build a schema with two scalar columns plus a map "m" containing
// one INT member, then wrap it in a schema-driven projection filter.
TupleMetadata tableSchema = new SchemaBuilder()
    .add(A_COL.copy())
    .add(B_COL.copy())
    .addMap("m")
      .add("a", MinorType.INT)
      .resumeSchema()
    .build();
ProjectionFilter projFilter = new SchemaProjectionFilter(tableSchema, EmptyErrorContext.INSTANCE);
assertFalse(projFilter.isEmpty());

// Columns present in the schema are projected.
assertTrue(projFilter.isProjected("a"));
assertTrue(projFilter.projection(A_COL).isProjected);
assertTrue(projFilter.isProjected("b"));
assertTrue(projFilter.projection(B_COL).isProjected);

// A column absent from the schema is not projected.
assertFalse(projFilter.isProjected("c"));
assertFalse(projFilter.projection(MetadataUtils.newScalar("c", Types.required(MinorType.BIGINT))).isProjected);

// A column whose type differs from the schema raises a conflict.
ColumnMetadata badType = MetadataUtils.newScalar("a", Types.required(MinorType.BIGINT));
try {
projFilter.projection(badType);
fail();
} catch (UserException e) {
assertTrue(e.getMessage().contains("conflict"));
}

// A column whose cardinality (mode) differs also conflicts.
ColumnMetadata badMode = MetadataUtils.newScalar("a", Types.optional(MinorType.INT));
try {
projFilter.projection(badMode);
fail();
} catch (UserException e) {
assertTrue(e.getMessage().contains("conflict"));
}

// A scalar column cannot be re-declared as a map.
try {
ColumnMetadata scalarAsMap = MetadataUtils.newMap("a", new TupleSchema());
projFilter.projection(scalarAsMap);
fail();
} catch (UserException e) {
assertTrue(e.getMessage().contains("type conflict"));
}

// The map column yields a child filter scoped to its members.
ProjResult mapResult = projFilter.projection(MAP_COL);
assertTrue(mapResult.isProjected);
ProjectionFilter memberFilter = mapResult.mapFilter;
assertTrue(memberFilter.isProjected("a"));
assertFalse(memberFilter.isProjected("b"));
}
Example usage of org.apache.drill.common.exceptions.UserException in the Apache Drill project.
From class TestResultSetLoaderOverflow, method testLargeArray.
/**
 * Create an array that contains more than 64K values. Drill has no numeric
 * limit on array lengths. (Well, it does, but the limit is about 2 billion
 * which, even for bytes, is too large to fit into a vector...)
 */
@Test
public void testLargeArray() {
ResultSetLoader loader = new ResultSetLoaderImpl(fixture.allocator());
RowSetLoader writer = loader.writer();
writer.addColumn(SchemaBuilder.columnSchema("a", MinorType.INT, DataMode.REPEATED));

// Write a single, ever-growing array as the only column value of the
// first row. The writer must eventually throw: a lone oversized array
// cannot be split across batches, so overflow is impossible.
loader.startBatch();
writer.start();
ScalarWriter arrayWriter = writer.array(0).scalar();
try {
int count = 0;
while (count < Integer.MAX_VALUE) {
arrayWriter.setInt(++count);
}
fail();
} catch (UserException e) {
// Expected
}
loader.close();
}
Example usage of org.apache.drill.common.exceptions.UserException in the Apache Drill project.
From class TestResultSetLoaderOverflow, method testOversizeArray.
/**
 * Case where a single array fills up the vector to the maximum size
 * limit. Overflow won't work here; the attempt will fail with a user
 * exception.
 */
@Test
public void testOversizeArray() {
TupleMetadata schema = new SchemaBuilder().addArray("s", MinorType.VARCHAR).buildSchema();
ResultSetOptions options = new ResultSetOptionBuilder()
    .rowCountLimit(ValueVector.MAX_ROW_COUNT)
    .readerSchema(schema)
    .build();
ResultSetLoader loader = new ResultSetLoaderImpl(fixture.allocator(), options);
RowSetLoader writer = loader.writer();

// Pack one huge VARCHAR array into the first (and only) row. The
// single value exceeds the vector size limit, so the loader must
// reject it rather than attempt overflow.
loader.startBatch();
byte[] payload = new byte[473];
Arrays.fill(payload, (byte) 'X');
writer.start();
ScalarWriter arrayWriter = writer.array(0).scalar();
try {
int written = 0;
while (written < ValueVector.MAX_ROW_COUNT) {
arrayWriter.setBytes(payload, payload.length);
written++;
}
fail();
} catch (UserException e) {
assertTrue(e.getMessage().contains("column value is larger than the maximum"));
}
loader.close();
}
Example usage of org.apache.drill.common.exceptions.UserException in the Apache Drill project.
From class TestScalarAccessors, method testVarDecimalOverflow.
@Test
public void testVarDecimalOverflow() {
// VARDECIMAL(8, 4): at most 8 total digits, 4 after the point.
TupleMetadata schema = new SchemaBuilder().add("col", MinorType.VARDECIMAL, 8, 4).buildSchema();
RowSetBuilder builder = new RowSetBuilder(fixture.allocator(), schema, 100);
try {
// Rounding 9999.99999 to scale 4 yields 10000.0000, which needs
// 9 digits of precision — one more than the column allows.
builder.addSingleCol(dec("9999.99999"));
fail();
} catch (UserException e) {
// Expected
}
builder.build().clear();
}
Aggregations