Use of org.apache.drill.exec.record.RecordBatchLoader in project drill by apache.
From the class TestDateTypes, method testSortDate:
@Test
public void testSortDate() throws Exception {
  try (RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
       Drillbit bit = new Drillbit(CONFIG, serviceSet);
       DrillClient client = new DrillClient(CONFIG, serviceSet.getCoordinator())) {
    // run query.
    bit.run();
    client.connect();
    List<QueryDataBatch> results = client.runQuery(
        org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL,
        Files.toString(FileUtils.getResourceAsFile("/record/vector/test_sort_date.json"), Charsets.UTF_8)
            .replace("#{TEST_FILE}", "/test_simple_date.json"));
    RecordBatchLoader batchLoader = new RecordBatchLoader(bit.getContext().getAllocator());
    QueryDataBatch batch = results.get(1);
    assertTrue(batchLoader.load(batch.getHeader().getDef(), batch.getData()));
    for (VectorWrapper<?> v : batchLoader) {
      ValueVector.Accessor accessor = v.getValueVector().getAccessor();
      assertEquals("1970-01-02", accessor.getObject(0).toString());
      assertEquals("2000-02-27", accessor.getObject(1).toString());
      assertEquals("2008-12-28", accessor.getObject(2).toString());
    }
    batchLoader.clear();
    for (QueryDataBatch b : results) {
      b.release();
    }
  }
}
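The example above shows the full lifecycle that the remaining snippets repeat: run a query, load each QueryDataBatch into a RecordBatchLoader, read values through the vector accessors, then clear the loader and release the batches. A minimal sketch of that lifecycle, assuming an already-connected DrillClient (client) and a running Drillbit (bit) as in the test above; the SQL string is only illustrative:

// Minimal sketch of the load/read/clear/release cycle; the query is hypothetical.
List<QueryDataBatch> results = client.runQuery(UserBitShared.QueryType.SQL,
    "select full_name from cp.`employee.json` limit 5");
RecordBatchLoader loader = new RecordBatchLoader(bit.getContext().getAllocator());
for (QueryDataBatch batch : results) {
  if (batch.getHeader().getRowCount() == 0) {
    batch.release();
    continue;
  }
  loader.load(batch.getHeader().getDef(), batch.getData());  // deserialize the batch into value vectors
  for (VectorWrapper<?> w : loader) {                         // iterate the columns of the batch
    ValueVector.Accessor accessor = w.getValueVector().getAccessor();
    for (int i = 0; i < accessor.getValueCount(); i++) {
      System.out.println(accessor.getObject(i));              // read each value as a Java object
    }
  }
  loader.clear();   // free the loader's copy of the vectors before the next batch
  batch.release();  // free the batch's underlying buffers
}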
Use of org.apache.drill.exec.record.RecordBatchLoader in project drill by apache.
From the class TestInfoSchema, method describeSchemaOutput:
@Test
public void describeSchemaOutput() throws Exception {
  final List<QueryDataBatch> result = testSqlWithResults("describe schema dfs_test.tmp");
  assertTrue(result.size() == 1);
  final QueryDataBatch batch = result.get(0);
  final RecordBatchLoader loader = new RecordBatchLoader(getDrillbitContext().getAllocator());
  loader.load(batch.getHeader().getDef(), batch.getData());
  // check schema column value
  final VectorWrapper schemaValueVector = loader.getValueAccessorById(
      NullableVarCharVector.class,
      loader.getValueVectorId(SchemaPath.getCompoundPath("schema")).getFieldIds());
  String schema = schemaValueVector.getValueVector().getAccessor().getObject(0).toString();
  assertEquals("dfs_test.tmp", schema);
  // check properties column value
  final VectorWrapper propertiesValueVector = loader.getValueAccessorById(
      NullableVarCharVector.class,
      loader.getValueVectorId(SchemaPath.getCompoundPath("properties")).getFieldIds());
  String properties = propertiesValueVector.getValueVector().getAccessor().getObject(0).toString();
  final Map configMap = mapper.readValue(properties, Map.class);
  // check some stable properties existence
  assertTrue(configMap.containsKey("connection"));
  assertTrue(configMap.containsKey("config"));
  assertTrue(configMap.containsKey("formats"));
  assertFalse(configMap.containsKey("workspaces"));
  // check some stable properties values
  assertEquals("file", configMap.get("type"));
  final FileSystemConfig testConfig = (FileSystemConfig) bits[0].getContext().getStorage().getPlugin("dfs_test").getConfig();
  final String tmpSchemaLocation = testConfig.workspaces.get("tmp").getLocation();
  assertEquals(tmpSchemaLocation, configMap.get("location"));
  batch.release();
  loader.clear();
}
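The two column reads above share the same lookup shape (SchemaPath to field ids to typed vector), so tests that read several columns sometimes factor it into a helper. A sketch under that assumption; the helper name is invented for illustration and is not part of Drill:

// Hypothetical helper: returns row 0 of a named VARCHAR column from an already-loaded RecordBatchLoader.
private static String readVarCharColumn(RecordBatchLoader loader, String columnName) {
  VectorWrapper<?> wrapper = loader.getValueAccessorById(
      NullableVarCharVector.class,
      loader.getValueVectorId(SchemaPath.getCompoundPath(columnName)).getFieldIds());
  return wrapper.getValueVector().getAccessor().getObject(0).toString();
}
// Usage in the test above: String schema = readVarCharColumn(loader, "schema");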
Use of org.apache.drill.exec.record.RecordBatchLoader in project drill by apache.
From the class TestMultiInputAdd, method testMultiInputAdd:
@Test
public void testMultiInputAdd(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable {
  try (RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
       Drillbit bit = new Drillbit(CONFIG, serviceSet);
       DrillClient client = new DrillClient(CONFIG, serviceSet.getCoordinator())) {
    // run query.
    bit.run();
    client.connect();
    List<QueryDataBatch> results = client.runQuery(
        org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL,
        Files.toString(FileUtils.getResourceAsFile("/functions/multi_input_add_test.json"), Charsets.UTF_8));
    RecordBatchLoader batchLoader = new RecordBatchLoader(bit.getContext().getAllocator());
    QueryDataBatch batch = results.get(0);
    assertTrue(batchLoader.load(batch.getHeader().getDef(), batch.getData()));
    for (VectorWrapper<?> v : batchLoader) {
      ValueVector.Accessor accessor = v.getValueVector().getAccessor();
      assertTrue(accessor.getObject(0).equals(10));
    }
    batchLoader.clear();
    for (QueryDataBatch b : results) {
      b.release();
    }
  }
}
Use of org.apache.drill.exec.record.RecordBatchLoader in project drill by apache.
From the class TestConvertFunctions, method getRunResult:
protected Object[] getRunResult(QueryType queryType, String planString) throws Exception {
  List<QueryDataBatch> resultList = testRunAndReturn(queryType, planString);
  List<Object> res = new ArrayList<Object>();
  RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
  for (QueryDataBatch result : resultList) {
    if (result.getData() != null) {
      loader.load(result.getHeader().getDef(), result.getData());
      @SuppressWarnings("resource")
      ValueVector v = loader.iterator().next().getValueVector();
      for (int j = 0; j < v.getAccessor().getValueCount(); j++) {
        if (v instanceof VarCharVector) {
          res.add(new String(((VarCharVector) v).getAccessor().get(j)));
        } else {
          res.add(v.getAccessor().getObject(j));
        }
      }
      loader.clear();
      result.release();
    }
  }
  return res.toArray();
}
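A call site for getRunResult might look like the sketch below; the query string is hypothetical and it assumes testRunAndReturn accepts SQL queries as well as physical plans:

// Illustrative usage only; the query is not taken from the Drill sources.
Object[] values = getRunResult(QueryType.SQL, "select full_name from cp.`employee.json` limit 3");
for (Object value : values) {
  System.out.println(value);  // VARCHAR columns arrive as Strings, other types via getObject()
}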
Use of org.apache.drill.exec.record.RecordBatchLoader in project drill by apache.
From the class TestSimpleTopN, method sortOneKeyAscending:
@Test
public void sortOneKeyAscending() throws Throwable {
  RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
  try (Drillbit bit1 = new Drillbit(CONFIG, serviceSet);
       Drillbit bit2 = new Drillbit(CONFIG, serviceSet);
       DrillClient client = new DrillClient(CONFIG, serviceSet.getCoordinator())) {
    bit1.run();
    bit2.run();
    client.connect();
    List<QueryDataBatch> results = client.runQuery(
        org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL,
        Files.toString(FileUtils.getResourceAsFile("/topN/one_key_sort.json"), Charsets.UTF_8));
    int count = 0;
    for (QueryDataBatch b : results) {
      if (b.getHeader().getRowCount() != 0) {
        count += b.getHeader().getRowCount();
      }
    }
    assertEquals(100, count);
    long previousBigInt = Long.MIN_VALUE;
    int recordCount = 0;
    int batchCount = 0;
    for (QueryDataBatch b : results) {
      if (b.getHeader().getRowCount() == 0) {
        continue;
      }
      batchCount++;
      RecordBatchLoader loader = new RecordBatchLoader(bit1.getContext().getAllocator());
      loader.load(b.getHeader().getDef(), b.getData());
      BigIntVector c1 = (BigIntVector) loader.getValueAccessorById(
          BigIntVector.class,
          loader.getValueVectorId(new SchemaPath("blue", ExpressionPosition.UNKNOWN)).getFieldIds()).getValueVector();
      BigIntVector.Accessor a1 = c1.getAccessor();
      for (int i = 0; i < c1.getAccessor().getValueCount(); i++) {
        recordCount++;
        assertTrue(previousBigInt <= a1.get(i));
        previousBigInt = a1.get(i);
      }
      loader.clear();
      b.release();
    }
    System.out.println(String.format("Sorted %,d records in %d batches.", recordCount, batchCount));
  }
}
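Because previousBigInt is carried across batches, the assertion checks that the ordering holds over the whole result set rather than within each batch alone. The per-batch check could be factored into a small helper such as this sketch (the helper is invented for illustration and only reuses the accessor calls shown above):

// Hypothetical helper: asserts a loaded BigIntVector is non-decreasing, continuing from the previous batch's last value.
private static long assertNonDecreasing(BigIntVector vector, long previousValue) {
  BigIntVector.Accessor accessor = vector.getAccessor();
  for (int i = 0; i < accessor.getValueCount(); i++) {
    assertTrue(previousValue <= accessor.get(i));
    previousValue = accessor.get(i);
  }
  return previousValue;  // the caller carries this into the next batch
}
// In the loop above: previousBigInt = assertNonDecreasing(c1, previousBigInt);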