Use of org.apache.drill.exec.vector.BigIntVector in project drill by apache.
The class TestSimpleTopN, method sortOneKeyAscending.
@Test
public void sortOneKeyAscending() throws Throwable {
  RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
  try (Drillbit bit1 = new Drillbit(CONFIG, serviceSet);
       Drillbit bit2 = new Drillbit(CONFIG, serviceSet);
       DrillClient client = new DrillClient(CONFIG, serviceSet.getCoordinator())) {
    bit1.run();
    bit2.run();
    client.connect();
    List<QueryDataBatch> results = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL, Files.toString(FileUtils.getResourceAsFile("/topN/one_key_sort.json"), Charsets.UTF_8));
    // First pass: the batches must contain exactly 100 rows in total.
    int count = 0;
    for (QueryDataBatch b : results) {
      if (b.getHeader().getRowCount() != 0) {
        count += b.getHeader().getRowCount();
      }
    }
    assertEquals(100, count);
    // Second pass: the "blue" column must be in ascending order across all batches.
    long previousBigInt = Long.MIN_VALUE;
    int recordCount = 0;
    int batchCount = 0;
    for (QueryDataBatch b : results) {
      if (b.getHeader().getRowCount() == 0) {
        continue;
      }
      batchCount++;
      RecordBatchLoader loader = new RecordBatchLoader(bit1.getContext().getAllocator());
      loader.load(b.getHeader().getDef(), b.getData());
      BigIntVector c1 = (BigIntVector) loader.getValueAccessorById(BigIntVector.class, loader.getValueVectorId(new SchemaPath("blue", ExpressionPosition.UNKNOWN)).getFieldIds()).getValueVector();
      BigIntVector.Accessor a1 = c1.getAccessor();
      for (int i = 0; i < c1.getAccessor().getValueCount(); i++) {
        recordCount++;
        assertTrue(previousBigInt <= a1.get(i));
        previousBigInt = a1.get(i);
      }
      loader.clear();
      b.release();
    }
    System.out.println(String.format("Sorted %,d records in %d batches.", recordCount, batchCount));
  }
}
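All of the tests on this page read BigIntVector instances that the engine has already populated. As a point of reference, here is a minimal sketch of allocating, filling, and reading a BigIntVector directly through its Mutator and Accessor. It assumes RootAllocatorFactory.newRoot and the MaterializedField.create(String, MajorType) factory; those signatures have shifted between Drill versions, so treat this as illustrative rather than canonical.

import org.apache.drill.common.config.DrillConfig;
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.common.types.Types;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.memory.RootAllocatorFactory;
import org.apache.drill.exec.record.MaterializedField;
import org.apache.drill.exec.vector.BigIntVector;

public class BigIntVectorSketch {
  public static void main(String[] args) throws Exception {
    DrillConfig config = DrillConfig.create();
    // Assumption: a root allocator created directly; the tests above reuse the Drillbit's allocator instead.
    BufferAllocator allocator = RootAllocatorFactory.newRoot(config);
    MaterializedField field = MaterializedField.create("blue", Types.required(MinorType.BIGINT));
    BigIntVector vector = new BigIntVector(field, allocator);
    try {
      vector.allocateNew(4);
      BigIntVector.Mutator mutator = vector.getMutator();
      for (int i = 0; i < 4; i++) {
        mutator.setSafe(i, i * 100L); // write values through the Mutator
      }
      mutator.setValueCount(4); // record how many values were written
      BigIntVector.Accessor accessor = vector.getAccessor();
      for (int i = 0; i < accessor.getValueCount(); i++) {
        System.out.println(accessor.get(i)); // read them back through the Accessor
      }
    } finally {
      vector.clear(); // release the vector's buffers
      allocator.close(); // release the allocator
    }
  }
}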
Use of org.apache.drill.exec.vector.BigIntVector in project drill by apache.
The class TestSimpleSort, method sortTwoKeysOneAscendingOneDescending.
@Test
public void sortTwoKeysOneAscendingOneDescending(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable {
  mockDrillbitContext(bitContext);
  final PhysicalPlanReader reader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(c);
  final PhysicalPlan plan = reader.readPhysicalPlan(Files.toString(FileUtils.getResourceAsFile("/sort/two_key_sort.json"), Charsets.UTF_8));
  final FunctionImplementationRegistry registry = new FunctionImplementationRegistry(c);
  final FragmentContext context = new FragmentContext(bitContext, PlanFragment.getDefaultInstance(), connection, registry);
  final SimpleRootExec exec = new SimpleRootExec(ImplCreator.getExec(context, (FragmentRoot) plan.getSortedOperators(false).iterator().next()));
  int previousInt = Integer.MIN_VALUE;
  long previousLong = Long.MAX_VALUE;
  int recordCount = 0;
  int batchCount = 0;
  while (exec.next()) {
    batchCount++;
    final IntVector c1 = exec.getValueVectorById(new SchemaPath("blue", ExpressionPosition.UNKNOWN), IntVector.class);
    final BigIntVector c2 = exec.getValueVectorById(new SchemaPath("alt", ExpressionPosition.UNKNOWN), BigIntVector.class);
    final IntVector.Accessor a1 = c1.getAccessor();
    final BigIntVector.Accessor a2 = c2.getAccessor();
    for (int i = 0; i < c1.getAccessor().getValueCount(); i++) {
      recordCount++;
      // "blue" must be non-decreasing across the whole result set.
      assertTrue(previousInt <= a1.get(i));
      if (previousInt != a1.get(i)) {
        // New primary value: restart the descending check on "alt".
        previousLong = Long.MAX_VALUE;
        previousInt = a1.get(i);
      }
      // "alt" must be non-increasing within each run of equal "blue" values.
      assertTrue(previousLong >= a2.get(i));
      //System.out.println(previousInt + "\t" + a2.get(i));
      previousLong = a2.get(i);
    }
  }
  System.out.println(String.format("Sorted %,d records in %d batches.", recordCount, batchCount));
  if (context.getFailureCause() != null) {
    throw context.getFailureCause();
  }
  assertTrue(!context.isFailed());
}
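The subtle step in the loop above is that the descending check on alt is only meaningful within a run of equal blue values, so the running comparison value has to be reset whenever the primary key changes. A standalone sketch of that invariant on plain arrays; the sample data below is hypothetical, not taken from two_key_sort.json.

public class TwoKeyOrderSketch {
  public static void main(String[] args) {
    // Hypothetical sample rows: primary key ascending, secondary key descending within each primary value.
    int[] blue = { 1, 1, 1, 2, 2, 3 };
    long[] alt = { 30L, 20L, 10L, 99L, 50L, 7L };
    int previousInt = Integer.MIN_VALUE;
    long previousLong = Long.MAX_VALUE;
    for (int i = 0; i < blue.length; i++) {
      if (previousInt > blue[i]) {
        throw new AssertionError("primary key not ascending at row " + i);
      }
      if (previousInt != blue[i]) {
        previousLong = Long.MAX_VALUE; // new primary value: restart the descending check
        previousInt = blue[i];
      }
      if (previousLong < alt[i]) {
        throw new AssertionError("secondary key not descending at row " + i);
      }
      previousLong = alt[i]; // track the last secondary value within the run
    }
    System.out.println("ordering invariant holds for " + blue.length + " rows");
  }
}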
Use of org.apache.drill.exec.vector.BigIntVector in project drill by apache.
The class TestOrderedPartitionExchange, method twoBitTwoExchangeRun.
/**
 * Starts two drillbits and runs a physical plan with a Mock scan, project, OrderedPartitionExchange, UnionExchange,
 * and sort. The final sort is done first on the partition column, and the test verifies that the partitions are
 * correct, in that all rows in partition 0 come before any row in partition 1 in the sort order, and so on. It also
 * verifies that the standard deviation of the partition sizes is less than one tenth of their mean, because all
 * partitions are expected to be roughly equal in size. (A distilled sketch of that uniformity check follows this
 * listing.)
 * @throws Exception
 */
@Test
public void twoBitTwoExchangeRun() throws Exception {
  RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
  try (Drillbit bit1 = new Drillbit(CONFIG, serviceSet);
       Drillbit bit2 = new Drillbit(CONFIG, serviceSet);
       DrillClient client = new DrillClient(CONFIG, serviceSet.getCoordinator())) {
    bit1.run();
    bit2.run();
    client.connect();
    List<QueryDataBatch> results = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL, Files.toString(FileUtils.getResourceAsFile("/sender/ordered_exchange.json"), Charsets.UTF_8));
    int count = 0;
    List<Integer> partitionRecordCounts = Lists.newArrayList();
    for (QueryDataBatch b : results) {
      if (b.getData() != null) {
        int rows = b.getHeader().getRowCount();
        count += rows;
        DrillConfig config = DrillConfig.create();
        RecordBatchLoader loader = new RecordBatchLoader(new BootStrapContext(config, ClassPathScanner.fromPrescan(config)).getAllocator());
        loader.load(b.getHeader().getDef(), b.getData());
        BigIntVector vv1 = (BigIntVector) loader.getValueAccessorById(BigIntVector.class, loader.getValueVectorId(new SchemaPath("col1", ExpressionPosition.UNKNOWN)).getFieldIds()).getValueVector();
        Float8Vector vv2 = (Float8Vector) loader.getValueAccessorById(Float8Vector.class, loader.getValueVectorId(new SchemaPath("col2", ExpressionPosition.UNKNOWN)).getFieldIds()).getValueVector();
        IntVector pVector = (IntVector) loader.getValueAccessorById(IntVector.class, loader.getValueVectorId(new SchemaPath("partition", ExpressionPosition.UNKNOWN)).getFieldIds()).getValueVector();
        long previous1 = Long.MIN_VALUE;
        double previous2 = Double.MIN_VALUE;
        int partPrevious = -1;
        long current1 = Long.MIN_VALUE;
        double current2 = Double.MIN_VALUE;
        int partCurrent = -1;
        int partitionRecordCount = 0;
        for (int i = 0; i < rows; i++) {
          previous1 = current1;
          previous2 = current2;
          partPrevious = partCurrent;
          current1 = vv1.getAccessor().get(i);
          current2 = vv2.getAccessor().get(i);
          partCurrent = pVector.getAccessor().get(i);
          // col1 must be non-decreasing; within equal col1 values, col2 must be non-increasing.
          Assert.assertTrue(current1 >= previous1);
          if (current1 == previous1) {
            Assert.assertTrue(current2 <= previous2);
          }
          // Count consecutive rows per partition; close out the count whenever the partition changes.
          if (partCurrent == partPrevious || partPrevious == -1) {
            partitionRecordCount++;
          } else {
            partitionRecordCounts.add(partitionRecordCount);
            partitionRecordCount = 0;
          }
        }
        partitionRecordCounts.add(partitionRecordCount);
        loader.clear();
      }
      b.release();
    }
    // Compute the spread of partition sizes; all partitions should be roughly equal.
    double[] values = new double[partitionRecordCounts.size()];
    int i = 0;
    for (Integer rc : partitionRecordCounts) {
      values[i++] = rc.doubleValue();
    }
    StandardDeviation stdDev = new StandardDeviation();
    Mean mean = new Mean();
    double std = stdDev.evaluate(values);
    double m = mean.evaluate(values);
    System.out.println("mean: " + m + " std dev: " + std);
    //Assert.assertTrue(std < 0.1 * m);
    assertEquals(31000, count);
  }
}
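The partition-size uniformity criterion described in the javadoc (standard deviation less than one tenth of the mean) comes down to a few lines of commons-math. A minimal sketch with hypothetical per-partition counts; the commons-math3 package names are an assumption, since the commons-math version bundled with Drill has varied.

import org.apache.commons.math3.stat.descriptive.moment.Mean;
import org.apache.commons.math3.stat.descriptive.moment.StandardDeviation;

public class PartitionUniformitySketch {
  public static void main(String[] args) {
    // Hypothetical per-partition record counts; the test collects these by scanning the "partition" column.
    double[] partitionSizes = { 7750, 7810, 7695, 7745 };
    double mean = new Mean().evaluate(partitionSizes);
    double std = new StandardDeviation().evaluate(partitionSizes);
    System.out.println("mean: " + mean + " std dev: " + std);
    // Partitions count as "roughly equal" when the spread is under one tenth of the mean.
    if (std >= 0.1 * mean) {
      throw new AssertionError("partition sizes too uneven: std=" + std + ", mean=" + mean);
    }
  }
}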
Use of org.apache.drill.exec.vector.BigIntVector in project drill by apache.
The class TestAgg, method oneKeyAgg.
@Test
public void oneKeyAgg(@Injectable final DrillbitContext bitContext, @Injectable UserClientConnection connection) throws Throwable {
  final SimpleRootExec exec = doTest(bitContext, connection, "/agg/test1.json");
  while (exec.next()) {
    final BigIntVector cnt = exec.getValueVectorById(SchemaPath.getSimplePath("cnt"), BigIntVector.class);
    final IntVector key = exec.getValueVectorById(SchemaPath.getSimplePath("blue"), IntVector.class);
    final long[] cntArr = { 10001, 9999 };
    final int[] keyArr = { Integer.MIN_VALUE, Integer.MAX_VALUE };
    for (int i = 0; i < exec.getRecordCount(); i++) {
      assertEquals((Long) cntArr[i], cnt.getAccessor().getObject(i));
      assertEquals((Integer) keyArr[i], key.getAccessor().getObject(i));
    }
  }
  if (exec.getContext().getFailureCause() != null) {
    throw exec.getContext().getFailureCause();
  }
  assertTrue(!exec.getContext().isFailed());
}
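The aggregation test reads results through getObject, which for a required BIGINT column returns a boxed Long (hence the (Long) cast on the expected values), whereas get returns the primitive long. A small illustrative helper showing the two read paths; the class and method names here are hypothetical.

import org.apache.drill.exec.vector.BigIntVector;

public class AccessorReadSketch {
  // Illustrative helper: both reads return the same value; getObject boxes it into a Long.
  static void printRow(BigIntVector vector, int row) {
    BigIntVector.Accessor accessor = vector.getAccessor();
    long primitive = accessor.get(row); // primitive read, no boxing
    Long boxed = (Long) accessor.getObject(row); // boxed read, as used with assertEquals above
    System.out.println(primitive + " == " + boxed);
  }
}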