Use of org.apache.phoenix.schema.tuple.SingleKeyValueTuple in project phoenix by apache.
The class RowKeyOrderedAggregateResultIterator, method advance().
@Override
protected Tuple advance() throws SQLException {
    Tuple current = this.next;
    boolean traversedIterators = nextTraversedIterators;
    if (current == null) {
        current = nextTuple();
        traversedIterators = this.traversedIterator;
    }
    if (current != null) {
        Tuple previous = current;
        Aggregator[] rowAggregators = null;
        while (true) {
            current = nextTuple();
            if (!traversedIterators || !continueAggregating(previous, current)) {
                break;
            }
            if (rowAggregators == null) {
                rowAggregators = aggregate(previous);
            }
            aggregators.aggregate(rowAggregators, current);
            traversedIterators = this.traversedIterator;
        }
        this.next = current;
        this.nextTraversedIterators = this.traversedIterator;
        if (rowAggregators == null) {
            current = previous;
        } else {
            byte[] value = aggregators.toBytes(rowAggregators);
            current = new SingleKeyValueTuple(KeyValueUtil.newKeyValue(previousKey, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
        }
    }
    if (current == null) {
        // Close underlying ResultIterators to free resources sooner rather than later
        close();
    }
    return current;
}
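For reference, a minimal standalone sketch of the same packaging step used in the else-branch above: serialized aggregate bytes are wrapped in a KeyValue under the group's row key and the single agg column constants, then handed out as a SingleKeyValueTuple. The class name, the rowKey/aggBytes inputs, and the import locations are assumptions for illustration, not taken from the Phoenix source.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.util.KeyValueUtil;

public class AggregateTupleSketch {
    // Wrap already-serialized aggregate bytes under a group's row key.
    // rowKey and aggBytes are hypothetical placeholders.
    static Tuple toAggregateTuple(byte[] rowKey, byte[] aggBytes) {
        KeyValue kv = KeyValueUtil.newKeyValue(rowKey,
                QueryConstants.SINGLE_COLUMN_FAMILY, QueryConstants.SINGLE_COLUMN,
                QueryConstants.AGG_TIMESTAMP, aggBytes, 0, aggBytes.length);
        return new SingleKeyValueTuple(kv);
    }

    public static void main(String[] args) {
        Tuple t = toAggregateTuple(Bytes.toBytes("group-a"), Bytes.toBytes(123L));
        ImmutableBytesWritable keyPtr = new ImmutableBytesWritable();
        t.getKey(keyPtr); // keyPtr now points at the group's row key
        System.out.println(Bytes.toString(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength()));
    }
}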
Use of org.apache.phoenix.schema.tuple.SingleKeyValueTuple in project phoenix by apache.
The class MutatingParallelIteratorFactory, method newIterator().
@Override
public PeekingResultIterator newIterator(final StatementContext parentContext, ResultIterator iterator, Scan scan, String tableName, QueryPlan plan) throws SQLException {
    final PhoenixConnection clonedConnection = new PhoenixConnection(this.connection);
    MutationState state = mutate(parentContext, iterator, clonedConnection);
    long totalRowCount = state.getUpdateCount();
    if (clonedConnection.getAutoCommit()) {
        clonedConnection.getMutationState().join(state);
        state = clonedConnection.getMutationState();
    }
    final MutationState finalState = state;
    byte[] value = PLong.INSTANCE.toBytes(totalRowCount);
    KeyValue keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
    final Tuple tuple = new SingleKeyValueTuple(keyValue);
    return new PeekingResultIterator() {
        private boolean done = false;

        @Override
        public Tuple next() throws SQLException {
            if (done) {
                return null;
            }
            done = true;
            return tuple;
        }

        @Override
        public void explain(List<String> planSteps) {
        }

        @Override
        public void close() throws SQLException {
            try {
                /*
                 * Join the child mutation states in close, since this is called in a single threaded manner
                 * after the parallel results have been processed.
                 * If auto-commit is on for the cloned child connection, then the finalState here is an empty mutation
                 * state (with no mutations). However, it still has the metrics for mutation work done by the
                 * mutating-iterator. Joining the mutation state makes sure those metrics are passed over
                 * to the parent connection.
                 */
                MutatingParallelIteratorFactory.this.connection.getMutationState().join(finalState);
            } finally {
                clonedConnection.close();
            }
        }

        @Override
        public Tuple peek() throws SQLException {
            return done ? null : tuple;
        }
    };
}
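To make the intent of that one-row iterator concrete, here is a hedged sketch of what a consumer might do: drain the iterator and decode the PLong-encoded update count from the single agg column. readUpdateCount is a hypothetical helper, and the import locations and the Tuple.getValue(byte[], byte[], ImmutableBytesWritable) overload are assumptions about the Phoenix API rather than code from the project.

import java.sql.SQLException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.iterate.PeekingResultIterator;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.schema.types.PLong;

public class UpdateCountSketch {
    // Drain the single-row iterator and decode the row count encoded with PLong.
    static long readUpdateCount(PeekingResultIterator iterator) throws SQLException {
        long total = 0;
        Tuple tuple;
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        while ((tuple = iterator.next()) != null) {
            // The count lives in the single agg column written by the factory.
            if (tuple.getValue(QueryConstants.SINGLE_COLUMN_FAMILY,
                    QueryConstants.SINGLE_COLUMN, ptr)) {
                total += (Long) PLong.INSTANCE.toObject(ptr.copyBytes());
            }
        }
        iterator.close();
        return total;
    }
}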
Use of org.apache.phoenix.schema.tuple.SingleKeyValueTuple in project phoenix by apache.
The class SpillManager, method getAggregators().
// Instantiate Aggregators from a serialized byte array
private Aggregator[] getAggregators(byte[] data) throws IOException {
    DataInputStream input = null;
    try {
        input = new DataInputStream(new ByteArrayInputStream(data));
        // key length
        int keyLength = WritableUtils.readVInt(input);
        int vIntKeyLength = WritableUtils.getVIntSize(keyLength);
        ImmutableBytesPtr ptr = new ImmutableBytesPtr(data, vIntKeyLength, keyLength);
        // value length
        input.skip(keyLength);
        int valueLength = WritableUtils.readVInt(input);
        int vIntValLength = WritableUtils.getVIntSize(valueLength);
        // The serialized aggregate values start right after the key and the two vint length prefixes
        KeyValue keyValue = KeyValueUtil.newKeyValue(ptr.get(), ptr.getOffset(), ptr.getLength(), QueryConstants.SINGLE_COLUMN_FAMILY, QueryConstants.SINGLE_COLUMN, QueryConstants.AGG_TIMESTAMP, data, vIntKeyLength + keyLength + vIntValLength, valueLength);
        Tuple result = new SingleKeyValueTuple(keyValue);
        TupleUtil.getAggregateValue(result, ptr);
        KeyValueSchema schema = aggregators.getValueSchema();
        ValueBitSet tempValueSet = ValueBitSet.newInstance(schema);
        tempValueSet.clear();
        tempValueSet.or(ptr);
        int i = 0, maxOffset = ptr.getOffset() + ptr.getLength();
        SingleAggregateFunction[] funcArray = aggregators.getFunctions();
        Aggregator[] sAggs = new Aggregator[funcArray.length];
        Boolean hasValue;
        schema.iterator(ptr);
        while ((hasValue = schema.next(ptr, i, maxOffset, tempValueSet)) != null) {
            SingleAggregateFunction func = funcArray[i];
            sAggs[i++] = hasValue ? func.newServerAggregator(conf, ptr) : func.newServerAggregator(conf);
        }
        return sAggs;
    } finally {
        Closeables.closeQuietly(input);
    }
}
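The decoder above implies a specific spill-buffer layout: a vint-encoded key length, the key bytes, a vint-encoded value length, then the serialized aggregate values. The following is a small illustrative sketch of producing that layout with Hadoop's WritableUtils, under that assumption; it is not the actual SpillManager serializer.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.WritableUtils;

public class SpillBufferLayoutSketch {
    // Serialize a key/value pair in the layout getAggregators(byte[]) decodes:
    // vint(key length) | key bytes | vint(value length) | value bytes.
    static byte[] toSpillBytes(byte[] key, byte[] value) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        WritableUtils.writeVInt(out, key.length);    // key length as a vint
        out.write(key);                              // key bytes
        WritableUtils.writeVInt(out, value.length);  // value length as a vint
        out.write(value);                            // serialized aggregate values
        out.flush();
        return bytes.toByteArray();
    }
}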
Use of org.apache.phoenix.schema.tuple.SingleKeyValueTuple in project phoenix by apache.
The class CorrelatePlanTest, method newLiteralResultIterationPlan().
private QueryPlan newLiteralResultIterationPlan(Object[][] rows, Integer offset) throws SQLException {
    List<Tuple> tuples = Lists.newArrayList();
    Tuple baseTuple = new SingleKeyValueTuple(KeyValue.LOWESTKEY);
    for (Object[] row : rows) {
        Expression[] exprs = new Expression[row.length];
        for (int i = 0; i < row.length; i++) {
            exprs[i] = LiteralExpression.newConstant(row[i]);
        }
        TupleProjector projector = new TupleProjector(exprs);
        tuples.add(projector.projectResults(baseTuple));
    }
    return new LiteralResultIterationPlan(tuples, CONTEXT, SelectStatement.SELECT_ONE, TableRef.EMPTY_TABLE_REF, RowProjector.EMPTY_PROJECTOR, null, offset, OrderBy.EMPTY_ORDER_BY, null);
}
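The dummy baseTuple built from KeyValue.LOWESTKEY works because literal expressions carry their own serialized bytes and ignore the tuple they are evaluated against. A minimal sketch of that behaviour; the class name is illustrative and the import paths are assumed.

import java.sql.SQLException;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.LiteralExpression;

public class LiteralRowSketch {
    public static void main(String[] args) throws SQLException {
        Expression expr = LiteralExpression.newConstant(42);
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        boolean hasValue = expr.evaluate(null, ptr); // no backing row is needed for a literal
        Object value = expr.getDataType().toObject(ptr.get(), ptr.getOffset(), ptr.getLength());
        System.out.println(hasValue + " " + value); // expected: true 42
    }
}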
Use of org.apache.phoenix.schema.tuple.SingleKeyValueTuple in project phoenix by apache.
The class UnnestArrayPlanTest, method toTuples().
private List<Tuple> toTuples(PArrayDataType arrayType, List<Object[]> arrays) {
    List<Tuple> tuples = Lists.newArrayListWithExpectedSize(arrays.size());
    PDataType baseType = PDataType.fromTypeId(arrayType.getSqlType() - PDataType.ARRAY_TYPE_BASE);
    for (Object[] array : arrays) {
        PhoenixArray pArray = new PhoenixArray(baseType, array);
        byte[] bytes = arrayType.toBytes(pArray);
        tuples.add(new SingleKeyValueTuple(KeyValueUtil.newKeyValue(bytes, 0, bytes.length, bytes, 0, 0, bytes, 0, 0, 0, bytes, 0, 0)));
    }
    return tuples;
}
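A hedged, self-contained variant of the same trick for a single INTEGER array, showing that the serialized array bytes end up as the row key of the tuple, which downstream code (such as an unnest plan) can read back. The concrete PIntegerArray type, the class name, and the import locations are assumptions for illustration.

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.schema.tuple.SingleKeyValueTuple;
import org.apache.phoenix.schema.tuple.Tuple;
import org.apache.phoenix.schema.types.PInteger;
import org.apache.phoenix.schema.types.PIntegerArray;
import org.apache.phoenix.schema.types.PhoenixArray;
import org.apache.phoenix.util.KeyValueUtil;

public class ArrayTupleSketch {
    public static void main(String[] args) {
        // Serialize an INTEGER ARRAY and place it in a tuple as the row key,
        // the same approach toTuples(...) uses above.
        PhoenixArray array = new PhoenixArray(PInteger.INSTANCE, new Object[] { 1, 2, 3 });
        byte[] bytes = PIntegerArray.INSTANCE.toBytes(array);
        Tuple tuple = new SingleKeyValueTuple(KeyValueUtil.newKeyValue(
                bytes, 0, bytes.length, bytes, 0, 0, bytes, 0, 0, 0, bytes, 0, 0));
        // The serialized array is recovered from the tuple's key.
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        tuple.getKey(ptr);
        System.out.println(ptr.getLength() + " bytes of serialized array");
    }
}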