Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.
The class MutatingParallelIteratorFactory, method newIterator.
@Override
public PeekingResultIterator newIterator(final StatementContext parentContext, ResultIterator iterator,
        Scan scan, String tableName, QueryPlan plan) throws SQLException {
    final PhoenixConnection clonedConnection = new PhoenixConnection(this.connection);
    MutationState state = mutate(parentContext, iterator, clonedConnection);
    long totalRowCount = state.getUpdateCount();
    if (clonedConnection.getAutoCommit()) {
        clonedConnection.getMutationState().join(state);
        state = clonedConnection.getMutationState();
    }
    final MutationState finalState = state;
    byte[] value = PLong.INSTANCE.toBytes(totalRowCount);
    KeyValue keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
            SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
    final Tuple tuple = new SingleKeyValueTuple(keyValue);
    return new PeekingResultIterator() {
        private boolean done = false;

        @Override
        public Tuple next() throws SQLException {
            if (done) {
                return null;
            }
            done = true;
            return tuple;
        }

        @Override
        public void explain(List<String> planSteps) {
        }

        @Override
        public void close() throws SQLException {
            try {
                /*
                 * Join the child mutation states in close, since this is called in a single threaded
                 * manner after the parallel results have been processed.
                 * If auto-commit is on for the cloned child connection, then the finalState here is
                 * an empty mutation state (with no mutations). However, it still has the metrics for
                 * mutation work done by the mutating iterator. Joining the mutation state makes sure
                 * those metrics are passed over to the parent connection.
                 */
                MutatingParallelIteratorFactory.this.connection.getMutationState().join(finalState);
            } finally {
                clonedConnection.close();
            }
        }

        @Override
        public Tuple peek() throws SQLException {
            return done ? null : tuple;
        }
    };
}
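For context, a minimal sketch of how a caller could read the row count back out of the single tuple this iterator produces. The readRowCount helper name is hypothetical, but the decoding mirrors the PLong encoding used above:

// Hypothetical helper (not part of MutatingParallelIteratorFactory): decodes the
// update count that the iterator above encodes as a single PLong cell.
private static long readRowCount(PeekingResultIterator iterator) throws SQLException {
    try {
        Tuple tuple = iterator.next();
        if (tuple == null) {
            return 0L;
        }
        Cell cell = tuple.getValue(0);
        ImmutableBytesWritable ptr = new ImmutableBytesWritable(cell.getValueArray(),
                cell.getValueOffset(), cell.getValueLength());
        return PLong.INSTANCE.getCodec().decodeLong(ptr, SortOrder.getDefault());
    } finally {
        iterator.close();
    }
}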
Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.
The class PostLocalIndexDDLCompiler, method compile.
public MutationPlan compile(PTable index) throws SQLException {
    try (final PhoenixStatement statement = new PhoenixStatement(connection)) {
        String query = "SELECT count(*) FROM " + tableName;
        final QueryPlan plan = statement.compileQuery(query);
        TableRef tableRef = plan.getTableRef();
        Scan scan = plan.getContext().getScan();
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        final PTable dataTable = tableRef.getTable();
        List<PTable> indexes = Lists.newArrayListWithExpectedSize(1);
        for (PTable indexTable : dataTable.getIndexes()) {
            if (indexTable.getKey().equals(index.getKey())) {
                index = indexTable;
                break;
            }
        }
        // Only build the newly created index.
        indexes.add(index);
        IndexMaintainer.serialize(dataTable, ptr, indexes, plan.getContext().getConnection());
        // Set an attribute on the scan that UngroupedAggregateRegionObserver will switch on.
        // We'll detect server-side that this attribute was set and write the index rows
        // per region as a result. The value of the attribute is our persisted index
        // maintainers. LOCAL_INDEX_BUILD_PROTO is a static defined in BaseScannerRegionObserver.
        scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD_PROTO,
                ByteUtil.copyKeyBytesIfNecessary(ptr));
        // By default, we'd use a FirstKeyOnly filter as nothing else needs to be projected
        // for count(*). However, in this case, we need to project all of the data columns
        // that contribute to the index.
        IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, connection);
        for (ColumnReference columnRef : indexMaintainer.getAllColumns()) {
            if (index.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) {
                scan.addFamily(columnRef.getFamily());
            } else {
                scan.addColumn(columnRef.getFamily(), columnRef.getQualifier());
            }
        }
        // Go through the MutationPlan abstraction so that this can also be used
        // with a connectionless connection (which makes testing easier).
        return new BaseMutationPlan(plan.getContext(), Operation.UPSERT) {

            @Override
            public MutationState execute() throws SQLException {
                connection.getMutationState().commitDDLFence(dataTable);
                Tuple tuple = plan.iterator().next();
                long rowCount = 0;
                if (tuple != null) {
                    Cell kv = tuple.getValue(0);
                    ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable(kv.getValueArray(),
                            kv.getValueOffset(), kv.getValueLength());
                    // A single Cell will be returned with the count(*) - we decode that here.
                    rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault());
                }
                // Return the data-table row count, which corresponds to the number of index
                // rows that were added.
                return new MutationState(0, 0, connection, rowCount);
            }
        };
    }
}
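A brief usage sketch, assuming a PostLocalIndexDDLCompiler instance named compiler was constructed elsewhere (the variable names are illustrative): compile once, then execute; the returned MutationState carries the number of index rows built.

// Illustrative call site (compiler and index are assumed to exist).
MutationPlan indexBuildPlan = compiler.compile(index);
MutationState indexBuildState = indexBuildPlan.execute();
long indexRowsBuilt = indexBuildState.getUpdateCount();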
Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.
The class SpillManager, method getAggregators.
// Instantiate Aggregators from a serialized byte array.
private Aggregator[] getAggregators(byte[] data) throws IOException {
    DataInputStream input = null;
    try {
        input = new DataInputStream(new ByteArrayInputStream(data));
        // key length
        int keyLength = WritableUtils.readVInt(input);
        int vIntKeyLength = WritableUtils.getVIntSize(keyLength);
        ImmutableBytesPtr ptr = new ImmutableBytesPtr(data, vIntKeyLength, keyLength);
        // value length
        input.skip(keyLength);
        int valueLength = WritableUtils.readVInt(input);
        // Use the vint size of the value length (not the key length) so the value offset is correct.
        int vIntValLength = WritableUtils.getVIntSize(valueLength);
        KeyValue keyValue = KeyValueUtil.newKeyValue(ptr.get(), ptr.getOffset(), ptr.getLength(),
                QueryConstants.SINGLE_COLUMN_FAMILY, QueryConstants.SINGLE_COLUMN,
                QueryConstants.AGG_TIMESTAMP, data, vIntKeyLength + keyLength + vIntValLength,
                valueLength);
        Tuple result = new SingleKeyValueTuple(keyValue);
        TupleUtil.getAggregateValue(result, ptr);
        KeyValueSchema schema = aggregators.getValueSchema();
        ValueBitSet tempValueSet = ValueBitSet.newInstance(schema);
        tempValueSet.clear();
        tempValueSet.or(ptr);
        int i = 0, maxOffset = ptr.getOffset() + ptr.getLength();
        SingleAggregateFunction[] funcArray = aggregators.getFunctions();
        Aggregator[] sAggs = new Aggregator[funcArray.length];
        Boolean hasValue;
        schema.iterator(ptr);
        while ((hasValue = schema.next(ptr, i, maxOffset, tempValueSet)) != null) {
            SingleAggregateFunction func = funcArray[i];
            sAggs[i++] = hasValue ? func.newServerAggregator(conf, ptr) : func.newServerAggregator(conf);
        }
        return sAggs;
    } finally {
        Closeables.closeQuietly(input);
    }
}
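The layout getAggregators expects is [vint key length][key bytes][vint value length][value bytes]. A minimal writer sketch producing that layout; serializeEntry is a hypothetical helper, not the actual SpillManager serialization code:

// Hypothetical writer for the layout parsed above:
// [vint keyLength][key bytes][vint valueLength][value bytes]
private static byte[] serializeEntry(byte[] key, byte[] value) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    WritableUtils.writeVInt(out, key.length);
    out.write(key);
    WritableUtils.writeVInt(out, value.length);
    out.write(value);
    out.flush();
    return bytes.toByteArray();
}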
Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.
The class CorrelatePlanTest, method newLiteralResultIterationPlan.
private QueryPlan newLiteralResultIterationPlan(Object[][] rows, Integer offset) throws SQLException {
    List<Tuple> tuples = Lists.newArrayList();
    Tuple baseTuple = new SingleKeyValueTuple(KeyValue.LOWESTKEY);
    for (Object[] row : rows) {
        Expression[] exprs = new Expression[row.length];
        for (int i = 0; i < row.length; i++) {
            exprs[i] = LiteralExpression.newConstant(row[i]);
        }
        TupleProjector projector = new TupleProjector(exprs);
        tuples.add(projector.projectResults(baseTuple));
    }
    return new LiteralResultIterationPlan(tuples, CONTEXT, SelectStatement.SELECT_ONE,
            TableRef.EMPTY_TABLE_REF, RowProjector.EMPTY_PROJECTOR, null, offset,
            OrderBy.EMPTY_ORDER_BY, null);
}
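A hedged usage sketch of the helper above, with illustrative row data:

// Illustrative invocation: two single-column literal rows, no offset.
Object[][] rows = { { 1 }, { 2 } };
QueryPlan plan = newLiteralResultIterationPlan(rows, null);
ResultIterator iterator = plan.iterator();
for (Tuple tuple = iterator.next(); tuple != null; tuple = iterator.next()) {
    // Each tuple carries one projected literal row.
}
iterator.close();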
Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.
The class UnnestArrayPlanTest, method toTuples.
private List<Tuple> toTuples(PArrayDataType arrayType, List<Object[]> arrays) {
    List<Tuple> tuples = Lists.newArrayListWithExpectedSize(arrays.size());
    PDataType baseType = PDataType.fromTypeId(arrayType.getSqlType() - PDataType.ARRAY_TYPE_BASE);
    for (Object[] array : arrays) {
        PhoenixArray pArray = new PhoenixArray(baseType, array);
        byte[] bytes = arrayType.toBytes(pArray);
        tuples.add(new SingleKeyValueTuple(KeyValueUtil.newKeyValue(bytes, 0, bytes.length,
                bytes, 0, 0, bytes, 0, 0, 0, bytes, 0, 0)));
    }
    return tuples;
}
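For the inverse direction, a minimal sketch (assuming the same arrayType and tuples as above) of decoding one of these tuples back into a PhoenixArray; the serialized array bytes are stored as the tuple's row key:

// Illustrative round trip: the row key of each tuple holds the serialized array.
Tuple tuple = tuples.get(0);
ImmutableBytesWritable keyPtr = new ImmutableBytesWritable();
tuple.getKey(keyPtr);
PhoenixArray decoded = (PhoenixArray) arrayType.toObject(keyPtr.get(), keyPtr.getOffset(),
        keyPtr.getLength());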