Example 21 with Tuple

Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.

The class MutatingParallelIteratorFactory, method newIterator.

@Override
public PeekingResultIterator newIterator(final StatementContext parentContext, ResultIterator iterator, Scan scan, String tableName, QueryPlan plan) throws SQLException {
    final PhoenixConnection clonedConnection = new PhoenixConnection(this.connection);
    MutationState state = mutate(parentContext, iterator, clonedConnection);
    long totalRowCount = state.getUpdateCount();
    if (clonedConnection.getAutoCommit()) {
        clonedConnection.getMutationState().join(state);
        state = clonedConnection.getMutationState();
    }
    final MutationState finalState = state;
    byte[] value = PLong.INSTANCE.toBytes(totalRowCount);
    KeyValue keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
    final Tuple tuple = new SingleKeyValueTuple(keyValue);
    return new PeekingResultIterator() {

        private boolean done = false;

        @Override
        public Tuple next() throws SQLException {
            if (done) {
                return null;
            }
            done = true;
            return tuple;
        }

        @Override
        public void explain(List<String> planSteps) {
        }

        @Override
        public void close() throws SQLException {
            try {
                /*
                 * Join the child mutation states in close(), since close() is called in a
                 * single-threaded manner after the parallel results have been processed.
                 * If auto-commit is on for the cloned child connection, then finalState here is
                 * an empty mutation state (with no mutations). However, it still has the metrics
                 * for the mutation work done by the mutating iterator. Joining the mutation state
                 * makes sure those metrics are passed over to the parent connection.
                 */
                MutatingParallelIteratorFactory.this.connection.getMutationState().join(finalState);
            } finally {
                clonedConnection.close();
            }
        }

        @Override
        public Tuple peek() throws SQLException {
            return done ? null : tuple;
        }
    };
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), KeyValue (org.apache.hadoop.hbase.KeyValue), MutationState (org.apache.phoenix.execute.MutationState), SingleKeyValueTuple (org.apache.phoenix.schema.tuple.SingleKeyValueTuple), List (java.util.List), Tuple (org.apache.phoenix.schema.tuple.Tuple), PeekingResultIterator (org.apache.phoenix.iterate.PeekingResultIterator)
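
Below is a minimal usage sketch, not from the Phoenix source: readUpdateCount is a hypothetical helper showing how a caller could drain the single-tuple iterator returned above and decode the row count, mirroring the PLong encoding used when the tuple was built.

private static long readUpdateCount(PeekingResultIterator iterator) throws SQLException {
    try {
        // The iterator produces exactly one tuple, carrying the update count.
        Tuple tuple = iterator.next();
        if (tuple == null) {
            return 0L;
        }
        // The single KeyValue's value holds the PLong-encoded count.
        Cell cell = tuple.getValue(0);
        ImmutableBytesWritable ptr = new ImmutableBytesWritable(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
        return PLong.INSTANCE.getCodec().decodeLong(ptr, SortOrder.getDefault());
    } finally {
        // close() joins the child mutation state (and its metrics) into the parent connection.
        iterator.close();
    }
}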

Example 22 with Tuple

Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.

The class PostLocalIndexDDLCompiler, method compile.

public MutationPlan compile(PTable index) throws SQLException {
    try (final PhoenixStatement statement = new PhoenixStatement(connection)) {
        String query = "SELECT count(*) FROM " + tableName;
        final QueryPlan plan = statement.compileQuery(query);
        TableRef tableRef = plan.getTableRef();
        Scan scan = plan.getContext().getScan();
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        final PTable dataTable = tableRef.getTable();
        List<PTable> indexes = Lists.newArrayListWithExpectedSize(1);
        for (PTable indexTable : dataTable.getIndexes()) {
            if (indexTable.getKey().equals(index.getKey())) {
                index = indexTable;
                break;
            }
        }
        // Only build the newly created index.
        indexes.add(index);
        IndexMaintainer.serialize(dataTable, ptr, indexes, plan.getContext().getConnection());
        // Set an attribute on the scan that UngroupedAggregateRegionObserver will switch on.
        // We'll detect server-side that this attribute was set and write the index
        // rows per region as a result. The value of the attribute will be our persisted
        // index maintainers.
        // Define LOCAL_INDEX_BUILD as a new static in BaseScannerRegionObserver.
        scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD_PROTO, ByteUtil.copyKeyBytesIfNecessary(ptr));
        // By default, we'd use a FirstKeyOnly filter as nothing else needs to be projected for count(*).
        // However, in this case, we need to project all of the data columns that contribute to the index.
        IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, connection);
        for (ColumnReference columnRef : indexMaintainer.getAllColumns()) {
            if (index.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) {
                scan.addFamily(columnRef.getFamily());
            } else {
                scan.addColumn(columnRef.getFamily(), columnRef.getQualifier());
            }
        }
        // Return a MutationPlan so that the index build can also be driven
        // with a connectionless connection (which makes testing easier).
        return new BaseMutationPlan(plan.getContext(), Operation.UPSERT) {

            @Override
            public MutationState execute() throws SQLException {
                connection.getMutationState().commitDDLFence(dataTable);
                Tuple tuple = plan.iterator().next();
                long rowCount = 0;
                if (tuple != null) {
                    Cell kv = tuple.getValue(0);
                    ImmutableBytesWritable tmpPtr = new ImmutableBytesWritable(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
                    // A single Cell will be returned with the count(*) - we decode that here
                    rowCount = PLong.INSTANCE.getCodec().decodeLong(tmpPtr, SortOrder.getDefault());
                }
                // Return a MutationState whose update count is the number of data table rows
                // scanned, which corresponds to the number of index rows that were added.
                return new MutationState(0, 0, connection, rowCount);
            }
        };
    }
}
Also used: ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), PhoenixStatement (org.apache.phoenix.jdbc.PhoenixStatement), PTable (org.apache.phoenix.schema.PTable), IndexMaintainer (org.apache.phoenix.index.IndexMaintainer), MutationState (org.apache.phoenix.execute.MutationState), Scan (org.apache.hadoop.hbase.client.Scan), Cell (org.apache.hadoop.hbase.Cell), TableRef (org.apache.phoenix.schema.TableRef), Tuple (org.apache.phoenix.schema.tuple.Tuple), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
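
A hedged driver sketch: buildLocalIndex is a hypothetical helper, not Phoenix API, showing how the compiled plan above might be executed to build the index and report how many rows were processed.

private static long buildLocalIndex(PostLocalIndexDDLCompiler compiler, PTable index) throws SQLException {
    // compile(...) prepares the count(*) scan with the LOCAL_INDEX_BUILD attribute set,
    // so executing it server-side also writes the local index rows per region.
    MutationPlan plan = compiler.compile(index);
    MutationState state = plan.execute();
    // The update count reflects the data table rows scanned during the build.
    return state.getUpdateCount();
}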

Example 23 with Tuple

Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.

The class SpillManager, method getAggregators.

// Instantiate Aggregators from a serialized byte array
private Aggregator[] getAggregators(byte[] data) throws IOException {
    DataInputStream input = null;
    try {
        input = new DataInputStream(new ByteArrayInputStream(data));
        // key length
        int keyLength = WritableUtils.readVInt(input);
        int vIntKeyLength = WritableUtils.getVIntSize(keyLength);
        ImmutableBytesPtr ptr = new ImmutableBytesPtr(data, vIntKeyLength, keyLength);
        // Skip over the key bytes, then read the value length.
        input.skip(keyLength);
        int valueLength = WritableUtils.readVInt(input);
        int vIntValLength = WritableUtils.getVIntSize(valueLength);
        KeyValue keyValue = KeyValueUtil.newKeyValue(ptr.get(), ptr.getOffset(), ptr.getLength(), QueryConstants.SINGLE_COLUMN_FAMILY, QueryConstants.SINGLE_COLUMN, QueryConstants.AGG_TIMESTAMP, data, vIntKeyLength + keyLength + vIntValLength, valueLength);
        Tuple result = new SingleKeyValueTuple(keyValue);
        TupleUtil.getAggregateValue(result, ptr);
        KeyValueSchema schema = aggregators.getValueSchema();
        ValueBitSet tempValueSet = ValueBitSet.newInstance(schema);
        tempValueSet.clear();
        tempValueSet.or(ptr);
        int i = 0, maxOffset = ptr.getOffset() + ptr.getLength();
        SingleAggregateFunction[] funcArray = aggregators.getFunctions();
        Aggregator[] sAggs = new Aggregator[funcArray.length];
        Boolean hasValue;
        schema.iterator(ptr);
        while ((hasValue = schema.next(ptr, i, maxOffset, tempValueSet)) != null) {
            SingleAggregateFunction func = funcArray[i];
            sAggs[i++] = hasValue ? func.newServerAggregator(conf, ptr) : func.newServerAggregator(conf);
        }
        return sAggs;
    } finally {
        Closeables.closeQuietly(input);
    }
}
Also used: KeyValue (org.apache.hadoop.hbase.KeyValue), ValueBitSet (org.apache.phoenix.schema.ValueBitSet), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), SingleKeyValueTuple (org.apache.phoenix.schema.tuple.SingleKeyValueTuple), Aggregator (org.apache.phoenix.expression.aggregator.Aggregator), DataInputStream (java.io.DataInputStream), ByteArrayInputStream (java.io.ByteArrayInputStream), SingleAggregateFunction (org.apache.phoenix.expression.function.SingleAggregateFunction), KeyValueSchema (org.apache.phoenix.schema.KeyValueSchema), Tuple (org.apache.phoenix.schema.tuple.Tuple)
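
For illustration, a hypothetical writer counterpart (not the SpillManager serializer itself) that produces the record layout getAggregators expects: a vint key length, the key bytes, a vint value length, then the value bytes. It uses plain java.io streams plus Hadoop's WritableUtils.

private static byte[] toSpillRecord(byte[] key, byte[] value) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream output = new DataOutputStream(bytes);
    // Key length as a vint, followed by the raw key bytes.
    WritableUtils.writeVInt(output, key.length);
    output.write(key);
    // Value length as a vint, followed by the raw aggregate-value bytes.
    WritableUtils.writeVInt(output, value.length);
    output.write(value);
    output.flush();
    return bytes.toByteArray();
}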

Example 24 with Tuple

Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.

The class CorrelatePlanTest, method newLiteralResultIterationPlan.

private QueryPlan newLiteralResultIterationPlan(Object[][] rows, Integer offset) throws SQLException {
    List<Tuple> tuples = Lists.newArrayList();
    Tuple baseTuple = new SingleKeyValueTuple(KeyValue.LOWESTKEY);
    for (Object[] row : rows) {
        Expression[] exprs = new Expression[row.length];
        for (int i = 0; i < row.length; i++) {
            exprs[i] = LiteralExpression.newConstant(row[i]);
        }
        TupleProjector projector = new TupleProjector(exprs);
        tuples.add(projector.projectResults(baseTuple));
    }
    return new LiteralResultIterationPlan(tuples, CONTEXT, SelectStatement.SELECT_ONE, TableRef.EMPTY_TABLE_REF, RowProjector.EMPTY_PROJECTOR, null, offset, OrderBy.EMPTY_ORDER_BY, null);
}
Also used: Expression (org.apache.phoenix.expression.Expression), ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression), LiteralExpression (org.apache.phoenix.expression.LiteralExpression), ComparisonExpression (org.apache.phoenix.expression.ComparisonExpression), CorrelateVariableFieldAccessExpression (org.apache.phoenix.expression.CorrelateVariableFieldAccessExpression), SingleKeyValueTuple (org.apache.phoenix.schema.tuple.SingleKeyValueTuple), Tuple (org.apache.phoenix.schema.tuple.Tuple)
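
A hedged test sketch showing how the helper above might be exercised; the test name and row data are illustrative, and a static import of org.junit.Assert.assertEquals is assumed.

@Test
public void testLiteralResultIterationPlan() throws SQLException {
    // Two illustrative rows, each projected into one tuple by the plan.
    Object[][] rows = { { 1, "a" }, { 2, "b" } };
    QueryPlan plan = newLiteralResultIterationPlan(rows, null);
    ResultIterator iterator = plan.iterator();
    int count = 0;
    for (Tuple tuple = iterator.next(); tuple != null; tuple = iterator.next()) {
        count++;
    }
    assertEquals(rows.length, count);
}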

Example 25 with Tuple

Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.

The class UnnestArrayPlanTest, method toTuples.

private List<Tuple> toTuples(PArrayDataType arrayType, List<Object[]> arrays) {
    List<Tuple> tuples = Lists.newArrayListWithExpectedSize(arrays.size());
    PDataType baseType = PDataType.fromTypeId(arrayType.getSqlType() - PDataType.ARRAY_TYPE_BASE);
    for (Object[] array : arrays) {
        PhoenixArray pArray = new PhoenixArray(baseType, array);
        byte[] bytes = arrayType.toBytes(pArray);
        tuples.add(new SingleKeyValueTuple(KeyValueUtil.newKeyValue(bytes, 0, bytes.length, bytes, 0, 0, bytes, 0, 0, 0, bytes, 0, 0)));
    }
    return tuples;
}
Also used: PDataType (org.apache.phoenix.schema.types.PDataType), PhoenixArray (org.apache.phoenix.schema.types.PhoenixArray), SingleKeyValueTuple (org.apache.phoenix.schema.tuple.SingleKeyValueTuple), Tuple (org.apache.phoenix.schema.tuple.Tuple)
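
A hedged usage sketch: PIntegerArray.INSTANCE stands in for any PArrayDataType, the input arrays are illustrative test data, and assertEquals is assumed statically imported from org.junit.Assert.

@Test
public void testToTuples() {
    List<Object[]> arrays = Lists.newArrayList(new Object[] { 1, 2, 3 }, new Object[] { 4, 5 });
    List<Tuple> tuples = toTuples(PIntegerArray.INSTANCE, arrays);
    // One tuple per input array, each wrapping the serialized array bytes.
    assertEquals(arrays.size(), tuples.size());
}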

Aggregations

Tuple (org.apache.phoenix.schema.tuple.Tuple): 48
SingleKeyValueTuple (org.apache.phoenix.schema.tuple.SingleKeyValueTuple): 22
KeyValue (org.apache.hadoop.hbase.KeyValue): 16
List (java.util.List): 10
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 10
ArrayList (java.util.ArrayList): 9
Test (org.junit.Test): 9
Expression (org.apache.phoenix.expression.Expression): 8
SQLException (java.sql.SQLException): 7
Cell (org.apache.hadoop.hbase.Cell): 6
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 6
IOException (java.io.IOException): 5
Region (org.apache.hadoop.hbase.regionserver.Region): 5
ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression): 5
Aggregator (org.apache.phoenix.expression.aggregator.Aggregator): 5
ResultIterator (org.apache.phoenix.iterate.ResultIterator): 5
PColumn (org.apache.phoenix.schema.PColumn): 5
ResultTuple (org.apache.phoenix.schema.tuple.ResultTuple): 5
ClientAggregators (org.apache.phoenix.expression.aggregator.ClientAggregators): 4
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 4