Example 16 with Tuple

Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.

The class PostDDLCompiler, method compile.

public MutationPlan compile(final List<TableRef> tableRefs, final byte[] emptyCF, final List<byte[]> projectCFs, final List<PColumn> deleteList, final long timestamp) throws SQLException {
    PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, new ColumnResolver() {

        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }

        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public List<PFunction> getFunctions() {
            return Collections.<PFunction>emptyList();
        }

        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new FunctionNotFoundException(functionName);
        }

        @Override
        public boolean hasUDFs() {
            return false;
        }

        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            throw new SchemaNotFoundException(schemaName);
        }

        @Override
        public List<PSchema> getSchemas() {
            throw new UnsupportedOperationException();
        }
    }, scan, new SequenceManager(statement));
    return new BaseMutationPlan(context, Operation.UPSERT) {

        /* FIXME */
        @Override
        public MutationState execute() throws SQLException {
            if (tableRefs.isEmpty()) {
                return new MutationState(0, 1000, connection);
            }
            boolean wasAutoCommit = connection.getAutoCommit();
            try {
                connection.setAutoCommit(true);
                SQLException sqlE = null;
                /*
                 * Handles:
                 * 1) deletion of all rows for a DROP TABLE and subsequently deletion of all rows for a DROP INDEX;
                 * 2) deletion of all column values for an ALTER TABLE DROP COLUMN;
                 * 3) updating the necessary rows to have an empty KV;
                 * 4) updating table stats.
                 */
                long totalMutationCount = 0;
                for (final TableRef tableRef : tableRefs) {
                    Scan scan = ScanUtil.newScan(context.getScan());
                    SelectStatement select = SelectStatement.COUNT_ONE;
                    // We need to use this tableRef
                    ColumnResolver resolver = new ColumnResolver() {

                        @Override
                        public List<TableRef> getTables() {
                            return Collections.singletonList(tableRef);
                        }

                        @Override
                        public List<PFunction> getFunctions() {
                            return Collections.emptyList();
                        }

                        @Override
                        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
                            PColumn column = tableName != null ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName) : tableRef.getTable().getColumnForColumnName(colName);
                            return new ColumnRef(tableRef, column.getPosition());
                        }

                        @Override
                        public PFunction resolveFunction(String functionName) throws SQLException {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public boolean hasUDFs() {
                            return false;
                        }

                        @Override
                        public List<PSchema> getSchemas() {
                            throw new UnsupportedOperationException();
                        }

                        @Override
                        public PSchema resolveSchema(String schemaName) throws SQLException {
                            throw new SchemaNotFoundException(schemaName);
                        }
                    };
                    PhoenixStatement statement = new PhoenixStatement(connection);
                    StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
                    long ts = timestamp;
                    // Transactional tables keep cell timestamps in transaction-manager
                    // (nanosecond) units, so convert before setting the scan time range.
                    if (ts != HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) {
                        ts = TransactionUtil.convertToNanoseconds(ts);
                    }
                    ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts);
                    if (emptyCF != null) {
                        scan.setAttribute(BaseScannerRegionObserver.EMPTY_CF, emptyCF);
                        scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER, EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst());
                    }
                    ServerCache cache = null;
                    try {
                        if (deleteList != null) {
                            if (deleteList.isEmpty()) {
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
                                // In the case of a row deletion, add index metadata so mutable secondary indexing works
                                /* TODO: we currently manually run a scan to delete the index data here
                                ImmutableBytesWritable ptr = context.getTempPtr();
                                tableRef.getTable().getIndexMaintainers(ptr);
                                if (ptr.getLength() > 0) {
                                    IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
                                    cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
                                    byte[] uuidValue = cache.getId();
                                    scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                }
                                */
                            } else {
                                // In the case of the empty key value column family changing, do not send the index
                                // metadata, as we're currently managing this from the client. It's possible for the
                                // data empty column family to stay the same, while the index empty column family
                                // changes.
                                PColumn column = deleteList.get(0);
                                byte[] cq = column.getColumnQualifierBytes();
                                if (emptyCF == null) {
                                    scan.addColumn(column.getFamilyName().getBytes(), cq);
                                }
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_CF, column.getFamilyName().getBytes());
                                scan.setAttribute(BaseScannerRegionObserver.DELETE_CQ, cq);
                            }
                        }
                        List<byte[]> columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size());
                        if (projectCFs == null) {
                            for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) {
                                columnFamilies.add(family.getName().getBytes());
                            }
                        } else {
                            for (byte[] projectCF : projectCFs) {
                                columnFamilies.add(projectCF);
                            }
                        }
                        // Need to project all column families into the scan, since we haven't yet created our empty key value
                        RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY);
                        context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
                        // Explicitly project these column families and don't project the empty key value,
                        // since at this point we haven't added the empty key value everywhere.
                        if (columnFamilies != null) {
                            scan.getFamilyMap().clear();
                            for (byte[] family : columnFamilies) {
                                scan.addFamily(family);
                            }
                            projector = new RowProjector(projector, false);
                        }
                        // Ignore exceptions due to not being able to resolve any view columns,
                        // as this just means the view is invalid; continue on and try to perform
                        // any other Post DDL operations.
                        try {
                            // Since dropping a VIEW does not affect the underlying data, we do
                            // not need to pass through the view statement here.
                            // Push where clause into scan
                            WhereCompiler.compile(context, select);
                        } catch (ColumnFamilyNotFoundException | ColumnNotFoundException
                                | AmbiguousColumnException e) {
                            continue;
                        }
                        QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null, OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, null);
                        try {
                            ResultIterator iterator = plan.iterator();
                            try {
                                Tuple row = iterator.next();
                                ImmutableBytesWritable ptr = context.getTempPtr();
                                totalMutationCount += (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
                            } catch (SQLException e) {
                                sqlE = e;
                            } finally {
                                try {
                                    iterator.close();
                                } catch (SQLException e) {
                                    if (sqlE == null) {
                                        sqlE = e;
                                    } else {
                                        sqlE.setNextException(e);
                                    }
                                } finally {
                                    if (sqlE != null) {
                                        throw sqlE;
                                    }
                                }
                            }
                        } catch (TableNotFoundException e) {
                            // Ignore and continue, as HBase throws when table hasn't been written to
                            // FIXME: Remove if this is fixed in 0.96
                        }
                    } finally {
                        if (cache != null) {
                            // Remove server cache if there is one
                            cache.close();
                        }
                    }
                }
                final long count = totalMutationCount;
                return new MutationState(1, 1000, connection) {

                    @Override
                    public long getUpdateCount() {
                        return count;
                    }
                };
            } finally {
                if (!wasAutoCommit)
                    connection.setAutoCommit(wasAutoCommit);
            }
        }
    };
}
Also used : ServerCache(org.apache.phoenix.cache.ServerCacheClient.ServerCache) PFunction(org.apache.phoenix.parse.PFunction) SQLException(java.sql.SQLException) PhoenixStatement(org.apache.phoenix.jdbc.PhoenixStatement) PColumn(org.apache.phoenix.schema.PColumn) SelectStatement(org.apache.phoenix.parse.SelectStatement) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) List(java.util.List) AmbiguousColumnException(org.apache.phoenix.schema.AmbiguousColumnException) AggregatePlan(org.apache.phoenix.execute.AggregatePlan) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) ResultIterator(org.apache.phoenix.iterate.ResultIterator) PSchema(org.apache.phoenix.parse.PSchema) PColumnFamily(org.apache.phoenix.schema.PColumnFamily) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) FunctionNotFoundException(org.apache.phoenix.schema.FunctionNotFoundException) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) MutationState(org.apache.phoenix.execute.MutationState) Scan(org.apache.hadoop.hbase.client.Scan) ColumnRef(org.apache.phoenix.schema.ColumnRef) SchemaNotFoundException(org.apache.phoenix.schema.SchemaNotFoundException) TableRef(org.apache.phoenix.schema.TableRef) Tuple(org.apache.phoenix.schema.tuple.Tuple)
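
For orientation, here is a minimal sketch of how a caller might drive this plan after an ALTER TABLE DROP COLUMN, modeled on the kind of post-DDL cleanup done in Phoenix's metadata client. The class and method names PostDdlSketch and deleteColumnValues are illustrative, while PostDDLCompiler, ConnectionQueryServices.updateData, and MutationState are the Phoenix entry points this plan is built for.

import java.sql.SQLException;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.HConstants;
import org.apache.phoenix.compile.MutationPlan;
import org.apache.phoenix.compile.PostDDLCompiler;
import org.apache.phoenix.execute.MutationState;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.TableRef;

// Sketch of the call sequence a DROP COLUMN handler might use; the wrapper
// names here are illustrative, the Phoenix types are real.
final class PostDdlSketch {

    static long deleteColumnValues(PhoenixConnection connection, TableRef tableRef,
            List<PColumn> droppedColumns) throws SQLException {
        PostDDLCompiler compiler = new PostDDLCompiler(connection);
        // emptyCF == null: no empty-KV rewrite; projectCFs == null: all families.
        MutationPlan plan = compiler.compile(Collections.singletonList(tableRef),
                null, null, droppedColumns, HConstants.LATEST_TIMESTAMP);
        // updateData runs the server-side scan and reports the rows it touched.
        MutationState state = connection.getQueryServices().updateData(plan);
        return state.getUpdateCount();
    }
}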

Example 17 with Tuple

Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.

The class GroupedAggregateRegionObserver, method scanOrdered.

/**
 * Used for an aggregate query in which the key order matches the group by key order. In this
 * case, we can do the aggregation as we scan, by detecting when the group by key changes.
 * @param limit TODO
 * @throws IOException
 */
private RegionScanner scanOrdered(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner scanner, final List<Expression> expressions, final ServerAggregators aggregators, final long limit) throws IOException {
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over ordered rows with scan " + scan + ", group by " + expressions + ", aggregators " + aggregators, ScanUtil.getCustomAnnotations(scan)));
    }
    final Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
    final boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(minMaxQualifiers);
    return new BaseRegionScanner(scanner) {

        private long rowCount = 0;

        private ImmutableBytesPtr currentKey = null;

        @Override
        public boolean next(List<Cell> results) throws IOException {
            boolean hasMore;
            boolean atLimit;
            boolean aggBoundary = false;
            Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
            ImmutableBytesPtr key = null;
            Aggregator[] rowAggregators = aggregators.getAggregators();
            // If we're calculating no aggregate functions, we can exit at the
            // start of a new row. Otherwise, we have to wait until an aggregation
            // boundary before we know the row is complete.
            int countOffset = rowAggregators.length == 0 ? 1 : 0;
            Region region = c.getEnvironment().getRegion();
            boolean acquiredLock = false;
            try {
                region.startRegionOperation();
                acquiredLock = true;
                synchronized (scanner) {
                    do {
                        List<Cell> kvs = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), encodingScheme) : new ArrayList<Cell>();
                        // Results are potentially returned even when the return value of
                        // scanner.nextRaw() is false, since that only indicates whether
                        // there are more values after the ones returned.
                        hasMore = scanner.nextRaw(kvs);
                        if (!kvs.isEmpty()) {
                            result.setKeyValues(kvs);
                            key = TupleUtil.getConcatenatedValue(result, expressions);
                            aggBoundary = currentKey != null && currentKey.compareTo(key) != 0;
                            if (!aggBoundary) {
                                aggregators.aggregate(rowAggregators, result);
                                if (logger.isDebugEnabled()) {
                                    logger.debug(LogUtil.addCustomAnnotations("Row passed filters: " + kvs + ", aggregated values: " + Arrays.asList(rowAggregators), ScanUtil.getCustomAnnotations(scan)));
                                }
                                currentKey = key;
                            }
                        }
                        atLimit = rowCount + countOffset >= limit;
                        // Do rowCount + 1 b/c we don't have to wait for a complete
                        // row in the case of a DISTINCT with a LIMIT
                    } while (hasMore && !aggBoundary && !atLimit);
                }
            } finally {
                if (acquiredLock)
                    region.closeRegionOperation();
            }
            if (currentKey != null) {
                byte[] value = aggregators.toBytes(rowAggregators);
                KeyValue keyValue = KeyValueUtil.newKeyValue(currentKey.get(), currentKey.getOffset(), currentKey.getLength(), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
                results.add(keyValue);
                if (logger.isDebugEnabled()) {
                    logger.debug(LogUtil.addCustomAnnotations("Adding new aggregate row: " + keyValue + ",for current key " + Bytes.toStringBinary(currentKey.get(), currentKey.getOffset(), currentKey.getLength()) + ", aggregated values: " + Arrays.asList(rowAggregators), ScanUtil.getCustomAnnotations(scan)));
                }
                // If we're at an aggregation boundary, reset the aggregators and
                // aggregate the current result (which is not part of the returned result).
                if (aggBoundary) {
                    aggregators.reset(rowAggregators);
                    aggregators.aggregate(rowAggregators, result);
                    currentKey = key;
                    rowCount++;
                    atLimit |= rowCount >= limit;
                }
            }
            // Continue if there are more
            if (!atLimit && (hasMore || aggBoundary)) {
                return true;
            }
            currentKey = null;
            return false;
        }
    };
}
Also used : EncodedColumnQualiferCellsList(org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList) KeyValue(org.apache.hadoop.hbase.KeyValue) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Aggregator(org.apache.phoenix.expression.aggregator.Aggregator) PInteger(org.apache.phoenix.schema.types.PInteger) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple) MultiKeyValueTuple(org.apache.phoenix.schema.tuple.MultiKeyValueTuple) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple) Region(org.apache.hadoop.hbase.regionserver.Region) EncodedColumnQualiferCellsList(org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList) List(java.util.List) ArrayList(java.util.ArrayList) Cell(org.apache.hadoop.hbase.Cell) MultiKeyValueTuple(org.apache.phoenix.schema.tuple.MultiKeyValueTuple) Tuple(org.apache.phoenix.schema.tuple.Tuple) PositionBasedMultiKeyValueTuple(org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple)
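
The aggregate-as-you-scan loop above hinges on detecting when the group by key changes. A self-contained sketch of the same boundary-detection pattern over any key-sorted stream (all names here are illustrative, not Phoenix APIs):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

// Emits one aggregate per group from a stream sorted by group key, mirroring
// scanOrdered: keep aggregating while the key is unchanged, emit on a boundary.
final class OrderedGroupSum {

    // Each element is {groupKey, value}; input must be sorted by groupKey.
    static List<long[]> sumByKey(Iterator<long[]> sorted) {
        List<long[]> out = new ArrayList<>();
        long currentKey = 0;
        long sum = 0;
        boolean open = false;
        while (sorted.hasNext()) {
            long[] kv = sorted.next();
            if (open && kv[0] != currentKey) { // aggregation boundary detected
                out.add(new long[] { currentKey, sum });
                sum = 0; // like aggregators.reset(rowAggregators)
            }
            currentKey = kv[0];
            open = true;
            sum += kv[1]; // like aggregators.aggregate(rowAggregators, result)
        }
        if (open) {
            out.add(new long[] { currentKey, sum }); // flush the final group
        }
        return out;
    }

    public static void main(String[] args) {
        List<long[]> rows = Arrays.asList(
                new long[] { 1, 10 }, new long[] { 1, 5 }, new long[] { 2, 7 });
        for (long[] agg : sumByKey(rows.iterator())) {
            System.out.println(agg[0] + " -> " + agg[1]); // 1 -> 15, 2 -> 7
        }
    }
}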

Example 18 with Tuple

Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.

The class HashJoinRegionScanner, method processResults.

private void processResults(List<Cell> result, boolean hasBatchLimit) throws IOException {
    if (result.isEmpty())
        return;
    Tuple tuple = useQualifierAsListIndex ? new PositionBasedResultTuple(result) : new ResultTuple(Result.create(result));
    // Project up front when forced; in newer versions forceProjection()
    // always returns true.
    if (joinInfo.forceProjection()) {
        tuple = projector.projectResults(tuple, useNewValueColumnQualifier);
    }
    // TODO: fix below Scanner.next() and Scanner.nextRaw() methods as well.
    if (hasBatchLimit)
        throw new UnsupportedOperationException("Cannot support join operations in scans with limit");
    int count = joinInfo.getJoinIds().length;
    boolean cont = true;
    for (int i = 0; i < count; i++) {
        if (!(joinInfo.earlyEvaluation()[i]) || hashCaches[i] == null)
            continue;
        ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(tuple, joinInfo.getJoinExpressions()[i]);
        tempTuples[i] = hashCaches[i].get(key);
        JoinType type = joinInfo.getJoinTypes()[i];
        if (((type == JoinType.Inner || type == JoinType.Semi) && tempTuples[i] == null) || (type == JoinType.Anti && tempTuples[i] != null)) {
            cont = false;
            break;
        }
    }
    if (cont) {
        if (projector == null) {
            int dup = 1;
            for (int i = 0; i < count; i++) {
                dup *= (tempTuples[i] == null ? 1 : tempTuples[i].size());
            }
            for (int i = 0; i < dup; i++) {
                resultQueue.offer(tuple);
            }
        } else {
            KeyValueSchema schema = joinInfo.getJoinedSchema();
            if (!joinInfo.forceProjection()) {
                // backward compatibility
                tuple = projector.projectResults(tuple, useNewValueColumnQualifier);
            }
            resultQueue.offer(tuple);
            for (int i = 0; i < count; i++) {
                boolean earlyEvaluation = joinInfo.earlyEvaluation()[i];
                JoinType type = joinInfo.getJoinTypes()[i];
                if (earlyEvaluation && (type == JoinType.Semi || type == JoinType.Anti))
                    continue;
                int j = resultQueue.size();
                while (j-- > 0) {
                    Tuple lhs = resultQueue.poll();
                    if (!earlyEvaluation) {
                        ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(lhs, joinInfo.getJoinExpressions()[i]);
                        tempTuples[i] = hashCaches[i].get(key);
                        if (tempTuples[i] == null) {
                            if (type == JoinType.Inner || type == JoinType.Semi) {
                                continue;
                            } else if (type == JoinType.Anti) {
                                resultQueue.offer(lhs);
                                continue;
                            }
                        }
                    }
                    if (tempTuples[i] == null) {
                        Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ? lhs : TupleProjector.mergeProjectedValue((ProjectedValueTuple) lhs, schema, tempDestBitSet, null, joinInfo.getSchemas()[i], tempSrcBitSet[i], joinInfo.getFieldPositions()[i], useNewValueColumnQualifier);
                        resultQueue.offer(joined);
                        continue;
                    }
                    for (Tuple t : tempTuples[i]) {
                        Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ? lhs : TupleProjector.mergeProjectedValue((ProjectedValueTuple) lhs, schema, tempDestBitSet, t, joinInfo.getSchemas()[i], tempSrcBitSet[i], joinInfo.getFieldPositions()[i], useNewValueColumnQualifier);
                        resultQueue.offer(joined);
                    }
                }
            }
        }
        // apply post-join filter
        Expression postFilter = joinInfo.getPostJoinFilterExpression();
        if (postFilter != null) {
            for (Iterator<Tuple> iter = resultQueue.iterator(); iter.hasNext(); ) {
                Tuple t = iter.next();
                postFilter.reset();
                ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
                try {
                    if (!postFilter.evaluate(t, tempPtr) || tempPtr.getLength() == 0) {
                        iter.remove();
                        continue;
                    }
                } catch (IllegalDataException e) {
                    iter.remove();
                    continue;
                }
                Boolean b = (Boolean) postFilter.getDataType().toObject(tempPtr);
                if (!Boolean.TRUE.equals(b)) {
                    iter.remove();
                }
            }
        }
    }
}
Also used : PositionBasedResultTuple(org.apache.phoenix.schema.tuple.PositionBasedResultTuple) PositionBasedResultTuple(org.apache.phoenix.schema.tuple.PositionBasedResultTuple) ResultTuple(org.apache.phoenix.schema.tuple.ResultTuple) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) JoinType(org.apache.phoenix.parse.JoinTableNode.JoinType) ProjectedValueTuple(org.apache.phoenix.execute.TupleProjector.ProjectedValueTuple) Expression(org.apache.phoenix.expression.Expression) KeyValueSchema(org.apache.phoenix.schema.KeyValueSchema) PositionBasedResultTuple(org.apache.phoenix.schema.tuple.PositionBasedResultTuple) Tuple(org.apache.phoenix.schema.tuple.Tuple) ResultTuple(org.apache.phoenix.schema.tuple.ResultTuple) ProjectedValueTuple(org.apache.phoenix.execute.TupleProjector.ProjectedValueTuple) IllegalDataException(org.apache.phoenix.schema.IllegalDataException)
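
The queue-rewriting loop above is a hash-join probe with cross-product expansion and special-cased semi/anti semantics. A simplified, self-contained sketch of how a single probe row is resolved against a build-side cache (illustrative types, not Phoenix classes):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Resolves one probe-side row against a build-side hash cache, mirroring the
// inner/semi/anti branches of processResults.
final class HashProbeSketch {

    enum JoinType { INNER, SEMI, ANTI }

    static <K, T> List<List<T>> probe(List<T> lhsRow, K joinKey,
            Map<K, List<T>> buildCache, JoinType type) {
        List<T> matches = buildCache.get(joinKey);
        List<List<T>> out = new ArrayList<>();
        if (type == JoinType.ANTI) {
            if (matches == null) {
                out.add(lhsRow); // anti join keeps the row only when unmatched
            }
            return out;
        }
        if (matches == null) {
            return out; // inner/semi: unmatched probe rows are dropped
        }
        if (type == JoinType.SEMI) {
            out.add(lhsRow); // semi join emits the row once, however many matches
            return out;
        }
        for (T rhs : matches) { // inner join: one output row per match
            List<T> joined = new ArrayList<>(lhsRow);
            joined.add(rhs);
            out.add(joined);
        }
        return out;
    }

    public static void main(String[] args) {
        Map<Integer, List<String>> cache = new HashMap<>();
        cache.put(7, List.of("r1", "r2"));
        System.out.println(probe(List.of("l"), 7, cache, JoinType.INNER));
        // [[l, r1], [l, r2]]
    }
}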

Example 19 with Tuple

Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.

The class RowKeyOrderedAggregateResultIterator, method advance.

@Override
protected Tuple advance() throws SQLException {
    Tuple current = this.next;
    boolean traversedIterators = nextTraversedIterators;
    if (current == null) {
        current = nextTuple();
        traversedIterators = this.traversedIterator;
    }
    if (current != null) {
        Tuple previous = current;
        Aggregator[] rowAggregators = null;
        while (true) {
            current = nextTuple();
            if (!traversedIterators || !continueAggregating(previous, current)) {
                break;
            }
            if (rowAggregators == null) {
                rowAggregators = aggregate(previous);
            }
            aggregators.aggregate(rowAggregators, current);
            traversedIterators = this.traversedIterator;
        }
        this.next = current;
        this.nextTraversedIterators = this.traversedIterator;
        if (rowAggregators == null) {
            current = previous;
        } else {
            byte[] value = aggregators.toBytes(rowAggregators);
            current = new SingleKeyValueTuple(KeyValueUtil.newKeyValue(previousKey, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
        }
    }
    if (current == null) {
        // Close underlying ResultIterators to free resources sooner rather than later
        close();
    }
    return current;
}
Also used : SingleKeyValueTuple(org.apache.phoenix.schema.tuple.SingleKeyValueTuple) Aggregator(org.apache.phoenix.expression.aggregator.Aggregator) Tuple(org.apache.phoenix.schema.tuple.Tuple) SingleKeyValueTuple(org.apache.phoenix.schema.tuple.SingleKeyValueTuple)
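
advance() keeps a one-tuple look-ahead in this.next so it can tell where a run of combinable tuples ends before emitting the aggregate. The same look-ahead pattern in miniature (illustrative names, not Phoenix APIs):

import java.util.Arrays;
import java.util.Iterator;

// Collapses adjacent equal elements into (value, count) runs using the same
// single-element look-ahead that advance() keeps in this.next.
final class RunLengthSketch<T> {

    private final Iterator<T> in;
    private T next; // look-ahead buffer, analogous to this.next above

    RunLengthSketch(Iterator<T> in) {
        this.in = in;
        this.next = in.hasNext() ? in.next() : null;
    }

    // Returns {value, count} for the next run, or null when exhausted.
    Object[] advance() {
        if (next == null) {
            return null;
        }
        T current = next;
        long count = 1;
        next = null;
        while (in.hasNext()) {
            T t = in.next();
            if (!t.equals(current)) {
                next = t; // run boundary: buffer the element for the next call
                break;
            }
            count++; // analogous to aggregators.aggregate(rowAggregators, current)
        }
        return new Object[] { current, count };
    }

    public static void main(String[] args) {
        RunLengthSketch<String> rl =
                new RunLengthSketch<>(Arrays.asList("a", "a", "b").iterator());
        for (Object[] run; (run = rl.advance()) != null; ) {
            System.out.println(run[0] + " x " + run[1]); // a x 2, b x 1
        }
    }
}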

Example 20 with Tuple

Use of org.apache.phoenix.schema.tuple.Tuple in project phoenix by apache.

The class EncodedColumnsUtil, method createColumnExpressionArray.

public static Expression[] createColumnExpressionArray(int maxEncodedColumnQualifier) {
    // reserve the first position and offset maxEncodedColumnQualifier by ENCODED_CQ_COUNTER_INITIAL_VALUE (which is the minimum encoded column qualifier)
    int numElements = maxEncodedColumnQualifier - QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE + 2;
    Expression[] colValues = new Expression[numElements];
    Arrays.fill(colValues, new DelegateExpression(LiteralExpression.newConstant(null)) {

        @Override
        public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
            return false;
        }
    });
    // 0 is a reserved position, set it to a non-null value so that we can represent absence of a value using a negative offset
    colValues[0] = LiteralExpression.newConstant(QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
    return colValues;
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) Expression(org.apache.phoenix.expression.Expression) DelegateExpression(org.apache.phoenix.expression.DelegateExpression) DelegateExpression(org.apache.phoenix.expression.DelegateExpression) Tuple(org.apache.phoenix.schema.tuple.Tuple)
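
The array reserves position 0 and maps encoded qualifier q to index q - ENCODED_CQ_COUNTER_INITIAL_VALUE + 1, which is where the +2 in numElements comes from. A small sketch of that arithmetic; the constant value 11 is an assumption mirroring QueryConstants and should be checked against your Phoenix version:

// Sketch of the index arithmetic only; ENCODED_CQ_COUNTER_INITIAL_VALUE = 11
// is an assumed value and should be verified against QueryConstants locally.
final class EncodedQualifierIndexSketch {

    static final int ENCODED_CQ_COUNTER_INITIAL_VALUE = 11; // assumed value

    // Position 0 is reserved, so the minimum qualifier lands at index 1.
    static int arrayIndexFor(int encodedQualifier) {
        return encodedQualifier - ENCODED_CQ_COUNTER_INITIAL_VALUE + 1;
    }

    public static void main(String[] args) {
        int maxEncodedColumnQualifier = 13;
        // One slot per qualifier 11..13, plus the reserved slot at 0: 4 elements.
        int numElements = maxEncodedColumnQualifier - ENCODED_CQ_COUNTER_INITIAL_VALUE + 2;
        System.out.println(numElements);       // 4
        System.out.println(arrayIndexFor(11)); // 1
        System.out.println(arrayIndexFor(13)); // 3
    }
}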

Aggregations

Tuple (org.apache.phoenix.schema.tuple.Tuple): 48 usages
SingleKeyValueTuple (org.apache.phoenix.schema.tuple.SingleKeyValueTuple): 22 usages
KeyValue (org.apache.hadoop.hbase.KeyValue): 16 usages
List (java.util.List): 10 usages
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 10 usages
ArrayList (java.util.ArrayList): 9 usages
Test (org.junit.Test): 9 usages
Expression (org.apache.phoenix.expression.Expression): 8 usages
SQLException (java.sql.SQLException): 7 usages
Cell (org.apache.hadoop.hbase.Cell): 6 usages
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 6 usages
IOException (java.io.IOException): 5 usages
Region (org.apache.hadoop.hbase.regionserver.Region): 5 usages
ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression): 5 usages
Aggregator (org.apache.phoenix.expression.aggregator.Aggregator): 5 usages
ResultIterator (org.apache.phoenix.iterate.ResultIterator): 5 usages
PColumn (org.apache.phoenix.schema.PColumn): 5 usages
ResultTuple (org.apache.phoenix.schema.tuple.ResultTuple): 5 usages
ClientAggregators (org.apache.phoenix.expression.aggregator.ClientAggregators): 4 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 4 usages