
Example 61 with PDataType

Use of org.apache.phoenix.schema.types.PDataType in project phoenix by apache.

The class SumAggregateFunction, method evaluate:

@Override
public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
    if (!super.evaluate(tuple, ptr)) {
        return false;
    }
    if (isConstantExpression()) {
        PDataType type = getDataType();
        Object constantValue = ((LiteralExpression) children.get(0)).getValue();
        if (type == PDecimal.INSTANCE) {
            BigDecimal value = ((BigDecimal) constantValue).multiply((BigDecimal) PDecimal.INSTANCE.toObject(ptr, PLong.INSTANCE));
            ptr.set(PDecimal.INSTANCE.toBytes(value));
        } else {
            long constantLongValue = ((Number) constantValue).longValue();
            long value = constantLongValue * type.getCodec().decodeLong(ptr, SortOrder.getDefault());
            byte[] resultPtr = new byte[type.getByteSize()];
            type.getCodec().encodeLong(value, resultPtr, 0);
            ptr.set(resultPtr);
        }
    }
    return true;
}
Also used : PDataType(org.apache.phoenix.schema.types.PDataType) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) BigDecimal(java.math.BigDecimal)
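
The constant branch above never touches individual rows: by the time evaluate runs, the bytes in ptr already hold the aggregated value for the group, which, given the multiplication, behaves as a row count when the child expression is a constant, so the sum is just the constant multiplied by that value and encoded back through the type's codec. A minimal, self-contained sketch of that arithmetic, with no Phoenix dependencies and illustrative values standing in for whatever ptr actually holds:

// Plain-Java sketch (not part of the Phoenix sources); rowCount stands in for the
// value the codec would decode from ptr.
import java.math.BigDecimal;

public class ConstantSumSketch {
    public static void main(String[] args) {
        long rowCount = 5L;                      // illustrative stand-in for the decoded aggregate
        long longConstant = 7L;
        long longSum = longConstant * rowCount;  // mirrors the non-decimal branch
        BigDecimal decimalConstant = new BigDecimal("3.50");
        BigDecimal decimalSum = decimalConstant.multiply(BigDecimal.valueOf(rowCount)); // mirrors the PDecimal branch
        System.out.println(longSum);             // 35
        System.out.println(decimalSum);          // 17.50
    }
}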

Example 62 with PDataType

Use of org.apache.phoenix.schema.types.PDataType in project phoenix by apache.

The class RTrimFunction, method newKeyPart:

@Override
public KeyPart newKeyPart(final KeyPart childPart) {
    return new KeyPart() {

        @Override
        public KeyRange getKeyRange(CompareOp op, Expression rhs) {
            byte[] lowerRange = KeyRange.UNBOUND;
            byte[] upperRange = KeyRange.UNBOUND;
            boolean lowerInclusive = true;
            boolean upperInclusive = false;
            PDataType type = getColumn().getDataType();
            SortOrder sortOrder = getColumn().getSortOrder();
            switch(op) {
                case LESS_OR_EQUAL:
                    lowerInclusive = false;
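                    // Intentional fall-through: LESS_OR_EQUAL reuses the EQUAL case's upper-bound handling.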
                case EQUAL:
                    upperRange = evaluateExpression(rhs);
                    if (op == CompareOp.EQUAL) {
                        lowerRange = upperRange;
                    }
                    if (sortOrder == SortOrder.ASC || !getTable().rowKeyOrderOptimizable()) {
                        upperRange = Arrays.copyOf(upperRange, upperRange.length + 1);
                        upperRange[upperRange.length - 1] = StringUtil.SPACE_UTF8;
                        ByteUtil.nextKey(upperRange, upperRange.length);
                    } else {
                        upperInclusive = true;
                        if (op == CompareOp.LESS_OR_EQUAL) {
                            // Nothing more to do: for DESC with LESS_OR_EQUAL, the upper
                            // bound of the range will be the RHS value.
                            break;
                        }
                        /*
                         * Somewhat tricky to get the range correct for the DESC equality case.
                         * The lower range is the RHS value followed by any number of inverted spaces.
                         * We need to add a zero byte as the lower range will have an \xFF byte
                         * appended to it and otherwise we'd skip past any rows where there is more
                         * than one space following the RHS.
                         * The upper range should span up to and including the RHS value. We need
                         * to add our own \xFF as otherwise this will look like a degenerate query
                         * since the lower would be bigger than the upper range.
                         */
                        lowerRange = Arrays.copyOf(lowerRange, lowerRange.length + 2);
                        lowerRange[lowerRange.length - 2] = StringUtil.INVERTED_SPACE_UTF8;
                        lowerRange[lowerRange.length - 1] = QueryConstants.SEPARATOR_BYTE;
                        upperRange = Arrays.copyOf(upperRange, upperRange.length + 1);
                        upperRange[upperRange.length - 1] = QueryConstants.DESC_SEPARATOR_BYTE;
                    }
                    break;
                default:
                    // TODO: Is this ok for DESC?
                    return childPart.getKeyRange(op, rhs);
            }
            Integer length = getColumn().getMaxLength();
            if (type.isFixedWidth() && length != null) {
                // Pad the range for fixed-width types so that padded values, which sort
                // *after* rows with no padding, are still matched.
                if (lowerRange != KeyRange.UNBOUND) {
                    lowerRange = type.pad(lowerRange, length, SortOrder.ASC);
                }
                if (upperRange != KeyRange.UNBOUND) {
                    upperRange = type.pad(upperRange, length, SortOrder.ASC);
                }
            }
            return KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive);
        }

        @Override
        public List<Expression> getExtractNodes() {
            // Cannot extract the node, since the range can produce false positives with trailing
            // non-blank characters such as 'foo  bar' where the RHS constant is 'foo'.
            return Collections.<Expression>emptyList();
        }

        @Override
        public PColumn getColumn() {
            return childPart.getColumn();
        }

        @Override
        public PTable getTable() {
            return childPart.getTable();
        }
    };
}
Also used : PDataType(org.apache.phoenix.schema.types.PDataType) Expression(org.apache.phoenix.expression.Expression) KeyPart(org.apache.phoenix.compile.KeyPart) SortOrder(org.apache.phoenix.schema.SortOrder) CompareOp(org.apache.hadoop.hbase.filter.CompareFilter.CompareOp)
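
The ASC equality branch above builds the scan range for an RTRIM(col) comparison by using the RHS itself as the inclusive lower bound and nextKey(rhs + ' ') as the exclusive upper bound, so the value followed by any number of trailing spaces falls inside the range. A small self-contained sketch of that byte manipulation (plain Java; the simplified nextKey here only increments the last byte, while the real ByteUtil.nextKey is more careful):

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class RTrimRangeSketch {
    // Simplified stand-in for ByteUtil.nextKey: increment the last byte only.
    static byte[] nextKey(byte[] key) {
        byte[] next = Arrays.copyOf(key, key.length);
        next[next.length - 1]++;
        return next;
    }

    public static void main(String[] args) {
        byte[] rhs = "foo".getBytes(StandardCharsets.UTF_8);
        byte[] lower = rhs;                                // inclusive lower bound
        byte[] upper = Arrays.copyOf(rhs, rhs.length + 1);
        upper[upper.length - 1] = ' ';                     // append the space byte, as the ASC branch does
        upper = nextKey(upper);                            // exclusive upper bound: "foo!"
        System.out.println(new String(lower, StandardCharsets.UTF_8)); // foo
        System.out.println(new String(upper, StandardCharsets.UTF_8)); // foo!
        // 'foo', 'foo ', 'foo  ' all fall in [lower, upper), and so does 'foo  bar',
        // which is exactly why getExtractNodes() above returns an empty list.
    }
}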

Example 63 with PDataType

Use of org.apache.phoenix.schema.types.PDataType in project phoenix by apache.

The class IndexMaintainer, method fromProto:

public static IndexMaintainer fromProto(ServerCachingProtos.IndexMaintainer proto, RowKeySchema dataTableRowKeySchema, boolean isDataTableSalted) throws IOException {
    IndexMaintainer maintainer = new IndexMaintainer(dataTableRowKeySchema, isDataTableSalted);
    maintainer.nIndexSaltBuckets = proto.getSaltBuckets();
    maintainer.isMultiTenant = proto.getIsMultiTenant();
    maintainer.viewIndexId = proto.hasViewIndexId() ? proto.getViewIndexId().toByteArray() : null;
    List<ServerCachingProtos.ColumnReference> indexedColumnsList = proto.getIndexedColumnsList();
    maintainer.indexedColumns = new HashSet<ColumnReference>(indexedColumnsList.size());
    for (ServerCachingProtos.ColumnReference colRefFromProto : indexedColumnsList) {
        maintainer.indexedColumns.add(new ColumnReference(colRefFromProto.getFamily().toByteArray(), colRefFromProto.getQualifier().toByteArray()));
    }
    List<Integer> indexedColumnTypes = proto.getIndexedColumnTypeOrdinalList();
    maintainer.indexedColumnTypes = new ArrayList<PDataType>(indexedColumnTypes.size());
    for (Integer typeOrdinal : indexedColumnTypes) {
        maintainer.indexedColumnTypes.add(PDataType.values()[typeOrdinal]);
    }
    maintainer.indexTableName = proto.getIndexTableName().toByteArray();
    maintainer.rowKeyOrderOptimizable = proto.getRowKeyOrderOptimizable();
    maintainer.dataEmptyKeyValueCF = proto.getDataTableEmptyKeyValueColFamily().toByteArray();
    ServerCachingProtos.ImmutableBytesWritable emptyKeyValueColFamily = proto.getEmptyKeyValueColFamily();
    maintainer.emptyKeyValueCFPtr = new ImmutableBytesPtr(emptyKeyValueColFamily.getByteArray().toByteArray(), emptyKeyValueColFamily.getOffset(), emptyKeyValueColFamily.getLength());
    maintainer.indexedExpressions = new ArrayList<>();
    try (ByteArrayInputStream stream = new ByteArrayInputStream(proto.getIndexedExpressions().toByteArray())) {
        DataInput input = new DataInputStream(stream);
        while (stream.available() > 0) {
            int expressionOrdinal = WritableUtils.readVInt(input);
            Expression expression = ExpressionType.values()[expressionOrdinal].newInstance();
            expression.readFields(input);
            maintainer.indexedExpressions.add(expression);
        }
    }
    maintainer.rowKeyMetaData = newRowKeyMetaData(maintainer, dataTableRowKeySchema, maintainer.indexedExpressions.size(), isDataTableSalted, maintainer.isMultiTenant);
    try (ByteArrayInputStream stream = new ByteArrayInputStream(proto.getRowKeyMetadata().toByteArray())) {
        DataInput input = new DataInputStream(stream);
        maintainer.rowKeyMetaData.readFields(input);
    }
    maintainer.nDataCFs = proto.getNumDataTableColFamilies();
    maintainer.indexWALDisabled = proto.getIndexWalDisabled();
    maintainer.estimatedIndexRowKeyBytes = proto.getIndexRowKeyByteSize();
    maintainer.immutableRows = proto.getImmutable();
    List<ColumnInfo> indexedColumnInfoList = proto.getIndexedColumnInfoList();
    maintainer.indexedColumnsInfo = Sets.newHashSet();
    for (ColumnInfo info : indexedColumnInfoList) {
        maintainer.indexedColumnsInfo.add(new Pair<>(info.getFamilyName(), info.getColumnName()));
    }
    // proto doesn't support single byte so need an explicit cast here
    maintainer.encodingScheme = PTable.QualifierEncodingScheme.fromSerializedValue((byte) proto.getEncodingScheme());
    maintainer.immutableStorageScheme = PTable.ImmutableStorageScheme.fromSerializedValue((byte) proto.getImmutableStorageScheme());
    maintainer.isLocalIndex = proto.getIsLocalIndex();
    List<ServerCachingProtos.ColumnReference> dataTableColRefsForCoveredColumnsList = proto.getDataTableColRefForCoveredColumnsList();
    List<ServerCachingProtos.ColumnReference> indexTableColRefsForCoveredColumnsList = proto.getIndexTableColRefForCoveredColumnsList();
    maintainer.coveredColumnsMap = Maps.newHashMapWithExpectedSize(dataTableColRefsForCoveredColumnsList.size());
    boolean encodedColumnNames = maintainer.encodingScheme != NON_ENCODED_QUALIFIERS;
    Iterator<ServerCachingProtos.ColumnReference> indexTableColRefItr = indexTableColRefsForCoveredColumnsList.iterator();
    for (ServerCachingProtos.ColumnReference colRefFromProto : dataTableColRefsForCoveredColumnsList) {
        ColumnReference dataTableColRef = new ColumnReference(colRefFromProto.getFamily().toByteArray(), colRefFromProto.getQualifier().toByteArray());
        ColumnReference indexTableColRef;
        if (encodedColumnNames) {
            ServerCachingProtos.ColumnReference fromProto = indexTableColRefItr.next();
            indexTableColRef = new ColumnReference(fromProto.getFamily().toByteArray(), fromProto.getQualifier().toByteArray());
        } else {
            byte[] cq = IndexUtil.getIndexColumnName(dataTableColRef.getFamily(), dataTableColRef.getQualifier());
            byte[] cf = maintainer.isLocalIndex ? IndexUtil.getLocalIndexColumnFamily(dataTableColRef.getFamily()) : dataTableColRef.getFamily();
            indexTableColRef = new ColumnReference(cf, cq);
        }
        maintainer.coveredColumnsMap.put(dataTableColRef, indexTableColRef);
    }
    maintainer.initCachedState();
    return maintainer;
}
Also used : ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ColumnInfo(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ColumnInfo) DataInputStream(java.io.DataInputStream) DataInput(java.io.DataInput) ServerCachingProtos(org.apache.phoenix.coprocessor.generated.ServerCachingProtos) PDataType(org.apache.phoenix.schema.types.PDataType) ByteArrayInputStream(java.io.ByteArrayInputStream) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) SingleCellConstructorExpression(org.apache.phoenix.expression.SingleCellConstructorExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) CoerceExpression(org.apache.phoenix.expression.CoerceExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
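
fromProto restores each indexed column type from the ordinal stored in the protobuf message via PDataType.values()[typeOrdinal], the mirror image of toProto in the next example, which writes dataType.ordinal(). The pattern works only while the writer and reader agree on the ordering of values(). A minimal sketch of the round trip, using a toy enum as a stand-in for PDataType (names here are illustrative, not Phoenix APIs):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class OrdinalRoundTripSketch {
    // Toy stand-in for PDataType; illustrative only.
    enum ToyType { VARCHAR, BIGINT, DECIMAL }

    public static void main(String[] args) {
        List<ToyType> columnTypes = Arrays.asList(ToyType.BIGINT, ToyType.VARCHAR);

        // "Serialize": only the ordinals travel in the message.
        List<Integer> ordinals = new ArrayList<>();
        for (ToyType t : columnTypes) {
            ordinals.add(t.ordinal());
        }

        // "Deserialize": rebuild the types from the ordinals, as fromProto does.
        List<ToyType> restored = new ArrayList<>(ordinals.size());
        for (Integer ordinal : ordinals) {
            restored.add(ToyType.values()[ordinal]);
        }
        System.out.println(restored); // [BIGINT, VARCHAR]
    }
}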

Example 64 with PDataType

Use of org.apache.phoenix.schema.types.PDataType in project phoenix by apache.

The class IndexMaintainer, method toProto:

public static ServerCachingProtos.IndexMaintainer toProto(IndexMaintainer maintainer) throws IOException {
    ServerCachingProtos.IndexMaintainer.Builder builder = ServerCachingProtos.IndexMaintainer.newBuilder();
    builder.setSaltBuckets(maintainer.nIndexSaltBuckets);
    builder.setIsMultiTenant(maintainer.isMultiTenant);
    if (maintainer.viewIndexId != null) {
        builder.setViewIndexId(ByteStringer.wrap(maintainer.viewIndexId));
    }
    for (ColumnReference colRef : maintainer.indexedColumns) {
        ServerCachingProtos.ColumnReference.Builder cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder();
        cRefBuilder.setFamily(ByteStringer.wrap(colRef.getFamily()));
        cRefBuilder.setQualifier(ByteStringer.wrap(colRef.getQualifier()));
        builder.addIndexedColumns(cRefBuilder.build());
    }
    for (PDataType dataType : maintainer.indexedColumnTypes) {
        builder.addIndexedColumnTypeOrdinal(dataType.ordinal());
    }
    for (Entry<ColumnReference, ColumnReference> e : maintainer.coveredColumnsMap.entrySet()) {
        ServerCachingProtos.ColumnReference.Builder cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder();
        ColumnReference dataTableColRef = e.getKey();
        cRefBuilder.setFamily(ByteStringer.wrap(dataTableColRef.getFamily()));
        cRefBuilder.setQualifier(ByteStringer.wrap(dataTableColRef.getQualifier()));
        builder.addDataTableColRefForCoveredColumns(cRefBuilder.build());
        if (maintainer.encodingScheme != NON_ENCODED_QUALIFIERS) {
            // We need to serialize the colRefs of index tables only in case of encoded column names.
            ColumnReference indexTableColRef = e.getValue();
            cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder();
            cRefBuilder.setFamily(ByteStringer.wrap(indexTableColRef.getFamily()));
            cRefBuilder.setQualifier(ByteStringer.wrap(indexTableColRef.getQualifier()));
            builder.addIndexTableColRefForCoveredColumns(cRefBuilder.build());
        }
    }
    builder.setIsLocalIndex(maintainer.isLocalIndex);
    builder.setIndexTableName(ByteStringer.wrap(maintainer.indexTableName));
    builder.setRowKeyOrderOptimizable(maintainer.rowKeyOrderOptimizable);
    builder.setDataTableEmptyKeyValueColFamily(ByteStringer.wrap(maintainer.dataEmptyKeyValueCF));
    ServerCachingProtos.ImmutableBytesWritable.Builder ibwBuilder = ServerCachingProtos.ImmutableBytesWritable.newBuilder();
    ibwBuilder.setByteArray(ByteStringer.wrap(maintainer.emptyKeyValueCFPtr.get()));
    ibwBuilder.setLength(maintainer.emptyKeyValueCFPtr.getLength());
    ibwBuilder.setOffset(maintainer.emptyKeyValueCFPtr.getOffset());
    builder.setEmptyKeyValueColFamily(ibwBuilder.build());
    try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) {
        DataOutput output = new DataOutputStream(stream);
        for (Expression expression : maintainer.indexedExpressions) {
            WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal());
            expression.write(output);
        }
        builder.setIndexedExpressions(ByteStringer.wrap(stream.toByteArray()));
    }
    try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) {
        DataOutput output = new DataOutputStream(stream);
        maintainer.rowKeyMetaData.write(output);
        builder.setRowKeyMetadata(ByteStringer.wrap(stream.toByteArray()));
    }
    builder.setNumDataTableColFamilies(maintainer.nDataCFs);
    builder.setIndexWalDisabled(maintainer.indexWALDisabled);
    builder.setIndexRowKeyByteSize(maintainer.estimatedIndexRowKeyBytes);
    builder.setImmutable(maintainer.immutableRows);
    for (Pair<String, String> p : maintainer.indexedColumnsInfo) {
        ServerCachingProtos.ColumnInfo.Builder ciBuilder = ServerCachingProtos.ColumnInfo.newBuilder();
        if (p.getFirst() != null) {
            ciBuilder.setFamilyName(p.getFirst());
        }
        ciBuilder.setColumnName(p.getSecond());
        builder.addIndexedColumnInfo(ciBuilder.build());
    }
    builder.setEncodingScheme(maintainer.encodingScheme.getSerializedMetadataValue());
    builder.setImmutableStorageScheme(maintainer.immutableStorageScheme.getSerializedMetadataValue());
    return builder.build();
}
Also used : DataOutput(java.io.DataOutput) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) DataOutputStream(java.io.DataOutputStream) ColumnInfo(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ColumnInfo) TrustedByteArrayOutputStream(org.apache.phoenix.util.TrustedByteArrayOutputStream) ByteArrayOutputStream(java.io.ByteArrayOutputStream) PDataType(org.apache.phoenix.schema.types.PDataType) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) SingleCellConstructorExpression(org.apache.phoenix.expression.SingleCellConstructorExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) CoerceExpression(org.apache.phoenix.expression.CoerceExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
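
toProto frames each indexed expression as a type ordinal followed by the expression's own serialized fields; fromProto in Example 63 reads the same stream back, looping on stream.available() > 0 and letting each expression read its own fields. A dependency-free sketch of that framing and read-back loop, using plain DataOutputStream writes in place of Hadoop's WritableUtils vints and strings in place of real serialized expressions:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class ExpressionStreamSketch {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            // Two "expressions", each written as (type ordinal, payload).
            out.writeInt(3);
            out.writeUTF("UPPER(name)");    // illustrative payload, not a real serialized Expression
            out.writeInt(7);
            out.writeUTF("price * 2");
        }

        ByteArrayInputStream in = new ByteArrayInputStream(bytes.toByteArray());
        DataInputStream input = new DataInputStream(in);
        while (in.available() > 0) {        // same termination test fromProto uses
            int ordinal = input.readInt();  // would select the ExpressionType to instantiate
            String payload = input.readUTF();
            System.out.println(ordinal + " -> " + payload);
        }
    }
}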

Example 65 with PDataType

Use of org.apache.phoenix.schema.types.PDataType in project phoenix by apache.

The class IndexMaintainer, method readFields:

// Only called by code older than our 4.10 release
@Deprecated
@Override
public void readFields(DataInput input) throws IOException {
    int encodedIndexSaltBucketsAndMultiTenant = WritableUtils.readVInt(input);
    isMultiTenant = encodedIndexSaltBucketsAndMultiTenant < 0;
    nIndexSaltBuckets = Math.abs(encodedIndexSaltBucketsAndMultiTenant) - 1;
    int encodedIndexedColumnsAndViewId = WritableUtils.readVInt(input);
    boolean hasViewIndexId = encodedIndexedColumnsAndViewId < 0;
    if (hasViewIndexId) {
        // Fixed length
        viewIndexId = new byte[MetaDataUtil.getViewIndexIdDataType().getByteSize()];
        input.readFully(viewIndexId);
    }
    int nIndexedColumns = Math.abs(encodedIndexedColumnsAndViewId) - 1;
    indexedColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexedColumns);
    for (int i = 0; i < nIndexedColumns; i++) {
        byte[] cf = Bytes.readByteArray(input);
        byte[] cq = Bytes.readByteArray(input);
        indexedColumns.add(new ColumnReference(cf, cq));
    }
    indexedColumnTypes = Lists.newArrayListWithExpectedSize(nIndexedColumns);
    for (int i = 0; i < nIndexedColumns; i++) {
        PDataType type = PDataType.values()[WritableUtils.readVInt(input)];
        indexedColumnTypes.add(type);
    }
    int encodedCoveredColumnsAndLocalIndex = WritableUtils.readVInt(input);
    isLocalIndex = encodedCoveredColumnsAndLocalIndex < 0;
    int nCoveredColumns = Math.abs(encodedCoveredColumnsAndLocalIndex) - 1;
    coveredColumnsMap = Maps.newHashMapWithExpectedSize(nCoveredColumns);
    for (int i = 0; i < nCoveredColumns; i++) {
        byte[] dataTableCf = Bytes.readByteArray(input);
        byte[] dataTableCq = Bytes.readByteArray(input);
        ColumnReference dataTableRef = new ColumnReference(dataTableCf, dataTableCq);
        byte[] indexTableCf = isLocalIndex ? IndexUtil.getLocalIndexColumnFamily(dataTableCf) : dataTableCf;
        byte[] indexTableCq = IndexUtil.getIndexColumnName(dataTableCf, dataTableCq);
        ColumnReference indexTableRef = new ColumnReference(indexTableCf, indexTableCq);
        coveredColumnsMap.put(dataTableRef, indexTableRef);
    }
    // Hack to serialize whether the index row key is optimizable
    int len = WritableUtils.readVInt(input);
    if (len < 0) {
        rowKeyOrderOptimizable = false;
        len *= -1;
    } else {
        rowKeyOrderOptimizable = true;
    }
    indexTableName = new byte[len];
    input.readFully(indexTableName, 0, len);
    dataEmptyKeyValueCF = Bytes.readByteArray(input);
    len = WritableUtils.readVInt(input);
    // TODO: remove this in the next major release
    boolean isNewClient = false;
    if (len < 0) {
        isNewClient = true;
        len = Math.abs(len);
    }
    byte[] emptyKeyValueCF = new byte[len];
    input.readFully(emptyKeyValueCF, 0, len);
    emptyKeyValueCFPtr = new ImmutableBytesPtr(emptyKeyValueCF);
    if (isNewClient) {
        int numIndexedExpressions = WritableUtils.readVInt(input);
        indexedExpressions = Lists.newArrayListWithExpectedSize(numIndexedExpressions);
        for (int i = 0; i < numIndexedExpressions; i++) {
            Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance();
            expression.readFields(input);
            indexedExpressions.add(expression);
        }
    } else {
        indexedExpressions = Lists.newArrayListWithExpectedSize(indexedColumns.size());
        Iterator<ColumnReference> colReferenceIter = indexedColumns.iterator();
        Iterator<PDataType> dataTypeIter = indexedColumnTypes.iterator();
        while (colReferenceIter.hasNext()) {
            ColumnReference colRef = colReferenceIter.next();
            final PDataType dataType = dataTypeIter.next();
            indexedExpressions.add(new KeyValueColumnExpression(new PDatum() {

                @Override
                public boolean isNullable() {
                    return true;
                }

                @Override
                public SortOrder getSortOrder() {
                    return SortOrder.getDefault();
                }

                @Override
                public Integer getScale() {
                    return null;
                }

                @Override
                public Integer getMaxLength() {
                    return null;
                }

                @Override
                public PDataType getDataType() {
                    return dataType;
                }
            }, colRef.getFamily(), colRef.getQualifier()));
        }
    }
    rowKeyMetaData = newRowKeyMetaData();
    rowKeyMetaData.readFields(input);
    int nDataCFs = WritableUtils.readVInt(input);
    // Encode indexWALDisabled in nDataCFs
    indexWALDisabled = nDataCFs < 0;
    this.nDataCFs = Math.abs(nDataCFs) - 1;
    int encodedEstimatedIndexRowKeyBytesAndImmutableRows = WritableUtils.readVInt(input);
    this.immutableRows = encodedEstimatedIndexRowKeyBytesAndImmutableRows < 0;
    this.estimatedIndexRowKeyBytes = Math.abs(encodedEstimatedIndexRowKeyBytesAndImmutableRows);
    // Needed for backward compatibility. Clients older than 4.10 will have non-encoded tables.
    this.immutableStorageScheme = ImmutableStorageScheme.ONE_CELL_PER_COLUMN;
    this.encodingScheme = QualifierEncodingScheme.NON_ENCODED_QUALIFIERS;
    initCachedState();
}
Also used : PDatum(org.apache.phoenix.schema.PDatum) PDataType(org.apache.phoenix.schema.types.PDataType) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) SingleCellConstructorExpression(org.apache.phoenix.expression.SingleCellConstructorExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) CoerceExpression(org.apache.phoenix.expression.CoerceExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
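
Several of the fields readFields decodes pack a boolean into the sign of a vint and offset the magnitude by one so that zero remains representable, e.g. isMultiTenant = encoded < 0 together with nIndexSaltBuckets = Math.abs(encoded) - 1. A minimal sketch of that packing; the encode helper is an assumption inferred from the decode side, not code from Phoenix:

public class SignBitPackingSketch {
    // Assumed writer-side encoding, inferred from the decode in readFields above.
    static int encode(int value, boolean flag) {
        int shifted = value + 1;   // offset by one so a value of 0 still carries a sign
        return flag ? -shifted : shifted;
    }

    public static void main(String[] args) {
        int encoded = encode(4, true);        // e.g. 4 salt buckets, multi-tenant
        boolean flag = encoded < 0;           // mirrors: isMultiTenant = encoded < 0
        int value = Math.abs(encoded) - 1;    // mirrors: nIndexSaltBuckets = Math.abs(encoded) - 1
        System.out.println(encoded + " -> value=" + value + ", flag=" + flag); // -5 -> value=4, flag=true
    }
}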

Aggregations

PDataType (org.apache.phoenix.schema.types.PDataType): 152
Expression (org.apache.phoenix.expression.Expression): 54
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 31
SortOrder (org.apache.phoenix.schema.SortOrder): 29
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 21
CoerceExpression (org.apache.phoenix.expression.CoerceExpression): 18
Test (org.junit.Test): 15
PDatum (org.apache.phoenix.schema.PDatum): 12
BigDecimal (java.math.BigDecimal): 11
SQLException (java.sql.SQLException): 11
ArrayList (java.util.ArrayList): 11
List (java.util.List): 10
KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression): 10
RowKeyColumnExpression (org.apache.phoenix.expression.RowKeyColumnExpression): 10
SingleCellConstructorExpression (org.apache.phoenix.expression.SingleCellConstructorExpression): 10
IOException (java.io.IOException): 9
PreparedStatement (java.sql.PreparedStatement): 7
Pair (org.apache.hadoop.hbase.util.Pair): 7
Date (java.sql.Date): 6
AndExpression (org.apache.phoenix.expression.AndExpression): 6