Search in sources:

Example 1 with ColumnReference

Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in the Apache Phoenix project.

From the class IndexMaintainer, method fromProto.

/**
 * Deserializes an IndexMaintainer from its protobuf representation; the inverse of
 * {@code toProto}.
 *
 * @param proto serialized maintainer state
 * @param dataTableRowKeySchema row key schema of the data table the index belongs to
 * @param isDataTableSalted whether the data table is salted
 * @return the reconstructed maintainer with its derived/cached state initialized
 * @throws IOException if the embedded expression or row key metadata bytes cannot be read
 */
public static IndexMaintainer fromProto(ServerCachingProtos.IndexMaintainer proto, RowKeySchema dataTableRowKeySchema, boolean isDataTableSalted) throws IOException {
    IndexMaintainer maintainer = new IndexMaintainer(dataTableRowKeySchema, isDataTableSalted);
    maintainer.nIndexSaltBuckets = proto.getSaltBuckets();
    maintainer.isMultiTenant = proto.getIsMultiTenant();
    // viewIndexId is optional in the proto; absent means this is not a view index.
    maintainer.viewIndexId = proto.hasViewIndexId() ? proto.getViewIndexId().toByteArray() : null;
    List<ServerCachingProtos.ColumnReference> indexedColumnsList = proto.getIndexedColumnsList();
    maintainer.indexedColumns = new HashSet<ColumnReference>(indexedColumnsList.size());
    for (ServerCachingProtos.ColumnReference colRefFromProto : indexedColumnsList) {
        maintainer.indexedColumns.add(new ColumnReference(colRefFromProto.getFamily().toByteArray(), colRefFromProto.getQualifier().toByteArray()));
    }
    // Column types were serialized as ordinals into PDataType.values() (see toProto).
    List<Integer> indexedColumnTypes = proto.getIndexedColumnTypeOrdinalList();
    maintainer.indexedColumnTypes = new ArrayList<PDataType>(indexedColumnTypes.size());
    for (Integer typeOrdinal : indexedColumnTypes) {
        maintainer.indexedColumnTypes.add(PDataType.values()[typeOrdinal]);
    }
    maintainer.indexTableName = proto.getIndexTableName().toByteArray();
    maintainer.rowKeyOrderOptimizable = proto.getRowKeyOrderOptimizable();
    maintainer.dataEmptyKeyValueCF = proto.getDataTableEmptyKeyValueColFamily().toByteArray();
    ServerCachingProtos.ImmutableBytesWritable emptyKeyValueColFamily = proto.getEmptyKeyValueColFamily();
    maintainer.emptyKeyValueCFPtr = new ImmutableBytesPtr(emptyKeyValueColFamily.getByteArray().toByteArray(), emptyKeyValueColFamily.getOffset(), emptyKeyValueColFamily.getLength());
    maintainer.indexedExpressions = new ArrayList<>();
    // Indexed expressions were written as (expression-type vint, Writable fields) pairs;
    // keep reading until the stream is exhausted.
    try (ByteArrayInputStream stream = new ByteArrayInputStream(proto.getIndexedExpressions().toByteArray())) {
        DataInput input = new DataInputStream(stream);
        while (stream.available() > 0) {
            int expressionOrdinal = WritableUtils.readVInt(input);
            Expression expression = ExpressionType.values()[expressionOrdinal].newInstance();
            expression.readFields(input);
            maintainer.indexedExpressions.add(expression);
        }
    }
    // Note: must run after indexedExpressions is populated, since the metadata shape
    // depends on the number of indexed expressions.
    maintainer.rowKeyMetaData = newRowKeyMetaData(maintainer, dataTableRowKeySchema, maintainer.indexedExpressions.size(), isDataTableSalted, maintainer.isMultiTenant);
    try (ByteArrayInputStream stream = new ByteArrayInputStream(proto.getRowKeyMetadata().toByteArray())) {
        DataInput input = new DataInputStream(stream);
        maintainer.rowKeyMetaData.readFields(input);
    }
    maintainer.nDataCFs = proto.getNumDataTableColFamilies();
    maintainer.indexWALDisabled = proto.getIndexWalDisabled();
    maintainer.estimatedIndexRowKeyBytes = proto.getIndexRowKeyByteSize();
    maintainer.immutableRows = proto.getImmutable();
    List<ColumnInfo> indexedColumnInfoList = proto.getIndexedColumnInfoList();
    maintainer.indexedColumnsInfo = Sets.newHashSet();
    for (ColumnInfo info : indexedColumnInfoList) {
        maintainer.indexedColumnsInfo.add(new Pair<>(info.getFamilyName(), info.getColumnName()));
    }
    // proto doesn't support single byte so need an explicit cast here
    maintainer.encodingScheme = PTable.QualifierEncodingScheme.fromSerializedValue((byte) proto.getEncodingScheme());
    maintainer.immutableStorageScheme = PTable.ImmutableStorageScheme.fromSerializedValue((byte) proto.getImmutableStorageScheme());
    maintainer.isLocalIndex = proto.getIsLocalIndex();
    List<ServerCachingProtos.ColumnReference> dataTableColRefsForCoveredColumnsList = proto.getDataTableColRefForCoveredColumnsList();
    List<ServerCachingProtos.ColumnReference> indexTableColRefsForCoveredColumnsList = proto.getIndexTableColRefForCoveredColumnsList();
    maintainer.coveredColumnsMap = Maps.newHashMapWithExpectedSize(dataTableColRefsForCoveredColumnsList.size());
    boolean encodedColumnNames = maintainer.encodingScheme != NON_ENCODED_QUALIFIERS;
    // Index-table col refs are only serialized when qualifiers are encoded (see toProto);
    // the two lists are parallel, so advance this iterator in lock-step below.
    Iterator<ServerCachingProtos.ColumnReference> indexTableColRefItr = indexTableColRefsForCoveredColumnsList.iterator();
    for (ServerCachingProtos.ColumnReference colRefFromProto : dataTableColRefsForCoveredColumnsList) {
        ColumnReference dataTableColRef = new ColumnReference(colRefFromProto.getFamily().toByteArray(), colRefFromProto.getQualifier().toByteArray());
        ColumnReference indexTableColRef;
        if (encodedColumnNames) {
            ServerCachingProtos.ColumnReference fromProto = indexTableColRefItr.next();
            indexTableColRef = new ColumnReference(fromProto.getFamily().toByteArray(), fromProto.getQualifier().toByteArray());
        } else {
            // Non-encoded qualifiers: the index-table reference is derivable from the
            // data-table reference, so it was never serialized.
            byte[] cq = IndexUtil.getIndexColumnName(dataTableColRef.getFamily(), dataTableColRef.getQualifier());
            byte[] cf = maintainer.isLocalIndex ? IndexUtil.getLocalIndexColumnFamily(dataTableColRef.getFamily()) : dataTableColRef.getFamily();
            indexTableColRef = new ColumnReference(cf, cq);
        }
        maintainer.coveredColumnsMap.put(dataTableColRef, indexTableColRef);
    }
    maintainer.initCachedState();
    return maintainer;
}
Also used : ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ColumnInfo(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ColumnInfo) DataInputStream(java.io.DataInputStream) DataInput(java.io.DataInput) ServerCachingProtos(org.apache.phoenix.coprocessor.generated.ServerCachingProtos) PDataType(org.apache.phoenix.schema.types.PDataType) ByteArrayInputStream(java.io.ByteArrayInputStream) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) SingleCellConstructorExpression(org.apache.phoenix.expression.SingleCellConstructorExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) CoerceExpression(org.apache.phoenix.expression.CoerceExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)

Example 2 with ColumnReference

Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in the Apache Phoenix project.

From the class IndexMaintainer, method createGetterFromKeyValues.

/**
 * Builds a ValueGetter backed by the given pending cell updates for the supplied row key.
 * Each cell's value is wrapped (not copied) and indexed by its column reference; the
 * data table's empty key value column is always reported as having no value.
 */
public ValueGetter createGetterFromKeyValues(final byte[] rowKey, Collection<? extends Cell> pendingUpdates) {
    final Map<ColumnReference, ImmutableBytesPtr> latestValues = Maps.newHashMapWithExpectedSize(pendingUpdates.size());
    for (Cell cell : pendingUpdates) {
        // Point at the cell's backing arrays directly rather than copying bytes.
        ImmutableBytesPtr valuePtr = new ImmutableBytesPtr(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
        ColumnReference column = new ColumnReference(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(), cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
        latestValues.put(column, valuePtr);
    }
    return new ValueGetter() {

        @Override
        public ImmutableBytesWritable getLatestValue(ColumnReference ref) {
            // The empty key value column never carries a real value.
            return ref.equals(dataEmptyKeyValueRef) ? null : latestValues.get(ref);
        }

        @Override
        public byte[] getRowKey() {
            return rowKey;
        }
    };
}
Also used : ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Cell(org.apache.hadoop.hbase.Cell) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference) ValueGetter(org.apache.phoenix.hbase.index.ValueGetter)

Example 3 with ColumnReference

Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in the Apache Phoenix project.

From the class IndexMaintainer, method toProto.

/**
 * Serializes the given IndexMaintainer into its protobuf representation; the inverse of
 * {@code fromProto}.
 *
 * @param maintainer the maintainer to serialize
 * @return the populated protobuf message
 * @throws IOException if the indexed expressions or row key metadata cannot be written
 */
public static ServerCachingProtos.IndexMaintainer toProto(IndexMaintainer maintainer) throws IOException {
    ServerCachingProtos.IndexMaintainer.Builder builder = ServerCachingProtos.IndexMaintainer.newBuilder();
    builder.setSaltBuckets(maintainer.nIndexSaltBuckets);
    builder.setIsMultiTenant(maintainer.isMultiTenant);
    // viewIndexId is optional: only set when this maintainer is for a view index.
    if (maintainer.viewIndexId != null) {
        builder.setViewIndexId(ByteStringer.wrap(maintainer.viewIndexId));
    }
    for (ColumnReference colRef : maintainer.indexedColumns) {
        ServerCachingProtos.ColumnReference.Builder cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder();
        cRefBuilder.setFamily(ByteStringer.wrap(colRef.getFamily()));
        cRefBuilder.setQualifier(ByteStringer.wrap(colRef.getQualifier()));
        builder.addIndexedColumns(cRefBuilder.build());
    }
    // Column types are serialized as ordinals into PDataType.values() (read back by fromProto).
    for (PDataType dataType : maintainer.indexedColumnTypes) {
        builder.addIndexedColumnTypeOrdinal(dataType.ordinal());
    }
    // The data-table and index-table covered column lists are written as parallel lists;
    // fromProto relies on their relative ordering.
    for (Entry<ColumnReference, ColumnReference> e : maintainer.coveredColumnsMap.entrySet()) {
        ServerCachingProtos.ColumnReference.Builder cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder();
        ColumnReference dataTableColRef = e.getKey();
        cRefBuilder.setFamily(ByteStringer.wrap(dataTableColRef.getFamily()));
        cRefBuilder.setQualifier(ByteStringer.wrap(dataTableColRef.getQualifier()));
        builder.addDataTableColRefForCoveredColumns(cRefBuilder.build());
        if (maintainer.encodingScheme != NON_ENCODED_QUALIFIERS) {
            // We need to serialize the colRefs of index tables only in case of encoded column names.
            ColumnReference indexTableColRef = e.getValue();
            cRefBuilder = ServerCachingProtos.ColumnReference.newBuilder();
            cRefBuilder.setFamily(ByteStringer.wrap(indexTableColRef.getFamily()));
            cRefBuilder.setQualifier(ByteStringer.wrap(indexTableColRef.getQualifier()));
            builder.addIndexTableColRefForCoveredColumns(cRefBuilder.build());
        }
    }
    builder.setIsLocalIndex(maintainer.isLocalIndex);
    builder.setIndexTableName(ByteStringer.wrap(maintainer.indexTableName));
    builder.setRowKeyOrderOptimizable(maintainer.rowKeyOrderOptimizable);
    builder.setDataTableEmptyKeyValueColFamily(ByteStringer.wrap(maintainer.dataEmptyKeyValueCF));
    ServerCachingProtos.ImmutableBytesWritable.Builder ibwBuilder = ServerCachingProtos.ImmutableBytesWritable.newBuilder();
    ibwBuilder.setByteArray(ByteStringer.wrap(maintainer.emptyKeyValueCFPtr.get()));
    ibwBuilder.setLength(maintainer.emptyKeyValueCFPtr.getLength());
    ibwBuilder.setOffset(maintainer.emptyKeyValueCFPtr.getOffset());
    builder.setEmptyKeyValueColFamily(ibwBuilder.build());
    // Each expression is written as a (type ordinal vint, Writable fields) pair;
    // fromProto reads these back until the stream is exhausted.
    try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) {
        DataOutput output = new DataOutputStream(stream);
        for (Expression expression : maintainer.indexedExpressions) {
            WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal());
            expression.write(output);
        }
        builder.setIndexedExpressions(ByteStringer.wrap(stream.toByteArray()));
    }
    try (ByteArrayOutputStream stream = new ByteArrayOutputStream()) {
        DataOutput output = new DataOutputStream(stream);
        maintainer.rowKeyMetaData.write(output);
        builder.setRowKeyMetadata(ByteStringer.wrap(stream.toByteArray()));
    }
    builder.setNumDataTableColFamilies(maintainer.nDataCFs);
    builder.setIndexWalDisabled(maintainer.indexWALDisabled);
    builder.setIndexRowKeyByteSize(maintainer.estimatedIndexRowKeyBytes);
    builder.setImmutable(maintainer.immutableRows);
    for (Pair<String, String> p : maintainer.indexedColumnsInfo) {
        ServerCachingProtos.ColumnInfo.Builder ciBuilder = ServerCachingProtos.ColumnInfo.newBuilder();
        // Family name is absent for row-key (PK) columns.
        if (p.getFirst() != null) {
            ciBuilder.setFamilyName(p.getFirst());
        }
        ciBuilder.setColumnName(p.getSecond());
        builder.addIndexedColumnInfo(ciBuilder.build());
    }
    builder.setEncodingScheme(maintainer.encodingScheme.getSerializedMetadataValue());
    builder.setImmutableStorageScheme(maintainer.immutableStorageScheme.getSerializedMetadataValue());
    return builder.build();
}
Also used : DataOutput(java.io.DataOutput) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) DataOutputStream(java.io.DataOutputStream) ColumnInfo(org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ColumnInfo) TrustedByteArrayOutputStream(org.apache.phoenix.util.TrustedByteArrayOutputStream) ByteArrayOutputStream(java.io.ByteArrayOutputStream) PDataType(org.apache.phoenix.schema.types.PDataType) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) SingleCellConstructorExpression(org.apache.phoenix.expression.SingleCellConstructorExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) CoerceExpression(org.apache.phoenix.expression.CoerceExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)

Example 4 with ColumnReference

Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in the Apache Phoenix project.

From the class IndexMaintainer, method readFields.

/**
 * Deserializes this maintainer from the legacy (pre-4.10) Writable wire format.
 * Several scalar fields are bit-packed: a negative vint encodes a boolean flag in
 * its sign and the magnitude carries the actual value (usually offset by one).
 * Newer clients use {@code fromProto}/{@code toProto} instead.
 *
 * @param input the stream positioned at the start of the serialized maintainer
 * @throws IOException if the stream is truncated or malformed
 */
// Only called by code older than our 4.10 release
@Deprecated
@Override
public void readFields(DataInput input) throws IOException {
    // Sign encodes isMultiTenant; magnitude - 1 is the salt bucket count.
    int encodedIndexSaltBucketsAndMultiTenant = WritableUtils.readVInt(input);
    isMultiTenant = encodedIndexSaltBucketsAndMultiTenant < 0;
    nIndexSaltBuckets = Math.abs(encodedIndexSaltBucketsAndMultiTenant) - 1;
    // Sign encodes presence of a view index id; magnitude - 1 is the indexed column count.
    int encodedIndexedColumnsAndViewId = WritableUtils.readVInt(input);
    boolean hasViewIndexId = encodedIndexedColumnsAndViewId < 0;
    if (hasViewIndexId) {
        // Fixed length
        viewIndexId = new byte[MetaDataUtil.getViewIndexIdDataType().getByteSize()];
        input.readFully(viewIndexId);
    }
    int nIndexedColumns = Math.abs(encodedIndexedColumnsAndViewId) - 1;
    indexedColumns = Sets.newLinkedHashSetWithExpectedSize(nIndexedColumns);
    for (int i = 0; i < nIndexedColumns; i++) {
        byte[] cf = Bytes.readByteArray(input);
        byte[] cq = Bytes.readByteArray(input);
        indexedColumns.add(new ColumnReference(cf, cq));
    }
    indexedColumnTypes = Lists.newArrayListWithExpectedSize(nIndexedColumns);
    for (int i = 0; i < nIndexedColumns; i++) {
        PDataType type = PDataType.values()[WritableUtils.readVInt(input)];
        indexedColumnTypes.add(type);
    }
    // Sign encodes isLocalIndex; magnitude - 1 is the covered column count.
    // (NOTE: "Coveredolumns" typo is in the original local name; kept as-is.)
    int encodedCoveredolumnsAndLocalIndex = WritableUtils.readVInt(input);
    isLocalIndex = encodedCoveredolumnsAndLocalIndex < 0;
    int nCoveredColumns = Math.abs(encodedCoveredolumnsAndLocalIndex) - 1;
    coveredColumnsMap = Maps.newHashMapWithExpectedSize(nCoveredColumns);
    for (int i = 0; i < nCoveredColumns; i++) {
        byte[] dataTableCf = Bytes.readByteArray(input);
        byte[] dataTableCq = Bytes.readByteArray(input);
        ColumnReference dataTableRef = new ColumnReference(dataTableCf, dataTableCq);
        // Old format never stored qualifiers with encoded names, so the index-table
        // reference can always be derived from the data-table one.
        byte[] indexTableCf = isLocalIndex ? IndexUtil.getLocalIndexColumnFamily(dataTableCf) : dataTableCf;
        byte[] indexTableCq = IndexUtil.getIndexColumnName(dataTableCf, dataTableCq);
        ColumnReference indexTableRef = new ColumnReference(indexTableCf, indexTableCq);
        coveredColumnsMap.put(dataTableRef, indexTableRef);
    }
    // Hack to serialize whether the index row key is optimizable
    int len = WritableUtils.readVInt(input);
    if (len < 0) {
        rowKeyOrderOptimizable = false;
        len *= -1;
    } else {
        rowKeyOrderOptimizable = true;
    }
    indexTableName = new byte[len];
    input.readFully(indexTableName, 0, len);
    dataEmptyKeyValueCF = Bytes.readByteArray(input);
    len = WritableUtils.readVInt(input);
    //TODO remove this in the next major release
    // A negative length here marks a "new" client that also serialized
    // indexed expressions after the empty column family.
    boolean isNewClient = false;
    if (len < 0) {
        isNewClient = true;
        len = Math.abs(len);
    }
    byte[] emptyKeyValueCF = new byte[len];
    input.readFully(emptyKeyValueCF, 0, len);
    emptyKeyValueCFPtr = new ImmutableBytesPtr(emptyKeyValueCF);
    if (isNewClient) {
        int numIndexedExpressions = WritableUtils.readVInt(input);
        indexedExpressions = Lists.newArrayListWithExpectedSize(numIndexedExpressions);
        for (int i = 0; i < numIndexedExpressions; i++) {
            Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance();
            expression.readFields(input);
            indexedExpressions.add(expression);
        }
    } else {
        // Older clients only serialized plain indexed columns; synthesize an
        // equivalent KeyValueColumnExpression per (column, type) pair.
        indexedExpressions = Lists.newArrayListWithExpectedSize(indexedColumns.size());
        Iterator<ColumnReference> colReferenceIter = indexedColumns.iterator();
        Iterator<PDataType> dataTypeIter = indexedColumnTypes.iterator();
        while (colReferenceIter.hasNext()) {
            ColumnReference colRef = colReferenceIter.next();
            final PDataType dataType = dataTypeIter.next();
            indexedExpressions.add(new KeyValueColumnExpression(new PDatum() {

                @Override
                public boolean isNullable() {
                    return true;
                }

                @Override
                public SortOrder getSortOrder() {
                    return SortOrder.getDefault();
                }

                @Override
                public Integer getScale() {
                    return null;
                }

                @Override
                public Integer getMaxLength() {
                    return null;
                }

                @Override
                public PDataType getDataType() {
                    return dataType;
                }
            }, colRef.getFamily(), colRef.getQualifier()));
        }
    }
    rowKeyMetaData = newRowKeyMetaData();
    rowKeyMetaData.readFields(input);
    int nDataCFs = WritableUtils.readVInt(input);
    // Encode indexWALDisabled in nDataCFs
    indexWALDisabled = nDataCFs < 0;
    this.nDataCFs = Math.abs(nDataCFs) - 1;
    // Sign encodes immutableRows; magnitude is the estimated index row key size.
    int encodedEstimatedIndexRowKeyBytesAndImmutableRows = WritableUtils.readVInt(input);
    this.immutableRows = encodedEstimatedIndexRowKeyBytesAndImmutableRows < 0;
    this.estimatedIndexRowKeyBytes = Math.abs(encodedEstimatedIndexRowKeyBytesAndImmutableRows);
    // Needed for backward compatibility. Clients older than 4.10 will have non-encoded tables.
    this.immutableStorageScheme = ImmutableStorageScheme.ONE_CELL_PER_COLUMN;
    this.encodingScheme = QualifierEncodingScheme.NON_ENCODED_QUALIFIERS;
    initCachedState();
}
Also used : PDatum(org.apache.phoenix.schema.PDatum) PDataType(org.apache.phoenix.schema.types.PDataType) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) SingleCellConstructorExpression(org.apache.phoenix.expression.SingleCellConstructorExpression) Expression(org.apache.phoenix.expression.Expression) SingleCellColumnExpression(org.apache.phoenix.expression.SingleCellColumnExpression) CoerceExpression(org.apache.phoenix.expression.CoerceExpression) LiteralExpression(org.apache.phoenix.expression.LiteralExpression) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)

Example 5 with ColumnReference

Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in the Apache Phoenix project.

From the class IndexMaintainer, method buildDeleteMutation.

/**
 * Builds the Delete to apply against the index row for the given pending data-table
 * updates, or returns null when no index delete is needed.
 *
 * If the prior row state is unavailable, the pending updates constitute a row-level
 * delete, or any indexed column changed (moving the row to a different index row key),
 * the entire index row is deleted. Otherwise only the covered columns whose data-table
 * cells were deleted are removed from the index row.
 */
public Delete buildDeleteMutation(KeyValueBuilder kvBuilder, ValueGetter oldState, ImmutableBytesWritable dataRowKeyPtr, Collection<KeyValue> pendingUpdates, long ts, byte[] regionStartKey, byte[] regionEndKey) throws IOException {
    byte[] indexRowKey = this.buildRowKey(oldState, dataRowKeyPtr, regionStartKey, regionEndKey);
    DeleteType deleteType = null;
    boolean dropEntireRow = oldState == null;
    if (!dropEntireRow) {
        deleteType = getDeleteTypeOrNull(pendingUpdates);
        dropEntireRow = deleteType != null || hasIndexedColumnChanged(oldState, pendingUpdates);
    }
    Durability durability = indexWALDisabled ? Durability.SKIP_WAL : Durability.USE_DEFAULT;
    // Mirror the data table's delete granularity on the index.
    boolean singleVersion = deleteType == DeleteType.SINGLE_VERSION;
    if (dropEntireRow) {
        byte[] emptyCF = emptyKeyValueCFPtr.copyBytesIfNecessary();
        Delete rowDelete = new Delete(indexRowKey);
        for (ColumnReference coveredRef : getCoveredColumns()) {
            ColumnReference indexColumn = coveredColumnsMap.get(coveredRef);
            if (singleVersion) {
                rowDelete.deleteFamilyVersion(indexColumn.getFamily(), ts);
            } else {
                rowDelete.deleteFamily(indexColumn.getFamily(), ts);
            }
        }
        if (singleVersion) {
            rowDelete.deleteFamilyVersion(emptyCF, ts);
        } else {
            rowDelete.deleteFamily(emptyCF, ts);
        }
        rowDelete.setDurability(durability);
        return rowDelete;
    }
    // Only delete covered columns whose data-table cells were themselves deleted.
    Delete columnDelete = null;
    Set<ColumnReference> dataTableColRefs = coveredColumnsMap.keySet();
    for (Cell kv : pendingUpdates) {
        if (kv.getTypeByte() == KeyValue.Type.Put.getCode()) {
            continue;
        }
        ColumnReference dataRef = new ColumnReference(kv.getFamily(), kv.getQualifier());
        if (!dataTableColRefs.contains(dataRef)) {
            continue;
        }
        if (columnDelete == null) {
            // Lazily create the Delete only once we know there is something to remove.
            columnDelete = new Delete(indexRowKey);
            columnDelete.setDurability(durability);
        }
        ColumnReference indexColumn = coveredColumnsMap.get(dataRef);
        // A point delete on the data table maps to a point delete on the index.
        if (kv.getTypeByte() == KeyValue.Type.Delete.getCode()) {
            columnDelete.deleteColumn(indexColumn.getFamily(), indexColumn.getQualifier(), ts);
        } else {
            columnDelete.deleteColumns(indexColumn.getFamily(), indexColumn.getQualifier(), ts);
        }
    }
    return columnDelete;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Cell(org.apache.hadoop.hbase.Cell) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)

Aggregations

ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)37 Put (org.apache.hadoop.hbase.client.Put)12 ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)11 Test (org.junit.Test)11 Expression (org.apache.phoenix.expression.Expression)10 ArrayList (java.util.ArrayList)9 Cell (org.apache.hadoop.hbase.Cell)9 ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable)9 Scan (org.apache.hadoop.hbase.client.Scan)8 Region (org.apache.hadoop.hbase.regionserver.Region)8 KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression)8 SingleCellColumnExpression (org.apache.phoenix.expression.SingleCellColumnExpression)8 List (java.util.List)7 Mutation (org.apache.hadoop.hbase.client.Mutation)7 RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner)7 CoerceExpression (org.apache.phoenix.expression.CoerceExpression)7 LiteralExpression (org.apache.phoenix.expression.LiteralExpression)7 SingleCellConstructorExpression (org.apache.phoenix.expression.SingleCellConstructorExpression)7 IndexMaintainer (org.apache.phoenix.index.IndexMaintainer)7 PDataType (org.apache.phoenix.schema.types.PDataType)7