Example 26 with ColumnReference

Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in the Apache Phoenix project.

From class IndexMaintainer, method hasIndexedColumnChanged.

private boolean hasIndexedColumnChanged(ValueGetter oldState, Collection<KeyValue> pendingUpdates) throws IOException {
    if (pendingUpdates.isEmpty()) {
        return false;
    }
    Map<ColumnReference, Cell> newState = Maps.newHashMapWithExpectedSize(pendingUpdates.size());
    for (Cell kv : pendingUpdates) {
        newState.put(new ColumnReference(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv)), kv);
    }
    for (ColumnReference ref : indexedColumns) {
        Cell newValue = newState.get(ref);
        if (newValue != null) {
            // Indexed column has potentially changed
            ImmutableBytesWritable oldValue = oldState.getLatestValue(ref);
            boolean newValueSetAsNull = (newValue.getTypeByte() == Type.DeleteColumn.getCode() || newValue.getTypeByte() == Type.Delete.getCode() || CellUtil.matchingValue(newValue, HConstants.EMPTY_BYTE_ARRAY));
            // If the new value is being set to null and the old value is already null,
            // then just skip to the next indexed column.
            if (newValueSetAsNull && oldValue == null) {
                continue;
            }
            if ((oldValue == null && !newValueSetAsNull) || (oldValue != null && newValueSetAsNull)) {
                return true;
            }
            // If the old value is different from the new value, the index row needs to be deleted
            if (Bytes.compareTo(oldValue.get(), oldValue.getOffset(), oldValue.getLength(), newValue.getValueArray(), newValue.getValueOffset(), newValue.getValueLength()) != 0) {
                return true;
            }
        }
    }
    return false;
}
Also used: ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), Cell (org.apache.hadoop.hbase.Cell), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
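
The map lookup above only works because ColumnReference has value semantics: two references built independently from the same family and qualifier bytes must compare equal, which its use as a HashMap key and Set member throughout these examples requires. A minimal sketch illustrating that keying, with hypothetical family/qualifier names:

// Hedged sketch (hypothetical family "0", qualifier "COL1"): references built
// from the same bytes compare equal, which is what lets newState.get(ref)
// find the cell keyed by the incoming Cell's cloned family and qualifier.
ColumnReference fromIndexDef = new ColumnReference(Bytes.toBytes("0"), Bytes.toBytes("COL1"));
ColumnReference fromPendingCell = new ColumnReference(Bytes.toBytes("0"), Bytes.toBytes("COL1"));
assert fromIndexDef.equals(fromPendingCell);
assert fromIndexDef.hashCode() == fromPendingCell.hashCode(); // required for HashMap keys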

Example 27 with ColumnReference

Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in the Apache Phoenix project.

From class IndexManagementUtil, method newLocalStateScan.

public static Scan newLocalStateScan(Scan scan, List<? extends Iterable<? extends ColumnReference>> refsArray) {
    Scan s = scan;
    if (scan == null) {
        s = new Scan();
    }
    s.setRaw(true);
    // add the necessary columns to the scan
    for (Iterable<? extends ColumnReference> refs : refsArray) {
        for (ColumnReference ref : refs) {
            s.addFamily(ref.getFamily());
        }
    }
    s.setMaxVersions();
    return s;
}
Also used: Scan (org.apache.hadoop.hbase.client.Scan), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
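
A minimal usage sketch (family and qualifier names are hypothetical; java.util.Arrays is assumed imported): combine the indexed columns of several indexes into one scan. Passing null for the first argument makes the method allocate a fresh Scan.

// Hedged sketch: build a scan that covers every family referenced by two
// hypothetical sets of indexed columns.
List<ColumnReference> idx1Refs = Arrays.asList(
        new ColumnReference(Bytes.toBytes("fam1"), Bytes.toBytes("q1")));
List<ColumnReference> idx2Refs = Arrays.asList(
        new ColumnReference(Bytes.toBytes("fam2"), Bytes.toBytes("q2")));
Scan localScan = IndexManagementUtil.newLocalStateScan(null, Arrays.asList(idx1Refs, idx2Refs));

The raw scan plus setMaxVersions() matters here: index maintenance needs to see delete markers and every cell version, not just the latest committed value.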

Example 28 with ColumnReference

Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in the Apache Phoenix project.

From class IndexMaintainer, method initCachedState.

/**
     * Initialize the calculated (cached) state, both when reading an existing
     * maintainer and when creating a new one.
     */
private void initCachedState() {
    byte[] emptyKvQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(encodingScheme).getFirst();
    dataEmptyKeyValueRef = new ColumnReference(emptyKeyValueCFPtr.copyBytesIfNecessary(), emptyKvQualifier);
    this.allColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size() + coveredColumnsMap.size());
    // columns that are required to evaluate all expressions in indexedExpressions (not including columns in data row key)
    this.indexedColumns = Sets.newLinkedHashSetWithExpectedSize(indexedExpressions.size());
    for (Expression expression : indexedExpressions) {
        KeyValueExpressionVisitor visitor = new KeyValueExpressionVisitor() {

            @Override
            public Void visit(KeyValueColumnExpression expression) {
                if (indexedColumns.add(new ColumnReference(expression.getColumnFamily(), expression.getColumnQualifier()))) {
                    indexedColumnTypes.add(expression.getDataType());
                }
                return null;
            }
        };
        expression.accept(visitor);
    }
    allColumns.addAll(indexedColumns);
    for (ColumnReference colRef : coveredColumnsMap.keySet()) {
        if (immutableStorageScheme == ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
            allColumns.add(colRef);
        } else {
            allColumns.add(new ColumnReference(colRef.getFamily(), QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES));
        }
    }
    int dataPkOffset = (isDataTableSalted ? 1 : 0) + (isMultiTenant ? 1 : 0);
    int nIndexPkColumns = getIndexPkColumnCount();
    dataPkPosition = new int[nIndexPkColumns];
    Arrays.fill(dataPkPosition, EXPRESSION_NOT_PRESENT);
    int numViewConstantColumns = 0;
    BitSet viewConstantColumnBitSet = rowKeyMetaData.getViewConstantColumnBitSet();
    for (int i = dataPkOffset; i < dataRowKeySchema.getFieldCount(); i++) {
        if (!viewConstantColumnBitSet.get(i)) {
            int indexPkPosition = rowKeyMetaData.getIndexPkPosition(i - dataPkOffset);
            this.dataPkPosition[indexPkPosition] = i;
        } else {
            numViewConstantColumns++;
        }
    }
    // Calculate the max number of trailing nulls that we should get rid of after building the index row key.
    // We only get rid of nulls for variable length types, so we have to be careful to consider the type of the
    // index table, not the data type of the data table
    int expressionsPos = indexedExpressions.size();
    int indexPkPos = nIndexPkColumns - numViewConstantColumns - 1;
    while (indexPkPos >= 0) {
        int dataPkPos = dataPkPosition[indexPkPos];
        boolean isDataNullable;
        PDataType dataType;
        if (dataPkPos == EXPRESSION_NOT_PRESENT) {
            isDataNullable = true;
            dataType = indexedExpressions.get(--expressionsPos).getDataType();
        } else {
            Field dataField = dataRowKeySchema.getField(dataPkPos);
            dataType = dataField.getDataType();
            isDataNullable = dataField.isNullable();
        }
        PDataType indexDataType = IndexUtil.getIndexColumnDataType(isDataNullable, dataType);
        if (indexDataType.isFixedWidth()) {
            break;
        }
        indexPkPos--;
    }
    maxTrailingNulls = nIndexPkColumns - indexPkPos - 1;
}
Also used: Field (org.apache.phoenix.schema.ValueSchema.Field), PDataType (org.apache.phoenix.schema.types.PDataType), KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression), SingleCellConstructorExpression (org.apache.phoenix.expression.SingleCellConstructorExpression), Expression (org.apache.phoenix.expression.Expression), SingleCellColumnExpression (org.apache.phoenix.expression.SingleCellColumnExpression), CoerceExpression (org.apache.phoenix.expression.CoerceExpression), LiteralExpression (org.apache.phoenix.expression.LiteralExpression), BitSet (org.apache.phoenix.util.BitSet), KeyValueExpressionVisitor (org.apache.phoenix.expression.visitor.KeyValueExpressionVisitor), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
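
The anonymous KeyValueExpressionVisitor is the pattern Phoenix uses to discover which data columns an indexed expression reads. The same pattern works standalone; a hedged sketch (the helper method name is hypothetical) that collects the ColumnReferences touched by a single Expression:

// Hedged sketch: gather the column references an Expression depends on,
// mirroring the anonymous visitor used in initCachedState above.
static Set<ColumnReference> collectColumnReferences(Expression expression) {
    final Set<ColumnReference> refs = Sets.newLinkedHashSet();
    expression.accept(new KeyValueExpressionVisitor() {

        @Override
        public Void visit(KeyValueColumnExpression kvExpr) {
            refs.add(new ColumnReference(kvExpr.getColumnFamily(), kvExpr.getColumnQualifier()));
            return null;
        }
    });
    return refs;
}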

Example 29 with ColumnReference

Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in the Apache Phoenix project.

From class IndexUtil, method generateIndexData.

public static List<Mutation> generateIndexData(final PTable table, PTable index, final Map<ImmutableBytesPtr, RowMutationState> valuesMap, List<Mutation> dataMutations, final KeyValueBuilder kvBuilder, PhoenixConnection connection) throws SQLException {
    try {
        final ImmutableBytesPtr ptr = new ImmutableBytesPtr();
        IndexMaintainer maintainer = index.getIndexMaintainer(table, connection);
        List<Mutation> indexMutations = Lists.newArrayListWithExpectedSize(dataMutations.size());
        for (final Mutation dataMutation : dataMutations) {
            long ts = MetaDataUtil.getClientTimeStamp(dataMutation);
            ptr.set(dataMutation.getRow());
            /*
                 * We only need to generate the additional mutations for a Put for immutable indexes.
                 * Deletes of rows are handled by running a re-written query against the index table,
                 * and Deletes of column values should never be necessary, as you should never be
                 * updating an existing row.
                 */
            if (dataMutation instanceof Put) {
                ValueGetter valueGetter = new ValueGetter() {

                    @Override
                    public byte[] getRowKey() {
                        return dataMutation.getRow();
                    }

                    @Override
                    public ImmutableBytesWritable getLatestValue(ColumnReference ref) {
                        // Always return null for the empty key value, as this will cause the
                        // index maintainer to always treat this Put as a new row.
                        if (isEmptyKeyValue(table, ref)) {
                            return null;
                        }
                        byte[] family = ref.getFamily();
                        byte[] qualifier = ref.getQualifier();
                        Map<byte[], List<Cell>> familyMap = dataMutation.getFamilyCellMap();
                        List<Cell> kvs = familyMap.get(family);
                        if (kvs == null) {
                            return null;
                        }
                        for (Cell kv : kvs) {
                            if (Bytes.compareTo(kv.getFamilyArray(), kv.getFamilyOffset(), kv.getFamilyLength(), family, 0, family.length) == 0 && Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), qualifier, 0, qualifier.length) == 0) {
                                ImmutableBytesPtr ptr = new ImmutableBytesPtr();
                                kvBuilder.getValueAsPtr(kv, ptr);
                                return ptr;
                            }
                        }
                        return null;
                    }
                };
                byte[] regionStartKey = null;
                byte[] regionEndkey = null;
                if (maintainer.isLocalIndex()) {
                    HRegionLocation tableRegionLocation = connection.getQueryServices().getTableRegionLocation(table.getPhysicalName().getBytes(), dataMutation.getRow());
                    regionStartKey = tableRegionLocation.getRegionInfo().getStartKey();
                    regionEndkey = tableRegionLocation.getRegionInfo().getEndKey();
                }
                indexMutations.add(maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr, ts, regionStartKey, regionEndkey));
            }
        }
        return indexMutations;
    } catch (IOException e) {
        throw new SQLException(e);
    }
}
Also used: SQLException (java.sql.SQLException), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), IOException (java.io.IOException), Put (org.apache.hadoop.hbase.client.Put), ValueGetter (org.apache.phoenix.hbase.index.ValueGetter), HRegionLocation (org.apache.hadoop.hbase.HRegionLocation), IndexMaintainer (org.apache.phoenix.index.IndexMaintainer), List (java.util.List), ArrayList (java.util.ArrayList), Mutation (org.apache.hadoop.hbase.client.Mutation), Cell (org.apache.hadoop.hbase.Cell), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
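
The anonymous ValueGetter above resolves each ColumnReference directly against the Put's family cell map. The same contract can be satisfied from any source of latest values; a hedged sketch of a map-backed getter (the method name and map are hypothetical):

// Hedged sketch: a ValueGetter over precomputed values. Returning null means
// "no value for this column", matching the Put-backed getter above.
static ValueGetter mapBackedGetter(final byte[] rowKey,
        final Map<ColumnReference, ImmutableBytesWritable> latestValues) {
    return new ValueGetter() {

        @Override
        public byte[] getRowKey() {
            return rowKey;
        }

        @Override
        public ImmutableBytesWritable getLatestValue(ColumnReference ref) {
            return latestValues.get(ref);
        }
    };
}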

Example 30 with ColumnReference

Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in the Apache Phoenix project.

From class IndexUtil, method deserializeDataTableColumnsToJoin.

public static ColumnReference[] deserializeDataTableColumnsToJoin(Scan scan) {
    byte[] columnsBytes = scan.getAttribute(BaseScannerRegionObserver.DATA_TABLE_COLUMNS_TO_JOIN);
    if (columnsBytes == null)
        return null;
    // TODO: size?
    ByteArrayInputStream stream = new ByteArrayInputStream(columnsBytes);
    try {
        DataInputStream input = new DataInputStream(stream);
        int numColumns = WritableUtils.readVInt(input);
        ColumnReference[] dataColumns = new ColumnReference[numColumns];
        for (int i = 0; i < numColumns; i++) {
            dataColumns[i] = new ColumnReference(Bytes.readByteArray(input), Bytes.readByteArray(input));
        }
        return dataColumns;
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            stream.close();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
Also used: ByteArrayInputStream (java.io.ByteArrayInputStream), IOException (java.io.IOException), DataInputStream (java.io.DataInputStream), ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference)
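
deserializeDataTableColumnsToJoin implies a matching write side that is not part of this excerpt. A hedged sketch of what that serializer might look like, using the Hadoop/HBase counterparts (WritableUtils.writeVInt, Bytes.writeByteArray) of the read calls above; the method name is hypothetical:

// Hedged sketch: write a ColumnReference array into the scan attribute that
// deserializeDataTableColumnsToJoin reads back.
public static void serializeDataTableColumnsToJoin(Scan scan, ColumnReference[] dataColumns) throws IOException {
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    DataOutputStream output = new DataOutputStream(stream);
    WritableUtils.writeVInt(output, dataColumns.length);
    for (ColumnReference ref : dataColumns) {
        Bytes.writeByteArray(output, ref.getFamily());
        Bytes.writeByteArray(output, ref.getQualifier());
    }
    output.flush();
    scan.setAttribute(BaseScannerRegionObserver.DATA_TABLE_COLUMNS_TO_JOIN, stream.toByteArray());
}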

Aggregations

ColumnReference (org.apache.phoenix.hbase.index.covered.update.ColumnReference): 37 usages
Put (org.apache.hadoop.hbase.client.Put): 12 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 11 usages
Test (org.junit.Test): 11 usages
Expression (org.apache.phoenix.expression.Expression): 10 usages
ArrayList (java.util.ArrayList): 9 usages
Cell (org.apache.hadoop.hbase.Cell): 9 usages
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 9 usages
Scan (org.apache.hadoop.hbase.client.Scan): 8 usages
Region (org.apache.hadoop.hbase.regionserver.Region): 8 usages
KeyValueColumnExpression (org.apache.phoenix.expression.KeyValueColumnExpression): 8 usages
SingleCellColumnExpression (org.apache.phoenix.expression.SingleCellColumnExpression): 8 usages
List (java.util.List): 7 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 7 usages
RegionScanner (org.apache.hadoop.hbase.regionserver.RegionScanner): 7 usages
CoerceExpression (org.apache.phoenix.expression.CoerceExpression): 7 usages
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 7 usages
SingleCellConstructorExpression (org.apache.phoenix.expression.SingleCellConstructorExpression): 7 usages
IndexMaintainer (org.apache.phoenix.index.IndexMaintainer): 7 usages
PDataType (org.apache.phoenix.schema.types.PDataType): 7 usages