
Example 26 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class GroupedAggregateRegionObserver, method scanOrdered:

/**
 * Used for an aggregate query in which the key order matches the group by key order. In this
 * case, we can do the aggregation as we scan, by detecting when the group by key changes.
 * @param limit TODO
 * @throws IOException
 */
private RegionScanner scanOrdered(final ObserverContext<RegionCoprocessorEnvironment> c, final Scan scan, final RegionScanner scanner, final List<Expression> expressions, final ServerAggregators aggregators, final long limit) throws IOException {
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over ordered rows with scan " + scan + ", group by " + expressions + ", aggregators " + aggregators, ScanUtil.getCustomAnnotations(scan)));
    }
    final Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
    final boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(minMaxQualifiers);
    return new BaseRegionScanner(scanner) {

        private long rowCount = 0;

        private ImmutableBytesPtr currentKey = null;

        @Override
        public boolean next(List<Cell> results) throws IOException {
            boolean hasMore;
            boolean atLimit;
            boolean aggBoundary = false;
            Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
            ImmutableBytesPtr key = null;
            Aggregator[] rowAggregators = aggregators.getAggregators();
            // If we're calculating no aggregate functions, we can exit at the
            // start of a new row. Otherwise, we have to wait until an
            // aggregation boundary is reached before the group's row is complete.
            int countOffset = rowAggregators.length == 0 ? 1 : 0;
            Region region = c.getEnvironment().getRegion();
            boolean acquiredLock = false;
            try {
                region.startRegionOperation();
                acquiredLock = true;
                synchronized (scanner) {
                    do {
                        List<Cell> kvs = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), encodingScheme) : new ArrayList<Cell>();
                        // Results are potentially returned even when the return value of
                        // scanner.nextRaw is false, since that value only indicates whether
                        // or not there are more values after the ones returned.
                        hasMore = scanner.nextRaw(kvs);
                        if (!kvs.isEmpty()) {
                            result.setKeyValues(kvs);
                            key = TupleUtil.getConcatenatedValue(result, expressions);
                            aggBoundary = currentKey != null && currentKey.compareTo(key) != 0;
                            if (!aggBoundary) {
                                aggregators.aggregate(rowAggregators, result);
                                if (logger.isDebugEnabled()) {
                                    logger.debug(LogUtil.addCustomAnnotations("Row passed filters: " + kvs + ", aggregated values: " + Arrays.asList(rowAggregators), ScanUtil.getCustomAnnotations(scan)));
                                }
                                currentKey = key;
                            }
                        }
                        atLimit = rowCount + countOffset >= limit;
                    // Do rowCount + 1 b/c we don't have to wait for a complete
                    // row in the case of a DISTINCT with a LIMIT
                    } while (hasMore && !aggBoundary && !atLimit);
                }
            } finally {
                if (acquiredLock)
                    region.closeRegionOperation();
            }
            if (currentKey != null) {
                byte[] value = aggregators.toBytes(rowAggregators);
                KeyValue keyValue = KeyValueUtil.newKeyValue(currentKey.get(), currentKey.getOffset(), currentKey.getLength(), SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
                results.add(keyValue);
                if (logger.isDebugEnabled()) {
                    logger.debug(LogUtil.addCustomAnnotations("Adding new aggregate row: " + keyValue + ",for current key " + Bytes.toStringBinary(currentKey.get(), currentKey.getOffset(), currentKey.getLength()) + ", aggregated values: " + Arrays.asList(rowAggregators), ScanUtil.getCustomAnnotations(scan)));
                }
                // If we're at an aggregation boundary, reset the aggregators and
                // aggregate with the current result (which is not a part of
                // the returned result).
                if (aggBoundary) {
                    aggregators.reset(rowAggregators);
                    aggregators.aggregate(rowAggregators, result);
                    currentKey = key;
                    rowCount++;
                    atLimit |= rowCount >= limit;
                }
            }
            // Continue if there are more
            if (!atLimit && (hasMore || aggBoundary)) {
                return true;
            }
            currentKey = null;
            return false;
        }
    };
}
Also used : EncodedColumnQualiferCellsList (org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList), KeyValue (org.apache.hadoop.hbase.KeyValue), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), Aggregator (org.apache.phoenix.expression.aggregator.Aggregator), PInteger (org.apache.phoenix.schema.types.PInteger), PositionBasedMultiKeyValueTuple (org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple), MultiKeyValueTuple (org.apache.phoenix.schema.tuple.MultiKeyValueTuple), Region (org.apache.hadoop.hbase.regionserver.Region), List (java.util.List), ArrayList (java.util.ArrayList), Cell (org.apache.hadoop.hbase.Cell), Tuple (org.apache.phoenix.schema.tuple.Tuple)
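
The boundary detection above works because ImmutableBytesPtr compares by byte content rather than by object identity. A minimal standalone sketch of that pattern, with hypothetical key data (the class name and values are illustrative, not from the Phoenix source):

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class GroupBoundarySketch {
    public static void main(String[] args) {
        // Simulated stream of group-by keys, pre-sorted as scanOrdered assumes.
        byte[][] rows = { Bytes.toBytes("a"), Bytes.toBytes("a"), Bytes.toBytes("b") };
        ImmutableBytesPtr currentKey = null;
        for (byte[] row : rows) {
            ImmutableBytesPtr key = new ImmutableBytesPtr(row);
            // A boundary is any row whose key differs from the previous row's key.
            boolean aggBoundary = currentKey != null && currentKey.compareTo(key) != 0;
            if (aggBoundary) {
                System.out.println("group ended at key " + Bytes.toStringBinary(
                        currentKey.get(), currentKey.getOffset(), currentKey.getLength()));
            }
            currentKey = key;
        }
    }
}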

Example 27 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class HashJoinRegionScanner, method processResults:

private void processResults(List<Cell> result, boolean hasBatchLimit) throws IOException {
    if (result.isEmpty())
        return;
    Tuple tuple = useQualifierAsListIndex ? new PositionBasedResultTuple(result) : new ResultTuple(Result.create(result));
    // In newer versions of Phoenix, joinInfo.forceProjection() always returns true.
    if (joinInfo.forceProjection()) {
        tuple = projector.projectResults(tuple, useNewValueColumnQualifier);
    }
    // TODO: fix below Scanner.next() and Scanner.nextRaw() methods as well.
    if (hasBatchLimit)
        throw new UnsupportedOperationException("Cannot support join operations in scans with limit");
    int count = joinInfo.getJoinIds().length;
    boolean cont = true;
    for (int i = 0; i < count; i++) {
        if (!(joinInfo.earlyEvaluation()[i]) || hashCaches[i] == null)
            continue;
        ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(tuple, joinInfo.getJoinExpressions()[i]);
        tempTuples[i] = hashCaches[i].get(key);
        JoinType type = joinInfo.getJoinTypes()[i];
        if (((type == JoinType.Inner || type == JoinType.Semi) && tempTuples[i] == null) || (type == JoinType.Anti && tempTuples[i] != null)) {
            cont = false;
            break;
        }
    }
    if (cont) {
        if (projector == null) {
            int dup = 1;
            for (int i = 0; i < count; i++) {
                dup *= (tempTuples[i] == null ? 1 : tempTuples[i].size());
            }
            for (int i = 0; i < dup; i++) {
                resultQueue.offer(tuple);
            }
        } else {
            KeyValueSchema schema = joinInfo.getJoinedSchema();
            if (!joinInfo.forceProjection()) {
                // backward compatibility
                tuple = projector.projectResults(tuple, useNewValueColumnQualifier);
            }
            resultQueue.offer(tuple);
            for (int i = 0; i < count; i++) {
                boolean earlyEvaluation = joinInfo.earlyEvaluation()[i];
                JoinType type = joinInfo.getJoinTypes()[i];
                if (earlyEvaluation && (type == JoinType.Semi || type == JoinType.Anti))
                    continue;
                int j = resultQueue.size();
                while (j-- > 0) {
                    Tuple lhs = resultQueue.poll();
                    if (!earlyEvaluation) {
                        ImmutableBytesPtr key = TupleUtil.getConcatenatedValue(lhs, joinInfo.getJoinExpressions()[i]);
                        tempTuples[i] = hashCaches[i].get(key);
                        if (tempTuples[i] == null) {
                            if (type == JoinType.Inner || type == JoinType.Semi) {
                                continue;
                            } else if (type == JoinType.Anti) {
                                resultQueue.offer(lhs);
                                continue;
                            }
                        }
                    }
                    if (tempTuples[i] == null) {
                        Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ? lhs : TupleProjector.mergeProjectedValue((ProjectedValueTuple) lhs, schema, tempDestBitSet, null, joinInfo.getSchemas()[i], tempSrcBitSet[i], joinInfo.getFieldPositions()[i], useNewValueColumnQualifier);
                        resultQueue.offer(joined);
                        continue;
                    }
                    for (Tuple t : tempTuples[i]) {
                        Tuple joined = tempSrcBitSet[i] == ValueBitSet.EMPTY_VALUE_BITSET ? lhs : TupleProjector.mergeProjectedValue((ProjectedValueTuple) lhs, schema, tempDestBitSet, t, joinInfo.getSchemas()[i], tempSrcBitSet[i], joinInfo.getFieldPositions()[i], useNewValueColumnQualifier);
                        resultQueue.offer(joined);
                    }
                }
            }
        }
        // apply post-join filter
        Expression postFilter = joinInfo.getPostJoinFilterExpression();
        if (postFilter != null) {
            for (Iterator<Tuple> iter = resultQueue.iterator(); iter.hasNext(); ) {
                Tuple t = iter.next();
                postFilter.reset();
                ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
                try {
                    if (!postFilter.evaluate(t, tempPtr)) {
                        iter.remove();
                        continue;
                    }
                } catch (IllegalDataException e) {
                    iter.remove();
                    continue;
                }
                Boolean b = (Boolean) postFilter.getDataType().toObject(tempPtr);
                if (!b.booleanValue()) {
                    iter.remove();
                }
            }
        }
    }
}
Also used : PositionBasedResultTuple (org.apache.phoenix.schema.tuple.PositionBasedResultTuple), ResultTuple (org.apache.phoenix.schema.tuple.ResultTuple), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), JoinType (org.apache.phoenix.parse.JoinTableNode.JoinType), ProjectedValueTuple (org.apache.phoenix.execute.TupleProjector.ProjectedValueTuple), Expression (org.apache.phoenix.expression.Expression), KeyValueSchema (org.apache.phoenix.schema.KeyValueSchema), Tuple (org.apache.phoenix.schema.tuple.Tuple), IllegalDataException (org.apache.phoenix.schema.IllegalDataException)
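
The hashCaches[i].get(key) lookups depend on ImmutableBytesPtr defining equals and hashCode over the wrapped bytes, so a pointer built from the scanned row matches entries inserted under a different instance. A hedged sketch with a plain HashMap standing in for the Phoenix hash cache (the key and values are made up):

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class HashCacheSketch {
    public static void main(String[] args) {
        // Stand-in for the RHS hash cache: join key -> matching tuples.
        Map<ImmutableBytesPtr, List<String>> cache = new HashMap<>();
        cache.put(new ImmutableBytesPtr(Bytes.toBytes("joinKey1")), Arrays.asList("rhs1", "rhs2"));

        // A fresh instance over equal bytes hits the same entry, because
        // equality and hashing are defined over byte content, not identity.
        ImmutableBytesPtr probe = new ImmutableBytesPtr(Bytes.toBytes("joinKey1"));
        System.out.println(cache.get(probe)); // prints [rhs1, rhs2]
    }
}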

Example 28 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class MetaDataEndpointImpl, method addViewIndexesHeaderRowMutations:

private void addViewIndexesHeaderRowMutations(List<Mutation> mutationsForAddingColumnsToViews, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, PTable view, short deltaNumPkColsSoFar) {
    if (deltaNumPkColsSoFar > 0) {
        for (PTable index : view.getIndexes()) {
            byte[] indexHeaderRowKey = getViewIndexHeaderRowKey(index);
            Put indexHeaderRowMutation = new Put(indexHeaderRowKey);
            // increment sequence number
            long newSequenceNumber = index.getSequenceNumber() + 1;
            byte[] newSequenceNumberPtr = new byte[PLong.INSTANCE.getByteSize()];
            PLong.INSTANCE.getCodec().encodeLong(newSequenceNumber, newSequenceNumberPtr, 0);
            indexHeaderRowMutation.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, newSequenceNumberPtr);
            // increase the column count
            int newColumnCount = index.getColumns().size() + deltaNumPkColsSoFar;
            byte[] newColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
            PInteger.INSTANCE.getCodec().encodeInt(newColumnCount, newColumnCountPtr, 0);
            indexHeaderRowMutation.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.COLUMN_COUNT_BYTES, newColumnCountPtr);
            // add index row header key to the invalidate list to force clients to fetch the latest meta-data
            invalidateList.add(new ImmutableBytesPtr(indexHeaderRowKey));
            if (index.rowKeyOrderOptimizable()) {
                UpgradeUtil.addRowKeyOrderOptimizableCell(mutationsForAddingColumnsToViews, indexHeaderRowKey, clientTimeStamp);
            }
            mutationsForAddingColumnsToViews.add(indexHeaderRowMutation);
        }
    }
}
Also used : ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), PTable (org.apache.phoenix.schema.PTable), Put (org.apache.hadoop.hbase.client.Put), PTinyint (org.apache.phoenix.schema.types.PTinyint), PSmallint (org.apache.phoenix.schema.types.PSmallint)
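
The two encode calls above write fixed-width values into freshly sized byte arrays; wrapping such arrays in ImmutableBytesPtr, as the invalidate list does, adds value-based equality without copying. A small sketch of the same codec calls, with made-up numbers:

import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.schema.types.PInteger;
import org.apache.phoenix.schema.types.PLong;

public class CodecSketch {
    public static void main(String[] args) {
        // Encode a sequence number the same way the mutation above does.
        byte[] seqBytes = new byte[PLong.INSTANCE.getByteSize()]; // 8 bytes for a long
        PLong.INSTANCE.getCodec().encodeLong(42L, seqBytes, 0);

        // Encode a column count the same way.
        byte[] countBytes = new byte[PInteger.INSTANCE.getByteSize()]; // 4 bytes for an int
        PInteger.INSTANCE.getCodec().encodeInt(7, countBytes, 0);

        // Wrap the encoded bytes, as invalidateList.add(new ImmutableBytesPtr(...)) does.
        ImmutableBytesPtr ptr = new ImmutableBytesPtr(seqBytes);
        System.out.println(ptr.getLength()); // prints 8
    }
}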

Example 29 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class UpsertCompiler, method setValues:

private static void setValues(byte[][] values, int[] pkSlotIndex, int[] columnIndexes, PTable table, Map<ImmutableBytesPtr, RowMutationState> mutation, PhoenixStatement statement, boolean useServerTimestamp, IndexMaintainer maintainer, byte[][] viewConstants, byte[] onDupKeyBytes, int numSplColumns) throws SQLException {
    Map<PColumn, byte[]> columnValues = Maps.newHashMapWithExpectedSize(columnIndexes.length);
    byte[][] pkValues = new byte[table.getPKColumns().size()][];
    // If the table is salted, the first byte is the salt byte: set a one-byte placeholder
    // here and we will fill in the byte later in PRowImpl.
    if (table.getBucketNum() != null) {
        pkValues[0] = new byte[] { 0 };
    }
    for (int i = 0; i < numSplColumns; i++) {
        pkValues[i + (table.getBucketNum() != null ? 1 : 0)] = values[i];
    }
    // A null rowTimestamp covers the case when the table doesn't have a row timestamp column.
    Long rowTimestamp = null;
    RowTimestampColInfo rowTsColInfo = new RowTimestampColInfo(useServerTimestamp, rowTimestamp);
    for (int i = 0, j = numSplColumns; j < values.length; j++, i++) {
        byte[] value = values[j];
        PColumn column = table.getColumns().get(columnIndexes[i]);
        if (SchemaUtil.isPKColumn(column)) {
            pkValues[pkSlotIndex[i]] = value;
            if (SchemaUtil.getPKPosition(table, column) == table.getRowTimestampColPos()) {
                if (!useServerTimestamp) {
                    PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos());
                    rowTimestamp = PLong.INSTANCE.getCodec().decodeLong(value, 0, rowTimestampCol.getSortOrder());
                    if (rowTimestamp < 0) {
                        throw new IllegalDataException("Value of a column designated as ROW_TIMESTAMP cannot be less than zero");
                    }
                    rowTsColInfo = new RowTimestampColInfo(useServerTimestamp, rowTimestamp);
                }
            }
        } else {
            columnValues.put(column, value);
        }
    }
    ImmutableBytesPtr ptr = new ImmutableBytesPtr();
    table.newKey(ptr, pkValues);
    if (table.getIndexType() == IndexType.LOCAL && maintainer != null) {
        byte[] rowKey = maintainer.buildDataRowKey(ptr, viewConstants);
        HRegionLocation region = statement.getConnection().getQueryServices().getTableRegionLocation(table.getParentName().getBytes(), rowKey);
        byte[] regionPrefix = region.getRegionInfo().getStartKey().length == 0 ? new byte[region.getRegionInfo().getEndKey().length] : region.getRegionInfo().getStartKey();
        if (regionPrefix.length != 0) {
            ptr.set(ScanRanges.prefixKey(ptr.get(), 0, regionPrefix, regionPrefix.length));
        }
    }
    mutation.put(ptr, new RowMutationState(columnValues, statement.getConnection().getStatementExecutionCounter(), rowTsColInfo, onDupKeyBytes));
}
Also used : PColumn (org.apache.phoenix.schema.PColumn), HRegionLocation (org.apache.hadoop.hbase.HRegionLocation), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), PLong (org.apache.phoenix.schema.types.PLong), PUnsignedLong (org.apache.phoenix.schema.types.PUnsignedLong), RowTimestampColInfo (org.apache.phoenix.execute.MutationState.RowTimestampColInfo), Hint (org.apache.phoenix.parse.HintNode.Hint), PSmallint (org.apache.phoenix.schema.types.PSmallint), IllegalDataException (org.apache.phoenix.schema.IllegalDataException), RowMutationState (org.apache.phoenix.execute.MutationState.RowMutationState)
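
Because the mutation map is keyed by ImmutableBytesPtr, two upserts that produce identical primary key bytes collapse into a single entry, the later RowMutationState replacing the earlier one. A hedged illustration with a String payload standing in for RowMutationState (the key bytes are hypothetical):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class MutationMapSketch {
    public static void main(String[] args) {
        Map<ImmutableBytesPtr, String> mutation = new HashMap<>();
        mutation.put(new ImmutableBytesPtr(Bytes.toBytes("pk1")), "first upsert");
        // Same key bytes in a new instance: the entry is replaced, not duplicated.
        mutation.put(new ImmutableBytesPtr(Bytes.toBytes("pk1")), "second upsert");
        System.out.println(mutation.size()); // prints 1
    }
}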

Example 30 with ImmutableBytesPtr

Use of org.apache.phoenix.hbase.index.util.ImmutableBytesPtr in project phoenix by apache.

From class MetaDataEndpointImpl, method updateIndexState:

@Override
public void updateIndexState(RpcController controller, UpdateIndexStateRequest request, RpcCallback<MetaDataResponse> done) {
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    byte[] schemaName = null;
    byte[] tableName = null;
    try {
        byte[][] rowKeyMetaData = new byte[3][];
        List<Mutation> tableMetadata = ProtobufUtil.getMutations(request);
        MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
        byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
        Region region = env.getRegion();
        MetaDataMutationResult result = checkTableKeyInRegion(key, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        long timeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
        ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
        List<Cell> newKVs = tableMetadata.get(0).getFamilyCellMap().get(TABLE_FAMILY_BYTES);
        Cell newKV = null;
        int disableTimeStampKVIndex = -1;
        int indexStateKVIndex = 0;
        int index = 0;
        for (Cell cell : newKVs) {
            if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), INDEX_STATE_BYTES, 0, INDEX_STATE_BYTES.length) == 0) {
                newKV = cell;
                indexStateKVIndex = index;
            } else if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), INDEX_DISABLE_TIMESTAMP_BYTES, 0, INDEX_DISABLE_TIMESTAMP_BYTES.length) == 0) {
                disableTimeStampKVIndex = index;
            }
            index++;
        }
        PIndexState newState = PIndexState.fromSerializedValue(newKV.getValueArray()[newKV.getValueOffset()]);
        RowLock rowLock = region.getRowLock(key, false);
        if (rowLock == null) {
            throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
        }
        try {
            Get get = new Get(key);
            get.setTimeRange(PTable.INITIAL_SEQ_NUM, timeStamp);
            get.addColumn(TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
            get.addColumn(TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
            get.addColumn(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
            get.addColumn(TABLE_FAMILY_BYTES, ROW_KEY_ORDER_OPTIMIZABLE_BYTES);
            Result currentResult = region.get(get);
            if (currentResult.rawCells().length == 0) {
                builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
                builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                done.run(builder.build());
                return;
            }
            Cell dataTableKV = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
            Cell currentStateKV = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
            Cell currentDisableTimeStamp = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
            boolean rowKeyOrderOptimizable = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, ROW_KEY_ORDER_OPTIMIZABLE_BYTES) != null;
            PIndexState currentState = PIndexState.fromSerializedValue(currentStateKV.getValueArray()[currentStateKV.getValueOffset()]);
            if ((currentDisableTimeStamp != null && currentDisableTimeStamp.getValueLength() > 0) && (disableTimeStampKVIndex >= 0)) {
                long curTimeStampVal = (Long) PLong.INSTANCE.toObject(currentDisableTimeStamp.getValueArray(), currentDisableTimeStamp.getValueOffset(), currentDisableTimeStamp.getValueLength());
                // The new disable timestamp passed in by the client.
                Cell newDisableTimeStampCell = newKVs.get(disableTimeStampKVIndex);
                long newDisableTimeStamp = (Long) PLong.INSTANCE.toObject(newDisableTimeStampCell.getValueArray(), newDisableTimeStampCell.getValueOffset(), newDisableTimeStampCell.getValueLength());
                // Keep the older disable timestamp rather than resetting it each time
                // a new data table write occurs.
                if (curTimeStampVal != 0 && Math.abs(curTimeStampVal) < Math.abs(newDisableTimeStamp)) {
                    // do not reset the disable timestamp
                    newKVs.remove(disableTimeStampKVIndex);
                    disableTimeStampKVIndex = -1;
                }
            }
            // Detect invalid transitions
            if (currentState == PIndexState.BUILDING) {
                if (newState == PIndexState.USABLE) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
            } else if (currentState == PIndexState.DISABLE) {
                if (newState != PIndexState.BUILDING && newState != PIndexState.DISABLE && newState != PIndexState.INACTIVE) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
                // Done building, but the index was disabled before the build completed,
                // so it should remain in the disabled state.
                if (newState == PIndexState.ACTIVE) {
                    newState = PIndexState.DISABLE;
                }
            }
            if (currentState == PIndexState.BUILDING && newState != PIndexState.ACTIVE) {
                timeStamp = currentStateKV.getTimestamp();
            }
            if ((currentState == PIndexState.UNUSABLE && newState == PIndexState.ACTIVE) || (currentState == PIndexState.ACTIVE && newState == PIndexState.UNUSABLE)) {
                newState = PIndexState.INACTIVE;
                newKVs.set(indexStateKVIndex, KeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue())));
            } else if (currentState == PIndexState.INACTIVE && newState == PIndexState.USABLE) {
                newState = PIndexState.ACTIVE;
                newKVs.set(indexStateKVIndex, KeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue())));
            }
            PTable returnTable = null;
            if (currentState != newState || disableTimeStampKVIndex != -1) {
                byte[] dataTableKey = null;
                if (dataTableKV != null) {
                    dataTableKey = SchemaUtil.getTableKey(tenantId, schemaName, dataTableKV.getValue());
                }
                if (dataTableKey != null) {
                    // make a copy of tableMetadata
                    tableMetadata = new ArrayList<Mutation>(tableMetadata);
                    // insert an empty KV to trigger time stamp update on data table row
                    Put p = new Put(dataTableKey);
                    p.add(TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timeStamp, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
                    tableMetadata.add(p);
                }
                boolean setRowKeyOrderOptimizableCell = newState == PIndexState.BUILDING && !rowKeyOrderOptimizable;
                // If moving to BUILDING and not yet marked, add the ROW_KEY_ORDER_OPTIMIZABLE cell
                // so that the row keys get generated using the new row key format.
                if (setRowKeyOrderOptimizableCell) {
                    UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetadata, key, timeStamp);
                }
                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
                // Invalidate from cache
                Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
                metaDataCache.invalidate(cacheKey);
                if (dataTableKey != null) {
                    metaDataCache.invalidate(new ImmutableBytesPtr(dataTableKey));
                }
                if (setRowKeyOrderOptimizableCell || disableTimeStampKVIndex != -1 || currentState == PIndexState.DISABLE || newState == PIndexState.BUILDING) {
                    returnTable = doGetTable(key, HConstants.LATEST_TIMESTAMP, rowLock);
                }
            }
            // Get client timeStamp from mutations, since it may get updated by the
            // mutateRowsWithLocks call
            long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
            builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
            builder.setMutationTime(currentTime);
            if (returnTable != null) {
                builder.setTable(PTableImpl.toProto(returnTable));
            }
            done.run(builder.build());
            return;
        } finally {
            rowLock.release();
        }
    } catch (Throwable t) {
        logger.error("updateIndexState failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
    }
}
Also used : PTable (org.apache.phoenix.schema.PTable), Result (org.apache.hadoop.hbase.client.Result), PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity), Cell (org.apache.hadoop.hbase.Cell), RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock), MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse), PIndexState (org.apache.phoenix.schema.PIndexState), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), IOException (java.io.IOException), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), PTinyint (org.apache.phoenix.schema.types.PTinyint), PSmallint (org.apache.phoenix.schema.types.PSmallint), Put (org.apache.hadoop.hbase.client.Put), Get (org.apache.hadoop.hbase.client.Get), PLong (org.apache.phoenix.schema.types.PLong), Region (org.apache.hadoop.hbase.regionserver.Region), Mutation (org.apache.hadoop.hbase.client.Mutation)
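
The metadata cache invalidation relies on the same property: the Guava Cache is keyed by ImmutableBytesPtr, so a pointer constructed from the raw table key bytes at invalidation time matches the key used when the entry was cached. A minimal sketch, assuming Guava on the classpath and using a String stand-in for PMetaDataEntity:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class MetaDataCacheSketch {
    public static void main(String[] args) {
        Cache<ImmutableBytesPtr, String> metaDataCache = CacheBuilder.newBuilder().build();
        // In the real code the key comes from SchemaUtil.getTableKey(tenantId, schemaName, tableName).
        byte[] tableKey = Bytes.toBytes("SCHEMA.TABLE");
        metaDataCache.put(new ImmutableBytesPtr(tableKey), "cached table metadata");

        // Invalidate with a fresh instance over the same bytes, mirroring
        // metaDataCache.invalidate(new ImmutableBytesPtr(dataTableKey)) above.
        metaDataCache.invalidate(new ImmutableBytesPtr(tableKey));
        System.out.println(metaDataCache.getIfPresent(new ImmutableBytesPtr(tableKey))); // prints null
    }
}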

Aggregations

ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 120
Mutation (org.apache.hadoop.hbase.client.Mutation): 31
PTable (org.apache.phoenix.schema.PTable): 28
ArrayList (java.util.ArrayList): 27
Region (org.apache.hadoop.hbase.regionserver.Region): 22
PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity): 22
Test (org.junit.Test): 21
Cell (org.apache.hadoop.hbase.Cell): 20
Put (org.apache.hadoop.hbase.client.Put): 18
List (java.util.List): 15
Scan (org.apache.hadoop.hbase.client.Scan): 15
Pair (org.apache.hadoop.hbase.util.Pair): 15
IOException (java.io.IOException): 14
Expression (org.apache.phoenix.expression.Expression): 14
PColumn (org.apache.phoenix.schema.PColumn): 14
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 13
PSmallint (org.apache.phoenix.schema.types.PSmallint): 12
HashMap (java.util.HashMap): 11
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 11
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 11