
Example 1 with PIndexState

use of org.apache.phoenix.schema.PIndexState in project phoenix by apache.

In class ConnectionlessQueryServicesImpl, method updateIndexState:

@Override
public MetaDataMutationResult updateIndexState(List<Mutation> tableMetadata, String parentTableName) throws SQLException {
    byte[][] rowKeyMetadata = new byte[3][];
    SchemaUtil.getVarChars(tableMetadata.get(0).getRow(), rowKeyMetadata);
    Mutation m = MetaDataUtil.getTableHeaderRow(tableMetadata);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    if (!MetaDataUtil.getMutationValue(m, INDEX_STATE_BYTES, kvBuilder, ptr)) {
        throw new IllegalStateException();
    }
    PIndexState newState = PIndexState.fromSerializedValue(ptr.get()[ptr.getOffset()]);
    byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    String schemaName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]);
    String indexName = Bytes.toString(rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]);
    String indexTableName = SchemaUtil.getTableName(schemaName, indexName);
    PName tenantId = tenantIdBytes.length == 0 ? null : PNameFactory.newName(tenantIdBytes);
    PTable index = metaData.getTableRef(new PTableKey(tenantId, indexTableName)).getTable();
    index = PTableImpl.makePTable(index, newState == PIndexState.USABLE ? PIndexState.ACTIVE : newState == PIndexState.UNUSABLE ? PIndexState.INACTIVE : newState);
    return new MetaDataMutationResult(MutationCode.TABLE_ALREADY_EXISTS, 0, index);
}
Also used: ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable) PIndexState (org.apache.phoenix.schema.PIndexState) PName (org.apache.phoenix.schema.PName) Mutation (org.apache.hadoop.hbase.client.Mutation) PTableKey (org.apache.phoenix.schema.PTableKey) MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) PTable (org.apache.phoenix.schema.PTable)
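
The USABLE/UNUSABLE-to-ACTIVE/INACTIVE mapping above hinges on PIndexState's single-byte serialized form. Below is a minimal round-trip sketch, not part of the Phoenix sources, assuming getSerializedValue() and fromSerializedValue(byte) behave exactly as they are used in these examples.

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.schema.PIndexState;

public class PIndexStateRoundTrip {
    public static void main(String[] args) {
        PIndexState original = PIndexState.INACTIVE;
        // Serialize the way updateIndexState writes the INDEX_STATE cell (assumes a String serialized value).
        byte[] serialized = Bytes.toBytes(original.getSerializedValue());
        // Read it back from the first byte, mirroring PIndexState.fromSerializedValue(ptr.get()[ptr.getOffset()]).
        PIndexState restored = PIndexState.fromSerializedValue(serialized[0]);
        System.out.println(original + " -> " + restored);
    }
}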

Example 2 with PIndexState

use of org.apache.phoenix.schema.PIndexState in project phoenix by apache.

In class IndexStateNameFunction, method evaluate:

@Override
public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
    Expression child = children.get(0);
    if (!child.evaluate(tuple, ptr)) {
        return false;
    }
    if (ptr.getLength() == 0) {
        return true;
    }
    byte serializedByte = ptr.get()[ptr.getOffset()];
    PIndexState indexState = PIndexState.fromSerializedValue(serializedByte);
    ptr.set(indexState.toBytes());
    return true;
}
Also used: Expression (org.apache.phoenix.expression.Expression) PIndexState (org.apache.phoenix.schema.PIndexState)
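
IndexStateNameFunction is a scalar built-in, so it is typically exercised from SQL rather than called directly. The JDBC sketch below is hypothetical: the SQL function name IndexStateName and the SYSTEM.CATALOG column names are assumptions that the snippet above does not confirm.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class IndexStateNameExample {
    public static void main(String[] args) throws Exception {
        // Assumed Phoenix JDBC URL; adjust the ZooKeeper quorum for a real cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                 "SELECT TABLE_NAME, IndexStateName(INDEX_STATE) " +
                 "FROM SYSTEM.CATALOG WHERE INDEX_STATE IS NOT NULL")) {
            while (rs.next()) {
                // Prints the index name and the human-readable state produced by evaluate() above.
                System.out.println(rs.getString(1) + " -> " + rs.getString(2));
            }
        }
    }
}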

Example 3 with PIndexState

use of org.apache.phoenix.schema.PIndexState in project phoenix by apache.

In class MetaDataEndpointImpl, method getTable:

private PTable getTable(RegionScanner scanner, long clientTimeStamp, long tableTimeStamp) throws IOException, SQLException {
    List<Cell> results = Lists.newArrayList();
    scanner.next(results);
    if (results.isEmpty()) {
        return null;
    }
    Cell[] tableKeyValues = new Cell[TABLE_KV_COLUMNS.size()];
    Cell[] colKeyValues = new Cell[COLUMN_KV_COLUMNS.size()];
    // Create PTable based on KeyValues from scan
    Cell keyValue = results.get(0);
    byte[] keyBuffer = keyValue.getRowArray();
    int keyLength = keyValue.getRowLength();
    int keyOffset = keyValue.getRowOffset();
    PName tenantId = newPName(keyBuffer, keyOffset, keyLength);
    int tenantIdLength = (tenantId == null) ? 0 : tenantId.getBytes().length;
    if (tenantIdLength == 0) {
        tenantId = null;
    }
    PName schemaName = newPName(keyBuffer, keyOffset + tenantIdLength + 1, keyLength);
    int schemaNameLength = schemaName.getBytes().length;
    int tableNameLength = keyLength - schemaNameLength - 1 - tenantIdLength - 1;
    byte[] tableNameBytes = new byte[tableNameLength];
    System.arraycopy(keyBuffer, keyOffset + schemaNameLength + 1 + tenantIdLength + 1, tableNameBytes, 0, tableNameLength);
    PName tableName = PNameFactory.newName(tableNameBytes);
    int offset = tenantIdLength + schemaNameLength + tableNameLength + 3;
    // This will prevent the client from continually looking for the current
    // table when we know that there will never be one since we disallow updates
    // unless the table is the latest
    // If we already have a table newer than the one we just found and
    // the client timestamp is less than the existing table time stamp,
    // bump up the timeStamp to right before the client time stamp, since
    // we know it can't possibly change.
    long timeStamp = keyValue.getTimestamp();
    // long timeStamp = tableTimeStamp > keyValue.getTimestamp() &&
    // clientTimeStamp < tableTimeStamp
    // ? clientTimeStamp-1
    // : keyValue.getTimestamp();
    int i = 0;
    int j = 0;
    while (i < results.size() && j < TABLE_KV_COLUMNS.size()) {
        Cell kv = results.get(i);
        Cell searchKv = TABLE_KV_COLUMNS.get(j);
        int cmp = Bytes.compareTo(kv.getQualifierArray(), kv.getQualifierOffset(), kv.getQualifierLength(), searchKv.getQualifierArray(), searchKv.getQualifierOffset(), searchKv.getQualifierLength());
        if (cmp == 0) {
            // Find max timestamp of table
            timeStamp = Math.max(timeStamp, kv.getTimestamp());
            // header row
            tableKeyValues[j++] = kv;
            i++;
        } else if (cmp > 0) {
            timeStamp = Math.max(timeStamp, kv.getTimestamp());
            tableKeyValues[j++] = null;
        } else {
            // shouldn't happen - means unexpected KV in system table header row
            i++;
        }
    }
    // TABLE_TYPE, TABLE_SEQ_NUM and COLUMN_COUNT are required.
    if (tableKeyValues[TABLE_TYPE_INDEX] == null || tableKeyValues[TABLE_SEQ_NUM_INDEX] == null || tableKeyValues[COLUMN_COUNT_INDEX] == null) {
        throw new IllegalStateException("Didn't find expected key values for table row in metadata row");
    }
    Cell tableTypeKv = tableKeyValues[TABLE_TYPE_INDEX];
    PTableType tableType = PTableType.fromSerializedValue(tableTypeKv.getValueArray()[tableTypeKv.getValueOffset()]);
    Cell tableSeqNumKv = tableKeyValues[TABLE_SEQ_NUM_INDEX];
    long tableSeqNum = PLong.INSTANCE.getCodec().decodeLong(tableSeqNumKv.getValueArray(), tableSeqNumKv.getValueOffset(), SortOrder.getDefault());
    Cell columnCountKv = tableKeyValues[COLUMN_COUNT_INDEX];
    int columnCount = PInteger.INSTANCE.getCodec().decodeInt(columnCountKv.getValueArray(), columnCountKv.getValueOffset(), SortOrder.getDefault());
    Cell pkNameKv = tableKeyValues[PK_NAME_INDEX];
    PName pkName = pkNameKv != null ? newPName(pkNameKv.getValueArray(), pkNameKv.getValueOffset(), pkNameKv.getValueLength()) : null;
    Cell saltBucketNumKv = tableKeyValues[SALT_BUCKETS_INDEX];
    Integer saltBucketNum = saltBucketNumKv != null ? (Integer) PInteger.INSTANCE.getCodec().decodeInt(saltBucketNumKv.getValueArray(), saltBucketNumKv.getValueOffset(), SortOrder.getDefault()) : null;
    if (saltBucketNum != null && saltBucketNum.intValue() == 0) {
        // Zero salt buckets means not salted
        saltBucketNum = null;
    }
    Cell dataTableNameKv = tableKeyValues[DATA_TABLE_NAME_INDEX];
    PName dataTableName = dataTableNameKv != null ? newPName(dataTableNameKv.getValueArray(), dataTableNameKv.getValueOffset(), dataTableNameKv.getValueLength()) : null;
    Cell indexStateKv = tableKeyValues[INDEX_STATE_INDEX];
    PIndexState indexState = indexStateKv == null ? null : PIndexState.fromSerializedValue(indexStateKv.getValueArray()[indexStateKv.getValueOffset()]);
    Cell immutableRowsKv = tableKeyValues[IMMUTABLE_ROWS_INDEX];
    boolean isImmutableRows = immutableRowsKv == null ? false : (Boolean) PBoolean.INSTANCE.toObject(immutableRowsKv.getValueArray(), immutableRowsKv.getValueOffset(), immutableRowsKv.getValueLength());
    Cell defaultFamilyNameKv = tableKeyValues[DEFAULT_COLUMN_FAMILY_INDEX];
    PName defaultFamilyName = defaultFamilyNameKv != null ? newPName(defaultFamilyNameKv.getValueArray(), defaultFamilyNameKv.getValueOffset(), defaultFamilyNameKv.getValueLength()) : null;
    Cell viewStatementKv = tableKeyValues[VIEW_STATEMENT_INDEX];
    String viewStatement = viewStatementKv != null ? (String) PVarchar.INSTANCE.toObject(viewStatementKv.getValueArray(), viewStatementKv.getValueOffset(), viewStatementKv.getValueLength()) : null;
    Cell disableWALKv = tableKeyValues[DISABLE_WAL_INDEX];
    boolean disableWAL = disableWALKv == null ? PTable.DEFAULT_DISABLE_WAL : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(disableWALKv.getValueArray(), disableWALKv.getValueOffset(), disableWALKv.getValueLength()));
    Cell multiTenantKv = tableKeyValues[MULTI_TENANT_INDEX];
    boolean multiTenant = multiTenantKv == null ? false : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(multiTenantKv.getValueArray(), multiTenantKv.getValueOffset(), multiTenantKv.getValueLength()));
    Cell storeNullsKv = tableKeyValues[STORE_NULLS_INDEX];
    boolean storeNulls = storeNullsKv == null ? false : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(storeNullsKv.getValueArray(), storeNullsKv.getValueOffset(), storeNullsKv.getValueLength()));
    Cell transactionalKv = tableKeyValues[TRANSACTIONAL_INDEX];
    boolean transactional = transactionalKv == null ? false : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(transactionalKv.getValueArray(), transactionalKv.getValueOffset(), transactionalKv.getValueLength()));
    Cell viewTypeKv = tableKeyValues[VIEW_TYPE_INDEX];
    ViewType viewType = viewTypeKv == null ? null : ViewType.fromSerializedValue(viewTypeKv.getValueArray()[viewTypeKv.getValueOffset()]);
    Cell viewIndexIdKv = tableKeyValues[VIEW_INDEX_ID_INDEX];
    Short viewIndexId = viewIndexIdKv == null ? null : (Short) MetaDataUtil.getViewIndexIdDataType().getCodec().decodeShort(viewIndexIdKv.getValueArray(), viewIndexIdKv.getValueOffset(), SortOrder.getDefault());
    Cell indexTypeKv = tableKeyValues[INDEX_TYPE_INDEX];
    IndexType indexType = indexTypeKv == null ? null : IndexType.fromSerializedValue(indexTypeKv.getValueArray()[indexTypeKv.getValueOffset()]);
    Cell baseColumnCountKv = tableKeyValues[BASE_COLUMN_COUNT_INDEX];
    int baseColumnCount = baseColumnCountKv == null ? 0 : PInteger.INSTANCE.getCodec().decodeInt(baseColumnCountKv.getValueArray(), baseColumnCountKv.getValueOffset(), SortOrder.getDefault());
    Cell rowKeyOrderOptimizableKv = tableKeyValues[ROW_KEY_ORDER_OPTIMIZABLE_INDEX];
    boolean rowKeyOrderOptimizable = rowKeyOrderOptimizableKv == null ? false : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(rowKeyOrderOptimizableKv.getValueArray(), rowKeyOrderOptimizableKv.getValueOffset(), rowKeyOrderOptimizableKv.getValueLength()));
    Cell updateCacheFrequencyKv = tableKeyValues[UPDATE_CACHE_FREQUENCY_INDEX];
    long updateCacheFrequency = updateCacheFrequencyKv == null ? 0 : PLong.INSTANCE.getCodec().decodeLong(updateCacheFrequencyKv.getValueArray(), updateCacheFrequencyKv.getValueOffset(), SortOrder.getDefault());
    Cell indexDisableTimestampKv = tableKeyValues[INDEX_DISABLE_TIMESTAMP];
    long indexDisableTimestamp = indexDisableTimestampKv == null ? 0L : PLong.INSTANCE.getCodec().decodeLong(indexDisableTimestampKv.getValueArray(), indexDisableTimestampKv.getValueOffset(), SortOrder.getDefault());
    Cell isNamespaceMappedKv = tableKeyValues[IS_NAMESPACE_MAPPED_INDEX];
    boolean isNamespaceMapped = isNamespaceMappedKv == null ? false : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(isNamespaceMappedKv.getValueArray(), isNamespaceMappedKv.getValueOffset(), isNamespaceMappedKv.getValueLength()));
    Cell autoPartitionSeqKv = tableKeyValues[AUTO_PARTITION_SEQ_INDEX];
    String autoPartitionSeq = autoPartitionSeqKv != null ? (String) PVarchar.INSTANCE.toObject(autoPartitionSeqKv.getValueArray(), autoPartitionSeqKv.getValueOffset(), autoPartitionSeqKv.getValueLength()) : null;
    Cell isAppendOnlySchemaKv = tableKeyValues[APPEND_ONLY_SCHEMA_INDEX];
    boolean isAppendOnlySchema = isAppendOnlySchemaKv == null ? false : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(isAppendOnlySchemaKv.getValueArray(), isAppendOnlySchemaKv.getValueOffset(), isAppendOnlySchemaKv.getValueLength()));
    Cell storageSchemeKv = tableKeyValues[STORAGE_SCHEME_INDEX];
    //TODO: change this once we start having other values for storage schemes
    ImmutableStorageScheme storageScheme = storageSchemeKv == null ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN : ImmutableStorageScheme.fromSerializedValue((byte) PTinyint.INSTANCE.toObject(storageSchemeKv.getValueArray(), storageSchemeKv.getValueOffset(), storageSchemeKv.getValueLength()));
    Cell encodingSchemeKv = tableKeyValues[QUALIFIER_ENCODING_SCHEME_INDEX];
    QualifierEncodingScheme encodingScheme = encodingSchemeKv == null ? QualifierEncodingScheme.NON_ENCODED_QUALIFIERS : QualifierEncodingScheme.fromSerializedValue((byte) PTinyint.INSTANCE.toObject(encodingSchemeKv.getValueArray(), encodingSchemeKv.getValueOffset(), encodingSchemeKv.getValueLength()));
    Cell useStatsForParallelizationKv = tableKeyValues[USE_STATS_FOR_PARALLELIZATION_INDEX];
    boolean useStatsForParallelization = useStatsForParallelizationKv == null ? true : Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(useStatsForParallelizationKv.getValueArray(), useStatsForParallelizationKv.getValueOffset(), useStatsForParallelizationKv.getValueLength()));
    List<PColumn> columns = Lists.newArrayListWithExpectedSize(columnCount);
    List<PTable> indexes = Lists.newArrayList();
    List<PName> physicalTables = Lists.newArrayList();
    PName parentTableName = tableType == INDEX ? dataTableName : null;
    PName parentSchemaName = tableType == INDEX ? schemaName : null;
    EncodedCQCounter cqCounter = (!EncodedColumnsUtil.usesEncodedColumnNames(encodingScheme) || tableType == PTableType.VIEW) ? PTable.EncodedCQCounter.NULL_COUNTER : new EncodedCQCounter();
    while (true) {
        results.clear();
        scanner.next(results);
        if (results.isEmpty()) {
            break;
        }
        Cell colKv = results.get(LINK_TYPE_INDEX);
        int colKeyLength = colKv.getRowLength();
        PName colName = newPName(colKv.getRowArray(), colKv.getRowOffset() + offset, colKeyLength - offset);
        int colKeyOffset = offset + colName.getBytes().length + 1;
        PName famName = newPName(colKv.getRowArray(), colKv.getRowOffset() + colKeyOffset, colKeyLength - colKeyOffset);
        if (isQualifierCounterKV(colKv)) {
            Integer value = PInteger.INSTANCE.getCodec().decodeInt(colKv.getValueArray(), colKv.getValueOffset(), SortOrder.ASC);
            cqCounter.setValue(famName.getString(), value);
        } else if (Bytes.compareTo(LINK_TYPE_BYTES, 0, LINK_TYPE_BYTES.length, colKv.getQualifierArray(), colKv.getQualifierOffset(), colKv.getQualifierLength()) == 0) {
            LinkType linkType = LinkType.fromSerializedValue(colKv.getValueArray()[colKv.getValueOffset()]);
            if (linkType == LinkType.INDEX_TABLE) {
                addIndexToTable(tenantId, schemaName, famName, tableName, clientTimeStamp, indexes);
            } else if (linkType == LinkType.PHYSICAL_TABLE) {
                physicalTables.add(famName);
            } else if (linkType == LinkType.PARENT_TABLE) {
                parentTableName = PNameFactory.newName(SchemaUtil.getTableNameFromFullName(famName.getBytes()));
                parentSchemaName = PNameFactory.newName(SchemaUtil.getSchemaNameFromFullName(famName.getBytes()));
            }
        } else {
            addColumnToTable(results, colName, famName, colKeyValues, columns, saltBucketNum != null);
        }
    }
    // Issuing an RPC to a remote server while holding this lock is a bad idea and likely to cause contention.
    return PTableImpl.makePTable(tenantId, schemaName, tableName, tableType, indexState, timeStamp, tableSeqNum, pkName, saltBucketNum, columns, parentSchemaName, parentTableName, indexes, isImmutableRows, physicalTables, defaultFamilyName, viewStatement, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, rowKeyOrderOptimizable, transactional, updateCacheFrequency, baseColumnCount, indexDisableTimestamp, isNamespaceMapped, autoPartitionSeq, isAppendOnlySchema, storageScheme, encodingScheme, cqCounter, useStatsForParallelization);
}
Also used: ByteString (com.google.protobuf.ByteString) PTable (org.apache.phoenix.schema.PTable) PColumn (org.apache.phoenix.schema.PColumn) QualifierEncodingScheme (org.apache.phoenix.schema.PTable.QualifierEncodingScheme) ImmutableStorageScheme (org.apache.phoenix.schema.PTable.ImmutableStorageScheme) IndexType (org.apache.phoenix.schema.PTable.IndexType) Cell (org.apache.hadoop.hbase.Cell) PIndexState (org.apache.phoenix.schema.PIndexState) PTableType (org.apache.phoenix.schema.PTableType) PTinyint (org.apache.phoenix.schema.types.PTinyint) PSmallint (org.apache.phoenix.schema.types.PSmallint) PInteger (org.apache.phoenix.schema.types.PInteger) EncodedCQCounter (org.apache.phoenix.schema.PTable.EncodedCQCounter) PName (org.apache.phoenix.schema.PName) LinkType (org.apache.phoenix.schema.PTable.LinkType) ViewType (org.apache.phoenix.schema.PTable.ViewType)
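
Most of getTable is the same decode-or-default pattern applied to each optional header cell. The helper below is a hypothetical distillation (it does not exist in MetaDataEndpointImpl) of the boolean flavour of that pattern, mirroring how the MULTI_TENANT, STORE_NULLS and TRANSACTIONAL cells are read above.

import org.apache.hadoop.hbase.Cell;
import org.apache.phoenix.schema.types.PBoolean;

final class HeaderCellDecoding {
    private HeaderCellDecoding() {}

    // A missing cell means "use the default"; a present cell is decoded with the PBoolean codec,
    // exactly as the multiTenant/storeNulls/transactional lines do in getTable.
    static boolean decodeBoolean(Cell cell, boolean defaultValue) {
        if (cell == null) {
            return defaultValue;
        }
        return Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(
                cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
    }
}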

Example 4 with PIndexState

use of org.apache.phoenix.schema.PIndexState in project phoenix by apache.

In class MetaDataEndpointImpl, method updateIndexState:

@Override
public void updateIndexState(RpcController controller, UpdateIndexStateRequest request, RpcCallback<MetaDataResponse> done) {
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    byte[] schemaName = null;
    byte[] tableName = null;
    try {
        byte[][] rowKeyMetaData = new byte[3][];
        List<Mutation> tableMetadata = ProtobufUtil.getMutations(request);
        MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
        byte[] tenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        final byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
        Region region = env.getRegion();
        MetaDataMutationResult result = checkTableKeyInRegion(key, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        long timeStamp = MetaDataUtil.getClientTimeStamp(tableMetadata);
        ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
        List<Cell> newKVs = tableMetadata.get(0).getFamilyCellMap().get(TABLE_FAMILY_BYTES);
        Cell newKV = null;
        int disableTimeStampKVIndex = -1;
        int indexStateKVIndex = 0;
        int index = 0;
        for (Cell cell : newKVs) {
            if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), INDEX_STATE_BYTES, 0, INDEX_STATE_BYTES.length) == 0) {
                newKV = cell;
                indexStateKVIndex = index;
            } else if (Bytes.compareTo(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(), INDEX_DISABLE_TIMESTAMP_BYTES, 0, INDEX_DISABLE_TIMESTAMP_BYTES.length) == 0) {
                disableTimeStampKVIndex = index;
            }
            index++;
        }
        PIndexState newState = PIndexState.fromSerializedValue(newKV.getValueArray()[newKV.getValueOffset()]);
        RowLock rowLock = region.getRowLock(key, false);
        if (rowLock == null) {
            throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
        }
        try {
            Get get = new Get(key);
            get.setTimeRange(PTable.INITIAL_SEQ_NUM, timeStamp);
            get.addColumn(TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
            get.addColumn(TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
            get.addColumn(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
            get.addColumn(TABLE_FAMILY_BYTES, ROW_KEY_ORDER_OPTIMIZABLE_BYTES);
            Result currentResult = region.get(get);
            if (currentResult.rawCells().length == 0) {
                builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
                builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                done.run(builder.build());
                return;
            }
            Cell dataTableKV = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
            Cell currentStateKV = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_STATE_BYTES);
            Cell currentDisableTimeStamp = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, INDEX_DISABLE_TIMESTAMP_BYTES);
            boolean rowKeyOrderOptimizable = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, ROW_KEY_ORDER_OPTIMIZABLE_BYTES) != null;
            PIndexState currentState = PIndexState.fromSerializedValue(currentStateKV.getValueArray()[currentStateKV.getValueOffset()]);
            if ((currentDisableTimeStamp != null && currentDisableTimeStamp.getValueLength() > 0) && (disableTimeStampKVIndex >= 0)) {
                long curTimeStampVal = (Long) PLong.INSTANCE.toObject(currentDisableTimeStamp.getValueArray(), currentDisableTimeStamp.getValueOffset(), currentDisableTimeStamp.getValueLength());
                // new DisableTimeStamp is passed in
                Cell newDisableTimeStampCell = newKVs.get(disableTimeStampKVIndex);
                long newDisableTimeStamp = (Long) PLong.INSTANCE.toObject(newDisableTimeStampCell.getValueArray(), newDisableTimeStampCell.getValueOffset(), newDisableTimeStampCell.getValueLength());
                // Keep the older disable timestamp; do not reset it each time a new data table write occurs.
                if (curTimeStampVal != 0 && Math.abs(curTimeStampVal) < Math.abs(newDisableTimeStamp)) {
                    // do not reset the disable timestamp
                    newKVs.remove(disableTimeStampKVIndex);
                    disableTimeStampKVIndex = -1;
                }
            }
            // Detect invalid transitions
            if (currentState == PIndexState.BUILDING) {
                if (newState == PIndexState.USABLE) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
            } else if (currentState == PIndexState.DISABLE) {
                if (newState != PIndexState.BUILDING && newState != PIndexState.DISABLE && newState != PIndexState.INACTIVE) {
                    builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
                    builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
                    done.run(builder.build());
                    return;
                }
                // Done building, but was disabled before that, so it stays in the disabled state
                if (newState == PIndexState.ACTIVE) {
                    newState = PIndexState.DISABLE;
                }
            }
            if (currentState == PIndexState.BUILDING && newState != PIndexState.ACTIVE) {
                timeStamp = currentStateKV.getTimestamp();
            }
            if ((currentState == PIndexState.UNUSABLE && newState == PIndexState.ACTIVE) || (currentState == PIndexState.ACTIVE && newState == PIndexState.UNUSABLE)) {
                newState = PIndexState.INACTIVE;
                newKVs.set(indexStateKVIndex, KeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue())));
            } else if (currentState == PIndexState.INACTIVE && newState == PIndexState.USABLE) {
                newState = PIndexState.ACTIVE;
                newKVs.set(indexStateKVIndex, KeyValueUtil.newKeyValue(key, TABLE_FAMILY_BYTES, INDEX_STATE_BYTES, timeStamp, Bytes.toBytes(newState.getSerializedValue())));
            }
            PTable returnTable = null;
            if (currentState != newState || disableTimeStampKVIndex != -1) {
                byte[] dataTableKey = null;
                if (dataTableKV != null) {
                    dataTableKey = SchemaUtil.getTableKey(tenantId, schemaName, dataTableKV.getValue());
                }
                if (dataTableKey != null) {
                    // make a copy of tableMetadata
                    tableMetadata = new ArrayList<Mutation>(tableMetadata);
                    // insert an empty KV to trigger time stamp update on data table row
                    Put p = new Put(dataTableKey);
                    p.add(TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timeStamp, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
                    tableMetadata.add(p);
                }
                boolean setRowKeyOrderOptimizableCell = newState == PIndexState.BUILDING && !rowKeyOrderOptimizable;
                // Set the row key order optimizable cell so that the row keys get generated using the new row key format
                if (setRowKeyOrderOptimizableCell) {
                    UpgradeUtil.addRowKeyOrderOptimizableCell(tableMetadata, key, timeStamp);
                }
                region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
                // Invalidate from cache
                Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
                metaDataCache.invalidate(cacheKey);
                if (dataTableKey != null) {
                    metaDataCache.invalidate(new ImmutableBytesPtr(dataTableKey));
                }
                if (setRowKeyOrderOptimizableCell || disableTimeStampKVIndex != -1 || currentState == PIndexState.DISABLE || newState == PIndexState.BUILDING) {
                    returnTable = doGetTable(key, HConstants.LATEST_TIMESTAMP, rowLock);
                }
            }
            // Get client timeStamp from mutations, since it may get updated by the
            // mutateRowsWithLocks call
            long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
            builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
            builder.setMutationTime(currentTime);
            if (returnTable != null) {
                builder.setTable(PTableImpl.toProto(returnTable));
            }
            done.run(builder.build());
            return;
        } finally {
            rowLock.release();
        }
    } catch (Throwable t) {
        logger.error("updateIndexState failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
    }
}
Also used: PTable (org.apache.phoenix.schema.PTable) Result (org.apache.hadoop.hbase.client.Result) PMetaDataEntity (org.apache.phoenix.schema.PMetaDataEntity) Cell (org.apache.hadoop.hbase.Cell) RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock) MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) PIndexState (org.apache.phoenix.schema.PIndexState) ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) IOException (java.io.IOException) DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) PTinyint (org.apache.phoenix.schema.types.PTinyint) PSmallint (org.apache.phoenix.schema.types.PSmallint) Put (org.apache.hadoop.hbase.client.Put) Get (org.apache.hadoop.hbase.client.Get) PLong (org.apache.phoenix.schema.types.PLong) Region (org.apache.hadoop.hbase.regionserver.Region) Mutation (org.apache.hadoop.hbase.client.Mutation)
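
The transition checks embedded in the method above are easier to read in isolation. The helper below is a hypothetical distillation, not part of MetaDataEndpointImpl, reproducing those checks in the same order; a null return stands in for the UNALLOWED_TABLE_MUTATION response.

import org.apache.phoenix.schema.PIndexState;

final class IndexStateTransitions {
    private IndexStateTransitions() {}

    /** Returns the state that would actually be written, or null if the transition is disallowed. */
    static PIndexState resolve(PIndexState current, PIndexState requested) {
        PIndexState newState = requested;
        if (current == PIndexState.BUILDING && newState == PIndexState.USABLE) {
            return null; // rejected with UNALLOWED_TABLE_MUTATION
        }
        if (current == PIndexState.DISABLE) {
            if (newState != PIndexState.BUILDING && newState != PIndexState.DISABLE
                    && newState != PIndexState.INACTIVE) {
                return null; // rejected with UNALLOWED_TABLE_MUTATION
            }
            // Done building, but was disabled before that, so it stays disabled.
            if (newState == PIndexState.ACTIVE) {
                newState = PIndexState.DISABLE;
            }
        }
        if ((current == PIndexState.UNUSABLE && newState == PIndexState.ACTIVE)
                || (current == PIndexState.ACTIVE && newState == PIndexState.UNUSABLE)) {
            newState = PIndexState.INACTIVE;
        } else if (current == PIndexState.INACTIVE && newState == PIndexState.USABLE) {
            newState = PIndexState.ACTIVE;
        }
        return newState;
    }
}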

Example 5 with PIndexState

use of org.apache.phoenix.schema.PIndexState in project phoenix by apache.

In class PhoenixIndexFailurePolicy, method handleFailureWithExceptions:

private long handleFailureWithExceptions(Multimap<HTableInterfaceReference, Mutation> attempted, Exception cause) throws Throwable {
    Set<HTableInterfaceReference> refs = attempted.asMap().keySet();
    Map<String, Long> indexTableNames = new HashMap<String, Long>(refs.size());
    // start by looking at all the tables to which we attempted to write
    long timestamp = 0;
    boolean leaveIndexActive = blockDataTableWritesOnFailure || !disableIndexOnFailure;
    for (HTableInterfaceReference ref : refs) {
        long minTimeStamp = 0;
        // get the minimum timestamp across all the mutations we attempted on that table
        // FIXME: all cell timestamps should be the same
        Collection<Mutation> mutations = attempted.get(ref);
        if (mutations != null) {
            for (Mutation m : mutations) {
                for (List<Cell> kvs : m.getFamilyCellMap().values()) {
                    for (Cell kv : kvs) {
                        if (minTimeStamp == 0 || (kv.getTimestamp() >= 0 && minTimeStamp > kv.getTimestamp())) {
                            minTimeStamp = kv.getTimestamp();
                        }
                    }
                }
            }
        }
        timestamp = minTimeStamp;
        // If the data table has local index column families then get local indexes to disable.
        if (ref.getTableName().equals(env.getRegion().getTableDesc().getNameAsString()) && MetaDataUtil.hasLocalIndexColumnFamily(env.getRegion().getTableDesc())) {
            for (String tableName : getLocalIndexNames(ref, mutations)) {
                indexTableNames.put(tableName, minTimeStamp);
            }
        } else {
            indexTableNames.put(ref.getTableName(), minTimeStamp);
        }
    }
    // Nothing to do if we're not disabling the index and not rebuilding on failure
    if (!disableIndexOnFailure && !rebuildIndexOnFailure) {
        return timestamp;
    }
    PIndexState newState = disableIndexOnFailure ? PIndexState.DISABLE : PIndexState.ACTIVE;
    // For all the index tables that we've found, try to disable them; if that fails, fall back to the default failure policy.
    for (Map.Entry<String, Long> tableTimeElement : indexTableNames.entrySet()) {
        String indexTableName = tableTimeElement.getKey();
        long minTimeStamp = tableTimeElement.getValue();
        // Use a negative time stamp to differentiate the case where the index is left active.
        if (!disableIndexOnFailure && !blockDataTableWritesOnFailure) {
            minTimeStamp *= -1;
        }
        // Disable the index by using the updateIndexState method of the MetaDataProtocol endpoint coprocessor.
        HTableInterface systemTable = env.getTable(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()));
        MetaDataMutationResult result = IndexUtil.setIndexDisableTimeStamp(indexTableName, minTimeStamp, systemTable, newState);
        if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
            LOG.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
            continue;
        }
        if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
            if (leaveIndexActive) {
                LOG.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = " + result.getMutationCode());
                // Only throw here (which will lead to the RS being shut down) when data table writes should be blocked on failure.
                if (blockDataTableWritesOnFailure) {
                    throw new DoNotRetryIOException("Attempt to update INDEX_DISABLE_TIMESTAMP failed.");
                }
            } else {
                LOG.warn("Attempt to disable index " + indexTableName + " failed with code = " + result.getMutationCode() + ". Will use default failure policy instead.");
                throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
            }
        }
        if (leaveIndexActive)
            LOG.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName + " due to an exception while writing updates.", cause);
        else
            LOG.info("Successfully disabled index " + indexTableName + " due to an exception while writing updates.", cause);
    }
    // Return the cell time stamp (note they should all be the same)
    return timestamp;
}
Also used: HashMap (java.util.HashMap) DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) PIndexState (org.apache.phoenix.schema.PIndexState) HTableInterface (org.apache.hadoop.hbase.client.HTableInterface) HTableInterfaceReference (org.apache.phoenix.hbase.index.table.HTableInterfaceReference) Mutation (org.apache.hadoop.hbase.client.Mutation) Cell (org.apache.hadoop.hbase.Cell) Map (java.util.Map) MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
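
The interplay of disableIndexOnFailure and blockDataTableWritesOnFailure above decides both the target index state and the sign of the time stamp handed to IndexUtil.setIndexDisableTimeStamp. The sketch below is a hypothetical condensation of that decision, not part of PhoenixIndexFailurePolicy.

import org.apache.phoenix.schema.PIndexState;

final class FailurePolicyDecision {
    final PIndexState newState;
    final long disableTimestamp;

    FailurePolicyDecision(boolean disableIndexOnFailure,
                          boolean blockDataTableWritesOnFailure,
                          long minTimeStamp) {
        // Same choice as handleFailureWithExceptions: disable the index, or leave it ACTIVE.
        this.newState = disableIndexOnFailure ? PIndexState.DISABLE : PIndexState.ACTIVE;
        // Negate the time stamp when the index is left active and data table writes are not blocked,
        // matching the "minTimeStamp *= -1" branch above.
        this.disableTimestamp = (!disableIndexOnFailure && !blockDataTableWritesOnFailure)
                ? -minTimeStamp : minTimeStamp;
    }
}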

Aggregations

PIndexState (org.apache.phoenix.schema.PIndexState): 5
Cell (org.apache.hadoop.hbase.Cell): 3
Mutation (org.apache.hadoop.hbase.client.Mutation): 3
PTable (org.apache.phoenix.schema.PTable): 3
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 2
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 2
PName (org.apache.phoenix.schema.PName): 2
PSmallint (org.apache.phoenix.schema.types.PSmallint): 2
PTinyint (org.apache.phoenix.schema.types.PTinyint): 2
ByteString (com.google.protobuf.ByteString): 1
IOException (java.io.IOException): 1
HashMap (java.util.HashMap): 1
Map (java.util.Map): 1
Get (org.apache.hadoop.hbase.client.Get): 1
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 1
Put (org.apache.hadoop.hbase.client.Put): 1
Result (org.apache.hadoop.hbase.client.Result): 1
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 1
Region (org.apache.hadoop.hbase.regionserver.Region): 1
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 1