Example 11 with MetaDataService

Use of org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService in the Apache Phoenix project.

From class ConnectionQueryServicesImpl, method clearTableFromCache.

@Override
public void clearTableFromCache(final byte[] tenantId, final byte[] schemaName, final byte[] tableName, final long clientTS) throws SQLException {
    // clear the meta data cache for the table here
    try {
        SQLException sqlE = null;
        HTableInterface htable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName());
        try {
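            // Run the endpoint on every region of SYSTEM.CATALOG: empty start/end rows span the whole table.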
            htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, new Batch.Call<MetaDataService, ClearTableFromCacheResponse>() {

                @Override
                public ClearTableFromCacheResponse call(MetaDataService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<ClearTableFromCacheResponse> rpcCallback = new BlockingRpcCallback<ClearTableFromCacheResponse>();
                    ClearTableFromCacheRequest.Builder builder = ClearTableFromCacheRequest.newBuilder();
                    builder.setTenantId(ByteStringer.wrap(tenantId));
                    builder.setTableName(ByteStringer.wrap(tableName));
                    builder.setSchemaName(ByteStringer.wrap(schemaName));
                    builder.setClientTimestamp(clientTS);
                    builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
                    instance.clearTableFromCache(controller, builder.build(), rpcCallback);
                    if (controller.getFailedOn() != null) {
                        throw controller.getFailedOn();
                    }
                    return rpcCallback.get();
                }
            });
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        } catch (Throwable e) {
            sqlE = new SQLException(e);
        } finally {
            try {
                htable.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            } finally {
                if (sqlE != null) {
                    throw sqlE;
                }
            }
        }
    } catch (Exception e) {
        throw new SQLException(ServerUtil.parseServerException(e));
    }
}
Also used: SQLException(java.sql.SQLException), KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder), NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder), PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder), IOException(java.io.IOException), PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException), HTableInterface(org.apache.hadoop.hbase.client.HTableInterface), ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController), TableAlreadyExistsException(org.apache.phoenix.schema.TableAlreadyExistsException), UpgradeInProgressException(org.apache.phoenix.exception.UpgradeInProgressException), AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException), RetriableUpgradeException(org.apache.phoenix.exception.RetriableUpgradeException), ExecutionException(java.util.concurrent.ExecutionException), UpgradeNotRequiredException(org.apache.phoenix.exception.UpgradeNotRequiredException), NewerTableAlreadyExistsException(org.apache.phoenix.schema.NewerTableAlreadyExistsException), NewerSchemaAlreadyExistsException(org.apache.phoenix.schema.NewerSchemaAlreadyExistsException), TableExistsException(org.apache.hadoop.hbase.TableExistsException), RemoteException(org.apache.hadoop.ipc.RemoteException), ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException), TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException), ColumnAlreadyExistsException(org.apache.phoenix.schema.ColumnAlreadyExistsException), TimeoutException(java.util.concurrent.TimeoutException), FunctionNotFoundException(org.apache.phoenix.schema.FunctionNotFoundException), NamespaceNotFoundException(org.apache.hadoop.hbase.NamespaceNotFoundException), ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException), EmptySequenceCacheException(org.apache.phoenix.schema.EmptySequenceCacheException), ClearTableFromCacheResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearTableFromCacheResponse), MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService), Batch(org.apache.hadoop.hbase.client.coprocessor.Batch), BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback)
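
For context, a minimal, non-authoritative sketch of calling this method from the client side, assuming an open Phoenix JDBC connection; the connection URL, schema, and table names are hypothetical:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;
import org.apache.phoenix.util.ByteUtil;

public class ClearCacheSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            ConnectionQueryServices services =
                    conn.unwrap(PhoenixConnection.class).getQueryServices();
            // Evict MY_SCHEMA.MY_TABLE from the server-side metadata cache for the
            // global tenant; LATEST_TIMESTAMP clears it regardless of client timestamp.
            services.clearTableFromCache(
                    ByteUtil.EMPTY_BYTE_ARRAY,
                    Bytes.toBytes("MY_SCHEMA"),
                    Bytes.toBytes("MY_TABLE"),
                    HConstants.LATEST_TIMESTAMP);
        }
    }
}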

Example 12 with MetaDataService

Use of org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService in the Apache Phoenix project.

From class IndexUtil, method updateIndexState.

public static MetaDataMutationResult updateIndexState(byte[] indexTableKey, long minTimeStamp, HTableInterface metaTable, PIndexState newState) throws Throwable {
    // Mimic the Put that gets generated by the client on an update of the index state
    Put put = new Put(indexTableKey);
    put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES, newState.getSerializedBytes());
    put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(minTimeStamp));
    put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(0));
    final List<Mutation> tableMetadata = Collections.<Mutation>singletonList(put);
    final Map<byte[], MetaDataResponse> results = metaTable.coprocessorService(MetaDataService.class, indexTableKey, indexTableKey, new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            UpdateIndexStateRequest.Builder builder = UpdateIndexStateRequest.newBuilder();
            for (Mutation m : tableMetadata) {
                MutationProto mp = ProtobufUtil.toProto(m);
                builder.addTableMetadataMutations(mp.toByteString());
            }
            builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
            instance.updateIndexState(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
    if (results.isEmpty()) {
        throw new IOException("Didn't get expected result size");
    }
    MetaDataResponse tmpResponse = results.values().iterator().next();
    return MetaDataMutationResult.constructFromProto(tmpResponse);
}
Also used: MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse), KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder), IOException(java.io.IOException), ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController), Put(org.apache.hadoop.hbase.client.Put), MutationProto(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto), MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService), Batch(org.apache.hadoop.hbase.client.coprocessor.Batch), BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback), Mutation(org.apache.hadoop.hbase.client.Mutation)
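
A hedged usage sketch of this utility: given the SYSTEM.CATALOG HTableInterface, the index row key can be built with SchemaUtil.getTableKey and the index marked DISABLE (the schema and index names below are hypothetical):

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
import org.apache.phoenix.schema.PIndexState;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.IndexUtil;
import org.apache.phoenix.util.SchemaUtil;

public class DisableIndexSketch {
    static MetaDataMutationResult disableIndex(HTableInterface sysCatalog) throws Throwable {
        // Row key of the index's header row: global tenant + schema + index name.
        byte[] indexTableKey = SchemaUtil.getTableKey(
                ByteUtil.EMPTY_BYTE_ARRAY,
                Bytes.toBytes("MY_SCHEMA"),
                Bytes.toBytes("MY_INDEX"));
        // A minTimeStamp of 0 writes a zeroed INDEX_DISABLE_TIMESTAMP alongside the new state.
        return IndexUtil.updateIndexState(indexTableKey, 0L, sysCatalog, PIndexState.DISABLE);
    }
}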

Example 13 with MetaDataService

Use of org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService in the Apache Phoenix project.

From class ConnectionQueryServicesImpl, method createTable.

@Override
public MetaDataMutationResult createTable(final List<Mutation> tableMetaData, final byte[] physicalTableName, PTableType tableType, Map<String, Object> tableProps, final List<Pair<byte[], Map<String, Object>>> families, byte[][] splits, boolean isNamespaceMapped, final boolean allocateIndexId) throws SQLException {
    byte[][] rowKeyMetadata = new byte[3][];
    Mutation m = MetaDataUtil.getPutOnlyTableHeaderRow(tableMetaData);
    byte[] key = m.getRow();
    SchemaUtil.getVarChars(key, rowKeyMetadata);
    byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
    byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
    byte[] tableName = physicalTableName != null ? physicalTableName : SchemaUtil.getPhysicalHBaseTableName(schemaBytes, tableBytes, isNamespaceMapped).getBytes();
    boolean localIndexTable = false;
    for (Pair<byte[], Map<String, Object>> family : families) {
        if (Bytes.toString(family.getFirst()).startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
            localIndexTable = true;
            break;
        }
    }
    if ((tableType == PTableType.VIEW && physicalTableName != null) || (tableType != PTableType.VIEW && (physicalTableName == null || localIndexTable))) {
        // For views this will ensure that metadata already exists
        // For tables and indexes, this will create the metadata if it doesn't already exist
        ensureTableCreated(tableName, tableType, tableProps, families, splits, true, isNamespaceMapped);
    }
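    // Reusable pointer for reading header-row cell values (multi-tenant flag, salt, default column family) via kvBuilder.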
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    if (tableType == PTableType.INDEX) {
        // TODO: if viewIndexId is Short.MIN_VALUE, then we don't need to attempt to create it
        if (physicalTableName != null) {
            if (!localIndexTable && !MetaDataUtil.isMultiTenant(m, kvBuilder, ptr)) {
                ensureViewIndexTableCreated(tenantIdBytes.length == 0 ? null : PNameFactory.newName(tenantIdBytes), physicalTableName, MetaDataUtil.getClientTimeStamp(m), isNamespaceMapped);
            }
        }
    } else if (tableType == PTableType.TABLE && MetaDataUtil.isMultiTenant(m, kvBuilder, ptr)) {
        // Create view index table up front for multi tenant tables
        ptr.set(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
        MetaDataUtil.getMutationValue(m, PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME_BYTES, kvBuilder, ptr);
        List<Pair<byte[], Map<String, Object>>> familiesPlusDefault = null;
        for (Pair<byte[], Map<String, Object>> family : families) {
            byte[] cf = family.getFirst();
            if (Bytes.compareTo(cf, 0, cf.length, ptr.get(), ptr.getOffset(), ptr.getLength()) == 0) {
                familiesPlusDefault = families;
                break;
            }
        }
        // Don't override if default family already present
        if (familiesPlusDefault == null) {
            byte[] defaultCF = ByteUtil.copyKeyBytesIfNecessary(ptr);
            // Only use splits if table is salted, otherwise it may not be applicable
            // Always add default column family, as we don't know in advance if we'll need it
            familiesPlusDefault = Lists.newArrayList(families);
            familiesPlusDefault.add(new Pair<byte[], Map<String, Object>>(defaultCF, Collections.<String, Object>emptyMap()));
        }
        ensureViewIndexTableCreated(tableName, tableProps, familiesPlusDefault, MetaDataUtil.isSalted(m, kvBuilder, ptr) ? splits : null, MetaDataUtil.getClientTimeStamp(m), isNamespaceMapped);
    }
    byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes);
    MetaDataMutationResult result = metaDataCoprocessorExec(tableKey, new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            CreateTableRequest.Builder builder = CreateTableRequest.newBuilder();
            for (Mutation m : tableMetaData) {
                MutationProto mp = ProtobufUtil.toProto(m);
                builder.addTableMetadataMutations(mp.toByteString());
            }
            builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
            if (allocateIndexId) {
                builder.setAllocateIndexId(allocateIndexId);
            }
            CreateTableRequest build = builder.build();
            instance.createTable(controller, build, rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
    return result;
}
Also used: ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable), MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse), KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder), NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder), PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder), IOException(java.io.IOException), PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException), ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController), MutationProto(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto), MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService), Batch(org.apache.hadoop.hbase.client.coprocessor.Batch), BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback), ImmutableList(com.google.common.collect.ImmutableList), ArrayList(java.util.ArrayList), List(java.util.List), Mutation(org.apache.hadoop.hbase.client.Mutation), Map(java.util.Map), TreeMap(java.util.TreeMap), ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap), ConcurrentMap(java.util.concurrent.ConcurrentMap), ImmutableMap(com.google.common.collect.ImmutableMap), HashMap(java.util.HashMap), MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult), CreateTableRequest(org.apache.phoenix.coprocessor.generated.MetaDataProtos.CreateTableRequest), Pair(org.apache.hadoop.hbase.util.Pair)
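
The rowKeyMetadata handling above relies on the SYSTEM.CATALOG row-key layout; here is a small sketch of the getTableKey/getVarChars round trip (tenant, schema, and table names are hypothetical):

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.util.SchemaUtil;

public class RowKeySketch {
    public static void main(String[] args) {
        // Build a SYSTEM.CATALOG row key: null-byte separated var-length parts.
        byte[] key = SchemaUtil.getTableKey(
                Bytes.toBytes("tenant1"), Bytes.toBytes("MY_SCHEMA"), Bytes.toBytes("MY_TABLE"));
        // Split it back into its three components.
        byte[][] parts = new byte[3][];
        SchemaUtil.getVarChars(key, parts);
        System.out.println(Bytes.toString(parts[PhoenixDatabaseMetaData.TENANT_ID_INDEX]));   // tenant1
        System.out.println(Bytes.toString(parts[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX])); // MY_SCHEMA
        System.out.println(Bytes.toString(parts[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]));  // MY_TABLE
    }
}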

Example 14 with MetaDataService

Use of org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService in the Apache Phoenix project.

From class ConnectionQueryServicesImpl, method addColumn.

@Override
public MetaDataMutationResult addColumn(final List<Mutation> tableMetaData, PTable table, Map<String, List<Pair<String, Object>>> stmtProperties, Set<String> colFamiliesForPColumnsToBeAdded, List<PColumn> columns) throws SQLException {
    List<Pair<byte[], Map<String, Object>>> families = new ArrayList<>(stmtProperties.size());
    Map<String, Object> tableProps = new HashMap<String, Object>();
    Set<HTableDescriptor> tableDescriptors = Collections.emptySet();
    Set<HTableDescriptor> origTableDescriptors = Collections.emptySet();
    boolean nonTxToTx = false;
    Pair<HTableDescriptor, HTableDescriptor> tableDescriptorPair = separateAndValidateProperties(table, stmtProperties, colFamiliesForPColumnsToBeAdded, tableProps);
    HTableDescriptor tableDescriptor = tableDescriptorPair.getSecond();
    HTableDescriptor origTableDescriptor = tableDescriptorPair.getFirst();
    if (tableDescriptor != null) {
        tableDescriptors = Sets.newHashSetWithExpectedSize(3 + table.getIndexes().size());
        origTableDescriptors = Sets.newHashSetWithExpectedSize(3 + table.getIndexes().size());
        tableDescriptors.add(tableDescriptor);
        origTableDescriptors.add(origTableDescriptor);
        nonTxToTx = Boolean.TRUE.equals(tableProps.get(PhoenixTransactionContext.READ_NON_TX_DATA));
        /*
             * If the table was transitioned from non transactional to transactional, we need
             * to also transition the index tables.
             */
        if (nonTxToTx) {
            updateDescriptorForTx(table, tableProps, tableDescriptor, Boolean.TRUE.toString(), tableDescriptors, origTableDescriptors);
        }
    }
    boolean success = false;
    boolean metaDataUpdated = !tableDescriptors.isEmpty();
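    // Polling is skipped only when the change is purely table-level properties with no column families involved.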
    boolean pollingNeeded = !(!tableProps.isEmpty() && families.isEmpty() && colFamiliesForPColumnsToBeAdded.isEmpty());
    MetaDataMutationResult result = null;
    try {
        boolean modifyHTable = true;
        if (table.getType() == PTableType.VIEW) {
            boolean canViewsAddNewCF = props.getBoolean(QueryServices.ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE, QueryServicesOptions.DEFAULT_ALLOW_VIEWS_ADD_NEW_CF_BASE_TABLE);
            // When adding a column to a view, base physical table should only be modified when new column families are being added.
            modifyHTable = canViewsAddNewCF && !existingColumnFamiliesForBaseTable(table.getPhysicalName()).containsAll(colFamiliesForPColumnsToBeAdded);
        }
        if (modifyHTable) {
            sendHBaseMetaData(tableDescriptors, pollingNeeded);
        }
        // If there is no metadata to update, short-circuit with a NO_OP result.
        // (This path could also be used to update property values on ALTER TABLE t SET prop=xxx.)
        if ((tableMetaData.isEmpty()) || (tableMetaData.size() == 1 && tableMetaData.get(0).isEmpty())) {
            return new MetaDataMutationResult(MutationCode.NO_OP, EnvironmentEdgeManager.currentTimeMillis(), table);
        }
        byte[][] rowKeyMetaData = new byte[3][];
        PTableType tableType = table.getType();
        Mutation m = tableMetaData.get(0);
        byte[] rowKey = m.getRow();
        SchemaUtil.getVarChars(rowKey, rowKeyMetaData);
        byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        byte[] schemaBytes = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        byte[] tableBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes);
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        result = metaDataCoprocessorExec(tableKey, new Batch.Call<MetaDataService, MetaDataResponse>() {

            @Override
            public MetaDataResponse call(MetaDataService instance) throws IOException {
                ServerRpcController controller = new ServerRpcController();
                BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
                AddColumnRequest.Builder builder = AddColumnRequest.newBuilder();
                for (Mutation m : tableMetaData) {
                    MutationProto mp = ProtobufUtil.toProto(m);
                    builder.addTableMetadataMutations(mp.toByteString());
                }
                builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
                instance.addColumn(controller, builder.build(), rpcCallback);
                if (controller.getFailedOn() != null) {
                    throw controller.getFailedOn();
                }
                return rpcCallback.get();
            }
        });
        if (result.getMutationCode() == MutationCode.COLUMN_NOT_FOUND || result.getMutationCode() == MutationCode.TABLE_ALREADY_EXISTS) {
            // Success
            success = true;
            // Flush the table if transitioning DISABLE_WAL from TRUE to FALSE
            if (MetaDataUtil.getMutationValue(m, PhoenixDatabaseMetaData.DISABLE_WAL_BYTES, kvBuilder, ptr) && Boolean.FALSE.equals(PBoolean.INSTANCE.toObject(ptr))) {
                flushTable(table.getPhysicalName().getBytes());
            }
            if (tableType == PTableType.TABLE) {
                // If we're changing MULTI_TENANT to true or false, create or drop the view index table
                if (MetaDataUtil.getMutationValue(m, PhoenixDatabaseMetaData.MULTI_TENANT_BYTES, kvBuilder, ptr)) {
                    long timestamp = MetaDataUtil.getClientTimeStamp(m);
                    if (Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(ptr.get(), ptr.getOffset(), ptr.getLength()))) {
                        this.ensureViewIndexTableCreated(table, timestamp, table.isNamespaceMapped());
                    } else {
                        this.ensureViewIndexTableDropped(table.getPhysicalName().getBytes(), timestamp);
                    }
                }
            }
        }
    } finally {
        // Note that if this fails, we're in a corrupt state.
        if (!success && metaDataUpdated && nonTxToTx) {
            sendHBaseMetaData(origTableDescriptors, pollingNeeded);
        }
    }
    return result;
}
Also used: ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap), HashMap(java.util.HashMap), ArrayList(java.util.ArrayList), ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController), MutationProto(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto), MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService), BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback), MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult), Pair(org.apache.hadoop.hbase.util.Pair), ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable), MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse), PTableType(org.apache.phoenix.schema.PTableType), HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor), AddColumnRequest(org.apache.phoenix.coprocessor.generated.MetaDataProtos.AddColumnRequest), Mutation(org.apache.hadoop.hbase.client.Mutation)
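
In normal use this path is driven by DDL rather than called directly; a minimal JDBC sketch of the equivalent statement (the URL and identifiers are hypothetical):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class AddColumnSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // This DDL ends up in the addColumn path shown above.
            stmt.execute("ALTER TABLE MY_SCHEMA.MY_TABLE ADD NEW_COL VARCHAR");
        }
    }
}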

Example 15 with MetaDataService

Use of org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService in the Apache Phoenix project.

From class ConnectionQueryServicesImpl, method updateIndexState.

@Override
public MetaDataMutationResult updateIndexState(final List<Mutation> tableMetaData, String parentTableName) throws SQLException {
    byte[][] rowKeyMetadata = new byte[3][];
    SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata);
    byte[] tableKey = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX], rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]);
    return metaDataCoprocessorExec(tableKey, new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            UpdateIndexStateRequest.Builder builder = UpdateIndexStateRequest.newBuilder();
            for (Mutation m : tableMetaData) {
                MutationProto mp = ProtobufUtil.toProto(m);
                builder.addTableMetadataMutations(mp.toByteString());
            }
            builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
            instance.updateIndexState(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
}
Also used: MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse), KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder), NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder), PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder), IOException(java.io.IOException), PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException), ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController), MutationProto(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto), MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService), Batch(org.apache.hadoop.hbase.client.coprocessor.Batch), BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback), Mutation(org.apache.hadoop.hbase.client.Mutation)
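
All five examples repeat the same controller/callback idiom inside Batch.Call. As a non-authoritative refactoring sketch (the MetaDataRpc class and EndpointCall interface below are ours, not Phoenix's), the boilerplate could be factored out once:

import java.io.IOException;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;

public final class MetaDataRpc {

    /** One endpoint invocation: build the request and fire it at the stub. */
    public interface EndpointCall<R> {
        void send(MetaDataService instance, ServerRpcController controller,
                BlockingRpcCallback<R> rpcCallback) throws IOException;
    }

    /** Wraps the controller/callback/failure-check boilerplate shared by the examples. */
    public static <R> Batch.Call<MetaDataService, R> call(final EndpointCall<R> body) {
        return new Batch.Call<MetaDataService, R>() {
            @Override
            public R call(MetaDataService instance) throws IOException {
                ServerRpcController controller = new ServerRpcController();
                BlockingRpcCallback<R> rpcCallback = new BlockingRpcCallback<R>();
                body.send(instance, controller, rpcCallback);
                if (controller.getFailedOn() != null) {
                    throw controller.getFailedOn();
                }
                return rpcCallback.get();
            }
        };
    }
}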

Aggregations

Each entry lists a class and the number of examples that use it:

BlockingRpcCallback (org.apache.hadoop.hbase.ipc.BlockingRpcCallback): 16
ServerRpcController (org.apache.hadoop.hbase.ipc.ServerRpcController): 16
MetaDataService (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService): 16
IOException (java.io.IOException): 15
Batch (org.apache.hadoop.hbase.client.coprocessor.Batch): 15
KeyValueBuilder (org.apache.phoenix.hbase.index.util.KeyValueBuilder): 14
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 12
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException): 12
NonTxIndexBuilder (org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder): 12
PhoenixIndexBuilder (org.apache.phoenix.index.PhoenixIndexBuilder): 12
Mutation (org.apache.hadoop.hbase.client.Mutation): 11
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto): 11
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 8
HashMap (java.util.HashMap): 4
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 4
ImmutableMap (com.google.common.collect.ImmutableMap): 3
SQLException (java.sql.SQLException): 3
Map (java.util.Map): 3
TreeMap (java.util.TreeMap): 3