Example 51 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

Class ConnectionQueryServicesImpl, method checkClientServerCompatibility.

private void checkClientServerCompatibility(byte[] metaTable) throws SQLException {
    StringBuilder buf = new StringBuilder("The following servers require an updated " + QueryConstants.DEFAULT_COPROCESS_PATH + " to be put in the classpath of HBase: ");
    boolean isIncompatible = false;
    int minHBaseVersion = Integer.MAX_VALUE;
    boolean isTableNamespaceMappingEnabled = false;
    HTableInterface ht = null;
    try {
        List<HRegionLocation> locations = this.getAllTableRegions(metaTable);
        Set<HRegionLocation> serverMap = Sets.newHashSetWithExpectedSize(locations.size());
        TreeMap<byte[], HRegionLocation> regionMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
        List<byte[]> regionKeys = Lists.newArrayListWithExpectedSize(locations.size());
        for (HRegionLocation entry : locations) {
            if (!serverMap.contains(entry)) {
                regionKeys.add(entry.getRegionInfo().getStartKey());
                regionMap.put(entry.getRegionInfo().getRegionName(), entry);
                serverMap.add(entry);
            }
        }
        ht = this.getTable(metaTable);
        final Map<byte[], Long> results = ht.coprocessorService(MetaDataService.class, null, null, new Batch.Call<MetaDataService, Long>() {

            @Override
            public Long call(MetaDataService instance) throws IOException {
                ServerRpcController controller = new ServerRpcController();
                BlockingRpcCallback<GetVersionResponse> rpcCallback = new BlockingRpcCallback<GetVersionResponse>();
                GetVersionRequest.Builder builder = GetVersionRequest.newBuilder();
                builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
                instance.getVersion(controller, builder.build(), rpcCallback);
                if (controller.getFailedOn() != null) {
                    throw controller.getFailedOn();
                }
                return rpcCallback.get().getVersion();
            }
        });
        for (Map.Entry<byte[], Long> result : results.entrySet()) {
            // This is the case where "phoenix.jar" is in place but the server is out of sync with the client.
            long version = result.getValue();
            isTableNamespaceMappingEnabled |= MetaDataUtil.decodeTableNamespaceMappingEnabled(version);
            if (!isCompatible(result.getValue())) {
                isIncompatible = true;
                HRegionLocation name = regionMap.get(result.getKey());
                buf.append(name);
                buf.append(';');
            }
            hasIndexWALCodec &= hasIndexWALCodec(result.getValue());
            if (minHBaseVersion > MetaDataUtil.decodeHBaseVersion(result.getValue())) {
                minHBaseVersion = MetaDataUtil.decodeHBaseVersion(result.getValue());
            }
        }
        if (isTableNamespaceMappingEnabled != SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE, getProps())) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES).setMessage("Ensure that config " + QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " is consistent on client and server.").build().buildException();
        }
        lowestClusterHBaseVersion = minHBaseVersion;
    } catch (SQLException e) {
        throw e;
    } catch (Throwable t) {
        // This is the case if the "phoenix.jar" is not on the classpath of HBase on the region server
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR).setRootCause(t).setMessage("Ensure that " + QueryConstants.DEFAULT_COPROCESS_PATH + " is put on the classpath of HBase in every region server: " + t.getMessage()).build().buildException();
    } finally {
        if (ht != null) {
            try {
                ht.close();
            } catch (IOException e) {
                logger.warn("Could not close HTable", e);
            }
        }
    }
    if (isIncompatible) {
        buf.setLength(buf.length() - 1);
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.OUTDATED_JARS).setMessage(buf.toString()).build().buildException();
    }
}
Also used : SQLException(java.sql.SQLException) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) PTinyint(org.apache.phoenix.schema.types.PTinyint) PUnsignedTinyint(org.apache.phoenix.schema.types.PUnsignedTinyint) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) GetVersionResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse) PLong(org.apache.phoenix.schema.types.PLong) Map(java.util.Map) TreeMap(java.util.TreeMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)
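
Note that hasIndexWALCodec and lowestClusterHBaseVersion are instance fields of ConnectionQueryServicesImpl rather than locals, which is why they appear in this method without declarations. The core mechanism here is HTableInterface.coprocessorService, which invokes a protobuf endpoint on every region between the given start and stop keys (null, null covers the whole table) and returns one result per region. A stripped-down sketch of that pattern, assuming a hypothetical protobuf-generated PingService endpoint (not a real Phoenix or HBase service), might look like this:

// Sketch only: PingService, PingRequest and PingResponse are hypothetical
// protobuf-generated classes; the table handle is obtained as in the snippet above.
HTableInterface table = this.getTable(tableNameBytes);
try {
    final Map<byte[], Long> perRegion = table.coprocessorService(PingService.class,
        // null start/stop keys: invoke the endpoint on every region of the table
        null, null,
        new Batch.Call<PingService, Long>() {

            @Override
            public Long call(PingService instance) throws IOException {
                ServerRpcController controller = new ServerRpcController();
                BlockingRpcCallback<PingResponse> rpcCallback = new BlockingRpcCallback<PingResponse>();
                instance.ping(controller, PingRequest.getDefaultInstance(), rpcCallback);
                if (controller.getFailedOn() != null) {
                    // surface the server-side exception to the caller
                    throw controller.getFailedOn();
                }
                return rpcCallback.get().getElapsedMillis();
            }
        });
    // perRegion is keyed by region name, with one entry per region that answered
} catch (Throwable t) {
    // coprocessorService declares Throwable; Phoenix converts it into a SQLException as above
} finally {
    Closeables.closeQuietly(table);
}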

Example 52 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

Class ConnectionQueryServicesImpl, method clearCache.

/**
     * Clears the Phoenix meta data cache on each region server
     * @throws SQLException
     */
@Override
public long clearCache() throws SQLException {
    try {
        SQLException sqlE = null;
        HTableInterface htable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, this.getProps()).getName());
        try {
            tableStatsCache.invalidateAll();
            final Map<byte[], Long> results = htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, new Batch.Call<MetaDataService, Long>() {

                @Override
                public Long call(MetaDataService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<ClearCacheResponse> rpcCallback = new BlockingRpcCallback<ClearCacheResponse>();
                    ClearCacheRequest.Builder builder = ClearCacheRequest.newBuilder();
                    builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
                    instance.clearCache(controller, builder.build(), rpcCallback);
                    if (controller.getFailedOn() != null) {
                        throw controller.getFailedOn();
                    }
                    return rpcCallback.get().getUnfreedBytes();
                }
            });
            long unfreedBytes = 0;
            for (Map.Entry<byte[], Long> result : results.entrySet()) {
                if (result.getValue() != null) {
                    unfreedBytes += result.getValue();
                }
            }
            return unfreedBytes;
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        } catch (Throwable e) {
            sqlE = new SQLException(e);
        } finally {
            try {
                tableStatsCache.invalidateAll();
                htable.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            } finally {
                if (sqlE != null) {
                    throw sqlE;
                }
            }
        }
    } catch (Exception e) {
        throw new SQLException(ServerUtil.parseServerException(e));
    }
    return 0;
}
Also used : SQLException(java.sql.SQLException) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) ClearCacheResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.ClearCacheResponse) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) TableAlreadyExistsException(org.apache.phoenix.schema.TableAlreadyExistsException) UpgradeInProgressException(org.apache.phoenix.exception.UpgradeInProgressException) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) RetriableUpgradeException(org.apache.phoenix.exception.RetriableUpgradeException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) UpgradeNotRequiredException(org.apache.phoenix.exception.UpgradeNotRequiredException) NewerTableAlreadyExistsException(org.apache.phoenix.schema.NewerTableAlreadyExistsException) NewerSchemaAlreadyExistsException(org.apache.phoenix.schema.NewerSchemaAlreadyExistsException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) TableExistsException(org.apache.hadoop.hbase.TableExistsException) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) SQLException(java.sql.SQLException) ColumnAlreadyExistsException(org.apache.phoenix.schema.ColumnAlreadyExistsException) TimeoutException(java.util.concurrent.TimeoutException) FunctionNotFoundException(org.apache.phoenix.schema.FunctionNotFoundException) ReadOnlyTableException(org.apache.phoenix.schema.ReadOnlyTableException) EmptySequenceCacheException(org.apache.phoenix.schema.EmptySequenceCacheException) MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) PLong(org.apache.phoenix.schema.types.PLong) BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback) Map(java.util.Map) TreeMap(java.util.TreeMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)
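
clearCache() lives on the ConnectionQueryServices layer rather than on the public JDBC surface, so a caller typically reaches it by unwrapping the JDBC connection. A minimal usage sketch, with an illustrative connection URL and assuming the enclosing method declares throws SQLException, might be:

// The JDBC URL is illustrative; unwrap() to PhoenixConnection is standard JDBC.
Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
try {
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    // Fans one ClearCacheRequest out to every region of SYSTEM.CATALOG.
    long unfreedBytes = pconn.getQueryServices().clearCache();
    System.out.println("Unfreed bytes reported by region servers: " + unfreedBytes);
} finally {
    conn.close();
}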

Example 53 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

Class ConnectionQueryServicesImpl, method returnAllSequences.

// Take no locks, as this only gets run when there are no open connections
// so there's no danger of contention.
@SuppressWarnings("deprecation")
private void returnAllSequences(ConcurrentMap<SequenceKey, Sequence> sequenceMap) throws SQLException {
    List<Append> mutations = Lists.newArrayListWithExpectedSize(sequenceMap.size());
    for (Sequence sequence : sequenceMap.values()) {
        mutations.addAll(sequence.newReturns());
    }
    if (mutations.isEmpty()) {
        return;
    }
    HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
    SQLException sqlE = null;
    try {
        hTable.batch(mutations);
    } catch (IOException e) {
        sqlE = ServerUtil.parseServerException(e);
    } catch (InterruptedException e) {
        // restore the interrupt status
        Thread.currentThread().interrupt();
        sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
    } finally {
        try {
            hTable.close();
        } catch (IOException e) {
            if (sqlE == null) {
                sqlE = ServerUtil.parseServerException(e);
            } else {
                sqlE.setNextException(ServerUtil.parseServerException(e));
            }
        }
        if (sqlE != null) {
            throw sqlE;
        }
    }
}
Also used : Append(org.apache.hadoop.hbase.client.Append) SQLException(java.sql.SQLException) Sequence(org.apache.phoenix.schema.Sequence) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo)
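
hTable.batch(mutations) submits all of the Append mutations in a single round of region server calls; the single-argument form used here is the deprecated variant, which is what the @SuppressWarnings("deprecation") on the method refers to. A small standalone sketch of the same call, with a made-up row key and column rather than Phoenix's real SYSTEM.SEQUENCE layout, and assuming the enclosing method declares throws SQLException, could look like:

// Sketch only: the row key, column family and qualifier are illustrative.
List<Append> mutations = Lists.newArrayList();
Append append = new Append(Bytes.toBytes("tenant1.my_schema.my_sequence"));
append.add(Bytes.toBytes("0"), Bytes.toBytes("CURRENT_VALUE"), Bytes.toBytes(100L));
mutations.add(append);
try {
    hTable.batch(mutations);                    // deprecated single-argument form, as above
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();         // restore the interrupt status, as Phoenix does
} catch (IOException e) {
    throw ServerUtil.parseServerException(e);   // translate to a SQLException, as Phoenix does
} finally {
    Closeables.closeQuietly(hTable);
}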

Example 54 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

Class ConnectionQueryServicesImpl, method createSequence.

@Override
public long createSequence(String tenantId, String schemaName, String sequenceName, long startWith, long incrementBy, long cacheSize, long minValue, long maxValue, boolean cycle, long timestamp) throws SQLException {
    SequenceKey sequenceKey = new SequenceKey(tenantId, schemaName, sequenceName, nSequenceSaltBuckets);
    Sequence newSequences = new Sequence(sequenceKey);
    Sequence sequence = sequenceMap.putIfAbsent(sequenceKey, newSequences);
    if (sequence == null) {
        sequence = newSequences;
    }
    try {
        sequence.getLock().lock();
        // Now that we have the lock we need, create the sequence
        Append append = sequence.createSequence(startWith, incrementBy, cacheSize, timestamp, minValue, maxValue, cycle);
        HTableInterface htable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        htable.setAutoFlush(true);
        try {
            Result result = htable.append(append);
            return sequence.createSequence(result, minValue, maxValue, cycle);
        } catch (IOException e) {
            throw ServerUtil.parseServerException(e);
        } finally {
            Closeables.closeQuietly(htable);
        }
    } finally {
        sequence.getLock().unlock();
    }
}
Also used : Append(org.apache.hadoop.hbase.client.Append) SequenceKey(org.apache.phoenix.schema.SequenceKey) Sequence(org.apache.phoenix.schema.Sequence) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) Result(org.apache.hadoop.hbase.client.Result)
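
Unlike the batched returns above, htable.append(append) is a single atomic read-modify-write whose Result carries the cell values after the append was applied; Sequence.createSequence(result, ...) then parses those cells. In plain HBase an Append concatenates bytes onto the existing cell value (Phoenix itself gives these mutations their sequence semantics with server-side logic on the SYSTEM.SEQUENCE table), so a generic sketch of the raw API, with an illustrative row and column, would be:

// Sketch only: the row key, family and qualifier are illustrative.
byte[] family = Bytes.toBytes("0");
byte[] qualifier = Bytes.toBytes("EVENT_LOG");
Append append = new Append(Bytes.toBytes("row-1"));
append.add(family, qualifier, Bytes.toBytes("|login"));
Result result = htable.append(append);          // atomic read-modify-write on the region server
// The returned Result holds the cell value after the append was applied.
String newValue = Bytes.toString(result.getValue(family, qualifier));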

Example 55 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

Class ConnectionQueryServicesImpl, method metaDataCoprocessorExec.

/**
     * Invoke meta data coprocessor with one retry if the key was found to not be in the regions
     * (due to a table split)
     */
private MetaDataMutationResult metaDataCoprocessorExec(byte[] tableKey, Batch.Call<MetaDataService, MetaDataResponse> callable, byte[] tableName) throws SQLException {
    try {
        boolean retried = false;
        while (true) {
            if (retried) {
                connection.relocateRegion(SchemaUtil.getPhysicalName(tableName, this.getProps()), tableKey);
            }
            HTableInterface ht = this.getTable(SchemaUtil.getPhysicalName(tableName, this.getProps()).getName());
            try {
                final Map<byte[], MetaDataResponse> results = ht.coprocessorService(MetaDataService.class, tableKey, tableKey, callable);
                assert (results.size() == 1);
                MetaDataResponse result = results.values().iterator().next();
                if (result.getReturnCode() == MetaDataProtos.MutationCode.TABLE_NOT_IN_REGION || result.getReturnCode() == MetaDataProtos.MutationCode.FUNCTION_NOT_IN_REGION) {
                    if (retried)
                        return MetaDataMutationResult.constructFromProto(result);
                    retried = true;
                    continue;
                }
                return MetaDataMutationResult.constructFromProto(result);
            } finally {
                Closeables.closeQuietly(ht);
            }
        }
    } catch (IOException e) {
        throw ServerUtil.parseServerException(e);
    } catch (Throwable t) {
        throw new SQLException(t);
    }
}
Also used : MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) SQLException(java.sql.SQLException) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface)
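
The loop retries at most once: the first TABLE_NOT_IN_REGION or FUNCTION_NOT_IN_REGION answer triggers connection.relocateRegion(...) to refresh the cached region location, and a second such answer is returned to the caller unchanged. Reduced to its control flow, the pattern is equivalent to the following sketch, where the Invoker interface and its methods are hypothetical placeholders rather than Phoenix types:

// Control-flow sketch only; names are hypothetical. Requires: import java.io.IOException;
final class RetryOnce {
    interface Invoker<R> {
        R call() throws IOException;    // one coprocessor round trip
        boolean stale(R response);      // e.g. TABLE_NOT_IN_REGION after a split
        void relocate();                // refresh the cached region location
    }

    static <R> R callWithOneRetry(Invoker<R> invoker) throws IOException {
        boolean retried = false;
        while (true) {
            if (retried) {
                invoker.relocate();     // only done on the second attempt
            }
            R response = invoker.call();
            if (invoker.stale(response) && !retried) {
                retried = true;         // stale region location: go around exactly once more
                continue;
            }
            return response;            // success, or a second stale answer surfaced as-is
        }
    }
}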

Aggregations

HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 122
Result (org.apache.hadoop.hbase.client.Result): 43
Put (org.apache.hadoop.hbase.client.Put): 42
IOException (java.io.IOException): 38
ArrayList (java.util.ArrayList): 27
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 23
Get (org.apache.hadoop.hbase.client.Get): 21
Scan (org.apache.hadoop.hbase.client.Scan): 21
Test (org.junit.Test): 20
SQLException (java.sql.SQLException): 19
HashMap (java.util.HashMap): 17
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 17
Connection (java.sql.Connection): 15
Delete (org.apache.hadoop.hbase.client.Delete): 12
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 12
Mutation (org.apache.hadoop.hbase.client.Mutation): 12
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException): 11
ResultSet (java.sql.ResultSet): 10
Map (java.util.Map): 9
Configuration (org.apache.hadoop.conf.Configuration): 9