Example 61 with HRegionLocation

use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.

the class ConnectionQueryServicesImpl method checkClientServerCompatibility.

private void checkClientServerCompatibility(byte[] metaTable) throws SQLException {
    StringBuilder buf = new StringBuilder("The following servers require an updated " + QueryConstants.DEFAULT_COPROCESS_PATH + " to be put in the classpath of HBase: ");
    boolean isIncompatible = false;
    int minHBaseVersion = Integer.MAX_VALUE;
    boolean isTableNamespaceMappingEnabled = false;
    HTableInterface ht = null;
    try {
        List<HRegionLocation> locations = this.getAllTableRegions(metaTable);
        Set<HRegionLocation> serverMap = Sets.newHashSetWithExpectedSize(locations.size());
        TreeMap<byte[], HRegionLocation> regionMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
        List<byte[]> regionKeys = Lists.newArrayListWithExpectedSize(locations.size());
        for (HRegionLocation entry : locations) {
            if (!serverMap.contains(entry)) {
                regionKeys.add(entry.getRegionInfo().getStartKey());
                regionMap.put(entry.getRegionInfo().getRegionName(), entry);
                serverMap.add(entry);
            }
        }
        ht = this.getTable(metaTable);
        final Map<byte[], Long> results = ht.coprocessorService(MetaDataService.class, null, null, new Batch.Call<MetaDataService, Long>() {

            @Override
            public Long call(MetaDataService instance) throws IOException {
                ServerRpcController controller = new ServerRpcController();
                BlockingRpcCallback<GetVersionResponse> rpcCallback = new BlockingRpcCallback<GetVersionResponse>();
                GetVersionRequest.Builder builder = GetVersionRequest.newBuilder();
                builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
                instance.getVersion(controller, builder.build(), rpcCallback);
                if (controller.getFailedOn() != null) {
                    throw controller.getFailedOn();
                }
                return rpcCallback.get().getVersion();
            }
        });
        for (Map.Entry<byte[], Long> result : results.entrySet()) {
            // This is the case where "phoenix.jar" is in place, but the server is out of sync with the client.
            long version = result.getValue();
            isTableNamespaceMappingEnabled |= MetaDataUtil.decodeTableNamespaceMappingEnabled(version);
            if (!isCompatible(result.getValue())) {
                isIncompatible = true;
                HRegionLocation name = regionMap.get(result.getKey());
                buf.append(name);
                buf.append(';');
            }
            hasIndexWALCodec &= hasIndexWALCodec(result.getValue());
            if (minHBaseVersion > MetaDataUtil.decodeHBaseVersion(result.getValue())) {
                minHBaseVersion = MetaDataUtil.decodeHBaseVersion(result.getValue());
            }
        }
        if (isTableNamespaceMappingEnabled != SchemaUtil.isNamespaceMappingEnabled(PTableType.TABLE, getProps())) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES).setMessage("Ensure that config " + QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " is consistent on client and server.").build().buildException();
        }
        lowestClusterHBaseVersion = minHBaseVersion;
    } catch (SQLException e) {
        throw e;
    } catch (Throwable t) {
        // This is the case if the "phoenix.jar" is not on the classpath of HBase on the region server
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.INCOMPATIBLE_CLIENT_SERVER_JAR).setRootCause(t).setMessage("Ensure that " + QueryConstants.DEFAULT_COPROCESS_PATH + " is put on the classpath of HBase in every region server: " + t.getMessage()).build().buildException();
    } finally {
        if (ht != null) {
            try {
                ht.close();
            } catch (IOException e) {
                logger.warn("Could not close HTable", e);
            }
        }
    }
    if (isIncompatible) {
        buf.setLength(buf.length() - 1);
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.OUTDATED_JARS).setMessage(buf.toString()).build().buildException();
    }
}
Also used : SQLException(java.sql.SQLException) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) ServerRpcController(org.apache.hadoop.hbase.ipc.ServerRpcController) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) MetaDataService(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService) Batch(org.apache.hadoop.hbase.client.coprocessor.Batch) BlockingRpcCallback(org.apache.hadoop.hbase.ipc.BlockingRpcCallback) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) PTinyint(org.apache.phoenix.schema.types.PTinyint) PUnsignedTinyint(org.apache.phoenix.schema.types.PUnsignedTinyint) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) GetVersionResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.GetVersionResponse) PLong(org.apache.phoenix.schema.types.PLong) Map(java.util.Map) TreeMap(java.util.TreeMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)
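
The version value shuttled through GetVersionRequest/GetVersionResponse above is a single number that packs several components; VersionUtil.encodeVersion builds it and helpers such as MetaDataUtil.decodeHBaseVersion pull pieces back out. The exact bit layout lives in those Phoenix utility classes; the sketch below only illustrates the general bit-packing technique, with a made-up one-byte-per-component layout and a hypothetical compatibility rule, not Phoenix's actual encoding.

// Illustrative only: one byte per component. This is NOT Phoenix's real
// layout; see VersionUtil.encodeVersion and MetaDataUtil for that.
static int encodeVersion(int major, int minor, int patch) {
    return (major << 16) | (minor << 8) | patch;
}

// Hypothetical policy: client and server are compatible when their major
// and minor components match.
static boolean sameMajorMinor(int clientVersion, int serverVersion) {
    return (clientVersion >> 8) == (serverVersion >> 8);
}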

Example 62 with HRegionLocation

use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.

the class ConnectionQueryServicesImpl method getAllTableRegions.

@Override
public List<HRegionLocation> getAllTableRegions(byte[] tableName) throws SQLException {
    /*
         * Use HConnection.getRegionLocation as it uses the cache in HConnection, while getting
         * all region locations from the HTable doesn't.
         */
    int retryCount = 0, maxRetryCount = 1;
    boolean reload = false;
    while (true) {
        try {
            // We could surface the package-protected HConnectionImplementation.getNumberOfCachedRegionLocations
            // to get the sizing info we need, but this would require a new class in the same package and a cast
            // to this implementation class, so it's probably not worth it.
            List<HRegionLocation> locations = Lists.newArrayList();
            byte[] currentKey = HConstants.EMPTY_START_ROW;
            do {
                HRegionLocation regionLocation = connection.getRegionLocation(TableName.valueOf(tableName), currentKey, reload);
                locations.add(regionLocation);
                currentKey = regionLocation.getRegionInfo().getEndKey();
            } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW));
            return locations;
        } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
            String fullName = Bytes.toString(tableName);
            throw new TableNotFoundException(fullName);
        } catch (IOException e) {
            if (retryCount++ < maxRetryCount) {
                // One retry, in case split occurs while navigating
                reload = true;
                continue;
            }
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.GET_TABLE_REGIONS_FAIL).setRootCause(e).build().buildException();
        }
    }
}
Also used : TableNotFoundException(org.apache.phoenix.schema.TableNotFoundException) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) KeyValueBuilder(org.apache.phoenix.hbase.index.util.KeyValueBuilder) NonTxIndexBuilder(org.apache.phoenix.hbase.index.covered.NonTxIndexBuilder) ThreadFactoryBuilder(com.google.common.util.concurrent.ThreadFactoryBuilder) PhoenixIndexBuilder(org.apache.phoenix.index.PhoenixIndexBuilder) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) PTinyint(org.apache.phoenix.schema.types.PTinyint) PUnsignedTinyint(org.apache.phoenix.schema.types.PUnsignedTinyint) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint)
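
The loop above walks a table region by region: start at EMPTY_START_ROW, look up the region containing the current key, then jump to that region's end key until EMPTY_END_ROW is reached. The same walk works against the public RegionLocator API, which also serves from a client-side cache. A minimal sketch, assuming a hypothetical table named "MY_TABLE" and an open Connection:

// Sketch of the same start-key walk via RegionLocator (table name assumed).
static List<HRegionLocation> walkRegions(Connection conn) throws IOException {
    List<HRegionLocation> locations = new ArrayList<>();
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("MY_TABLE"))) {
        byte[] currentKey = HConstants.EMPTY_START_ROW;
        do {
            // reload=false serves from the client-side location cache when possible
            HRegionLocation location = locator.getRegionLocation(currentKey, false);
            locations.add(location);
            currentKey = location.getRegionInfo().getEndKey();
        } while (!Bytes.equals(currentKey, HConstants.EMPTY_END_ROW));
    }
    return locations;
}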

Example 63 with HRegionLocation

use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.

the class ConnectionlessQueryServicesImpl method generateRegionLocations.

private static List<HRegionLocation> generateRegionLocations(byte[] physicalName, byte[][] splits) {
    byte[] startKey = HConstants.EMPTY_START_ROW;
    List<HRegionLocation> regions = Lists.newArrayListWithExpectedSize(splits.length);
    for (byte[] split : splits) {
        regions.add(new HRegionLocation(new HRegionInfo(TableName.valueOf(physicalName), startKey, split), SERVER_NAME, -1));
        startKey = split;
    }
    regions.add(new HRegionLocation(new HRegionInfo(TableName.valueOf(physicalName), startKey, HConstants.EMPTY_END_ROW), SERVER_NAME, -1));
    return regions;
}
Also used : HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation)
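
A quick usage note: n split points always produce n + 1 contiguous fake regions, because the loop emits one region per split and the final add() closes the key space with EMPTY_END_ROW. For example, with hypothetical split keys:

byte[][] splits = { Bytes.toBytes("m"), Bytes.toBytes("t") };
// generateRegionLocations(Bytes.toBytes("T"), splits) returns three regions
// covering [EMPTY_START_ROW, "m"), ["m", "t"), and ["t", EMPTY_END_ROW).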

Example 64 with HRegionLocation

use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.

the class PhoenixInputFormat method generateSplits.

private List<InputSplit> generateSplits(final JobConf jobConf, final QueryPlan qplan, final List<KeyRange> splits, String query) throws IOException {
    Preconditions.checkNotNull(qplan);
    Preconditions.checkNotNull(splits);
    final List<InputSplit> psplits = Lists.newArrayListWithExpectedSize(splits.size());
    Path[] tablePaths = FileInputFormat.getInputPaths(ShimLoader.getHadoopShims().newJobContext(new Job(jobConf)));
    boolean splitByStats = jobConf.getBoolean(PhoenixStorageHandlerConstants.SPLIT_BY_STATS, false);
    setScanCacheSize(jobConf);
    // Adding Localization
    HConnection connection = HConnectionManager.createConnection(PhoenixConnectionUtil.getConfiguration(jobConf));
    RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf(qplan.getTableRef().getTable().getPhysicalName().toString()));
    RegionSizeCalculator sizeCalculator = new RegionSizeCalculator(regionLocator, connection.getAdmin());
    for (List<Scan> scans : qplan.getScans()) {
        PhoenixInputSplit inputSplit;
        HRegionLocation location = regionLocator.getRegionLocation(scans.get(0).getStartRow(), false);
        long regionSize = sizeCalculator.getRegionSize(location.getRegionInfo().getRegionName());
        String regionLocation = PhoenixStorageHandlerUtil.getRegionLocation(location, LOG);
        if (splitByStats) {
            for (Scan aScan : scans) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Split for scan : " + aScan + " with scanAttribute : " + aScan.getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : [" + aScan.getCaching() + ", " + aScan.getCacheBlocks() + ", " + aScan.getBatch() + "] and regionLocation : " + regionLocation);
                }
                inputSplit = new PhoenixInputSplit(Lists.newArrayList(aScan), tablePaths[0], regionLocation, regionSize);
                inputSplit.setQuery(query);
                psplits.add(inputSplit);
            }
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Scan count[" + scans.size() + "] : " + Bytes.toStringBinary(scans.get(0).getStartRow()) + " ~ " + Bytes.toStringBinary(scans.get(scans.size() - 1).getStopRow()));
                LOG.debug("First scan : " + scans.get(0) + " with scanAttribute : " + scans.get(0).getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : " + "[" + scans.get(0).getCaching() + ", " + scans.get(0).getCacheBlocks() + ", " + scans.get(0).getBatch() + "] and regionLocation : " + regionLocation);
                for (int i = 0, limit = scans.size(); i < limit; i++) {
                    LOG.debug("EXPECTED_UPPER_REGION_KEY[" + i + "] : " + Bytes.toStringBinary(scans.get(i).getAttribute(BaseScannerRegionObserver.EXPECTED_UPPER_REGION_KEY)));
                }
            }
            inputSplit = new PhoenixInputSplit(scans, tablePaths[0], regionLocation, regionSize);
            inputSplit.setQuery(query);
            psplits.add(inputSplit);
        }
    }
    return psplits;
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) RegionSizeCalculator(org.apache.hadoop.hbase.util.RegionSizeCalculator) HConnection(org.apache.hadoop.hbase.client.HConnection) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) Scan(org.apache.hadoop.hbase.client.Scan) Job(org.apache.hadoop.mapreduce.Job) InputSplit(org.apache.hadoop.mapred.InputSplit)
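
Note that HConnection and HConnectionManager, used above, have been deprecated since HBase 1.0 in favor of Connection and ConnectionFactory (Example 65 below uses the replacement API for the same split generation). A minimal sketch of the equivalent setup, with everything downstream unchanged:

// Replacement for the HConnectionManager.createConnection(...) call above.
Connection connection = ConnectionFactory.createConnection(
        PhoenixConnectionUtil.getConfiguration(jobConf));
RegionLocator regionLocator = connection.getRegionLocator(
        TableName.valueOf(qplan.getTableRef().getTable().getPhysicalName().toString()));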

Example 65 with HRegionLocation

use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.

the class PhoenixInputFormat method generateSplits.

private List<InputSplit> generateSplits(final QueryPlan qplan, final List<KeyRange> splits, Configuration config) throws IOException {
    Preconditions.checkNotNull(qplan);
    Preconditions.checkNotNull(splits);
    // Get the RegionSizeCalculator
    org.apache.hadoop.hbase.client.Connection connection = ConnectionFactory.createConnection(config);
    RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf(qplan.getTableRef().getTable().getPhysicalName().toString()));
    RegionSizeCalculator sizeCalculator = new RegionSizeCalculator(regionLocator, connection.getAdmin());
    final List<InputSplit> psplits = Lists.newArrayListWithExpectedSize(splits.size());
    for (List<Scan> scans : qplan.getScans()) {
        // Get the region location
        HRegionLocation location = regionLocator.getRegionLocation(scans.get(0).getStartRow(), false);
        String regionLocation = location.getHostname();
        // Get the region size
        long regionSize = sizeCalculator.getRegionSize(location.getRegionInfo().getRegionName());
        // Generate splits based off statistics, or just region splits?
        boolean splitByStats = PhoenixConfigurationUtil.getSplitByStats(config);
        if (splitByStats) {
            for (Scan aScan : scans) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Split for scan : " + aScan + " with scanAttribute : " + aScan.getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : [" + aScan.getCaching() + ", " + aScan.getCacheBlocks() + ", " + aScan.getBatch() + "] and regionLocation : " + regionLocation);
                }
                psplits.add(new PhoenixInputSplit(Collections.singletonList(aScan), regionSize, regionLocation));
            }
        } else {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Scan count[" + scans.size() + "] : " + Bytes.toStringBinary(scans.get(0).getStartRow()) + " ~ " + Bytes.toStringBinary(scans.get(scans.size() - 1).getStopRow()));
                LOG.debug("First scan : " + scans.get(0) + " with scanAttribute : " + scans.get(0).getAttributesMap() + " [scanCache, cacheBlock, scanBatch] : " + "[" + scans.get(0).getCaching() + ", " + scans.get(0).getCacheBlocks() + ", " + scans.get(0).getBatch() + "] and regionLocation : " + regionLocation);
                for (int i = 0, limit = scans.size(); i < limit; i++) {
                    LOG.debug("EXPECTED_UPPER_REGION_KEY[" + i + "] : " + Bytes.toStringBinary(scans.get(i).getAttribute(BaseScannerRegionObserver.EXPECTED_UPPER_REGION_KEY)));
                }
            }
            psplits.add(new PhoenixInputSplit(scans, regionSize, regionLocation));
        }
    }
    return psplits;
}
Also used : RegionSizeCalculator(org.apache.hadoop.hbase.util.RegionSizeCalculator) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) org.apache.hadoop.hbase.client(org.apache.hadoop.hbase.client) InputSplit(org.apache.hadoop.mapreduce.InputSplit)
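
Both generateSplits variants weight each split by the on-disk size of the region that hosts its first scan, via RegionSizeCalculator. A minimal sketch of that wiring in isolation, assuming a hypothetical table named "T" and an open Connection:

// Print the approximate size of every region of table "T" (name assumed).
static void printRegionSizes(Connection conn) throws IOException {
    try (Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("T"))) {
        RegionSizeCalculator sizeCalculator = new RegionSizeCalculator(locator, admin);
        for (HRegionLocation location : locator.getAllRegionLocations()) {
            long sizeBytes = sizeCalculator.getRegionSize(location.getRegionInfo().getRegionName());
            System.out.println(location.getRegionInfo().getRegionNameAsString()
                    + " ~ " + sizeBytes + " bytes");
        }
    }
}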

Aggregations

HRegionLocation (org.apache.hadoop.hbase.HRegionLocation) 132
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) 52
Test (org.junit.Test) 50
ServerName (org.apache.hadoop.hbase.ServerName) 44
TableName (org.apache.hadoop.hbase.TableName) 39
IOException (java.io.IOException) 31
RegionLocator (org.apache.hadoop.hbase.client.RegionLocator) 30
RegionLocations (org.apache.hadoop.hbase.RegionLocations) 29
ArrayList (java.util.ArrayList) 25
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor) 18
Table (org.apache.hadoop.hbase.client.Table) 18
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor) 16
List (java.util.List) 12
HashMap (java.util.HashMap) 11
Map (java.util.Map) 11
Result (org.apache.hadoop.hbase.client.Result) 10
MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) 10
Connection (org.apache.hadoop.hbase.client.Connection) 9
HRegionServer (org.apache.hadoop.hbase.regionserver.HRegionServer) 9
Admin (org.apache.hadoop.hbase.client.Admin) 8