Search in sources :

Example 81 with RegionLocator

use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

In the class HFileReplicator, the method replicate:

/**
 * Replicates bulk-loaded HFiles into the sink cluster, one staging directory per table.
 *
 * <p>For every entry produced by {@code copyHFilesToStagingDir()} this builds a
 * {@link LoadIncrementalHFiles} queue from the staging directory and bulk-loads it into the
 * target table, retrying up to {@code hbase.bulkload.retries.number} (default 10) times.
 *
 * @return always {@code null} (the {@code Void} contract of the replication callable)
 * @throws IOException if the loader cannot be initialized or the bulk load fails
 */
public Void replicate() throws IOException {
    // Copy all the hfiles to the local file system
    Map<String, Path> tableStagingDirsMap = copyHFilesToStagingDir();
    int maxRetries = conf.getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);
    for (Entry<String, Path> tableStagingDir : tableStagingDirsMap.entrySet()) {
        String tableNameString = tableStagingDir.getKey();
        Path stagingDir = tableStagingDir.getValue();
        LoadIncrementalHFiles loadHFiles = null;
        try {
            loadHFiles = new LoadIncrementalHFiles(conf);
        } catch (Exception e) {
            LOG.error("Failed to initialize LoadIncrementalHFiles for replicating bulk loaded" + " data.", e);
            throw new IOException(e);
        }
        // The sink must never create the table as a side effect of the load.
        Configuration newConf = HBaseConfiguration.create(conf);
        newConf.set(LoadIncrementalHFiles.CREATE_TABLE_CONF_KEY, "no");
        loadHFiles.setConf(newConf);
        TableName tableName = TableName.valueOf(tableNameString);
        Table table = this.connection.getTable(tableName);
        // Prepare collection of queue of hfiles to be loaded(replicated)
        Deque<LoadQueueItem> queue = new LinkedList<>();
        loadHFiles.prepareHFileQueue(stagingDir, table, queue, false);
        if (queue.isEmpty()) {
            LOG.warn("Replication process did not find any files to replicate in directory " + stagingDir.toUri());
            // BUG FIX: this previously did `return null`, which skipped cleanup for this
            // table (leaking the Table and the staging directory) and silently aborted
            // replication of every remaining table in the map. Clean up and move on.
            cleanup(stagingDir.toString(), table);
            continue;
        }
        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
            fsDelegationToken.acquireDelegationToken(sinkFs);
            // Set the staging directory which will be used by LoadIncrementalHFiles for loading the
            // data
            loadHFiles.setBulkToken(stagingDir.toString());
            doBulkLoad(loadHFiles, table, queue, locator, maxRetries);
        } finally {
            // Always remove the staging files and release per-table resources.
            cleanup(stagingDir.toString(), table);
        }
    }
    return null;
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) ExecutionException(java.util.concurrent.ExecutionException) LinkedList(java.util.LinkedList) TableName(org.apache.hadoop.hbase.TableName) LoadIncrementalHFiles(org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles) LoadQueueItem(org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem)

Example 82 with RegionLocator

use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache.

In the class ThriftHBaseServiceHandler, the method getRegionLocation:

/**
 * Resolves the region hosting {@code row} in {@code table} and converts the result to the
 * Thrift representation.
 *
 * @param table  table name as a byte buffer
 * @param row    row key whose hosting region is requested
 * @param reload if true, bypass any cached location and ask the cluster again
 * @return the Thrift view of the region location
 * @throws TIOError wrapping any underlying {@link IOException}
 */
@Override
public THRegionLocation getRegionLocation(ByteBuffer table, ByteBuffer row, boolean reload) throws TIOError, TException {
    RegionLocator regionLocator = null;
    try {
        regionLocator = getLocator(table);
        HRegionLocation location = regionLocator.getRegionLocation(byteBufferToByteArray(row), reload);
        return ThriftUtilities.regionLocationFromHBase(location);
    } catch (IOException ioe) {
        throw getTIOError(ioe);
    } finally {
        // Close manually (not try-with-resources) so a failure to close is only
        // logged and never masks the result or the primary exception.
        if (regionLocator != null) {
            try {
                regionLocator.close();
            } catch (IOException closeEx) {
                LOG.warn("Couldn't close the locator.", closeEx);
            }
        }
    }
}
Also used : RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) THRegionLocation(org.apache.hadoop.hbase.thrift2.generated.THRegionLocation) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) IOException(java.io.IOException)

Example 83 with RegionLocator

use of org.apache.hadoop.hbase.client.RegionLocator in project drill by apache.

In the class BinaryTableGroupScan, the method init:

/**
 * Populates {@code regionsToScan} with the tablets covering the scan's row range and caches
 * the table descriptor and table stats. Regions before the start row are skipped; scanning
 * stops at the region containing the stop row.
 *
 * @throws DrillRuntimeException if region metadata cannot be fetched
 */
private void init() {
    logger.debug("Getting region locations");
    TableName table = TableName.valueOf(hbaseScanSpec.getTableName());
    try (Admin admin = formatPlugin.getConnection().getAdmin();
        RegionLocator regionLocator = formatPlugin.getConnection().getRegionLocator(table)) {
        hTableDesc = admin.getTableDescriptor(table);
        // Fetch tableStats only once and cache it.
        if (tableStats == null) {
            tableStats = new MapRDBTableStats(getHBaseConf(), hbaseScanSpec.getTableName());
        }
        regionsToScan = new TreeMap<TabletFragmentInfo, String>();
        byte[] startRow = hbaseScanSpec.getStartRow();
        byte[] stopRow = hbaseScanSpec.getStopRow();
        boolean withinRange = false;
        for (HRegionLocation location : regionLocator.getAllRegionLocations()) {
            HRegionInfo region = location.getRegionInfo();
            // Skip leading regions until the one containing the start row (if any).
            if (!withinRange && startRow != null && startRow.length != 0 && !region.containsRow(startRow)) {
                continue;
            }
            withinRange = true;
            regionsToScan.put(new TabletFragmentInfo(region), location.getHostname());
            // Stop once the region holding the stop row has been included.
            if (stopRow != null && stopRow.length != 0 && region.containsRow(stopRow)) {
                break;
            }
        }
    } catch (Exception e) {
        throw new DrillRuntimeException("Error getting region info for table: " + hbaseScanSpec.getTableName(), e);
    }
    verifyColumns();
}
Also used : MapRDBTableStats(org.apache.drill.exec.store.mapr.db.MapRDBTableStats) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Admin(org.apache.hadoop.hbase.client.Admin) DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException) ExecutionSetupException(org.apache.drill.common.exceptions.ExecutionSetupException) IOException(java.io.IOException) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException) TabletFragmentInfo(org.apache.drill.exec.store.mapr.db.TabletFragmentInfo)

Example 84 with RegionLocator

use of org.apache.hadoop.hbase.client.RegionLocator in project drill by apache.

In the class HBaseGroupScan, the method init:

/**
 * Populates {@code regionsToScan} with the regions covering the scan's row range, caches the
 * table descriptor, and accumulates {@code scanSizeInBytes} from per-region size estimates.
 *
 * @throws DrillRuntimeException if region metadata cannot be fetched
 */
private void init() {
    logger.debug("Getting region locations");
    TableName table = TableName.valueOf(hbaseScanSpec.getTableName());
    Connection conn = storagePlugin.getConnection();
    try (Admin admin = conn.getAdmin();
        RegionLocator regionLocator = conn.getRegionLocator(table)) {
        this.hTableDesc = admin.getTableDescriptor(table);
        statsCalculator = new TableStatsCalculator(conn, hbaseScanSpec, storagePlugin.getContext().getConfig(), storagePluginConfig);
        regionsToScan = new TreeMap<HRegionInfo, ServerName>();
        byte[] startRow = hbaseScanSpec.getStartRow();
        byte[] stopRow = hbaseScanSpec.getStopRow();
        boolean withinRange = false;
        for (HRegionLocation location : regionLocator.getAllRegionLocations()) {
            HRegionInfo region = location.getRegionInfo();
            // Skip leading regions until the one containing the start row (if any).
            if (!withinRange && startRow != null && startRow.length != 0 && !region.containsRow(startRow)) {
                continue;
            }
            withinRange = true;
            regionsToScan.put(region, location.getServerName());
            scanSizeInBytes += statsCalculator.getRegionSizeInBytes(region.getRegionName());
            // Stop once the region holding the stop row has been included.
            if (stopRow != null && stopRow.length != 0 && region.containsRow(stopRow)) {
                break;
            }
        }
    } catch (IOException e) {
        throw new DrillRuntimeException("Error getting region info for table: " + hbaseScanSpec.getTableName(), e);
    }
    verifyColumns();
}
Also used : RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) TableName(org.apache.hadoop.hbase.TableName) HRegionLocation(org.apache.hadoop.hbase.HRegionLocation) ServerName(org.apache.hadoop.hbase.ServerName) DrillRuntimeException(org.apache.drill.common.exceptions.DrillRuntimeException)

Aggregations

RegionLocator (org.apache.hadoop.hbase.client.RegionLocator)84 Table (org.apache.hadoop.hbase.client.Table)59 Test (org.junit.Test)49 TableName (org.apache.hadoop.hbase.TableName)39 Admin (org.apache.hadoop.hbase.client.Admin)33 Path (org.apache.hadoop.fs.Path)31 HRegionLocation (org.apache.hadoop.hbase.HRegionLocation)30 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)29 Connection (org.apache.hadoop.hbase.client.Connection)25 Configuration (org.apache.hadoop.conf.Configuration)21 IOException (java.io.IOException)19 HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)15 FileSystem (org.apache.hadoop.fs.FileSystem)14 HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration)13 ServerName (org.apache.hadoop.hbase.ServerName)13 HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor)12 ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection)10 Put (org.apache.hadoop.hbase.client.Put)10 ArrayList (java.util.ArrayList)9 Result (org.apache.hadoop.hbase.client.Result)8