Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache: class HFileReplicator, method replicate.
public Void replicate() throws IOException {
  // Copy all the hfiles to the local file system
  Map<String, Path> tableStagingDirsMap = copyHFilesToStagingDir();
  int maxRetries = conf.getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);
  for (Entry<String, Path> tableStagingDir : tableStagingDirsMap.entrySet()) {
    String tableNameString = tableStagingDir.getKey();
    Path stagingDir = tableStagingDir.getValue();
    LoadIncrementalHFiles loadHFiles = null;
    try {
      loadHFiles = new LoadIncrementalHFiles(conf);
    } catch (Exception e) {
      LOG.error("Failed to initialize LoadIncrementalHFiles for replicating bulk loaded data.", e);
      throw new IOException(e);
    }
    Configuration newConf = HBaseConfiguration.create(conf);
    newConf.set(LoadIncrementalHFiles.CREATE_TABLE_CONF_KEY, "no");
    loadHFiles.setConf(newConf);
    TableName tableName = TableName.valueOf(tableNameString);
    Table table = this.connection.getTable(tableName);
    // Prepare the queue of hfiles to be loaded (replicated)
    Deque<LoadQueueItem> queue = new LinkedList<>();
    loadHFiles.prepareHFileQueue(stagingDir, table, queue, false);
    if (queue.isEmpty()) {
      LOG.warn("Replication process did not find any files to replicate in directory " + stagingDir.toUri());
      return null;
    }
    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
      fsDelegationToken.acquireDelegationToken(sinkFs);
      // Set the staging directory which LoadIncrementalHFiles will use for loading the data
      loadHFiles.setBulkToken(stagingDir.toString());
      doBulkLoad(loadHFiles, table, queue, locator, maxRetries);
    } finally {
      cleanup(stagingDir.toString(), table);
    }
  }
  return null;
}
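For context, here is a minimal, self-contained sketch of the same pattern: obtain a RegionLocator from the Connection in try-with-resources and hand it to LoadIncrementalHFiles, which uses it to split HFiles on region boundaries before loading. The table name and HFile directory are assumptions for illustration, and the snippet assumes an HBase 1.x-style classpath where LoadIncrementalHFiles lives in org.apache.hadoop.hbase.mapreduce.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("my_table"); // hypothetical table
    Path hfileDir = new Path("/tmp/hfiles");             // hypothetical HFile directory
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(tableName);
         RegionLocator locator = conn.getRegionLocator(tableName)) {
      // The locator tells the loader where region boundaries are, so HFiles
      // spanning more than one region can be split before loading.
      new LoadIncrementalHFiles(conf).doBulkLoad(hfileDir, admin, table, locator);
    }
  }
}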
Use of org.apache.hadoop.hbase.client.RegionLocator in project hbase by apache: class ThriftHBaseServiceHandler, method getRegionLocation.
@Override
public THRegionLocation getRegionLocation(ByteBuffer table, ByteBuffer row, boolean reload) throws TIOError, TException {
  RegionLocator locator = null;
  try {
    locator = getLocator(table);
    byte[] rowBytes = byteBufferToByteArray(row);
    HRegionLocation hrl = locator.getRegionLocation(rowBytes, reload);
    return ThriftUtilities.regionLocationFromHBase(hrl);
  } catch (IOException e) {
    throw getTIOError(e);
  } finally {
    if (locator != null) {
      try {
        locator.close();
      } catch (IOException e) {
        LOG.warn("Couldn't close the locator.", e);
      }
    }
  }
}
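A minimal sketch of the same lookup outside the Thrift layer, using try-with-resources so the locator is closed automatically; the table name and row key here are assumptions for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("my_table"))) {
      // reload=true bypasses the client-side location cache and re-asks meta,
      // which is what the Thrift handler's reload flag controls above.
      HRegionLocation hrl = locator.getRegionLocation(Bytes.toBytes("row-key"), true);
      System.out.println(hrl.getRegionInfo().getRegionNameAsString()
          + " on " + hrl.getServerName());
    }
  }
}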
Use of org.apache.hadoop.hbase.client.RegionLocator in project drill by apache: class BinaryTableGroupScan, method init.
private void init() {
  logger.debug("Getting region locations");
  TableName tableName = TableName.valueOf(hbaseScanSpec.getTableName());
  try (Admin admin = formatPlugin.getConnection().getAdmin();
       RegionLocator locator = formatPlugin.getConnection().getRegionLocator(tableName)) {
    hTableDesc = admin.getTableDescriptor(tableName);
    // Fetch tableStats only once and cache it.
    if (tableStats == null) {
      tableStats = new MapRDBTableStats(getHBaseConf(), hbaseScanSpec.getTableName());
    }
    boolean foundStartRegion = false;
    regionsToScan = new TreeMap<TabletFragmentInfo, String>();
    List<HRegionLocation> regionLocations = locator.getAllRegionLocations();
    for (HRegionLocation regionLocation : regionLocations) {
      HRegionInfo regionInfo = regionLocation.getRegionInfo();
      // Skip regions that precede the one containing the start row.
      if (!foundStartRegion && hbaseScanSpec.getStartRow() != null
          && hbaseScanSpec.getStartRow().length != 0
          && !regionInfo.containsRow(hbaseScanSpec.getStartRow())) {
        continue;
      }
      foundStartRegion = true;
      regionsToScan.put(new TabletFragmentInfo(regionInfo), regionLocation.getHostname());
      // Stop once the region containing the stop row has been added.
      if (hbaseScanSpec.getStopRow() != null && hbaseScanSpec.getStopRow().length != 0
          && regionInfo.containsRow(hbaseScanSpec.getStopRow())) {
        break;
      }
    }
  } catch (Exception e) {
    throw new DrillRuntimeException("Error getting region info for table: " + hbaseScanSpec.getTableName(), e);
  }
  verifyColumns();
}
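Stripped of the Drill-specific bookkeeping, the core of this method is getAllRegionLocations(), which returns one HRegionLocation per region with its key range and hosting server. A minimal sketch, with the table name assumed for illustration:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ListRegionsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("my_table"))) {
      // One location per region: start/end key plus the server hosting it.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        HRegionInfo info = loc.getRegionInfo();
        System.out.println(Bytes.toStringBinary(info.getStartKey()) + " .. "
            + Bytes.toStringBinary(info.getEndKey()) + " -> " + loc.getHostname());
      }
    }
  }
}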
Use of org.apache.hadoop.hbase.client.RegionLocator in project drill by apache: class HBaseGroupScan, method init.
private void init() {
  logger.debug("Getting region locations");
  TableName tableName = TableName.valueOf(hbaseScanSpec.getTableName());
  Connection conn = storagePlugin.getConnection();
  try (Admin admin = conn.getAdmin();
       RegionLocator locator = conn.getRegionLocator(tableName)) {
    this.hTableDesc = admin.getTableDescriptor(tableName);
    List<HRegionLocation> regionLocations = locator.getAllRegionLocations();
    statsCalculator = new TableStatsCalculator(conn, hbaseScanSpec, storagePlugin.getContext().getConfig(), storagePluginConfig);
    boolean foundStartRegion = false;
    regionsToScan = new TreeMap<HRegionInfo, ServerName>();
    for (HRegionLocation regionLocation : regionLocations) {
      HRegionInfo regionInfo = regionLocation.getRegionInfo();
      // Skip regions that precede the one containing the start row.
      if (!foundStartRegion && hbaseScanSpec.getStartRow() != null
          && hbaseScanSpec.getStartRow().length != 0
          && !regionInfo.containsRow(hbaseScanSpec.getStartRow())) {
        continue;
      }
      foundStartRegion = true;
      regionsToScan.put(regionInfo, regionLocation.getServerName());
      scanSizeInBytes += statsCalculator.getRegionSizeInBytes(regionInfo.getRegionName());
      // Stop once the region containing the stop row has been added.
      if (hbaseScanSpec.getStopRow() != null && hbaseScanSpec.getStopRow().length != 0
          && regionInfo.containsRow(hbaseScanSpec.getStopRow())) {
        break;
      }
    }
  } catch (IOException e) {
    throw new DrillRuntimeException("Error getting region info for table: " + hbaseScanSpec.getTableName(), e);
  }
  verifyColumns();
}
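Both Drill init methods share the same region-selection logic: skip regions until one contains the start row, then collect regions until one contains the stop row. A minimal sketch of that logic factored into a standalone helper (the class and method names are hypothetical):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;

public class RegionRangeSketch {
  // Returns the regions overlapping [startRow, stopRow]; a null or empty
  // array means an unbounded side, mirroring the checks in the methods above.
  static List<HRegionInfo> selectRegions(List<HRegionLocation> locations,
      byte[] startRow, byte[] stopRow) {
    List<HRegionInfo> selected = new ArrayList<>();
    boolean foundStartRegion = false;
    for (HRegionLocation location : locations) {
      HRegionInfo regionInfo = location.getRegionInfo();
      if (!foundStartRegion && startRow != null && startRow.length != 0
          && !regionInfo.containsRow(startRow)) {
        continue; // still before the region holding the start row
      }
      foundStartRegion = true;
      selected.add(regionInfo);
      if (stopRow != null && stopRow.length != 0 && regionInfo.containsRow(stopRow)) {
        break; // the region holding the stop row is the last one needed
      }
    }
    return selected;
  }
}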