Use of org.apache.hadoop.hbase.client.SecureBulkLoadClient in project hbase by apache.
The class LoadIncrementalHFiles, method doBulkLoad.
/**
* Perform a bulk load of the given directory into the given
* pre-existing table. This method is not threadsafe.
*
* @param hfofDir the directory that was provided as the output path
* of a job using HFileOutputFormat
* @param admin the Admin
* @param table the table to load into
* @param regionLocator region locator
* @param silence true to ignore unmatched column families
* @param copyFile always copy hfiles if true
* @throws TableNotFoundException if table does not yet exist
*/
public void doBulkLoad(Path hfofDir, final Admin admin, Table table, RegionLocator regionLocator, boolean silence, boolean copyFile) throws TableNotFoundException, IOException {
if (!admin.isTableAvailable(regionLocator.getName())) {
throw new TableNotFoundException("Table " + table.getName() + " is not currently available.");
}
/*
* Checking hfile format is a time-consuming operation, we should have an option to skip
* this step when bulkloading millions of HFiles. See HBASE-13985.
*/
boolean validateHFile = getConf().getBoolean("hbase.loadincremental.validate.hfile", true);
if (!validateHFile) {
LOG.warn("You are skipping HFiles validation, it might cause some data loss if files " + "are not correct. If you fail to read data from your table after using this " + "option, consider removing the files and bulkload again without this option. " + "See HBASE-13985");
}
// LQI queue does not need to be threadsafe -- all operations on this queue
// happen in this thread
Deque<LoadQueueItem> queue = new LinkedList<>();
ExecutorService pool = null;
SecureBulkLoadClient secureClient = null;
try {
prepareHFileQueue(hfofDir, table, queue, validateHFile, silence);
if (queue.isEmpty()) {
LOG.warn("Bulk load operation did not find any files to load in " + "directory " + hfofDir != null ? hfofDir.toUri() : "" + ". Does it contain files in " + "subdirectories that correspond to column family names?");
return;
}
pool = createExecutorService();
secureClient = new SecureBulkLoadClient(table.getConfiguration(), table);
performBulkLoad(admin, table, regionLocator, queue, pool, secureClient, copyFile);
} finally {
cleanup(admin, queue, pool, secureClient);
}
}
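For orientation, here is a minimal sketch of driving this overload end to end. The connection setup, the table name my_table, and the output directory are illustrative assumptions, not part of the snippet above; note that in older releases LoadIncrementalHFiles lives in org.apache.hadoop.hbase.mapreduce rather than org.apache.hadoop.hbase.tool.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;

public class BulkLoadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Optional: skip per-HFile validation when loading very many files (HBASE-13985).
    // conf.setBoolean("hbase.loadincremental.validate.hfile", false);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(TableName.valueOf("my_table"));
         RegionLocator locator = conn.getRegionLocator(table.getName())) {
      LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
      // hfofDir is the output path of a job that used HFileOutputFormat.
      loader.doBulkLoad(new Path("/tmp/hfof-output"), admin, table, locator,
          false /* silence */, false /* copyFile */);
    }
  }
}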
Use of org.apache.hadoop.hbase.client.SecureBulkLoadClient in project hbase by apache.
The class LoadIncrementalHFiles, method doBulkLoad (map-based overload).
/**
* Perform a bulk load of the given directory into the given
* pre-existing table. This method is not threadsafe.
*
* @param map map of family to List of hfiles
* @param admin the Admin
* @param table the table to load into
* @param regionLocator region locator
* @param silence true to ignore unmatched column families
* @param copyFile always copy hfiles if true
* @throws TableNotFoundException if table does not yet exist
*/
public void doBulkLoad(Map<byte[], List<Path>> map, final Admin admin, Table table, RegionLocator regionLocator, boolean silence, boolean copyFile) throws TableNotFoundException, IOException {
if (!admin.isTableAvailable(regionLocator.getName())) {
throw new TableNotFoundException("Table " + table.getName() + " is not currently available.");
}
// LQI queue does not need to be threadsafe -- all operations on this queue
// happen in this thread
Deque<LoadQueueItem> queue = new LinkedList<>();
ExecutorService pool = null;
SecureBulkLoadClient secureClient = null;
try {
prepareHFileQueue(map, table, queue, silence);
if (queue.isEmpty()) {
LOG.warn("Bulk load operation did not get any files to load");
return;
}
pool = createExecutorService();
secureClient = new SecureBulkLoadClient(table.getConfiguration(), table);
// Derive the source FileSystem from the first HFile path; all input paths
// are expected to live on the same filesystem, so stop after the first one.
deriveFs:
for (Map.Entry<byte[], List<Path>> entry : map.entrySet()) {
for (Path p : entry.getValue()) {
fs = p.getFileSystem(table.getConfiguration());
break deriveFs;
}
}
performBulkLoad(admin, table, regionLocator, queue, pool, secureClient, copyFile);
} finally {
cleanup(admin, queue, pool, secureClient);
}
}
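The map-based overload can be driven the same way once the caller has grouped the HFiles by column family. A hedged sketch follows; the family name cf, the file paths, and the helper name bulkLoadByFamily are placeholders, and loader, admin, table, and locator are set up as in the previous example.

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.util.Bytes;

public class BulkLoadByFamilyExample {
  // Hypothetical helper: loads two pre-built HFiles for family "cf".
  static void bulkLoadByFamily(LoadIncrementalHFiles loader, Admin admin, Table table,
      RegionLocator locator) throws Exception {
    // Keys are family names as byte[]; a byte-array comparator is required for the map.
    Map<byte[], List<Path>> family2Files = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    family2Files.put(Bytes.toBytes("cf"), Arrays.asList(
        new Path("/tmp/hfiles/cf/hfile-0"), new Path("/tmp/hfiles/cf/hfile-1")));
    loader.doBulkLoad(family2Files, admin, table, locator,
        false /* silence */, false /* copyFile */);
  }
}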
Use of org.apache.hadoop.hbase.client.SecureBulkLoadClient in project hbase by apache.
The class LoadIncrementalHFiles, method buildClientServiceCallable.
protected ClientServiceCallable<byte[]> buildClientServiceCallable(final Connection conn, TableName tableName, byte[] first, Collection<LoadQueueItem> lqis, boolean copyFile) {
final List<Pair<byte[], String>> famPaths = new ArrayList<>(lqis.size());
for (LoadQueueItem lqi : lqis) {
famPaths.add(Pair.newPair(lqi.family, lqi.hfilePath.toString()));
}
return new ClientServiceCallable<byte[]>(conn, tableName, first, rpcControllerFactory.newController()) {
@Override
protected byte[] rpcCall() throws Exception {
SecureBulkLoadClient secureClient = null;
boolean success = false;
try {
LOG.debug("Going to connect to server " + getLocation() + " for row " + Bytes.toStringBinary(getRow()) + " with hfile group " + LoadIncrementalHFiles.this.toString(famPaths));
byte[] regionName = getLocation().getRegionInfo().getRegionName();
try (Table table = conn.getTable(getTableName())) {
secureClient = new SecureBulkLoadClient(getConf(), table);
success = secureClient.secureBulkLoadHFiles(getStub(), famPaths, regionName, assignSeqIds, fsDelegationToken.getUserToken(), bulkToken, copyFile);
}
return success ? regionName : null;
} finally {
// Best effort: move files that were not imported back from the staging
// directory to their original location in the user directory.
if (secureClient != null && !success) {
FileSystem targetFs = FileSystem.get(getConf());
// fs is the source filesystem
if (fs == null) {
fs = lqis.iterator().next().hfilePath.getFileSystem(getConf());
}
// If the source and target are on the same HDFS, try to move the files back,
// because previously we moved them to the staging directory.
if (FSHDFSUtils.isSameHdfs(getConf(), fs, targetFs)) {
for (Pair<byte[], String> el : famPaths) {
Path hfileStagingPath = null;
Path hfileOrigPath = new Path(el.getSecond());
try {
hfileStagingPath = new Path(new Path(bulkToken, Bytes.toString(el.getFirst())), hfileOrigPath.getName());
if (targetFs.rename(hfileStagingPath, hfileOrigPath)) {
LOG.debug("Moved back file " + hfileOrigPath + " from " + hfileStagingPath);
} else if (targetFs.exists(hfileStagingPath)) {
LOG.debug("Unable to move back file " + hfileOrigPath + " from " + hfileStagingPath);
}
} catch (Exception ex) {
LOG.debug("Unable to move back file " + hfileOrigPath + " from " + hfileStagingPath, ex);
}
}
}
}
}
}
};
}
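Inside LoadIncrementalHFiles, the returned callable is executed through the client RPC retry machinery. The following is a hedged sketch of that call pattern, assuming the 2.x-era RpcRetryingCallerFactory API; the local variable names are illustrative, and conn, tableName, first, lqis, and copyFile mirror the method parameters above.

// Execute the bulk-load RPC with retries; the result is the region name on
// success, or null when the region has split and the items must be re-grouped.
ClientServiceCallable<byte[]> serviceCallable =
    buildClientServiceCallable(conn, tableName, first, lqis, copyFile);
byte[] region = RpcRetryingCallerFactory.instantiate(getConf(), null)
    .<byte[]> newCaller()
    .callWithRetries(serviceCallable, Integer.MAX_VALUE);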