Use of org.apache.hadoop.hbase.tool.BulkLoadHFilesTool in project HBase by Apache.
The class HFileReplicator, method doBulkLoad.
private void doBulkLoad(Configuration conf, TableName tableName, Path stagingDir,
    Deque<LoadQueueItem> queue, int maxRetries) throws IOException {
  BulkLoadHFilesTool loader = new BulkLoadHFilesTool(conf);
  // Set the staging directory that BulkLoadHFilesTool will load the data from.
  loader.setBulkToken(stagingDir.toString());
  // Pass along the list of cluster ids that have already processed this bulk load
  // event, so replication of the event does not loop back to the source.
  loader.setClusterIds(sourceClusterIds);
  for (int count = 0; !queue.isEmpty(); count++) {
    if (count != 0) {
      LOG.warn("Error replicating HFiles; retry={} with {} remaining.", count, queue.size());
    }
    if (maxRetries != 0 && count >= maxRetries) {
      throw new IOException("Retry attempted " + count + " times without completing, bailing.");
    }
    // Try the bulk load; items that fail are left on the queue for the next attempt.
    loader.loadHFileQueue(connection, tableName, queue, false);
  }
}
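For context, a hedged sketch of how a caller inside HFileReplicator might build the queue this method consumes: each staged HFile becomes one LoadQueueItem keyed by column family. The familyToHFile map and the retry limit of 10 are illustrative assumptions, not taken from the HBase source.

  // Illustrative only: familyToHFile (column family -> staged HFile path) is a
  // hypothetical stand-in for however the replicated HFiles were staged.
  Deque<LoadQueueItem> queue = new LinkedList<>();
  for (Map.Entry<byte[], Path> entry : familyToHFile.entrySet()) {
    queue.add(new LoadQueueItem(entry.getKey(), entry.getValue()));
  }
  doBulkLoad(conf, tableName, stagingDir, queue, 10);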
Use of org.apache.hadoop.hbase.tool.BulkLoadHFilesTool in project HBase by Apache.
The class TestBulkLoadReplication, method bulkLoadOnCluster.
protected void bulkLoadOnCluster(TableName tableName, byte[] row, byte[] value,
    HBaseTestingUtil cluster) throws Exception {
  String bulkLoadFilePath = createHFileForFamilies(row, value, cluster.getConfiguration());
  copyToHdfs(bulkLoadFilePath, cluster.getDFSCluster());
  BulkLoadHFilesTool bulkLoadHFilesTool = new BulkLoadHFilesTool(cluster.getConfiguration());
  bulkLoadHFilesTool.bulkLoad(tableName, BULK_LOAD_BASE_DIR);
}
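The helper createHFileForFamilies is not shown on this page. A minimal sketch of what it might look like, assuming a JUnit TemporaryFolder rule named testFolder and a hypothetical f:q family/qualifier (the real test helper may differ):

  private String createHFileForFamilies(byte[] row, byte[] value, Configuration conf)
      throws IOException {
    Path hFilePath = new Path(testFolder.newFolder().getAbsolutePath(), "test_hfile");
    // Write a single-cell HFile to the local file system; the caller copies it to HDFS.
    try (HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
        .withPath(FileSystem.getLocal(conf), hFilePath)
        .withFileContext(new HFileContextBuilder().build())
        .create()) {
      writer.append(new KeyValue(row, Bytes.toBytes("f"), Bytes.toBytes("q"), value));
    }
    return hFilePath.toString();
  }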
Use of org.apache.hadoop.hbase.tool.BulkLoadHFilesTool in project HBase by Apache.
The class TestBulkLoadReplicationHFileRefs, method bulkLoadOnCluster.
protected void bulkLoadOnCluster(TableName tableName, byte[] family) throws Exception {
  String bulkLoadFilePath = createHFileForFamilies(family);
  copyToHdfs(family, bulkLoadFilePath, UTIL1.getDFSCluster());
  BulkLoadHFilesTool bulkLoadHFilesTool = new BulkLoadHFilesTool(UTIL1.getConfiguration());
  bulkLoadHFilesTool.bulkLoad(tableName, BULK_LOAD_BASE_DIR);
}
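Here copyToHdfs stages the HFile under a per-family subdirectory, since bulkLoad expects the HFileOutputFormat layout of <dir>/<family>/<hfile>. A hypothetical sketch of that helper (the real test's version may differ):

  private void copyToHdfs(byte[] family, String bulkLoadFilePath, MiniDFSCluster cluster)
      throws Exception {
    Path familyDir = new Path(BULK_LOAD_BASE_DIR, Bytes.toString(family));
    FileSystem fs = cluster.getFileSystem();
    fs.mkdirs(familyDir);
    // Stage the locally written HFile where BulkLoadHFilesTool will look for it.
    fs.copyFromLocalFile(new Path(bulkLoadFilePath), familyDir);
  }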
Use of org.apache.hadoop.hbase.tool.BulkLoadHFilesTool in project HBase by Apache.
The class TestSecureBulkLoadManager, method doBulkloadWithoutRetry.
private void doBulkloadWithoutRetry(Path dir) throws Exception {
  BulkLoadHFilesTool h = new BulkLoadHFilesTool(conf) {
    @Override
    protected void bulkLoadPhase(AsyncClusterConnection conn, TableName tableName,
        Deque<LoadQueueItem> queue, Multimap<ByteBuffer, LoadQueueItem> regionGroups,
        boolean copyFiles, Map<LoadQueueItem, ByteBuffer> item2RegionMap) throws IOException {
      super.bulkLoadPhase(conn, tableName, queue, regionGroups, copyFiles, item2RegionMap);
      // throw exception to avoid retry
      throw new MyExceptionToAvoidRetry();
    }
  };
  try {
    h.bulkLoad(TABLE, dir);
    Assert.fail("MyExceptionToAvoidRetry is expected");
  } catch (MyExceptionToAvoidRetry e) {
    // expected
  }
}
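The sentinel exception class is not shown on this page. A minimal sketch, assuming it is a static nested class of the test that extends DoNotRetryIOException (an IOException subtype, so the override's throws clause is satisfied):

  // A sketch under the assumptions above; the real declaration may differ.
  public static class MyExceptionToAvoidRetry extends DoNotRetryIOException {
  }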
Use of org.apache.hadoop.hbase.tool.BulkLoadHFilesTool in project HBase by Apache.
The class IntegrationTestImportTsv, method doLoadIncrementalHFiles.
/**
* Verify the data described by <code>simple_tsv</code> matches
* <code>simple_expected</code>.
*/
protected void doLoadIncrementalHFiles(Path hfiles, TableName tableName) throws Exception {
  String[] args = { hfiles.toString(), tableName.getNameAsString() };
  LOG.info(format("Running LoadIncrementalHFiles with args: %s", Arrays.asList(args)));
  assertEquals("Loading HFiles failed.", 0,
    ToolRunner.run(new BulkLoadHFilesTool(getConf()), args));
  Table table = null;
  Scan scan = new Scan() {
    {
      setCacheBlocks(false);
      setCaching(1000);
    }
  };
  try {
    table = util.getConnection().getTable(tableName);
    Iterator<Result> resultsIt = table.getScanner(scan).iterator();
    Iterator<KeyValue> expectedIt = simple_expected.iterator();
    while (resultsIt.hasNext() && expectedIt.hasNext()) {
      Result r = resultsIt.next();
      for (Cell actual : r.rawCells()) {
        assertTrue("Ran out of expected values prematurely!", expectedIt.hasNext());
        KeyValue expected = expectedIt.next();
        assertEquals("Scan produced surprising result", 0,
          CellComparator.getInstance().compare(expected, actual));
      }
    }
    assertFalse("Did not consume all expected values.", expectedIt.hasNext());
    assertFalse("Did not consume all scan results.", resultsIt.hasNext());
  } finally {
    if (null != table) {
      table.close();
    }
  }
}
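Running the tool through ToolRunner exercises the command-line entry point. The equivalent direct API call, sketched here under the same configuration and paths, skips argument parsing and returns a map from each loaded queue item to the region it landed in:

  BulkLoadHFilesTool loader = new BulkLoadHFilesTool(getConf());
  Map<LoadQueueItem, ByteBuffer> loaded = loader.bulkLoad(tableName, hfiles);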