Example usage of org.apache.accumulo.core.clientImpl.TableOperationsImpl in the Apache Accumulo project:
the load method of the BulkImport class, which computes a file-to-tablet mapping for a bulk import directory and submits it as a FATE operation, retrying if a concurrent merge invalidates the mapping.
@Override
public void load()
    throws TableNotFoundException, IOException, AccumuloException, AccumuloSecurityException {
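  // Resolve the table id and validate the source directory on its volume's FileSystem.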
  TableId tableId = context.getTableId(tableName);
  FileSystem fs = VolumeConfiguration.fileSystemForPath(dir, context.getHadoopConf());
  Path srcPath = checkPath(fs, dir);
  SortedMap<KeyExtent, Bulk.Files> mappings;
  TableOperationsImpl tableOps = new TableOperationsImpl(context);
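  // Look up the table's TABLE_BULK_MAX_TABLETS property; 0 (the default) means unlimited.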
  int maxTablets = 0;
  for (var prop : tableOps.getProperties(tableName)) {
    if (prop.getKey().equals(Property.TABLE_BULK_MAX_TABLETS.getKey())) {
      maxTablets = Integer.parseInt(prop.getValue());
      break;
    }
  }
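  // Backoff policy for retrying the FATE call: start at 100ms, grow by 1.5x, cap waits at 2 minutes.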
  Retry retry = Retry.builder().infiniteRetries().retryAfter(100, MILLISECONDS)
      .incrementBy(100, MILLISECONDS).maxWait(2, MINUTES).backOffFactor(1.5)
      .logInterval(3, MINUTES).createRetry();
  // retry if a merge occurs
  boolean shouldRetry = true;
  while (shouldRetry) {
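    // Map each input file to the tablets it overlaps, either by inspecting the files
    // themselves or from the caller-supplied LoadPlan.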
    if (plan == null) {
      mappings = computeMappingFromFiles(fs, tableId, srcPath, maxTablets);
    } else {
      mappings = computeMappingFromPlan(fs, tableId, srcPath, maxTablets);
    }
    if (mappings.isEmpty()) {
      if (ignoreEmptyDir) {
        log.info("Attempted to import files from empty directory - {}. Zero files imported",
            srcPath);
        return;
      } else {
        throw new IllegalArgumentException("Attempted to import zero files from " + srcPath);
      }
    }
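    // Serialize the computed mapping into the source directory so the servers running the
    // FATE operation can read it.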
    BulkSerialize.writeLoadMapping(mappings, srcPath.toString(), fs::create);
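    // FATE arguments: table id, source directory, and the setTime flag.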
    List<ByteBuffer> args = Arrays.asList(ByteBuffer.wrap(tableId.canonical().getBytes(UTF_8)),
        ByteBuffer.wrap(srcPath.toString().getBytes(UTF_8)),
        ByteBuffer.wrap((setTime + "").getBytes(UTF_8)));
    try {
      tableOps.doBulkFateOperation(args, tableName);
      shouldRetry = false;
    } catch (AccumuloBulkMergeException ae) {
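      // A concurrent merge invalidated the mapping; check that any caller-supplied load plan
      // is still usable, wait out the backoff, and recompute the mapping on the next pass.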
      if (plan != null) {
        checkPlanForSplits(ae);
      }
      try {
        retry.waitForNextAttempt();
      } catch (InterruptedException e) {
        throw new RuntimeException(e);
      }
      log.info("{}. Retrying bulk import to {}", ae.getMessage(), tableName);
    }
  }
}
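For context, applications do not call this load() directly; they reach it through the fluent bulk import API on TableOperations. Below is a minimal sketch, assuming an Accumulo 2.x client; the properties path, directory, and table name are hypothetical placeholders, and ignoreEmptyDir requires 2.1+.

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;

public class BulkImportExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical client properties file, bulk directory, and table name.
    try (AccumuloClient client =
        Accumulo.newClient().from("/path/to/accumulo-client.properties").build()) {
      client.tableOperations().importDirectory("/tmp/bulk-load-dir")
          .to("mytable")          // target table
          .tableTime(true)        // becomes the setTime flag serialized into the FATE args above
          .ignoreEmptyDir(true)   // drives the ignoreEmptyDir branch in load() (2.1+)
          .load();                // ultimately runs the BulkImport.load() shown above
    }
  }
}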