use of org.apache.hadoop.hbase.client.Row in project hbase by apache.
the class ReplicationSink method batch.
/**
 * Do the changes and handle the pool
 * @param tableName table to insert into
 * @param allRows list of actions
 * @param batchRowSizeThreshold rowSize threshold for batch mutation
 */
private void batch(TableName tableName, Collection<List<Row>> allRows, int batchRowSizeThreshold)
    throws IOException {
  if (allRows.isEmpty()) {
    return;
  }
  AsyncTable<?> table = getConnection().getTable(tableName);
  List<Future<?>> futures = new ArrayList<>();
  for (List<Row> rows : allRows) {
    List<List<Row>> batchRows;
    if (rows.size() > batchRowSizeThreshold) {
      batchRows = Lists.partition(rows, batchRowSizeThreshold);
    } else {
      batchRows = Collections.singletonList(rows);
    }
    futures.addAll(batchRows.stream().map(table::batchAll).collect(Collectors.toList()));
  }
  for (Future<?> future : futures) {
    try {
      FutureUtils.get(future);
    } catch (RetriesExhaustedException e) {
      if (e.getCause() instanceof TableNotFoundException) {
        throw new TableNotFoundException("'" + tableName + "'");
      }
      throw e;
    }
  }
}
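The partitioning step is easy to try in isolation. Below is a minimal, standalone sketch of just that step using plain Guava Lists.partition (ReplicationSink itself uses HBase's relocated thirdparty copy); the row keys, column names, and threshold are made-up values.
// Standalone sketch of the partitioning step above; names and values are illustrative only.
List<Row> rows = new ArrayList<>();
for (int i = 0; i < 25; i++) {
  rows.add(new Put(Bytes.toBytes("row-" + i))
    .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes(i)));
}
int batchRowSizeThreshold = 10;
List<List<Row>> batchRows = rows.size() > batchRowSizeThreshold
  ? Lists.partition(rows, batchRowSizeThreshold)   // sub-lists of 10, 10 and 5 rows
  : Collections.singletonList(rows);
// Each sub-list would then be submitted as one AsyncTable.batchAll(...) future.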
use of org.apache.hadoop.hbase.client.Row in project hbase by apache.
the class TestIncrementTimeRange method checkHTableInterfaceMethods.
private void checkHTableInterfaceMethods() throws Exception {
  long time = EnvironmentEdgeManager.currentTime();
  mee.setValue(time);
  hTableInterface.put(new Put(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, Bytes.toBytes(1L)));
  checkRowValue(ROW_A, Bytes.toBytes(1L));

  time = EnvironmentEdgeManager.currentTime();
  mee.setValue(time);
  TimeRange range10 = TimeRange.between(1, time + 10);
  hTableInterface.increment(new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 10L)
    .setTimeRange(range10.getMin(), range10.getMax()));
  checkRowValue(ROW_A, Bytes.toBytes(11L));
  assertEquals(MyObserver.tr10.getMin(), range10.getMin());
  assertEquals(MyObserver.tr10.getMax(), range10.getMax());

  time = EnvironmentEdgeManager.currentTime();
  mee.setValue(time);
  TimeRange range2 = TimeRange.between(1, time + 20);
  List<Row> actions = Arrays.asList(new Row[] {
    new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L)
      .setTimeRange(range2.getMin(), range2.getMax()),
    new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L)
      .setTimeRange(range2.getMin(), range2.getMax()) });
  Object[] results = new Object[actions.size()];
  hTableInterface.batch(actions, results);
  assertEquals(MyObserver.tr2.getMin(), range2.getMin());
  assertEquals(MyObserver.tr2.getMax(), range2.getMax());
  for (Object result : results) {
    assertTrue(result instanceof Result);
  }
  checkRowValue(ROW_A, Bytes.toBytes(15L));
  hTableInterface.close();
}
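For context, the same batch-of-Increments call also works outside the coprocessor test harness. A minimal sketch, assuming an open Connection conn and TableName tableName plus the ROW_A / TEST_FAMILY / qualifierCol1 coordinates used above:
// Minimal sketch only; `conn` and `tableName` are assumed to exist.
try (Table table = conn.getTable(tableName)) {
  TimeRange range = TimeRange.between(1, EnvironmentEdgeManager.currentTime() + 20);
  List<Row> actions = Arrays.<Row>asList(
    new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L)
      .setTimeRange(range.getMin(), range.getMax()),
    new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L)
      .setTimeRange(range.getMin(), range.getMax()));
  Object[] results = new Object[actions.size()];
  // batch() fills `results` in the same order as `actions`;
  // each successful Increment yields a Result holding the updated counter cell.
  table.batch(actions, results);
  long counter = Bytes.toLong(((Result) results[0]).getValue(TEST_FAMILY, qualifierCol1));
}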
use of org.apache.hadoop.hbase.client.Row in project hbase by apache.
the class TestAppendTimeRange method testHTableInterfaceMethods.
@Test
public void testHTableInterfaceMethods() throws Exception {
  try (Table table = util.createTable(TableName.valueOf(name.getMethodName()), TEST_FAMILY)) {
    table.put(new Put(ROW).addColumn(TEST_FAMILY, QUAL, VALUE));
    long time = EnvironmentEdgeManager.currentTime();
    mee.setValue(time);
    table.put(new Put(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("a")));
    checkRowValue(table, ROW, Bytes.toBytes("a"));

    time = EnvironmentEdgeManager.currentTime();
    mee.setValue(time);
    TimeRange range10 = TimeRange.between(1, time + 10);
    table.append(new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("b"))
      .setTimeRange(range10.getMin(), range10.getMax()));
    checkRowValue(table, ROW, Bytes.toBytes("ab"));
    assertEquals(MyObserver.tr10.getMin(), range10.getMin());
    assertEquals(MyObserver.tr10.getMax(), range10.getMax());

    time = EnvironmentEdgeManager.currentTime();
    mee.setValue(time);
    TimeRange range2 = TimeRange.between(1, time + 20);
    List<Row> actions = Arrays.asList(new Row[] {
      new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("c"))
        .setTimeRange(range2.getMin(), range2.getMax()),
      new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("c"))
        .setTimeRange(range2.getMin(), range2.getMax()) });
    Object[] results = new Object[actions.size()];
    table.batch(actions, results);
    assertEquals(MyObserver.tr2.getMin(), range2.getMin());
    assertEquals(MyObserver.tr2.getMax(), range2.getMax());
    for (Object result : results) {
      assertTrue(result instanceof Result);
    }
    checkRowValue(table, ROW, Bytes.toBytes("abcc"));
  }
}
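The single-operation counterpart of the batched Appends is sometimes easier to reason about. A minimal sketch reusing the table, ROW, TEST_FAMILY, and QUAL from the test above:
// Minimal single-Append sketch; the TimeRange constrains which existing cell
// versions the server reads before concatenating the new value onto them.
TimeRange range = TimeRange.between(1, EnvironmentEdgeManager.currentTime() + 10);
Result result = table.append(new Append(ROW)
  .addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("b"))
  .setTimeRange(range.getMin(), range.getMax()));
byte[] appended = result.getValue(TEST_FAMILY, QUAL);   // e.g. "ab" when the cell held "a"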
use of org.apache.hadoop.hbase.client.Row in project bagheera by mozilla-metrics.
the class FlushResult method flushTable.
private FlushResult flushTable(HTableInterface table, List<Row> puts) throws IOException, InterruptedException {
  List<Row> currentAttempt = puts;
  Object[] batch = null;
  FlushResult result = null;
  int successfulPuts = 0;
  int successfulDeletes = 0;
  TimerContext htableTimerContext = htableTimer.time();
  try {
    for (int attempt = 0; attempt < retryCount; attempt++) {
      // TODO: wrap each attempt in a try/catch?
      batch = table.batch(currentAttempt);
      table.flushCommits();
      List<Row> fails = new ArrayList<Row>(currentAttempt.size());
      if (batch != null) {
        for (int i = 0; i < batch.length; i++) {
          if (batch[i] == null) {
            fails.add(currentAttempt.get(i));
          } else {
            // figure out what type it was
            Row row = currentAttempt.get(i);
            if (row instanceof Delete) {
              successfulDeletes++;
            } else if (row instanceof Put) {
              successfulPuts++;
            } else {
              LOG.warn("We succeeded in flushing something that's neither a Delete nor a Put");
            }
          }
        }
        currentAttempt = fails;
        if (currentAttempt.isEmpty()) {
          break;
        }
      } else {
        // something badly broke, retry the whole list.
        LOG.error("Result of table.batch() was null");
      }
    }
    int failedPuts = 0;
    int failedDeletes = 0;
    if (!currentAttempt.isEmpty()) {
      for (Row row : currentAttempt) {
        if (row instanceof Delete) {
          failedDeletes++;
        } else if (row instanceof Put) {
          failedPuts++;
        } else {
          LOG.error("We failed to flush something that's neither a Delete nor a Put");
        }
      }
    }
    result = new FlushResult(failedPuts, failedDeletes, successfulPuts, successfulDeletes);
  } finally {
    htableTimerContext.stop();
  }
  return result;
}
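The retry loop hinges on the null-vs-non-null convention of the old HTableInterface.batch(List<Row>) return value. A minimal sketch of just that bookkeeping for one attempt, assuming the same table and puts arguments:
// Minimal sketch of one attempt's bookkeeping; `table` and `puts` as in flushTable.
Object[] outcomes = table.batch(puts);
List<Row> retries = new ArrayList<Row>();
for (int i = 0; i < outcomes.length; i++) {
  if (outcomes[i] == null) {
    // Not applied: remember the original action so the next attempt can resend it.
    retries.add(puts.get(i));
  }
}
// `retries` becomes the next attempt's list; an empty list ends the retry loop early.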
use of org.apache.hadoop.hbase.client.Row in project janusgraph by JanusGraph.
the class HBaseStoreManager method mutateMany.
@Override
public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> mutations, StoreTransaction txh) throws BackendException {
  Long putTimestamp = null;
  Long delTimestamp = null;
  MaskedTimestamp commitTime = null;
  if (assignTimestamp) {
    commitTime = new MaskedTimestamp(txh);
    putTimestamp = commitTime.getAdditionTime(times);
    delTimestamp = commitTime.getDeletionTime(times);
  }
  // In case of an addition and deletion with identical timestamps, the
  // deletion tombstone wins.
  // https://hbase.apache.org/book/versions.html#d244e4250
  final Map<StaticBuffer, Pair<List<Put>, Delete>> commandsPerKey =
    convertToCommands(mutations, putTimestamp, delTimestamp);
  // actual batch operation
  final List<Row> batch = new ArrayList<>(commandsPerKey.size());
  // convert sorted commands into representation required for 'batch' operation
  for (Pair<List<Put>, Delete> commands : commandsPerKey.values()) {
    if (commands.getFirst() != null && !commands.getFirst().isEmpty())
      batch.addAll(commands.getFirst());
    if (commands.getSecond() != null)
      batch.add(commands.getSecond());
  }
  try {
    Table table = null;
    try {
      table = cnx.getTable(tableName);
      table.batch(batch, new Object[batch.size()]);
    } finally {
      IOUtils.closeQuietly(table);
    }
  } catch (IOException | InterruptedException e) {
    throw new TemporaryBackendException(e);
  }
  if (commitTime != null) {
    sleepAfterWrite(commitTime);
  }
}
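The tombstone rule cited in the comment is easy to demonstrate directly. A minimal sketch with hypothetical table, key, cf, and col names, showing why identical timestamps would let a deletion mask an addition:
// Minimal sketch of the tie-breaking rule; all names here are illustrative.
long ts = 1000L;
List<Row> batch = new ArrayList<>();
batch.add(new Put(key).addColumn(cf, col, ts, Bytes.toBytes("value")));
batch.add(new Delete(key).addColumn(cf, col, ts));
table.batch(batch, new Object[batch.size()]);
// A Get now returns no cell at `ts`: the delete tombstone masks the put with the
// same timestamp, hence the separate putTimestamp and delTimestamp derived above.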