Use of java.util.concurrent.ExecutionException in project hbase by apache.
In class TestAsyncTable, method testCheckAndPut.
@Test
public void testCheckAndPut() throws InterruptedException, ExecutionException {
  AsyncTableBase table = getTable.get();
  AtomicInteger successCount = new AtomicInteger(0);
  AtomicInteger successIndex = new AtomicInteger(-1);
  int count = 10;
  CountDownLatch latch = new CountDownLatch(count);
  // Fire off 'count' concurrent check-and-put calls. Each one checks that the cell
  // is still absent (null), so exactly one of the racing puts can succeed.
  IntStream.range(0, count).forEach(i -> table
    .checkAndPut(row, FAMILY, QUALIFIER, null,
      new Put(row).addColumn(FAMILY, QUALIFIER, concat(VALUE, i)))
    .thenAccept(x -> {
      if (x) {
        successCount.incrementAndGet();
        successIndex.set(i);
      }
      latch.countDown();
    }));
  latch.await();
  assertEquals(1, successCount.get());
  // The stored value must be the one written by the single winning put.
  String actual = Bytes.toString(table.get(new Get(row)).get().getValue(FAMILY, QUALIFIER));
  assertTrue(actual.endsWith(Integer.toString(successIndex.get())));
}
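The check-and-put here is a server-side compare-and-set: since every call checks that the cell is still null, only one of the ten racing writes can win. A minimal, HBase-free sketch of the same race using only java.util.concurrent primitives (all names are illustrative, not part of the HBase API):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.IntStream;

public class CasRaceDemo {
  public static void main(String[] args) {
    // The "cell" starts absent (null); each task tries to claim it, mirroring
    // checkAndPut(row, family, qualifier, null, put).
    AtomicReference<String> cell = new AtomicReference<>(null);
    AtomicInteger successCount = new AtomicInteger(0);
    CompletableFuture<?>[] futures = IntStream.range(0, 10)
      .mapToObj(i -> CompletableFuture.runAsync(() -> {
        // compareAndSet succeeds only while the current value is still null,
        // so exactly one of the ten racing tasks wins.
        if (cell.compareAndSet(null, "value-" + i)) {
          successCount.incrementAndGet();
        }
      }))
      .toArray(CompletableFuture[]::new);
    CompletableFuture.allOf(futures).join();
    System.out.println(cell.get() + " won; successes = " + successCount.get()); // always 1
  }
}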
Use of java.util.concurrent.ExecutionException in project hbase by apache.
In class TestAsyncTable, method testCheckAndDelete.
@Test
public void testCheckAndDelete() throws InterruptedException, ExecutionException {
  AsyncTableBase table = getTable.get();
  int count = 10;
  CountDownLatch putLatch = new CountDownLatch(count + 1);
  // Seed the row: one shared guard cell plus 'count' indexed cells.
  table.put(new Put(row).addColumn(FAMILY, QUALIFIER, VALUE)).thenRun(() -> putLatch.countDown());
  IntStream.range(0, count).forEach(i -> table
    .put(new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), VALUE))
    .thenRun(() -> putLatch.countDown()));
  putLatch.await();
  AtomicInteger successCount = new AtomicInteger(0);
  AtomicInteger successIndex = new AtomicInteger(-1);
  CountDownLatch deleteLatch = new CountDownLatch(count);
  // Each delete is guarded by a check on the shared guard cell and also deletes that
  // guard, so only the first check-and-delete to be applied can succeed.
  IntStream.range(0, count).forEach(i -> table
    .checkAndDelete(row, FAMILY, QUALIFIER, VALUE,
      new Delete(row).addColumn(FAMILY, QUALIFIER).addColumn(FAMILY, concat(QUALIFIER, i)))
    .thenAccept(x -> {
      if (x) {
        successCount.incrementAndGet();
        successIndex.set(i);
      }
      deleteLatch.countDown();
    }));
  deleteLatch.await();
  assertEquals(1, successCount.get());
  Result result = table.get(new Get(row)).get();
  // Only the winner's indexed cell is gone; every other cell keeps its value.
  IntStream.range(0, count).forEach(i -> {
    if (i == successIndex.get()) {
      assertFalse(result.containsColumn(FAMILY, concat(QUALIFIER, i)));
    } else {
      assertArrayEquals(VALUE, result.getValue(FAMILY, concat(QUALIFIER, i)));
    }
  });
}
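Both tests rely on the same synchronization idiom: count down a CountDownLatch in the completion callback of every future, then await() before asserting, so the assertions never race the asynchronous work. A minimal, self-contained sketch of that idiom (names are illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.IntStream;

public class LatchPatternDemo {
  public static void main(String[] args) throws InterruptedException {
    int count = 10;
    CountDownLatch latch = new CountDownLatch(count);
    AtomicInteger completed = new AtomicInteger(0);
    IntStream.range(0, count).forEach(i ->
      CompletableFuture.runAsync(completed::incrementAndGet)
        // countDown() in the continuation, mirroring thenAccept/thenRun above.
        .thenRun(latch::countDown));
    latch.await(); // block until all 10 callbacks have fired
    System.out.println("completed = " + completed.get()); // safe to assert now
  }
}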
Use of java.util.concurrent.ExecutionException in project hbase by apache.
In class TestAsyncTable, method testAppend.
@Test
public void testAppend() throws InterruptedException, ExecutionException {
  AsyncTableBase table = getTable.get();
  int count = 10;
  CountDownLatch latch = new CountDownLatch(count);
  char suffix = ':';
  AtomicLong suffixCount = new AtomicLong(0L);
  // Each append adds "i:" to the cell; each callback counts the suffixes visible in
  // the returned value at that point.
  IntStream.range(0, count).forEachOrdered(i -> table
    .append(new Append(row).add(FAMILY, QUALIFIER, Bytes.toBytes("" + i + suffix)))
    .thenAccept(r -> {
      suffixCount.addAndGet(
        Bytes.toString(r.getValue(FAMILY, QUALIFIER)).chars().filter(x -> x == suffix).count());
      latch.countDown();
    }));
  latch.await();
  // The k-th append to be applied sees k suffixes, so the per-callback counts are a
  // permutation of 1..count and sum to count * (count + 1) / 2.
  assertEquals((1 + count) * count / 2, suffixCount.get());
  String value = Bytes.toString(
    table.get(new Get(row).addColumn(FAMILY, QUALIFIER)).get().getValue(FAMILY, QUALIFIER));
  int[] actual = Arrays.stream(value.split("" + suffix))
    .mapToInt(Integer::parseInt).sorted().toArray();
  assertArrayEquals(IntStream.range(0, count).toArray(), actual);
}
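Why (1 + count) * count / 2: appends are atomic, so the k-th append that the server applies returns a value containing exactly k suffix characters, and the total across all callbacks is 1 + 2 + ... + count regardless of arrival order. A tiny sequential simulation of that bookkeeping (illustrative only):

import java.util.stream.IntStream;

public class AppendSumDemo {
  public static void main(String[] args) {
    int count = 10;
    StringBuilder cell = new StringBuilder();
    long suffixTotal = 0;
    for (int i = 0; i < count; i++) {
      cell.append(i).append(':');                               // the i-th append lands
      suffixTotal += cell.chars().filter(c -> c == ':').count(); // now i + 1 suffixes visible
    }
    // 1 + 2 + ... + count == count * (count + 1) / 2
    System.out.println(suffixTotal == (long) (1 + count) * count / 2); // true
  }
}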
Use of java.util.concurrent.ExecutionException in project hive by apache.
In class CLIService, method getOperationStatus.
/* (non-Javadoc)
 * @see org.apache.hive.service.cli.ICLIService#getOperationStatus(org.apache.hive.service.cli.OperationHandle)
 */
@Override
public OperationStatus getOperationStatus(OperationHandle opHandle, boolean getProgressUpdate) throws HiveSQLException {
  Operation operation = sessionManager.getOperationManager().getOperation(opHandle);
  /*
   * If this is a background operation running asynchronously, we block for a duration
   * determined by a step function before returning. However, if the background
   * operation has already completed, we return immediately.
   */
  HiveConf conf = operation.getParentSession().getHiveConf();
  if (operation.shouldRunAsync()) {
    long maxTimeout = HiveConf.getTimeVar(conf,
      HiveConf.ConfVars.HIVE_SERVER2_LONG_POLLING_TIMEOUT, TimeUnit.MILLISECONDS);
    final long elapsed = System.currentTimeMillis() - operation.getBeginTime();
    // A step function that increases the polling timeout by 500 ms for every 10 s of
    // elapsed time, starting from 500 ms, capped at HIVE_SERVER2_LONG_POLLING_TIMEOUT.
    final long timeout = Math.min(maxTimeout, (elapsed / TimeUnit.SECONDS.toMillis(10) + 1) * 500);
    try {
      operation.getBackgroundHandle().get(timeout, TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
      // No-op: the long polling timeout has expired, so return to the caller.
      LOG.trace(opHandle + ": Long polling timed out");
    } catch (CancellationException e) {
      // The background operation thread was cancelled.
      LOG.trace(opHandle + ": The background operation was cancelled", e);
    } catch (ExecutionException e) {
      // Note: Hive ops do not use the normal Future failure path, so this will not be
      // thrown on an actual failure; the Future will simply complete.
      // The background operation thread was aborted.
      LOG.warn(opHandle + ": The background operation was aborted", e);
    } catch (InterruptedException e) {
      // No-op: this thread was interrupted, so the call may return sooner than the
      // long polling timeout.
    }
  }
  OperationStatus opStatus = operation.getStatus();
  LOG.debug(opHandle + ": getOperationStatus()");
  opStatus.setJobProgressUpdate(progressUpdateLog(getProgressUpdate, operation, conf));
  return opStatus;
}
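The effect of the step function is easiest to see with a few sample values: the blocking window starts at 500 ms and grows by 500 ms for every 10 seconds the operation has been running, until it hits the configured cap. A small standalone sketch of that computation (method and constant names are illustrative, not Hive API):

import java.util.concurrent.TimeUnit;

public class LongPollStepDemo {
  // Mirrors the timeout computation above: 500 ms per elapsed 10-second bucket,
  // capped at the configured maximum.
  static long pollTimeout(long elapsedMs, long maxTimeoutMs) {
    return Math.min(maxTimeoutMs, (elapsedMs / TimeUnit.SECONDS.toMillis(10) + 1) * 500);
  }

  public static void main(String[] args) {
    long max = 5000; // e.g. HIVE_SERVER2_LONG_POLLING_TIMEOUT set to 5 s
    // 0-10 s elapsed -> 500 ms, 10-20 s -> 1000 ms, ... until the 5 s cap.
    for (long elapsed : new long[] {0, 9_999, 10_000, 35_000, 120_000}) {
      System.out.println(elapsed + " ms elapsed -> " + pollTimeout(elapsed, max) + " ms poll");
    }
  }
}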
Use of java.util.concurrent.ExecutionException in project kafka by apache.
In class KafkaConfigBackingStore, method removeConnectorConfig.
/**
 * Remove configuration for a given connector.
 * @param connector name of the connector to remove
 */
@Override
public void removeConnectorConfig(String connector) {
  log.debug("Removing connector configuration for connector {}", connector);
  try {
    // Write null values (tombstones) for the connector's config and target-state keys,
    // then read to the end of the config log to ensure the deletes are committed.
    configLog.send(CONNECTOR_KEY(connector), null);
    configLog.send(TARGET_STATE_KEY(connector), null);
    configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS);
  } catch (InterruptedException | ExecutionException | TimeoutException e) {
    log.error("Failed to remove connector configuration from Kafka: ", e);
    throw new ConnectException("Error removing connector configuration from Kafka", e);
  }
}
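Sending a null value is the standard Kafka deletion idiom: on a log-compacted topic, a null-valued record is a tombstone, and compaction eventually drops the key entirely. The readToEnd().get(...) call then blocks until those writes are visible, which is why ExecutionException must be handled. A minimal sketch of the same idiom with a plain producer (the broker address, topic, and key are made up for illustration):

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class TombstoneDemo {
  public static void main(String[] args) throws InterruptedException, ExecutionException {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
    props.put("key.serializer", StringSerializer.class.getName());
    props.put("value.serializer", StringSerializer.class.getName());
    try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
      // A record with a null value is a tombstone: on a compacted topic, log
      // compaction eventually removes every record with this key.
      producer.send(new ProducerRecord<>("connect-configs", "connector-my-sink", null))
              .get(); // block until acknowledged, analogous to readToEnd() above
    }
  }
}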