Use of com.google.common.util.concurrent.UncheckedExecutionException in project bookkeeper by apache.
The class FileSystemUpgrade, method upgrade:
public static void upgrade(ServerConfiguration conf) throws BookieException.UpgradeException, InterruptedException {
    LOG.info("Upgrading...");
    try {
        runFunctionWithRegistrationManager(conf, rm -> {
            try {
                upgrade(conf, rm);
            } catch (UpgradeException e) {
                throw new UncheckedExecutionException(e.getMessage(), e);
            }
            return null;
        });
    } catch (MetadataException e) {
        throw new UpgradeException(e);
    } catch (ExecutionException e) {
        throw new UpgradeException(e.getCause());
    }
    LOG.info("Done");
}
Use of com.google.common.util.concurrent.UncheckedExecutionException in project bookkeeper by apache.
The class FileSystemUpgrade, method rollback:
public static void rollback(ServerConfiguration conf) throws BookieException.UpgradeException, InterruptedException {
    LOG.info("Rolling back upgrade...");
    try {
        runFunctionWithRegistrationManager(conf, rm -> {
            try {
                rollback(conf, rm);
            } catch (UpgradeException e) {
                throw new UncheckedExecutionException(e.getMessage(), e);
            }
            return null;
        });
    } catch (MetadataException e) {
        throw new UpgradeException(e);
    } catch (ExecutionException e) {
        throw new UpgradeException(e.getCause());
    }
    LOG.info("Done");
}
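Both upgrade and rollback above follow the same shape: the Function passed to runFunctionWithRegistrationManager cannot throw the checked UpgradeException, so the lambda wraps it in Guava's UncheckedExecutionException and the call site converts the failure back into a checked exception. The sketch below shows that wrap-and-unwrap pattern in isolation. The names WrapUnwrapSketch, runWithResource, doTask, and TaskException are illustrative stand-ins, not the bookkeeper API, and the simplified call site catches the unchecked wrapper directly rather than the ExecutionException that runFunctionWithRegistrationManager surfaces.

import com.google.common.util.concurrent.UncheckedExecutionException;
import java.util.function.Function;

// Minimal sketch of the wrap-and-unwrap pattern; all names are illustrative.
public final class WrapUnwrapSketch {

    /** Hypothetical checked exception standing in for UpgradeException. */
    static final class TaskException extends Exception {
        TaskException(Throwable cause) {
            super(cause);
        }
    }

    // Stand-in for runFunctionWithRegistrationManager: runs the function against a resource.
    static <T> T runWithResource(Function<String, T> fn) {
        return fn.apply("resource");
    }

    // Stand-in for the real work, which may fail with a checked exception.
    static void doTask(String resource) throws TaskException {
    }

    public static void runTask() throws TaskException {
        try {
            runWithResource(resource -> {
                try {
                    doTask(resource);
                } catch (TaskException e) {
                    // wrap: java.util.function.Function does not allow checked exceptions
                    throw new UncheckedExecutionException(e.getMessage(), e);
                }
                return null;
            });
        } catch (UncheckedExecutionException e) {
            // unwrap: convert the smuggled cause back into the checked exception
            throw new TaskException(e.getCause());
        }
    }
}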
Use of com.google.common.util.concurrent.UncheckedExecutionException in project bookkeeper by apache.
The class IndexPersistenceMgr, method getFileInfo:
/**
 * Get the FileInfo and increase its reference count.
 * When we get a FileInfo from the cache, the retrieval must be synchronized
 * with eviction; otherwise there is a race where the FileInfo is evicted and
 * closed after we fetch it from the cache but before we can increase its
 * reference count.
 */
CachedFileInfo getFileInfo(final Long ledger, final byte[] masterKey) throws IOException {
    try {
        CachedFileInfo fi;
        pendingGetFileInfoCounter.inc();
        Callable<CachedFileInfo> loader = () -> {
            CachedFileInfo fileInfo = fileInfoBackingCache.loadFileInfo(ledger, masterKey);
            activeLedgers.put(ledger, true);
            return fileInfo;
        };
        do {
            if (null != masterKey) {
                fi = writeFileInfoCache.get(ledger, loader);
            } else {
                fi = readFileInfoCache.get(ledger, loader);
            }
            if (!fi.tryRetain()) {
                // defensively ensure that dead fileinfo objects don't exist in the
                // cache. They shouldn't if refcounting is correct, but if someone
                // does a double release, the fileinfo will be cleaned up, while
                // remaining in the cache, which could cause a tight loop in this method.
                boolean inWriteMap = writeFileInfoCache.asMap().remove(ledger, fi);
                boolean inReadMap = readFileInfoCache.asMap().remove(ledger, fi);
                if (inWriteMap || inReadMap) {
                    LOG.error("Dead fileinfo({}) forced out of cache (write:{}, read:{}). "
                            + "It must have been double-released somewhere.", fi, inWriteMap, inReadMap);
                }
                fi = null;
            }
        } while (fi == null);
        return fi;
    } catch (ExecutionException | UncheckedExecutionException ee) {
        if (ee.getCause() instanceof IOException) {
            throw (IOException) ee.getCause();
        } else {
            throw new IOException("Failed to load file info for ledger " + ledger, ee);
        }
    } finally {
        pendingGetFileInfoCounter.dec();
    }
}
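getFileInfo relies on the behaviour of Guava's Cache.get(key, loader): a checked exception thrown by the loader comes back wrapped in ExecutionException, while an unchecked one comes back in UncheckedExecutionException, so both are caught and the underlying IOException is re-surfaced. Below is a minimal, self-contained sketch of that unwrapping with illustrative names; it deliberately omits the tryRetain loop and reference counting of the real code.

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.util.concurrent.UncheckedExecutionException;
import java.io.IOException;
import java.util.concurrent.ExecutionException;

// Sketch only (not the bookkeeper code): unwrap loader failures from a Guava cache.
public final class CacheLoadSketch {

    private final Cache<Long, String> cache = CacheBuilder.newBuilder()
            .maximumSize(1024)
            .build();

    String load(long ledgerId) throws IOException {
        try {
            // Checked loader failures arrive as ExecutionException,
            // unchecked ones as UncheckedExecutionException.
            return cache.get(ledgerId, () -> readFromDisk(ledgerId));
        } catch (ExecutionException | UncheckedExecutionException e) {
            if (e.getCause() instanceof IOException) {
                throw (IOException) e.getCause();   // re-surface the real failure
            }
            throw new IOException("Failed to load ledger " + ledgerId, e);
        }
    }

    // Stand-in for the real loader; may throw IOException.
    private String readFromDisk(long ledgerId) throws IOException {
        return "ledger-" + ledgerId;
    }
}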
Use of com.google.common.util.concurrent.UncheckedExecutionException in project bookkeeper by apache.
The class TestHttpService, method testListUnderReplicatedLedgerService:
@Test
public void testListUnderReplicatedLedgerService() throws Exception {
    baseConf.setZkServers(zkUtil.getZooKeeperConnectString());
    runFunctionWithLedgerManagerFactory(baseConf, mFactory -> {
        try {
            testListUnderReplicatedLedgerService(mFactory);
        } catch (Exception e) {
            throw new UncheckedExecutionException(e.getMessage(), e.getCause());
        }
        return null;
    });
}
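Once a callback has wrapped its failure in UncheckedExecutionException, callers usually want the original exception type back rather than the Guava wrapper. A small illustrative helper, not part of the bookkeeper test, using Guava's Throwables to do that unwrapping:

import com.google.common.base.Throwables;
import com.google.common.util.concurrent.UncheckedExecutionException;

// Illustrative helper: rethrow the wrapped cause with its original type where possible.
public final class UnwrapSketch {

    static void runUnwrapped(Runnable callback) throws Exception {
        try {
            callback.run();
        } catch (UncheckedExecutionException e) {
            Throwable cause = e.getCause();
            if (cause == null) {
                throw e;                                           // nothing to unwrap
            }
            Throwables.throwIfUnchecked(cause);                    // RuntimeException / Error as-is
            Throwables.throwIfInstanceOf(cause, Exception.class);  // checked causes rethrown as-is
            throw e;                                               // exotic Throwable: keep the wrapper
        }
    }
}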
Use of com.google.common.util.concurrent.UncheckedExecutionException in project cdap by caskdata.
The class SingleThreadDatasetCache, method getDataset:
@Override
public <T extends Dataset> T getDataset(DatasetCacheKey key, boolean bypass) throws DatasetInstantiationException {
    Dataset dataset;
    try {
        if (bypass) {
            dataset = datasetLoader.load(key);
        } else {
            try {
                dataset = datasetCache.get(key);
            } catch (ExecutionException | UncheckedExecutionException e) {
                throw e.getCause();
            }
        }
    } catch (DatasetInstantiationException | ServiceUnavailableException e) {
        throw e;
    } catch (Throwable t) {
        throw new DatasetInstantiationException(
            String.format("Could not instantiate dataset '%s:%s'", key.getNamespace(), key.getName()), t);
    }
    // make sure the dataset exists and is of the right type
    if (dataset == null) {
        throw new DatasetInstantiationException(String.format("Dataset '%s' does not exist", key.getName()));
    }
    T typedDataset;
    try {
        @SuppressWarnings("unchecked")
        T t = (T) dataset;
        typedDataset = t;
    } catch (Throwable t) {
        // must be ClassCastException
        throw new DatasetInstantiationException(
            String.format("Could not cast dataset '%s' to requested type. Actual type is %s.",
                key.getName(), dataset.getClass().getName()), t);
    }
    // any transaction aware that is not in the active tx-awares is added to the current tx context (if there is one).
    if (!bypass && dataset instanceof TransactionAware) {
        TransactionAware txAware = (TransactionAware) dataset;
        TransactionAware existing = activeTxAwares.get(key);
        if (existing == null) {
            activeTxAwares.put(key, txAware);
            if (txContext != null) {
                txContext.addTransactionAware(txAware);
            }
        } else if (existing != dataset) {
            // this better be the same dataset, otherwise the cache did not work
            throw new IllegalStateException(
                String.format("Unexpected state: Cache returned %s for %s, which is different from the "
                    + "active transaction aware %s for the same key. This should never happen.",
                    dataset, key, existing));
        }
    }
    return typedDataset;
}