Use of org.apache.accumulo.fate.util.Retry in project accumulo by apache.
The class ZooReaderWriter, method mutate.
@Override
public byte[] mutate(String zPath, byte[] createValue, List<ACL> acl, Mutator mutator) throws Exception {
  if (createValue != null) {
    while (true) {
      final Retry retry = getRetryFactory().createRetry();
      try {
        getZooKeeper().create(zPath, createValue, acl, CreateMode.PERSISTENT);
        return createValue;
      } catch (KeeperException ex) {
        final Code code = ex.code();
        if (code == Code.NODEEXISTS) {
          // expected
          break;
        } else if (code == Code.OPERATIONTIMEOUT || code == Code.CONNECTIONLOSS || code == Code.SESSIONEXPIRED) {
          retryOrThrow(retry, ex);
        } else {
          throw ex;
        }
      }
      retry.waitForNextAttempt();
    }
  }
  do {
    final Retry retry = getRetryFactory().createRetry();
    Stat stat = new Stat();
    byte[] data = getData(zPath, false, stat);
    data = mutator.mutate(data);
    if (data == null)
      return data;
    try {
      getZooKeeper().setData(zPath, data, stat.getVersion());
      return data;
    } catch (KeeperException ex) {
      final Code code = ex.code();
      if (code == Code.BADVERSION) {
        // Retry, but don't increment. This makes it backwards compatible with the infinite
        // loop that previously happened. I'm not sure if that's really desirable though.
      } else if (code == Code.OPERATIONTIMEOUT || code == Code.CONNECTIONLOSS || code == Code.SESSIONEXPIRED) {
        retryOrThrow(retry, ex);
        retry.waitForNextAttempt();
      } else {
        throw ex;
      }
    }
  } while (true);
}
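The create branch handles first-time initialization, while the read-mutate-setData loop below it retries on BADVERSION until the optimistic update wins. A minimal usage sketch, not from the Accumulo source: the zrw handle and the /counter path are hypothetical, the open ACL is only an example, and the Mutator signature is assumed from the snippet above.
// Hypothetical sketch: atomically increment a numeric value at /counter.
// If the node does not exist yet, mutate() creates it with "0" and returns.
byte[] updated = zrw.mutate("/counter", "0".getBytes(StandardCharsets.UTF_8),
    ZooDefs.Ids.OPEN_ACL_UNSAFE, new Mutator() {
      @Override
      public byte[] mutate(byte[] currentValue) throws Exception {
        long next = Long.parseLong(new String(currentValue, StandardCharsets.UTF_8)) + 1;
        return Long.toString(next).getBytes(StandardCharsets.UTF_8);
      }
    });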
Use of org.apache.accumulo.fate.util.Retry in project accumulo by apache.
The class ZooUtil, method recursiveDelete.
/**
 * This method will delete a node and all its children from ZooKeeper.
 *
 * @param zPath
 *          the path to delete
 */
static void recursiveDelete(ZooKeeperConnectionInfo info, String zPath, NodeMissingPolicy policy) throws KeeperException, InterruptedException {
  if (policy.equals(NodeMissingPolicy.CREATE))
    throw new IllegalArgumentException(policy.name() + " is invalid for this operation");
  try {
    List<String> children;
    final Retry retry = RETRY_FACTORY.createRetry();
    while (true) {
      try {
        children = getZooKeeper(info).getChildren(zPath, false);
        break;
      } catch (KeeperException e) {
        final Code c = e.code();
        if (c == Code.CONNECTIONLOSS || c == Code.OPERATIONTIMEOUT || c == Code.SESSIONEXPIRED) {
          retryOrThrow(retry, e);
        } else {
          throw e;
        }
      }
      retry.waitForNextAttempt();
    }
    for (String child : children)
      recursiveDelete(info, zPath + "/" + child, NodeMissingPolicy.SKIP);
    Stat stat;
    while (true) {
      try {
        stat = getZooKeeper(info).exists(zPath, null);
        // Node exists
        if (stat != null) {
          try {
            // Try to delete it. We don't care if there was an update to the node
            // since we got the Stat, just delete all versions (-1).
            getZooKeeper(info).delete(zPath, -1);
            return;
          } catch (NoNodeException e) {
            // If the node is gone now, it's ok if we have SKIP
            if (policy.equals(NodeMissingPolicy.SKIP)) {
              return;
            }
            throw e;
          }
          // Let other KeeperException bubble to the outer catch
        } else {
          // If the stat is null, the node is now gone which is fine.
          return;
        }
      } catch (KeeperException e) {
        final Code c = e.code();
        if (c == Code.CONNECTIONLOSS || c == Code.OPERATIONTIMEOUT || c == Code.SESSIONEXPIRED) {
          retryOrThrow(retry, e);
        } else {
          throw e;
        }
      }
      retry.waitForNextAttempt();
    }
  } catch (KeeperException e) {
    if (policy.equals(NodeMissingPolicy.SKIP) && e.code().equals(KeeperException.Code.NONODE))
      return;
    throw e;
  }
}
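A minimal call sketch, hypothetical: it assumes code in the same package (the method is package-private) with an established ZooKeeperConnectionInfo named info, and an illustrative path.
// Hypothetical sketch: delete a whole subtree, treating an already-missing
// node as success because of NodeMissingPolicy.SKIP.
ZooUtil.recursiveDelete(info, "/accumulo/some/old/subtree", NodeMissingPolicy.SKIP);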
Use of org.apache.accumulo.fate.util.Retry in project accumulo by apache.
The class TableOperationsImpl, method locate.
@Override
public Locations locate(String tableName, Collection<Range> ranges) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
  requireNonNull(tableName, "tableName must be non null");
  requireNonNull(ranges, "ranges must be non null");
  Table.ID tableId = Tables.getTableId(context.getInstance(), tableName);
  TabletLocator locator = TabletLocator.getLocator(context, tableId);
  List<Range> rangeList = null;
  if (ranges instanceof List) {
    rangeList = (List<Range>) ranges;
  } else {
    rangeList = new ArrayList<>(ranges);
  }
  Map<String, Map<KeyExtent, List<Range>>> binnedRanges = new HashMap<>();
  locator.invalidateCache();
  Retry retry = Retry.builder().infiniteRetries().retryAfter(100, MILLISECONDS)
      .incrementBy(100, MILLISECONDS).maxWait(2, SECONDS).logInterval(3, TimeUnit.MINUTES)
      .createRetry();
  while (!locator.binRanges(context, rangeList, binnedRanges).isEmpty()) {
    if (!Tables.exists(context.getInstance(), tableId))
      throw new TableNotFoundException(tableId.canonicalID(), tableName, null);
    if (Tables.getTableState(context.getInstance(), tableId) == TableState.OFFLINE)
      throw new TableOfflineException(context.getInstance(), tableId.canonicalID());
    binnedRanges.clear();
    try {
      retry.waitForNextAttempt();
    } catch (InterruptedException e) {
      throw new RuntimeException(e);
    }
    locator.invalidateCache();
  }
  return new LoctionsImpl(binnedRanges);
}
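A usage sketch of the resulting Locations object, hypothetical: the connector handle and table name are assumptions, and it relies on the public Locations accessors groupByTablet() and getTabletLocation().
// Hypothetical sketch: print which tablet servers host each binned range.
Locations locations = connector.tableOperations().locate("mytable",
    Collections.singletonList(new Range("a", "z")));
for (Map.Entry<TabletId, List<Range>> entry : locations.groupByTablet().entrySet()) {
  System.out.println(entry.getKey() + " on " + locations.getTabletLocation(entry.getKey())
      + " hosts " + entry.getValue());
}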
Use of org.apache.accumulo.fate.util.Retry in project accumulo by apache.
The class ZooUtil, method isLockHeld.
public static boolean isLockHeld(ZooKeeperConnectionInfo info, LockID lid) throws KeeperException, InterruptedException {
  final Retry retry = RETRY_FACTORY.createRetry();
  while (true) {
    try {
      List<String> children = getZooKeeper(info).getChildren(lid.path, false);
      if (children.size() == 0) {
        return false;
      }
      Collections.sort(children);
      String lockNode = children.get(0);
      if (!lid.node.equals(lockNode))
        return false;
      Stat stat = getZooKeeper(info).exists(lid.path + "/" + lid.node, false);
      return stat != null && stat.getEphemeralOwner() == lid.eid;
    } catch (KeeperException ex) {
      final Code c = ex.code();
      if (c == Code.CONNECTIONLOSS || c == Code.OPERATIONTIMEOUT || c == Code.SESSIONEXPIRED) {
        retryOrThrow(retry, ex);
      } else {
        // Non-transient errors are not retryable; rethrow them as the other
        // Retry call sites in this class do.
        throw ex;
      }
    }
    retry.waitForNextAttempt();
  }
}
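A guard-style usage sketch, hypothetical: info and lid are assumed to come from an earlier lock acquisition.
// Hypothetical sketch: bail out if the previously acquired lock was lost.
if (!ZooUtil.isLockHeld(info, lid)) {
  throw new IllegalStateException("lock lost: " + lid.path + "/" + lid.node);
}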
Use of org.apache.accumulo.fate.util.Retry in project accumulo by apache.
The class ZooUtil, method recursiveCopyPersistent.
public static void recursiveCopyPersistent(ZooKeeperConnectionInfo info, String source, String destination, NodeExistsPolicy policy) throws KeeperException, InterruptedException {
  Stat stat = null;
  if (!exists(info, source))
    throw KeeperException.create(Code.NONODE, source);
  if (exists(info, destination)) {
    switch (policy) {
      case OVERWRITE:
        break;
      case SKIP:
        return;
      case FAIL:
      default:
        throw KeeperException.create(Code.NODEEXISTS, source);
    }
  }
  stat = new Stat();
  byte[] data = getData(info, source, stat);
  if (stat.getEphemeralOwner() == 0) {
    if (data == null)
      throw KeeperException.create(Code.NONODE, source);
    putPersistentData(info, destination, data, policy);
    if (stat.getNumChildren() > 0) {
      List<String> children;
      final Retry retry = RETRY_FACTORY.createRetry();
      while (true) {
        try {
          children = getZooKeeper(info).getChildren(source, false);
          break;
        } catch (KeeperException e) {
          final Code c = e.code();
          if (c == Code.CONNECTIONLOSS || c == Code.OPERATIONTIMEOUT || c == Code.SESSIONEXPIRED) {
            retryOrThrow(retry, e);
          } else {
            throw e;
          }
        }
        retry.waitForNextAttempt();
      }
      for (String child : children) {
        recursiveCopyPersistent(info, source + "/" + child, destination + "/" + child, policy);
      }
    }
  }
}
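A call sketch, hypothetical: the paths are illustrative and info is an established ZooKeeperConnectionInfo. Note the method copies only persistent nodes (ephemeral owner 0) and recurses into children.
// Hypothetical sketch: copy a persistent subtree, replacing nodes that
// already exist under the destination.
ZooUtil.recursiveCopyPersistent(info, "/accumulo/config/source", "/accumulo/config/backup",
    NodeExistsPolicy.OVERWRITE);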