Use of java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock in project hadoop by apache.
Class ProportionalCapacityPreemptionPolicy, method cloneQueues.
/**
 * This method walks a tree of CSQueue and clones the portion of the state
 * relevant for preemption in TempQueue(s). It also maintains a pointer to
 * the leaves. Finally, it aggregates pending resources in each queue and
 * rolls them up to higher levels.
 *
 * @param curQueue the queue currently being examined
 * @param partitionResource the total amount of resources in the cluster
 * @param partitionToLookAt the partition (node label) under consideration
 * @return the root of the cloned queue hierarchy
 */
private TempQueuePerPartition cloneQueues(CSQueue curQueue,
    Resource partitionResource, String partitionToLookAt) {
  TempQueuePerPartition ret;
  ReadLock readLock = curQueue.getReadLock();
  try {
    // Acquire a read lock from the Parent/LeafQueue.
    readLock.lock();

    String queueName = curQueue.getQueueName();
    QueueCapacities qc = curQueue.getQueueCapacities();
    float absCap = qc.getAbsoluteCapacity(partitionToLookAt);
    float absMaxCap = qc.getAbsoluteMaximumCapacity(partitionToLookAt);
    boolean preemptionDisabled = curQueue.getPreemptionDisabled();

    Resource current = Resources.clone(
        curQueue.getQueueResourceUsage().getUsed(partitionToLookAt));
    Resource killable = Resources.none();
    Resource reserved = Resources.clone(
        curQueue.getQueueResourceUsage().getReserved(partitionToLookAt));

    if (null != preemptableQueues.get(queueName)) {
      killable = Resources.clone(
          preemptableQueues.get(queueName).getKillableResource(partitionToLookAt));
    }

    // Usage could be more than the specified maxCapacity for non-exclusive partitions.
    try {
      if (!scheduler.getRMContext().getNodeLabelManager()
          .isExclusiveNodeLabel(partitionToLookAt)) {
        absMaxCap = 1.0f;
      }
    } catch (IOException e) {
      // This may be caused by the partition being removed while the capacity
      // monitor is running; just ignore the error, it will be corrected on
      // the next check.
    }

    ret = new TempQueuePerPartition(queueName, current, preemptionDisabled,
        partitionToLookAt, killable, absCap, absMaxCap, partitionResource,
        reserved, curQueue);

    if (curQueue instanceof ParentQueue) {
      String configuredOrderingPolicy =
          ((ParentQueue) curQueue).getQueueOrderingPolicy().getConfigName();

      // Recursively add children.
      for (CSQueue c : curQueue.getChildQueues()) {
        TempQueuePerPartition subq =
            cloneQueues(c, partitionResource, partitionToLookAt);

        // If the ordering policy respects priority, record the child's priority.
        if (StringUtils.equals(
            CapacitySchedulerConfiguration.QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY,
            configuredOrderingPolicy)) {
          subq.relativePriority = c.getPriority().getPriority();
        }
        ret.addChild(subq);
        subq.parent = ret;
      }
    }
  } finally {
    readLock.unlock();
  }
  addTempQueuePartition(ret);
  return ret;
}
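The essential concurrency pattern here is taking the queue's read lock before reading its capacities and usage, and releasing it in finally so the tree walk never leaks a lock even if cloning a child throws. A minimal, self-contained sketch of the same read-locked tree-clone pattern (the Node, Snapshot, and TreeCloner types below are hypothetical placeholders, not Hadoop classes):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;

class Node {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private final List<Node> children = new ArrayList<>();
    private long used; // mutated elsewhere under the write lock

    ReadLock getReadLock() { return lock.readLock(); }
    List<Node> getChildren() { return children; }
    long getUsed() { return used; }
}

class Snapshot {
    final long used;
    final List<Snapshot> children = new ArrayList<>();
    Snapshot(long used) { this.used = used; }
}

final class TreeCloner {
    /** Clones the mutable state of each node while holding its read lock. */
    static Snapshot cloneTree(Node node) {
        Snapshot snapshot;
        ReadLock readLock = node.getReadLock();
        readLock.lock();
        try {
            snapshot = new Snapshot(node.getUsed());
            for (Node child : node.getChildren()) {
                // Recursion acquires the child's own lock; the parent's read
                // lock stays held so its child list cannot change mid-walk.
                snapshot.children.add(cloneTree(child));
            }
        } finally {
            readLock.unlock();
        }
        return snapshot;
    }
}

Note that the Hadoop snippet calls lock() inside the try block; the sketch locks before the try, which is the more conventional form because unlock() then only runs after a successful acquisition.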
Use of java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock in project torodb by torodb.
Class MvccMetainfoRepository, method startSnapshotStage.
@Override
@Nonnull
@SuppressFBWarnings(value = {"RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE",
    "UL_UNRELEASED_LOCK"})
public SnapshotStage startSnapshotStage() {
  ReadLock readLock = lock.readLock();
  LOGGER.trace("Trying to create a {}", MvccSnapshotStage.class);
  readLock.lock();

  SnapshotStage snapshotStage = null;
  try {
    snapshotStage = new MvccSnapshotStage(readLock);
    LOGGER.trace("{} created", MvccSnapshotStage.class);
  } finally {
    if (snapshotStage == null) {
      // Construction failed: release the read lock here; on success the
      // returned stage owns the lock and releases it when it is closed.
      LOGGER.error("Error while trying to create a {}", MvccSnapshotStage.class);
      readLock.unlock();
    }
  }
  assert snapshotStage != null;
  return snapshotStage;
}
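Unlike the other examples, this method deliberately does not release the read lock on the success path: ownership of the lock passes to the returned MvccSnapshotStage, which is expected to unlock it when the stage ends (hence the suppressed UL_UNRELEASED_LOCK warning). A minimal sketch of that ownership-transfer pattern, assuming an AutoCloseable stage that releases the lock in close() (the SnapshotStage and Repository names are illustrative, not torodb's API):

import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;

final class SnapshotStage implements AutoCloseable {
    private final ReadLock readLock;

    SnapshotStage(ReadLock readLock) {
        this.readLock = readLock; // takes ownership of an already-held lock
    }

    @Override
    public void close() {
        readLock.unlock(); // released when the caller is done with the stage
    }
}

final class Repository {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    SnapshotStage startSnapshotStage() {
        ReadLock readLock = lock.readLock();
        readLock.lock();
        try {
            return new SnapshotStage(readLock);
        } catch (RuntimeException e) {
            // Only unlock on failure; on success the stage owns the lock.
            readLock.unlock();
            throw e;
        }
    }
}

// Typical use:
// try (SnapshotStage stage = repository.startSnapshotStage()) {
//     ... read a consistent view of the metainfo ...
// }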
Use of java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock in project jgnash by ccavanaugh.
Class AbstractExpandingTableModel, method getExpansionState.
private String getExpansionState() {
  ReadLock readLock = rwl.readLock();
  readLock.lock();
  try {
    StringBuilder builder = new StringBuilder();

    ArrayList<ExpandingTableNode<E>> values = new ArrayList<>(objects.values());
    Collections.sort(values);

    for (ExpandingTableNode<E> node : values) {
      builder.append(node.isExpanded() ? '1' : '0');
    }
    return builder.toString();
  } finally {
    readLock.unlock();
  }
}
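getExpansionState only reads the objects map, so the shared read lock is sufficient; any code that mutates the map needs the matching write lock from the same ReentrantReadWriteLock. A small sketch of that read/write pairing (the field names mirror the snippet, but the ExpansionRegistry class is illustrative, not jGnash code):

import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class ExpansionRegistry {
    private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
    private final Map<String, Boolean> expanded = new TreeMap<>();

    /** Readers share the lock; many threads may build the state string at once. */
    String getExpansionState() {
        rwl.readLock().lock();
        try {
            StringBuilder builder = new StringBuilder();
            for (boolean isExpanded : expanded.values()) {
                builder.append(isExpanded ? '1' : '0');
            }
            return builder.toString();
        } finally {
            rwl.readLock().unlock();
        }
    }

    /** Mutation takes the write lock, blocking readers until it completes. */
    void setExpanded(String key, boolean isExpanded) {
        rwl.writeLock().lock();
        try {
            expanded.put(key, isExpanded);
        } finally {
            rwl.writeLock().unlock();
        }
    }
}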
Use of java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock in project jgnash by ccavanaugh.
Class AbstractExpandingTableModel, method getExpandingTableNodeAt.
/**
* Returns the encapsulating object wrapper for the visible row.
*
* @param rowIndex visible row index
* @return node
*/
protected ExpandingTableNode<E> getExpandingTableNodeAt(final int rowIndex) {
  // wait for the worker to complete
  try {
    initWorker.get();
  } catch (InterruptedException | ExecutionException ex) {
    Logger.getLogger(AbstractExpandingTableModel.class.getName())
        .log(Level.SEVERE, null, ex);
  }

  ReadLock readLock = rwl.readLock();
  readLock.lock();
  try {
    return visibleObjects.get(rowIndex);
  } finally {
    readLock.unlock();
  }
}
Use of java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock in project jgnash by ccavanaugh.
Class ExpandingAccountTableModel, method getValueAt.
@Override
public Object getValueAt(final int rowIndex, final int columnIndex) {
  ReadLock readLock = rwl.readLock();
  try {
    readLock.lock();

    final ExpandingTableNode<Account> node = getExpandingTableNodeAt(rowIndex);
    if (node == null || node.getObject() == null) {
      logger.log(Level.WARNING, "Null data", new Exception());
      return "";
    }

    final Account account = node.getObject();

    switch (columnIndex) {
      case 0:
        return account.getName();
      case 1:
        return account.getTransactionCount();
      case 2:
        BigDecimal balance = AccountBalanceDisplayManager.convertToSelectedBalanceMode(
            account.getAccountType(), account.getTreeBalance());
        NumberFormat format = CommodityFormat.getFullNumberFormat(account.getCurrencyNode());
        return format.format(balance);
      case 3:
        BigDecimal reconciledBalance = AccountBalanceDisplayManager.convertToSelectedBalanceMode(
            account.getAccountType(), account.getReconciledTreeBalance());
        NumberFormat numberFormat = CommodityFormat.getFullNumberFormat(account.getCurrencyNode());
        return numberFormat.format(reconciledBalance);
      case 4:
        return account.getCurrencyNode().getSymbol();
      case 5:
        return account.getAccountType().toString();
      default:
        return "Error";
    }
  } finally {
    readLock.unlock();
  }
}
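getValueAt holds the read lock while it calls getExpandingTableNodeAt, which acquires the same read lock again; this is safe because a thread that already holds a ReentrantReadWriteLock read lock may reacquire it. A minimal sketch demonstrating the nested acquisition (an illustrative class, not jGnash code):

import java.util.concurrent.locks.ReentrantReadWriteLock;

final class ReentrantReadExample {
    private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
    private int value = 42;

    int outer() {
        rwl.readLock().lock();
        try {
            return inner(); // re-acquires the read lock on the same thread
        } finally {
            rwl.readLock().unlock();
        }
    }

    private int inner() {
        rwl.readLock().lock(); // reentrant: does not deadlock
        try {
            return value;
        } finally {
            rwl.readLock().unlock();
        }
    }

    public static void main(String[] args) {
        System.out.println(new ReentrantReadExample().outer()); // prints 42
    }
}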