Use of com.orientechnologies.common.concur.lock.OInterruptedException in project orientdb by orientechnologies.
The class OResourcePool, method getResource:
public V getResource(K key, final long maxWaitMillis, Object... additionalArgs) throws OLockException {
  // First, get permission to take or create a resource
  try {
    if (!sem.tryAcquire(maxWaitMillis, TimeUnit.MILLISECONDS))
      throw new OLockException("No more resources available in pool (max=" + maxResources + "). Requested resource: " + key);
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // restore the interrupt flag before converting to an unchecked exception
    throw new OInterruptedException("Acquiring of resources was interrupted");
  }

  V res;
  do {
    // POP A RESOURCE
    res = resources.poll();
    if (res != null) {
      // TRY TO REUSE IT
      if (listener.reuseResource(key, additionalArgs, res)) {
        // OK: REUSE IT
        break;
      } else
        // UNABLE TO REUSE IT: THE RESOURCE WILL BE DISCARDED AND WE TRY THE NEXT ONE, IF ANY
        res = null;
    }
  } while (!resources.isEmpty());

  // NO AVAILABLE RESOURCES: CREATE A NEW ONE
  try {
    if (res == null) {
      res = listener.createNewResource(key, additionalArgs);
      created.incrementAndGet();
      if (OLogManager.instance().isDebugEnabled())
        OLogManager.instance().debug(this, "pool:'%s' created new resource '%s', new resource count '%d'", this, res, created.get());
    }
    resourcesOut.add(res);
    if (OLogManager.instance().isDebugEnabled())
      OLogManager.instance().debug(this, "pool:'%s' acquired resource '%s' available %d out %d", this, res, sem.availablePermits(), resourcesOut.size());
    return res;
  } catch (RuntimeException e) {
    sem.release();
    // PROPAGATE IT
    throw e;
  } catch (Exception e) {
    sem.release();
    throw OException.wrapException(new OLockException("Error on creation of the new resource in the pool"), e);
  }
}
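A minimal usage sketch of this pool. Hedged: the listener `myListener`, the value type `MyConnection`, and the wrapper method are hypothetical placeholders, not part of OrientDB; the timeout is arbitrary.

import com.orientechnologies.common.concur.resource.OResourcePool;

// Hypothetical call site for the method above.
static void useResource(OResourcePool<String, MyConnection> pool) {
  // Blocks up to 5 seconds for a semaphore permit; throws OLockException on timeout.
  final MyConnection conn = pool.getResource("db1", 5000);
  try {
    // ... use the connection ...
  } finally {
    pool.returnResource(conn); // re-queues the resource and releases the permit
  }
}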
Use of com.orientechnologies.common.concur.lock.OInterruptedException in project orientdb by orientechnologies.
The class ODiskWriteAheadLog, method internalLog:
/**
 * Logs a record, receiving its serialized content as a parameter.
 *
 * @param record        the WAL record to log
 * @param recordContent the serialized form of the record
 *
 * @return the log sequence number (LSN) assigned to the record
 *
 * @throws IOException if the record cannot be written to the log
 */
private OLogSequenceNumber internalLog(OWALRecord record, byte[] recordContent) throws IOException {
  syncObject.lock();
  try {
    checkForClose();

    // If another thread is creating a new segment, wait for it to finish before
    // logging a record that is not part of an already active operation.
    if (segmentCreationFlag && record instanceof OOperationUnitRecord && !activeOperations.contains(((OOperationUnitRecord) record).getOperationUnitId())) {
      while (segmentCreationFlag) {
        try {
          segmentCreationComplete.await();
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new OInterruptedException("Segment creation was interrupted");
        }
      }
    }

    OLogSegment last = logSegments.get(logSegments.size() - 1);
    long lastSize = last.filledUpTo();

    final OLogSequenceNumber lsn = last.logRecord(recordContent);
    record.setLsn(lsn);

    if (record.isUpdateMasterRecord()) {
      lastCheckpoint = lsn;
      // Alternate between the two master-record slots.
      if (useFirstMasterRecord) {
        firstMasterRecord = lsn;
        writeMasterRecord(0, firstMasterRecord);
        useFirstMasterRecord = false;
      } else {
        secondMasterRecord = lsn;
        writeMasterRecord(1, secondMasterRecord);
        useFirstMasterRecord = true;
      }
    }

    final long sizeDiff = last.filledUpTo() - lastSize;
    logSize += sizeDiff;

    // Roll over to a new segment once the current one is full.
    if (last.filledUpTo() >= maxSegmentSize) {
      segmentCreationFlag = true;

      if (record instanceof OAtomicUnitEndRecord && activeOperations.size() == 1 || (!(record instanceof OOperationUnitRecord) && activeOperations.isEmpty())) {
        last.stopFlush(true);

        last = new OLogSegment(this, new File(walLocation, getSegmentName(last.getOrder() + 1)), fileTTL, maxPagesCacheSize, performanceStatisticManager, new SubScheduledExecutorService(autoFileCloser), new SubScheduledExecutorService(commitExecutor));
        last.init(fileDataBuffer);
        last.startFlush();
        logSegments.add(last);

        segmentCreationFlag = false;
        segmentCreationComplete.signalAll();
      }
    }

    // If the WAL grew past its size limit, ask listeners for a full checkpoint.
    if (logSize > walSizeLimit && logSegments.size() > 1) {
      for (WeakReference<OFullCheckpointRequestListener> listenerWeakReference : fullCheckpointListeners) {
        final OFullCheckpointRequestListener listener = listenerWeakReference.get();
        if (listener != null)
          listener.requestCheckpoint();
      }
    }

    return lsn;
  } finally {
    syncObject.unlock();
  }
}
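The wait at the top of internalLog follows a recurring OrientDB pattern: a thread blocked on a Condition restores its interrupt flag and rethrows as the unchecked OInterruptedException. A self-contained sketch of that pattern; the class SegmentGate and its members are illustrative names, not OrientDB code.

import com.orientechnologies.common.concur.lock.OInterruptedException;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

// Illustrative only: the "flag + condition" hand-off used by internalLog.
class SegmentGate {
  private final ReentrantLock guard = new ReentrantLock();
  private final Condition creationComplete = guard.newCondition();
  private boolean creating = false;

  void awaitCreation() {
    guard.lock();
    try {
      while (creating) {                      // loop guards against spurious wake-ups
        try {
          creationComplete.await();
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt(); // keep the interrupt visible to callers
          throw new OInterruptedException("Segment creation was interrupted");
        }
      }
    } finally {
      guard.unlock();
    }
  }

  void finishCreation() {
    guard.lock();
    try {
      creating = false;
      creationComplete.signalAll();           // wake every waiting logger
    } finally {
      guard.unlock();
    }
  }
}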
Use of com.orientechnologies.common.concur.lock.OInterruptedException in project orientdb by orientechnologies.
The class OHazelcastPlugin, method getRemoteServer:
public ORemoteServerController getRemoteServer(final String rNodeName) throws IOException {
  if (rNodeName == null)
    throw new IllegalArgumentException("Server name is NULL");

  ORemoteServerController remoteServer = remoteServers.get(rNodeName);
  if (remoteServer == null) {
    Member member = activeNodes.get(rNodeName);
    if (member == null) {
      // SYNC PROBLEMS? TRY TO RETRIEVE THE SERVER INFORMATION FROM THE CLUSTER MAP
      for (Iterator<Map.Entry<String, Object>> it = getConfigurationMap().localEntrySet().iterator(); it.hasNext(); ) {
        final Map.Entry<String, Object> entry = it.next();
        if (entry.getKey().startsWith(CONFIG_NODE_PREFIX)) {
          final ODocument nodeCfg = (ODocument) entry.getValue();
          if (rNodeName.equals(nodeCfg.field("name"))) {
            // FOUND: USE THIS
            final String uuid = entry.getKey().substring(CONFIG_NODE_PREFIX.length());
            for (Member m : hazelcastInstance.getCluster().getMembers()) {
              if (m.getUuid().equals(uuid)) {
                member = m;
                registerNode(member, rNodeName);
                break;
              }
            }
          }
        }
      }

      if (member == null)
        throw new ODistributedException("Cannot find node '" + rNodeName + "'");
    }

    for (int retry = 0; retry < 100; ++retry) {
      ODocument cfg = getNodeConfigurationByUuid(member.getUuid(), false);
      if (cfg == null || cfg.field("listeners") == null) {
        try {
          Thread.sleep(100);
          continue;
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new ODistributedException("Cannot find node '" + rNodeName + "'");
        }
      }

      final String url = ODistributedAbstractPlugin.getListeningBinaryAddress(cfg);
      if (url == null)
        throw new ODatabaseException("Cannot connect to a remote node because the url was not found");

      final String userPassword = cfg.field("user_replicator");
      if (userPassword != null) {
        // OK
        remoteServer = new ORemoteServerController(this, rNodeName, url, REPLICATOR_USER, userPassword);
        final ORemoteServerController old = remoteServers.putIfAbsent(rNodeName, remoteServer);
        if (old != null) {
          // ANOTHER THREAD REGISTERED A CONTROLLER FIRST: USE THAT ONE
          remoteServer.close();
          remoteServer = old;
        }
        break;
      }

      // RETRY TO GET USER+PASSWORD IN A WHILE
      try {
        Thread.sleep(100);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new OInterruptedException("Cannot connect to remote server " + rNodeName);
      }
    }
  }

  if (remoteServer == null)
    throw new ODistributedException("Cannot find node '" + rNodeName + "'");

  return remoteServer;
}
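A hedged call-site sketch: `plugin` stands for an OHazelcastPlugin instance obtained elsewhere, and the node name is a placeholder.

// Hypothetical: resolve the controller of a peer node before sending distributed requests.
try {
  final ORemoteServerController controller = plugin.getRemoteServer("node2");
  // ... exchange messages with the remote node through the controller ...
} catch (ODistributedException e) {
  // The node is unknown to the cluster, or its configuration never became visible.
  OLogManager.instance().warn(null, "Peer not available: %s", e.getMessage());
} catch (IOException e) {
  // Connection-level failure while opening the channel.
  OLogManager.instance().error(null, "I/O error contacting peer", e);
}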
Use of com.orientechnologies.common.concur.lock.OInterruptedException in project orientdb by orientechnologies.
The class OPartitionedDatabasePool, method acquire:
public ODatabaseDocumentTx acquire() {
  checkForClose();

  final PoolData data = poolData.get();
  if (data.acquireCount > 0) {
    // This thread already holds a connection: reuse it and bump the nesting counter.
    data.acquireCount++;

    assert data.acquiredDatabase != null;

    final ODatabaseDocumentTx db = data.acquiredDatabase;
    db.activateOnCurrentThread();

    for (Map.Entry<String, Object> entry : properties.entrySet()) {
      db.setProperty(entry.getKey(), entry.getValue());
    }

    return db;
  }

  try {
    if (connectionsCounter != null)
      connectionsCounter.acquire();
  } catch (InterruptedException ie) {
    throw OException.wrapException(new OInterruptedException("Acquiring of new connection was interrupted"), ie);
  }

  boolean acquired = false;
  try {
    while (true) {
      final PoolPartition[] pts = partitions;
      final int index = (pts.length - 1) & data.hashCode;

      PoolPartition partition = pts[index];
      if (partition == null) {
        // Lazily initialize the partition under the poolBusy spin flag.
        if (!poolBusy.get() && poolBusy.compareAndSet(false, true)) {
          if (pts == partitions) {
            partition = pts[index];
            if (partition == null) {
              partition = new PoolPartition();
              initQueue(url, partition);
              pts[index] = partition;
            }
          }
          poolBusy.set(false);
        }
        continue;
      } else {
        final DatabaseDocumentTxPooled db = partition.queue.poll();
        if (db == null) {
          if (pts.length < maxPartitions) {
            // No pooled connection available: double the number of partitions.
            if (!poolBusy.get() && poolBusy.compareAndSet(false, true)) {
              if (pts == partitions) {
                final PoolPartition[] newPartitions = new PoolPartition[partitions.length << 1];
                System.arraycopy(partitions, 0, newPartitions, 0, partitions.length);
                partitions = newPartitions;
              }
              poolBusy.set(false);
            }
            continue;
          } else {
            if (partition.currentSize.get() >= maxPartitonSize)
              throw new IllegalStateException("You have reached maximum pool size for given partition");

            final DatabaseDocumentTxPooled newDb = new DatabaseDocumentTxPooled(url);
            for (Map.Entry<String, Object> entry : properties.entrySet()) {
              newDb.setProperty(entry.getKey(), entry.getValue());
            }
            openDatabase(newDb);
            newDb.partition = partition;

            data.acquireCount = 1;
            data.acquiredDatabase = newDb;

            partition.acquiredConnections.incrementAndGet();
            partition.currentSize.incrementAndGet();

            acquired = true;
            return newDb;
          }
        } else {
          for (Map.Entry<String, Object> entry : properties.entrySet()) {
            db.setProperty(entry.getKey(), entry.getValue());
          }
          openDatabase(db);
          db.partition = partition;
          partition.acquiredConnections.incrementAndGet();

          data.acquireCount = 1;
          data.acquiredDatabase = db;

          acquired = true;
          return db;
        }
      }
    }
  } finally {
    // If nothing was handed out, give back the permit taken above.
    if (!acquired && connectionsCounter != null)
      connectionsCounter.release();
  }
}
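A minimal usage sketch, assuming a local plocal URL and default admin credentials (both placeholders, as is the demo wrapper); the five-argument constructor sets the per-partition size and the overall pool cap.

import com.orientechnologies.orient.core.db.OPartitionedDatabasePool;
import com.orientechnologies.orient.core.db.document.ODatabaseDocumentTx;

// Hypothetical: up to 64 connections total, 8 per partition, to reduce thread contention.
static void demoAcquire() {
  final OPartitionedDatabasePool pool =
      new OPartitionedDatabasePool("plocal:/tmp/demo", "admin", "admin", 8, 64);
  final ODatabaseDocumentTx db = pool.acquire();
  try {
    // ... run queries/commands on this thread ...
  } finally {
    db.close(); // for a pooled instance this returns the connection instead of closing it
  }
  pool.close();
}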
Use of com.orientechnologies.common.concur.lock.OInterruptedException in project orientdb by orientechnologies.
The class OByteBufferPool, method acquireDirect:
/**
 * Acquires a direct memory buffer. If there is a free (already released) direct memory buffer, it is reused; otherwise either a
 * new chunk is allocated from direct memory, or a slice of an already preallocated memory chunk is used as the new byte buffer
 * instance.
 * <p>
 * If the maximum amount of preallocated memory chunks has been reached, a small portion of direct memory equal to the page size
 * is allocated instead. The byte order of the returned buffer equals the native byte order.
 * <p>
 * The position of the returned buffer is always zero.
 *
 * @param clear Whether the returned buffer should be filled with zeros before being returned.
 *
 * @return Direct memory buffer instance.
 */
public ByteBuffer acquireDirect(boolean clear) {
  // Check the pool of released buffers first.
  final ByteBuffer buffer = pool.poll();
  if (buffer != null) {
    if (clear) {
      buffer.position(0);
      buffer.put(zeroPage.duplicate());
    }

    buffer.position(0);
    return trackBuffer(buffer);
  }

  if (maxPagesPerSingleArea > 1) {
    final long currentAllocationPosition = nextAllocationPosition.getAndIncrement();

    // All chunks consume maxPagesPerSingleArea pages, with the exception of the last one.
    final int position = (int) (currentAllocationPosition & (maxPagesPerSingleArea - 1));
    final int bufferIndex = (int) (currentAllocationPosition / maxPagesPerSingleArea);

    // If we hit the end of the preallocation buffer we allocate in small chunks.
    if (currentAllocationPosition >= preAllocationLimit) {
      return trackBuffer(ByteBuffer.allocateDirect(pageSize).order(ByteOrder.nativeOrder()));
    }

    // The allocation size should be the same for all buffers from the chunk with the same index.
    final int allocationSize = (int) Math.min(maxPagesPerSingleArea * pageSize, (preAllocationLimit - bufferIndex * maxPagesPerSingleArea) * pageSize);

    // We cannot free a chunk of allocated memory, so we install a placeholder first;
    // only if that succeeds do we allocate the chunk of direct memory.
    BufferHolder bfh = preallocatedAreas.get(bufferIndex);
    if (bfh == null) {
      bfh = new BufferHolder();

      BufferHolder replacedBufferHolder = preallocatedAreas.putIfAbsent(bufferIndex, bfh);
      if (replacedBufferHolder == null) {
        allocateBuffer(bfh, allocationSize);
      } else {
        bfh = replacedBufferHolder;
      }
    }

    if (bfh.buffer == null) {
      // Another thread is allocating the chunk: wait until it publishes the buffer instance.
      try {
        bfh.latch.await();
      } catch (InterruptedException e) {
        throw OException.wrapException(new OInterruptedException("Wait of new preallocated memory area was interrupted"), e);
      }
    }

    final int rawPosition = position * pageSize;

    // Duplicate the buffer to get a thread-local position and limit.
    final ByteBuffer db = bfh.buffer.duplicate();
    db.position(rawPosition);
    db.limit(rawPosition + pageSize);

    ByteBuffer slice = db.slice();
    slice.order(ByteOrder.nativeOrder());

    if (clear) {
      slice.position(0);
      slice.put(zeroPage.duplicate());
    }

    slice.position(0);
    return trackBuffer(slice);
  }

  // This should not happen if the amount of pages needed by the storage is calculated correctly.
  overflowBufferCount.incrementAndGet();
  return trackBuffer(ByteBuffer.allocateDirect(pageSize).order(ByteOrder.nativeOrder()));
}
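A brief usage sketch, assuming the shared pool singleton; the demo wrapper is hypothetical, and the caller is responsible for returning the page.

import com.orientechnologies.common.directmemory.OByteBufferPool;
import java.nio.ByteBuffer;

// Hypothetical caller: borrow a zero-filled page, write to it, hand it back.
static void demoPage() {
  final OByteBufferPool bufferPool = OByteBufferPool.instance();
  final ByteBuffer page = bufferPool.acquireDirect(true); // true => zero-filled
  try {
    page.putLong(0, 42L); // absolute write at the start of the page
  } finally {
    bufferPool.release(page); // return the buffer so it can be reused
  }
}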