use of java.util.concurrent.locks.Condition in project kafka by apache.
the class BufferPoolTest method testCleanupMemoryAvailabilityWaiterOnInterruption.
/**
* Test if the waiter that is waiting on availability of more memory is cleaned up when an interruption occurs
*/
@Test
public void testCleanupMemoryAvailabilityWaiterOnInterruption() throws Exception {
BufferPool pool = new BufferPool(2, 1, metrics, time, metricGroup);
long blockTime = 5000;
// take one of the pool's two bytes up front so the threads below must block
// (maxBlockTimeMs is a field of the enclosing test class)
pool.allocate(1, maxBlockTimeMs);
Thread t1 = new Thread(new BufferPoolAllocator(pool, blockTime));
Thread t2 = new Thread(new BufferPoolAllocator(pool, blockTime));
// start thread t1, which will try to allocate more memory from the buffer pool
t1.start();
// sleep for 500ms so that the condition variable c1 created by thread t1's pool.allocate() call has been inserted into the waiters queue
Thread.sleep(500);
Deque<Condition> waiters = pool.waiters();
// get the condition object associated with pool.allocate() by thread t1
Condition c1 = waiters.getFirst();
// start thread t2, which will also try to allocate more memory from the buffer pool
t2.start();
// sleep for 500ms so that the condition variable c2 created by thread t2's pool.allocate() call has been inserted into the waiters queue; the queue now holds two entries, c1 and c2
Thread.sleep(500);
t1.interrupt();
// sleep for 500ms so the interrupt is processed and t1's waiter is removed from the queue
Thread.sleep(500);
// get the condition object associated with allocate() by thread t2
Condition c2 = waiters.getLast();
t2.interrupt();
assertNotEquals(c1, c2);
t1.join();
t2.join();
// the allocate() calls made by threads t1 and t2 should both have been interrupted, leaving the waiters queue empty
assertEquals(0, pool.queued());
}
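The two worker threads above run a BufferPoolAllocator helper that is not included in this excerpt. A minimal sketch of what that nested Runnable might look like, assuming it asks for the pool's entire capacity (which cannot be satisfied while the 1-byte allocation above is outstanding) and treats the resulting timeout or interrupt as the expected outcome (fail() is JUnit's; TimeoutException is Kafka's unchecked org.apache.kafka.common.errors.TimeoutException):
// Hypothetical sketch of the helper Runnable used by the test above.
private static class BufferPoolAllocator implements Runnable {
    private final BufferPool pool;
    private final long maxBlockTimeMs;

    BufferPoolAllocator(BufferPool pool, long maxBlockTimeMs) {
        this.pool = pool;
        this.maxBlockTimeMs = maxBlockTimeMs;
    }

    @Override
    public void run() {
        try {
            // ask for the pool's full capacity so allocate() has to block
            pool.allocate(2, maxBlockTimeMs);
            fail("The allocation should have blocked and then failed");
        } catch (TimeoutException e) {
            // expected if the wait runs out before an interrupt arrives
        } catch (InterruptedException e) {
            // expected when the test interrupts this thread
        }
    }
}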
use of java.util.concurrent.locks.Condition in project kafka by apache.
the class BufferPool method allocate.
/**
* Allocate a buffer of the given size. This method blocks if there is not enough memory and the buffer pool
* is configured with blocking mode.
*
* @param size The buffer size to allocate in bytes
* @param maxTimeToBlockMs The maximum time in milliseconds to block for buffer memory to be available
* @return The buffer
* @throws InterruptedException If the thread is interrupted while blocked
* @throws IllegalArgumentException if size is larger than the total memory controlled by the pool (and hence we would block
* forever)
*/
public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedException {
if (size > this.totalMemory)
throw new IllegalArgumentException("Attempt to allocate " + size + " bytes, but there is a hard limit of " + this.totalMemory + " on memory allocations.");
this.lock.lock();
try {
// check if we have a free buffer of the right size pooled
if (size == poolableSize && !this.free.isEmpty())
return this.free.pollFirst();
// now check if the request is immediately satisfiable with the
// memory on hand or if we need to block
int freeListSize = freeSize() * this.poolableSize;
if (this.availableMemory + freeListSize >= size) {
// we have enough unallocated or pooled memory to immediately
// satisfy the request
freeUp(size);
this.availableMemory -= size;
lock.unlock();
return allocateByteBuffer(size);
} else {
// we are out of memory and will have to block
int accumulated = 0;
ByteBuffer buffer = null;
Condition moreMemory = this.lock.newCondition();
long remainingTimeToBlockNs = TimeUnit.MILLISECONDS.toNanos(maxTimeToBlockMs);
this.waiters.addLast(moreMemory);
// loop over and over until we have a buffer or have reserved enough memory to allocate one
while (accumulated < size) {
long startWaitNs = time.nanoseconds();
long timeNs;
boolean waitingTimeElapsed;
try {
waitingTimeElapsed = !moreMemory.await(remainingTimeToBlockNs, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
this.waiters.remove(moreMemory);
throw e;
} finally {
long endWaitNs = time.nanoseconds();
timeNs = Math.max(0L, endWaitNs - startWaitNs);
this.waitTime.record(timeNs, time.milliseconds());
}
if (waitingTimeElapsed) {
this.waiters.remove(moreMemory);
throw new TimeoutException("Failed to allocate memory within the configured max blocking time " + maxTimeToBlockMs + " ms.");
}
remainingTimeToBlockNs -= timeNs;
// check if we can satisfy this request from the free list, otherwise allocate memory
if (accumulated == 0 && size == this.poolableSize && !this.free.isEmpty()) {
// just grab a buffer from the free list
buffer = this.free.pollFirst();
accumulated = size;
} else {
// we'll need to allocate memory, but we may only get
// part of what we need on this iteration
freeUp(size - accumulated);
int got = (int) Math.min(size - accumulated, this.availableMemory);
this.availableMemory -= got;
accumulated += got;
}
}
// remove the condition for this thread to let the next thread
// in line start getting memory
Condition removed = this.waiters.removeFirst();
if (removed != moreMemory)
throw new IllegalStateException("Wrong condition: this shouldn't happen.");
// signal any additional waiters if there is more memory left over for them
if (this.availableMemory > 0 || !this.free.isEmpty()) {
if (!this.waiters.isEmpty())
this.waiters.peekFirst().signal();
}
// unlock and return the buffer
lock.unlock();
if (buffer == null)
return allocateByteBuffer(size);
else
return buffer;
}
} finally {
if (lock.isHeldByCurrentThread())
lock.unlock();
}
}
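allocate() measures the elapsed time around each await() itself because it also records that duration in the pool's wait-time metric. Without that metric, the same bounded wait is commonly written with Condition.awaitNanos, which returns the remaining wait time directly. A minimal sketch of that idiom, not Kafka code; the lock, condition, and readiness predicate are illustrative parameters:
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BooleanSupplier;

final class TimedConditionWait {
    // Wait until 'ready' becomes true, or give up once maxTimeToBlockMs has elapsed.
    static void awaitReady(ReentrantLock lock, Condition condition,
                           BooleanSupplier ready, long maxTimeToBlockMs)
            throws InterruptedException, TimeoutException {
        long remainingNs = TimeUnit.MILLISECONDS.toNanos(maxTimeToBlockMs);
        lock.lock();
        try {
            while (!ready.getAsBoolean()) {
                if (remainingNs <= 0)
                    throw new TimeoutException("Timed out after " + maxTimeToBlockMs + " ms");
                // awaitNanos returns an estimate of the nanoseconds left to wait,
                // or a value <= 0 if the timeout elapsed while waiting
                remainingNs = condition.awaitNanos(remainingNs);
            }
        } finally {
            lock.unlock();
        }
    }
}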
use of java.util.concurrent.locks.Condition in project kafka by apache.
the class BufferPool method deallocate.
/**
* Return buffers to the pool. If they are of the poolable size add them to the free list, otherwise just mark the
* memory as free.
*
* @param buffer The buffer to return
* @param size The size of the buffer to mark as deallocated, note that this may be smaller than buffer.capacity
* since the buffer may re-allocate itself during in-place compression
*/
public void deallocate(ByteBuffer buffer, int size) {
lock.lock();
try {
if (size == this.poolableSize && size == buffer.capacity()) {
buffer.clear();
this.free.add(buffer);
} else {
this.availableMemory += size;
}
Condition moreMem = this.waiters.peekFirst();
if (moreMem != null)
moreMem.signal();
} finally {
lock.unlock();
}
}
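Taken together, allocate() and deallocate() form the hand-off the waiters deque exists for: deallocate() frees memory and signals the first queued Condition, and each satisfied waiter signals the next one if memory is still left over. A hypothetical usage sketch of that pairing; the pool sizes and timeout are assumptions, and metrics, time, and metricGroup stand in for whatever the caller already has in scope:
// Hypothetical usage: allocate() blocks up to the given timeout for memory, and every
// buffer must be handed back with deallocate() so queued waiters get signalled.
BufferPool pool = new BufferPool(64 * 1024, 16 * 1024, metrics, time, metricGroup);
ByteBuffer buffer = pool.allocate(16 * 1024, 1000);
try {
    buffer.putInt(42); // use the buffer
} finally {
    pool.deallocate(buffer, 16 * 1024);
}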
use of java.util.concurrent.locks.Condition in project hive by apache.
the class TestLlapTaskCommunicator method testFinishableStateUpdateFailure.
@Test(timeout = 5000)
public void testFinishableStateUpdateFailure() throws Exception {
LlapTaskCommunicatorWrapperForTest wrapper = null;
Lock lock = new ReentrantLock();
Condition condition = lock.newCondition();
final AtomicBoolean opDone = new AtomicBoolean(false);
LlapProtocolClientProxy proxy = mock(LlapProtocolClientProxy.class, new FinishableStatusUpdateTestAnswer(lock, condition, opDone));
try {
wrapper = new LlapTaskCommunicatorWrapperForTest(proxy);
// Register tasks on 2 nodes, with a dependency on vertex1 completing.
ContainerId cId11 = wrapper.registerContainer(1, 0);
TaskSpec ts11 = wrapper.registerRunningTaskAttemptWithSourceVertex(cId11, 1);
ContainerId cId12 = wrapper.registerContainer(2, 0);
TaskSpec ts12 = wrapper.registerRunningTaskAttemptWithSourceVertex(cId12, 2);
ContainerId cId21 = wrapper.registerContainer(3, 1);
TaskSpec ts21 = wrapper.registerRunningTaskAttemptWithSourceVertex(cId21, 3);
// Send a state update for vertex1 completion. This triggers a status update to be sent out.
VertexStateUpdate vertexStateUpdate = new VertexStateUpdate(LlapTaskCommunicatorWrapperForTest.VERTEX_NAME1, VertexState.SUCCEEDED);
wrapper.getTaskCommunicator().onVertexStateUpdated(vertexStateUpdate);
// Wait for all invocations to complete.
lock.lock();
try {
while (!opDone.get()) {
condition.await();
}
} finally {
lock.unlock();
}
// Verify that a task kill went out for all tasks running on the failed node.
verify(wrapper.getTaskCommunicatorContext(), times(2)).taskKilled(any(TezTaskAttemptID.class), any(TaskAttemptEndReason.class), any(String.class));
verify(wrapper.getTaskCommunicatorContext()).taskKilled(eq(ts11.getTaskAttemptID()), eq(TaskAttemptEndReason.NODE_FAILED), any(String.class));
verify(wrapper.getTaskCommunicatorContext()).taskKilled(eq(ts12.getTaskAttemptID()), eq(TaskAttemptEndReason.NODE_FAILED), any(String.class));
wrapper.getTaskCommunicator().sendStateUpdate(LlapNodeId.getInstance(LlapTaskCommunicatorWrapperForTest.HOSTS[1], LlapTaskCommunicatorWrapperForTest.RPC_PORT), LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.getDefaultInstance());
// After the successful state update, verify that no additional taskKilled invocations occurred.
verify(wrapper.getTaskCommunicatorContext(), times(2)).taskKilled(any(TezTaskAttemptID.class), any(TaskAttemptEndReason.class), any(String.class));
} finally {
if (wrapper != null) {
wrapper.shutdown();
}
}
}
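The lock/condition pair in this test exists only so the test thread can block until the mocked proxy's Answer has finished its bookkeeping. The FinishableStatusUpdateTestAnswer itself is not shown in this excerpt; a minimal sketch of the signalling side it would need, assuming it flips opDone and signals under the same lock once all expected calls have been observed:
// Hypothetical sketch of the completion signal inside the mocked Answer: the waiting
// test thread loops on opDone, so the flag must be set before signalling the condition.
private void signalOpDone(Lock lock, Condition condition, AtomicBoolean opDone) {
    lock.lock();
    try {
        opDone.set(true);
        condition.signal();
    } finally {
        lock.unlock();
    }
}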
use of java.util.concurrent.locks.Condition in project caffeine by ben-manes.
the class NonReentrantLockTest method lock_error.
@Test(dataProvider = "lock")
public void lock_error(NonReentrantLock lock) {
Condition condition = Mockito.mock(Condition.class);
try {
lock.hasWaiters(condition);
Assert.fail();
} catch (IllegalArgumentException e) {
// expected: the condition does not belong to this lock
}
try {
lock.getWaitQueueLength(condition);
Assert.fail();
} catch (IllegalArgumentException e) {
// expected: the condition does not belong to this lock
}
try {
lock.getWaitingThreads(condition);
Assert.fail();
} catch (IllegalArgumentException e) {
// expected: the condition does not belong to this lock
}
try {
lock.sync.tryRelease(1);
Assert.fail();
} catch (IllegalMonitorStateException e) {
// expected: the lock is not held by the current thread
}
}
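For contrast, the same monitoring methods are expected to accept a Condition that the lock itself created. A hedged sketch of that passing case, assuming NonReentrantLock follows ReentrantLock's conventions (newCondition() from the Lock interface, and the lock held while querying):
// Hypothetical counterpart to lock_error: a Condition obtained from the lock itself
// is accepted by the monitoring methods (behaviour assumed to mirror ReentrantLock).
Condition owned = lock.newCondition();
lock.lock();
try {
    Assert.assertFalse(lock.hasWaiters(owned)); // nothing is waiting yet
    Assert.assertEquals(lock.getWaitQueueLength(owned), 0);
} finally {
    lock.unlock();
}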