use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
the class NIOWriteStreamBase method serializeQueuedWrites.
/**
 * Serialize all queued writes into the queue of pending buffers, which are allocated from
 * a thread-local memory pool.
 * @return number of queued writes processed
 * @throws IOException
 */
int serializeQueuedWrites(final NetworkDBBPool pool) throws IOException {
    int processedWrites = 0;
    final Deque<DeferredSerialization> oldlist = getQueuedWrites();
    if (oldlist.isEmpty())
        return 0;
    DeferredSerialization ds = null;
    int bytesQueued = 0;
    while ((ds = oldlist.poll()) != null) {
        processedWrites++;
        final int serializedSize = ds.getSerializedSize();
        if (serializedSize == DeferredSerialization.EMPTY_MESSAGE_LENGTH)
            continue;
        BBContainer outCont = m_queuedBuffers.peekLast();
        ByteBuffer outbuf = null;
        if (outCont == null || !outCont.b().hasRemaining()) {
            outCont = pool.acquire();
            outCont.b().clear();
            m_queuedBuffers.offer(outCont);
        }
        outbuf = outCont.b();
        if (outbuf.remaining() >= serializedSize) {
            // Fast path: serialize directly into the pooled direct buffer, creating no garbage
            final int oldLimit = outbuf.limit();
            outbuf.limit(outbuf.position() + serializedSize);
            final ByteBuffer slice = outbuf.slice();
            ds.serialize(slice);
            checkSloppySerialization(slice, ds);
            slice.position(0);
            bytesQueued += slice.remaining();
            outbuf.position(outbuf.limit());
            outbuf.limit(oldLimit);
        } else {
            // Slow path: serialize to a heap buffer, then copy it into the direct buffers
            ByteBuffer buf = ByteBuffer.allocate(serializedSize);
            ds.serialize(buf);
            checkSloppySerialization(buf, ds);
            buf.position(0);
            bytesQueued += buf.remaining();
            // Copy the heap buffer into as many pooled direct buffers as it takes
            while (buf.hasRemaining()) {
                if (!outbuf.hasRemaining()) {
                    outCont = pool.acquire();
                    outbuf = outCont.b();
                    outbuf.clear();
                    m_queuedBuffers.offer(outCont);
                }
                if (outbuf.remaining() >= buf.remaining()) {
                    outbuf.put(buf);
                } else {
                    final int oldLimit = buf.limit();
                    buf.limit(buf.position() + outbuf.remaining());
                    outbuf.put(buf);
                    buf.limit(oldLimit);
                }
            }
        }
    }
    updateQueued(bytesQueued, true);
    return processedWrites;
}
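The fast path above avoids heap allocation by carving a fixed-size window out of the pooled direct buffer with limit() and slice(), letting the message serialize itself into that window, and then restoring the parent buffer's cursors. A minimal standalone sketch of that technique (not VoltDB code; the payload and buffer sizes are invented for illustration):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Illustration of the fast-path slicing used in serializeQueuedWrites: carve a
// window out of a shared direct buffer, let the writer fill the slice, then
// advance the parent buffer past the window.
public class SliceWindowDemo {
    public static void main(String[] args) {
        ByteBuffer outbuf = ByteBuffer.allocateDirect(64);
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        int serializedSize = payload.length;

        final int oldLimit = outbuf.limit();
        outbuf.limit(outbuf.position() + serializedSize);
        ByteBuffer slice = outbuf.slice();   // the window shares the parent's memory
        slice.put(payload);                  // stands in for ds.serialize(slice)
        slice.position(0);
        System.out.println("bytes queued in this window: " + slice.remaining());

        outbuf.position(outbuf.limit());     // consume the window in the parent
        outbuf.limit(oldLimit);              // restore room for the next message
    }
}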
use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
the class NetworkDBBPool method acquire.
BBContainer acquire() {
    final BBContainer cont = m_buffers.poll();
    if (cont == null) {
        final BBContainer originContainer = DBBPool.allocateDirect(m_allocationSize);
        return new BBContainer(originContainer.b()) {
            @Override
            public void discard() {
                checkDoubleFree();
                // If we had to allocate over the desired limit, start discarding
                if (m_buffers.size() > m_numBuffers) {
                    originContainer.discard();
                    return;
                }
                m_buffers.push(originContainer);
            }
        };
    }
    return new BBContainer(cont.b()) {
        @Override
        public void discard() {
            checkDoubleFree();
            if (m_buffers.size() > m_numBuffers) {
                cont.discard();
                return;
            }
            m_buffers.push(cont);
        }
    };
}
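acquire() always returns a fresh BBContainer wrapper so that discard() can decide whether to recycle the underlying buffer or release it once the pool is over its target size. A simplified, self-contained sketch of that lease-and-recycle pattern (hypothetical class and method names, plain ByteBuffers instead of DBBPool containers):

import java.nio.ByteBuffer;
import java.util.concurrent.ConcurrentLinkedDeque;

// Simplified illustration of the recycle-on-discard pattern: hand out a wrapper
// whose release either returns the buffer to the pool or drops it once the pool
// already holds its target number of buffers.
public class SimpleBufferPool {
    private final ConcurrentLinkedDeque<ByteBuffer> m_buffers = new ConcurrentLinkedDeque<>();
    private final int m_numBuffers;
    private final int m_allocationSize;

    public SimpleBufferPool(int numBuffers, int allocationSize) {
        m_numBuffers = numBuffers;
        m_allocationSize = allocationSize;
    }

    public interface Lease {
        ByteBuffer buffer();
        void discard();
    }

    public Lease acquire() {
        ByteBuffer pooled = m_buffers.poll();
        final ByteBuffer b = (pooled != null) ? pooled : ByteBuffer.allocateDirect(m_allocationSize);
        return new Lease() {
            @Override
            public ByteBuffer buffer() {
                return b;
            }

            @Override
            public void discard() {
                // Over the target size: let the buffer be garbage collected
                if (m_buffers.size() > m_numBuffers) {
                    return;
                }
                b.clear();
                m_buffers.push(b);   // otherwise recycle it for the next acquire()
            }
        };
    }
}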
use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
the class NIOReadStream method getSlice.
Slice getSlice(final int size) {
    if (size < 0) {
        throw new IllegalArgumentException("negative slice size: " + size);
    }
    if (m_totalAvailable < size) {
        throw new IllegalStateException("Requested " + size + " bytes; only have " + m_totalAvailable + " bytes; call tryRead() first");
    }
    ImmutableList.Builder<ContainerSlice> slices = ImmutableList.builder();
    int bytesSliced = 0;
    while (bytesSliced < size) {
        BBContainer firstC = m_readBBContainers.peekFirst();
        if (firstC == null) {
            // Steal the write buffer
            m_poolBBContainer.b().flip();
            m_readBBContainers.add(m_poolBBContainer);
            firstC = m_poolBBContainer;
            m_poolBBContainer = null;
        }
        ByteBuffer first = firstC.b();
        assert first.remaining() > 0 : "no remaining bytes to read";
        int bytesRemaining = first.remaining();
        int bytesToCopy = size - bytesSliced;
        if (bytesToCopy > bytesRemaining) {
            bytesToCopy = bytesRemaining;
        }
        slices.add(new ContainerSlice(firstC, bytesToCopy));
        first.position(first.position() + bytesToCopy);
        bytesSliced += bytesToCopy;
        m_totalAvailable -= bytesToCopy;
        if (first.remaining() == 0) {
            m_readBBContainers.poll();
        }
    }
    return new Slice(slices.build());
}
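getSlice() records (container, length) pairs instead of copying, so the caller consumes bytes directly from the pooled direct buffers. A standalone sketch of that zero-copy accounting (hypothetical Chunk type in place of ContainerSlice, plain ByteBuffers, Java 16+ for the record):

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// Walk a deque of read buffers, record (buffer, length) pairs that together
// cover the requested size, and advance each buffer's position without copying.
public class SliceAccountingDemo {
    record Chunk(ByteBuffer buffer, int length) {}

    static List<Chunk> slice(Deque<ByteBuffer> readBuffers, int size) {
        List<Chunk> chunks = new ArrayList<>();
        int bytesSliced = 0;
        while (bytesSliced < size) {
            ByteBuffer first = readBuffers.peekFirst();
            int bytesToCopy = Math.min(size - bytesSliced, first.remaining());
            chunks.add(new Chunk(first, bytesToCopy));
            first.position(first.position() + bytesToCopy);
            bytesSliced += bytesToCopy;
            if (!first.hasRemaining()) {
                readBuffers.poll();   // buffer fully consumed
            }
        }
        return chunks;
    }

    public static void main(String[] args) {
        Deque<ByteBuffer> bufs = new ArrayDeque<>();
        bufs.add(ByteBuffer.wrap(new byte[] { 1, 2, 3 }));
        bufs.add(ByteBuffer.wrap(new byte[] { 4, 5, 6, 7 }));
        System.out.println(slice(bufs, 5).size());   // 2 chunks: 3 bytes + 2 bytes
    }
}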
use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
the class NIOReadStream method getBytes.
/**
 * Copy bytes from the current read buffers into the output array, releasing each
 * exhausted read buffer back to the thread-local memory pool.
 * @param output
 */
void getBytes(byte[] output) {
    if (m_totalAvailable < output.length) {
        throw new IllegalStateException("Requested " + output.length + " bytes; only have " + m_totalAvailable + " bytes; call tryRead() first");
    }
    int bytesCopied = 0;
    while (bytesCopied < output.length) {
        BBContainer firstC = m_readBBContainers.peekFirst();
        if (firstC == null) {
            // Steal the write buffer
            m_poolBBContainer.b().flip();
            m_readBBContainers.add(m_poolBBContainer);
            firstC = m_poolBBContainer;
            m_poolBBContainer = null;
        }
        ByteBuffer first = firstC.b();
        assert first.remaining() > 0;
        // Copy bytes from first into output
        int bytesRemaining = first.remaining();
        int bytesToCopy = output.length - bytesCopied;
        if (bytesToCopy > bytesRemaining)
            bytesToCopy = bytesRemaining;
        first.get(output, bytesCopied, bytesToCopy);
        bytesCopied += bytesToCopy;
        m_totalAvailable -= bytesToCopy;
        if (first.remaining() == 0) {
            // Consumed an entire buffer: release it back to the pool
            m_readBBContainers.poll();
            firstC.discard();
        }
    }
}
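getBytes() is the copying counterpart of getSlice(): it drains bytes into a caller-supplied array and frees exhausted buffers as it goes. A hypothetical caller-side pattern it supports, framing messages with a 4-byte length header (the ByteSource interface is a stand-in for NIOReadStream, not part of the VoltDB API):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Drain a fixed 4-byte length header from the stream, then drain exactly that
// many payload bytes.
public class FramingDemo {
    interface ByteSource {   // stand-in for NIOReadStream.getBytes(byte[])
        void getBytes(byte[] output);
    }

    static byte[] readFrame(ByteSource stream) {
        byte[] header = new byte[4];
        stream.getBytes(header);               // drain the header
        int length = ByteBuffer.wrap(header).getInt();
        byte[] payload = new byte[length];
        stream.getBytes(payload);              // then drain the announced payload
        return payload;
    }

    public static void main(String[] args) {
        ByteBuffer wire = ByteBuffer.allocate(9).putInt(5).put("hello".getBytes(StandardCharsets.UTF_8));
        wire.flip();
        ByteSource source = output -> wire.get(output);
        System.out.println(new String(readFrame(source), StandardCharsets.UTF_8));   // prints hello
    }
}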
use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
the class DeprecatedDefaultSnapshotDataTarget method write.
private ListenableFuture<?> write(final Callable<BBContainer> tupleDataC, final boolean prependLength) {
    /*
     * Unwrap the data to be written. For the traditional
     * snapshot data target this should be a noop.
     */
    BBContainer tupleDataTemp;
    try {
        tupleDataTemp = tupleDataC.call();
    } catch (Throwable t) {
        return Futures.immediateFailedFuture(t);
    }
    final BBContainer tupleData = tupleDataTemp;
    if (m_writeFailed) {
        tupleData.discard();
        return null;
    }
    if (prependLength) {
        tupleData.b().putInt(tupleData.b().remaining() - 4);
        tupleData.b().position(0);
    }
    m_outstandingWriteTasks.incrementAndGet();
    ListenableFuture<?> writeTask = m_es.submit(new Callable<Object>() {
        @Override
        public Object call() throws Exception {
            try {
                if (m_acceptOneWrite) {
                    m_acceptOneWrite = false;
                } else {
                    if (m_simulateFullDiskWritingChunk) {
                        throw new IOException("Disk full");
                    }
                }
                m_bytesAllowedBeforeSync.acquire(tupleData.b().remaining());
                int totalWritten = 0;
                while (tupleData.b().hasRemaining()) {
                    totalWritten += m_channel.write(tupleData.b());
                }
                m_bytesWritten += totalWritten;
                m_bytesWrittenSinceLastSync.addAndGet(totalWritten);
            } catch (IOException e) {
                m_writeException = e;
                SNAP_LOG.error("Error while attempting to write snapshot data to file " + m_file, e);
                m_writeFailed = true;
                throw e;
            } finally {
                tupleData.discard();
                m_outstandingWriteTasksLock.lock();
                try {
                    if (m_outstandingWriteTasks.decrementAndGet() == 0) {
                        m_noMoreOutstandingWriteTasksCondition.signalAll();
                    }
                } finally {
                    m_outstandingWriteTasksLock.unlock();
                }
            }
            return null;
        }
    });
    return writeTask;
}
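When prependLength is set, write() back-patches the first four bytes of the buffer with the payload length before handing the buffer to the asynchronous write task. A small standalone illustration of that convention (not VoltDB code; the payload is invented):

import java.nio.ByteBuffer;

// The producer leaves a 4-byte hole at the start of the buffer; the writer later
// fills it with the payload length before flushing the whole buffer to disk.
public class PrependLengthDemo {
    public static void main(String[] args) {
        ByteBuffer tupleData = ByteBuffer.allocate(64);
        tupleData.position(4);                          // reserve room for the length word
        tupleData.put(new byte[] { 1, 2, 3, 4, 5, 6 }); // pretend tuple payload
        tupleData.flip();                               // position 0, limit 10

        // What write(..., prependLength = true) does before submitting the task:
        tupleData.putInt(tupleData.remaining() - 4);    // payload length, excluding the header
        tupleData.position(0);

        System.out.println("length word: " + tupleData.getInt());         // 6
        System.out.println("total bytes to write: " + tupleData.limit()); // 10
    }
}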