use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
the class TableSaveFile, method getNextChunk.
// Will get the next chunk of the table that is just over the chunk size
public synchronized BBContainer getNextChunk() throws IOException {
    if (m_chunkReaderException != null) {
        throw m_chunkReaderException;
    }
    if (!m_hasMoreChunks) {
        // The reader has finished; return whatever is still queued (possibly null).
        final Container c = m_availableChunks.poll();
        return c;
    }
    if (m_chunkReader == null) {
        // Lazily start the background thread that reads chunks off disk.
        m_chunkReader = new ChunkReader();
        m_chunkReaderThread = new Thread(m_chunkReader, "ChunkReader");
        m_chunkReaderThread.start();
    }
    Container c = null;
    while (c == null && (m_hasMoreChunks || !m_availableChunks.isEmpty())) {
        c = m_availableChunks.poll();
        if (c == null) {
            try {
                // Block until the reader thread queues a chunk and notifies.
                wait();
            } catch (InterruptedException e) {
                throw new IOException(e);
            }
        }
    }
    if (c != null) {
        // Release a read-ahead permit so the reader thread can fetch another chunk.
        m_chunkReads.release();
    } else {
        if (m_chunkReaderException != null) {
            throw m_chunkReaderException;
        }
    }
    return c;
}
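A minimal consumer sketch, not taken from the VoltDB sources: it assumes a TableSaveFile (assumed to live in org.voltdb.sysprocs.saverestore) has already been constructed over an open snapshot file, and relies only on what the method above shows, namely that getNextChunk() returns null once the reader thread has delivered everything, and that BBContainer exposes b() and discard(). The helper name and its byte-counting body are purely illustrative.

import java.io.IOException;
import java.nio.ByteBuffer;
import org.voltcore.utils.DBBPool.BBContainer;
import org.voltdb.sysprocs.saverestore.TableSaveFile;

class ChunkDrainExample {
    // Hypothetical helper: drains every chunk and returns the total payload bytes seen.
    static long drain(TableSaveFile saveFile) throws IOException {
        long totalBytes = 0;
        while (true) {
            BBContainer chunk = saveFile.getNextChunk();
            if (chunk == null) {
                break;                       // reader finished and the queue is drained
            }
            try {
                ByteBuffer data = chunk.b(); // chunk payload, just over the chunk size
                totalBytes += data.remaining();
                // ... deserialize tuples from data here ...
            } finally {
                chunk.discard();             // always return the buffer to the pool
            }
        }
        return totalBytes;
    }
}

Discarding each container in a finally block mirrors the pooled-buffer discipline that DBBPool.BBContainer is built around: the consumer, not the save file, owns the chunk once getNextChunk() hands it over.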
use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
the class PersistentBinaryDeque, method push.
@Override
public synchronized void push(BBContainer[] objects) throws IOException {
    assertions();
    if (m_closed) {
        throw new IOException("Cannot push(): PBD has been Closed");
    }

    ArrayDeque<ArrayDeque<BBContainer>> segments = new ArrayDeque<ArrayDeque<BBContainer>>();
    ArrayDeque<BBContainer> currentSegment = new ArrayDeque<BBContainer>();

    //Take the objects that were provided and separate them into deques of objects
    //that will fit in a single write segment
    int available = PBDSegment.CHUNK_SIZE - 4;
    for (BBContainer object : objects) {
        int needed = PBDSegment.OBJECT_HEADER_BYTES + object.b().remaining();

        if (available - needed < 0) {
            if (needed > PBDSegment.CHUNK_SIZE - 4) {
                throw new IOException("Maximum object size is " + (PBDSegment.CHUNK_SIZE - 4));
            }
            segments.offer(currentSegment);
            currentSegment = new ArrayDeque<BBContainer>();
            available = PBDSegment.CHUNK_SIZE - 4;
        }
        available -= needed;
        currentSegment.add(object);
    }
    segments.add(currentSegment);
    assert (segments.size() > 0);

    //Calculate the index for the first segment to push at the front
    //This will be the index before the first segment available for read or
    //before the write segment if there are no finished segments
    Long nextIndex = 0L;
    if (m_segments.size() > 0) {
        nextIndex = peekFirstSegment().segmentId() - 1;
    }

    while (segments.peek() != null) {
        ArrayDeque<BBContainer> currentSegmentContents = segments.poll();
        PBDSegment writeSegment =
                newSegment(nextIndex, new VoltFile(m_path, m_nonce + "." + nextIndex + ".pbd"));
        writeSegment.openForWrite(true);
        nextIndex--;

        if (m_usageSpecificLog.isDebugEnabled()) {
            m_usageSpecificLog.debug("Segment " + writeSegment.file() + " has been created because of a push");
        }

        while (currentSegmentContents.peek() != null) {
            writeSegment.offer(currentSegmentContents.pollFirst(), false);
            m_numObjects++;
        }

        // Don't close the last one, it'll be used for writes
        if (!m_segments.isEmpty()) {
            writeSegment.close();
        }

        m_segments.put(writeSegment.segmentId(), writeSegment);
    }

    // Because we inserted at the beginning, cursors need to be rewound to the beginning
    rewindCursors();
    assertions();
}
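A minimal caller sketch, not taken from the VoltDB sources: it assumes an already-constructed PersistentBinaryDeque (assumed to live in org.voltdb.utils) and a batch of raw payloads, wraps each payload in a BBContainer, and pushes the whole batch at the head of the deque. DBBPool.allocateDirect(...) is used on the assumption that it returns a pooled direct-buffer container; the helper name is hypothetical, and container ownership after push() is not shown. As the method above enforces, each object must fit in PBDSegment.CHUNK_SIZE - 4 bytes or push() throws.

import java.io.IOException;
import java.nio.ByteBuffer;
import org.voltcore.utils.DBBPool;
import org.voltcore.utils.DBBPool.BBContainer;
import org.voltdb.utils.PersistentBinaryDeque;

class PushExample {
    // Hypothetical helper: wraps raw payloads and pushes them at the head of the deque.
    static void pushAtHead(PersistentBinaryDeque pbd, byte[][] payloads) throws IOException {
        BBContainer[] objects = new BBContainer[payloads.length];
        for (int i = 0; i < payloads.length; i++) {
            // allocateDirect is assumed to hand back a container over a direct ByteBuffer
            objects[i] = DBBPool.allocateDirect(payloads[i].length);
            ByteBuffer buf = objects[i].b();
            buf.put(payloads[i]);
            buf.flip();
        }
        // The objects are grouped into segment-sized batches and written before the
        // current head segment; existing read cursors are rewound so they are seen first.
        pbd.push(objects);
    }
}

Because push() inserts before the current head, this is the path used to put data back at the front of the deque (for example after a failed poll), whereas ordinary appends go through offer().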