Use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
From the class SnapshotUtil, the method retrieveSnapshotFilesInternal.
private static void retrieveSnapshotFilesInternal(File directory, NamedSnapshots namedSnapshots, FileFilter filter, boolean validate, SnapshotPathType stype, VoltLogger logger, int recursion) {
// Refuse to descend more than 32 directory levels.
if (recursion == 32) {
return;
}
if (!directory.exists()) {
System.err.println("Error: Directory " + directory.getPath() + " doesn't exist");
return;
}
if (!directory.canRead()) {
System.err.println("Error: Directory " + directory.getPath() + " is not readable");
return;
}
if (!directory.canExecute()) {
System.err.println("Error: Directory " + directory.getPath() + " is not executable");
return;
}
for (File f : directory.listFiles(filter)) {
if (f.isDirectory()) {
if (!f.canRead() || !f.canExecute()) {
System.err.println("Warning: Skipping directory " + f.getPath() + " due to lack of read permission");
} else {
// Pass recursion + 1 rather than recursion++ so sibling directories are scanned at the same depth.
retrieveSnapshotFilesInternal(f, namedSnapshots, filter, validate, stype, logger, recursion + 1);
}
continue;
}
if (!f.canRead()) {
System.err.println("Warning: " + f.getPath() + " is not readable");
continue;
}
FileInputStream fis = null;
try {
fis = new FileInputStream(f);
} catch (FileNotFoundException e1) {
System.err.println(e1.getMessage());
continue;
}
try {
if (f.getName().endsWith(".digest")) {
JSONObject digest = CRCCheck(f, logger);
if (digest == null)
continue;
Long snapshotTxnId = digest.getLong("txnId");
String nonce = parseNonceFromSnapshotFilename(f.getName());
Snapshot named_s = namedSnapshots.get(nonce);
named_s.setTxnId(snapshotTxnId);
InstanceId iid = new InstanceId(0, 0);
if (digest.has("instanceId")) {
iid = new InstanceId(digest.getJSONObject("instanceId"));
}
named_s.setInstanceId(iid);
TreeSet<String> tableSet = new TreeSet<String>();
JSONArray tables = digest.getJSONArray("tables");
for (int ii = 0; ii < tables.length(); ii++) {
tableSet.add(tables.getString(ii));
}
named_s.m_digestTables.add(tableSet);
named_s.m_digests.add(f);
} else if (f.getName().endsWith(".jar")) {
String nonce = parseNonceFromSnapshotFilename(f.getName());
Snapshot named_s = namedSnapshots.get(nonce);
named_s.m_catalogFile = f;
} else if (f.getName().endsWith(HASH_EXTENSION)) {
String nonce = parseNonceFromSnapshotFilename(f.getName());
Snapshot named_s = namedSnapshots.get(nonce);
if (validate) {
try {
// Retrieve hashinator config data for validation only.
// Throws IOException when the CRC check fails.
HashinatorSnapshotData hashData = new HashinatorSnapshotData();
hashData.restoreFromFile(f);
named_s.m_hashConfig = f;
} catch (IOException e) {
logger.warn(String.format("Skipping bad hashinator snapshot file '%s'", f.getPath()));
// Skip bad hashinator files.
continue;
}
}
} else {
HashSet<Integer> partitionIds = new HashSet<Integer>();
TableSaveFile saveFile = new TableSaveFile(fis, 1, null, true);
try {
for (Integer partitionId : saveFile.getPartitionIds()) {
partitionIds.add(partitionId);
}
if (validate && saveFile.getCompleted()) {
while (saveFile.hasMoreChunks()) {
BBContainer cont = saveFile.getNextChunk();
if (cont != null) {
cont.discard();
}
}
}
partitionIds.removeAll(saveFile.getCorruptedPartitionIds());
String nonce = parseNonceFromSnapshotFilename(f.getName());
Snapshot named_s = namedSnapshots.get(nonce);
named_s.setTxnId(saveFile.getTxnId());
TableFiles namedTableFiles = named_s.m_tableFiles.get(saveFile.getTableName());
if (namedTableFiles == null) {
namedTableFiles = new TableFiles(saveFile.isReplicated());
named_s.m_tableFiles.put(saveFile.getTableName(), namedTableFiles);
}
namedTableFiles.m_files.add(f);
namedTableFiles.m_completed.add(saveFile.getCompleted());
namedTableFiles.m_validPartitionIds.add(partitionIds);
namedTableFiles.m_corruptParititionIds.add(saveFile.getCorruptedPartitionIds());
namedTableFiles.m_totalPartitionCounts.add(saveFile.getTotalPartitions());
} finally {
saveFile.close();
}
}
} catch (IOException e) {
System.err.println(e.getMessage());
System.err.println("Error: Unable to process " + f.getPath());
} catch (JSONException e) {
System.err.println(e.getMessage());
System.err.println("Error: Unable to process " + f.getPath());
} finally {
try {
if (fis != null) {
fis.close();
}
} catch (IOException e) {
}
}
}
}
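The traversal above bounds its depth with an explicit recursion counter. Below is a minimal, self-contained sketch of that pattern; the MAX_DEPTH constant and the print-out standing in for the snapshot bookkeeping are illustrative, not VoltDB API.

import java.io.File;
import java.io.FileFilter;

public class DepthLimitedWalker {
    // Illustrative cap; retrieveSnapshotFilesInternal stops at 32 levels.
    private static final int MAX_DEPTH = 32;

    // Visit every readable file under 'dir', refusing to descend more than
    // MAX_DEPTH directory levels. Passing 'recursion + 1' (not 'recursion++')
    // ensures that only descent, never sibling order, raises the counter.
    static void walk(File dir, FileFilter filter, int recursion) {
        if (recursion == MAX_DEPTH || !dir.canRead() || !dir.canExecute()) {
            return;
        }
        File[] entries = dir.listFiles(filter);
        if (entries == null) {
            return; // I/O error, or 'dir' is not a directory
        }
        for (File f : entries) {
            if (f.isDirectory()) {
                walk(f, filter, recursion + 1);
            } else if (f.canRead()) {
                System.out.println(f.getPath()); // stand-in for snapshot parsing
            }
        }
    }

    public static void main(String[] args) {
        walk(new File(args.length > 0 ? args[0] : "."), null, 0);
    }
}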
Use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
From the class VoltFile, the method moveSubRootContents.
/*
* Merge one directory into another via copying. Useful for simulating
* node removal or files being moved from node to node or duplicated.
*/
public static void moveSubRootContents(File fromSubRoot, File toSubRoot) throws IOException {
assert (fromSubRoot.exists() && fromSubRoot.isDirectory());
assert (toSubRoot.exists() && toSubRoot.isDirectory());
for (File file : fromSubRoot.listFiles()) {
File fInOtherSubroot = new File(toSubRoot, file.getName());
if (file.isDirectory()) {
if (!fInOtherSubroot.exists()) {
if (!fInOtherSubroot.mkdir()) {
throw new IOException("Can't create directory " + fInOtherSubroot);
}
}
moveSubRootContents(file, fInOtherSubroot);
} else {
if (fInOtherSubroot.exists()) {
throw new IOException(fInOtherSubroot + " already exists");
}
if (!fInOtherSubroot.createNewFile()) {
throw new IOException("Can't create file " + fInOtherSubroot);
}
FileInputStream fis = new FileInputStream(file);
FileOutputStream fos = new FileOutputStream(fInOtherSubroot);
FileChannel inputChannel = fis.getChannel();
FileChannel outputChannel = fos.getChannel();
BBContainer bufC = DBBPool.allocateDirect(8192);
ByteBuffer buf = bufC.b();
try {
while (inputChannel.read(buf) != -1) {
buf.flip();
outputChannel.write(buf);
buf.clear();
}
} finally {
// These calls to close() also close the channels.
fis.close();
fos.close();
bufC.discard();
}
}
}
}
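The copy loop above is the standard NIO read/flip/write/clear cycle. Here is a hedged, standalone version of it: ByteBuffer.allocateDirect stands in for DBBPool.allocateDirect (so there is no container to discard()), and an inner drain loop is added because a single FileChannel.write() is not guaranteed to consume the whole buffer.

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class ChannelCopy {
    // Copy 'src' to 'dst' through an 8 KB direct buffer.
    static void copy(File src, File dst) throws IOException {
        try (FileInputStream fis = new FileInputStream(src);
             FileOutputStream fos = new FileOutputStream(dst)) {
            FileChannel in = fis.getChannel();
            FileChannel out = fos.getChannel();
            ByteBuffer buf = ByteBuffer.allocateDirect(8192);
            while (in.read(buf) != -1) {
                buf.flip();              // switch from filling to draining
                while (buf.hasRemaining()) {
                    out.write(buf);      // write() may drain only part of the buffer
                }
                buf.clear();             // ready for the next read
            }
        }
    }
}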
Use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
From the class StreamSnapshotDataTarget, the method write.
@Override
public ListenableFuture<?> write(Callable<BBContainer> tupleData, int tableId) {
rejoinLog.trace("Starting write");
try {
BBContainer chunkC;
ByteBuffer chunk;
try {
chunkC = tupleData.call();
chunk = chunkC.b();
} catch (Exception e) {
return Futures.immediateFailedFuture(e);
}
// Bail out if an earlier write failed, or on null input
if (m_writeFailed.get() != null || (chunkC == null)) {
if (chunkC != null) {
chunkC.discard();
}
if (m_failureReported) {
return null;
} else {
m_failureReported = true;
return Futures.immediateFailedFuture(m_writeFailed.get());
}
}
// A write after the stream is closed isn't supposed to happen, so record and return an exception
if (m_closed.get()) {
chunkC.discard();
IOException e = new IOException("Trying to write snapshot data " + "after the stream is closed");
m_writeFailed.set(e);
return Futures.immediateFailedFuture(e);
}
// If this table's schema hasn't been sent yet (it is still in the map), send it before the data
if (m_schemas.containsKey(tableId)) {
// remove the schema once sent
byte[] schema = m_schemas.remove(tableId);
rejoinLog.debug("Sending schema for table " + tableId);
rejoinLog.trace("Writing schema as part of this write");
send(StreamSnapshotMessageType.SCHEMA, tableId, schema);
}
chunk.put((byte) StreamSnapshotMessageType.DATA.ordinal());
// put chunk index
chunk.putInt(m_blockIndex);
// put table ID
chunk.putInt(tableId);
chunk.position(0);
return send(m_blockIndex++, chunkC);
} finally {
rejoinLog.trace("Finished call to write");
}
}
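write() stamps a small header onto the front of each chunk before sending it. The sketch below makes that layout explicit; the offset values are inferred from the field order (one type byte, then two ints) and from the typeOffset/blockIndexOffset/tableIdOffset/contentOffset constants the sink reads later, not copied from StreamSnapshotDataTarget's source.

import java.nio.ByteBuffer;

public class BlockHeader {
    // Inferred layout: byte 0 = message type ordinal, bytes 1-4 = block
    // index, bytes 5-8 = table ID, payload from byte 9 onward.
    static final int TYPE_OFFSET = 0;
    static final int BLOCK_INDEX_OFFSET = 1;
    static final int TABLE_ID_OFFSET = 5;
    static final int CONTENT_OFFSET = 9;

    // Stamp the header into a buffer whose first CONTENT_OFFSET bytes were
    // reserved for it, then rewind so the whole block can be sent.
    static void stamp(ByteBuffer chunk, byte type, int blockIndex, int tableId) {
        chunk.put(TYPE_OFFSET, type);
        chunk.putInt(BLOCK_INDEX_OFFSET, blockIndex);
        chunk.putInt(TABLE_ID_OFFSET, tableId);
        chunk.position(0);
    }
}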
Use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
From the class StreamSnapshotSink, the method processMessage.
/**
* Process a message pulled off from the network thread, and discard the
* container once it's processed.
*
* @param msg A nested pair of <sourceHSId, <targetId, blockContainer>>
* @return The restore work, or null if there's no data block to return
* to the site.
*/
private RestoreWork processMessage(Pair<Long, Pair<Long, BBContainer>> msg, CachedByteBufferAllocator resultBufferAllocator) {
if (msg == null) {
return null;
}
RestoreWork restoreWork = null;
long hsId = msg.getFirst();
long targetId = msg.getSecond().getFirst();
BBContainer container = msg.getSecond().getSecond();
try {
ByteBuffer block = container.b();
byte typeByte = block.get(StreamSnapshotDataTarget.typeOffset);
final int blockIndex = block.getInt(StreamSnapshotDataTarget.blockIndexOffset);
StreamSnapshotMessageType type = StreamSnapshotMessageType.values()[typeByte];
if (type == StreamSnapshotMessageType.FAILURE) {
VoltDB.crashLocalVoltDB("Rejoin source sent failure message.", false, null);
// Reached only in tests, where crashLocalVoltDB() returns instead of exiting
if (m_expectedEOFs.decrementAndGet() == 0) {
m_EOF = true;
}
} else if (type == StreamSnapshotMessageType.END) {
if (rejoinLog.isTraceEnabled()) {
rejoinLog.trace("Got END message " + blockIndex);
}
// End of stream, no need to ack this buffer
if (m_expectedEOFs.decrementAndGet() == 0) {
m_EOF = true;
}
} else if (type == StreamSnapshotMessageType.SCHEMA) {
rejoinLog.trace("Got SCHEMA message");
block.position(StreamSnapshotDataTarget.contentOffset);
byte[] schemaBytes = new byte[block.remaining()];
block.get(schemaBytes);
m_schemas.put(block.getInt(StreamSnapshotDataTarget.tableIdOffset), schemaBytes);
} else if (type == StreamSnapshotMessageType.HASHINATOR) {
block.position(StreamSnapshotDataTarget.contentOffset);
long version = block.getLong();
byte[] hashinatorConfig = new byte[block.remaining()];
block.get(hashinatorConfig);
restoreWork = new HashinatorRestoreWork(version, hashinatorConfig);
} else {
// Otherwise it's a normal snapshot data block
final int tableId = block.getInt(StreamSnapshotDataTarget.tableIdOffset);
if (!m_schemas.containsKey(tableId)) {
VoltDB.crashLocalVoltDB("No schema for table with ID " + tableId, false, null);
}
// Get the byte buffer ready to be consumed
block.position(StreamSnapshotDataTarget.contentOffset);
ByteBuffer nextChunk = getNextChunk(m_schemas.get(tableId), block, resultBufferAllocator);
m_bytesReceived += nextChunk.remaining();
restoreWork = new TableRestoreWork(tableId, nextChunk);
}
// Queue ack to this block
m_ack.ack(hsId, m_EOF, targetId, blockIndex);
return restoreWork;
} finally {
container.discard();
}
}
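processMessage() is the decoding counterpart of write() above. Here is a minimal sketch of just the header parsing, using the same inferred offsets as the previous sketch (the real code reads them from StreamSnapshotDataTarget's constants) and printing where the real method dispatches on the message type.

import java.nio.ByteBuffer;

public class BlockDecoder {
    // Pull the header fields out of a received block and expose the payload.
    static void decode(ByteBuffer block) {
        byte typeByte = block.get(0);      // typeOffset
        int blockIndex = block.getInt(1);  // blockIndexOffset
        int tableId = block.getInt(5);     // tableIdOffset
        block.position(9);                 // contentOffset: payload starts here
        byte[] payload = new byte[block.remaining()];
        block.get(payload);
        System.out.printf("type=%d block=%d table=%d payload=%d bytes%n",
                typeByte, blockIndex, tableId, payload.length);
    }
}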
Use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
From the class StreamSnapshotDataReceiver, the method run.
@Override
public void run() {
BlockingQueue<BBContainer> bufferQueue = m_bufferPool.getQueue(SnapshotSiteProcessor.m_snapshotBufferLength);
BlockingQueue<BBContainer> compressionBufferQueue = m_bufferPool.getQueue(SnapshotSiteProcessor.m_snapshotBufferCompressedLen);
try {
while (true) {
BBContainer container = null;
BBContainer compressionBufferC = null;
ByteBuffer compressionBuffer = null;
boolean success = false;
try {
VoltMessage msg = m_mb.recvBlocking();
if (msg == null) {
// If interrupted, break
break;
}
assert (msg instanceof RejoinDataMessage);
RejoinDataMessage dataMsg = (RejoinDataMessage) msg;
byte[] data = dataMsg.getData();
// Only grab the buffer from the pool after receiving a message from the
// mailbox. If the buffer is grabbed before receiving the message,
// this thread could hold on to a buffer it may not need and other receivers
// will be blocked if the pool has no more buffers left.
container = bufferQueue.take();
ByteBuffer messageBuffer = container.b();
messageBuffer.clear();
compressionBufferC = compressionBufferQueue.take();
compressionBuffer = compressionBufferC.b();
compressionBuffer.clear();
compressionBuffer.limit(data.length);
compressionBuffer.put(data);
compressionBuffer.flip();
int uncompressedSize = CompressionService.decompressBuffer(compressionBuffer, messageBuffer);
messageBuffer.limit(uncompressedSize);
m_queue.offer(Pair.of(dataMsg.m_sourceHSId, Pair.of(dataMsg.getTargetId(), container)));
success = true;
} finally {
if (!success && container != null) {
container.discard();
}
// Check the container itself so it cannot leak if b() ever failed.
if (compressionBufferC != null) {
compressionBufferC.discard();
}
}
}
} catch (IOException e) {
/*
* Wait until the last message is delivered, then wait a bit longer
* so it can be processed, giving m_closed a chance to be set and
* the exception to be suppressed.
*/
try {
while (!m_queue.isEmpty()) {
Thread.sleep(50);
}
Thread.sleep(300);
} catch (InterruptedException e2) {
}
if (m_closed) {
return;
}
rejoinLog.error("Error reading a message from a recovery stream.", e);
} catch (InterruptedException e) {
return;
}
}
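The receiver's key discipline is taking a pooled buffer only after a message arrives, and returning it on any failure before handoff. Below is a hedged sketch of that pattern, with a plain BlockingQueue<ByteBuffer> standing in for m_bufferPool's container queue and offer() standing in for discard().

import java.nio.ByteBuffer;
import java.util.concurrent.BlockingQueue;

public class PooledReceive {
    // Copy 'data' into a pooled buffer and hand it to the caller; on any
    // failure before the handoff, return the buffer to the pool instead.
    static ByteBuffer handle(byte[] data, BlockingQueue<ByteBuffer> pool)
            throws InterruptedException {
        ByteBuffer buf = pool.take(); // acquired only once work actually exists
        boolean success = false;
        try {
            buf.clear();
            buf.put(data);
            buf.flip();
            success = true;
            return buf;               // ownership passes to the consumer
        } finally {
            if (!success) {
                pool.offer(buf);      // the sketch's analogue of discard()
            }
        }
    }
}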