Use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
From the class StreamBlockQueue, method pollPersistentDeque:
/**
 * Wrapper around the common operation of pulling an element out of the persistent deque.
 * The behavior is complicated (and might change) since the persistent deque can throw an IOException.
 * The poll always removes the element from the persistent queue (although the backing file is not
 * necessarily removed; that happens in deleteContents), and it adds a reference to the block to
 * the in-memory deque unless actuallyPoll is true.
 * @param actuallyPoll if true, consume the block outright instead of caching it in the in-memory deque
 * @return the polled StreamBlock, or null if the persistent deque is empty or an IOException occurred
 */
private StreamBlock pollPersistentDeque(boolean actuallyPoll) {
    BBContainer cont = null;
    try {
        cont = m_reader.poll(PersistentBinaryDeque.UNSAFE_CONTAINER_FACTORY);
    } catch (IOException e) {
        exportLog.error(e);
    }
    if (cont == null) {
        return null;
    } else {
        // If the container is not null, unpack it.
        final BBContainer fcont = cont;
        long uso = cont.b().getLong(0);
        // Pass the stream block a subset of the bytes; provide
        // a container that discards the original returned by the persistent deque.
        StreamBlock block = new StreamBlock(fcont, uso, true);
        // Optionally store a reference to the block in the in-memory deque.
        if (!actuallyPoll) {
            m_memoryDeque.offer(block);
        }
        return block;
    }
}
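A minimal sketch of how a caller might drive the actuallyPoll flag, assuming hypothetical peekSketch()/pollSketch() wrappers on the same class (the names are illustrative, not necessarily the real StreamBlockQueue API): the peek path caches the block in m_memoryDeque for later reuse, while the poll path consumes it outright.

private StreamBlock peekSketch() {
    StreamBlock block = m_memoryDeque.peek();   // serve from the in-memory cache first
    if (block == null) {
        // false: pollPersistentDeque() also offers the block to m_memoryDeque
        block = pollPersistentDeque(false);
    }
    return block;
}

private StreamBlock pollSketch() {
    StreamBlock block = m_memoryDeque.poll();   // consume a cached block if present
    if (block == null) {
        // true: take the block without caching it
        block = pollPersistentDeque(true);
    }
    return block;
}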
Use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
From the class TestExecutionEngine, method testStreamTables:
public void testStreamTables() throws Exception {
    sourceEngine.loadCatalog(0, m_catalog.serialize());
    // Each EE needs its own thread for correct initialization.
    final AtomicReference<ExecutionEngine> destinationEngine = new AtomicReference<ExecutionEngine>();
    final byte[] configBytes = LegacyHashinator.getConfigureBytes(1);
    Thread destEEThread = new Thread() {
        @Override
        public void run() {
            destinationEngine.set(new ExecutionEngineJNI(CLUSTER_ID, NODE_ID, 0, 0, "", 0, 64 * 1024, 100, new HashinatorConfig(HashinatorType.LEGACY, configBytes, 0, 0), false));
        }
    };
    destEEThread.start();
    destEEThread.join();
    destinationEngine.get().loadCatalog(0, m_catalog.serialize());
    int WAREHOUSE_TABLEID = warehouseTableId(m_catalog);
    int STOCK_TABLEID = stockTableId(m_catalog);
    loadTestTables(sourceEngine, m_catalog);
    sourceEngine.activateTableStream(WAREHOUSE_TABLEID, TableStreamType.RECOVERY, Long.MAX_VALUE, new SnapshotPredicates(-1).toBytes());
    sourceEngine.activateTableStream(STOCK_TABLEID, TableStreamType.RECOVERY, Long.MAX_VALUE, new SnapshotPredicates(-1).toBytes());
    final BBContainer origin = DBBPool.allocateDirect(1024 * 1024 * 2);
    origin.b().clear();
    final BBContainer container = new BBContainer(origin.b()) {
        @Override
        public void discard() {
            checkDoubleFree();
            origin.discard();
        }
    };
    try {
        List<BBContainer> output = new ArrayList<BBContainer>();
        output.add(container);
        int serialized = sourceEngine.tableStreamSerializeMore(WAREHOUSE_TABLEID, TableStreamType.RECOVERY, output).getSecond()[0];
        assertTrue(serialized > 0);
        container.b().limit(serialized);
        destinationEngine.get().processRecoveryMessage(container.b(), container.address());
        serialized = sourceEngine.tableStreamSerializeMore(WAREHOUSE_TABLEID, TableStreamType.RECOVERY, output).getSecond()[0];
        assertEquals(5, serialized);
        assertEquals(RecoveryMessageType.Complete.ordinal(), container.b().get());
        assertEquals(sourceEngine.tableHashCode(WAREHOUSE_TABLEID), destinationEngine.get().tableHashCode(WAREHOUSE_TABLEID));
        container.b().clear();
        serialized = sourceEngine.tableStreamSerializeMore(STOCK_TABLEID, TableStreamType.RECOVERY, output).getSecond()[0];
        assertTrue(serialized > 0);
        container.b().limit(serialized);
        destinationEngine.get().processRecoveryMessage(container.b(), container.address());
        serialized = sourceEngine.tableStreamSerializeMore(STOCK_TABLEID, TableStreamType.RECOVERY, output).getSecond()[0];
        assertEquals(5, serialized);
        assertEquals(RecoveryMessageType.Complete.ordinal(), container.b().get());
        assertEquals(STOCK_TABLEID, container.b().getInt());
        assertEquals(sourceEngine.tableHashCode(STOCK_TABLEID), destinationEngine.get().tableHashCode(STOCK_TABLEID));
    } finally {
        container.discard();
    }
}
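The anonymous BBContainer in this test is worth calling out as a reusable pattern: it shares the origin container's ByteBuffer but routes discard() back to the origin, with checkDoubleFree() flagging a second discard as a bug. A minimal sketch using only the API already shown in this snippet (the buffer size and the putInt() payload are arbitrary):

final BBContainer origin = DBBPool.allocateDirect(4096);
final BBContainer view = new BBContainer(origin.b()) {
    @Override
    public void discard() {
        checkDoubleFree();   // assert this view is only discarded once
        origin.discard();    // actually release the direct buffer
    }
};
try {
    view.b().putInt(42);     // work with the shared buffer through the view
} finally {
    view.discard();          // frees origin exactly once
}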
Use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
From the class TestExecutionEngine, method testStreamIndex:
public void testStreamIndex() throws Exception {
    sourceEngine.loadCatalog(0, m_catalog.serialize());
    // Each EE needs its own thread for correct initialization.
    final AtomicReference<ExecutionEngine> destinationEngine = new AtomicReference<ExecutionEngine>();
    final byte[] configBytes = LegacyHashinator.getConfigureBytes(1);
    Thread destEEThread = new Thread() {
        @Override
        public void run() {
            destinationEngine.set(new ExecutionEngineJNI(CLUSTER_ID, NODE_ID, 0, 0, "", 0, 64 * 1024, 100, new HashinatorConfig(HashinatorType.LEGACY, configBytes, 0, 0), false));
        }
    };
    destEEThread.start();
    destEEThread.join();
    destinationEngine.get().loadCatalog(0, m_catalog.serialize());
    int STOCK_TABLEID = stockTableId(m_catalog);
    loadTestTables(sourceEngine, m_catalog);
    SnapshotPredicates predicates = new SnapshotPredicates(-1);
    predicates.addPredicate(new HashRangeExpressionBuilder().put(0x00000000, 0x7fffffff).build(0), true);
    // Build the index.
    sourceEngine.activateTableStream(STOCK_TABLEID, TableStreamType.ELASTIC_INDEX, Long.MAX_VALUE, predicates.toBytes());
    // Humor serializeMore() by providing a buffer, even though it's not used.
    final BBContainer origin = DBBPool.allocateDirect(1024 * 1024 * 2);
    origin.b().clear();
    BBContainer container = new BBContainer(origin.b()) {
        @Override
        public void discard() {
            checkDoubleFree();
            origin.discard();
        }
    };
    try {
        List<BBContainer> output = new ArrayList<BBContainer>();
        output.add(container);
        assertEquals(0, sourceEngine.tableStreamSerializeMore(STOCK_TABLEID, TableStreamType.ELASTIC_INDEX, output).getSecond()[0]);
    } finally {
        container.discard();
    }
}
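Since put() on HashRangeExpressionBuilder chains (as the single call above shows), covering several hash ranges would presumably be a matter of repeating it; a hedged sketch (whether one predicate accepts multiple ranges is an assumption here, not confirmed by this snippet):

SnapshotPredicates predicates = new SnapshotPredicates(-1);
predicates.addPredicate(new HashRangeExpressionBuilder()
        .put(0x00000000, 0x3fffffff)   // first half of the non-negative hash space
        .put(0x40000000, 0x7fffffff)   // second half
        .build(0), true);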
Use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
From the class SnapshotSiteProcessor, method getOutputBuffers:
/**
 * Create an output buffer for each task.
 * @param tableTasks the snapshot table tasks that need buffers, one buffer per task
 * @param noSchedule forwarded to createNewBuffer for each allocated buffer
 * @return the list of buffers, or null if there aren't enough buffers left in the pool
 */
private List<BBContainer> getOutputBuffers(Collection<SnapshotTableTask> tableTasks, boolean noSchedule) {
    final int desired = tableTasks.size();
    while (true) {
        int available = m_availableSnapshotBuffers.get();
        // Limit the number of buffers used concurrently.
        if (desired > available) {
            return null;
        }
        if (m_availableSnapshotBuffers.compareAndSet(available, available - desired)) {
            break;
        }
    }
    List<BBContainer> outputBuffers = new ArrayList<BBContainer>(tableTasks.size());
    for (int ii = 0; ii < tableTasks.size(); ii++) {
        final BBContainer origin = DBBPool.allocateDirectAndPool(m_snapshotBufferLength);
        outputBuffers.add(createNewBuffer(origin, noSchedule));
    }
    return outputBuffers;
}
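The while/compareAndSet loop above is a lock-free way to claim several buffers at once: either all `desired` permits are reserved atomically, or the counter is left untouched. A self-contained sketch of the same pattern (the class and method names here are illustrative, not VoltDB API):

import java.util.concurrent.atomic.AtomicInteger;

final class BufferPermits {
    private final AtomicInteger m_available;

    BufferPermits(int permits) {
        m_available = new AtomicInteger(permits);
    }

    /** Reserve desired permits atomically; return false and change nothing if too few remain. */
    boolean tryReserve(int desired) {
        while (true) {
            int available = m_available.get();
            if (desired > available) {
                return false;                 // not enough permits; counter unchanged
            }
            // Retry if another thread moved the counter between get() and the CAS.
            if (m_available.compareAndSet(available, available - desired)) {
                return true;
            }
        }
    }

    void release(int count) {
        m_available.addAndGet(count);         // return permits to the pool
    }
}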
Use of org.voltcore.utils.DBBPool.BBContainer in project voltdb by VoltDB.
From the class SnapshotUtil, method retrieveSnapshotFilesInternal:
private static void retrieveSnapshotFilesInternal(File directory, NamedSnapshots namedSnapshots, FileFilter filter, boolean validate, SnapshotPathType stype, VoltLogger logger, int recursion) {
    if (recursion == 32) {
        return;
    }
    if (!directory.exists()) {
        System.err.println("Error: Directory " + directory.getPath() + " doesn't exist");
        return;
    }
    if (!directory.canRead()) {
        System.err.println("Error: Directory " + directory.getPath() + " is not readable");
        return;
    }
    if (!directory.canExecute()) {
        System.err.println("Error: Directory " + directory.getPath() + " is not executable");
        return;
    }
    for (File f : directory.listFiles(filter)) {
        if (f.isDirectory()) {
            if (!f.canRead() || !f.canExecute()) {
                System.err.println("Warning: Skipping directory " + f.getPath() + " due to lack of read/execute permission");
            } else {
                // Pass an incremented copy of the depth; recursion++ would hand the
                // old value to the recursive call and defeat the depth limit above.
                retrieveSnapshotFilesInternal(f, namedSnapshots, filter, validate, stype, logger, recursion + 1);
            }
            continue;
        }
        if (!f.canRead()) {
            System.err.println("Warning: " + f.getPath() + " is not readable");
            continue;
        }
        FileInputStream fis = null;
        try {
            fis = new FileInputStream(f);
        } catch (FileNotFoundException e1) {
            System.err.println(e1.getMessage());
            continue;
        }
        try {
            if (f.getName().endsWith(".digest")) {
                JSONObject digest = CRCCheck(f, logger);
                if (digest == null) {
                    continue;
                }
                Long snapshotTxnId = digest.getLong("txnId");
                String nonce = parseNonceFromSnapshotFilename(f.getName());
                Snapshot named_s = namedSnapshots.get(nonce);
                named_s.setTxnId(snapshotTxnId);
                InstanceId iid = new InstanceId(0, 0);
                if (digest.has("instanceId")) {
                    iid = new InstanceId(digest.getJSONObject("instanceId"));
                }
                named_s.setInstanceId(iid);
                TreeSet<String> tableSet = new TreeSet<String>();
                JSONArray tables = digest.getJSONArray("tables");
                for (int ii = 0; ii < tables.length(); ii++) {
                    tableSet.add(tables.getString(ii));
                }
                named_s.m_digestTables.add(tableSet);
                named_s.m_digests.add(f);
            } else if (f.getName().endsWith(".jar")) {
                String nonce = parseNonceFromSnapshotFilename(f.getName());
                Snapshot named_s = namedSnapshots.get(nonce);
                named_s.m_catalogFile = f;
            } else if (f.getName().endsWith(HASH_EXTENSION)) {
                String nonce = parseNonceFromSnapshotFilename(f.getName());
                Snapshot named_s = namedSnapshots.get(nonce);
                if (validate) {
                    try {
                        // Retrieve hashinator config data for validation only.
                        // Throws IOException when the CRC check fails.
                        HashinatorSnapshotData hashData = new HashinatorSnapshotData();
                        hashData.restoreFromFile(f);
                        named_s.m_hashConfig = f;
                    } catch (IOException e) {
                        logger.warn(String.format("Skipping bad hashinator snapshot file '%s'", f.getPath()));
                        // Skip bad hashinator files.
                        continue;
                    }
                }
            } else {
                HashSet<Integer> partitionIds = new HashSet<Integer>();
                TableSaveFile saveFile = new TableSaveFile(fis, 1, null, true);
                try {
                    for (Integer partitionId : saveFile.getPartitionIds()) {
                        partitionIds.add(partitionId);
                    }
                    if (validate && saveFile.getCompleted()) {
                        while (saveFile.hasMoreChunks()) {
                            BBContainer cont = saveFile.getNextChunk();
                            if (cont != null) {
                                cont.discard();
                            }
                        }
                    }
                    partitionIds.removeAll(saveFile.getCorruptedPartitionIds());
                    String nonce = parseNonceFromSnapshotFilename(f.getName());
                    Snapshot named_s = namedSnapshots.get(nonce);
                    named_s.setTxnId(saveFile.getTxnId());
                    TableFiles namedTableFiles = named_s.m_tableFiles.get(saveFile.getTableName());
                    if (namedTableFiles == null) {
                        namedTableFiles = new TableFiles(saveFile.isReplicated());
                        named_s.m_tableFiles.put(saveFile.getTableName(), namedTableFiles);
                    }
                    namedTableFiles.m_files.add(f);
                    namedTableFiles.m_completed.add(saveFile.getCompleted());
                    namedTableFiles.m_validPartitionIds.add(partitionIds);
                    namedTableFiles.m_corruptParititionIds.add(saveFile.getCorruptedPartitionIds());
                    namedTableFiles.m_totalPartitionCounts.add(saveFile.getTotalPartitions());
                } finally {
                    saveFile.close();
                }
            }
        } catch (IOException e) {
            System.err.println(e.getMessage());
            System.err.println("Error: Unable to process " + f.getPath());
        } catch (JSONException e) {
            System.err.println(e.getMessage());
            System.err.println("Error: Unable to process " + f.getPath());
        } finally {
            try {
                if (fis != null) {
                    fis.close();
                }
            } catch (IOException e) {
                // Best effort: nothing useful to do if close() fails.
            }
        }
    }
}
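The directory walk above guards against runaway nesting with an explicit depth parameter. A self-contained sketch of that pattern (the class name and MAX_DEPTH constant are illustrative); note the recursive call passes depth + 1, since passing depth++ would hand the unincremented value down and defeat the cap:

import java.io.File;

final class BoundedWalk {
    private static final int MAX_DEPTH = 32;

    static void walk(File dir, int depth) {
        if (depth == MAX_DEPTH || !dir.canRead() || !dir.canExecute()) {
            return;                        // too deep, or not traversable
        }
        File[] children = dir.listFiles();
        if (children == null) {
            return;                        // I/O error, or dir is not a directory
        }
        for (File f : children) {
            if (f.isDirectory()) {
                walk(f, depth + 1);        // incremented copy, not depth++
            } else if (f.canRead()) {
                System.out.println(f.getPath());
            }
        }
    }
}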