Use of org.voltdb.messaging.FastDeserializer in project voltdb by VoltDB.
From the class TestTwoSitePlans, method setUp():
@SuppressWarnings("deprecation")
@Override
public void setUp() throws IOException, InterruptedException {
    VoltDB.instance().readBuildInfo("Test");

    // compile a catalog
    String testDir = BuildDirectoryUtils.getBuildDirectoryPath();
    String catalogJar = testDir + File.separator + JAR;
    TPCCProjectBuilder pb = new TPCCProjectBuilder();
    pb.addDefaultSchema();
    pb.addDefaultPartitioning();
    pb.addProcedures(MultiSiteSelect.class, InsertNewOrder.class);
    pb.compile(catalogJar, 2, 0);

    // load a catalog
    byte[] bytes = MiscUtils.fileToBytes(new File(catalogJar));
    String serializedCatalog = CatalogUtil.getSerializedCatalogStringFromJar(
            CatalogUtil.loadAndUpgradeCatalogFromJar(bytes, false).getFirst());
    // create the catalog (that will be passed to the ClientInterface)
    catalog = new Catalog();
    catalog.execute(serializedCatalog);

    // update the catalog with the data from the deployment file
    String pathToDeployment = pb.getPathToDeployment();
    assertTrue(CatalogUtil.compileDeployment(catalog, pathToDeployment, false) == null);

    cluster = catalog.getClusters().get("cluster");
    CatalogMap<Procedure> procedures = cluster.getDatabases().get("database").getProcedures();
    Procedure insertProc = procedures.get("InsertNewOrder");
    assert (insertProc != null);
    selectProc = procedures.get("MultiSiteSelect");
    assert (selectProc != null);

    // Each EE needs its own thread for correct initialization.
    final AtomicReference<ExecutionEngine> site1Reference = new AtomicReference<ExecutionEngine>();
    final byte[] configBytes = LegacyHashinator.getConfigureBytes(2);
    Thread site1Thread = new Thread() {
        @Override
        public void run() {
            site1Reference.set(new ExecutionEngineJNI(
                    cluster.getRelativeIndex(), 1, 0, 0, "", 0, 64 * 1024, 100,
                    new HashinatorConfig(HashinatorType.LEGACY, configBytes, 0, 0), false));
        }
    };
    site1Thread.start();
    site1Thread.join();

    final AtomicReference<ExecutionEngine> site2Reference = new AtomicReference<ExecutionEngine>();
    Thread site2Thread = new Thread() {
        @Override
        public void run() {
            site2Reference.set(new ExecutionEngineJNI(
                    cluster.getRelativeIndex(), 2, 1, 0, "", 0, 64 * 1024, 100,
                    new HashinatorConfig(HashinatorType.LEGACY, configBytes, 0, 0), false));
        }
    };
    site2Thread.start();
    site2Thread.join();

    // create two EEs
    ee1 = site1Reference.get();
    ee1.loadCatalog(0, catalog.serialize());
    ee2 = site2Reference.get();
    ee2.loadCatalog(0, catalog.serialize());

    // cache some plan fragments
    selectStmt = selectProc.getStatements().get("selectAll");
    assert (selectStmt != null);
    int i = 0;
    // this kinda assumes the right order
    for (PlanFragment f : selectStmt.getFragments()) {
        if (i == 0)
            selectTopFrag = f;
        else
            selectBottomFrag = f;
        i++;
    }
    assert (selectTopFrag != null);
    assert (selectBottomFrag != null);
    if (selectTopFrag.getHasdependencies() == false) {
        PlanFragment temp = selectTopFrag;
        selectTopFrag = selectBottomFrag;
        selectBottomFrag = temp;
    }

    // get the insert frag
    Statement insertStmt = insertProc.getStatements().get("insert");
    assert (insertStmt != null);
    for (PlanFragment f : insertStmt.getFragments()) insertFrag = f;

    // populate plan cache
    ActivePlanRepository.clear();
    ActivePlanRepository.addFragmentForTest(
            CatalogUtil.getUniqueIdForFragment(selectBottomFrag),
            Encoder.decodeBase64AndDecompressToBytes(selectBottomFrag.getPlannodetree()),
            selectStmt.getSqltext());
    ActivePlanRepository.addFragmentForTest(
            CatalogUtil.getUniqueIdForFragment(selectTopFrag),
            Encoder.decodeBase64AndDecompressToBytes(selectTopFrag.getPlannodetree()),
            selectStmt.getSqltext());
    ActivePlanRepository.addFragmentForTest(
            CatalogUtil.getUniqueIdForFragment(insertFrag),
            Encoder.decodeBase64AndDecompressToBytes(insertFrag.getPlannodetree()),
            insertStmt.getSqltext());

    // insert some data
    ParameterSet params = ParameterSet.fromArrayNoCopy(1L, 1L, 1L);
    FastDeserializer fragResult2 = ee2.executePlanFragments(
            1, new long[] { CatalogUtil.getUniqueIdForFragment(insertFrag) }, null,
            new ParameterSet[] { params }, null, new String[] { selectStmt.getSqltext() },
            null, null, 1, 1, 0, 42, Long.MAX_VALUE, false);
    // ignore totalsize field in message
    fragResult2.readInt();
    VoltTable[] results = TableHelper.convertBackedBufferToTables(fragResult2.buffer(), 1);
    assert (results[0].asScalarLong() == 1L);

    params = ParameterSet.fromArrayNoCopy(2L, 2L, 2L);
    FastDeserializer fragResult1 = ee1.executePlanFragments(
            1, new long[] { CatalogUtil.getUniqueIdForFragment(insertFrag) }, null,
            new ParameterSet[] { params }, null, new String[] { selectStmt.getSqltext() },
            null, null, 2, 2, 1, 42, Long.MAX_VALUE, false);
    // ignore totalsize field in message
    fragResult1.readInt();
    results = TableHelper.convertBackedBufferToTables(fragResult1.buffer(), 1);
    assert (fragResult1.buffer() != fragResult2.buffer());
    assert (results[0].asScalarLong() == 1L);
}
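Both executePlanFragments(...) calls above unpack the returned FastDeserializer the same way: skip the leading total-size int, convert the remaining buffer to VoltTables, and check the scalar insert count. The following is a minimal sketch of that pattern as a private helper that could live inside the test class; the helper itself is hypothetical and uses only the FastDeserializer and TableHelper calls already seen in setUp():

// Hypothetical helper, not part of TestTwoSitePlans; it merely factors out the
// result-unpacking pattern used twice above.
private static long scalarResultFrom(FastDeserializer fragResult) throws IOException {
    fragResult.readInt(); // skip the leading totalsize field in the message
    VoltTable[] tables = TableHelper.convertBackedBufferToTables(fragResult.buffer(), 1);
    return tables[0].asScalarLong(); // single-row, single-column result (rows inserted)
}

With it, each check would read as assert (scalarResultFrom(fragResult2) == 1L);.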
Use of org.voltdb.messaging.FastDeserializer in project voltdb by VoltDB.
From the class ExecutionEngineJNI, method coreExecutePlanFragments():
/**
 * @param undoToken Token identifying undo quantum for generated undo info
 * @param traceOn
 */
@Override
protected FastDeserializer coreExecutePlanFragments(
        final int batchIndex,
        final int numFragmentIds,
        final long[] planFragmentIds,
        final long[] inputDepIds,
        final Object[] parameterSets,
        DeterminismHash determinismHash,
        boolean[] isWriteFrags,
        int[] sqlCRCs,
        final long txnId,
        final long spHandle,
        final long lastCommittedSpHandle,
        long uniqueId,
        final long undoToken,
        final boolean traceOn) throws EEException {
    // plan frag zero is invalid
    assert ((numFragmentIds == 0) || (planFragmentIds[0] != 0));
    if (numFragmentIds == 0)
        return m_emptyDeserializer;
    final int batchSize = numFragmentIds;
    if (HOST_TRACE_ENABLED) {
        for (int i = 0; i < batchSize; ++i) {
            LOG.trace("Batch Executing planfragment:" + planFragmentIds[i]
                    + ", params=" + parameterSets[i].toString());
        }
    }

    // serialize the param sets
    int allPsetSize = 0;
    for (int i = 0; i < batchSize; ++i) {
        if (parameterSets[i] instanceof ByteBuffer) {
            allPsetSize += ((ByteBuffer) parameterSets[i]).limit();
        } else {
            allPsetSize += ((ParameterSet) parameterSets[i]).getSerializedSize();
        }
    }
    clearPsetAndEnsureCapacity(allPsetSize);
    for (int i = 0; i < batchSize; ++i) {
        int paramStart = m_psetBuffer.position();
        Object param = parameterSets[i];
        if (param instanceof ByteBuffer) {
            ByteBuffer buf = (ByteBuffer) param;
            m_psetBuffer.put(buf);
        } else {
            ParameterSet pset = (ParameterSet) param;
            try {
                pset.flattenToBuffer(m_psetBuffer);
            } catch (final IOException exception) {
                throw new RuntimeException("Error serializing parameters for SQL batch element: " + i
                        + " with plan fragment ID: " + planFragmentIds[i]
                        + " and with params: " + pset.toJSONString(), exception);
            }
        }
        // determinismHash can be null in FragmentTask.processFragmentTask() and many tests
        if (determinismHash != null && isWriteFrags[i]) {
            determinismHash.offerStatement(sqlCRCs[i], paramStart, m_psetBuffer);
        }
    }
    // checkMaxFsSize();
    clearPerFragmentStatsAndEnsureCapacity(batchSize);

    // Execute the plan, passing a raw pointer to the byte buffers for input and output.
    // Clear is destructive, do it before the native call.
    FastDeserializer targetDeserializer = (batchIndex == 0) ? m_firstDeserializer : m_nextDeserializer;
    targetDeserializer.clear();
    final int errorCode = nativeExecutePlanFragments(
            pointer, batchIndex, numFragmentIds, planFragmentIds, inputDepIds,
            txnId, spHandle, lastCommittedSpHandle, uniqueId, undoToken, traceOn);
    try {
        checkErrorCode(errorCode);
        m_usingFallbackBuffer = m_fallbackBuffer != null;
        FastDeserializer fds = m_usingFallbackBuffer ? new FastDeserializer(m_fallbackBuffer) : targetDeserializer;
        assert (fds != null);
        try {
            // check if anything was changed
            m_dirty |= fds.readBoolean();
        } catch (final IOException ex) {
            LOG.error("Failed to deserialize result table: " + ex);
            throw new EEException(ERRORCODE_WRONG_SERIALIZED_BYTES);
        }
        return fds;
    } finally {
        m_fallbackBuffer = null;
    }
}
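The FastDeserializer-specific step here is the buffer selection after the native call: if the result overflowed the preallocated buffer, the native side hands back a separate fallback buffer, and the method wraps it in a fresh FastDeserializer instead of reusing the cleared target deserializer. Below is a simplified sketch of just that decision; the class, method, and parameter names are hypothetical stand-ins for the member fields, and only the FastDeserializer(ByteBuffer) constructor is taken from the code above:

import java.nio.ByteBuffer;

import org.voltdb.messaging.FastDeserializer;

// Hypothetical illustration of the fallback-buffer choice made in
// coreExecutePlanFragments(); not part of ExecutionEngineJNI.
final class ResultBufferSelection {
    static FastDeserializer pickResultDeserializer(ByteBuffer fallbackBuffer,
                                                   FastDeserializer preallocated) {
        if (fallbackBuffer != null) {
            // The result did not fit the shared buffer, so the EE supplied a
            // dedicated one; wrap it in its own deserializer.
            return new FastDeserializer(fallbackBuffer);
        }
        // Normal case: the result was written into the preallocated,
        // already-cleared deserializer's buffer.
        return preallocated;
    }
}

Whichever deserializer is chosen, the real method then reads the leading boolean "dirty" flag from it before returning it to the caller.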
Use of org.voltdb.messaging.FastDeserializer in project voltdb by VoltDB.
From the class ExecutionEngineIPC, method coreExecutePlanFragments():
@Override
protected FastDeserializer coreExecutePlanFragments(
        final int bufferHint,
        final int numFragmentIds,
        final long[] planFragmentIds,
        final long[] inputDepIds,
        final Object[] parameterSets,
        DeterminismHash determinismHash,
        boolean[] isWriteFrags,
        int[] sqlCRCs,
        final long txnId,
        final long spHandle,
        final long lastCommittedSpHandle,
        final long uniqueId,
        final long undoToken,
        boolean traceOn) throws EEException {
    sendPlanFragmentsInvocation(Commands.QueryPlanFragments, numFragmentIds, planFragmentIds,
            inputDepIds, parameterSets, determinismHash, isWriteFrags, sqlCRCs,
            txnId, spHandle, lastCommittedSpHandle, uniqueId, undoToken);
    int result = ExecutionEngine.ERRORCODE_ERROR;
    if (m_perFragmentTimingEnabled) {
        m_executionTimes = new long[numFragmentIds];
    }
    while (true) {
        try {
            result = m_connection.readStatusByte();
            ByteBuffer resultTables = null;
            if (result == ExecutionEngine.ERRORCODE_NEED_PLAN) {
                long fragmentId = m_connection.readLong();
                byte[] plan = planForFragmentId(fragmentId);
                m_data.clear();
                m_data.put(plan);
                m_data.flip();
                m_connection.write();
            } else if (result == ExecutionEngine.ERRORCODE_SUCCESS) {
                try {
                    resultTables = m_connection.readResultsBuffer();
                } catch (final IOException e) {
                    throw new EEException(ExecutionEngine.ERRORCODE_WRONG_SERIALIZED_BYTES);
                }
                return new FastDeserializer(resultTables);
            } else {
                // failure
                return null;
            }
        } catch (final IOException e) {
            m_history.append("GOT IOException: " + e.toString());
            System.out.println("Exception: " + e.getMessage());
            throw new RuntimeException(e);
        } catch (final Throwable thrown) {
            thrown.printStackTrace();
            m_history.append("GOT Throwable: " + thrown.toString());
            throw thrown;
        }
    }
}
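Unlike the JNI path, the IPC path constructs a fresh FastDeserializer on every successful call, wrapping the result buffer read off the connection. The control flow of that read loop, distilled below as a sketch; EEChannel and its methods are hypothetical stand-ins for the m_connection member and planForFragmentId(), and the status-code values are placeholders (the real constants live in ExecutionEngine):

import java.io.IOException;
import java.nio.ByteBuffer;

import org.voltdb.messaging.FastDeserializer;

// Hypothetical distillation of the IPC response loop above; not VoltDB code.
final class IpcResponseLoop {
    interface EEChannel {
        int readStatusByte() throws IOException;
        long readLong() throws IOException;
        ByteBuffer readResultsBuffer() throws IOException;
        void writePlan(byte[] plan) throws IOException;
        byte[] planForFragmentId(long fragmentId);
    }

    // Placeholder values; the real codes are the ExecutionEngine.ERRORCODE_* constants.
    static final int SUCCESS = 0;
    static final int NEED_PLAN = 1;

    static FastDeserializer readResponse(EEChannel channel) throws IOException {
        while (true) {
            int status = channel.readStatusByte();
            if (status == NEED_PLAN) {
                // The EE process does not have this plan cached: ship it over and
                // wait for the next status byte.
                channel.writePlan(channel.planForFragmentId(channel.readLong()));
            } else if (status == SUCCESS) {
                // The serialized result tables arrive as one buffer; wrap it so the
                // caller can deserialize them, as the real method does.
                return new FastDeserializer(channel.readResultsBuffer());
            } else {
                return null; // any other status byte means the fragment batch failed
            }
        }
    }
}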