Use of org.voltdb.exceptions.InterruptException in project voltdb by VoltDB.
The example below is the processFragmentTask method of the class FragmentTask.
// Cut and pasted from ExecutionSite processFragmentTask(), then
// modified to work in the new world
public FragmentResponseMessage processFragmentTask(SiteProcedureConnection siteConnection) {
    // Ensure default procs loaded here
    // Also used for LoadMultipartitionTable
    String procNameToLoad = m_fragmentMsg.getProcNameToLoad();
    if (procNameToLoad != null) {
        // this will ensure proc is loaded
        ProcedureRunner runner = siteConnection.getProcedureRunner(procNameToLoad);
        assert (runner != null);
    }
    // IZZY: actually need the "executor" HSId these days?
    final FragmentResponseMessage currentFragResponse = new FragmentResponseMessage(m_fragmentMsg, m_initiator.getHSId());
    currentFragResponse.setStatus(FragmentResponseMessage.SUCCESS, null);
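    // The response starts out as SUCCESS; the catch blocks below downgrade it to
    // UNEXPECTED_ERROR if a fragment fails.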
    if (m_inputDeps != null) {
        siteConnection.stashWorkUnitDependencies(m_inputDeps);
    }
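    // A fragment message marked empty-for-restart is answered with a dummy dependency
    // table and returns without executing anything.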
    if (m_fragmentMsg.isEmptyForRestart()) {
        int outputDepId = m_fragmentMsg.getOutputDepId(0);
        currentFragResponse.addDependency(new DependencyPair.BufferDependencyPair(outputDepId, m_rawDummyResponse, 0, m_rawDummyResponse.length));
        return currentFragResponse;
    }
    ProcedureRunner currRunner = siteConnection.getProcedureRunner(m_fragmentMsg.getProcedureName());
    long[] executionTimes = null;
    int succeededFragmentsCount = 0;
    if (currRunner != null) {
        currRunner.getExecutionEngine().setPerFragmentTimingEnabled(m_fragmentMsg.isPerFragmentStatsRecording());
        if (m_fragmentMsg.isPerFragmentStatsRecording()) {
            // At this point, we will execute the fragments one by one.
            executionTimes = new long[1];
        }
    }
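    // Execute the fragments in the message one at a time, adding one dependency table
    // to the response per fragment.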
    for (int frag = 0; frag < m_fragmentMsg.getFragmentCount(); frag++) {
        byte[] planHash = m_fragmentMsg.getPlanHash(frag);
        final int outputDepId = m_fragmentMsg.getOutputDepId(frag);
        ParameterSet params = m_fragmentMsg.getParameterSetForFragment(frag);
        final int inputDepId = m_fragmentMsg.getOnlyInputDepId(frag);
        long fragmentId = 0;
        byte[] fragmentPlan = null;
        /*
         * Currently the error path when executing plan fragments
         * does not adequately distinguish between fatal errors and
         * abort type errors that should result in a roll back.
         * Assume that it is ninja: succeeds or doesn't return.
         * No roll back support.
         *
         * AW in 2012, the preceding comment might be wrong,
         * I am pretty sure what we don't support is partial rollback.
         * The entire procedure will roll back successfully on failure
         */
        VoltTable dependency = null;
        try {
            FastDeserializer fragResult;
            fragmentPlan = m_fragmentMsg.getFragmentPlan(frag);
            String stmtText = null;
            // if custom fragment, load the plan and get local fragment id
            if (fragmentPlan != null) {
                // statement text for unplanned fragments are pulled from the message,
                // to ensure that we get the correct constants from the most recent
                // invocation.
                stmtText = m_fragmentMsg.getStmtText(frag);
                fragmentId = ActivePlanRepository.loadOrAddRefPlanFragment(planHash, fragmentPlan, null);
            }
            // otherwise ask the plan source for a local fragment id
            else {
                fragmentId = ActivePlanRepository.getFragmentIdForPlanHash(planHash);
                stmtText = ActivePlanRepository.getStmtTextForPlanHash(planHash);
            }
            // set up the batch context for the fragment set
            siteConnection.setBatch(m_fragmentMsg.getCurrentBatchIndex());
            fragResult = siteConnection.executePlanFragments(
                    1,
                    new long[] { fragmentId },
                    new long[] { inputDepId },
                    new ParameterSet[] { params },
                    null, // for long-running queries
                    stmtText == null ? null : new String[] { stmtText }, // FragmentTasks don't generate statement hashes
                    new boolean[] { false },
                    null,
                    m_txnState.txnId,
                    m_txnState.m_spHandle,
                    m_txnState.uniqueId,
                    m_txnState.isReadOnly(),
                    VoltTrace.log(VoltTrace.Category.EE) != null);
            // get a copy of the result buffers from the cache buffer so we can post the
            // fragment response to the network
            final int tableSize;
            final byte[] fullBacking;
            try {
                // read the complete size of the buffer used
                fragResult.readInt();
                // read number of dependencies (1)
                fragResult.readInt();
                // read the dependencyId() -1;
                fragResult.readInt();
                tableSize = fragResult.readInt();
                fullBacking = new byte[tableSize];
                // get a copy of the buffer
                fragResult.readFully(fullBacking);
            } catch (final IOException ex) {
                LOG.error("Failed to deserialize result table: " + ex);
                throw new EEException(ExecutionEngine.ERRORCODE_WRONG_SERIALIZED_BYTES);
            }
            if (hostLog.isTraceEnabled()) {
                hostLog.l7dlog(Level.TRACE, LogKeys.org_voltdb_ExecutionSite_SendingDependency.name(), new Object[] { outputDepId }, null);
            }
            currentFragResponse.addDependency(new DependencyPair.BufferDependencyPair(outputDepId, fullBacking, 0, tableSize));
        } catch (final EEException e) {
            hostLog.l7dlog(Level.TRACE, LogKeys.host_ExecutionSite_ExceptionExecutingPF.name(), new Object[] { Encoder.hexEncode(planHash) }, e);
            currentFragResponse.setStatus(FragmentResponseMessage.UNEXPECTED_ERROR, e);
            if (currentFragResponse.getTableCount() == 0) {
                // Make sure the response has at least 1 result with a valid DependencyId
                currentFragResponse.addDependency(new DependencyPair.BufferDependencyPair(outputDepId, m_rawDummyResult, 0, m_rawDummyResult.length));
            }
            break;
        } catch (final SQLException e) {
            hostLog.l7dlog(Level.TRACE, LogKeys.host_ExecutionSite_ExceptionExecutingPF.name(), new Object[] { Encoder.hexEncode(planHash) }, e);
            currentFragResponse.setStatus(FragmentResponseMessage.UNEXPECTED_ERROR, e);
            if (currentFragResponse.getTableCount() == 0) {
                // Make sure the response has at least 1 result with a valid DependencyId
                currentFragResponse.addDependency(new DependencyPair.BufferDependencyPair(outputDepId, m_rawDummyResult, 0, m_rawDummyResult.length));
            }
            break;
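        // An InterruptException from the execution engine (typically a long-running,
        // read-only fragment that was cancelled) is handled exactly like the EEException
        // and SQLException cases above.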
        } catch (final InterruptException e) {
            hostLog.l7dlog(Level.TRACE, LogKeys.host_ExecutionSite_ExceptionExecutingPF.name(), new Object[] { Encoder.hexEncode(planHash) }, e);
            currentFragResponse.setStatus(FragmentResponseMessage.UNEXPECTED_ERROR, e);
            if (currentFragResponse.getTableCount() == 0) {
                // Make sure the response has at least 1 result with a valid DependencyId
                currentFragResponse.addDependency(new DependencyPair.BufferDependencyPair(outputDepId, m_rawDummyResult, 0, m_rawDummyResult.length));
            }
            break;
        } finally {
            // ensure adhoc plans are unloaded
            if (fragmentPlan != null) {
                ActivePlanRepository.decrefPlanFragmentById(fragmentId);
            }
            // The single-partition stored procedure handler is in the ProcedureRunner.
            if (currRunner != null) {
                succeededFragmentsCount = currRunner.getExecutionEngine().extractPerFragmentStats(1, executionTimes);
                long stmtDuration = 0;
                int stmtResultSize = 0;
                int stmtParameterSetSize = 0;
                if (m_fragmentMsg.isPerFragmentStatsRecording()) {
                    stmtDuration = executionTimes == null ? 0 : executionTimes[0];
                    stmtResultSize = dependency == null ? 0 : dependency.getSerializedSize();
                    stmtParameterSetSize = params == null ? 0 : params.getSerializedSize();
                }
                currRunner.getStatsCollector().endFragment(m_fragmentMsg.getStmtName(frag), m_fragmentMsg.isCoordinatorTask(), succeededFragmentsCount == 0, m_fragmentMsg.isPerFragmentStatsRecording(), stmtDuration, stmtResultSize, stmtParameterSetSize);
            }
        }
    }
    return currentFragResponse;
}
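For orientation, here is a minimal, self-contained sketch of the same InterruptException handling pattern, reduced to its essentials: execute fragments one at a time, and when the engine interrupts a fragment, mark the response as failed, guarantee at least one dummy dependency table, and stop. Apart from InterruptException itself (which assumes the voltdb jar is on the classpath), every name in the sketch (FragmentWork, SimpleResponse, runFragments, DUMMY_RESULT) is hypothetical and exists only for illustration; it is not the VoltDB API.

import java.util.ArrayList;
import java.util.List;

import org.voltdb.exceptions.InterruptException;

public class InterruptHandlingSketch {

    /** Hypothetical stand-in for a single plan fragment to execute. */
    interface FragmentWork {
        // Returns the serialized dependency table produced by the fragment.
        byte[] execute() throws InterruptException;
    }

    /** Hypothetical stand-in for FragmentResponseMessage: a status plus dependency buffers. */
    static final class SimpleResponse {
        static final byte SUCCESS = 1;
        static final byte UNEXPECTED_ERROR = 2;
        byte status = SUCCESS;
        Throwable cause;
        final List<byte[]> dependencies = new ArrayList<>();
    }

    // Placeholder payload used when a failed run still needs a dependency entry.
    private static final byte[] DUMMY_RESULT = new byte[0];

    static SimpleResponse runFragments(List<FragmentWork> fragments) {
        SimpleResponse response = new SimpleResponse();
        for (FragmentWork fragment : fragments) {
            try {
                // Happy path: keep the dependency table the fragment produced.
                response.dependencies.add(fragment.execute());
            } catch (InterruptException e) {
                // Same shape as the catch blocks in processFragmentTask(): record the error,
                // make sure the response still carries at least one (dummy) dependency,
                // and stop executing the remaining fragments.
                response.status = SimpleResponse.UNEXPECTED_ERROR;
                response.cause = e;
                if (response.dependencies.isEmpty()) {
                    response.dependencies.add(DUMMY_RESULT);
                }
                break;
            }
        }
        return response;
    }
}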