Use of org.apache.hadoop.HadoopIllegalArgumentException in project SSM by Intel-bigdata.
The following example is taken from the class RSRawDecoderLegacy, method doDecode.
@Override
protected void doDecode(ByteBufferDecodingState decodingState) {
  int dataLen = decodingState.decodeLength;
  CoderUtil.resetOutputBuffers(decodingState.outputs, dataLen);

  /**
   * Passed parameters are friendly to callers but not to the underlying
   * implementations, so we have to adjust them before calling doDecodeImpl.
   */
  int[] erasedOrNotToReadIndexes =
      CoderUtil.getNullIndexes(decodingState.inputs);
  ByteBuffer[] directBuffers = new ByteBuffer[getNumParityUnits()];
  ByteBuffer[] adjustedDirectBufferOutputsParameter =
      new ByteBuffer[getNumParityUnits()];

  // Use the caller-passed buffers in the erasedIndexes positions.
  for (int outputIdx = 0, i = 0; i < decodingState.erasedIndexes.length; i++) {
    boolean found = false;
    for (int j = 0; j < erasedOrNotToReadIndexes.length; j++) {
      // We use the passed output buffer to avoid copying data afterwards.
      if (decodingState.erasedIndexes[i] == erasedOrNotToReadIndexes[j]) {
        found = true;
        adjustedDirectBufferOutputsParameter[j] = CoderUtil.resetBuffer(
            decodingState.outputs[outputIdx++], dataLen);
      }
    }
    if (!found) {
      throw new HadoopIllegalArgumentException(
          "Inputs not fully corresponding to erasedIndexes in null places");
    }
  }

  // Use shared direct buffers for the remaining positions (not set yet).
  for (int bufferIdx = 0, i = 0; i < erasedOrNotToReadIndexes.length; i++) {
    if (adjustedDirectBufferOutputsParameter[i] == null) {
      ByteBuffer buffer = checkGetDirectBuffer(directBuffers, bufferIdx, dataLen);
      buffer.position(0);
      buffer.limit(dataLen);
      adjustedDirectBufferOutputsParameter[i] = CoderUtil.resetBuffer(buffer, dataLen);
      bufferIdx++;
    }
  }

  doDecodeImpl(decodingState.inputs, erasedOrNotToReadIndexes,
      adjustedDirectBufferOutputsParameter);
}
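The second loop fills positions that the caller did not supply output buffers for by reusing shared direct buffers obtained from checkGetDirectBuffer, a helper in the same class. The sketch below illustrates the lazy-allocation idea such a helper typically relies on; the names and behavior here are assumptions for illustration, and the actual implementation in RSRawDecoderLegacy may cache or size its buffers differently.

import java.nio.ByteBuffer;

final class DirectBufferSketch {
  // Hypothetical stand-in for the checkGetDirectBuffer helper: allocate a
  // direct buffer on first use, or replace it if the cached one is too small,
  // so repeated decode calls can reuse the same off-heap memory.
  static ByteBuffer checkGetDirectBuffer(ByteBuffer[] buffers, int idx, int dataLen) {
    if (buffers[idx] == null || buffers[idx].capacity() < dataLen) {
      buffers[idx] = ByteBuffer.allocateDirect(dataLen);
    }
    return buffers[idx];
  }
}

Reusing cached direct buffers this way avoids allocating off-heap memory on every decode call while still guaranteeing each adjusted output slot has a buffer of at least dataLen bytes.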