Use of java.nio.ByteBuffer in project Hadoop by Apache.
Class HHXORErasureDecodingStep, method doDecodeSingle:
private void doDecodeSingle(ByteBuffer[][] inputs, ByteBuffer[][] outputs,
                            int erasedLocationToFix, int bufSize,
                            boolean isDirect) {
  final int numDataUnits = rsRawDecoder.getNumDataUnits();
  final int numParityUnits = rsRawDecoder.getNumParityUnits();
  final int subPacketSize = getSubPacketSize();

  // Save the position of every input buffer so each can be advanced by
  // exactly one chunk at the end of the method.
  int[][] inputPositions = new int[subPacketSize][inputs[0].length];
  for (int i = 0; i < subPacketSize; ++i) {
    for (int j = 0; j < inputs[i].length; ++j) {
      if (inputs[i][j] != null) {
        inputPositions[i][j] = inputs[i][j].position();
      }
    }
  }

  ByteBuffer[] tempInputs = new ByteBuffer[numDataUnits + numParityUnits];
  for (int i = 0; i < tempInputs.length; ++i) {
    tempInputs[i] = inputs[1][i];
  }

  ByteBuffer[][] tmpOutputs = new ByteBuffer[subPacketSize][numParityUnits];
  for (int i = 0; i < getSubPacketSize(); ++i) {
    for (int j = 0; j < erasedIndexes.length; ++j) {
      tmpOutputs[i][j] = outputs[i][j];
    }
    for (int m = erasedIndexes.length; m < numParityUnits; ++m) {
      tmpOutputs[i][m] = HHUtil.allocateByteBuffer(isDirect, bufSize);
    }
  }

  // First, consider the second subPacket.
  int[] erasedLocation = new int[numParityUnits];
  erasedLocation[0] = erasedLocationToFix;

  // Mark the locations that were not read for the second subPacket as
  // erased; their contents must come from decoding instead.
  for (int i = 1; i < numParityUnits; i++) {
    erasedLocation[i] = numDataUnits + i;
    tempInputs[numDataUnits + i] = null;
  }

  rsRawDecoder.decode(tempInputs, erasedLocation, tmpOutputs[1]);

  // Second, consider the first subPacket:
  // get the value of the piggyback associated with the erased location.
  int piggyBackParityIndex = piggyBackFullIndex[erasedLocationToFix];
  ByteBuffer piggyBack = HHUtil.getPiggyBackForDecode(inputs, tmpOutputs,
      piggyBackParityIndex, numDataUnits, numParityUnits, pbIndex);

  if (isDirect) {
    // Decode the erased value in the first subPacket by using the piggyback.
    int idxToWrite = 0;
    doDecodeByPiggyBack(inputs[0], tmpOutputs[0][idxToWrite], piggyBack,
        erasedLocationToFix);
  } else {
    // On-heap path: unwrap every buffer into its backing array plus an
    // absolute offset (arrayOffset() + position()).
    ByteBuffer buffer;
    byte[][][] newInputs = new byte[getSubPacketSize()][inputs[0].length][];
    int[][] inputOffsets = new int[getSubPacketSize()][inputs[0].length];
    byte[][][] newOutputs = new byte[getSubPacketSize()][numParityUnits][];
    int[][] outOffsets = new int[getSubPacketSize()][numParityUnits];

    for (int i = 0; i < getSubPacketSize(); ++i) {
      for (int j = 0; j < inputs[0].length; ++j) {
        buffer = inputs[i][j];
        if (buffer != null) {
          inputOffsets[i][j] = buffer.arrayOffset() + buffer.position();
          newInputs[i][j] = buffer.array();
        }
      }
    }

    for (int i = 0; i < getSubPacketSize(); ++i) {
      for (int j = 0; j < numParityUnits; ++j) {
        buffer = tmpOutputs[i][j];
        if (buffer != null) {
          outOffsets[i][j] = buffer.arrayOffset() + buffer.position();
          newOutputs[i][j] = buffer.array();
        }
      }
    }

    byte[] newPiggyBack = piggyBack.array();

    // Decode the erased value in the first subPacket by using the piggyback.
    int idxToWrite = 0;
    doDecodeByPiggyBack(newInputs[0], inputOffsets[0],
        newOutputs[0][idxToWrite], outOffsets[0][idxToWrite],
        newPiggyBack, erasedLocationToFix, bufSize);
  }

  // Advance every input buffer's position by exactly one chunk.
  for (int i = 0; i < subPacketSize; ++i) {
    for (int j = 0; j < inputs[i].length; ++j) {
      if (inputs[i][j] != null) {
        inputs[i][j].position(inputPositions[i][j] + bufSize);
      }
    }
  }
}
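The bracketing loops around the decode are the part worth isolating: the method records every input buffer's position up front and, once decoding finishes, advances each one by exactly bufSize so the caller can read the next chunk. Below is a minimal, self-contained sketch of that save-then-advance pattern; the class name, consumeChunk, and the placeholder work inside it are illustrative, not Hadoop code.

import java.nio.ByteBuffer;

public class PositionRestoreDemo {
  // Record each buffer's position before a bulk operation, then advance
  // it by exactly bufSize afterwards, regardless of how the operation
  // itself moved the cursor.
  static void consumeChunk(ByteBuffer[] inputs, int bufSize) {
    int[] positions = new int[inputs.length];
    for (int i = 0; i < inputs.length; i++) {
      if (inputs[i] != null) {
        positions[i] = inputs[i].position();
      }
    }

    // Placeholder bulk work that may move positions arbitrarily.
    for (ByteBuffer b : inputs) {
      if (b != null) {
        b.get(new byte[Math.min(bufSize, b.remaining())]);
      }
    }

    // Set every buffer to exactly one chunk past its starting point.
    for (int i = 0; i < inputs.length; i++) {
      if (inputs[i] != null) {
        inputs[i].position(positions[i] + bufSize);
      }
    }
  }

  public static void main(String[] args) {
    ByteBuffer[] inputs = {
        ByteBuffer.allocate(16), null, ByteBuffer.allocate(16)};
    consumeChunk(inputs, 8);
    System.out.println(inputs[0].position()); // 8
  }
}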
Use of java.nio.ByteBuffer in project Hadoop by Apache.
Class HHUtil, method cloneBufferData:
private static ByteBuffer cloneBufferData(ByteBuffer srcBuffer) {
  ByteBuffer destBuffer;
  byte[] bytesArr = new byte[srcBuffer.remaining()];

  // Copy the remaining bytes out without disturbing the source position.
  srcBuffer.mark();
  srcBuffer.get(bytesArr);
  srcBuffer.reset();

  // Preserve the source buffer's kind: a heap buffer can simply wrap the
  // array, while a direct buffer needs a fresh direct allocation.
  if (!srcBuffer.isDirect()) {
    destBuffer = ByteBuffer.wrap(bytesArr);
  } else {
    destBuffer = ByteBuffer.allocateDirect(srcBuffer.remaining());
    destBuffer.put(bytesArr);
    destBuffer.flip();
  }
  return destBuffer;
}
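Since cloneBufferData is private to HHUtil, the sketch below inlines the same logic to demonstrate its two guarantees: the source buffer's position survives (thanks to mark()/reset()), and the clone has the same backing kind, heap or direct, as the source. The class and method names here are illustrative only.

import java.nio.ByteBuffer;

public class CloneDemo {
  // Inlined copy of the cloning logic shown above.
  static ByteBuffer clone(ByteBuffer src) {
    byte[] bytes = new byte[src.remaining()];
    src.mark();
    src.get(bytes);
    src.reset();
    if (!src.isDirect()) {
      return ByteBuffer.wrap(bytes);
    }
    ByteBuffer dest = ByteBuffer.allocateDirect(bytes.length);
    dest.put(bytes);
    dest.flip();
    return dest;
  }

  public static void main(String[] args) {
    ByteBuffer src = ByteBuffer.allocateDirect(8);
    src.put(new byte[] {1, 2, 3, 4}).flip();
    ByteBuffer copy = clone(src);
    System.out.println(src.position());   // 0: mark/reset left it alone
    System.out.println(copy.isDirect());  // true: same backing kind
    System.out.println(copy.remaining()); // 4
  }
}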
Use of java.nio.ByteBuffer in project Hadoop by Apache.
Class AbstractNativeRawEncoder, method doEncode:
@Override
protected void doEncode(ByteBufferEncodingState encodingState) {
  int[] inputOffsets = new int[encodingState.inputs.length];
  int[] outputOffsets = new int[encodingState.outputs.length];
  int dataLen = encodingState.inputs[0].remaining();

  // Record each buffer's current position; the native implementation
  // addresses the payload as base pointer plus offset.
  ByteBuffer buffer;
  for (int i = 0; i < encodingState.inputs.length; ++i) {
    buffer = encodingState.inputs[i];
    inputOffsets[i] = buffer.position();
  }
  for (int i = 0; i < encodingState.outputs.length; ++i) {
    buffer = encodingState.outputs[i];
    outputOffsets[i] = buffer.position();
  }

  performEncodeImpl(encodingState.inputs, inputOffsets, dataLen,
      encodingState.outputs, outputOffsets);
}
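The pattern here is capturing each buffer's position() into a plain int[] before crossing into native code, so the JNI side can address the data as base address plus offset instead of consulting the Java-side cursor. A self-contained sketch of that capture step follows; the names are illustrative, not the Hadoop API.

import java.nio.ByteBuffer;

public class OffsetCaptureDemo {
  // Snapshot every buffer's position into a primitive array that can be
  // passed across the JNI boundary cheaply.
  static int[] capturePositions(ByteBuffer[] buffers) {
    int[] offsets = new int[buffers.length];
    for (int i = 0; i < buffers.length; i++) {
      offsets[i] = buffers[i].position();
    }
    return offsets;
  }

  public static void main(String[] args) {
    ByteBuffer[] inputs = {
        ByteBuffer.allocateDirect(64), ByteBuffer.allocateDirect(64)};
    inputs[0].position(16);
    int dataLen = inputs[0].remaining();           // 48 bytes to encode
    int[] inputOffsets = capturePositions(inputs);
    System.out.println(inputOffsets[0] + " " + dataLen); // 16 48
    // A native call like performEncodeImpl(inputs, inputOffsets, dataLen,
    // outputs, outputOffsets) would run here.
  }
}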
Use of java.nio.ByteBuffer in project Hadoop by Apache.
Class ByteBufferEncodingState, method convertToByteArrayState:
/**
 * Convert to a ByteArrayEncodingState when it's backed by on-heap arrays.
 */
ByteArrayEncodingState convertToByteArrayState() {
  int[] inputOffsets = new int[inputs.length];
  int[] outputOffsets = new int[outputs.length];
  byte[][] newInputs = new byte[inputs.length][];
  byte[][] newOutputs = new byte[outputs.length][];

  ByteBuffer buffer;
  for (int i = 0; i < inputs.length; ++i) {
    buffer = inputs[i];
    inputOffsets[i] = buffer.arrayOffset() + buffer.position();
    newInputs[i] = buffer.array();
  }
  for (int i = 0; i < outputs.length; ++i) {
    buffer = outputs[i];
    outputOffsets[i] = buffer.arrayOffset() + buffer.position();
    newOutputs[i] = buffer.array();
  }

  ByteArrayEncodingState baeState = new ByteArrayEncodingState(encoder,
      encodeLength, newInputs, inputOffsets, newOutputs, outputOffsets);
  return baeState;
}
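The conversion leans on the heap-buffer accessors array() and arrayOffset(): the first live byte of a buffer sits at index arrayOffset() + position() in the backing array. The sketch below demonstrates that arithmetic; note that outside controlled call sites like the one above, the access should be guarded with hasArray(), since direct and read-only buffers throw on array().

import java.nio.ByteBuffer;

public class HeapUnwrapDemo {
  public static void main(String[] args) {
    byte[] backing = new byte[32];
    // slice() of a wrapped region yields a buffer with a nonzero
    // arrayOffset() over the very same backing array.
    ByteBuffer slice = ByteBuffer.wrap(backing, 8, 16).slice();
    slice.position(4);

    if (slice.hasArray()) {
      byte[] raw = slice.array();
      int offset = slice.arrayOffset() + slice.position();
      System.out.println(raw == backing); // true: no copy was made
      System.out.println(offset);         // 12: first live byte in backing
    }
  }
}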
Use of java.nio.ByteBuffer in project Hadoop by Apache.
Class GaloisField, method remainder:
/**
 * The "bulk" version of the remainder, using ByteBuffer.
 * Warning: This function will modify the "dividend" inputs.
 */
public void remainder(ByteBuffer[] dividend, int[] divisor) {
  int idx1, idx2;
  ByteBuffer b1, b2;
  for (int i = dividend.length - divisor.length; i >= 0; i--) {
    for (int j = 0; j < divisor.length; j++) {
      b1 = dividend[i + divisor.length - 1];
      b2 = dividend[j + i];
      for (idx1 = b1.position(), idx2 = b2.position();
           idx1 < b1.limit(); idx1++, idx2++) {
        int ratio = divTable[b1.get(idx1) & 0x00FF][divisor[divisor.length - 1]];
        b2.put(idx2, (byte) ((b2.get(idx2) & 0x00FF) ^
            mulTable[ratio][divisor[j]]));
      }
    }
  }
}
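Two ByteBuffer details make this inner loop work: the absolute get(int) and put(int, byte) accessors never move a buffer's position, so the in-place update leaves the dividend cursors where the caller set them, and the & 0x00FF mask converts Java's signed byte into the unsigned 0..255 index the lookup tables expect. Below is a minimal sketch of both details, using a trivial XOR in place of the GaloisField table lookups.

import java.nio.ByteBuffer;

public class AbsoluteXorDemo {
  public static void main(String[] args) {
    ByteBuffer b = ByteBuffer.wrap(new byte[] {(byte) 0xF0, 0x0F});
    int before = b.position();

    for (int idx = b.position(); idx < b.limit(); idx++) {
      int unsigned = b.get(idx) & 0x00FF;   // 0xF0 -> 240, not -16
      b.put(idx, (byte) (unsigned ^ 0xFF)); // in-place update by index
    }

    System.out.println(b.position() == before);               // true
    System.out.println(Integer.toHexString(b.get(0) & 0xFF)); // f
  }
}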