
Example 1 with InputBuffer

Use of org.apache.hadoop.mapred.nativetask.buffer.InputBuffer in the hadoop project by apache.

The collect method of the BufferPushee class:

public boolean collect(InputBuffer buffer) throws IOException {
    if (closed) {
        return false;
    }
    final ByteBuffer input = buffer.getByteBuffer();
    // Finish filling any partially received record left aside by a previous call,
    // and flush it downstream once it is complete.
    if (null != asideBuffer && asideBuffer.length() > 0) {
        if (asideBuffer.remaining() > 0) {
            final byte[] output = asideBuffer.getByteBuffer().array();
            final int write = Math.min(asideBuffer.remaining(), input.remaining());
            input.get(output, asideBuffer.position(), write);
            asideBuffer.position(asideBuffer.position() + write);
        }
        if (asideBuffer.remaining() == 0 && asideBuffer.position() > 0) {
            asideBuffer.position(0);
            write(asideBuffer);
            asideBuffer.rewind(0, 0);
        }
    }
    if (input.remaining() == 0) {
        return true;
    }
    if (input.remaining() < KV_HEADER_LENGTH) {
        throw new IOException("incomplete data, input length is: " + input.remaining());
    }
    // Peek at the key and value lengths without consuming the header.
    final int position = input.position();
    final int keyLength = input.getInt();
    final int valueLength = input.getInt();
    input.position(position);
    final int kvLength = keyLength + valueLength + KV_HEADER_LENGTH;
    final int remaining = input.remaining();
    if (kvLength > remaining) {
        // The next record is split across buffers: stash the bytes that are
        // available and wait for the rest on the next collect() call.
        if (null == asideBuffer || asideBuffer.capacity() < kvLength) {
            asideBuffer = new InputBuffer(BufferType.HEAP_BUFFER, kvLength);
        }
        asideBuffer.rewind(0, kvLength);
        input.get(asideBuffer.array(), 0, remaining);
        asideBuffer.position(remaining);
    } else {
        // At least one complete record is available: write the buffer downstream.
        write(buffer);
    }
    return true;
}
Also used: InputBuffer (org.apache.hadoop.mapred.nativetask.buffer.InputBuffer), IOException (java.io.IOException), ByteBuffer (java.nio.ByteBuffer)
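
The framing that collect parses is a plain length-prefixed layout: a 4-byte key length, a 4-byte value length, then the key and value bytes. The minimal, self-contained sketch below reproduces that layout and the header peek with java.nio.ByteBuffer only; treating KV_HEADER_LENGTH as 8 (two ints) is an assumption inferred from the two getInt() reads, since the constant's value is not shown in the excerpt.

import java.nio.ByteBuffer;

public class KvFramingSketch {
    // Assumed value: two 4-byte ints, mirroring the two getInt() header reads above.
    static final int KV_HEADER_LENGTH = 8;

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(64);
        byte[] key = "k1".getBytes();
        byte[] value = "value1".getBytes();
        // Serialize one record: key length, value length, key bytes, value bytes.
        buf.putInt(key.length).putInt(value.length).put(key).put(value);
        buf.flip();

        // Peek at the header without consuming it, exactly as collect() does.
        final int position = buf.position();
        final int keyLength = buf.getInt();
        final int valueLength = buf.getInt();
        buf.position(position);

        final int kvLength = keyLength + valueLength + KV_HEADER_LENGTH;
        System.out.println("record occupies " + kvLength + " bytes, "
            + buf.remaining() + " available");
    }
}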

Example 2 with InputBuffer

Use of org.apache.hadoop.mapred.nativetask.buffer.InputBuffer in the hadoop project by apache.

The create method of the NativeBatchProcessor class:

public static INativeHandler create(String nativeHandlerName, Configuration conf, DataChannel channel) throws IOException {
    final int bufferSize = conf.getInt(Constants.NATIVE_PROCESSOR_BUFFER_KB, 1024) * 1024;
    LOG.info("NativeHandler: direct buffer size: " + bufferSize);
    OutputBuffer out = null;
    InputBuffer in = null;
    // The DataChannel decides which direct buffers the handler needs.
    switch (channel) {
        case IN:
            in = new InputBuffer(BufferType.DIRECT_BUFFER, bufferSize);
            break;
        case OUT:
            out = new OutputBuffer(BufferType.DIRECT_BUFFER, bufferSize);
            break;
        case INOUT:
            in = new InputBuffer(BufferType.DIRECT_BUFFER, bufferSize);
            out = new OutputBuffer(BufferType.DIRECT_BUFFER, bufferSize);
            break;
        case NONE:
            // No buffers are allocated for a handler without a data channel.
    }
    final INativeHandler handler = new NativeBatchProcessor(nativeHandlerName, in, out);
    handler.init(conf);
    return handler;
}
Also used: InputBuffer (org.apache.hadoop.mapred.nativetask.buffer.InputBuffer), OutputBuffer (org.apache.hadoop.mapred.nativetask.buffer.OutputBuffer)
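
The DataChannel argument decides which direct buffers the handler allocates: IN and OUT allocate one side, INOUT both, NONE neither. A hedged usage sketch follows; the handler name is a placeholder and the fragment assumes the relevant classes (Configuration, Constants, DataChannel, INativeHandler) are already imported, so it is an illustration rather than code from the project.

// Illustrative fragment only; "ExampleHandler" is a made-up handler name.
Configuration conf = new Configuration();
// Override the default 1024 KB buffer size read by create() above.
conf.setInt(Constants.NATIVE_PROCESSOR_BUFFER_KB, 512);
INativeHandler handler = NativeBatchProcessor.create("ExampleHandler", conf, DataChannel.INOUT);
// With INOUT, both a direct InputBuffer and a direct OutputBuffer back the handler.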

Example 3 with InputBuffer

Use of org.apache.hadoop.mapred.nativetask.buffer.InputBuffer in the hadoop project by apache.

The receiveData method of the BufferPuller class:

@Override
public boolean receiveData() throws IOException {
    if (closed) {
        return false;
    }
    final ByteBuffer input = inputBuffer.getByteBuffer();
    // Drain the new input into any partially received record left aside by a
    // previous call; unlike BufferPushee, the completed record is not written
    // here but kept in the aside buffer for later consumption.
    if (null != asideBuffer && asideBuffer.length() > 0) {
        if (asideBuffer.remaining() > 0) {
            final byte[] output = asideBuffer.getByteBuffer().array();
            final int write = Math.min(asideBuffer.remaining(), input.remaining());
            input.get(output, asideBuffer.position(), write);
            asideBuffer.position(asideBuffer.position() + write);
        }
        if (asideBuffer.remaining() == 0) {
            asideBuffer.position(0);
        }
    }
    if (input.remaining() == 0) {
        return true;
    }
    if (input.remaining() < KV_HEADER_LENGTH) {
        throw new IOException("incomplete data, input length is: " + input.remaining());
    }
    // Peek at the key and value lengths without consuming the header.
    final int position = input.position();
    final int keyLength = input.getInt();
    final int valueLength = input.getInt();
    input.position(position);
    final int kvLength = keyLength + valueLength + KV_HEADER_LENGTH;
    final int remaining = input.remaining();
    if (kvLength > remaining) {
        // The next record is split across buffers: stash the available bytes and
        // wait for the rest on the next receiveData() call.
        if (null == asideBuffer || asideBuffer.capacity() < kvLength) {
            asideBuffer = new InputBuffer(BufferType.HEAP_BUFFER, kvLength);
        }
        asideBuffer.rewind(0, kvLength);
        input.get(asideBuffer.array(), 0, remaining);
        asideBuffer.position(remaining);
    }
    return true;
}
Also used: InputBuffer (org.apache.hadoop.mapred.nativetask.buffer.InputBuffer), DataInputBuffer (org.apache.hadoop.io.DataInputBuffer), IOException (java.io.IOException), ByteBuffer (java.nio.ByteBuffer)
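
receiveData mirrors collect from Example 1, but BufferPuller only stitches the split record back together; nothing is written here. The carry-over step is shown in isolation below, with plain ByteBuffers and local variables standing in for InputBuffer, so this is a simplified sketch rather than the class's own API.

import java.nio.ByteBuffer;

public class CarryOverSketch {
    public static void main(String[] args) {
        // A 10-byte record was split: 6 bytes arrived with the previous buffer,
        // 4 bytes are still missing.
        byte[] aside = new byte[10];
        int asidePosition = 6;

        // The next input buffer starts with the missing 4 bytes, then new data.
        ByteBuffer input = ByteBuffer.wrap(new byte[] {7, 7, 7, 7, 1, 2, 3});

        int write = Math.min(aside.length - asidePosition, input.remaining());
        input.get(aside, asidePosition, write); // complete the stashed record
        asidePosition += write;

        System.out.println("aside record complete: " + (asidePosition == aside.length)
            + ", bytes left for new records: " + input.remaining());
    }
}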

Example 4 with InputBuffer

Use of org.apache.hadoop.mapred.nativetask.buffer.InputBuffer in the hadoop project by apache.

The setUp method of the TestCombineHandler class:

@Before
public void setUp() throws IOException {
    this.nativeHandler = Mockito.mock(INativeHandler.class);
    this.pusher = Mockito.mock(BufferPusher.class);
    this.puller = Mockito.mock(BufferPuller.class);
    this.combinerRunner = Mockito.mock(CombinerRunner.class);
    Mockito.when(nativeHandler.getInputBuffer()).thenReturn(new InputBuffer(BufferType.HEAP_BUFFER, 100));
}
Also used: CombinerRunner (org.apache.hadoop.mapred.Task.CombinerRunner), InputBuffer (org.apache.hadoop.mapred.nativetask.buffer.InputBuffer), INativeHandler (org.apache.hadoop.mapred.nativetask.INativeHandler), Before (org.junit.Before)
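
With this stub in place, every call to nativeHandler.getInputBuffer() returns the same 100-byte heap InputBuffer, so a test body can exercise the combine path against the mock and assert on that buffer directly. A minimal illustrative fragment follows; the assertions are assumptions, not part of TestCombineHandler, and rely on JUnit's Assert being imported.

// Illustrative only: exercises the stub configured in setUp().
InputBuffer in = nativeHandler.getInputBuffer();
Assert.assertEquals(100, in.capacity());          // the mocked 100-byte heap buffer
Mockito.verify(nativeHandler).getInputBuffer();   // the stubbed call happened exactly once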

Example 5 with InputBuffer

Use of org.apache.hadoop.mapred.nativetask.buffer.InputBuffer in the hadoop project by apache.

The setUp method of the TestNativeCollectorOnlyHandler class:

@Before
public void setUp() throws IOException {
    this.nativeHandler = Mockito.mock(INativeHandler.class);
    this.pusher = Mockito.mock(BufferPusher.class);
    this.combiner = Mockito.mock(ICombineHandler.class);
    JobConf jobConf = new JobConf();
    jobConf.set(OutputUtil.NATIVE_TASK_OUTPUT_MANAGER, "org.apache.hadoop.mapred.nativetask.util.LocalJobOutputFiles");
    jobConf.set("mapred.local.dir", LOCAL_DIR);
    this.taskContext = new TaskContext(jobConf, BytesWritable.class, BytesWritable.class, BytesWritable.class, BytesWritable.class, null, null);
    Mockito.when(nativeHandler.getInputBuffer()).thenReturn(new InputBuffer(BufferType.HEAP_BUFFER, 100));
}
Also used: TaskContext (org.apache.hadoop.mapred.nativetask.TaskContext), InputBuffer (org.apache.hadoop.mapred.nativetask.buffer.InputBuffer), INativeHandler (org.apache.hadoop.mapred.nativetask.INativeHandler), ICombineHandler (org.apache.hadoop.mapred.nativetask.ICombineHandler), BytesWritable (org.apache.hadoop.io.BytesWritable), JobConf (org.apache.hadoop.mapred.JobConf), Before (org.junit.Before)

Aggregations

InputBuffer (org.apache.hadoop.mapred.nativetask.buffer.InputBuffer): 5 uses
IOException (java.io.IOException): 2 uses
ByteBuffer (java.nio.ByteBuffer): 2 uses
INativeHandler (org.apache.hadoop.mapred.nativetask.INativeHandler): 2 uses
Before (org.junit.Before): 2 uses
BytesWritable (org.apache.hadoop.io.BytesWritable): 1 use
DataInputBuffer (org.apache.hadoop.io.DataInputBuffer): 1 use
JobConf (org.apache.hadoop.mapred.JobConf): 1 use
CombinerRunner (org.apache.hadoop.mapred.Task.CombinerRunner): 1 use
ICombineHandler (org.apache.hadoop.mapred.nativetask.ICombineHandler): 1 use
TaskContext (org.apache.hadoop.mapred.nativetask.TaskContext): 1 use
OutputBuffer (org.apache.hadoop.mapred.nativetask.buffer.OutputBuffer): 1 use