Example 11 with Writable

Use of org.apache.hadoop.io.Writable in project hadoop by apache.

The class Client, method call().

/**
   * Make a call, passing <code>rpcRequest</code>, to the IPC server defined by
   * <code>remoteId</code>, returning the rpc response.
   *
   * @param rpcKind - the kind of RPC being made (selects the rpc engine)
   * @param rpcRequest -  contains serialized method and method parameters
   * @param remoteId - the target rpc server
   * @param serviceClass - service class for RPC
   * @param fallbackToSimpleAuth - set to true or false during this method to
   *   indicate if a secure client falls back to simple auth
   * @return the rpc response
   * @throws IOException if there are network problems or if the remote code
   *   threw an exception
   */
Writable call(RPC.RpcKind rpcKind, Writable rpcRequest, ConnectionId remoteId, int serviceClass, AtomicBoolean fallbackToSimpleAuth) throws IOException {
    final Call call = createCall(rpcKind, rpcRequest);
    final Connection connection = getConnection(remoteId, call, serviceClass, fallbackToSimpleAuth);
    try {
        checkAsyncCall();
        try {
            // send the rpc request
            connection.sendRpcRequest(call);
        } catch (RejectedExecutionException e) {
            throw new IOException("connection has been closed", e);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            LOG.warn("interrupted waiting to send rpc request to server", e);
            throw new IOException(e);
        }
    } catch (Exception e) {
        if (isAsynchronousMode()) {
            releaseAsyncCall();
        }
        throw e;
    }
    if (isAsynchronousMode()) {
        final AsyncGet<Writable, IOException> asyncGet = new AsyncGet<Writable, IOException>() {

            @Override
            public Writable get(long timeout, TimeUnit unit) throws IOException, TimeoutException {
                // Release the async-call slot only once the outcome is final;
                // a timeout keeps it held so the caller can retry get().
                boolean done = true;
                try {
                    final Writable w = getRpcResponse(call, connection, timeout, unit);
                    if (w == null) {
                        done = false;
                        throw new TimeoutException(call + " timed out " + timeout + " " + unit);
                    }
                    return w;
                } finally {
                    if (done) {
                        releaseAsyncCall();
                    }
                }
            }

            @Override
            public boolean isDone() {
                synchronized (call) {
                    return call.done;
                }
            }
        };
        ASYNC_RPC_RESPONSE.set(asyncGet);
        return null;
    } else {
        return getRpcResponse(call, connection, -1, null);
    }
}
Also used : AsyncGet(org.apache.hadoop.util.concurrent.AsyncGet) Writable(org.apache.hadoop.io.Writable) ConnectTimeoutException(org.apache.hadoop.net.ConnectTimeoutException)
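
In asynchronous mode the method above returns null and instead parks an AsyncGet in the ASYNC_RPC_RESPONSE thread-local, which the caller later drains with get(timeout, unit). Below is a minimal, self-contained sketch of that hand-off pattern; the ThreadLocal holder and the CompletableFuture backing are illustrative stand-ins, not Hadoop's internals.

import java.util.concurrent.*;

public class AsyncGetSketch {

    // Simplified stand-in for org.apache.hadoop.util.concurrent.AsyncGet
    interface AsyncGet<R, E extends Throwable> {
        R get(long timeout, TimeUnit unit) throws E, TimeoutException;
        boolean isDone();
    }

    // Illustrative stand-in for the ASYNC_RPC_RESPONSE thread-local
    static final ThreadLocal<AsyncGet<String, Exception>> ASYNC_RESPONSE = new ThreadLocal<>();

    public static void main(String[] args) throws Exception {
        // The response arrives on another thread, like an IPC reader thread
        final CompletableFuture<String> future = CompletableFuture.supplyAsync(() -> "response");

        // What call(...) does in async mode: park a getter and return null
        ASYNC_RESPONSE.set(new AsyncGet<String, Exception>() {

            @Override
            public String get(long timeout, TimeUnit unit) throws Exception, TimeoutException {
                // As in the Writable variant above, a timeout propagates as
                // TimeoutException and leaves the result retrievable
                return future.get(timeout, unit);
            }

            @Override
            public boolean isDone() {
                return future.isDone();
            }
        });

        // What the caller does afterwards: fetch the getter and block on it
        AsyncGet<String, Exception> asyncGet = ASYNC_RESPONSE.get();
        System.out.println(asyncGet.get(1, TimeUnit.SECONDS));
    }
}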

Example 12 with Writable

Use of org.apache.hadoop.io.Writable in project hadoop by apache.

The class TestServer, method testLogExceptions().

@Test(timeout = 300000)
public void testLogExceptions() throws Exception {
    final Configuration conf = new Configuration();
    final Call dummyCall = new Call(0, 0, null, null);
    Log logger = mock(Log.class);
    Server server = new Server("0.0.0.0", 0, LongWritable.class, 1, conf) {

        @Override
        public Writable call(RPC.RpcKind rpcKind, String protocol, Writable param, long receiveTime) throws Exception {
            return null;
        }
    };
    server.addSuppressedLoggingExceptions(TestException1.class);
    server.addTerseExceptions(TestException2.class);
    // Nothing should be logged for a suppressed exception.
    server.logException(logger, new TestException1(), dummyCall);
    verifyZeroInteractions(logger);
    // No stack trace should be logged for a terse exception.
    server.logException(logger, new TestException2(), dummyCall);
    verify(logger, times(1)).info(anyObject());
    // Full stack trace should be logged for other exceptions.
    final Throwable te3 = new TestException3();
    server.logException(logger, te3, dummyCall);
    verify(logger, times(1)).info(anyObject(), eq(te3));
}
Also used : Call(org.apache.hadoop.ipc.Server.Call) Configuration(org.apache.hadoop.conf.Configuration) Log(org.apache.commons.logging.Log) Writable(org.apache.hadoop.io.Writable) LongWritable(org.apache.hadoop.io.LongWritable) Test(org.junit.Test)
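
The assertions above pin down a three-tier logging policy: suppressed exception types log nothing, terse types log the message only, and everything else logs the full stack trace. A plausible sketch of such a logException follows; the exact-class registries are assumptions for illustration, not Hadoop's actual Server internals.

import java.util.HashSet;
import java.util.Set;
import org.apache.commons.logging.Log;

class ExceptionLoggingSketch {

    // Illustrative registries, filled by the equivalents of
    // addSuppressedLoggingExceptions / addTerseExceptions in the test above
    private final Set<Class<? extends Throwable>> suppressed = new HashSet<>();
    private final Set<Class<? extends Throwable>> terse = new HashSet<>();

    void logException(Log logger, Throwable t, Object call) {
        if (suppressed.contains(t.getClass())) {
            return; // nothing at all for suppressed types
        }
        if (terse.contains(t.getClass())) {
            // message only, no stack trace: matches verify(...).info(anyObject())
            logger.info(call + ": " + t);
        } else {
            // full stack trace: matches verify(...).info(anyObject(), eq(te3))
            logger.info(call, t);
        }
    }
}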

Example 13 with Writable

Use of org.apache.hadoop.io.Writable in project hadoop by apache.

The class TestAsyncIPC, method testCallIdAndRetry().

/**
   * Test if (1) the rpc server uses the call id/retry provided by the rpc
   * client, and (2) the rpc client receives the same call id/retry from the rpc
   * server.
   *
   * @throws ExecutionException
   * @throws InterruptedException
   */
@Test(timeout = 60000)
public void testCallIdAndRetry() throws IOException, InterruptedException, ExecutionException {
    final Map<Integer, CallInfo> infoMap = new HashMap<Integer, CallInfo>();
    // Override client to store the call info and check response
    final Client client = new Client(LongWritable.class, conf) {

        @Override
        Call createCall(RpcKind rpcKind, Writable rpcRequest) {
            // Set different call id and retry count for the next call
            Client.setCallIdAndRetryCount(Client.nextCallId(), TestIPC.RANDOM.nextInt(255), null);
            final Call call = super.createCall(rpcKind, rpcRequest);
            CallInfo info = new CallInfo();
            info.id = call.id;
            info.retry = call.retry;
            infoMap.put(call.id, info);
            return call;
        }

        @Override
        void checkResponse(RpcResponseHeaderProto header) throws IOException {
            super.checkResponse(header);
            Assert.assertEquals(infoMap.get(header.getCallId()).retry, header.getRetryCount());
        }
    };
    // Attach a listener that tracks every call received by the server.
    final TestServer server = new TestIPC.TestServer(1, false, conf);
    server.callListener = new Runnable() {

        @Override
        public void run() {
            Assert.assertEquals(infoMap.get(Server.getCallId()).retry, Server.getCallRetryCount());
        }
    };
    try {
        InetSocketAddress addr = NetUtils.getConnectAddress(server);
        server.start();
        final AsyncCaller caller = new AsyncCaller(client, addr, 4);
        caller.run();
        caller.assertReturnValues();
    } finally {
        client.stop();
        server.stop();
    }
}
Also used : InetSocketAddress(java.net.InetSocketAddress) CallInfo(org.apache.hadoop.ipc.TestIPC.CallInfo) Writable(org.apache.hadoop.io.Writable) LongWritable(org.apache.hadoop.io.LongWritable) RpcKind(org.apache.hadoop.ipc.RPC.RpcKind) TestServer(org.apache.hadoop.ipc.TestIPC.TestServer) RpcResponseHeaderProto(org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto) Test(org.junit.Test)
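
Every request and response in this test travels as a Writable, so it is worth recalling what the transport does with one. A minimal round-trip of a LongWritable through the Writable contract, using only standard Hadoop and java.io APIs:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;

public class WritableRoundTrip {

    public static void main(String[] args) throws IOException {
        // Serialize: Writable.write(DataOutput)
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new LongWritable(42L).write(new DataOutputStream(bytes));

        // Deserialize: Writable.readFields(DataInput)
        LongWritable copy = new LongWritable();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(copy.get()); // prints 42
    }
}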

Example 14 with Writable

Use of org.apache.hadoop.io.Writable in project hadoop by apache.

The class MapFileOutputFormat, method getRecordWriter().

public RecordWriter<WritableComparable<?>, Writable> getRecordWriter(TaskAttemptContext context) throws IOException {
    Configuration conf = context.getConfiguration();
    CompressionCodec codec = null;
    CompressionType compressionType = CompressionType.NONE;
    if (getCompressOutput(context)) {
        // find the kind of compression to do
        compressionType = SequenceFileOutputFormat.getOutputCompressionType(context);
        // find the right codec
        Class<?> codecClass = getOutputCompressorClass(context, DefaultCodec.class);
        codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
    }
    Path file = getDefaultWorkFile(context, "");
    FileSystem fs = file.getFileSystem(conf);
    // ignore the progress parameter, since MapFile is local
    final MapFile.Writer out = new MapFile.Writer(conf, fs, file.toString(),
        context.getOutputKeyClass().asSubclass(WritableComparable.class),
        context.getOutputValueClass().asSubclass(Writable.class),
        compressionType, codec, context);
    return new RecordWriter<WritableComparable<?>, Writable>() {

        public void write(WritableComparable<?> key, Writable value) throws IOException {
            out.append(key, value);
        }

        public void close(TaskAttemptContext context) throws IOException {
            out.close();
        }
    };
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) Writable(org.apache.hadoop.io.Writable) MapFile(org.apache.hadoop.io.MapFile) TaskAttemptContext(org.apache.hadoop.mapreduce.TaskAttemptContext) RecordWriter(org.apache.hadoop.mapreduce.RecordWriter) WritableComparable(org.apache.hadoop.io.WritableComparable) FileSystem(org.apache.hadoop.fs.FileSystem) CompressionCodec(org.apache.hadoop.io.compress.CompressionCodec) CompressionType(org.apache.hadoop.io.SequenceFile.CompressionType)
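
To route a job's output through this format, the usual wiring looks like the sketch below. The output path and key/value classes are placeholders; the setters are the standard org.apache.hadoop.mapreduce.lib.output API that getCompressOutput() and getOutputCompressionType() read back in getRecordWriter above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

public class MapFileJobSetup {

    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "mapfile-output");
        // Keys must be WritableComparable and values Writable, as the
        // asSubclass casts in getRecordWriter enforce
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        job.setOutputFormatClass(MapFileOutputFormat.class);
        // Read back by getCompressOutput() / getOutputCompressionType()
        FileOutputFormat.setCompressOutput(job, true);
        SequenceFileOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
        FileOutputFormat.setOutputPath(job, new Path("/tmp/mapfile-out")); // placeholder path
    }
}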

Example 15 with Writable

Use of org.apache.hadoop.io.Writable in project hadoop by apache.

The class TupleWritable, method readFields().

/**
   * {@inheritDoc}
   */
// No static typeinfo on Tuples
@SuppressWarnings("unchecked")
public void readFields(DataInput in) throws IOException {
    int card = WritableUtils.readVInt(in);
    values = new Writable[card];
    readBitSet(in, card, written);
    Class<? extends Writable>[] cls = new Class[card];
    try {
        for (int i = 0; i < card; ++i) {
            cls[i] = Class.forName(Text.readString(in)).asSubclass(Writable.class);
        }
        for (int i = 0; i < card; ++i) {
            if (cls[i].equals(NullWritable.class)) {
                values[i] = NullWritable.get();
            } else {
                values[i] = cls[i].newInstance();
            }
            if (has(i)) {
                values[i].readFields(in);
            }
        }
    } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
        throw new IOException("Failed tuple init", e);
    }
}
Also used : NullWritable(org.apache.hadoop.io.NullWritable) Writable(org.apache.hadoop.io.Writable) IOException(java.io.IOException)
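
readFields only works because TupleWritable's write side emits exactly the same sequence: a vint cardinality, the written-slot bitset, one class name per slot, then each written value. The same mirror-image contract governs any custom Writable; a minimal, hypothetical example:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

// Hypothetical custom Writable: write() and readFields() must stay mirror
// images, reading fields back in exactly the order they were written.
public class PairWritable implements Writable {

    private final Text name = new Text();
    private int count;

    @Override
    public void write(DataOutput out) throws IOException {
        name.write(out);
        WritableUtils.writeVInt(out, count); // same vint encoding readFields above relies on
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        name.readFields(in);
        count = WritableUtils.readVInt(in);
    }
}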

Aggregations

Writable (org.apache.hadoop.io.Writable) 221
IntWritable (org.apache.hadoop.io.IntWritable) 103
LongWritable (org.apache.hadoop.io.LongWritable) 91
BooleanWritable (org.apache.hadoop.io.BooleanWritable) 75
BytesWritable (org.apache.hadoop.io.BytesWritable) 74
FloatWritable (org.apache.hadoop.io.FloatWritable) 73
Test (org.junit.Test) 68
IOException (java.io.IOException) 43
Path (org.apache.hadoop.fs.Path) 43
Text (org.apache.hadoop.io.Text) 40
ArrayWritable (org.apache.hadoop.io.ArrayWritable) 37
ShortWritable (org.apache.hadoop.hive.serde2.io.ShortWritable) 34
SequenceFile (org.apache.hadoop.io.SequenceFile) 32
Configuration (org.apache.hadoop.conf.Configuration) 31
DoubleWritable (org.apache.hadoop.io.DoubleWritable) 30
DoubleWritable (org.apache.hadoop.hive.serde2.io.DoubleWritable) 29
ByteWritable (org.apache.hadoop.io.ByteWritable) 28
ByteWritable (org.apache.hadoop.hive.serde2.io.ByteWritable) 25
FileSystem (org.apache.hadoop.fs.FileSystem) 24
ArrayList (java.util.ArrayList) 23