Use of org.apache.hadoop.io.Writable in project hadoop by apache.
From the class Client, method call:
/**
 * Make a call, passing <code>rpcRequest</code>, to the IPC server defined by
 * <code>remoteId</code>, returning the rpc response.
 *
 * @param rpcKind - the kind of RPC being made
 * @param rpcRequest - contains serialized method and method parameters
 * @param remoteId - the target rpc server
 * @param serviceClass - service class for RPC
 * @param fallbackToSimpleAuth - set to true or false during this method to
 *     indicate if a secure client falls back to simple auth
 * @return the rpc response
 * @throws IOException if there are network problems or if the remote code
 *     threw an exception
 */
Writable call(RPC.RpcKind rpcKind, Writable rpcRequest, ConnectionId remoteId,
    int serviceClass, AtomicBoolean fallbackToSimpleAuth) throws IOException {
  final Call call = createCall(rpcKind, rpcRequest);
  final Connection connection = getConnection(remoteId, call, serviceClass,
      fallbackToSimpleAuth);
  try {
    checkAsyncCall();
    try {
      // send the rpc request
      connection.sendRpcRequest(call);
    } catch (RejectedExecutionException e) {
      throw new IOException("connection has been closed", e);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      LOG.warn("interrupted waiting to send rpc request to server", e);
      throw new IOException(e);
    }
  } catch (Exception e) {
    if (isAsynchronousMode()) {
      releaseAsyncCall();
    }
    throw e;
  }

  if (isAsynchronousMode()) {
    final AsyncGet<Writable, IOException> asyncGet =
        new AsyncGet<Writable, IOException>() {
      @Override
      public Writable get(long timeout, TimeUnit unit)
          throws IOException, TimeoutException {
        // Assume the call completes, so the async-call slot is released on a
        // normal return or an IOException; only a timeout keeps it held so
        // the caller can retry get(...).
        boolean done = true;
        try {
          final Writable w = getRpcResponse(call, connection, timeout, unit);
          if (w == null) {
            done = false;
            throw new TimeoutException(call + " timed out " + timeout + " " + unit);
          }
          return w;
        } finally {
          if (done) {
            releaseAsyncCall();
          }
        }
      }

      @Override
      public boolean isDone() {
        synchronized (call) {
          return call.done;
        }
      }
    };

    ASYNC_RPC_RESPONSE.set(asyncGet);
    return null;
  } else {
    return getRpcResponse(call, connection, -1, null);
  }
}
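In asynchronous mode the method returns null and parks the pending response in the ASYNC_RPC_RESPONSE thread-local, so the caller must collect it on the same thread. Below is a minimal sketch of that handshake, assuming a recent Hadoop where Client.getAsyncRpcResponse() exposes the thread-local. Since this call(...) overload is package-private, the sketch also assumes code living in org.apache.hadoop.ipc; client, request, and remoteId are placeholders for values the caller already has.

import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.util.concurrent.AsyncGet;

// Sketch: issue an asynchronous call and collect the response on the same
// thread. Assumes asynchronous mode is enabled for this client.
static Writable callAndAwait(Client client, Writable request,
    ConnectionId remoteId) throws IOException, TimeoutException {
  // In asynchronous mode, call(...) returns null immediately...
  Writable immediate = client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER, request,
      remoteId, RPC.RPC_SERVICE_CLASS_DEFAULT, null);
  assert immediate == null;
  // ...and the pending response waits in the ASYNC_RPC_RESPONSE thread-local.
  final AsyncGet<? extends Writable, IOException> pending =
      Client.getAsyncRpcResponse();
  // Block for up to 10 seconds. On timeout the "done" flag above stays false,
  // so the async-call slot is kept and pending.get(...) may be retried.
  return pending.get(10, TimeUnit.SECONDS);
}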
Use of org.apache.hadoop.io.Writable in project hadoop by apache.
From the class TestServer, method testLogExceptions:
@Test(timeout = 300000)
public void testLogExceptions() throws Exception {
  final Configuration conf = new Configuration();
  final Call dummyCall = new Call(0, 0, null, null);
  Log logger = mock(Log.class);
  Server server = new Server("0.0.0.0", 0, LongWritable.class, 1, conf) {
    @Override
    public Writable call(RPC.RpcKind rpcKind, String protocol,
        Writable param, long receiveTime) throws Exception {
      return null;
    }
  };
  server.addSuppressedLoggingExceptions(TestException1.class);
  server.addTerseExceptions(TestException2.class);

  // Nothing should be logged for a suppressed exception.
  server.logException(logger, new TestException1(), dummyCall);
  verifyZeroInteractions(logger);

  // No stack trace should be logged for a terse exception.
  server.logException(logger, new TestException2(), dummyCall);
  verify(logger, times(1)).info(anyObject());

  // Full stack trace should be logged for other exceptions.
  final Throwable te3 = new TestException3();
  server.logException(logger, te3, dummyCall);
  verify(logger, times(1)).info(anyObject(), eq(te3));
}
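TestException1 through TestException3 are just marker types; what the test exercises is the registration API on Server. A short sketch of how a real server might use that API, with hypothetical exception names:

// Hypothetical application exceptions; the names are illustrative only.
class StaleTokenException extends Exception { }
class TooBusyException extends Exception { }

static void configureExceptionLogging(Server server) {
  // Suppressed exceptions are never logged by logException at all.
  server.addSuppressedLoggingExceptions(StaleTokenException.class);
  // Terse exceptions are logged as a one-line message, no stack trace.
  server.addTerseExceptions(TooBusyException.class);
  // Any other exception passed to logException gets a full stack trace.
}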
Use of org.apache.hadoop.io.Writable in project hadoop by apache.
From the class TestAsyncIPC, method testCallIdAndRetry:
/**
* Test if (1) the rpc server uses the call id/retry provided by the rpc
* client, and (2) the rpc client receives the same call id/retry from the rpc
* server.
*
* @throws ExecutionException
* @throws InterruptedException
*/
@Test(timeout = 60000)
public void testCallIdAndRetry() throws IOException, InterruptedException,
    ExecutionException {
  final Map<Integer, CallInfo> infoMap = new HashMap<Integer, CallInfo>();

  // Override client to store the call info and check response
  final Client client = new Client(LongWritable.class, conf) {
    @Override
    Call createCall(RpcKind rpcKind, Writable rpcRequest) {
      // Set different call id and retry count for the next call
      Client.setCallIdAndRetryCount(Client.nextCallId(),
          TestIPC.RANDOM.nextInt(255), null);
      final Call call = super.createCall(rpcKind, rpcRequest);
      CallInfo info = new CallInfo();
      info.id = call.id;
      info.retry = call.retry;
      infoMap.put(call.id, info);
      return call;
    }

    @Override
    void checkResponse(RpcResponseHeaderProto header) throws IOException {
      super.checkResponse(header);
      Assert.assertEquals(infoMap.get(header.getCallId()).retry,
          header.getRetryCount());
    }
  };

  // Attach a listener that tracks every call received by the server.
  final TestServer server = new TestIPC.TestServer(1, false, conf);
  server.callListener = new Runnable() {
    @Override
    public void run() {
      Assert.assertEquals(infoMap.get(Server.getCallId()).retry,
          Server.getCallRetryCount());
    }
  };

  try {
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    final AsyncCaller caller = new AsyncCaller(client, addr, 4);
    caller.run();
    caller.assertReturnValues();
  } finally {
    client.stop();
    server.stop();
  }
}
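CallInfo is a small holder defined elsewhere in TestAsyncIPC and not shown here; a plausible sketch, matching the two fields the overrides above populate:

// Plausible sketch of the CallInfo holder; the real definition lives
// elsewhere in TestAsyncIPC.
static class CallInfo {
  int id;    // call id the client assigned in createCall
  int retry; // retry count the client assigned in createCall
}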
Use of org.apache.hadoop.io.Writable in project hadoop by apache.
From the class MapFileOutputFormat, method getRecordWriter:
public RecordWriter<WritableComparable<?>, Writable> getRecordWriter(
    TaskAttemptContext context) throws IOException {
  Configuration conf = context.getConfiguration();
  CompressionCodec codec = null;
  CompressionType compressionType = CompressionType.NONE;
  if (getCompressOutput(context)) {
    // find the kind of compression to do
    compressionType = SequenceFileOutputFormat.getOutputCompressionType(context);
    // find the right codec
    Class<?> codecClass = getOutputCompressorClass(context, DefaultCodec.class);
    codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, conf);
  }
  Path file = getDefaultWorkFile(context, "");
  FileSystem fs = file.getFileSystem(conf);
  // ignore the progress parameter, since MapFile is local
  final MapFile.Writer out = new MapFile.Writer(conf, fs, file.toString(),
      context.getOutputKeyClass().asSubclass(WritableComparable.class),
      context.getOutputValueClass().asSubclass(Writable.class),
      compressionType, codec, context);
  return new RecordWriter<WritableComparable<?>, Writable>() {
    public void write(WritableComparable<?> key, Writable value)
        throws IOException {
      out.append(key, value);
    }

    public void close(TaskAttemptContext context) throws IOException {
      out.close();
    }
  };
}
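For context, here is a minimal sketch of wiring this output format into a job; the job name, output path, and Text key/value classes are placeholders. The two compression settings are exactly what getCompressOutput(...) and getOutputCompressionType(...) read back in the method above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

public class MapFileJobSetup {
  public static void main(String[] args) throws Exception {
    // Sketch: route job output through MapFileOutputFormat.
    Job job = Job.getInstance(new Configuration(), "mapfile-output");
    job.setOutputFormatClass(MapFileOutputFormat.class);
    // Keys must be WritableComparable and values Writable, per the casts
    // in getRecordWriter above.
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    // Enable block-compressed output; read back by getRecordWriter.
    FileOutputFormat.setCompressOutput(job, true);
    SequenceFileOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);
    FileOutputFormat.setOutputPath(job, new Path(args[0]));
  }
}

Note that MapFile.Writer requires keys to be appended in sorted order; within a single reduce task the framework delivers keys sorted, which is why the plain out.append(key, value) in write() is safe.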
Use of org.apache.hadoop.io.Writable in project hadoop by apache.
From the class TupleWritable, method readFields:
/**
* {@inheritDoc}
*/
// No static typeinfo on Tuples
@SuppressWarnings("unchecked")
public void readFields(DataInput in) throws IOException {
  // Read the cardinality, then the bit set marking which positions were written.
  int card = WritableUtils.readVInt(in);
  values = new Writable[card];
  readBitSet(in, card, written);
  // Every slot carries its class name, so each value can be instantiated
  // reflectively before its payload (if any) is read.
  Class<? extends Writable>[] cls = new Class[card];
  try {
    for (int i = 0; i < card; ++i) {
      cls[i] = Class.forName(Text.readString(in)).asSubclass(Writable.class);
    }
    for (int i = 0; i < card; ++i) {
      if (cls[i].equals(NullWritable.class)) {
        values[i] = NullWritable.get();
      } else {
        values[i] = cls[i].newInstance();
      }
      if (has(i)) {
        values[i].readFields(in);
      }
    }
  } catch (ClassNotFoundException e) {
    throw new IOException("Failed tuple init", e);
  } catch (IllegalAccessException e) {
    throw new IOException("Failed tuple init", e);
  } catch (InstantiationException e) {
    throw new IOException("Failed tuple init", e);
  }
}
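The read side only makes sense next to its writer. Below is a sketch of the symmetric write method, reconstructed from the field order readFields consumes (vint cardinality, the written-bit set, a class name per slot, then the payload of each written slot); writeBitSet is assumed to be the counterpart of the readBitSet helper used above.

// Sketch of the matching serializer, mirroring readFields above.
public void write(DataOutput out) throws IOException {
  // Cardinality, then the bit set recording which positions hold a value.
  WritableUtils.writeVInt(out, values.length);
  writeBitSet(out, values.length, written);
  // Class name for every slot, written or not, so readFields can instantiate.
  for (int i = 0; i < values.length; ++i) {
    Text.writeString(out, values[i].getClass().getName());
  }
  // Payload only for the slots marked in the bit set.
  for (int i = 0; i < values.length; ++i) {
    if (has(i)) {
      values[i].write(out);
    }
  }
}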