Example usage of org.jboss.netty.channel.Channel in the Apache Hadoop project,
taken from the class TestShuffleHandler, method testClientClosesConnection.
/**
 * Verify client prematurely closing a connection.
 *
 * The stubbed Shuffle below streams a large payload down the channel; the
 * test client reads only a single ShuffleHeader and closes, which should
 * surface as a broken pipe on the server side WITHOUT sendError() being
 * invoked.
 *
 * @throws Exception exception.
 */
@Test(timeout = 10000)
public void testClientClosesConnection() throws Exception {
  // Records unexpected sendError() invocations; must remain empty.
  final ArrayList<Throwable> failures = new ArrayList<Throwable>(1);
  Configuration conf = new Configuration();
  // Port 0: let the OS pick a free ephemeral port.
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
  ShuffleHandler shuffleHandler = new ShuffleHandler() {
    @Override
    protected Shuffle getShuffle(Configuration conf) {
      // replace the shuffle handler with one stubbed for testing
      return new Shuffle(conf) {
        // Map-output lookup is irrelevant to this test; return nothing.
        @Override
        protected MapOutputInfo getMapOutputInfo(String mapId, int reduce, String jobId, String user) throws IOException {
          return null;
        }
        @Override
        protected void populateHeaders(List<String> mapIds, String jobId, String user, int reduce, HttpRequest request, HttpResponse response, boolean keepAliveParam, Map<String, MapOutputInfo> infoMap) throws IOException {
          // Only set response headers and skip everything else
          // send some dummy value for content-length
          super.setResponseHeaders(response, keepAliveParam, 100);
        }
        // Skip request verification entirely for the stub.
        @Override
        protected void verifyRequest(String appid, ChannelHandlerContext ctx, HttpRequest request, HttpResponse response, URL requestUri) throws IOException {
        }
        @Override
        protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx, Channel ch, String user, String mapId, int reduce, MapOutputInfo info) throws IOException {
          // send a shuffle header and a lot of data down the channel
          // to trigger a broken pipe
          ShuffleHeader header = new ShuffleHeader("attempt_12345_1_m_1_0", 5678, 5678, 1);
          DataOutputBuffer dob = new DataOutputBuffer();
          header.write(dob);
          ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
          dob = new DataOutputBuffer();
          // Accumulate ~100k headers in one buffer so the second write far
          // exceeds what the client will read before closing.
          for (int i = 0; i < 100000; ++i) {
            header.write(dob);
          }
          return ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
        }
        // A broken pipe from an early client close must NOT be reported as
        // an error; record it as a test failure if it is.
        @Override
        protected void sendError(ChannelHandlerContext ctx, HttpResponseStatus status) {
          if (failures.size() == 0) {
            failures.add(new Error());
            ctx.getChannel().close();
          }
        }
        @Override
        protected void sendError(ChannelHandlerContext ctx, String message, HttpResponseStatus status) {
          if (failures.size() == 0) {
            failures.add(new Error());
            ctx.getChannel().close();
          }
        }
      };
    }
  };
  shuffleHandler.init(conf);
  shuffleHandler.start();
  // simulate a reducer that closes early by reading a single shuffle header
  // then closing the connection
  URL url = new URL("http://127.0.0.1:" + shuffleHandler.getConfig().get(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY) + "/mapOutput?job=job_12345_1&reduce=1&map=attempt_12345_1_m_1_0");
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_NAME, ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
  conn.setRequestProperty(ShuffleHeader.HTTP_HEADER_VERSION, ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
  conn.connect();
  DataInputStream input = new DataInputStream(conn.getInputStream());
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  // The server marked the response non-keep-alive, so "close" is expected.
  Assert.assertEquals("close", conn.getHeaderField(HttpHeader.CONNECTION.asString()));
  ShuffleHeader header = new ShuffleHeader();
  header.readFields(input);
  // Close after one header while the server is still streaming data.
  input.close();
  shuffleHandler.stop();
  Assert.assertTrue("sendError called when client closed connection", failures.size() == 0);
}
Example usage of org.jboss.netty.channel.Channel in the Apache Hadoop project,
taken from the class ShuffleHandler, method serviceStart.
// TODO change AbstractService to throw InterruptedException
/**
 * Starts the shuffle service: recovers persisted state, builds the Netty
 * server pipeline, binds the listening socket, and publishes the actual
 * bound port back into the configuration.
 *
 * @throws Exception if pipeline construction or startup fails.
 */
@Override
protected void serviceStart() throws Exception {
  Configuration conf = getConfig();
  userRsrc = new ConcurrentHashMap<String, String>();
  secretManager = new JobTokenSecretManager();
  // Restore previously persisted shuffle state (e.g. job tokens) before
  // accepting any connections.
  recoverState(conf);
  ServerBootstrap bootstrap = new ServerBootstrap(selector);
  try {
    pipelineFact = new HttpPipelineFactory(conf);
  } catch (Exception ex) {
    throw new RuntimeException(ex);
  }
  bootstrap.setOption("backlog", conf.getInt(SHUFFLE_LISTEN_QUEUE_SIZE, DEFAULT_SHUFFLE_LISTEN_QUEUE_SIZE));
  bootstrap.setOption("child.keepAlive", true);
  bootstrap.setPipelineFactory(pipelineFact);
  port = conf.getInt(SHUFFLE_PORT_CONFIG_KEY, DEFAULT_SHUFFLE_PORT);
  Channel ch = bootstrap.bind(new InetSocketAddress(port));
  accepted.add(ch);
  // If port was 0, the OS chose an ephemeral port; read back the real one
  // and advertise it via the config so clients can find the service.
  port = ((InetSocketAddress) ch.getLocalAddress()).getPort();
  conf.set(SHUFFLE_PORT_CONFIG_KEY, Integer.toString(port));
  pipelineFact.SHUFFLE.setPort(port);
  LOG.info(getName() + " listening on port " + port);
  super.serviceStart();
  sslFileBufferSize = conf.getInt(SUFFLE_SSL_FILE_BUFFER_SIZE_KEY, DEFAULT_SUFFLE_SSL_FILE_BUFFER_SIZE);
  connectionKeepAliveEnabled = conf.getBoolean(SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED, DEFAULT_SHUFFLE_CONNECTION_KEEP_ALIVE_ENABLED);
  // Math.max(1, ...) guards against non-positive configured values.
  connectionKeepAliveTimeOut = Math.max(1, conf.getInt(SHUFFLE_CONNECTION_KEEP_ALIVE_TIME_OUT, DEFAULT_SHUFFLE_CONNECTION_KEEP_ALIVE_TIME_OUT));
  mapOutputMetaInfoCacheSize = Math.max(1, conf.getInt(SHUFFLE_MAPOUTPUT_META_INFO_CACHE_SIZE, DEFAULT_SHUFFLE_MAPOUTPUT_META_INFO_CACHE_SIZE));
}
Example usage of org.jboss.netty.channel.Channel in the Apache Storm project,
taken from the class SaslStormClientHandler, method channelConnected.
/**
 * Invoked when the connection to the server is established. Registers the
 * new channel with the owning client, makes sure per-channel SASL client
 * state exists, and opens the SASL handshake by requesting a token.
 */
@Override
public void channelConnected(ChannelHandlerContext ctx, ChannelStateEvent event) {
  Channel ch = ctx.getChannel();
  // Register the newly established channel with the owning client.
  client.channelConnected(ch);
  try {
    ensureSaslClient(ch);
    LOG.debug("Sending SASL_TOKEN_MESSAGE_REQUEST");
    ch.write(ControlMessage.SASL_TOKEN_MESSAGE_REQUEST);
  } catch (Exception e) {
    LOG.error("Failed to authenticate with server " + "due to error: ", e);
  }
}

/** Lazily creates and registers the SASL client state for {@code ch}. */
private void ensureSaslClient(Channel ch) throws Exception {
  if (SaslNettyClientState.getSaslNettyClient.get(ch) == null) {
    LOG.debug("Creating saslNettyClient now " + "for channel: " + ch);
    SaslNettyClientState.getSaslNettyClient.set(ch, new SaslNettyClient(name, token));
  }
}
Example usage of org.jboss.netty.channel.Channel in the Apache Storm project,
taken from the class SaslStormServerAuthorizeHandler, method messageReceived.
/**
 * Gate on every inbound message: the request is forwarded down the pipeline
 * only if this channel has a SASL server whose authentication exchange has
 * completed successfully; otherwise the message is dropped with a warning.
 */
@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
  Object request = e.getMessage();
  if (request == null) {
    return;
  }
  Channel channel = ctx.getChannel();
  LOG.debug("messageReceived: Checking whether the client is authorized to send messages to the server ");
  // Authorize: a request may proceed if and only if the client has
  // successfully completed SASL authentication on this channel.
  SaslNettyServer server = SaslNettyServerState.getSaslNettyServer.get(channel);
  if (server == null) {
    // No SASL state at all: the client never started authenticating.
    LOG.warn("messageReceived: This client is *NOT* authorized to perform " + "this action since there's no saslNettyServer to " + "authenticate the client: " + "refusing to perform requested action: " + request);
  } else if (!server.isComplete()) {
    // Handshake started but never finished; not authorized.
    LOG.warn("messageReceived: This client is *NOT* authorized to perform " + "this action because SASL authentication did not complete: " + "refusing to perform requested action: " + request);
  } else {
    LOG.debug("messageReceived: authenticated client: " + server.getUserName() + " is authorized to do request " + "on server.");
    // Authenticated: hand the request to the next pipeline component.
    Channels.fireMessageReceived(ctx, request);
  }
}
Example usage of org.jboss.netty.channel.Channel in the Apache Storm project,
taken from the class PacemakerClient, method send.
/**
 * Synchronously sends a heartbeat message and blocks until the matching
 * response arrives in this message's slot (retrying on reconnect/timeout).
 *
 * @param m the message to send; its message id is overwritten with the
 *          claimed slot number.
 * @return the server's response for this message.
 * @throws PacemakerConnectionException on connection failure.
 * @throws RuntimeException if the calling thread is interrupted while
 *         waiting (interrupt status is restored before throwing).
 */
public HBMessage send(HBMessage m) throws PacemakerConnectionException {
  LOG.debug("Sending message: {}", m.toString());
  try {
    // Claim a free slot; blocks when all slots are in use.
    int next = availableMessageSlots.take();
    synchronized (m) {
      m.set_message_id(next);
      messages[next] = m;
      LOG.debug("Put message in slot: {}", Integer.toString(next));
      // Loop until the response handler replaces our request in the slot
      // (or nulls it out on disconnect); wake up at least once a second.
      do {
        waitUntilReady();
        Channel channel = channelRef.get();
        if (channel != null) {
          channel.write(m);
          m.wait(1000);
        }
      } while (messages[next] == m);
    }
    HBMessage ret = messages[next];
    if (ret == null) {
      // This can happen if we lost the connection and subsequently
      // reconnected or timed out. Retry and return THAT attempt's
      // response; the original dropped the retry's result and
      // returned null to the caller.
      return send(m);
    }
    messages[next] = null;
    LOG.debug("Got Response: {}", ret);
    return ret;
  } catch (InterruptedException e) {
    // Restore interrupt status so callers can observe the interruption.
    Thread.currentThread().interrupt();
    LOG.error("PacemakerClient send interrupted: ", e);
    throw new RuntimeException(e);
  }
}
Aggregations