Use of org.jboss.netty.channel.ChannelFuture in project hadoop by apache.
Class TestShuffleHandler, method createMockChannelFuture: builds a Mockito mock of ChannelFuture that reports success, returns a given Channel, and records every ShuffleHandler.ReduceMapFileCount listener added to it.
public ChannelFuture createMockChannelFuture(Channel mockCh,
    final List<ShuffleHandler.ReduceMapFileCount> listenerList) {
  final ChannelFuture mockFuture = Mockito.mock(ChannelFuture.class);
  Mockito.when(mockFuture.getChannel()).thenReturn(mockCh);
  Mockito.doReturn(true).when(mockFuture).isSuccess();
  Mockito.doAnswer(new Answer() {
    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
      // Add ReduceMapFileCount listener to a list
      if (invocation.getArguments()[0].getClass() ==
          ShuffleHandler.ReduceMapFileCount.class) {
        listenerList.add(
            (ShuffleHandler.ReduceMapFileCount) invocation.getArguments()[0]);
      }
      return null;
    }
  }).when(mockFuture).addListener(
      Mockito.any(ShuffleHandler.ReduceMapFileCount.class));
  return mockFuture;
}
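In the surrounding test, the mock future is typically wired so that writes on a mocked Channel return it; any ReduceMapFileCount listener the handler registers then lands in listenerList, where the test can complete it by hand. The following sketch illustrates that wiring with assumed local names; it is not the verbatim test code.

// Sketch (assumed names); runs inside a test method that declares throws Exception.
final List<ShuffleHandler.ReduceMapFileCount> listenerList = new ArrayList<>();
Channel mockCh = Mockito.mock(Channel.class);
ChannelFuture mockFuture = createMockChannelFuture(mockCh, listenerList);
// Any write on the mocked channel hands back the mocked future.
Mockito.doReturn(mockFuture).when(mockCh).write(Mockito.any(Object.class));
// ... exercise the ShuffleHandler so it writes map output and registers its listener ...
// Simulate the write finishing, which invokes ReduceMapFileCount.operationComplete.
for (ShuffleHandler.ReduceMapFileCount listener : listenerList) {
  listener.operationComplete(mockFuture);
}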
Use of org.jboss.netty.channel.ChannelFuture in project hadoop by apache.
Class TestShuffleHandler, method testShuffleMetrics: verifies the shuffle connection and output-byte metrics, using a stubbed ChannelFuture whose isSuccess() result changes between calls.
/**
 * Validate shuffle connection and input/output metrics.
 *
 * @throws Exception exception
 */
@Test(timeout = 10000)
public void testShuffleMetrics() throws Exception {
  MetricsSystem ms = new MetricsSystemImpl();
  ShuffleHandler sh = new ShuffleHandler(ms);
  ChannelFuture cf = make(stub(ChannelFuture.class)
      .returning(true, false).from.isSuccess());
  sh.metrics.shuffleConnections.incr();
  sh.metrics.shuffleOutputBytes.incr(1 * MiB);
  sh.metrics.shuffleConnections.incr();
  sh.metrics.shuffleOutputBytes.incr(2 * MiB);
  checkShuffleMetrics(ms, 3 * MiB, 0, 0, 2);
  sh.metrics.operationComplete(cf);
  sh.metrics.operationComplete(cf);
  checkShuffleMetrics(ms, 3 * MiB, 1, 1, 0);
}
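The make and stub calls appear to come from Hadoop's MockitoMaker test utility; the stub answers true to the first isSuccess() call and false to the second, so one operationComplete is counted as a succeeded output and the other as a failed one. A plain-Mockito equivalent (an assumption for illustration, not what the test uses) would be:

// Equivalent stub in plain Mockito: isSuccess() returns true once, then false.
ChannelFuture cf = Mockito.mock(ChannelFuture.class);
Mockito.when(cf.isSuccess()).thenReturn(true, false);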
Use of org.jboss.netty.channel.ChannelFuture in project pinpoint by naver.
Class DefaultPinpointClientFactory, method reconnect: opens a new Netty connection and hands the resulting ChannelFuture to a PinpointClientHandler.
public PinpointClient reconnect(String host, int port) throws PinpointSocketException {
  SocketAddress address = new InetSocketAddress(host, port);
  ChannelFuture connectFuture = bootstrap.connect(address);
  PinpointClientHandler pinpointClientHandler = getSocketHandler(connectFuture, address);
  PinpointClient pinpointClient = new DefaultPinpointClient(pinpointClientHandler);
  traceSocket(pinpointClient);
  return pinpointClient;
}
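Note that reconnect does not block on connectFuture itself; getSocketHandler is expected to observe the connect result. For reference, the generic Netty 3 way to react to such a future (a minimal sketch, not Pinpoint's actual handler code) is to register a listener:

// Generic Netty 3 pattern: react to the connect attempt asynchronously.
connectFuture.addListener(new ChannelFutureListener() {
  @Override
  public void operationComplete(ChannelFuture future) {
    if (future.isSuccess()) {
      // Connected: future.getChannel() is ready for writes.
    } else {
      // Failed: future.getCause() carries the connect error.
    }
  }
});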
Use of org.jboss.netty.channel.ChannelFuture in project neo4j by neo4j.
Class BufferReusingChunkingChannelBufferTest, method triggerOperationCompleteCallback: drives a buffer-reuse listener by hand with a mocked, already-successful ChannelFuture.
private static ChannelBuffer triggerOperationCompleteCallback(
    BufferReusingChunkingChannelBuffer buffer) throws Exception {
  ChannelBuffer reusedBuffer = spy(ChannelBuffers.dynamicBuffer());
  ChannelFuture channelFuture = mock(ChannelFuture.class);
  when(channelFuture.isDone()).thenReturn(true);
  when(channelFuture.isSuccess()).thenReturn(true);
  buffer.newChannelFutureListener(reusedBuffer).operationComplete(channelFuture);
  return reusedBuffer;
}
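The same mock-a-completed-future pattern shows up in several of these tests. A small generic helper along these lines (an assumed refactoring, not code from Neo4j) captures the idea of building a ChannelFuture that already reports completion so a listener can be driven synchronously:

// Assumed helper: a ChannelFuture stubbed as already complete, so any
// ChannelFutureListener under test can be invoked directly.
private static ChannelFuture completedChannelFuture(Channel channel, boolean success) {
  ChannelFuture future = mock(ChannelFuture.class);
  when(future.getChannel()).thenReturn(channel);
  when(future.isDone()).thenReturn(true);
  when(future.isSuccess()).thenReturn(success);
  return future;
}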
Use of org.jboss.netty.channel.ChannelFuture in project weave by continuuity.
Class SimpleKafkaClient, method preparePublish: batches messages per partition and publishes them by writing a produce request once the pooled connection's ChannelFuture completes.
@Override
public PreparePublish preparePublish(final String topic, final Compression compression) {
  final Map<Integer, MessageSetEncoder> encoders = Maps.newHashMap();
  return new PreparePublish() {

    @Override
    public PreparePublish add(byte[] payload, Object partitionKey) {
      return add(ByteBuffer.wrap(payload), partitionKey);
    }

    @Override
    public PreparePublish add(ByteBuffer payload, Object partitionKey) {
      // TODO: Partition
      int partition = 0;
      MessageSetEncoder encoder = encoders.get(partition);
      if (encoder == null) {
        encoder = getEncoder(compression);
        encoders.put(partition, encoder);
      }
      encoder.add(ChannelBuffers.wrappedBuffer(payload));
      return this;
    }

    @Override
    public ListenableFuture<?> publish() {
      List<ListenableFuture<?>> futures = Lists.newArrayListWithCapacity(encoders.size());
      for (Map.Entry<Integer, MessageSetEncoder> entry : encoders.entrySet()) {
        futures.add(doPublish(topic, entry.getKey(), entry.getValue().finish()));
      }
      encoders.clear();
      return Futures.allAsList(futures);
    }

    private ListenableFuture<?> doPublish(String topic, int partition, ChannelBuffer messageSet) {
      final KafkaRequest request = KafkaRequest.createProduce(topic, partition, messageSet);
      final SettableFuture<?> result = SettableFuture.create();
      final ConnectionPool.ConnectResult connection =
          connectionPool.connect(getTopicBroker(topic, partition).getAddress());
      connection.getChannelFuture().addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
          try {
            future.getChannel().write(request)
                .addListener(getPublishChannelFutureListener(result, null, connection));
          } catch (Exception e) {
            result.setException(e);
          }
        }
      });
      return result;
    }
  };
}
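A caller uses the returned PreparePublish fluently: add one or more payloads, then call publish() to get a future that completes when every per-partition produce request has finished. The sketch below is hypothetical usage; the method, topic name, payloads and the Compression.NONE constant are assumptions, not values from the Weave code base.

// Hypothetical usage of the PreparePublish API above.
void publishTwoMessages(SimpleKafkaClient client) throws Exception {
  PreparePublish batch = client.preparePublish("events", Compression.NONE);
  batch.add("hello".getBytes(Charsets.UTF_8), null)   // partitionKey is currently ignored (TODO: Partition)
       .add("world".getBytes(Charsets.UTF_8), null);
  batch.publish().get();  // block until all per-partition produce requests complete
}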