use of org.apache.flume.channel.MemoryChannel in project phoenix by apache.
the class JsonEventSerializerIT method initChannel.
private Channel initChannel() {
    // Channel configuration
    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");
    Channel channel = new MemoryChannel();
    channel.setName("memorychannel");
    Configurables.configure(channel, channelContext);
    return channel;
}
use of org.apache.flume.channel.MemoryChannel in project phoenix by apache.
the class RegexEventSerializerIT method initChannel.
private Channel initChannel() {
    // Channel configuration
    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");
    Channel channel = new MemoryChannel();
    channel.setName("memorychannel");
    Configurables.configure(channel, channelContext);
    return channel;
}
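Both Phoenix serializer tests share this helper: initChannel() only builds and configures the MemoryChannel, while the actual event flow is driven through Flume transactions elsewhere in the test. Below is a minimal sketch of how such a channel is typically exercised, assuming the standard Flume 1.x Channel/Transaction API; the method name and JSON payload are illustrative, not taken from the Phoenix tests.

@Test
public void testChannelRoundTrip() throws Exception {
    Channel channel = initChannel();
    channel.start();
    // Producer side: every put must run inside a channel transaction.
    Transaction putTx = channel.getTransaction();
    putTx.begin();
    channel.put(EventBuilder.withBody("{\"col1\":\"value\"}", StandardCharsets.UTF_8));
    putTx.commit();
    putTx.close();
    // Consumer side (normally the sink's job): takes need their own transaction.
    Transaction takeTx = channel.getTransaction();
    takeTx.begin();
    Event event = channel.take();
    Assert.assertNotNull(event);
    takeTx.commit();
    takeTx.close();
    channel.stop();
}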
use of org.apache.flume.channel.MemoryChannel in project MSEC by Tencent.
the class TestProfobufSource method testBasic.
@Test
public void testBasic() throws Exception {
    // Source configuration
    Context context = new Context();
    context.put("bind", "localhost");
    context.put("port", "45673");
    context.put("threads", "1");
    ProtobufSource source = new ProtobufSource();
    source.configure(context);

    // Channel configuration
    Map<String, String> channelContext = new HashMap<>();
    channelContext.put("capacity", "1000000");
    // for faster tests
    channelContext.put("keep-alive", "0");
    Channel channel = new MemoryChannel();
    Configurables.configure(channel, new Context(channelContext));

    // Wire the channel to a logging sink and start draining it.
    Sink sink = new LoggerSink();
    sink.setChannel(channel);
    sink.start();
    DefaultSinkProcessor proc = new DefaultSinkProcessor();
    proc.setSinks(Collections.singletonList(sink));
    SinkRunner sinkRunner = new SinkRunner(proc);
    sinkRunner.start();

    // Attach the source to the channel and let it run briefly.
    ChannelSelector rcs = new ReplicatingChannelSelector();
    rcs.setChannels(Collections.singletonList(channel));
    ChannelProcessor chp = new ChannelProcessor(rcs);
    source.setChannelProcessor(chp);
    source.start();
    Thread.sleep(5000);

    source.stop();
    sinkRunner.stop();
    sink.stop();
}
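In this test the ProtobufSource is only exercised passively: the Thread.sleep(5000) window is where an external protobuf client would normally connect and send data. If you want to push events through the same channel-and-sink wiring without a network client, the ChannelProcessor can be driven directly; it opens and commits the channel transaction itself. A rough sketch, reusing the chp variable from the test above (the event bodies are illustrative):

// Hand-feed a single event along the same path the source would use.
chp.processEvent(EventBuilder.withBody("hello".getBytes(StandardCharsets.UTF_8)));
// Batch variant; both calls wrap the puts in a channel transaction internally.
chp.processEventBatch(Collections.singletonList(
    EventBuilder.withBody("world".getBytes(StandardCharsets.UTF_8))));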
use of org.apache.flume.channel.MemoryChannel in project ignite by apache.
the class IgniteSinkTest method testSink.
/**
 * @throws Exception {@link Exception}.
 */
public void testSink() throws Exception {
    IgniteConfiguration cfg = loadConfiguration("modules/flume/src/test/resources/example-ignite.xml");
    cfg.setClientMode(false);
    final Ignite grid = startGrid("igniteServerNode", cfg);

    Context channelContext = new Context();
    channelContext.put("capacity", String.valueOf(EVENT_CNT));
    channelContext.put("transactionCapacity", String.valueOf(EVENT_CNT));
    Channel memoryChannel = new MemoryChannel();
    Configurables.configure(memoryChannel, channelContext);

    final CountDownLatch latch = new CountDownLatch(EVENT_CNT);
    final IgnitePredicate<Event> putLsnr = new IgnitePredicate<Event>() {
        @Override
        public boolean apply(Event evt) {
            assert evt != null;
            latch.countDown();
            return true;
        }
    };

    IgniteSink sink = new IgniteSink() {
        // Setting the listener on cache before sink processing starts.
        @Override
        public synchronized void start() {
            super.start();
            grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).localListen(putLsnr, EVT_CACHE_OBJECT_PUT);
        }
    };
    sink.setName("IgniteSink");
    sink.setChannel(memoryChannel);

    Context ctx = new Context();
    ctx.put(IgniteSinkConstants.CFG_CACHE_NAME, CACHE_NAME);
    ctx.put(IgniteSinkConstants.CFG_PATH, "example-ignite.xml");
    ctx.put(IgniteSinkConstants.CFG_EVENT_TRANSFORMER, "org.apache.ignite.stream.flume.TestEventTransformer");
    Configurables.configure(sink, ctx);
    sink.start();

    try {
        Transaction tx = memoryChannel.getTransaction();
        tx.begin();
        for (int i = 0; i < EVENT_CNT; i++)
            memoryChannel.put(EventBuilder.withBody((String.valueOf(i) + ": " + i).getBytes()));
        tx.commit();
        tx.close();

        Sink.Status status = Sink.Status.READY;
        while (status != Sink.Status.BACKOFF) {
            status = sink.process();
        }
    } finally {
        sink.stop();
    }

    // Checks that 10000 events are successfully processed in 10 seconds.
    assertTrue(latch.await(10, TimeUnit.SECONDS));
    grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).stopLocalListen(putLsnr);

    IgniteCache<String, Integer> cache = grid.cache(CACHE_NAME);
    // Checks that each event was processed properly.
    for (int i = 0; i < EVENT_CNT; i++) {
        assertEquals(i, (int) cache.get(String.valueOf(i)));
    }
    assertEquals(EVENT_CNT, cache.size(CachePeekMode.PRIMARY));
}
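The assertions at the end imply that TestEventTransformer converts each "i: i" event body into a String key and an Integer value before the sink writes them to the cache. A rough sketch of what such a transformer could look like, assuming the ignite-flume EventTransformer interface accepts a batch of Flume events and returns the map of cache entries (class name and parsing are illustrative, not the actual test class):

public class SampleEventTransformer implements EventTransformer<Event, String, Integer> {
    @Override
    public Map<String, Integer> transform(List<Event> events) {
        Map<String, Integer> entries = new HashMap<>();
        for (Event evt : events) {
            // Bodies were written above as "<i>: <i>".
            String[] parts = new String(evt.getBody(), StandardCharsets.UTF_8).split(":");
            entries.put(parts[0].trim(), Integer.parseInt(parts[1].trim()));
        }
        return entries;
    }
}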
use of org.apache.flume.channel.MemoryChannel in project logging-log4j2 by apache.
the class FlumeAppenderTest method setUp.
@Before
public void setUp() throws Exception {
    eventSource = new AvroSource();
    channel = new MemoryChannel();
    Configurables.configure(channel, new Context());
    avroLogger = (Logger) LogManager.getLogger("avrologger");
    /*
     * Clear out all other appenders associated with this logger to ensure
     * we're only hitting the Avro appender.
     */
    removeAppenders(avroLogger);
    final Context context = new Context();
    testPort = String.valueOf(AvailablePortFinder.getNextAvailable());
    context.put("port", testPort);
    context.put("bind", "0.0.0.0");
    Configurables.configure(eventSource, context);
    final List<Channel> channels = new ArrayList<>();
    channels.add(channel);
    final ChannelSelector cs = new ReplicatingChannelSelector();
    cs.setChannels(channels);
    eventSource.setChannelProcessor(new ChannelProcessor(cs));
    eventSource.start();
    Assert.assertTrue("Reached start or error",
        LifecycleController.waitForOneOf(eventSource, LifecycleState.START_OR_ERROR));
    Assert.assertEquals("Server is started", LifecycleState.START, eventSource.getLifecycleState());
}