Use of org.apache.ignite.events.CacheEvent in project ignite by apache.
The class GridCacheEventManager, method addEvent.
/**
 * @param part Partition.
 * @param key Key for the event.
 * @param evtNodeId Event node ID.
 * @param xid Transaction ID.
 * @param lockId Lock ID.
 * @param type Event type.
 * @param newVal New value.
 * @param hasNewVal Whether new value is present or not.
 * @param oldVal Old value.
 * @param hasOldVal Whether old value is present or not.
 * @param subjId Subject ID.
 * @param cloClsName Closure class name.
 * @param taskName Task class name.
 * @param keepBinary Keep binary flag.
 */
public void addEvent(int part, KeyCacheObject key, UUID evtNodeId, @Nullable IgniteUuid xid,
    @Nullable Object lockId, int type, @Nullable CacheObject newVal, boolean hasNewVal,
    @Nullable CacheObject oldVal, boolean hasOldVal, UUID subjId, @Nullable String cloClsName,
    @Nullable String taskName, boolean keepBinary) {
    assert key != null || type == EVT_CACHE_STARTED || type == EVT_CACHE_STOPPED;

    if (!cctx.events().isRecordable(type))
        LT.warn(log, "Added event without checking if event is recordable: " + U.gridEventName(type));

    // Events are not fired for internal entries.
    if (key == null || !key.internal()) {
        ClusterNode evtNode = cctx.discovery().node(evtNodeId);

        if (evtNode == null)
            evtNode = findNodeInHistory(evtNodeId);

        if (evtNode == null)
            LT.warn(log, "Failed to find event node in grid topology history " +
                "(try to increase topology history size configuration property of configured " +
                "discovery SPI): " + evtNodeId);

        keepBinary = keepBinary || forceKeepBinary;

        Object key0;
        Object val0;
        Object oldVal0;

        try {
            key0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, false);
            val0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(newVal, keepBinary, false);
            oldVal0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(oldVal, keepBinary, false);
        }
        catch (Exception e) {
            if (!cctx.cacheObjectContext().kernalContext().cacheObjects().isBinaryEnabled(cctx.config()))
                throw e;

            if (log.isDebugEnabled())
                log.debug("Failed to unmarshall cache object value for the event notification: " + e);

            if (!forceKeepBinary)
                LT.warn(log, "Failed to unmarshall cache object value for the event notification " +
                    "(all further notifications will keep binary object format).");

            forceKeepBinary = true;

            key0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, true, false);
            val0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(newVal, true, false);
            oldVal0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(oldVal, true, false);
        }

        cctx.gridEvents().record(new CacheEvent(cctx.name(), cctx.localNode(), evtNode, "Cache event.", type, part,
            cctx.isNear(), key0, xid, lockId, val0, hasNewVal, oldVal0, hasOldVal, subjId, cloClsName, taskName));
    }
}
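For context, here is a minimal sketch of how the CacheEvent recorded above can be observed from user code. It uses the public IgniteEvents API; the standalone configuration and the cache name "myCache" are illustrative assumptions, and note that cache events must be enabled explicitly, since Ignite does not record them by default.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.events.CacheEvent;
import org.apache.ignite.events.EventType;
import org.apache.ignite.lang.IgnitePredicate;

public class CacheEventListenerSketch {
    public static void main(String[] args) {
        IgniteConfiguration cfg = new IgniteConfiguration();

        // Cache events are disabled by default; enable PUT events so that
        // GridCacheEventManager#addEvent actually records them.
        cfg.setIncludeEventTypes(EventType.EVT_CACHE_OBJECT_PUT);

        try (Ignite ignite = Ignition.start(cfg)) {
            IgnitePredicate<CacheEvent> lsnr = evt -> {
                System.out.println("Cache event [name=" + evt.name() + ", key=" + evt.key() + ']');

                return true; // Return true to stay subscribed.
            };

            ignite.events().localListen(lsnr, EventType.EVT_CACHE_OBJECT_PUT);

            ignite.getOrCreateCache("myCache").put(1, "one"); // Fires the listener.
        }
    }
}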
Use of org.apache.ignite.events.CacheEvent in project ignite by apache.
The class KafkaIgniteStreamerSelfTest, method consumerStream.
/**
 * Consumes Kafka stream via Ignite.
 *
 * @param topic Topic name.
 * @param keyValMap Expected key value map.
 * @throws TimeoutException If timed out.
 * @throws InterruptedException If interrupted.
 */
private void consumerStream(String topic, Map<String, String> keyValMap) throws TimeoutException, InterruptedException {
    KafkaStreamer<String, String> kafkaStmr = null;

    Ignite ignite = grid();

    try (IgniteDataStreamer<String, String> stmr = ignite.dataStreamer(DEFAULT_CACHE_NAME)) {
        stmr.allowOverwrite(true);
        stmr.autoFlushFrequency(10);

        // Configure Kafka streamer.
        kafkaStmr = new KafkaStreamer<>();

        // Get the cache.
        IgniteCache<String, String> cache = ignite.cache(DEFAULT_CACHE_NAME);

        // Set Ignite instance.
        kafkaStmr.setIgnite(ignite);

        // Set data streamer instance.
        kafkaStmr.setStreamer(stmr);

        // Set the topic.
        kafkaStmr.setTopic(topic);

        // Set the number of threads.
        kafkaStmr.setThreads(4);

        // Set the consumer configuration.
        kafkaStmr.setConsumerConfig(createDefaultConsumerConfig(embeddedBroker.getZookeeperAddress(), "groupX"));

        kafkaStmr.setMultipleTupleExtractor(new StreamMultipleTupleExtractor<MessageAndMetadata<byte[], byte[]>, String, String>() {
            @Override public Map<String, String> extract(MessageAndMetadata<byte[], byte[]> msg) {
                Map<String, String> entries = new HashMap<>();

                try {
                    String key = new String(msg.key());
                    String val = new String(msg.message());

                    // A message could be converted into several cache entries with the same key,
                    // or with a dynamic key derived from the actual message. For test purposes,
                    // use the message key as the cache entry key and the message value as the value.
                    entries.put(key, val);
                }
                catch (Exception ex) {
                    fail("Unexpected error: " + ex);
                }

                return entries;
            }
        });

        // Start the Kafka streamer.
        kafkaStmr.start();

        final CountDownLatch latch = new CountDownLatch(CNT);

        IgniteBiPredicate<UUID, CacheEvent> locLsnr = new IgniteBiPredicate<UUID, CacheEvent>() {
            @Override public boolean apply(UUID uuid, CacheEvent evt) {
                latch.countDown();

                return true;
            }
        };

        ignite.events(ignite.cluster().forCacheNodes(DEFAULT_CACHE_NAME)).remoteListen(locLsnr, null, EVT_CACHE_OBJECT_PUT);

        // Check that all events are processed successfully within 10 seconds.
        assertTrue(latch.await(10, TimeUnit.SECONDS));

        for (Map.Entry<String, String> entry : keyValMap.entrySet())
            assertEquals(entry.getValue(), cache.get(entry.getKey()));
    }
    finally {
        if (kafkaStmr != null)
            kafkaStmr.stop();
    }
}
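The method above exercises only the consuming side; the messages it expects are produced elsewhere in the test class. A hypothetical sketch of that producing step follows, assuming the embeddedBroker fixture exposes its broker address (the accessor name is an assumption, not taken from the snippet):

/** Hypothetical producer side for the consumer test above. */
private void produceTestData(String topic, Map<String, String> keyValMap) {
    Properties props = new Properties();

    props.put("bootstrap.servers", embeddedBroker.getBrokerAddress()); // Accessor name is assumed.
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

    try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
        // One Kafka record per expected cache entry; keys and values flow
        // through the tuple extractor unchanged.
        for (Map.Entry<String, String> entry : keyValMap.entrySet())
            producer.send(new ProducerRecord<>(topic, entry.getKey(), entry.getValue()));
    }
}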
Use of org.apache.ignite.events.CacheEvent in project ignite by apache.
The class IgniteJmsStreamerTest, method subscribeToPutEvents.
/**
 * @param expect Expected events number.
 * @return Event receive latch.
 */
private CountDownLatch subscribeToPutEvents(int expect) {
    Ignite ignite = grid();

    // Listen to cache PUT events and expect as many events as there are test data items.
    final CountDownLatch latch = new CountDownLatch(expect);

    @SuppressWarnings("serial")
    IgniteBiPredicate<UUID, CacheEvent> cb = new IgniteBiPredicate<UUID, CacheEvent>() {
        @Override public boolean apply(UUID uuid, CacheEvent evt) {
            latch.countDown();

            return true;
        }
    };

    ignite.events(ignite.cluster().forCacheNodes(DEFAULT_CACHE_NAME)).remoteListen(cb, null, EVT_CACHE_OBJECT_PUT);

    return latch;
}
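A typical call site for this helper, sketched under assumed names (produceMessages and MSG_CNT are illustrative, not from the snippet): subscribe before producing, then wait on the returned latch.

// Hypothetical usage of subscribeToPutEvents.
CountDownLatch latch = subscribeToPutEvents(MSG_CNT);

produceMessages(); // Assumed helper that pushes MSG_CNT test items through the JMS streamer.

assertTrue("Timed out waiting for cache PUT events.", latch.await(10, TimeUnit.SECONDS));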
Use of org.apache.ignite.events.CacheEvent in project ignite by apache.
The class SocketStreamerSelfTest, method test.
/**
 * @param converter Converter.
 * @param delim Delimiter.
 * @param r Runnable that feeds test data to the socket.
 * @param oneMessagePerTuple Whether each message produces a single tuple.
 * @throws Exception If failed.
 */
private void test(@Nullable SocketMessageConverter<Message> converter, @Nullable byte[] delim, Runnable r,
    boolean oneMessagePerTuple) throws Exception {
    SocketStreamer<Message, Integer, String> sockStmr = null;

    Ignite ignite = grid(0);

    IgniteCache<Integer, String> cache = ignite.cache(DEFAULT_CACHE_NAME);

    cache.clear();

    try (IgniteDataStreamer<Integer, String> stmr = ignite.dataStreamer(DEFAULT_CACHE_NAME)) {
        stmr.allowOverwrite(true);
        stmr.autoFlushFrequency(10);

        sockStmr = new SocketStreamer<>();

        sockStmr.setIgnite(ignite);
        sockStmr.setStreamer(stmr);
        sockStmr.setPort(port);
        sockStmr.setDelimiter(delim);

        if (oneMessagePerTuple) {
            sockStmr.setSingleTupleExtractor(new StreamSingleTupleExtractor<Message, Integer, String>() {
                @Override public Map.Entry<Integer, String> extract(Message msg) {
                    return new IgniteBiTuple<>(msg.key, msg.val);
                }
            });
        }
        else {
            sockStmr.setMultipleTupleExtractor(new StreamMultipleTupleExtractor<Message, Integer, String>() {
                @Override public Map<Integer, String> extract(Message msg) {
                    Map<Integer, String> answer = new HashMap<>();

                    for (int value : msg.values)
                        answer.put(value, Integer.toString(value));

                    return answer;
                }
            });
        }

        if (converter != null)
            sockStmr.setConverter(converter);

        final CountDownLatch latch = new CountDownLatch(CNT);

        final GridConcurrentHashSet<CacheEvent> evts = new GridConcurrentHashSet<>();

        IgniteBiPredicate<UUID, CacheEvent> locLsnr = new IgniteBiPredicate<UUID, CacheEvent>() {
            @Override public boolean apply(UUID uuid, CacheEvent evt) {
                evts.add(evt);

                latch.countDown();

                return true;
            }
        };

        ignite.events(ignite.cluster().forCacheNodes(DEFAULT_CACHE_NAME)).remoteListen(locLsnr, null, EVT_CACHE_OBJECT_PUT);

        sockStmr.start();

        r.run();

        latch.await();

        for (int i = 0; i < CNT; i++) {
            Object val = cache.get(i);
            String exp = Integer.toString(i);

            if (!exp.equals(val))
                log.error("Unexpected cache value [key=" + i + ", exp=" + exp + ", val=" + val + ", evts=" + evts + ']');

            assertEquals(exp, val);
        }

        assertEquals(CNT, cache.size(CachePeekMode.PRIMARY));
    }
    finally {
        if (sockStmr != null)
            sockStmr.stop();
    }
}
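The Runnable r supplies the client side of the exchange. Below is a sketch of a delimiter-based sender; serialize(...) and createMessage(...) are hypothetical helpers, since the actual wire format depends on the converter passed to the test and must match what the server-side SocketStreamer decodes.

// Hypothetical client Runnable for the delimiter-based variant.
Runnable r = () -> {
    try (Socket sock = new Socket(InetAddress.getLocalHost(), port);
         OutputStream os = new BufferedOutputStream(sock.getOutputStream())) {
        for (int i = 0; i < CNT; i++) {
            os.write(serialize(createMessage(i))); // Both helpers are assumed.
            os.write(delim); // The delimiter marks the end of each record.
        }

        os.flush();
    }
    catch (IOException e) {
        throw new RuntimeException(e);
    }
};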
Use of org.apache.ignite.events.CacheEvent in project ignite by apache.
The class IgniteSourceConnectorTest, method checkDataDelivered.
/**
 * Checks if events were delivered to Kafka server.
 *
 * @param expectedEventsCnt Expected events count.
 * @throws Exception If failed.
 */
private void checkDataDelivered(final int expectedEventsCnt) throws Exception {
    Properties props = new Properties();

    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBroker.getBrokerAddress());
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "test-grp");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
    props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 10000);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.ignite.stream.kafka.connect.serialization.CacheEventDeserializer");

    final KafkaConsumer<String, CacheEvent> consumer = new KafkaConsumer<>(props);

    consumer.subscribe(Arrays.asList(TOPICS));

    final AtomicInteger evtCnt = new AtomicInteger();

    try {
        // Wait for the expected events count.
        GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                ConsumerRecords<String, CacheEvent> records = consumer.poll(10);

                for (ConsumerRecord<String, CacheEvent> record : records) {
                    info("Record: " + record);

                    evtCnt.getAndIncrement();
                }

                return evtCnt.get() >= expectedEventsCnt;
            }
        }, 20_000);

        info("Waiting for unexpected records for 5 secs.");

        assertFalse(GridTestUtils.waitForCondition(new GridAbsPredicate() {
            @Override public boolean apply() {
                ConsumerRecords<String, CacheEvent> records = consumer.poll(10);

                for (ConsumerRecord<String, CacheEvent> record : records) {
                    error("Unexpected record: " + record);

                    evtCnt.getAndIncrement();
                }

                return evtCnt.get() > expectedEventsCnt;
            }
        }, 5_000));
    }
    catch (WakeupException ignored) {
        // Ignore for shutdown.
    }
    finally {
        consumer.close();

        assertEquals(expectedEventsCnt, evtCnt.get());
    }
}
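checkDataDelivered only consumes from Kafka; the CacheEvents it waits for come from cache activity on the Ignite side, which the source connector forwards to the topic. A sketch of that producing step follows, with CACHE_NAME and EVENT_CNT as assumed constants:

// Hypothetical event-producing side: each put on a cache with PUT events
// enabled yields one CacheEvent, which the source connector forwards to Kafka.
IgniteCache<String, String> cache = grid().cache(CACHE_NAME); // CACHE_NAME is assumed.

for (int i = 0; i < EVENT_CNT; i++) // EVENT_CNT is assumed.
    cache.put("key-" + i, "value-" + i);

checkDataDelivered(EVENT_CNT);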