Example usage of co.cask.cdap.notifications.service.NotificationContext in the project cdap by caskdata.
From the class DistributedStreamService, method subscribeToHeartbeatsFeed:
/**
 * Subscribe to the streams heartbeat notification feed. A single heartbeat carries size data for all
 * existing streams; the handler only acts on streams for which this {@link DistributedStreamService}
 * currently owns an aggregator, i.e. the streams it is the leader of.
 *
 * @return a {@link Cancellable} to cancel the subscription
 * @throws NotificationFeedNotFoundException if the heartbeat feed does not exist
 */
private Cancellable subscribeToHeartbeatsFeed() throws NotificationFeedNotFoundException {
  LOG.debug("Subscribing to stream heartbeats notification feed");
  NotificationFeedId feedId = new NotificationFeedId(
    NamespaceId.SYSTEM.getNamespace(),
    Constants.Notification.Stream.STREAM_INTERNAL_FEED_CATEGORY,
    Constants.Notification.Stream.STREAM_HEARTBEAT_FEED_NAME);

  // The handler is stateless, so a single instance can be reused across retry attempts.
  NotificationHandler<StreamWriterHeartbeat> handler = new NotificationHandler<StreamWriterHeartbeat>() {
    @Override
    public Type getNotificationType() {
      return StreamWriterHeartbeat.class;
    }

    @Override
    public void received(StreamWriterHeartbeat heartbeat, NotificationContext notificationContext) {
      LOG.trace("Received heartbeat {}", heartbeat);
      for (Map.Entry<StreamId, Long> sizeEntry : heartbeat.getStreamsSizes().entrySet()) {
        // No aggregator registered means this service is not the leader of that stream: skip it.
        StreamSizeAggregator aggregator = aggregators.get(sizeEntry.getKey());
        if (aggregator == null) {
          LOG.trace("Aggregator for stream {} is null", sizeEntry.getKey());
          continue;
        }
        aggregator.bytesReceived(heartbeat.getInstanceId(), sizeEntry.getValue());
      }
    }
  };

  // Retry indefinitely until the subscription succeeds; log loudly only on the first failure.
  boolean firstAttempt = true;
  while (true) {
    try {
      return notificationService.subscribe(feedId, handler, heartbeatsSubscriptionExecutor);
    } catch (NotificationFeedException e) {
      if (firstAttempt) {
        LOG.warn("Unable to subscribe to HeartbeatsFeed. Will retry until successfully subscribed. "
                   + "Retry failures will be logged at debug level.", e);
        firstAttempt = false;
      } else {
        LOG.debug("Unable to subscribe to HeartbeatsFeed. Will retry until successfully subscribed. ", e);
      }
      waitBeforeRetryHeartbeatsFeedOperation();
    }
  }
}
Example usage of co.cask.cdap.notifications.service.NotificationContext in the project cdap by caskdata.
From the class NotificationTest, method testPubSub:
/**
 * Testing publishers/subscribers interaction.
 *
 * <p>NOTE(review): the expected-message count and the final verification both look up published
 * messages for every subscribed feed, so this method assumes {@code subFeeds} is a subset of
 * {@code pubFeeds} — confirm against callers.
 *
 * @param pubFeeds set of feeds to publish to
 * @param publishersPerFeed number of publishers doing concurrent publishing for each feed
 * @param messagesPerPublisher number of messages being published by each publisher
 * @param subFeeds set of feeds to subscribe to
 * @param subscribersPerFeed number of subscribers for each feed
 * @param payloadType Class representing the data type of the payload of the notification
 * @param payloadFunction a function that transforms {@link SimpleNotification} type to the payload type
 * @param <T> type of the payload
 */
private <T> void testPubSub(Set<NotificationFeedId> pubFeeds, int publishersPerFeed,
                            final int messagesPerPublisher, Set<NotificationFeedId> subFeeds,
                            int subscribersPerFeed, final Class<T> payloadType,
                            final Function<SimpleNotification, T> payloadFunction) throws Exception {
  // Create every feed involved in the test; Sets.union de-duplicates feeds used for both pub and sub.
  for (NotificationFeedId feedId : Sets.union(pubFeeds, subFeeds)) {
    NotificationFeedInfo feedInfo =
      new NotificationFeedInfo(feedId.getNamespace(), feedId.getCategory(), feedId.getFeed(), "");
    Assert.assertTrue(feedManager.createFeed(feedInfo));
  }
  try {
    // Each message published to a subscribed feed is delivered once per subscriber of that feed.
    int totalMessages = subFeeds.size() * publishersPerFeed * messagesPerPublisher * subscribersPerFeed;
    final CountDownLatch latch = new CountDownLatch(totalMessages);
    final Queue<T> receivedMessages = new ConcurrentLinkedQueue<>();
    List<Cancellable> cancellables = Lists.newArrayList();
    try {
      // Start all subscribers first so no published message is missed.
      for (NotificationFeedId feedId : subFeeds) {
        for (int i = 0; i < subscribersPerFeed; i++) {
          Cancellable cancellable = notificationService.subscribe(feedId, new NotificationHandler<T>() {
            @Override
            public Type getNotificationType() {
              return payloadType;
            }

            @Override
            public void received(T notification, NotificationContext notificationContext) {
              LOG.debug("Received notification payload: {}", notification);
              receivedMessages.offer(notification);
              latch.countDown();
            }
          });
          cancellables.add(cancellable);
        }
      }

      // Give the subscriber some time to prepare for published messages before starting the publisher
      TimeUnit.MILLISECONDS.sleep(500);

      // Starts publishers
      final Map<NotificationFeedId, Queue<T>> publishedMessages = new ConcurrentHashMap<>();
      ExecutorService executor = Executors.newFixedThreadPool(pubFeeds.size() * publishersPerFeed);
      try {
        for (final NotificationFeedId feedId : pubFeeds) {
          final Queue<T> publishedQueue = new ConcurrentLinkedQueue<>();
          publishedMessages.put(feedId, publishedQueue);

          // Let all publishers of this feed start together
          final CyclicBarrier publisherBarrier = new CyclicBarrier(publishersPerFeed);
          for (int i = 0; i < publishersPerFeed; i++) {
            final int publisherId = i;
            executor.submit(new Callable<Void>() {
              @Override
              public Void call() throws Exception {
                publisherBarrier.await();
                // Loop variable renamed from "i": the original shadowed the enclosing loop's "i",
                // which compiles but is easy to misread.
                for (int msgIdx = 0; msgIdx < messagesPerPublisher; msgIdx++) {
                  T notification = payloadFunction.apply(
                    new SimpleNotification(publisherId, String.format("%s-%d", feedId, msgIdx)));
                  notificationService.publish(feedId, notification);
                  publishedQueue.add(notification);
                  // Small pause between publishes to spread messages over time.
                  TimeUnit.MILLISECONDS.sleep(10);
                }
                return null;
              }
            });
          }
        }

        // Wait for subscriptions getting all messages.
        // BUG FIX: the original passed (5000, TimeUnit.SECONDS), i.e. a failed run would block for
        // ~83 minutes before timing out. Every other timeout in this class is 5 seconds, so the
        // intended unit is clearly milliseconds.
        Assert.assertTrue(latch.await(5000, TimeUnit.MILLISECONDS));
      } finally {
        executor.shutdown();
      }

      // Verify the result: the total delivery count matches, and per distinct published payload the
      // received count matches the expected fan-out.
      Multiset<T> received = HashMultiset.create(receivedMessages);
      Assert.assertEquals(totalMessages, received.size());
      // there should be (publisher per feed * subscriber per feed) of them
      for (NotificationFeedId feedId : subFeeds) {
        for (T notification : ImmutableMultiset.copyOf(publishedMessages.get(feedId)).elementSet()) {
          Assert.assertEquals(publishersPerFeed * subscribersPerFeed, received.count(notification));
        }
      }
    } finally {
      // Always tear down subscriptions, even when assertions fail.
      for (Cancellable cancellable : cancellables) {
        cancellable.cancel();
      }
    }
  } finally {
    // Best-effort cleanup of all feeds created at the top of the method.
    for (NotificationFeedId feedId : Sets.union(pubFeeds, subFeeds)) {
      feedManager.deleteFeed(feedId);
    }
  }
}
Example usage of co.cask.cdap.notifications.service.NotificationContext in the project cdap by caskdata.
From the class NotificationTest, method useTransactionTest:
// NOTE(review): exercises NotificationContext.execute() end-to-end: a subscriber writes the received
// payload into a KeyValueTable inside a transaction, and the test then polls the table in its own
// transaction until the write becomes visible.
@Test
public void useTransactionTest() throws Exception {
  // Performing admin operations to create dataset instance
  // keyValueTable is a system dataset module
  namespaceAdmin.create(new NamespaceMeta.Builder().setName(namespace).build());
  DatasetId myTableInstance = namespace.dataset("myTable");
  dsFramework.addInstance("keyValueTable", myTableInstance, DatasetProperties.EMPTY);
  // Released once the subscriber's transactional write has run.
  final CountDownLatch receivedLatch = new CountDownLatch(1);
  Assert.assertTrue(feedManager.createFeed(FEED1_INFO));
  try {
    Cancellable cancellable = notificationService.subscribe(FEED1, new NotificationHandler<String>() {
      // Count of notifications handled so far; appended to the stored value as "<payload>-<n>".
      private int received = 0;

      @Override
      public Type getNotificationType() {
        return String.class;
      }

      @Override
      public void received(final String notification, NotificationContext notificationContext) {
        // Perform the dataset write inside a transaction managed by the notification context,
        // retrying up to 5 times on transaction failure.
        notificationContext.execute(new TxRunnable() {
          @Override
          public void run(DatasetContext context) throws Exception {
            KeyValueTable table = context.getDataset("myTable");
            table.write("foo", String.format("%s-%d", notification, received++));
            receivedLatch.countDown();
          }
        }, TxRetryPolicy.maxRetries(5));
      }
    });
    // Short delay for the subscriber to setup the subscription.
    TimeUnit.MILLISECONDS.sleep(500);
    try {
      notificationService.publish(FEED1, "foobar");
      // Waiting for the subscriber to receive that notification
      Assert.assertTrue(receivedLatch.await(5, TimeUnit.SECONDS));
      // Read the KeyValueTable for the value updated from the subscriber.
      // Need to poll it couple times since after the received method returned,
      // the tx may not yet committed when we try to read it here.
      final KeyValueTable table = dsFramework.getDataset(myTableInstance, DatasetDefinition.NO_ARGUMENTS, null);
      Assert.assertNotNull(table);
      final TransactionContext txContext = new TransactionContext(txClient, table);
      Tasks.waitFor(true, new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
          // Each poll runs in its own short transaction; expect the value written by the handler
          // for the first notification ("foobar" with counter 0).
          txContext.start();
          try {
            return "foobar-0".equals(Bytes.toString(table.read("foo")));
          } finally {
            txContext.finish();
          }
        }
      }, 5, TimeUnit.SECONDS);
    } finally {
      cancellable.cancel();
    }
  } finally {
    // Cleanup: drop the dataset, the feed and the namespace regardless of test outcome.
    dsFramework.deleteInstance(myTableInstance);
    feedManager.deleteFeed(FEED1);
    namespaceAdmin.delete(namespace);
  }
}
Aggregations