use of org.apache.twill.common.Cancellable in project cdap by caskdata.
the class AbstractSparkSubmitter method submit.
/**
* Submits the Spark job using {@link SparkSubmit}.
*
* @param runtimeContext context representing the Spark program
* @param args arguments for the {@link SparkSubmit#main(String[])} method.
*/
private void submit(SparkRuntimeContext runtimeContext, String[] args) {
  Cancellable cancellable = SparkRuntimeUtils.setContextClassLoader(new SparkClassLoader(runtimeContext));
  try {
    LOG.debug("Calling SparkSubmit for {} {}: {}",
              runtimeContext.getProgram().getId(), runtimeContext.getRunId(), Arrays.toString(args));
    // Explicitly set the SPARK_SUBMIT property as it is no longer set on the System properties by the SparkSubmit
    // after the class rewrite. This property only controls logging of a warning when submitting the Spark job,
    // hence it's harmless to just leave it there.
    System.setProperty("SPARK_SUBMIT", "true");
    SparkSubmit.main(args);
    LOG.debug("SparkSubmit returned for {} {}", runtimeContext.getProgram().getId(), runtimeContext.getRunId());
  } finally {
    cancellable.cancel();
  }
}
use of org.apache.twill.common.Cancellable in project cdap by caskdata.
the class SparkRuntimeUtils method setContextClassLoader.
/**
* Sets the context ClassLoader to the given {@link SparkClassLoader}. It will also set the
* ClassLoader for the {@link Configuration} contained inside the {@link SparkClassLoader}.
*
* @return a {@link Cancellable} to reset the classloader to the one prior to the call
*/
public static Cancellable setContextClassLoader(final SparkClassLoader sparkClassLoader) {
  final Configuration hConf = sparkClassLoader.getRuntimeContext().getConfiguration();
  final ClassLoader oldConfClassLoader = hConf.getClassLoader();
  // Always wrap it with WeakReference to avoid ClassLoader leakage from Spark.
  ClassLoader classLoader = new WeakReferenceDelegatorClassLoader(sparkClassLoader);
  hConf.setClassLoader(classLoader);
  final ClassLoader oldClassLoader = ClassLoaders.setContextClassLoader(classLoader);
  return new Cancellable() {
    @Override
    public void cancel() {
      hConf.setClassLoader(oldConfClassLoader);
      ClassLoaders.setContextClassLoader(oldClassLoader);
      // Do not remove the next line.
      // It keeps a strong reference to the SparkClassLoader so that it won't get garbage collected
      // until this cancel() is called.
      LOG.trace("Reset context ClassLoader. The SparkClassLoader is: {}", sparkClassLoader);
    }
  };
}
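A minimal caller sketch of the pattern above (assuming an existing SparkRuntimeContext named runtimeContext, mirroring the submit method in the first example): the returned Cancellable restores the previous thread context and Configuration ClassLoaders, so it is typically cancelled in a finally block.
// Sketch only: runtimeContext is assumed to be an existing SparkRuntimeContext, as in submit() above.
Cancellable restoreClassLoader = SparkRuntimeUtils.setContextClassLoader(new SparkClassLoader(runtimeContext));
try {
  // Run code that expects the SparkClassLoader as the thread context ClassLoader.
} finally {
  // Restores both the thread context ClassLoader and the Configuration ClassLoader.
  restoreClassLoader.cancel();
}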
use of org.apache.twill.common.Cancellable in project cdap by caskdata.
the class MessagingNotificationService method subscribe.
@Override
public <N> Cancellable subscribe(NotificationFeedId feed, NotificationHandler<N> handler, Executor executor)
  throws NotificationFeedNotFoundException, NotificationFeedException {
  Cancellable subscribeCancellable = super.subscribe(feed, handler, executor);
  // If there is already a thread fetching, just return the cancellable.
  if (!needFetch.compareAndSet(false, true)) {
    return subscribeCancellable;
  }
  // Start fetching
  subscribeExecutor.execute(new Runnable() {
    private final long startTime = System.currentTimeMillis();
    private final RetryStrategy scheduleStrategy =
      RetryStrategies.exponentialDelay(100, 3000, TimeUnit.MILLISECONDS);
    private byte[] messageId;
    private int emptyFetchCount;

    @Override
    public void run() {
      try {
        MessageFetcher fetcher = messagingService.prepareFetch(notificationTopic);
        if (messageId == null) {
          fetcher.setStartTime(startTime);
        } else {
          fetcher.setStartMessage(messageId, false);
        }
        emptyFetchCount++;
        try (CloseableIterator<RawMessage> iterator = fetcher.fetch()) {
          while (iterator.hasNext()) {
            emptyFetchCount = 0;
            RawMessage rawMessage = iterator.next();
            NotificationMessage message =
              GSON.fromJson(new String(rawMessage.getPayload(), StandardCharsets.UTF_8), NotificationMessage.class);
            try {
              LOG.trace("Decoded notification: {}", message);
              notificationReceived(message.getFeedId(), message.getNotificationJson());
            } catch (Throwable t) {
              LOG.warn("Error while processing notification {} with handler {}", message, t);
            }
            messageId = rawMessage.getId();
          }
        }
      } catch (Exception e) {
        LOG.error("Failed to get notification", e);
      }
      // Back off if the fetch was empty.
      if (emptyFetchCount > 0) {
        // Schedule the next fetch. The exponential strategy doesn't use the time component,
        // so it doesn't matter what we pass in.
        subscribeExecutor.schedule(this, scheduleStrategy.nextRetry(emptyFetchCount, startTime), TimeUnit.MILLISECONDS);
      } else {
        subscribeExecutor.execute(this);
      }
    }
  });
  return subscribeCancellable;
}
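A hedged subscriber sketch showing how the returned Cancellable is typically used; the handler shape follows the NotificationTest example below, and feedId, notificationService, and executor are assumed to already exist.
// Sketch only: feedId, notificationService and executor are assumed to be available.
Cancellable subscription = notificationService.subscribe(feedId, new NotificationHandler<String>() {
  @Override
  public Type getNotificationType() {
    return String.class;
  }

  @Override
  public void received(String notification, NotificationContext context) {
    LOG.debug("Received notification: {}", notification);
  }
}, executor);
// Later, stop delivery to this handler.
subscription.cancel();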
use of org.apache.twill.common.Cancellable in project cdap by caskdata.
the class NotificationTest method testPubSub.
/**
* Testing publishers/subscribers interaction.
*
* @param pubFeeds set of feeds to publish to
* @param publishersPerFeed number of publishers doing concurrent publishing for each feed
* @param messagesPerPublisher number of messages being published by each publisher
* @param subFeeds set of feeds to subscribe to
* @param subscribersPerFeed number of subscribers for each feed
* @param payloadType Class representing the data type of the notification payload
* @param payloadFunction a function that transforms a {@link SimpleNotification} into the payload type
* @param <T> type of the payload
*/
private <T> void testPubSub(Set<NotificationFeedId> pubFeeds, int publishersPerFeed, final int messagesPerPublisher,
                            Set<NotificationFeedId> subFeeds, int subscribersPerFeed, final Class<T> payloadType,
                            final Function<SimpleNotification, T> payloadFunction) throws Exception {
  for (NotificationFeedId feedId : Sets.union(pubFeeds, subFeeds)) {
    NotificationFeedInfo feedInfo =
      new NotificationFeedInfo(feedId.getNamespace(), feedId.getCategory(), feedId.getFeed(), "");
    Assert.assertTrue(feedManager.createFeed(feedInfo));
  }
  try {
    int totalMessages = subFeeds.size() * publishersPerFeed * messagesPerPublisher * subscribersPerFeed;
    final CountDownLatch latch = new CountDownLatch(totalMessages);
    final Queue<T> receivedMessages = new ConcurrentLinkedQueue<>();
    List<Cancellable> cancellables = Lists.newArrayList();
    try {
      for (NotificationFeedId feedId : subFeeds) {
        for (int i = 0; i < subscribersPerFeed; i++) {
          Cancellable cancellable = notificationService.subscribe(feedId, new NotificationHandler<T>() {
            @Override
            public Type getNotificationType() {
              return payloadType;
            }

            @Override
            public void received(T notification, NotificationContext notificationContext) {
              LOG.debug("Received notification payload: {}", notification);
              receivedMessages.offer(notification);
              latch.countDown();
            }
          });
          cancellables.add(cancellable);
        }
      }
      // Give the subscribers some time to prepare for published messages before starting the publishers
      TimeUnit.MILLISECONDS.sleep(500);
      // Start the publishers
      final Map<NotificationFeedId, Queue<T>> publishedMessages = new ConcurrentHashMap<>();
      ExecutorService executor = Executors.newFixedThreadPool(pubFeeds.size() * publishersPerFeed);
      try {
        for (final NotificationFeedId feedId : pubFeeds) {
          final Queue<T> publishedQueue = new ConcurrentLinkedQueue<>();
          publishedMessages.put(feedId, publishedQueue);
          // Let all publishers start together
          final CyclicBarrier publisherBarrier = new CyclicBarrier(publishersPerFeed);
          for (int i = 0; i < publishersPerFeed; i++) {
            final int publisherId = i;
            executor.submit(new Callable<Void>() {
              @Override
              public Void call() throws Exception {
                publisherBarrier.await();
                for (int i = 0; i < messagesPerPublisher; i++) {
                  T notification =
                    payloadFunction.apply(new SimpleNotification(publisherId, String.format("%s-%d", feedId, i)));
                  notificationService.publish(feedId, notification);
                  publishedQueue.add(notification);
                  TimeUnit.MILLISECONDS.sleep(10);
                }
                return null;
              }
            });
          }
        }
        // Wait for the subscribers to receive all messages
        Assert.assertTrue(latch.await(5000, TimeUnit.SECONDS));
      } finally {
        executor.shutdown();
      }
      // Verify the result.
      Multiset<T> received = HashMultiset.create(receivedMessages);
      Assert.assertEquals(totalMessages, received.size());
      // Each distinct notification should be received (publishers per feed * subscribers per feed) times
      for (NotificationFeedId feedId : subFeeds) {
        for (T notification : ImmutableMultiset.copyOf(publishedMessages.get(feedId)).elementSet()) {
          Assert.assertEquals(publishersPerFeed * subscribersPerFeed, received.count(notification));
        }
      }
    } finally {
      for (Cancellable cancellable : cancellables) {
        cancellable.cancel();
      }
    }
  } finally {
    for (NotificationFeedId feedId : Sets.union(pubFeeds, subFeeds)) {
      feedManager.deleteFeed(feedId);
    }
  }
}
use of org.apache.twill.common.Cancellable in project cdap by caskdata.
the class ResourceBalancerServiceTest method testServiceStopFailure.
@Test
public void testServiceStopFailure() throws Exception {
  ZKClientService zkClient = ZKClientService.Builder.of(zkServer.getConnectionStr()).build();
  zkClient.startAndWait();
  try (ZKDiscoveryService discoveryService = new ZKDiscoveryService(zkClient)) {
    // Test the failure on stop case
    final TestBalancerService stopFailureService =
      new TestBalancerService("test", 4, zkClient, discoveryService, discoveryService, false, true);
    stopFailureService.startAndWait();
    // Should get four partitions
    Tasks.waitFor(ImmutableSet.of(0, 1, 2, 3), new Callable<Set<Integer>>() {
      @Override
      public Set<Integer> call() throws Exception {
        return stopFailureService.getPartitions();
      }
    }, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    // Register a new discoverable; this should trigger a partition change in the resource balancer service
    Cancellable cancellable = discoveryService.register(
      new Discoverable("test", new InetSocketAddress(InetAddress.getLoopbackAddress(), 1234)));
    try {
      // When an exception is thrown by the underlying service during the partition change,
      // the resource balancer service should fail.
      Tasks.waitFor(Service.State.FAILED, new Callable<Service.State>() {
        @Override
        public Service.State call() throws Exception {
          return stopFailureService.state();
        }
      }, 10, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
    } finally {
      cancellable.cancel();
    }
  } finally {
    zkClient.stopAndWait();
  }
}
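The same register/cancel pairing from the test above, as a minimal standalone sketch (the service name and port are placeholders taken from the test):
// Sketch only: the Cancellable returned by register() deregisters the Discoverable when cancelled.
Cancellable registration = discoveryService.register(
  new Discoverable("test", new InetSocketAddress(InetAddress.getLoopbackAddress(), 1234)));
try {
  // The service is visible to other cluster members while registered.
} finally {
  registration.cancel();
}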