Use of co.cask.cdap.api.dataset.lib.CloseableIterator in project cdap by caskdata.
In class MessagingAppTestRun, the method testSparkMessaging:
@Test
public void testSparkMessaging() throws Exception {
  ApplicationManager appManager = deployWithArtifact(NAMESPACE, MessagingApp.class, artifactJar);
  final SparkManager sparkManager = appManager.getSparkManager(MessagingSpark.class.getSimpleName()).start();
  final MessageFetcher fetcher = getMessagingContext().getMessageFetcher();
  final AtomicReference<String> messageId = new AtomicReference<>();

  // Wait for the Spark program to create the topic
  final MessagingAdmin messagingAdmin = getMessagingAdmin(NAMESPACE.getNamespace());
  Tasks.waitFor(true, new Callable<Boolean>() {
    @Override
    public Boolean call() throws Exception {
      try {
        messagingAdmin.getTopicProperties(MessagingApp.TOPIC);
        return true;
      } catch (TopicNotFoundException e) {
        return false;
      }
    }
  }, 60, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);

  // Waiting for the "start" and "block" messages also verifies that a failed
  // transaction does not publish anything in between.
  for (String expected : Arrays.asList("start", "block")) {
    Tasks.waitFor(expected, new Callable<String>() {
      @Override
      public String call() throws Exception {
        try (CloseableIterator<Message> iterator =
               fetcher.fetch(NAMESPACE.getNamespace(), MessagingApp.TOPIC, 1, messageId.get())) {
          if (!iterator.hasNext()) {
            return null;
          }
          Message message = iterator.next();
          messageId.set(message.getId());
          return message.getPayloadAsString();
        }
      }
    }, 60, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);
  }

  // Publish a control message to unblock the Spark execution
  getMessagingContext().getMessagePublisher().publish(NAMESPACE.getNamespace(), MessagingApp.CONTROL_TOPIC, "go");

  // Expect a result message of "result-15", where 15 is the sum of 1,2,3,4,5
  Tasks.waitFor("result-15", new Callable<String>() {
    @Override
    public String call() throws Exception {
      try (CloseableIterator<Message> iterator =
             fetcher.fetch(NAMESPACE.getNamespace(), MessagingApp.TOPIC, 1, messageId.get())) {
        if (!iterator.hasNext()) {
          return null;
        }
        Message message = iterator.next();
        messageId.set(message.getId());
        return message.getPayloadAsString();
      }
    }
  }, 60, TimeUnit.SECONDS, 100, TimeUnit.MILLISECONDS);

  sparkManager.waitForRun(ProgramRunStatus.COMPLETED, 60, TimeUnit.SECONDS);
}
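The two Callable bodies above are identical: fetch at most one message published after the last-seen message id, remember the new id, and return the payload. As a sketch, that idiom could be factored into a helper (fetchNextPayload is an illustrative name, not part of the test):

private static String fetchNextPayload(MessageFetcher fetcher, String namespace, String topic,
                                       AtomicReference<String> lastMessageId) throws Exception {
  // Fetch at most one message published after the given message id.
  try (CloseableIterator<Message> iterator = fetcher.fetch(namespace, topic, 1, lastMessageId.get())) {
    if (!iterator.hasNext()) {
      return null;
    }
    Message message = iterator.next();
    lastMessageId.set(message.getId());
    return message.getPayloadAsString();
  }
}

Each Tasks.waitFor call could then pass () -> fetchNextPayload(fetcher, NAMESPACE.getNamespace(), MessagingApp.TOPIC, messageId) as its Callable.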
Use of co.cask.cdap.api.dataset.lib.CloseableIterator in project cdap by caskdata.
In class MessagingNotificationService, the method subscribe:
@Override
public <N> Cancellable subscribe(NotificationFeedId feed, NotificationHandler<N> handler, Executor executor)
  throws NotificationFeedNotFoundException, NotificationFeedException {
  Cancellable subscribeCancellable = super.subscribe(feed, handler, executor);
  // If a fetching thread is already running, just return the cancellable.
  if (!needFetch.compareAndSet(false, true)) {
    return subscribeCancellable;
  }
  // Start fetching
  subscribeExecutor.execute(new Runnable() {
    private final long startTime = System.currentTimeMillis();
    private final RetryStrategy scheduleStrategy = RetryStrategies.exponentialDelay(100, 3000, TimeUnit.MILLISECONDS);
    private byte[] messageId;
    private int emptyFetchCount;

    @Override
    public void run() {
      try {
        MessageFetcher fetcher = messagingService.prepareFetch(notificationTopic);
        if (messageId == null) {
          fetcher.setStartTime(startTime);
        } else {
          fetcher.setStartMessage(messageId, false);
        }
        emptyFetchCount++;
        try (CloseableIterator<RawMessage> iterator = fetcher.fetch()) {
          while (iterator.hasNext()) {
            emptyFetchCount = 0;
            RawMessage rawMessage = iterator.next();
            NotificationMessage message = GSON.fromJson(new String(rawMessage.getPayload(), StandardCharsets.UTF_8),
                                                        NotificationMessage.class);
            try {
              LOG.trace("Decoded notification: {}", message);
              notificationReceived(message.getFeedId(), message.getNotificationJson());
            } catch (Throwable t) {
              LOG.warn("Error while processing notification {}", message, t);
            }
            messageId = rawMessage.getId();
          }
        }
      } catch (Exception e) {
        LOG.error("Failed to get notification", e);
      }
      // Back off if the fetch returned nothing.
      if (emptyFetchCount > 0) {
        // Schedule the next fetch. The exponential strategy doesn't use the time component,
        // so it doesn't matter what we pass in.
        subscribeExecutor.schedule(this, scheduleStrategy.nextRetry(emptyFetchCount, startTime), TimeUnit.MILLISECONDS);
      } else {
        subscribeExecutor.execute(this);
      }
    }
  });
  return subscribeCancellable;
}
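Illustrative only: how the exponential strategy above spaces out the polling. With RetryStrategies.exponentialDelay(100, 3000, TimeUnit.MILLISECONDS) the delay roughly doubles from 100 ms up to the 3000 ms cap, and (as the comment in the code notes) the startTime argument is ignored by this strategy:

RetryStrategy strategy = RetryStrategies.exponentialDelay(100, 3000, TimeUnit.MILLISECONDS);
for (int emptyFetchCount = 1; emptyFetchCount <= 6; emptyFetchCount++) {
  // Prints the back-off delay that would be scheduled after N consecutive empty fetches.
  System.out.println(emptyFetchCount + " empty fetch(es) -> "
      + strategy.nextRetry(emptyFetchCount, 0L) + " ms");
}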
Use of co.cask.cdap.api.dataset.lib.CloseableIterator in project cdap by caskdata.
In class FileLogReader, the method getLog:
@Override
public CloseableIterator<LogEvent> getLog(LoggingContext loggingContext, final long fromTimeMs,
                                          final long toTimeMs, Filter filter) {
  try {
    final Filter logFilter = new AndFilter(ImmutableList.of(LoggingContextHelper.createFilter(loggingContext), filter));
    LOG.trace("Using fromTimeMs={}, toTimeMs={}", fromTimeMs, toTimeMs);
    List<LogLocation> sortedFilesInRange =
      fileMetadataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(loggingContext), fromTimeMs, toTimeMs);
    if (sortedFilesInRange.isEmpty()) {
      // Return an empty iterator
      return new AbstractCloseableIterator<LogEvent>() {
        @Override
        protected LogEvent computeNext() {
          return endOfData();
        }

        @Override
        public void close() {
          // no-op
        }
      };
    }
    final Iterator<LogLocation> filesIter = sortedFilesInRange.iterator();
    CloseableIterator<CloseableIterator<LogEvent>> closeableIterator =
      new CloseableIterator<CloseableIterator<LogEvent>>() {
        private CloseableIterator<LogEvent> curr = null;

        @Override
        public void close() {
          if (curr != null) {
            curr.close();
          }
        }

        @Override
        public boolean hasNext() {
          return filesIter.hasNext();
        }

        @Override
        public CloseableIterator<LogEvent> next() {
          if (curr != null) {
            curr.close();
          }
          LogLocation file = filesIter.next();
          LOG.trace("Reading file {}", file);
          curr = file.readLog(logFilter, fromTimeMs, toTimeMs, Integer.MAX_VALUE);
          return curr;
        }

        @Override
        public void remove() {
          throw new UnsupportedOperationException("Remove not supported");
        }
      };
    return concat(closeableIterator);
  } catch (Throwable e) {
    LOG.error("Got exception: ", e);
    throw Throwables.propagate(e);
  }
}
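The concat helper invoked on the last line is not shown on this page. A minimal sketch of what such a helper could look like (not necessarily the actual CDAP implementation) flattens the iterator of iterators, relying on the outer iterator's next() to close the previously opened file and on its close() to close the current one:

private static <T> CloseableIterator<T> concat(final CloseableIterator<CloseableIterator<T>> iterators) {
  return new CloseableIterator<T>() {
    private CloseableIterator<T> current;

    @Override
    public boolean hasNext() {
      // Advance past exhausted inner iterators until one has an element.
      while ((current == null || !current.hasNext()) && iterators.hasNext()) {
        current = iterators.next();
      }
      return current != null && current.hasNext();
    }

    @Override
    public T next() {
      if (!hasNext()) {
        throw new java.util.NoSuchElementException();
      }
      return current.next();
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException();
    }

    @Override
    public void close() {
      // Closing the outer iterator closes the currently open inner iterator.
      iterators.close();
    }
  };
}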
Use of co.cask.cdap.api.dataset.lib.CloseableIterator in project cdap by caskdata.
In class ETLWorker, the method initialize:
@Override
public void initialize(final WorkerContext context) throws Exception {
  if (Boolean.valueOf(context.getSpecification().getProperty(Constants.STAGE_LOGGING_ENABLED))) {
    LogStageInjector.start();
  }
  super.initialize(context);
  Map<String, String> properties = context.getSpecification().getProperties();
  appName = context.getApplicationSpecification().getName();
  Preconditions.checkArgument(properties.containsKey(Constants.PIPELINEID));
  Preconditions.checkArgument(properties.containsKey(UNIQUE_ID));
  String uniqueId = properties.get(UNIQUE_ID);

  // Each worker instance should have its own unique state.
  final String appName = context.getApplicationSpecification().getName();
  stateStoreKey = String.format("%s%s%s%s%s", appName, SEPARATOR, uniqueId, SEPARATOR, context.getInstanceId());
  stateStoreKeyBytes = Bytes.toBytes(stateStoreKey);
  Transactionals.execute(getContext(), new TxRunnable() {
    @Override
    public void run(DatasetContext dsContext) throws Exception {
      KeyValueTable stateTable = dsContext.getDataset(ETLRealtimeApplication.STATE_TABLE);
      byte[] startKey = Bytes.toBytes(String.format("%s%s", appName, SEPARATOR));
      // Scan the table for keys with the app-name prefix and remove rows
      // that don't match the unique state key of this worker instance.
      try (CloseableIterator<KeyValue<byte[], byte[]>> rows =
             stateTable.scan(startKey, Bytes.stopKeyForPrefix(startKey))) {
        while (rows.hasNext()) {
          KeyValue<byte[], byte[]> row = rows.next();
          if (Bytes.compareTo(stateStoreKeyBytes, row.getKey()) != 0) {
            stateTable.delete(row.getKey());
          }
        }
      }
    }
  }, Exception.class);

  PipelinePhase pipeline = GSON.fromJson(properties.get(Constants.PIPELINEID), PipelinePhase.class);
  Map<String, TransformDetail> transformationMap = new HashMap<>();
  initializeSource(context, pipeline);
  initializeTransforms(context, transformationMap, pipeline);
  initializeSinks(context, transformationMap, pipeline);
  Set<String> startStages = new HashSet<>();
  startStages.addAll(pipeline.getStageOutputs(sourceStageName));
  transformExecutor = new TransformExecutor(transformationMap, startStages);
}
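For context, the state key built above concatenates the app name, the unique id, and the worker instance id. A hypothetical example (SEPARATOR is a constant defined elsewhere in ETLWorker; ":" is assumed here purely for illustration):

// Illustration only; all values below are made up.
String separator = ":"; // stand-in for ETLWorker's SEPARATOR constant
String stateStoreKey = String.format("%s%s%s%s%s", "purchasePipeline", separator, "1492040400000", separator, 0);
// stateStoreKey -> "purchasePipeline:1492040400000:0"
// The scan above starts at the "purchasePipeline:" prefix and deletes every other
// row under it, clearing state left behind by earlier deployments of the app.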
Use of co.cask.cdap.api.dataset.lib.CloseableIterator in project cdap by caskdata.
In class MockLogReader, the method getLog:
@Override
public CloseableIterator<LogEvent> getLog(LoggingContext loggingContext, long fromTimeMs, long toTimeMs,
                                          Filter filter) {
  CollectingCallback collectingCallback = new CollectingCallback();
  // Since this is just for test cases, we don't need to read logs lazily
  // (which is the purpose of returning an Iterator).
  long fromOffset = getOffset(fromTimeMs / 1000);
  long toOffset = getOffset(toTimeMs / 1000);
  getLogNext(loggingContext, new ReadRange(fromTimeMs, toTimeMs, fromOffset),
             (int) (toOffset - fromOffset), filter, collectingCallback);
  final Iterator<LogEvent> iterator = collectingCallback.getLogEvents().iterator();
  return new CloseableIterator<LogEvent>() {
    @Override
    public boolean hasNext() {
      return iterator.hasNext();
    }

    @Override
    public LogEvent next() {
      return iterator.next();
    }

    @Override
    public void remove() {
      iterator.remove();
    }

    @Override
    public void close() {
      // no-op
    }
  };
}
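The anonymous class above is a common adapter: an in-memory Iterator exposed as a CloseableIterator whose close() does nothing. A generic version of the same idea might look like this (an illustrative helper, not part of the CDAP API):

static <T> CloseableIterator<T> wrap(final Iterator<T> delegate) {
  return new CloseableIterator<T>() {
    @Override
    public boolean hasNext() {
      return delegate.hasNext();
    }

    @Override
    public T next() {
      return delegate.next();
    }

    @Override
    public void remove() {
      delegate.remove();
    }

    @Override
    public void close() {
      // Nothing to release; the data is already in memory.
    }
  };
}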