scala.runtime.AbstractFunction0 is the abstract base class that Java code extends to implement Scala's Function0 trait, most often to supply a zero-argument thunk to a Scala API that takes a by-name parameter. The examples below show how real projects use it from Java.

Use of scala.runtime.AbstractFunction0 in project zeppelin by apache.
From the class FlinkInterpreter, method interpret:
public InterpreterResult interpret(String[] lines, InterpreterContext context) {
  final IMain imain = flinkIloop.intp();
  String[] linesToRun = new String[lines.length + 1];
  for (int i = 0; i < lines.length; i++) {
    linesToRun[i] = lines[i];
  }
  linesToRun[lines.length] = "print(\"\")";
  System.setOut(new PrintStream(out));
  out.reset();
  Code r = null;
  String incomplete = "";
  boolean inComment = false;
  for (int l = 0; l < linesToRun.length; l++) {
    final String s = linesToRun[l];
    // If the next line is a continuation -- empty, a comment, a closing brace,
    // a companion object, or an invocation starting with "." (but not ".." or
    // "./") -- buffer the current line instead of interpreting it yet.
    if (l + 1 < linesToRun.length) {
      String nextLine = linesToRun[l + 1].trim();
      boolean continuation = false;
      if (nextLine.isEmpty()
          || nextLine.startsWith("//") // skip empty lines and comments
          || nextLine.startsWith("}")
          || nextLine.startsWith("object")) { // include "} object" for Scala companion objects
        continuation = true;
      } else if (!inComment && nextLine.startsWith("/*")) {
        inComment = true;
        continuation = true;
      } else if (inComment && nextLine.lastIndexOf("*/") >= 0) {
        inComment = false;
        continuation = true;
      } else if (nextLine.length() > 1
          && nextLine.charAt(0) == '.'
          && nextLine.charAt(1) != '.'    // ".."
          && nextLine.charAt(1) != '/') { // "./"
        continuation = true;
      } else if (inComment) {
        continuation = true;
      }
      if (continuation) {
        incomplete += s + "\n";
        continue;
      }
    }
    final String currentCommand = incomplete;
    scala.tools.nsc.interpreter.Results.Result res = null;
    try {
      // Interpret with Scala's Console output redirected to the (replaced)
      // System.out; the by-name thunk is supplied via AbstractFunction0.
      res = Console.withOut(System.out, new AbstractFunction0<Results.Result>() {
        @Override
        public Results.Result apply() {
          return imain.interpret(currentCommand + s);
        }
      });
    } catch (Exception e) {
      logger.info("Interpreter exception", e);
      return new InterpreterResult(Code.ERROR, InterpreterUtils.getMostRelevantMessage(e));
    }
    r = getResultCode(res);
    if (r == Code.ERROR) {
      return new InterpreterResult(r, out.toString());
    } else if (r == Code.INCOMPLETE) {
      incomplete += s + "\n";
    } else {
      incomplete = "";
    }
  }
  if (r == Code.INCOMPLETE) {
    return new InterpreterResult(r, "Incomplete expression");
  } else {
    return new InterpreterResult(r, out.toString());
  }
}
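
The key interop detail: Console.withOut takes its second argument by name, and Scala compiles by-name parameters to Function0, so the Java caller extends AbstractFunction0 and does the work inside apply(). Below is a minimal, self-contained sketch of the same pattern; the class and variable names are illustrative, not from Zeppelin:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

import scala.Console;
import scala.runtime.AbstractFunction0;

public class WithOutSketch {
  public static void main(String[] args) {
    ByteArrayOutputStream captured = new ByteArrayOutputStream();
    // Console.withOut(out)(thunk) redirects scala.Console's output while the
    // thunk runs; the by-name thunk surfaces to Java as a Function0 argument.
    String result = Console.withOut(new PrintStream(captured),
        new AbstractFunction0<String>() {
          @Override
          public String apply() {
            Console.println("redirected"); // lands in `captured`, not stdout
            return "done";
          }
        });
    System.out.println(result);             // prints: done
    System.out.print(captured.toString());  // prints: redirected
  }
}

Note that the thunk's return value passes straight through withOut, which is how the interpreter result escapes the redirected scope in the Zeppelin code above.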
Use of scala.runtime.AbstractFunction0 in project distributedlog by twitter.
From the class BKDistributedLogManager, method createWriteHandler:
private void createWriteHandler(ZKLogMetadataForWriter logMetadata,
                                boolean lockHandler,
                                final Promise<BKLogWriteHandler> createPromise) {
  OrderedScheduler lockStateExecutor = getLockStateExecutor(true);
  // Build the lock
  DistributedLock lock;
  if (conf.isWriteLockEnabled()) {
    lock = new ZKDistributedLock(lockStateExecutor, getLockFactory(true),
        logMetadata.getLockPath(), conf.getLockTimeoutMilliSeconds(), statsLogger);
  } else {
    lock = NopDistributedLock.INSTANCE;
  }
  // Build the ledger allocator
  LedgerAllocator allocator;
  try {
    allocator = createLedgerAllocator(logMetadata);
  } catch (IOException e) {
    FutureUtils.setException(createPromise, e);
    return;
  }
  // Make sure the write handler is created before resources are initialized
  final BKLogWriteHandler writeHandler = new BKLogWriteHandler(logMetadata, conf,
      writerZKCBuilder, writerBKCBuilder, writerMetadataStore, scheduler, allocator,
      statsLogger, perLogStatsLogger, alertStatsLogger, clientId, regionId,
      writeLimiter, featureProvider, dynConf, lock);
  PermitManager manager = getLogSegmentRollingPermitManager();
  if (manager instanceof Watcher) {
    writeHandler.register((Watcher) manager);
  }
  if (lockHandler) {
    writeHandler.lockHandler().addEventListener(new FutureEventListener<DistributedLock>() {

      @Override
      public void onSuccess(DistributedLock lock) {
        FutureUtils.setValue(createPromise, writeHandler);
      }

      @Override
      public void onFailure(final Throwable cause) {
        // Close the handler first, then surface the original failure.
        writeHandler.asyncClose().ensure(new AbstractFunction0<BoxedUnit>() {
          @Override
          public BoxedUnit apply() {
            FutureUtils.setException(createPromise, cause);
            return BoxedUnit.UNIT;
          }
        });
      }
    });
  } else {
    FutureUtils.setValue(createPromise, writeHandler);
  }
}
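
Here AbstractFunction0 implements the by-name block expected by com.twitter.util.Future#ensure, which runs regardless of how the future completes; Scala's Unit return surfaces to Java as BoxedUnit.UNIT. A minimal sketch of the same pattern, with illustrative names and values:

import com.twitter.util.Await;
import com.twitter.util.Future;

import scala.runtime.AbstractFunction0;
import scala.runtime.BoxedUnit;

public class EnsureSketch {
  public static void main(String[] args) throws Exception {
    Future<String> failed = Future.exception(new RuntimeException("boom"));
    // ensure() runs its block whether the future succeeds or fails -- the
    // asynchronous analogue of a finally clause.
    Future<String> withCleanup = failed.ensure(new AbstractFunction0<BoxedUnit>() {
      @Override
      public BoxedUnit apply() {
        System.out.println("cleanup ran");
        return BoxedUnit.UNIT; // Scala's Unit, boxed for Java
      }
    });
    try {
      Await.result(withCleanup);
    } catch (RuntimeException cause) {
      System.out.println("failure still propagates: " + cause.getMessage());
    }
  }
}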
Use of scala.runtime.AbstractFunction0 in project distributedlog by twitter.
From the class BKDistributedLogManager, method getAsyncLogReaderWithLock:
protected Future<AsyncLogReader> getAsyncLogReaderWithLock(final Optional<DLSN> fromDLSN,
                                                           final Optional<String> subscriberId) {
  if (!fromDLSN.isPresent() && !subscriberId.isPresent()) {
    return Future.exception(new UnexpectedException("Neither from dlsn nor subscriber id is provided."));
  }
  final BKAsyncLogReaderDLSN reader = new BKAsyncLogReaderDLSN(BKDistributedLogManager.this,
      scheduler, getLockStateExecutor(true),
      fromDLSN.isPresent() ? fromDLSN.get() : DLSN.InitialDLSN,
      subscriberId, false, dynConf.getDeserializeRecordSetOnReads(), statsLogger);
  pendingReaders.add(reader);
  final Future<Void> lockFuture = reader.lockStream();
  final Promise<AsyncLogReader> createPromise = new Promise<AsyncLogReader>(
      new Function<Throwable, BoxedUnit>() {
        @Override
        public BoxedUnit apply(Throwable cause) {
          // cancel the lock when the creation future is cancelled
          lockFuture.cancel();
          return BoxedUnit.UNIT;
        }
      });
  // lock the stream - fetch the last commit position on success
  lockFuture.flatMap(new Function<Void, Future<AsyncLogReader>>() {
    @Override
    public Future<AsyncLogReader> apply(Void complete) {
      if (fromDLSN.isPresent()) {
        return Future.value((AsyncLogReader) reader);
      }
      LOG.info("Reader {} @ {} reading last commit position from subscription store after acquired lock.",
          subscriberId.get(), name);
      // we acquired the lock
      final SubscriptionStateStore stateStore = getSubscriptionStateStore(subscriberId.get());
      return stateStore.getLastCommitPosition().map(new ExceptionalFunction<DLSN, AsyncLogReader>() {
        @Override
        public AsyncLogReader applyE(DLSN lastCommitPosition) throws UnexpectedException {
          LOG.info("Reader {} @ {} positioned to last commit position {}.",
              new Object[] { subscriberId.get(), name, lastCommitPosition });
          reader.setStartDLSN(lastCommitPosition);
          return reader;
        }
      });
    }
  }).addEventListener(new FutureEventListener<AsyncLogReader>() {
    @Override
    public void onSuccess(AsyncLogReader r) {
      pendingReaders.remove(reader);
      FutureUtils.setValue(createPromise, r);
    }

    @Override
    public void onFailure(final Throwable cause) {
      pendingReaders.remove(reader);
      reader.asyncClose().ensure(new AbstractFunction0<BoxedUnit>() {
        @Override
        public BoxedUnit apply() {
          FutureUtils.setException(createPromise, cause);
          return BoxedUnit.UNIT;
        }
      });
    }
  });
  return createPromise;
}
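
Two interop pieces appear above: com.twitter.util.Function (a Java-friendly subclass of scala.runtime.AbstractFunction1) for the Promise's interrupt handler and for flatMap/map, and AbstractFunction0 again for ensure. A minimal sketch of the interrupt-handler wiring; the names are illustrative, not from distributedlog:

import com.twitter.util.Function;
import com.twitter.util.Promise;

import scala.runtime.BoxedUnit;

public class InterruptiblePromiseSketch {
  public static void main(String[] args) {
    // com.twitter.util.Function extends scala.runtime.AbstractFunction1, so it
    // is the Java-friendly way to hand Scala a one-argument function.
    Promise<String> createPromise = new Promise<String>(
        new Function<Throwable, BoxedUnit>() {
          @Override
          public BoxedUnit apply(Throwable cause) {
            // Runs when the pending promise is interrupted, e.g. by a caller
            // that gave up -- the analogue of the lockFuture.cancel() above.
            System.out.println("interrupted: " + cause);
            return BoxedUnit.UNIT;
          }
        });
    // raise() delivers an interrupt to the pending promise, invoking the handler.
    createPromise.raise(new InterruptedException("caller gave up"));
  }
}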
Use of scala.runtime.AbstractFunction0 in project samza by apache.
From the class KafkaSystemAdmin, method getSSPMetadata:
/**
 * Given a set of SystemStreamPartitions, fetch metadata from Kafka for each
 * of them and return a map from each SSP to its SystemStreamPartitionMetadata.
 * The oldest and newest offsets are null when a given SystemStreamPartition
 * is empty. This method blocks and retries until it gets a successful
 * response from Kafka or exhausts the retry limit.
 *
 * @param ssps the set of SystemStreamPartitions to fetch metadata for
 * @param retryBackoff the retry backoff strategy
 * @return a map from each SSP to its sspMetadata, which carries the offsets
 */
Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> getSSPMetadata(
    Set<SystemStreamPartition> ssps, ExponentialSleepStrategy retryBackoff) {
  LOG.info("Fetching SSP metadata for: {}", ssps);
  List<TopicPartition> topicPartitions = ssps.stream()
      .map(ssp -> new TopicPartition(ssp.getStream(), ssp.getPartition().getPartitionId()))
      .collect(Collectors.toList());
  Function1<ExponentialSleepStrategy.RetryLoop, Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>> fetchTopicPartitionMetadataOperation =
      new AbstractFunction1<ExponentialSleepStrategy.RetryLoop, Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>>() {
        @Override
        public Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> apply(ExponentialSleepStrategy.RetryLoop loop) {
          OffsetsMaps topicPartitionsMetadata = fetchTopicPartitionsMetadata(topicPartitions);
          Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> sspToSSPMetadata = new HashMap<>();
          for (SystemStreamPartition ssp : ssps) {
            String oldestOffset = topicPartitionsMetadata.getOldestOffsets().get(ssp);
            String newestOffset = topicPartitionsMetadata.getNewestOffsets().get(ssp);
            String upcomingOffset = topicPartitionsMetadata.getUpcomingOffsets().get(ssp);
            sspToSSPMetadata.put(ssp,
                new SystemStreamMetadata.SystemStreamPartitionMetadata(oldestOffset, newestOffset, upcomingOffset));
          }
          loop.done();
          return sspToSSPMetadata;
        }
      };
  Function2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit> onExceptionRetryOperation =
      new AbstractFunction2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit>() {
        @Override
        public BoxedUnit apply(Exception exception, ExponentialSleepStrategy.RetryLoop loop) {
          if (loop.sleepCount() < MAX_RETRIES_ON_EXCEPTION) {
            LOG.warn(String.format("Fetching SSP metadata for: %s threw an exception. Retrying.", ssps), exception);
          } else {
            LOG.error(String.format("Fetching SSP metadata for: %s threw an exception.", ssps), exception);
            loop.done();
            throw new SamzaException(exception);
          }
          return BoxedUnit.UNIT;
        }
      };
  Function0<Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>> fallbackOperation =
      new AbstractFunction0<Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>>() {
        @Override
        public Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> apply() {
          throw new SamzaException("Failed to get SSP metadata");
        }
      };
  return retryBackoff.run(fetchTopicPartitionMetadataOperation, onExceptionRetryOperation)
      .getOrElse(fallbackOperation);
}
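
The fallback on the last line works because retryBackoff.run returns a scala.Option, and Option#getOrElse takes its default by name, i.e. as a Function0 that is evaluated only when the Option is empty. A minimal sketch of just that last step, with illustrative values:

import scala.Option;
import scala.runtime.AbstractFunction0;

public class GetOrElseSketch {
  public static void main(String[] args) {
    Option<String> result = Option.empty(); // what an exhausted retry loop yields
    String value = result.getOrElse(new AbstractFunction0<String>() {
      @Override
      public String apply() {
        // Evaluated lazily: runs only because `result` is None.
        return "fallback";
      }
    });
    System.out.println(value); // prints: fallback
  }
}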