
Example 1 with AbstractFunction0

use of scala.runtime.AbstractFunction0 in project zeppelin by apache.

From the class FlinkInterpreter, the interpret method:

public InterpreterResult interpret(String[] lines, InterpreterContext context) {
    final IMain imain = flinkIloop.intp();
    String[] linesToRun = new String[lines.length + 1];
    System.arraycopy(lines, 0, linesToRun, 0, lines.length);
    linesToRun[lines.length] = "print(\"\")";
    System.setOut(new PrintStream(out));
    out.reset();
    Code r = null;
    String incomplete = "";
    boolean inComment = false;
    for (int l = 0; l < linesToRun.length; l++) {
        final String s = linesToRun[l];
        // if the next line starts with "." (but not ".." or "./"), it is a method-chain
        // continuation, so buffer the current line and interpret the two together
        if (l + 1 < linesToRun.length) {
            String nextLine = linesToRun[l + 1].trim();
            boolean continuation = false;
            if (nextLine.isEmpty() // skip empty lines
                    || nextLine.startsWith("//") // and line comments
                    || nextLine.startsWith("}")
                    || nextLine.startsWith("object")) { // "} object" closes a Scala companion object
                continuation = true;
            } else if (!inComment && nextLine.startsWith("/*")) { // a block comment opens
                inComment = true;
                continuation = true;
            } else if (inComment && nextLine.lastIndexOf("*/") >= 0) { // the block comment closes
                inComment = false;
                continuation = true;
            } else if (nextLine.length() > 1
                    && nextLine.charAt(0) == '.'
                    && nextLine.charAt(1) != '.' // not ".."
                    && nextLine.charAt(1) != '/') { // not "./"
                continuation = true;
            } else if (inComment) { // still inside a block comment
                continuation = true;
            }
            if (continuation) {
                incomplete += s + "\n";
                continue;
            }
        }
        final String currentCommand = incomplete;
        scala.tools.nsc.interpreter.Results.Result res = null;
        try {
            res = Console.withOut(System.out, new AbstractFunction0<Results.Result>() {

                @Override
                public Results.Result apply() {
                    return imain.interpret(currentCommand + s);
                }
            });
        } catch (Exception e) {
            logger.info("Interpreter exception", e);
            return new InterpreterResult(Code.ERROR, InterpreterUtils.getMostRelevantMessage(e));
        }
        r = getResultCode(res);
        if (r == Code.ERROR) {
            return new InterpreterResult(r, out.toString());
        } else if (r == Code.INCOMPLETE) {
            incomplete += s + "\n";
        } else {
            incomplete = "";
        }
    }
    if (r == Code.INCOMPLETE) {
        return new InterpreterResult(r, "Incomplete expression");
    } else {
        return new InterpreterResult(r, out.toString());
    }
}
Also used : PrintStream(java.io.PrintStream) InterpreterResult(org.apache.zeppelin.interpreter.InterpreterResult) IMain(scala.tools.nsc.interpreter.IMain) AbstractFunction0(scala.runtime.AbstractFunction0) Code(org.apache.zeppelin.interpreter.InterpreterResult.Code) InvocationTargetException(java.lang.reflect.InvocationTargetException) Results(scala.tools.nsc.interpreter.Results)
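
The interop trick here is Console.withOut: its second parameter list is a by-name block, which the Scala compiler lowers to a Function0, so Java code supplies it by subclassing scala.runtime.AbstractFunction0 and overriding apply(). A minimal, self-contained sketch of the same pattern follows; the class name WithOutSketch and the printed strings are illustrative, not from the Zeppelin source.

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import scala.Console;
import scala.runtime.AbstractFunction0;
import scala.runtime.BoxedUnit;

public class WithOutSketch {
    public static void main(String[] args) {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        // Console.withOut(stream)(thunk) redirects scala.Console output while
        // the thunk runs; from Java, the by-name thunk is a Function0.
        Console.withOut(new PrintStream(buffer), new AbstractFunction0<BoxedUnit>() {

            @Override
            public BoxedUnit apply() {
                Console.println("captured"); // goes to buffer, not stdout
                return BoxedUnit.UNIT;
            }
        });
        System.out.println("buffer holds: " + buffer.toString().trim());
    }
}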

Example 2 with AbstractFunction0

use of scala.runtime.AbstractFunction0 in project distributedlog by twitter.

From the class BKDistributedLogManager, the createWriteHandler method:

private void createWriteHandler(ZKLogMetadataForWriter logMetadata, boolean lockHandler, final Promise<BKLogWriteHandler> createPromise) {
    OrderedScheduler lockStateExecutor = getLockStateExecutor(true);
    // Build the locks
    DistributedLock lock;
    if (conf.isWriteLockEnabled()) {
        lock = new ZKDistributedLock(lockStateExecutor, getLockFactory(true), logMetadata.getLockPath(), conf.getLockTimeoutMilliSeconds(), statsLogger);
    } else {
        lock = NopDistributedLock.INSTANCE;
    }
    // Build the ledger allocator
    LedgerAllocator allocator;
    try {
        allocator = createLedgerAllocator(logMetadata);
    } catch (IOException e) {
        FutureUtils.setException(createPromise, e);
        return;
    }
    // Make sure the write handler is created before resources are initialized
    final BKLogWriteHandler writeHandler = new BKLogWriteHandler(logMetadata, conf, writerZKCBuilder, writerBKCBuilder, writerMetadataStore, scheduler, allocator, statsLogger, perLogStatsLogger, alertStatsLogger, clientId, regionId, writeLimiter, featureProvider, dynConf, lock);
    PermitManager manager = getLogSegmentRollingPermitManager();
    if (manager instanceof Watcher) {
        writeHandler.register((Watcher) manager);
    }
    if (lockHandler) {
        writeHandler.lockHandler().addEventListener(new FutureEventListener<DistributedLock>() {

            @Override
            public void onSuccess(DistributedLock lock) {
                FutureUtils.setValue(createPromise, writeHandler);
            }

            @Override
            public void onFailure(final Throwable cause) {
                writeHandler.asyncClose().ensure(new AbstractFunction0<BoxedUnit>() {

                    @Override
                    public BoxedUnit apply() {
                        FutureUtils.setException(createPromise, cause);
                        return BoxedUnit.UNIT;
                    }
                });
            }
        });
    } else {
        FutureUtils.setValue(createPromise, writeHandler);
    }
}
Also used : DistributedLock(com.twitter.distributedlog.lock.DistributedLock) NopDistributedLock(com.twitter.distributedlog.lock.NopDistributedLock) ZKDistributedLock(com.twitter.distributedlog.lock.ZKDistributedLock) PermitManager(com.twitter.distributedlog.util.PermitManager) LedgerAllocator(com.twitter.distributedlog.bk.LedgerAllocator) SimpleLedgerAllocator(com.twitter.distributedlog.bk.SimpleLedgerAllocator) Watcher(org.apache.zookeeper.Watcher) IOException(java.io.IOException) AbstractFunction0(scala.runtime.AbstractFunction0) OrderedScheduler(com.twitter.distributedlog.util.OrderedScheduler)
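
The ensure call in onFailure is the cleanup hook on Twitter futures: ensure takes a by-name Unit block that runs once the future completes, success or failure, so from Java it is written as an AbstractFunction0<BoxedUnit> returning BoxedUnit.UNIT. A minimal sketch of the pattern in isolation, assuming only com.twitter.util.Future; the class name EnsureSketch and the message are illustrative.

import com.twitter.util.Future;
import scala.runtime.AbstractFunction0;
import scala.runtime.BoxedUnit;

public class EnsureSketch {
    public static void main(String[] args) {
        Future<String> f = Future.value("done");
        // ensure runs after completion regardless of outcome, like finally
        f.ensure(new AbstractFunction0<BoxedUnit>() {

            @Override
            public BoxedUnit apply() {
                System.out.println("cleanup ran");
                return BoxedUnit.UNIT;
            }
        });
    }
}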

Example 3 with AbstractFunction0

use of scala.runtime.AbstractFunction0 in project distributedlog by twitter.

From the class BKDistributedLogManager, the getAsyncLogReaderWithLock method:

protected Future<AsyncLogReader> getAsyncLogReaderWithLock(final Optional<DLSN> fromDLSN, final Optional<String> subscriberId) {
    if (!fromDLSN.isPresent() && !subscriberId.isPresent()) {
        return Future.exception(new UnexpectedException("Neither from dlsn nor subscriber id is provided."));
    }
    final BKAsyncLogReaderDLSN reader = new BKAsyncLogReaderDLSN(BKDistributedLogManager.this, scheduler, getLockStateExecutor(true), fromDLSN.isPresent() ? fromDLSN.get() : DLSN.InitialDLSN, subscriberId, false, dynConf.getDeserializeRecordSetOnReads(), statsLogger);
    pendingReaders.add(reader);
    final Future<Void> lockFuture = reader.lockStream();
    final Promise<AsyncLogReader> createPromise = new Promise<AsyncLogReader>(new Function<Throwable, BoxedUnit>() {

        @Override
        public BoxedUnit apply(Throwable cause) {
            // cancel the lock when the creation future is cancelled
            lockFuture.cancel();
            return BoxedUnit.UNIT;
        }
    });
    // lock the stream - fetch the last commit position on success
    lockFuture.flatMap(new Function<Void, Future<AsyncLogReader>>() {

        @Override
        public Future<AsyncLogReader> apply(Void complete) {
            if (fromDLSN.isPresent()) {
                return Future.value((AsyncLogReader) reader);
            }
            LOG.info("Reader {} @ {} reading last commit position from subscription store after acquired lock.", subscriberId.get(), name);
            // we acquired lock
            final SubscriptionStateStore stateStore = getSubscriptionStateStore(subscriberId.get());
            return stateStore.getLastCommitPosition().map(new ExceptionalFunction<DLSN, AsyncLogReader>() {

                @Override
                public AsyncLogReader applyE(DLSN lastCommitPosition) throws UnexpectedException {
                    LOG.info("Reader {} @ {} positioned to last commit position {}.", new Object[] { subscriberId.get(), name, lastCommitPosition });
                    reader.setStartDLSN(lastCommitPosition);
                    return reader;
                }
            });
        }
    }).addEventListener(new FutureEventListener<AsyncLogReader>() {

        @Override
        public void onSuccess(AsyncLogReader r) {
            pendingReaders.remove(reader);
            FutureUtils.setValue(createPromise, r);
        }

        @Override
        public void onFailure(final Throwable cause) {
            pendingReaders.remove(reader);
            reader.asyncClose().ensure(new AbstractFunction0<BoxedUnit>() {

                @Override
                public BoxedUnit apply() {
                    FutureUtils.setException(createPromise, cause);
                    return BoxedUnit.UNIT;
                }
            });
        }
    });
    return createPromise;
}
Also used : UnexpectedException(com.twitter.distributedlog.exceptions.UnexpectedException) SubscriptionStateStore(com.twitter.distributedlog.subscription.SubscriptionStateStore) ZKSubscriptionStateStore(com.twitter.distributedlog.subscription.ZKSubscriptionStateStore) AbstractFunction0(scala.runtime.AbstractFunction0) Promise(com.twitter.util.Promise) ExceptionalFunction(com.twitter.util.ExceptionalFunction) Function(com.twitter.util.Function) BoxedUnit(scala.runtime.BoxedUnit)
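
Alongside AbstractFunction0, this example leans on com.twitter.util.Function and ExceptionalFunction, the Java-friendly adapters Twitter's util library provides where Scala expects a Function1. A minimal sketch of the map case, with illustrative names (MapSketch, the doubling logic):

import com.twitter.util.Function;
import com.twitter.util.Future;

public class MapSketch {
    public static void main(String[] args) {
        Future<Integer> n = Future.value(21);
        // Function<A, R> stands in for Scala's A => R, so a Java anonymous
        // class can be passed to combinators like map and flatMap.
        Future<Integer> doubled = n.map(new Function<Integer, Integer>() {

            @Override
            public Integer apply(Integer x) {
                return x * 2;
            }
        });
        System.out.println(doubled); // a satisfied Future holding 42
    }
}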

Example 4 with AbstractFunction0

use of scala.runtime.AbstractFunction0 in project samza by apache.

From the class KafkaSystemAdmin, the getSSPMetadata method:

/**
 * Given a set of SystemStreamPartitions, fetch metadata from Kafka for each
 * of them and return a map from each SSP to its SystemStreamPartitionMetadata.
 * The oldest and newest offsets will be null for an empty SystemStreamPartition.
 * The fetch is retried with backoff until it succeeds or MAX_RETRIES_ON_EXCEPTION
 * attempts fail, at which point a SamzaException is thrown.
 * @param ssps the set of SystemStreamPartitions to fetch metadata for
 * @param retryBackoff the retry backoff strategy
 * @return a map from each SSP to its SystemStreamPartitionMetadata, holding the offsets
 */
Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> getSSPMetadata(Set<SystemStreamPartition> ssps, ExponentialSleepStrategy retryBackoff) {
    LOG.info("Fetching SSP metadata for: {}", ssps);
    List<TopicPartition> topicPartitions = ssps.stream().map(ssp -> new TopicPartition(ssp.getStream(), ssp.getPartition().getPartitionId())).collect(Collectors.toList());
    Function1<ExponentialSleepStrategy.RetryLoop, Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>> fetchTopicPartitionMetadataOperation = new AbstractFunction1<ExponentialSleepStrategy.RetryLoop, Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>>() {

        @Override
        public Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> apply(ExponentialSleepStrategy.RetryLoop loop) {
            OffsetsMaps topicPartitionsMetadata = fetchTopicPartitionsMetadata(topicPartitions);
            Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> sspToSSPMetadata = new HashMap<>();
            for (SystemStreamPartition ssp : ssps) {
                String oldestOffset = topicPartitionsMetadata.getOldestOffsets().get(ssp);
                String newestOffset = topicPartitionsMetadata.getNewestOffsets().get(ssp);
                String upcomingOffset = topicPartitionsMetadata.getUpcomingOffsets().get(ssp);
                sspToSSPMetadata.put(ssp, new SystemStreamMetadata.SystemStreamPartitionMetadata(oldestOffset, newestOffset, upcomingOffset));
            }
            loop.done();
            return sspToSSPMetadata;
        }
    };
    Function2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit> onExceptionRetryOperation = new AbstractFunction2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit>() {

        @Override
        public BoxedUnit apply(Exception exception, ExponentialSleepStrategy.RetryLoop loop) {
            if (loop.sleepCount() < MAX_RETRIES_ON_EXCEPTION) {
                LOG.warn(String.format("Fetching SSP metadata for: %s threw an exception. Retrying.", ssps), exception);
            } else {
                LOG.error(String.format("Fetching SSP metadata for: %s threw an exception.", ssps), exception);
                loop.done();
                throw new SamzaException(exception);
            }
            return BoxedUnit.UNIT; // the retry loop ignores this value
        }
    };
    Function0<Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>> fallbackOperation = new AbstractFunction0<Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>>() {

        @Override
        public Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> apply() {
            throw new SamzaException("Failed to get SSP metadata");
        }
    };
    return retryBackoff.run(fetchTopicPartitionMetadataOperation, onExceptionRetryOperation).getOrElse(fallbackOperation);
}
Also used : LoggerFactory(org.slf4j.LoggerFactory) StartpointTimestamp(org.apache.samza.startpoint.StartpointTimestamp) Startpoint(org.apache.samza.startpoint.Startpoint) StringUtils(org.apache.commons.lang3.StringUtils) AdminClient(org.apache.kafka.clients.admin.AdminClient) StartpointSpecific(org.apache.samza.startpoint.StartpointSpecific) KafkaConfig(org.apache.samza.config.KafkaConfig) Map(java.util.Map) DeleteTopicsResult(org.apache.kafka.clients.admin.DeleteTopicsResult) MapConfig(org.apache.samza.config.MapConfig) TopicConfig(org.apache.kafka.common.config.TopicConfig) Consumer(org.apache.kafka.clients.consumer.Consumer) TopicPartition(org.apache.kafka.common.TopicPartition) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) PartitionInfo(org.apache.kafka.common.PartitionInfo) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp) Collectors(java.util.stream.Collectors) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException) List(java.util.List) StartpointUpcoming(org.apache.samza.startpoint.StartpointUpcoming) AbstractFunction0(scala.runtime.AbstractFunction0) AbstractFunction1(scala.runtime.AbstractFunction1) Optional(java.util.Optional) AbstractFunction2(scala.runtime.AbstractFunction2) Config(org.apache.samza.config.Config) NotImplementedException(org.apache.commons.lang3.NotImplementedException) StartpointOldest(org.apache.samza.startpoint.StartpointOldest) Function0(scala.Function0) StreamValidationException(org.apache.samza.system.StreamValidationException) Function1(scala.Function1) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Function2(scala.Function2) HashMap(java.util.HashMap) SystemStreamPartition(org.apache.samza.system.SystemStreamPartition) SystemStreamMetadata(org.apache.samza.system.SystemStreamMetadata) Function(java.util.function.Function) StreamConfig(org.apache.samza.config.StreamConfig) RecordsToDelete(org.apache.kafka.clients.admin.RecordsToDelete) HashSet(java.util.HashSet) StartpointVisitor(org.apache.samza.startpoint.StartpointVisitor) DescribeTopicsResult(org.apache.kafka.clients.admin.DescribeTopicsResult) SystemStream(org.apache.samza.system.SystemStream) CreateTopicsResult(org.apache.kafka.clients.admin.CreateTopicsResult) ApplicationConfig(org.apache.samza.config.ApplicationConfig) SystemConfig(org.apache.samza.config.SystemConfig) TopicDescription(org.apache.kafka.clients.admin.TopicDescription) KafkaUtil(org.apache.samza.util.KafkaUtil) Logger(org.slf4j.Logger) Properties(java.util.Properties) ExponentialSleepStrategy(org.apache.samza.util.ExponentialSleepStrategy) NewTopic(org.apache.kafka.clients.admin.NewTopic) Partition(org.apache.samza.Partition) StreamSpec(org.apache.samza.system.StreamSpec) BoxedUnit(scala.runtime.BoxedUnit) SamzaException(org.apache.samza.SamzaException) TimeUnit(java.util.concurrent.TimeUnit) SystemAdmin(org.apache.samza.system.SystemAdmin) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Collections(java.util.Collections)
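
The getOrElse at the end is where AbstractFunction0 matters: scala.Option.getOrElse takes a by-name default, so the fallback (and its SamzaException) is only evaluated when the retry loop produced no result. The same shape in isolation, as a minimal sketch with illustrative names (GetOrElseSketch, the "fallback" string):

import scala.Option;
import scala.runtime.AbstractFunction0;

public class GetOrElseSketch {
    public static void main(String[] args) {
        Option<String> maybe = Option.empty();
        // getOrElse's parameter is by-name (=> B), i.e. a Function0<B> from
        // Java, so the default is computed only when the Option is empty.
        String value = maybe.getOrElse(new AbstractFunction0<String>() {

            @Override
            public String apply() {
                return "fallback";
            }
        });
        System.out.println(value); // prints "fallback"
    }
}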

Aggregations

AbstractFunction0 (scala.runtime.AbstractFunction0): 4 uses
BoxedUnit (scala.runtime.BoxedUnit): 2 uses
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 1 use
Preconditions (com.google.common.base.Preconditions): 1 use
ImmutableMap (com.google.common.collect.ImmutableMap): 1 use
ImmutableSet (com.google.common.collect.ImmutableSet): 1 use
LedgerAllocator (com.twitter.distributedlog.bk.LedgerAllocator): 1 use
SimpleLedgerAllocator (com.twitter.distributedlog.bk.SimpleLedgerAllocator): 1 use
UnexpectedException (com.twitter.distributedlog.exceptions.UnexpectedException): 1 use
DistributedLock (com.twitter.distributedlog.lock.DistributedLock): 1 use
NopDistributedLock (com.twitter.distributedlog.lock.NopDistributedLock): 1 use
ZKDistributedLock (com.twitter.distributedlog.lock.ZKDistributedLock): 1 use
SubscriptionStateStore (com.twitter.distributedlog.subscription.SubscriptionStateStore): 1 use
ZKSubscriptionStateStore (com.twitter.distributedlog.subscription.ZKSubscriptionStateStore): 1 use
OrderedScheduler (com.twitter.distributedlog.util.OrderedScheduler): 1 use
PermitManager (com.twitter.distributedlog.util.PermitManager): 1 use
ExceptionalFunction (com.twitter.util.ExceptionalFunction): 1 use
Function (com.twitter.util.Function): 1 use
Promise (com.twitter.util.Promise): 1 use
IOException (java.io.IOException): 1 use