
Example 1 with AbstractFunction1

use of scala.runtime.AbstractFunction1 in project samza by apache.

the class KafkaSystemAdmin method getSystemStreamPartitionCounts.

/**
 * Note! This method does not populate SystemStreamMetadata for each stream with real data.
 * Thus, this method should ONLY be used to get the number of partitions for each stream.
 * It will throw NotImplementedException if anyone tries to access the actual metadata.
 * @param streamNames set of streams for which to get the partition counts
 * @param cacheTTL cache TTL to use if caching the data
 * @return a map keyed on stream name; only the number of partitions in each SystemStreamMetadata is meaningful
 */
@Override
public Map<String, SystemStreamMetadata> getSystemStreamPartitionCounts(Set<String> streamNames, long cacheTTL) {
    // This optimization omits actual metadata for performance. Instead, we inject a dummy for all partitions.
    final SystemStreamMetadata.SystemStreamPartitionMetadata dummySspm = new SystemStreamMetadata.SystemStreamPartitionMetadata(null, null, null) {

        String msg = "getSystemStreamPartitionCounts does not populate SystemStreamMetadata info, only the number of partitions";

        @Override
        public String getOldestOffset() {
            throw new NotImplementedException(msg);
        }

        @Override
        public String getNewestOffset() {
            throw new NotImplementedException(msg);
        }

        @Override
        public String getUpcomingOffset() {
            throw new NotImplementedException(msg);
        }
    };
    ExponentialSleepStrategy strategy = new ExponentialSleepStrategy(DEFAULT_EXPONENTIAL_SLEEP_BACK_OFF_MULTIPLIER, DEFAULT_EXPONENTIAL_SLEEP_INITIAL_DELAY_MS, DEFAULT_EXPONENTIAL_SLEEP_MAX_DELAY_MS);
    Function1<ExponentialSleepStrategy.RetryLoop, Map<String, SystemStreamMetadata>> fetchMetadataOperation = new AbstractFunction1<ExponentialSleepStrategy.RetryLoop, Map<String, SystemStreamMetadata>>() {

        @Override
        public Map<String, SystemStreamMetadata> apply(ExponentialSleepStrategy.RetryLoop loop) {
            Map<String, SystemStreamMetadata> allMetadata = new HashMap<>();
            streamNames.forEach(streamName -> {
                Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetadata = new HashMap<>();
                List<PartitionInfo> partitionInfos = threadSafeKafkaConsumer.execute(consumer -> consumer.partitionsFor(streamName));
                LOG.debug("Stream {} has partitions {}", streamName, partitionInfos);
                partitionInfos.forEach(partitionInfo -> partitionMetadata.put(new Partition(partitionInfo.partition()), dummySspm));
                allMetadata.put(streamName, new SystemStreamMetadata(streamName, partitionMetadata));
            });
            loop.done();
            return allMetadata;
        }
    };
    Map<String, SystemStreamMetadata> result = strategy.run(fetchMetadataOperation, new AbstractFunction2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit>() {

        @Override
        public BoxedUnit apply(Exception exception, ExponentialSleepStrategy.RetryLoop loop) {
            if (loop.sleepCount() < MAX_RETRIES_ON_EXCEPTION) {
                LOG.warn(String.format("Fetching systemstreampartition counts for: %s threw an exception. Retrying.", streamNames), exception);
            } else {
                LOG.error(String.format("Fetching systemstreampartition counts for: %s threw an exception.", streamNames), exception);
                loop.done();
                throw new SamzaException(exception);
            }
            return null;
        }
    }).get();
    LOG.info("SystemStream partition counts for system {}: {}", systemName, result);
    return result;
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) SystemStreamPartition(org.apache.samza.system.SystemStreamPartition) Partition(org.apache.samza.Partition) AbstractFunction2(scala.runtime.AbstractFunction2) HashMap(java.util.HashMap) NotImplementedException(org.apache.commons.lang3.NotImplementedException) ExponentialSleepStrategy(org.apache.samza.util.ExponentialSleepStrategy) SystemStreamMetadata(org.apache.samza.system.SystemStreamMetadata) AbstractFunction1(scala.runtime.AbstractFunction1) SamzaException(org.apache.samza.SamzaException) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException) NotImplementedException(org.apache.commons.lang3.NotImplementedException) StreamValidationException(org.apache.samza.system.StreamValidationException) SamzaException(org.apache.samza.SamzaException) PartitionInfo(org.apache.kafka.common.PartitionInfo) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)
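
To make the contract concrete, here is a minimal caller sketch. It is an illustration under stated assumptions, not project code: "admin" stands for an already-configured KafkaSystemAdmin, "page-views" is a made-up stream name, and the 0L cacheTTL is purely illustrative. Only the size of the per-partition map is read, because touching any offset on the dummy metadata throws NotImplementedException as documented above.

import java.util.Map;
import java.util.Set;
import com.google.common.collect.ImmutableSet;
import org.apache.samza.system.SystemStreamMetadata;
import org.apache.samza.system.kafka.KafkaSystemAdmin;

public class PartitionCountExample {

    // "admin" and the stream name are hypothetical; only the partition count is safe to read.
    static int partitionCount(KafkaSystemAdmin admin) {
        Set<String> streams = ImmutableSet.of("page-views");
        // The 0L cacheTTL is illustrative only.
        Map<String, SystemStreamMetadata> metadata = admin.getSystemStreamPartitionCounts(streams, 0L);
        // The per-partition entries are the dummy shown above; reading their offsets would throw.
        return metadata.get("page-views").getSystemStreamPartitionMetadata().size();
    }
}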

Example 2 with AbstractFunction1

use of scala.runtime.AbstractFunction1 in project distributedlog by twitter.

the class TestAsyncReaderLock method testReaderLockCloseInAcquireCallback.

@Test(timeout = 60000)
public void testReaderLockCloseInAcquireCallback() throws Exception {
    final String name = runtime.getMethodName();
    DistributedLogManager dlm = createNewDLM(conf, name);
    BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
    writer.write(DLMTestUtil.getLogRecordInstance(1L));
    writer.closeAndComplete();
    final CountDownLatch latch = new CountDownLatch(1);
    Future<AsyncLogReader> futureReader1 = dlm.getAsyncLogReaderWithLock(DLSN.InitialDLSN);
    futureReader1.flatMap(new ExceptionalFunction<AsyncLogReader, Future<Void>>() {

        @Override
        public Future<Void> applyE(AsyncLogReader reader) throws IOException {
            return reader.asyncClose().map(new AbstractFunction1<Void, Void>() {

                @Override
                public Void apply(Void result) {
                    latch.countDown();
                    return null;
                }
            });
        }
    });
    latch.await();
    dlm.close();
}
Also used : Future(com.twitter.util.Future) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) AbstractFunction1(scala.runtime.AbstractFunction1) Test(org.junit.Test)
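
The pattern in this test is the usual Java-to-Scala bridge: AbstractFunction1 implements everything in scala.Function1 except apply(), so an anonymous subclass is enough to hand a callback to Future.map or Future.flatMap from Java. The self-contained sketch below shows just that bridge; class and variable names are illustrative, and it assumes only Twitter's util-core on the classpath.

import com.twitter.util.Await;
import com.twitter.util.Future;
import com.twitter.util.Promise;
import scala.runtime.AbstractFunction1;

public class FutureMapSketch {

    public static void main(String[] args) throws Exception {
        Promise<String> promise = new Promise<>();
        // map() expects a scala.Function1; AbstractFunction1 supplies everything but apply().
        Future<Integer> length = promise.map(new AbstractFunction1<String, Integer>() {

            @Override
            public Integer apply(String s) {
                return s.length();
            }
        });
        promise.setValue("hello");
        System.out.println(Await.result(length));
    }
}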

Example 3 with AbstractFunction1

use of scala.runtime.AbstractFunction1 in project distributedlog by twitter.

the class BKAsyncLogWriter method markEndOfStream.

Future<Long> markEndOfStream() {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    Future<BKLogSegmentWriter> logSegmentWriterFuture;
    synchronized (this) {
        logSegmentWriterFuture = this.rollingFuture;
    }
    if (null == logSegmentWriterFuture) {
        logSegmentWriterFuture = getLogSegmentWriterForEndOfStream();
    }
    return logSegmentWriterFuture.flatMap(new AbstractFunction1<BKLogSegmentWriter, Future<Long>>() {

        @Override
        public Future<Long> apply(BKLogSegmentWriter w) {
            return w.markEndOfStream();
        }
    }).addEventListener(new OpStatsListener<Long>(markEndOfStreamOpStatsLogger, stopwatch));
}
Also used : Stopwatch(com.google.common.base.Stopwatch) AbstractFunction1(scala.runtime.AbstractFunction1)
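
The same bridge extends to flatMap for chaining a second asynchronous step, and a FutureEventListener can be attached to observe the outcome, mirroring the flatMap plus OpStatsListener shape of markEndOfStream. The sketch below is illustrative only: the parse step and the listener are made up, and only util-core is assumed.

import com.twitter.util.Future;
import com.twitter.util.FutureEventListener;
import scala.runtime.AbstractFunction1;

public class FlatMapListenerSketch {

    // Chains one asynchronous step after another, then observes success or failure.
    static Future<Integer> parseAsync(Future<String> raw) {
        return raw.flatMap(new AbstractFunction1<String, Future<Integer>>() {

            @Override
            public Future<Integer> apply(String s) {
                // A stand-in for a second asynchronous call; here it is already satisfied.
                return Future.value(Integer.parseInt(s));
            }
        }).addEventListener(new FutureEventListener<Integer>() {

            @Override
            public void onSuccess(Integer value) {
                System.out.println("parsed " + value);
            }

            @Override
            public void onFailure(Throwable cause) {
                System.err.println("parse failed: " + cause);
            }
        });
    }
}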

Example 4 with AbstractFunction1

use of scala.runtime.AbstractFunction1 in project samza by apache.

the class KafkaSystemAdmin method getSSPMetadata.

/**
 * Given a set of SystemStreamPartitions, fetch metadata from Kafka for each
 * of them, and return a map from ssp to SystemStreamPartitionMetadata for
 * each of them. This method will return null for oldest and newest offsets
 * if a given SystemStreamPartition is empty. This method will block and
 * retry indefinitely until it gets a successful response from Kafka.
 * @param ssps the set of SystemStreamPartitions to fetch metadata for
 * @param retryBackoff retry backoff strategy
 * @return a map from each SSP to its SystemStreamPartitionMetadata (oldest, newest, and upcoming offsets)
 */
Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> getSSPMetadata(Set<SystemStreamPartition> ssps, ExponentialSleepStrategy retryBackoff) {
    LOG.info("Fetching SSP metadata for: {}", ssps);
    List<TopicPartition> topicPartitions = ssps.stream().map(ssp -> new TopicPartition(ssp.getStream(), ssp.getPartition().getPartitionId())).collect(Collectors.toList());
    Function1<ExponentialSleepStrategy.RetryLoop, Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>> fetchTopicPartitionMetadataOperation = new AbstractFunction1<ExponentialSleepStrategy.RetryLoop, Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>>() {

        @Override
        public Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> apply(ExponentialSleepStrategy.RetryLoop loop) {
            OffsetsMaps topicPartitionsMetadata = fetchTopicPartitionsMetadata(topicPartitions);
            Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> sspToSSPMetadata = new HashMap<>();
            for (SystemStreamPartition ssp : ssps) {
                String oldestOffset = topicPartitionsMetadata.getOldestOffsets().get(ssp);
                String newestOffset = topicPartitionsMetadata.getNewestOffsets().get(ssp);
                String upcomingOffset = topicPartitionsMetadata.getUpcomingOffsets().get(ssp);
                sspToSSPMetadata.put(ssp, new SystemStreamMetadata.SystemStreamPartitionMetadata(oldestOffset, newestOffset, upcomingOffset));
            }
            loop.done();
            return sspToSSPMetadata;
        }
    };
    Function2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit> onExceptionRetryOperation = new AbstractFunction2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit>() {

        @Override
        public BoxedUnit apply(Exception exception, ExponentialSleepStrategy.RetryLoop loop) {
            if (loop.sleepCount() < MAX_RETRIES_ON_EXCEPTION) {
                LOG.warn(String.format("Fetching SSP metadata for: %s threw an exception. Retrying.", ssps), exception);
            } else {
                LOG.error(String.format("Fetching SSP metadata for: %s threw an exception.", ssps), exception);
                loop.done();
                throw new SamzaException(exception);
            }
            return null;
        }
    };
    Function0<Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>> fallbackOperation = new AbstractFunction0<Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>>() {

        @Override
        public Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> apply() {
            throw new SamzaException("Failed to get SSP metadata");
        }
    };
    return retryBackoff.run(fetchTopicPartitionMetadataOperation, onExceptionRetryOperation).getOrElse(fallbackOperation);
}
Also used : LoggerFactory(org.slf4j.LoggerFactory) StartpointTimestamp(org.apache.samza.startpoint.StartpointTimestamp) Startpoint(org.apache.samza.startpoint.Startpoint) StringUtils(org.apache.commons.lang3.StringUtils) AdminClient(org.apache.kafka.clients.admin.AdminClient) StartpointSpecific(org.apache.samza.startpoint.StartpointSpecific) KafkaConfig(org.apache.samza.config.KafkaConfig) Map(java.util.Map) DeleteTopicsResult(org.apache.kafka.clients.admin.DeleteTopicsResult) MapConfig(org.apache.samza.config.MapConfig) TopicConfig(org.apache.kafka.common.config.TopicConfig) Consumer(org.apache.kafka.clients.consumer.Consumer) TopicPartition(org.apache.kafka.common.TopicPartition) ImmutableSet(com.google.common.collect.ImmutableSet) ImmutableMap(com.google.common.collect.ImmutableMap) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) PartitionInfo(org.apache.kafka.common.PartitionInfo) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp) Collectors(java.util.stream.Collectors) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException) List(java.util.List) StartpointUpcoming(org.apache.samza.startpoint.StartpointUpcoming) AbstractFunction0(scala.runtime.AbstractFunction0) AbstractFunction1(scala.runtime.AbstractFunction1) Optional(java.util.Optional) AbstractFunction2(scala.runtime.AbstractFunction2) Config(org.apache.samza.config.Config) NotImplementedException(org.apache.commons.lang3.NotImplementedException) StartpointOldest(org.apache.samza.startpoint.StartpointOldest) Function0(scala.Function0) StreamValidationException(org.apache.samza.system.StreamValidationException) Function1(scala.Function1) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Function2(scala.Function2) HashMap(java.util.HashMap) SystemStreamPartition(org.apache.samza.system.SystemStreamPartition) SystemStreamMetadata(org.apache.samza.system.SystemStreamMetadata) Function(java.util.function.Function) StreamConfig(org.apache.samza.config.StreamConfig) RecordsToDelete(org.apache.kafka.clients.admin.RecordsToDelete) HashSet(java.util.HashSet) StartpointVisitor(org.apache.samza.startpoint.StartpointVisitor) DescribeTopicsResult(org.apache.kafka.clients.admin.DescribeTopicsResult) SystemStream(org.apache.samza.system.SystemStream) CreateTopicsResult(org.apache.kafka.clients.admin.CreateTopicsResult) ApplicationConfig(org.apache.samza.config.ApplicationConfig) SystemConfig(org.apache.samza.config.SystemConfig) TopicDescription(org.apache.kafka.clients.admin.TopicDescription) KafkaUtil(org.apache.samza.util.KafkaUtil) Logger(org.slf4j.Logger) Properties(java.util.Properties) ExponentialSleepStrategy(org.apache.samza.util.ExponentialSleepStrategy) NewTopic(org.apache.kafka.clients.admin.NewTopic) Partition(org.apache.samza.Partition) StreamSpec(org.apache.samza.system.StreamSpec) BoxedUnit(scala.runtime.BoxedUnit) SamzaException(org.apache.samza.SamzaException) TimeUnit(java.util.concurrent.TimeUnit) SystemAdmin(org.apache.samza.system.SystemAdmin) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Collections(java.util.Collections) AbstractFunction2(scala.runtime.AbstractFunction2) HashMap(java.util.HashMap) ExponentialSleepStrategy(org.apache.samza.util.ExponentialSleepStrategy) SystemStreamMetadata(org.apache.samza.system.SystemStreamMetadata) AbstractFunction0(scala.runtime.AbstractFunction0) AbstractFunction1(scala.runtime.AbstractFunction1) SamzaException(org.apache.samza.SamzaException) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException) NotImplementedException(org.apache.commons.lang3.NotImplementedException) StreamValidationException(org.apache.samza.system.StreamValidationException) SamzaException(org.apache.samza.SamzaException) TopicPartition(org.apache.kafka.common.TopicPartition) BoxedUnit(scala.runtime.BoxedUnit) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap) SystemStreamPartition(org.apache.samza.system.SystemStreamPartition)
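
Taken together, the three scala.runtime adapters (AbstractFunction1 for the retried operation, AbstractFunction2 for the exception handler, AbstractFunction0 for the getOrElse fallback) form a reusable retry idiom around ExponentialSleepStrategy. Below is a stripped-down sketch of that idiom under stated assumptions: the flaky fetchSomething() call and the retry limit of 3 are made up, standing in for the Kafka metadata fetch above.

import org.apache.samza.util.ExponentialSleepStrategy;
import scala.Function1;
import scala.runtime.AbstractFunction0;
import scala.runtime.AbstractFunction1;
import scala.runtime.AbstractFunction2;
import scala.runtime.BoxedUnit;

public class RetryPatternSketch {

    // Hypothetical flaky call standing in for the real metadata fetch.
    static String fetchSomething() {
        if (Math.random() < 0.5) {
            throw new RuntimeException("transient failure");
        }
        return "metadata";
    }

    public static void main(String[] args) {
        ExponentialSleepStrategy strategy = new ExponentialSleepStrategy(2.0, 100, 10000);
        // The operation marks the loop done on success so the strategy stops retrying.
        Function1<ExponentialSleepStrategy.RetryLoop, String> operation = new AbstractFunction1<ExponentialSleepStrategy.RetryLoop, String>() {

            @Override
            public String apply(ExponentialSleepStrategy.RetryLoop loop) {
                String result = fetchSomething();
                loop.done();
                return result;
            }
        };
        String result = strategy.run(operation, new AbstractFunction2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit>() {

            @Override
            public BoxedUnit apply(Exception exception, ExponentialSleepStrategy.RetryLoop loop) {
                // Give up after a few sleeps; otherwise the strategy backs off and retries.
                if (loop.sleepCount() >= 3) {
                    loop.done();
                    throw new RuntimeException(exception);
                }
                return null;
            }
        }).getOrElse(new AbstractFunction0<String>() {

            @Override
            public String apply() {
                throw new RuntimeException("retries exhausted without a result");
            }
        });
        System.out.println(result);
    }
}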

Aggregations

AbstractFunction1 (scala.runtime.AbstractFunction1) 4
ImmutableMap (com.google.common.collect.ImmutableMap) 2
HashMap (java.util.HashMap) 2
Map (java.util.Map) 2
NotImplementedException (org.apache.commons.lang3.NotImplementedException) 2
PartitionInfo (org.apache.kafka.common.PartitionInfo) 2
TopicPartition (org.apache.kafka.common.TopicPartition) 2
TopicExistsException (org.apache.kafka.common.errors.TopicExistsException) 2
Partition (org.apache.samza.Partition) 2
SamzaException (org.apache.samza.SamzaException) 2
VisibleForTesting (com.google.common.annotations.VisibleForTesting) 1
Preconditions (com.google.common.base.Preconditions) 1
Stopwatch (com.google.common.base.Stopwatch) 1
ImmutableSet (com.google.common.collect.ImmutableSet) 1
Future (com.twitter.util.Future) 1
IOException (java.io.IOException) 1
Collections (java.util.Collections) 1
HashSet (java.util.HashSet) 1
List (java.util.List) 1
Optional (java.util.Optional) 1