use of scala.runtime.AbstractFunction1 in project samza by apache.
the class KafkaSystemAdmin method getSystemStreamPartitionCounts.
/**
 * Note! This method does not populate SystemStreamMetadata for each stream with real data.
 * Thus, this method should ONLY be used to get the number of partitions for each stream.
 * It will throw NotImplementedException if anyone tries to access the actual metadata.
 * @param streamNames set of streams for which to get partition counts
 * @param cacheTTL cache TTL, if caching the data
 * @return a map keyed on stream names; the partition count in each SystemStreamMetadata is the only meaningful output of this method
 */
@Override
public Map<String, SystemStreamMetadata> getSystemStreamPartitionCounts(Set<String> streamNames, long cacheTTL) {
  // This optimization omits actual metadata for performance. Instead, we inject a dummy for all partitions.
  final SystemStreamMetadata.SystemStreamPartitionMetadata dummySspm = new SystemStreamMetadata.SystemStreamPartitionMetadata(null, null, null) {
    String msg = "getSystemStreamPartitionCounts does not populate SystemStreamMetadata info. Only number of partitions";

    @Override
    public String getOldestOffset() {
      throw new NotImplementedException(msg);
    }

    @Override
    public String getNewestOffset() {
      throw new NotImplementedException(msg);
    }

    @Override
    public String getUpcomingOffset() {
      throw new NotImplementedException(msg);
    }
  };

  ExponentialSleepStrategy strategy = new ExponentialSleepStrategy(DEFAULT_EXPONENTIAL_SLEEP_BACK_OFF_MULTIPLIER, DEFAULT_EXPONENTIAL_SLEEP_INITIAL_DELAY_MS, DEFAULT_EXPONENTIAL_SLEEP_MAX_DELAY_MS);

  Function1<ExponentialSleepStrategy.RetryLoop, Map<String, SystemStreamMetadata>> fetchMetadataOperation = new AbstractFunction1<ExponentialSleepStrategy.RetryLoop, Map<String, SystemStreamMetadata>>() {
    @Override
    public Map<String, SystemStreamMetadata> apply(ExponentialSleepStrategy.RetryLoop loop) {
      Map<String, SystemStreamMetadata> allMetadata = new HashMap<>();
      streamNames.forEach(streamName -> {
        Map<Partition, SystemStreamMetadata.SystemStreamPartitionMetadata> partitionMetadata = new HashMap<>();
        List<PartitionInfo> partitionInfos = threadSafeKafkaConsumer.execute(consumer -> consumer.partitionsFor(streamName));
        LOG.debug("Stream {} has partitions {}", streamName, partitionInfos);
        partitionInfos.forEach(partitionInfo -> partitionMetadata.put(new Partition(partitionInfo.partition()), dummySspm));
        allMetadata.put(streamName, new SystemStreamMetadata(streamName, partitionMetadata));
      });
      loop.done();
      return allMetadata;
    }
  };

  Map<String, SystemStreamMetadata> result = strategy.run(fetchMetadataOperation, new AbstractFunction2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit>() {
    @Override
    public BoxedUnit apply(Exception exception, ExponentialSleepStrategy.RetryLoop loop) {
      if (loop.sleepCount() < MAX_RETRIES_ON_EXCEPTION) {
        LOG.warn(String.format("Fetching systemstreampartition counts for: %s threw an exception. Retrying.", streamNames), exception);
      } else {
        LOG.error(String.format("Fetching systemstreampartition counts for: %s threw an exception.", streamNames), exception);
        loop.done();
        throw new SamzaException(exception);
      }
      return null;
    }
  }).get();

  LOG.info("SystemStream partition counts for system {}: {}", systemName, result);
  return result;
}
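Why AbstractFunction1 appears here at all: scala.Function1 is a Scala trait that carries extra members (compose, andThen, and so on), so extending scala.runtime.AbstractFunction1 leaves Java code with only apply to override. A minimal, self-contained sketch of the pattern (the class and field names are illustrative, not from Samza):

import scala.Function1;
import scala.runtime.AbstractFunction1;

public class AbstractFunction1Sketch {
  // AbstractFunction1 supplies the Function1 plumbing; Java only overrides apply.
  static final Function1<Integer, Integer> INCREMENT = new AbstractFunction1<Integer, Integer>() {
    @Override
    public Integer apply(Integer i) {
      return i + 1;
    }
  };

  public static void main(String[] args) {
    System.out.println(INCREMENT.apply(41)); // prints 42
  }
}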
use of scala.runtime.AbstractFunction1 in project distributedlog by twitter.
the class TestAsyncReaderLock method testReaderLockCloseInAcquireCallback.
@Test(timeout = 60000)
public void testReaderLockCloseInAcquireCallback() throws Exception {
  final String name = runtime.getMethodName();
  DistributedLogManager dlm = createNewDLM(conf, name);
  BKAsyncLogWriter writer = (BKAsyncLogWriter) (dlm.startAsyncLogSegmentNonPartitioned());
  writer.write(DLMTestUtil.getLogRecordInstance(1L));
  writer.closeAndComplete();

  final CountDownLatch latch = new CountDownLatch(1);
  Future<AsyncLogReader> futureReader1 = dlm.getAsyncLogReaderWithLock(DLSN.InitialDLSN);
  futureReader1.flatMap(new ExceptionalFunction<AsyncLogReader, Future<Void>>() {
    @Override
    public Future<Void> applyE(AsyncLogReader reader) throws IOException {
      return reader.asyncClose().map(new AbstractFunction1<Void, Void>() {
        @Override
        public Void apply(Void result) {
          latch.countDown();
          return null;
        }
      });
    }
  });
  latch.await();
  dlm.close();
}
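The same shape works against any com.twitter.util.Future: map takes a Function1<A, B>, and an AbstractFunction1 subclass is the simplest way to supply one from Java. A minimal sketch, assuming only the com.twitter.util API used above (names are illustrative):

import com.twitter.util.Await;
import com.twitter.util.Future;
import scala.runtime.AbstractFunction1;

public class FutureMapSketch {
  public static void main(String[] args) throws Exception {
    // map transforms the Future's value once it is satisfied.
    Future<Integer> doubled = Future.value(21).map(new AbstractFunction1<Integer, Integer>() {
      @Override
      public Integer apply(Integer n) {
        return n * 2;
      }
    });
    System.out.println(Await.result(doubled)); // prints 42
  }
}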
use of scala.runtime.AbstractFunction1 in project distributedlog by twitter.
the class BKAsyncLogWriter method markEndOfStream.
Future<Long> markEndOfStream() {
  final Stopwatch stopwatch = Stopwatch.createStarted();
  Future<BKLogSegmentWriter> logSegmentWriterFuture;
  synchronized (this) {
    logSegmentWriterFuture = this.rollingFuture;
  }
  if (null == logSegmentWriterFuture) {
    logSegmentWriterFuture = getLogSegmentWriterForEndOfStream();
  }
  return logSegmentWriterFuture.flatMap(new AbstractFunction1<BKLogSegmentWriter, Future<Long>>() {
    @Override
    public Future<Long> apply(BKLogSegmentWriter w) {
      return w.markEndOfStream();
    }
  }).addEventListener(new OpStatsListener<Long>(markEndOfStreamOpStatsLogger, stopwatch));
}
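Note the contrast with the previous example: map is for plain value transformations, while flatMap takes a Function1 whose result is itself a Future, so the layers collapse into a single Future<Long> rather than a nested Future<Future<Long>>. A sketch of that flattening, under the same com.twitter.util assumptions (values are illustrative):

import com.twitter.util.Await;
import com.twitter.util.Future;
import scala.runtime.AbstractFunction1;

public class FutureFlatMapSketch {
  public static void main(String[] args) throws Exception {
    // The callback returns a Future<Long>; flatMap collapses the nesting.
    Future<Long> length = Future.value("segment").flatMap(new AbstractFunction1<String, Future<Long>>() {
      @Override
      public Future<Long> apply(String s) {
        return Future.value((long) s.length());
      }
    });
    System.out.println(Await.result(length)); // prints 7
  }
}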
use of scala.runtime.AbstractFunction1 in project samza by apache.
the class KafkaSystemAdmin method getSSPMetadata.
/**
 * Given a set of SystemStreamPartitions, fetch metadata from Kafka for each
 * of them, and return a map from each ssp to its SystemStreamPartitionMetadata.
 * This method will return null for the oldest and newest offsets
 * if a given SystemStreamPartition is empty. This method blocks and
 * retries until it gets a successful response from Kafka or exhausts
 * MAX_RETRIES_ON_EXCEPTION attempts, at which point it throws a SamzaException.
 * @param ssps the set of SystemStreamPartitions to fetch metadata for
 * @param retryBackoff retry backoff strategy
 * @return a map from each ssp to its sspMetadata, which carries the offsets
 */
Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> getSSPMetadata(Set<SystemStreamPartition> ssps, ExponentialSleepStrategy retryBackoff) {
  LOG.info("Fetching SSP metadata for: {}", ssps);
  List<TopicPartition> topicPartitions = ssps.stream().map(ssp -> new TopicPartition(ssp.getStream(), ssp.getPartition().getPartitionId())).collect(Collectors.toList());

  Function1<ExponentialSleepStrategy.RetryLoop, Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>> fetchTopicPartitionMetadataOperation = new AbstractFunction1<ExponentialSleepStrategy.RetryLoop, Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>>() {
    @Override
    public Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> apply(ExponentialSleepStrategy.RetryLoop loop) {
      OffsetsMaps topicPartitionsMetadata = fetchTopicPartitionsMetadata(topicPartitions);
      Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> sspToSSPMetadata = new HashMap<>();
      for (SystemStreamPartition ssp : ssps) {
        String oldestOffset = topicPartitionsMetadata.getOldestOffsets().get(ssp);
        String newestOffset = topicPartitionsMetadata.getNewestOffsets().get(ssp);
        String upcomingOffset = topicPartitionsMetadata.getUpcomingOffsets().get(ssp);
        sspToSSPMetadata.put(ssp, new SystemStreamMetadata.SystemStreamPartitionMetadata(oldestOffset, newestOffset, upcomingOffset));
      }
      loop.done();
      return sspToSSPMetadata;
    }
  };

  Function2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit> onExceptionRetryOperation = new AbstractFunction2<Exception, ExponentialSleepStrategy.RetryLoop, BoxedUnit>() {
    @Override
    public BoxedUnit apply(Exception exception, ExponentialSleepStrategy.RetryLoop loop) {
      if (loop.sleepCount() < MAX_RETRIES_ON_EXCEPTION) {
        LOG.warn(String.format("Fetching SSP metadata for: %s threw an exception. Retrying.", ssps), exception);
      } else {
        LOG.error(String.format("Fetching SSP metadata for: %s threw an exception.", ssps), exception);
        loop.done();
        throw new SamzaException(exception);
      }
      return null;
    }
  };

  Function0<Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>> fallbackOperation = new AbstractFunction0<Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata>>() {
    @Override
    public Map<SystemStreamPartition, SystemStreamMetadata.SystemStreamPartitionMetadata> apply() {
      throw new SamzaException("Failed to get SSP metadata");
    }
  };

  return retryBackoff.run(fetchTopicPartitionMetadataOperation, onExceptionRetryOperation).getOrElse(fallbackOperation);
}
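The getOrElse fallback above uses AbstractFunction0, the zero-argument sibling, which is how Scala encodes by-name (lazily evaluated) parameters for Java callers. A minimal sketch of the same idiom against scala.Option, a standard API rather than the Samza-internal RetryLoop (names are illustrative):

import scala.Option;
import scala.runtime.AbstractFunction0;

public class GetOrElseSketch {
  public static void main(String[] args) {
    Option<String> none = Option.empty();
    // getOrElse only invokes the Function0 when the Option is empty.
    String value = none.getOrElse(new AbstractFunction0<String>() {
      @Override
      public String apply() {
        return "fallback";
      }
    });
    System.out.println(value); // prints "fallback"
  }
}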