Use of akka.NotUsed in project torodb by torodb.
Class DefaultOplogApplier, method createBatcherFlow.
/**
 * Creates a flow that batches and analyzes an input of {@link AnalyzedOplogBatch remote jobs}.
 *
 * This flow tries to accumulate several remote jobs into a bigger one and does not emit until:
 * <ul>
 * <li>A maximum number of operations have been batched,</li>
 * <li>Or a maximum time has elapsed since the last emit,</li>
 * <li>Or the received job is not {@link AnalyzedOplogBatch#isReadyForMore()}.</li>
 * </ul>
 */
private Flow<OplogBatch, AnalyzedStreamElement, NotUsed> createBatcherFlow(ApplierContext context) {
  // Cut the current batch early as soon as an element reports it is not ready for more.
  Predicate<OplogBatch> finishBatchPredicate = (OplogBatch rawBatch) -> !rawBatch.isReadyForMore();
  // The cost of a batch is its number of operations.
  ToIntFunction<OplogBatch> costFunction = (rawBatch) -> rawBatch.count();
  Supplier<RawStreamElement> zeroFun = () -> RawStreamElement.INITIAL_ELEMENT;
  BiFunction<RawStreamElement, OplogBatch, RawStreamElement> acumFun =
      (streamElem, newBatch) -> streamElem.concat(newBatch);

  BatchAnalyzer batchAnalyzer = batchAnalyzerFactory.createBatchAnalyzer(context);
  return Flow.of(OplogBatch.class)
      .via(new BatchFlow<>(batchLimits.maxSize, batchLimits.maxPeriod,
          finishBatchPredicate, costFunction, zeroFun, acumFun))
      .filter(rawElem -> rawElem.rawBatch != null && !rawElem.rawBatch.isEmpty())
      .map(rawElem -> {
        List<OplogOperation> rawOps = rawElem.rawBatch.getOps();
        List<AnalyzedOplogBatch> analyzed = batchAnalyzer.apply(rawOps);
        return new AnalyzedStreamElement(rawElem, analyzed);
      });
}
Use of akka.NotUsed in project torodb by torodb.
Class AkkaDbCloner, method cloneCollection.
private void cloneCollection(MongodServer localServer, MongoConnection remoteConnection,
    String toDb, CloneOptions opts, Materializer materializer, Entry collToClone)
    throws MongoException {
  String collName = collToClone.getCollectionName();
  MongoCursor<BsonDocument> cursor = openCursor(remoteConnection, collName, opts);
  CollectionIterator iterator = new CollectionIterator(cursor, retrier);
  Source<BsonDocument, NotUsed> source = Source.fromIterator(() -> iterator)
      .buffer(cursorBatchBufferSize, OverflowStrategy.backpressure())
      .async();

  Flow<BsonDocument, Pair<Integer, Integer>, NotUsed> inserterFlow;
  if (maxParallelInsertTasks == 1) {
    inserterFlow = createCloneDocsWorker(localServer, toDb, collName);
  } else {
    // Fan documents out to maxParallelInsertTasks worker flows and merge
    // their per-batch results back into a single stream.
    Graph<FlowShape<BsonDocument, Pair<Integer, Integer>>, NotUsed> graph =
        GraphDSL.create(builder -> {
          UniformFanOutShape<BsonDocument, BsonDocument> balance =
              builder.add(Balance.create(maxParallelInsertTasks, false));
          UniformFanInShape<Pair<Integer, Integer>, Pair<Integer, Integer>> merge =
              builder.add(Merge.create(maxParallelInsertTasks, false));
          for (int i = 0; i < maxParallelInsertTasks; i++) {
            builder.from(balance.out(i))
                .via(builder.add(createCloneDocsWorker(localServer, toDb, collName).async()))
                .toInlet(merge.in(i));
          }
          return FlowShape.of(balance.in(), merge.out());
        });
    inserterFlow = Flow.fromGraph(graph);
  }

  try {
    source.via(inserterFlow)
        .fold(new Tuple3<>(0, 0, clock.instant()),
            (acum, batch) -> postInsertFold(toDb, collName, acum, batch))
        .toMat(Sink.foreach(tuple ->
            logCollectionCloning(toDb, collName, tuple.t1(), tuple.t2())), Keep.right())
        .run(materializer)
        .toCompletableFuture()
        .join();
  } catch (CompletionException ex) {
    // join() wraps stream failures in a CompletionException; unwrap and
    // rethrow the cause as a CloningException.
    Throwable cause = ex.getCause();
    if (cause != null) {
      throw new CloningException("Error while cloning " + toDb + "." + collName, cause);
    }
    throw ex;
  }
}
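The Balance/Merge construction in the else branch is a general Akka Streams pattern for running N copies of a flow in parallel. Below is a minimal, self-contained sketch (hypothetical code, not part of torodb) that extracts the pattern into a reusable helper:

import akka.NotUsed;
import akka.actor.ActorSystem;
import akka.stream.ActorMaterializer;
import akka.stream.FlowShape;
import akka.stream.Graph;
import akka.stream.Materializer;
import akka.stream.UniformFanInShape;
import akka.stream.UniformFanOutShape;
import akka.stream.javadsl.Balance;
import akka.stream.javadsl.Flow;
import akka.stream.javadsl.GraphDSL;
import akka.stream.javadsl.Merge;
import akka.stream.javadsl.Sink;
import akka.stream.javadsl.Source;

public class BalancedFlowSketch {

  // Wraps a worker flow so that up to `parallelism` copies of it run
  // concurrently, each on its own asynchronous island.
  static <In, Out> Flow<In, Out, NotUsed> balanced(
      Flow<In, Out, NotUsed> worker, int parallelism) {
    Graph<FlowShape<In, Out>, NotUsed> graph = GraphDSL.create(builder -> {
      UniformFanOutShape<In, In> balance =
          builder.add(Balance.create(parallelism, false));
      UniformFanInShape<Out, Out> merge =
          builder.add(Merge.create(parallelism, false));
      for (int i = 0; i < parallelism; i++) {
        builder.from(balance.out(i))
            .via(builder.add(worker.async()))
            .toInlet(merge.in(i));
      }
      return FlowShape.of(balance.in(), merge.out());
    });
    return Flow.fromGraph(graph);
  }

  public static void main(String[] args) {
    ActorSystem system = ActorSystem.create("demo");
    Materializer materializer = ActorMaterializer.create(system);

    Flow<Integer, Integer, NotUsed> square = Flow.of(Integer.class).map(i -> i * i);
    Source.range(1, 20)
        .via(balanced(square, 4))
        .runWith(Sink.foreach(System.out::println), materializer)
        .thenRun(system::terminate);
  }
}

Note that Merge emits elements in the order the workers complete them, so the parallel variant does not preserve upstream ordering; that is harmless for an order-insensitive downstream such as the counter fold in cloneCollection above.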