Use of io.pravega.controller.store.stream.Segment in project pravega by pravega.
The class ControllerService, method createTransaction.
@SuppressWarnings("ReturnCount")
public CompletableFuture<Pair<UUID, List<SegmentRange>>> createTransaction(final String scope, final String stream,
                                                                           final long lease, final long scaleGracePeriod) {
    Exceptions.checkNotNullOrEmpty(scope, "scope");
    Exceptions.checkNotNullOrEmpty(stream, "stream");
    return streamTransactionMetadataTasks.createTxn(scope, stream, lease, scaleGracePeriod, null).thenApply(pair -> {
        VersionedTransactionData data = pair.getKey();
        List<Segment> segments = pair.getValue();
        return new ImmutablePair<>(data.getId(), getSegmentRanges(segments, scope, stream));
    });
}
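For context, here is a minimal caller-side sketch of how this API might be consumed. The controllerService instance, the log field, and the literal scope, stream, lease, and grace-period arguments are all illustrative assumptions, not part of the project code above.

    // Hypothetical usage: create a txn and log its id plus the key ranges it spans.
    controllerService.createTransaction("myScope", "myStream", 10000L, 30000L)
            .thenAccept(pair -> {
                UUID txnId = pair.getKey();
                List<SegmentRange> ranges = pair.getValue();
                log.info("Created txn {} spanning {} segment ranges", txnId, ranges.size());
            });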
Use of io.pravega.controller.store.stream.Segment in project pravega by pravega.
The class TaskTest, method setUp.
@Before
public void setUp() throws ExecutionException, InterruptedException {
    final String stream2 = "stream2";
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final ScalingPolicy policy2 = ScalingPolicy.fixed(3);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scope(SCOPE).streamName(stream1).scalingPolicy(policy1).build();
    final StreamConfiguration configuration2 = StreamConfiguration.builder().scope(SCOPE).streamName(stream2).scalingPolicy(policy2).build();

    // region createStream
    streamStore.createScope(SCOPE).join();
    long start = System.currentTimeMillis();
    streamStore.createStream(SCOPE, stream1, configuration1, start, null, executor).join();
    streamStore.setState(SCOPE, stream1, State.ACTIVE, null, executor).join();
    streamStore.createStream(SCOPE, stream2, configuration2, start, null, executor).join();
    streamStore.setState(SCOPE, stream2, State.ACTIVE, null, executor).join();
    // endregion

    // region scaleSegments
    // stream1: seal segment 1 and replace it with two new segments covering [0.5, 0.75) and [0.75, 1.0).
    AbstractMap.SimpleEntry<Double, Double> segment1 = new AbstractMap.SimpleEntry<>(0.5, 0.75);
    AbstractMap.SimpleEntry<Double, Double> segment2 = new AbstractMap.SimpleEntry<>(0.75, 1.0);
    List<Integer> sealedSegments = Collections.singletonList(1);
    StartScaleResponse response = streamStore.startScale(SCOPE, stream1, sealedSegments, Arrays.asList(segment1, segment2), start + 20, false, null, executor).get();
    List<Segment> segmentsCreated = response.getSegmentsCreated();
    streamStore.setState(SCOPE, stream1, State.SCALING, null, executor).get();
    streamStore.scaleNewSegmentsCreated(SCOPE, stream1, sealedSegments, segmentsCreated, response.getActiveEpoch(), start + 20, null, executor).get();
    streamStore.scaleSegmentsSealed(SCOPE, stream1, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), segmentsCreated, response.getActiveEpoch(), start + 20, null, executor).get();

    // stream2: seal segments 0, 1 and 2 and replace them with three new segments.
    AbstractMap.SimpleEntry<Double, Double> segment3 = new AbstractMap.SimpleEntry<>(0.0, 0.5);
    AbstractMap.SimpleEntry<Double, Double> segment4 = new AbstractMap.SimpleEntry<>(0.5, 0.75);
    AbstractMap.SimpleEntry<Double, Double> segment5 = new AbstractMap.SimpleEntry<>(0.75, 1.0);
    List<Integer> sealedSegments1 = Arrays.asList(0, 1, 2);
    response = streamStore.startScale(SCOPE, stream2, sealedSegments1, Arrays.asList(segment3, segment4, segment5), start + 20, false, null, executor).get();
    segmentsCreated = response.getSegmentsCreated();
    streamStore.setState(SCOPE, stream2, State.SCALING, null, executor).get();
    streamStore.scaleNewSegmentsCreated(SCOPE, stream2, sealedSegments1, segmentsCreated, response.getActiveEpoch(), start + 20, null, executor).get();
    streamStore.scaleSegmentsSealed(SCOPE, stream2, sealedSegments1.stream().collect(Collectors.toMap(x -> x, x -> 0L)), segmentsCreated, response.getActiveEpoch(), start + 20, null, executor).get();
    // endregion
}
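The same four-call scale protocol (startScale, set State.SCALING, scaleNewSegmentsCreated, scaleSegmentsSealed) is repeated verbatim for both streams. A hedged refactoring that captures it in one helper; the extraction itself and the method name scale are illustrative, while the store calls and their arguments are exactly the ones used above:

    // Illustrative helper: run the full scale protocol against the metadata store.
    private void scale(String scope, String stream, List<Integer> sealedSegments,
                       List<AbstractMap.SimpleEntry<Double, Double>> newRanges,
                       long scaleTimestamp) throws ExecutionException, InterruptedException {
        StartScaleResponse response = streamStore.startScale(scope, stream, sealedSegments,
                newRanges, scaleTimestamp, false, null, executor).get();
        List<Segment> created = response.getSegmentsCreated();
        streamStore.setState(scope, stream, State.SCALING, null, executor).get();
        streamStore.scaleNewSegmentsCreated(scope, stream, sealedSegments, created,
                response.getActiveEpoch(), scaleTimestamp, null, executor).get();
        streamStore.scaleSegmentsSealed(scope, stream,
                sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)),
                created, response.getActiveEpoch(), scaleTimestamp, null, executor).get();
    }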
Use of io.pravega.controller.store.stream.Segment in project pravega by pravega.
The class ScaleRequestHandlerTest, method testScaleRequest.
@Test(timeout = 20000)
public void testScaleRequest() throws ExecutionException, InterruptedException {
    AutoScaleTask requestHandler = new AutoScaleTask(streamMetadataTasks, streamStore, executor);
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamRequestHandler multiplexer = new StreamRequestHandler(requestHandler, scaleRequestHandler, null, null, null, null, executor);
    // Send number of splits = 1.
    AutoScaleEvent request = new AutoScaleEvent(scope, stream, 2, AutoScaleEvent.UP, System.currentTimeMillis(), 1, false);
    CompletableFuture<ScaleOpEvent> request1 = new CompletableFuture<>();
    CompletableFuture<ScaleOpEvent> request2 = new CompletableFuture<>();
    EventStreamWriter<ControllerEvent> writer = createWriter(x -> {
        if (!request1.isDone()) {
            // Scale-up: segment 2 (key range [2/3, 1.0)) should be split into two halves.
            final ArrayList<AbstractMap.SimpleEntry<Double, Double>> expected = new ArrayList<>();
            double start = 2.0 / 3.0;
            double end = 1.0;
            double middle = (start + end) / 2;
            expected.add(new AbstractMap.SimpleEntry<>(start, middle));
            expected.add(new AbstractMap.SimpleEntry<>(middle, end));
            checkRequest(request1, x, Lists.newArrayList(2), expected);
        } else if (!request2.isDone()) {
            // Scale-down: segments 3 and 4 should be merged back into [2/3, 1.0).
            final ArrayList<AbstractMap.SimpleEntry<Double, Double>> expected = new ArrayList<>();
            double start = 2.0 / 3.0;
            double end = 1.0;
            expected.add(new AbstractMap.SimpleEntry<>(start, end));
            checkRequest(request2, x, Lists.newArrayList(3, 4), expected);
        }
    });
    when(clientFactory.createEventWriter(eq(Config.SCALE_STREAM_NAME), eq(new JavaSerializer<ControllerEvent>()), any())).thenReturn(writer);
    assertTrue(Futures.await(multiplexer.process(request)));
    assertTrue(Futures.await(request1));
    assertTrue(Futures.await(multiplexer.process(request1.get())));
    // Verify that the event was posted successfully.
    List<Segment> activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().noneMatch(z -> z.getNumber() == 2));
    // Verify that two splits are created even though we sent numOfSplits = 1 in the AutoScaleEvent.
    assertTrue(activeSegments.stream().anyMatch(z -> z.getNumber() == 3));
    assertTrue(activeSegments.stream().anyMatch(z -> z.getNumber() == 4));
    assertTrue(activeSegments.size() == 4);
    request = new AutoScaleEvent(scope, stream, 4, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false);
    assertTrue(Futures.await(multiplexer.process(request)));
    activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().anyMatch(z -> z.getNumber() == 4));
    assertTrue(activeSegments.size() == 4);
    request = new AutoScaleEvent(scope, stream, 3, AutoScaleEvent.DOWN, System.currentTimeMillis(), 0, false);
    assertTrue(Futures.await(multiplexer.process(request)));
    assertTrue(Futures.await(request2));
    assertTrue(Futures.await(multiplexer.process(request2.get())));
    activeSegments = streamStore.getActiveSegments(scope, stream, null, executor).get();
    assertTrue(activeSegments.stream().noneMatch(z -> z.getNumber() == 3));
    assertTrue(activeSegments.stream().noneMatch(z -> z.getNumber() == 4));
    assertTrue(activeSegments.stream().anyMatch(z -> z.getNumber() == 5));
    assertTrue(activeSegments.size() == 3);
    // Make the next request throw a non-retryable failure so that the test does not wait out the retry
    // attempts. This cuts the test duration drastically, because a retryable failure can keep retrying
    // for a few seconds, and if someone changes the retry durations or attempt counts in the retry helper
    // it would impact this test's running time. Hence we send an incorrect segmentsToSeal list, which
    // results in a non-retryable failure that fails immediately.
    assertFalse(Futures.await(multiplexer.process(new ScaleOpEvent(scope, stream, Lists.newArrayList(6), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), true, System.currentTimeMillis()))));
    assertTrue(activeSegments.stream().noneMatch(z -> z.getNumber() == 3));
    assertTrue(activeSegments.stream().noneMatch(z -> z.getNumber() == 4));
    assertTrue(activeSegments.stream().anyMatch(z -> z.getNumber() == 5));
    assertTrue(activeSegments.size() == 3);
    assertFalse(Futures.await(multiplexer.process(new AbortEvent(scope, stream, 0, UUID.randomUUID()))));
}
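The test relies on a checkRequest helper that this excerpt does not show. A plausible reconstruction is sketched below; it assumes ScaleOpEvent exposes lombok-style getters getSegmentsToSeal() and getNewRanges(), which is not confirmed by the excerpt, so treat this as illustrative rather than the project's actual helper:

    // Hypothetical reconstruction: complete the waiting future once the writer
    // observes a ScaleOpEvent carrying the expected segments and key ranges.
    private void checkRequest(CompletableFuture<ScaleOpEvent> future, ControllerEvent event,
                              List<Integer> segmentsToSeal,
                              List<AbstractMap.SimpleEntry<Double, Double>> newRanges) {
        if (event instanceof ScaleOpEvent) {
            ScaleOpEvent scaleOp = (ScaleOpEvent) event;
            if (scaleOp.getSegmentsToSeal().equals(segmentsToSeal)
                    && scaleOp.getNewRanges().equals(newRanges)) {
                future.complete(scaleOp);
            }
        }
    }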
Use of io.pravega.controller.store.stream.Segment in project pravega by pravega.
The class TableHelper, method computeEpochCutMap.
private static Map<Integer, Integer> computeEpochCutMap(byte[] historyTable, byte[] indexTable, byte[] segmentTable, Map<Integer, Long> streamCut) {
    Map<Integer, Integer> epochStreamCutMap = new HashMap<>();
    // Start from the newest segment in the stream cut; its creation record marks the
    // highest epoch any cut segment can belong to.
    int mostRecent = streamCut.keySet().stream().max(Comparator.naturalOrder()).get();
    Segment mostRecentSegment = getSegment(mostRecent, segmentTable);
    final Optional<HistoryRecord> highEpochRecord = segmentCreationHistoryRecord(mostRecent, mostRecentSegment.getStart(), indexTable, historyTable);
    List<Integer> toFind = new ArrayList<>(streamCut.keySet());
    // Walk the history table backwards, mapping each cut segment to the most recent
    // epoch in which it appears.
    Optional<HistoryRecord> epochRecord = highEpochRecord;
    while (epochRecord.isPresent() && !toFind.isEmpty()) {
        List<Integer> epochSegments = epochRecord.get().getSegments();
        Map<Boolean, List<Integer>> group = toFind.stream().collect(Collectors.groupingBy(epochSegments::contains));
        toFind = Optional.ofNullable(group.get(false)).orElse(Collections.emptyList());
        int epoch = epochRecord.get().getEpoch();
        List<Integer> found = Optional.ofNullable(group.get(true)).orElse(Collections.emptyList());
        found.forEach(x -> epochStreamCutMap.put(x, epoch));
        epochRecord = HistoryRecord.fetchPrevious(epochRecord.get(), historyTable);
    }
    return epochStreamCutMap;
}
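At each epoch, the loop partitions the still-unresolved cut segments with Collectors.groupingBy: segments present in the epoch get mapped, the rest keep searching in older records. A detached toy run of that grouping step, with illustrative segment numbers:

    // Toy illustration of the partitioning, detached from the table encoding.
    List<Integer> epochSegments = Arrays.asList(3, 4, 5);  // segments of the current epoch
    List<Integer> toFind = Arrays.asList(1, 4);            // cut segments not yet resolved
    Map<Boolean, List<Integer>> group =
            toFind.stream().collect(Collectors.groupingBy(epochSegments::contains));
    // group.get(true)  -> [4] : mapped to the current epoch
    // group.get(false) -> [1] : carried over to an earlier history record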
Use of io.pravega.controller.store.stream.Segment in project pravega by pravega.
The class StreamTransactionMetadataTasks, method createTxnBody.
/**
 * Creates a txn on the specified stream.
 *
 * Post-conditions:
 * 1. If txn creation succeeds, then
 *      (a) the txn node is created in the store,
 *      (b) txn segments are successfully created on the respective segment stores,
 *      (c) the txn is present in the host-txn index of the current host,
 *      (d) the txn's timeout is being tracked in the timeout service.
 *
 * 2. If the process fails after creating the txn node, but before responding to the client, then since the
 *    txn is present in the host-txn index, some other controller process shall abort the txn after maxLeaseValue.
 *
 * 3. If the timeout service tracks the timeout of the specified txn,
 *    then the txn is also present in the host-txn index of the current process.
 *
 * Invariants:
 * The following invariants are maintained throughout the execution of the createTxn, pingTxn and sealTxn methods.
 * 1. If the timeout service tracks the timeout of a txn, then the txn is also present in the host-txn index of the current process.
 * 2. If the txn znode is updated, then the txn is also present in the host-txn index of the current process.
 *
 * @param scope            scope name.
 * @param stream           stream name.
 * @param lease            txn lease.
 * @param scaleGracePeriod amount of time for which the txn may remain open after a scale operation is initiated.
 * @param ctx              context.
 * @return identifier of the created txn.
 */
CompletableFuture<Pair<VersionedTransactionData, List<Segment>>> createTxnBody(final String scope, final String stream, final long lease, final long scaleGracePeriod, final OperationContext ctx) {
    // Step 1. Validate parameters.
    CompletableFuture<Void> validate = validate(lease, scaleGracePeriod);
    long maxExecutionPeriod = Math.min(MAX_EXECUTION_TIME_MULTIPLIER * lease, Duration.ofDays(1).toMillis());
    UUID txnId = UUID.randomUUID();
    TxnResource resource = new TxnResource(scope, stream, txnId);

    // Step 2. Add txn to host-transaction index.
    CompletableFuture<Void> addIndex = validate
            .thenComposeAsync(ignore -> streamMetadataStore.addTxnToIndex(hostId, resource, 0), executor)
            .whenComplete((v, e) -> {
                if (e != null) {
                    log.debug("Txn={}, failed adding txn to host-txn index of host={}", txnId, hostId);
                } else {
                    log.debug("Txn={}, added txn to host-txn index of host={}", txnId, hostId);
                }
            });

    // Step 3. Create txn node in the store.
    CompletableFuture<VersionedTransactionData> txnFuture = addIndex
            .thenComposeAsync(ignore -> streamMetadataStore.createTransaction(scope, stream, txnId, lease, maxExecutionPeriod, scaleGracePeriod, ctx, executor), executor)
            .whenComplete((v, e) -> {
                if (e != null) {
                    log.debug("Txn={}, failed creating txn in store", txnId);
                } else {
                    log.debug("Txn={}, created in store", txnId);
                }
            });

    // Step 4. Notify segment stores about the new txn.
    CompletableFuture<List<Segment>> segmentsFuture = txnFuture.thenComposeAsync(
            txnData -> streamMetadataStore.getActiveSegments(scope, stream, txnData.getEpoch(), ctx, executor), executor);
    CompletableFuture<Void> notify = segmentsFuture
            .thenComposeAsync(activeSegments -> notifyTxnCreation(scope, stream, activeSegments, txnId), executor)
            .whenComplete((v, e) -> log.debug("Txn={}, notified segment stores", txnId));

    // Step 5. Start tracking the txn in the timeout service.
    return notify.whenCompleteAsync((result, ex) -> {
        int version = 0;
        long executionExpiryTime = System.currentTimeMillis() + maxExecutionPeriod;
        if (!txnFuture.isCompletedExceptionally()) {
            version = txnFuture.join().getVersion();
            executionExpiryTime = txnFuture.join().getMaxExecutionExpiryTime();
        }
        timeoutService.addTxn(scope, stream, txnId, version, lease, executionExpiryTime, scaleGracePeriod);
        log.debug("Txn={}, added to timeout service on host={}", txnId, hostId);
    }, executor).thenApplyAsync(v -> new ImmutablePair<>(txnFuture.join(), segmentsFuture.join()), executor);
}
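The ControllerService example at the top of this page consumes the returned pair via getKey/getValue. A hedged direct-call sketch of the same result; the literal arguments and the log field are illustrative assumptions:

    // Hypothetical direct usage: the txn data carries the id, version and expiry,
    // while the segment list names the active segments notified about the new txn.
    createTxnBody("myScope", "myStream", 10000L, 30000L, null)
            .thenAccept(pair -> {
                VersionedTransactionData txnData = pair.getLeft();
                List<Segment> txnSegments = pair.getRight();
                log.info("Txn={} open against {} segments until {}",
                        txnData.getId(), txnSegments.size(), txnData.getMaxExecutionExpiryTime());
            });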