use of io.pravega.controller.stream.api.grpc.v1.Controller.StreamCut in project pravega by pravega.
the class StreamMetadataTasks method getTruncationStreamCutByTimeLimit.
private CompletableFuture<Map<Long, Long>> getTruncationStreamCutByTimeLimit(String scope, String stream, OperationContext context,
                                                                             RetentionPolicy policy, RetentionSet retentionSet,
                                                                             Map<Long, Long> lowerBound) {
    long currentTime = retentionClock.get().get();
    // Fetch the stream cuts from the retention set that satisfy the min and max bounds: min points to the most recent
    // stream cut satisfying both bounds, while max refers to the oldest such stream cut in the retention set.
    // limits.key refers to max and limits.value refers to min.
    Map.Entry<StreamCutReferenceRecord, StreamCutReferenceRecord> limits =
            getBoundStreamCuts(policy, retentionSet, x -> currentTime - x.getRecordingTime());
    // If the subscriber lower bound (LB) is greater than (ahead of/after) the stream cut for the max time bound and
    // less than (behind/before) the stream cut for the min time bound, we can safely truncate at the lower bound.
    // Otherwise we truncate at the max time bound if it exists:
    // 1. if LB is greater than (ahead of/after) min => truncate at min
    // 2. if LB is less than (behind/before) max => truncate at max
    // 3. if LB is less than (behind/before) min && LB is greater than (ahead of/after) max => truncate at LB
    // 4. if LB is less than (behind/before) min && overlaps max => truncate at max
    // 5. if LB overlaps with both min and max => it holds both recent and older data;
    //    truncate at a stream cut less than (behind/before) max in this case.
    CompletableFuture<StreamCutRecord> limitMinFuture = limits.getValue() == null
            ? CompletableFuture.completedFuture(null)
            : streamMetadataStore.getStreamCutRecord(scope, stream, limits.getValue(), context, executor);
    // If the lower bound is empty, simply return min.
    if (lowerBound == null || lowerBound.isEmpty()) {
        return limitMinFuture.thenApply(min -> Optional.ofNullable(min).map(StreamCutRecord::getStreamCut).orElse(null));
    }
    Optional<StreamCutReferenceRecord> maxBoundRef = retentionSet.getRetentionRecords().stream()
            .filter(x -> currentTime - x.getRecordingTime() >= policy.getRetentionMax())
            .max(Comparator.comparingLong(StreamCutReferenceRecord::getRecordingTime));
    CompletableFuture<StreamCutRecord> limitMaxFuture = limits.getKey() == null
            ? CompletableFuture.completedFuture(null)
            : streamMetadataStore.getStreamCutRecord(scope, stream, limits.getKey(), context, executor);
    CompletableFuture<StreamCutRecord> maxBoundFuture = maxBoundRef
            .map(x -> streamMetadataStore.getStreamCutRecord(scope, stream, x, context, executor))
            .orElse(CompletableFuture.completedFuture(null));
    return CompletableFuture.allOf(limitMaxFuture, limitMinFuture, maxBoundFuture).thenCompose(v -> {
        StreamCutRecord limitMax = limitMaxFuture.join();
        StreamCutRecord limitMin = limitMinFuture.join();
        StreamCutRecord maxBound = maxBoundFuture.join();
        if (limitMin != null) {
            return streamMetadataStore.compareStreamCut(scope, stream, limitMin.getStreamCut(), lowerBound, context, executor)
                    .thenCompose(compareWithMin -> {
                        switch (compareWithMin) {
                            case EqualOrAfter:
                                // LB is behind min: truncate at LB, or at maxBound if LB overlaps limitMax (cases 2-4).
                                return truncateAtLowerBoundOrMax(scope, stream, context, lowerBound, limitMax, maxBound);
                            case Overlaps:
                                // Case 5: LB overlaps min; choose a stream cut from the retention set that lies before the lower bound.
                                return getStreamcutBeforeLowerbound(scope, stream, context, retentionSet, lowerBound);
                            case Before:
                                // Case 1: min is less than (behind/before) LB; truncate at min.
                                return CompletableFuture.completedFuture(limitMin.getStreamCut());
                            default:
                                throw new IllegalArgumentException("Invalid Compare streamcut response");
                        }
                    });
        } else {
            return CompletableFuture.completedFuture(null);
        }
    });
}
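The five-way case analysis above is easier to see in isolation. The following is a minimal, self-contained sketch of the same decision logic; the enum and method names are hypothetical illustrations and do not exist in Pravega, and real stream cuts are per-segment maps that can genuinely overlap rather than being totally ordered.

// Hypothetical, simplified model of the five truncation cases above.
enum CutOrder { BEFORE, EQUAL_OR_AFTER, OVERLAPS }

final class TruncationChoiceSketch {
    // Pick the truncation point given how the lower bound (LB) compares to the
    // min and max time-bound cuts, mirroring cases 1-5 in the comment block.
    static String choose(CutOrder minVsLb, CutOrder lbVsMax) {
        if (minVsLb == CutOrder.BEFORE) {
            return "truncate at min";              // case 1: min is behind LB
        }
        if (minVsLb == CutOrder.OVERLAPS) {
            return "truncate at a cut before max"; // case 5: LB spans min and max
        }
        // min is equal to or ahead of LB: decide against max
        switch (lbVsMax) {
            case EQUAL_OR_AFTER:
                return "truncate at LB";           // case 3: LB sits strictly between max and min
            case BEFORE:
            case OVERLAPS:
                return "truncate at max";          // cases 2 and 4
            default:
                throw new IllegalArgumentException();
        }
    }
}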
use of io.pravega.controller.stream.api.grpc.v1.Controller.StreamCut in project pravega by pravega.
the class StreamMetadataTasks method startTruncation.
public CompletableFuture<Boolean> startTruncation(String scope, String stream, Map<Long, Long> streamCut,
                                                  OperationContext contextOpt) {
    final OperationContext context = contextOpt != null
            ? contextOpt
            : streamMetadataStore.createStreamContext(scope, stream, ControllerService.nextRequestId());
    long requestId = context.getRequestId();
    return streamMetadataStore.getTruncationRecord(scope, stream, context, executor)
            .thenCompose(property -> {
                if (!property.getObject().isUpdating()) {
                    // 2. post event with new stream cut if no truncation is ongoing
                    return eventHelperFuture.thenCompose(eventHelper -> eventHelper.addIndexAndSubmitTask(
                            new TruncateStreamEvent(scope, stream, requestId),
                            // 3. start truncation by updating the metadata
                            () -> streamMetadataStore.startTruncation(scope, stream, streamCut, context, executor))
                            .thenApply(x -> {
                                log.debug(requestId, "Started truncation request for stream {}/{}", scope, stream);
                                return true;
                            }));
                } else {
                    log.error(requestId, "Another truncation in progress for {}/{}", scope, stream);
                    return CompletableFuture.completedFuture(false);
                }
            });
}
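A hedged usage sketch follows: per the method above, callers may pass null for the context (one is created internally) and branch on the boolean result. Note that startTruncation only begins the workflow; completion is driven by the posted TruncateStreamEvent. The wired-up StreamMetadataTasks instance is assumed, not constructed here.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import io.pravega.controller.task.Stream.StreamMetadataTasks;

final class TruncationCaller {
    // Sketch only: `tasks` stands for a fully configured StreamMetadataTasks (assumption).
    static CompletableFuture<Boolean> requestTruncation(StreamMetadataTasks tasks) {
        Map<Long, Long> cut = new HashMap<>();
        cut.put(0L, 1024L);   // segment id -> offset to truncate at
        cut.put(1L, 2048L);
        return tasks.startTruncation("myScope", "myStream", cut, null)
                    .whenComplete((started, ex) -> {
                        if (ex == null && !started) {
                            // another truncation is already in progress; callers may retry later
                        }
                    });
    }
}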
use of io.pravega.controller.stream.api.grpc.v1.Controller.StreamCut in project pravega by pravega.
the class ControllerServiceImplTest method createReaderGroupTests.
@Test
public void createReaderGroupTests() {
    createScopeAndStream(SCOPE1, STREAM1, ScalingPolicy.fixed(2));
    final Segment seg0 = new Segment(SCOPE1, STREAM1, 0L);
    final Segment seg1 = new Segment(SCOPE1, STREAM1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 10L, seg1, 10L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE1, STREAM1),
            new StreamCutImpl(Stream.of(SCOPE1, STREAM1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 200L, seg1, 300L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE1, STREAM1),
            new StreamCutImpl(Stream.of(SCOPE1, STREAM1), endStreamCut));
    ReaderGroupConfig config = ReaderGroupConfig.builder()
            .automaticCheckpointIntervalMillis(30000L)
            .groupRefreshTimeMillis(20000L)
            .maxOutstandingCheckpointRequest(2)
            .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
            .startingStreamCuts(startSC)
            .endingStreamCuts(endSC)
            .build();
    // "rg_1" is expected to be rejected as an invalid reader group name.
    ResultObserver<CreateReaderGroupResponse> result = new ResultObserver<>();
    String rgName = "rg_1";
    this.controllerService.createReaderGroup(ModelHelper.decode(SCOPE1, rgName, config), result);
    CreateReaderGroupResponse createRGStatus = result.get();
    assertEquals("Create Reader Group Invalid RG Name", CreateReaderGroupResponse.Status.INVALID_RG_NAME, createRGStatus.getStatus());
    // A valid name under a non-existent scope should report SCOPE_NOT_FOUND.
    ResultObserver<CreateReaderGroupResponse> result1 = new ResultObserver<>();
    rgName = "rg1";
    this.controllerService.createReaderGroup(ModelHelper.decode("somescope", rgName, config), result1);
    createRGStatus = result1.get();
    assertEquals("Create Reader Group Scope not found", CreateReaderGroupResponse.Status.SCOPE_NOT_FOUND, createRGStatus.getStatus());
}
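The two assertions above show that "rg_1" fails name validation while "rg1" is accepted. As a rough illustration only, a rule consistent with what this test asserts might look like the following; this is an assumption for readability, not Pravega's actual validation, which lives in its naming utilities:

// Illustration only: a naming rule consistent with the test above (assumption,
// not Pravega's real implementation).
static boolean looksLikeValidReaderGroupName(String name) {
    return name != null && name.matches("[\\p{Alnum}]+"); // "rg1" passes, "rg_1" does not
}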
use of io.pravega.controller.stream.api.grpc.v1.Controller.StreamCut in project pravega by pravega.
the class ControllerServiceImplTest method updateReaderGroupTests.
@Test
public void updateReaderGroupTests() {
    createScopeAndStream(SCOPE1, STREAM1, ScalingPolicy.fixed(2));
    String rgName = "rg1";
    UUID rgId = UUID.randomUUID();
    createReaderGroup(SCOPE1, STREAM1, rgName, rgId);
    final Segment seg0 = new Segment(SCOPE1, STREAM1, 0L);
    final Segment seg1 = new Segment(SCOPE1, STREAM1, 1L);
    ImmutableMap<Segment, Long> startStreamCut = ImmutableMap.of(seg0, 100L, seg1, 1000L);
    Map<Stream, StreamCut> startSC = ImmutableMap.of(Stream.of(SCOPE1, STREAM1),
            new StreamCutImpl(Stream.of(SCOPE1, STREAM1), startStreamCut));
    ImmutableMap<Segment, Long> endStreamCut = ImmutableMap.of(seg0, 2000L, seg1, 3000L);
    Map<Stream, StreamCut> endSC = ImmutableMap.of(Stream.of(SCOPE1, STREAM1),
            new StreamCutImpl(Stream.of(SCOPE1, STREAM1), endStreamCut));
    ReaderGroupConfig newConfig = ReaderGroupConfig.builder()
            .automaticCheckpointIntervalMillis(80000L)
            .groupRefreshTimeMillis(40000L)
            .maxOutstandingCheckpointRequest(5)
            .retentionType(ReaderGroupConfig.StreamDataRetention.AUTOMATIC_RELEASE_AT_LAST_CHECKPOINT)
            .startingStreamCuts(startSC)
            .endingStreamCuts(endSC)
            .build();
    newConfig = ReaderGroupConfig.cloneConfig(newConfig, rgId, 0L);
    // A valid update succeeds and bumps the generation to 1.
    ResultObserver<UpdateReaderGroupResponse> result = new ResultObserver<>();
    this.controllerService.updateReaderGroup(ModelHelper.decode(SCOPE1, rgName, newConfig), result);
    UpdateReaderGroupResponse rgStatus = result.get();
    assertEquals("Update Reader Group Status", UpdateReaderGroupResponse.Status.SUCCESS, rgStatus.getStatus());
    assertEquals("Updated Generation", 1L, rgStatus.getGeneration());
    // Re-submitting the same config (now carrying a stale generation) fails with INVALID_CONFIG.
    ResultObserver<UpdateReaderGroupResponse> result1 = new ResultObserver<>();
    this.controllerService.updateReaderGroup(ModelHelper.decode(SCOPE1, rgName, newConfig), result1);
    rgStatus = result1.get();
    assertEquals("Update Reader Group", UpdateReaderGroupResponse.Status.INVALID_CONFIG, rgStatus.getStatus());
    // Updating a reader group that does not exist reports RG_NOT_FOUND.
    ResultObserver<UpdateReaderGroupResponse> result2 = new ResultObserver<>();
    this.controllerService.updateReaderGroup(ModelHelper.decode(SCOPE1, "somerg", newConfig), result2);
    rgStatus = result2.get();
    assertEquals("Update Reader Group", UpdateReaderGroupResponse.Status.RG_NOT_FOUND, rgStatus.getStatus());
}
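The generation counter is what makes the second update fail: the test clones the config with generation 0, the successful update bumps the server-side generation to 1, and re-submitting the stale generation-0 config yields INVALID_CONFIG. Below is a minimal sketch of that compare-and-bump pattern; the class and method names are hypothetical, not Pravega code:

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical sketch of generation-checked config updates.
final class GenerationGuard {
    private final AtomicLong generation = new AtomicLong(0);

    // Accept an update only if the caller presents the current generation,
    // then advance it; a stale generation is rejected, like INVALID_CONFIG above.
    boolean tryUpdate(long presentedGeneration) {
        return generation.compareAndSet(presentedGeneration, presentedGeneration + 1);
    }
}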
use of io.pravega.controller.stream.api.grpc.v1.Controller.StreamCut in project pravega by pravega.
the class StreamMetadataTasksTest method truncateStreamTest.
@Test(timeout = 30000)
public void truncateStreamTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    streamStorePartialMock.createStream(SCOPE, "test", configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, "test", 0L).get().size());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    List<Map.Entry<Double, Double>> newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<>(0.5, 0.75));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.75, 1.0));
    ScaleResponse scaleOpResult = streamMetadataTasks.manualScale(SCOPE, "test", Collections.singletonList(1L), newRanges, 30, 0L).get();
    assertEquals(ScaleStreamStatus.STARTED, scaleOpResult.getStatus());
    ScaleOperationTask scaleTask = new ScaleOperationTask(streamMetadataTasks, streamStorePartialMock, executor);
    assertTrue(Futures.await(scaleTask.execute((ScaleOpEvent) requestEventWriter.eventQueue.take())));
    // start truncation
    StreamTruncationRecord truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).join().getObject();
    assertFalse(truncProp.isUpdating());
    // 1. happy day test: the update should succeed
    Map<Long, Long> streamCut = new HashMap<>();
    streamCut.put(0L, 1L);
    streamCut.put(1L, 11L);
    CompletableFuture<UpdateStreamStatus.Status> truncateFuture = streamMetadataTasks.truncateStream(SCOPE, "test", streamCut, 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(UpdateStreamStatus.Status.SUCCESS, truncateFuture.join());
    truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).join().getObject();
    assertEquals(streamCut, truncProp.getStreamCut());
    // 2. change state to SCALING
    streamStorePartialMock.setState(SCOPE, "test", State.SCALING, null, executor).get();
    // the truncate request posts the event, but processing it should fail while the stream is scaling
    long two = NameUtils.computeSegmentId(2, 1);
    long three = NameUtils.computeSegmentId(3, 1);
    Map<Long, Long> streamCut2 = new HashMap<>();
    streamCut2.put(0L, 1L);
    streamCut2.put(two, 1L);
    streamCut2.put(three, 1L);
    streamMetadataTasks.truncateStream(SCOPE, "test", streamCut2, 0L);
    AtomicBoolean loop = new AtomicBoolean(false);
    Futures.loop(() -> !loop.get(),
            () -> Futures.delayedFuture(() -> streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor), 1000, executor)
                    .thenApply(x -> x.getObject().isUpdating())
                    .thenAccept(loop::set),
            executor).join();
    // event posted, first step performed; now pick the event for processing
    TruncateStreamTask truncateStreamTask = new TruncateStreamTask(streamMetadataTasks, streamStorePartialMock, executor);
    TruncateStreamEvent taken = (TruncateStreamEvent) requestEventWriter.eventQueue.take();
    AssertExtensions.assertFutureThrows("", truncateStreamTask.execute(taken),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
    // now with state = ACTIVE, process the same event; it should succeed
    assertTrue(Futures.await(truncateStreamTask.execute(taken)));
    // 3. multiple back-to-back updates
    Map<Long, Long> streamCut3 = new HashMap<>();
    streamCut3.put(0L, 12L);
    streamCut3.put(two, 12L);
    streamCut3.put(three, 12L);
    CompletableFuture<UpdateStreamStatus.Status> truncateOp1 = streamMetadataTasks.truncateStream(SCOPE, "test", streamCut3, 0L);
    // ensure that the previous truncateStream has posted the event and set the status to updating
    // before issuing the second request
    AtomicBoolean loop2 = new AtomicBoolean(false);
    Futures.loop(() -> !loop2.get(),
            () -> streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor)
                    .thenApply(x -> x.getObject().isUpdating())
                    .thenAccept(loop2::set),
            executor).join();
    truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).join().getObject();
    assertTrue(truncProp.getStreamCut().equals(streamCut3) && truncProp.isUpdating());
    // post the second update request; it should fail immediately since the previous one has started
    Map<Long, Long> streamCut4 = new HashMap<>();
    streamCut4.put(0L, 14L);
    streamCut4.put(two, 14L);
    streamCut4.put(three, 14L);
    CompletableFuture<UpdateStreamStatus.Status> truncateOpFuture2 = streamMetadataTasks.truncateStream(SCOPE, "test", streamCut4, 0L);
    assertEquals(UpdateStreamStatus.Status.FAILURE, truncateOpFuture2.join());
    // process the event
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    // verify that the first update request also completes with success
    assertEquals(UpdateStreamStatus.Status.SUCCESS, truncateOp1.join());
    truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).join().getObject();
    assertTrue(truncProp.getStreamCut().equals(streamCut3) && !truncProp.isUpdating());
    streamStorePartialMock.setState(SCOPE, "test", State.TRUNCATING, null, executor).join();
    TruncateStreamEvent event = new TruncateStreamEvent(SCOPE, "test", System.nanoTime());
    assertTrue(Futures.await(truncateStreamTask.execute(event)));
    // execute the event again; it should complete without doing anything
    truncateStreamTask.execute(event).join();
    assertEquals(State.ACTIVE, streamStorePartialMock.getState(SCOPE, "test", true, null, executor).join());
    doReturn(CompletableFuture.completedFuture(true)).when(streamStorePartialMock).isScopeSealed(anyString(), any(), any());
    // re-creating an existing, active stream should report EXISTS_ACTIVE
    CompletableFuture<CreateStreamResponse> streamResponse = streamStorePartialMock.createStream(SCOPE, "test", configuration, System.currentTimeMillis(), null, executor);
    assertEquals(CreateStreamResponse.CreateStatus.EXISTS_ACTIVE, streamResponse.get().getStatus());
}
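Throughout this test, stream cuts are plain maps from segment id to offset, and post-scale segment ids come from NameUtils.computeSegmentId(segmentNumber, epoch), which packs the creation epoch into the high bits of the long id. A small sketch of how such a cut is assembled; the offsets are arbitrary example values:

import java.util.HashMap;
import java.util.Map;
import io.pravega.shared.NameUtils;

final class StreamCutExample {
    // Build a stream cut (segment id -> offset) covering segment 0 from the
    // original epoch plus the two segments created by the scale in epoch 1.
    static Map<Long, Long> exampleCut() {
        Map<Long, Long> cut = new HashMap<>();
        cut.put(0L, 12L);                               // segment 0, epoch 0
        cut.put(NameUtils.computeSegmentId(2, 1), 12L); // segment number 2, epoch 1
        cut.put(NameUtils.computeSegmentId(3, 1), 12L); // segment number 3, epoch 1
        return cut;
    }
}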