Use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.
From the class ControllerMetadataJsonSerializerTest, method testStreamTruncationRecord:
@Test
public void testStreamTruncationRecord() {
    Map<StreamSegmentRecord, Integer> span = new HashMap<>();
    span.put(StreamSegmentRecord.newSegmentRecord(0, 0, 0L, 0.0, 1.0), 0);
    span.put(StreamSegmentRecord.newSegmentRecord(1, 0, 0L, 0.0, 1.0), 0);
    Map<Long, Long> streamCut = new HashMap<>();
    streamCut.put(0L, 0L);
    Set<Long> set = new HashSet<>();
    set.add(0L);
    StreamTruncationRecord record = new StreamTruncationRecord(ImmutableMap.copyOf(streamCut),
            ImmutableMap.copyOf(span), ImmutableSet.copyOf(set), ImmutableSet.copyOf(set), 0L, true);
    testRecordSerialization(record, StreamTruncationRecord.class);
}
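The testRecordSerialization helper is not shown on this page. A minimal sketch of the round-trip check such a helper would perform, assuming Jackson-style toJson/fromJson methods on ControllerMetadataJsonSerializer (the method names here are assumptions for illustration, not the verified pravega API):

private <T> void testRecordSerialization(T record, Class<T> clazz) {
    ControllerMetadataJsonSerializer serializer = new ControllerMetadataJsonSerializer();
    String json = serializer.toJson(record);            // serialize the record to JSON (assumed method name)
    T roundTripped = serializer.fromJson(json, clazz);  // deserialize it back (assumed method name)
    assertEquals(record, roundTripped);                 // truncation records are value objects with equals()
}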
Use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.
From the class PersistentStreamBase, method computeTruncationRecord:
private CompletableFuture<StreamTruncationRecord> computeTruncationRecord(StreamTruncationRecord previous,
        Map<Long, Long> streamCut, ImmutableMap<StreamSegmentRecord, Integer> span, OperationContext context) {
    log.debug(context.getRequestId(), "computing truncation for stream {}/{}", scope, name);
    // Find the segments between the "previous" stream cut and the current stream cut: these are the segments to delete.
    // Note: segments referenced by the current stream cut are excluded.
    CompletableFuture<Map<StreamSegmentRecord, Integer>> previousSpanFuture = previous.getSpan().isEmpty()
            ? getEpochRecord(0, context).thenApply(epoch -> convertToSpan(epoch))
            : CompletableFuture.completedFuture(previous.getSpan());
    return previousSpanFuture
            .thenCompose(spanFrom -> segmentsBetweenStreamCutSpans(spanFrom, span, context))
            .thenCompose(segmentsBetween -> sizeBetweenStreamCuts(previous.getStreamCut(), streamCut, segmentsBetween, context)
                    .thenApply(sizeBetween -> {
                        ImmutableSet.Builder<Long> builder = ImmutableSet.builder();
                        segmentsBetween.stream().map(StreamSegmentRecord::segmentId)
                                .filter(x -> !streamCut.containsKey(x))
                                .forEach(builder::add);
                        return new StreamTruncationRecord(ImmutableMap.copyOf(streamCut), span,
                                previous.getDeletedSegments(), builder.build(),
                                previous.getSizeTill() + sizeBetween, true);
                    }));
}
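The toDelete set computed above is simply every segment lying between the previous and the new stream cut that is not itself referenced by the new cut. A standalone illustration of that filtering step, with hypothetical values (the segment ids assume epoch 0, where the segment id equals the segment number):

// Hypothetical data: segments 0 and 1 lie between the two cuts, but the new
// cut still references segment 1, so only segment 0 becomes eligible for deletion.
Map<Long, Long> newStreamCut = new HashMap<>();
newStreamCut.put(1L, 100L);
List<StreamSegmentRecord> segmentsBetween = Arrays.asList(
        StreamSegmentRecord.newSegmentRecord(0, 0, 0L, 0.0, 0.5),
        StreamSegmentRecord.newSegmentRecord(1, 0, 0L, 0.5, 1.0));
ImmutableSet.Builder<Long> builder = ImmutableSet.builder();
segmentsBetween.stream()
        .map(StreamSegmentRecord::segmentId)
        .filter(x -> !newStreamCut.containsKey(x))  // drop segments still referenced by the new cut
        .forEach(builder::add);
ImmutableSet<Long> toDelete = builder.build();      // contains only segment 0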
Use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.
From the class PersistentStreamBase, method startTruncation:
@Override
public CompletableFuture<Void> startTruncation(final Map<Long, Long> streamCut, OperationContext context) {
    Preconditions.checkNotNull(context, "operation context cannot be null");
    return getTruncationRecord(context).thenCompose(existing -> {
        Preconditions.checkNotNull(existing);
        Preconditions.checkArgument(!existing.getObject().isUpdating());
        long mostRecent = getMostRecent(streamCut);
        long oldest = getOldest(streamCut);
        int epochLow = NameUtils.getEpoch(oldest);
        int epochHigh = NameUtils.getEpoch(mostRecent);
        return fetchEpochs(epochLow, epochHigh, true, context).thenCompose(epochs -> {
            boolean isValid = isStreamCutValidInternal(streamCut, epochLow, epochs);
            Exceptions.checkArgument(isValid, "streamCut", "invalid stream cut");
            ImmutableMap<StreamSegmentRecord, Integer> span = computeStreamCutSpanInternal(streamCut, epochLow, epochHigh, epochs);
            StreamTruncationRecord previous = existing.getObject();
            // Verify that the new stream cut is at or after the previous truncation point.
            Exceptions.checkArgument(streamCutEqualOrAfter(streamCut, span, previous.getStreamCut(), previous.getSpan()),
                    "StreamCut", "Supplied streamcut is behind previous truncation point");
            return computeTruncationRecord(previous, streamCut, span, context)
                    .thenCompose(prop -> Futures.toVoid(setTruncationData(new VersionedMetadata<>(prop, existing.getVersion()), context)));
        });
    });
}
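A minimal caller-side sketch of startTruncation, assuming a PersistentStreamBase instance named stream and an OperationContext named context (both hypothetical here); the stream cut maps segment ids to truncation offsets:

// Truncate at offset 100 in segment 0 and offset 50 in segment 1 (hypothetical values).
Map<Long, Long> streamCut = new HashMap<>();
streamCut.put(0L, 100L);
streamCut.put(1L, 50L);
// On success the truncation record is persisted with updating = true; a later
// completeTruncation step (exercised by the tests below) resets that flag.
stream.startTruncation(streamCut, context).join();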
Use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.
From the class RequestHandlersTest, method truncateSealedStream:
@SuppressWarnings("unchecked")
@Test(timeout = 300000)
public void truncateSealedStream() throws Exception {
    String stream = "truncateSealed";
    StreamMetadataStore streamStore = getStore();
    StreamMetadataStore streamStoreSpied = spy(getStore());
    StreamConfiguration config = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
    streamStore.createStream(scope, stream, config, System.currentTimeMillis(), null, executor).join();
    streamStore.setState(scope, stream, State.ACTIVE, null, executor).join();
    streamStore.setState(scope, stream, State.SEALED, null, executor).join();
    TruncateStreamTask requestHandler = new TruncateStreamTask(streamMetadataTasks, streamStoreSpied, executor);
    CompletableFuture<Void> wait = new CompletableFuture<>();
    CompletableFuture<Void> signal = new CompletableFuture<>();
    Map<Long, Long> map = new HashMap<>();
    map.put(0L, 100L);
    streamStore.startTruncation(scope, stream, map, null, executor).join();
    TruncateStreamEvent event = new TruncateStreamEvent(scope, stream, System.currentTimeMillis());
    // Park the spied completeTruncation call until the test releases it.
    doAnswer(x -> {
        signal.complete(null);
        wait.join();
        return streamStore.completeTruncation(x.getArgument(0), x.getArgument(1), x.getArgument(2),
                x.getArgument(3), x.getArgument(4));
    }).when(streamStoreSpied).completeTruncation(anyString(), anyString(), any(), any(), any());
    CompletableFuture<Void> future = CompletableFuture.completedFuture(null)
            .thenComposeAsync(v -> requestHandler.execute(event), executor);
    signal.join();
    wait.complete(null);
    AssertExtensions.assertSuppliedFutureThrows("Updating sealed stream job should fail", () -> future,
            e -> Exceptions.unwrap(e) instanceof UnsupportedOperationException);
    // Validate: the truncation record is no longer updating and the stream remains sealed.
    VersionedMetadata<StreamTruncationRecord> versioned = streamStore.getTruncationRecord(scope, stream, null, executor).join();
    assertFalse(versioned.getObject().isUpdating());
    assertEquals(2, getVersionNumber(versioned.getVersion()));
    assertEquals(State.SEALED, streamStore.getState(scope, stream, true, null, executor).join());
    streamStore.close();
}
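The wait/signal pair above is a latch idiom these tests use to deterministically interleave a spied store call with the test's own steps. Stripped to its essentials (names hypothetical; callRealMethod stands in for the delegated call):

CompletableFuture<Void> signal = new CompletableFuture<>(); // completed by the spy when it is entered
CompletableFuture<Void> wait = new CompletableFuture<>();   // completed by the test to release the spy
doAnswer(invocation -> {
    signal.complete(null);              // tell the test the spied call is now in flight
    wait.join();                        // park until the test releases it
    return invocation.callRealMethod(); // then delegate to the real implementation
}).when(spiedStore).completeTruncation(anyString(), anyString(), any(), any(), any());
signal.join();       // test thread: block until the spied call has started
wait.complete(null); // test thread: run assertions, then release the parked call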
Use of io.pravega.controller.store.stream.records.StreamTruncationRecord in project pravega by pravega.
From the class StreamMetadataTasksTest, method truncateStreamTest:
@Test(timeout = 30000)
public void truncateStreamTest() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    streamStorePartialMock.createStream(SCOPE, "test", configuration, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
    assertNotEquals(0, consumer.getCurrentSegments(SCOPE, "test", 0L).get().size());
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    List<Map.Entry<Double, Double>> newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<>(0.5, 0.75));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.75, 1.0));
    ScaleResponse scaleOpResult = streamMetadataTasks.manualScale(SCOPE, "test",
            Collections.singletonList(1L), newRanges, 30, 0L).get();
    assertEquals(ScaleStreamStatus.STARTED, scaleOpResult.getStatus());
    ScaleOperationTask scaleTask = new ScaleOperationTask(streamMetadataTasks, streamStorePartialMock, executor);
    assertTrue(Futures.await(scaleTask.execute((ScaleOpEvent) requestEventWriter.eventQueue.take())));
    // start truncation
    StreamTruncationRecord truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor)
            .join().getObject();
    assertFalse(truncProp.isUpdating());
    // 1. happy day test: the update should succeed.
    Map<Long, Long> streamCut = new HashMap<>();
    streamCut.put(0L, 1L);
    streamCut.put(1L, 11L);
    CompletableFuture<UpdateStreamStatus.Status> truncateFuture =
            streamMetadataTasks.truncateStream(SCOPE, "test", streamCut, 0L);
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    assertEquals(UpdateStreamStatus.Status.SUCCESS, truncateFuture.join());
    truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).join().getObject();
    assertEquals(streamCut, truncProp.getStreamCut());
    // 2. change state to scaling
    streamStorePartialMock.setState(SCOPE, "test", State.SCALING, null, executor).get();
    // A truncate call should now fail without posting the event.
    long two = NameUtils.computeSegmentId(2, 1);
    long three = NameUtils.computeSegmentId(3, 1);
    Map<Long, Long> streamCut2 = new HashMap<>();
    streamCut2.put(0L, 1L);
    streamCut2.put(two, 1L);
    streamCut2.put(three, 1L);
    streamMetadataTasks.truncateStream(SCOPE, "test", streamCut2, 0L);
    AtomicBoolean loop = new AtomicBoolean(false);
    Futures.loop(() -> !loop.get(),
            () -> Futures.delayedFuture(() -> streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor),
                            1000, executor)
                    .thenApply(x -> x.getObject().isUpdating())
                    .thenAccept(loop::set),
            executor).join();
    // The event is posted and the first step performed; now pick up the event for processing.
    TruncateStreamTask truncateStreamTask = new TruncateStreamTask(streamMetadataTasks, streamStorePartialMock, executor);
    TruncateStreamEvent taken = (TruncateStreamEvent) requestEventWriter.eventQueue.take();
    AssertExtensions.assertFutureThrows("", truncateStreamTask.execute(taken),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
    // Now, with state = ACTIVE, process the same event; it should succeed.
    assertTrue(Futures.await(truncateStreamTask.execute(taken)));
    // 3. multiple back-to-back updates.
    Map<Long, Long> streamCut3 = new HashMap<>();
    streamCut3.put(0L, 12L);
    streamCut3.put(two, 12L);
    streamCut3.put(three, 12L);
    CompletableFuture<UpdateStreamStatus.Status> truncateOp1 =
            streamMetadataTasks.truncateStream(SCOPE, "test", streamCut3, 0L);
    // Ensure that the previous truncateStream has posted the event and set the status to updating;
    // only then issue the second truncateStream.
    AtomicBoolean loop2 = new AtomicBoolean(false);
    Futures.loop(() -> !loop2.get(),
            () -> streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor)
                    .thenApply(x -> x.getObject().isUpdating())
                    .thenAccept(loop2::set),
            executor).join();
    truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).join().getObject();
    assertTrue(truncProp.getStreamCut().equals(streamCut3) && truncProp.isUpdating());
    // Post the second update request. It should fail right away because the previous one has already started.
    Map<Long, Long> streamCut4 = new HashMap<>();
    streamCut4.put(0L, 14L);
    streamCut4.put(two, 14L);
    streamCut4.put(three, 14L);
    CompletableFuture<UpdateStreamStatus.Status> truncateOpFuture2 =
            streamMetadataTasks.truncateStream(SCOPE, "test", streamCut4, 0L);
    assertEquals(UpdateStreamStatus.Status.FAILURE, truncateOpFuture2.join());
    // process event
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    // Verify that the first update request also completes, with success.
    assertEquals(UpdateStreamStatus.Status.SUCCESS, truncateOp1.join());
    truncProp = streamStorePartialMock.getTruncationRecord(SCOPE, "test", null, executor).join().getObject();
    assertTrue(truncProp.getStreamCut().equals(streamCut3) && !truncProp.isUpdating());
    streamStorePartialMock.setState(SCOPE, "test", State.TRUNCATING, null, executor).join();
    TruncateStreamEvent event = new TruncateStreamEvent(SCOPE, "test", System.nanoTime());
    assertTrue(Futures.await(truncateStreamTask.execute(event)));
    // Execute the event again; it should complete without doing anything.
    truncateStreamTask.execute(event).join();
    assertEquals(State.ACTIVE, streamStorePartialMock.getState(SCOPE, "test", true, null, executor).join());
    doReturn(CompletableFuture.completedFuture(true)).when(streamStorePartialMock).isScopeSealed(anyString(), any(), any());
    CompletableFuture<CreateStreamResponse> streamResponse =
            streamStorePartialMock.createStream(SCOPE, "test", configuration, System.currentTimeMillis(), null, executor);
    assertEquals(CreateStreamResponse.CreateStatus.EXISTS_ACTIVE, streamResponse.get().getStatus());
}
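The segment ids two and three above pack the creation epoch into the id itself. A quick illustration of the layout assumed by NameUtils.computeSegmentId and NameUtils.getEpoch (epoch in the upper 32 bits, segment number in the lower 32):

long two = NameUtils.computeSegmentId(2, 1); // segment number 2, created in epoch 1
// expected layout: (epoch << 32) + segmentNumber
assertEquals((1L << 32) + 2, two);
assertEquals(1, NameUtils.getEpoch(two));    // the epoch is recovered from the high bits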