Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.
The class StreamMetadataTasksTest, method updateStreamTest.
@Test(timeout = 30000)
public void updateStreamTest() throws Exception {
assertNotEquals(0, consumer.getCurrentSegments(SCOPE, stream1, 0L).get().size());
WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
streamMetadataTasks.setRequestEventWriter(requestEventWriter);
StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(5)).build();
StreamConfigurationRecord configProp = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join().getObject();
assertFalse(configProp.isUpdating());
// 1. happy-day test
// the update should succeed
CompletableFuture<UpdateStreamStatus.Status> updateOperationFuture = streamMetadataTasks.updateStream(SCOPE, stream1, streamConfiguration, 0L);
assertTrue(Futures.await(processEvent(requestEventWriter)));
assertEquals(UpdateStreamStatus.Status.SUCCESS, updateOperationFuture.join());
configProp = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join().getObject();
assertTrue(configProp.getStreamConfiguration().equals(streamConfiguration));
streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(6)).build();
// 2. change state to scaling
streamStorePartialMock.setState(SCOPE, stream1, State.SCALING, null, executor).get();
// call updateStream; the event gets posted, but processing it should fail while the stream is in SCALING state
streamMetadataTasks.updateStream(SCOPE, stream1, streamConfiguration, 0L);
AtomicBoolean loop = new AtomicBoolean(false);
Futures.loop(() -> !loop.get(),
        () -> streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor)
                .thenApply(x -> x.getObject().isUpdating())
                .thenAccept(loop::set),
        executor).join();
// event posted, first step performed. now pick the event for processing
UpdateStreamTask updateStreamTask = new UpdateStreamTask(streamMetadataTasks, streamStorePartialMock, bucketStore, executor);
UpdateStreamEvent taken = (UpdateStreamEvent) requestEventWriter.eventQueue.take();
AssertExtensions.assertFutureThrows("", updateStreamTask.execute(taken), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
// with the state back to ACTIVE, process the same event; it should now succeed.
assertTrue(Futures.await(updateStreamTask.execute(taken)));
// 3. multiple back to back updates.
StreamConfiguration streamConfiguration1 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 1, 2)).build();
CompletableFuture<UpdateStreamStatus.Status> updateOperationFuture1 = streamMetadataTasks.updateStream(SCOPE, stream1, streamConfiguration1, 0L);
// ensure that the previous updateStream call has posted the event and set the status to updating
// before issuing the second updateStream
AtomicBoolean loop2 = new AtomicBoolean(false);
Futures.loop(() -> !loop2.get(),
        () -> streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor)
                .thenApply(x -> x.getObject().isUpdating())
                .thenAccept(loop2::set),
        executor).join();
configProp = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join().getObject();
assertTrue(configProp.getStreamConfiguration().equals(streamConfiguration1) && configProp.isUpdating());
StreamConfiguration streamConfiguration2 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(7)).build();
// post the second update request. It should fail immediately because the previous update is still in progress.
CompletableFuture<UpdateStreamStatus.Status> updateOperationFuture2 = streamMetadataTasks.updateStream(SCOPE, stream1, streamConfiguration2, 0L);
assertEquals(UpdateStreamStatus.Status.FAILURE, updateOperationFuture2.join());
// process event
assertTrue(Futures.await(processEvent(requestEventWriter)));
// verify that first request for update also completes with success.
assertEquals(UpdateStreamStatus.Status.SUCCESS, updateOperationFuture1.join());
configProp = streamStorePartialMock.getConfigurationRecord(SCOPE, stream1, null, executor).join().getObject();
assertTrue(configProp.getStreamConfiguration().equals(streamConfiguration1) && !configProp.isUpdating());
streamStorePartialMock.setState(SCOPE, stream1, State.UPDATING, null, executor).join();
UpdateStreamEvent event = new UpdateStreamEvent(SCOPE, stream1, System.nanoTime());
assertTrue(Futures.await(updateStreamTask.execute(event)));
// execute the event again. It should complete without doing anything.
updateStreamTask.execute(event).join();
assertEquals(State.ACTIVE, streamStorePartialMock.getState(SCOPE, stream1, true, null, executor).join());
}
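The test above drives the controller-side update workflow directly. For comparison, here is a minimal client-side sketch using the public admin API (io.pravega.client.admin.StreamManager); the controller URI, scope, and stream names are placeholders, not values from the test.

import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;
import java.net.URI;

public class UpdateStreamSketch {
    public static void main(String[] args) {
        // Placeholder controller endpoint; substitute the endpoint of your deployment.
        URI controllerUri = URI.create("tcp://localhost:9090");
        try (StreamManager streamManager = StreamManager.create(controllerUri)) {
            // Mirrors the happy-day case above: switch the stream to a fixed policy of 5 segments.
            StreamConfiguration newConfig = StreamConfiguration.builder()
                    .scalingPolicy(ScalingPolicy.fixed(5))
                    .build();
            boolean accepted = streamManager.updateStream("myScope", "stream1", newConfig);
            System.out.println("update accepted: " + accepted);
        }
    }
}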
Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.
The class StreamMetadataStoreTest, method concurrentStartScaleTest.
@Test(timeout = 30000)
public void concurrentStartScaleTest() throws Exception {
final String scope = "ScopeScale";
final String stream = "StreamScale1";
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
long start = System.currentTimeMillis();
store.createScope(scope, null, executor).get();
store.createStream(scope, stream, configuration, start, null, executor).get();
store.setState(scope, stream, State.ACTIVE, null, executor).get();
// set minimum number of segments to 1
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
store.startUpdateConfiguration(scope, stream, config, null, executor).join();
VersionedMetadata<StreamConfigurationRecord> configRecord = store.getConfigurationRecord(scope, stream, null, executor).join();
store.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
// region concurrent start scale
// Test scenario where one request starts and completes as the other is waiting on StartScale.createEpochTransition
SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.0, 1.0);
List<Long> segmentsToSeal = Arrays.asList(0L, 1L);
long scaleTs = System.currentTimeMillis();
@SuppressWarnings("unchecked") PersistentStreamBase streamObj = (PersistentStreamBase) ((AbstractStreamMetadataStore) store).getStream(scope, stream, null);
PersistentStreamBase streamObjSpied = spy(streamObj);
CompletableFuture<Void> latch = new CompletableFuture<>();
CompletableFuture<Void> updateEpochTransitionCalled = new CompletableFuture<>();
doAnswer(x -> streamObj.getEpochTransitionNode(x.getArgument(0))).when(streamObjSpied).getEpochTransitionNode(any());
doAnswer(x -> streamObj.updateEpochTransitionNode(x.getArgument(0), x.getArgument(1))).when(streamObjSpied).updateEpochTransitionNode(any(), any());
doAnswer(x -> CompletableFuture.runAsync(() -> {
VersionedMetadata<EpochTransitionRecord> argument = x.getArgument(0);
EpochTransitionRecord record = argument.getObject();
if (record.getSegmentsToSeal().containsAll(segmentsToSeal)) {
// wait until we create epoch transition outside of this method
updateEpochTransitionCalled.complete(null);
latch.join();
}
}).thenCompose(v -> streamObj.updateEpochTransitionNode(x.getArgument(0), x.getArgument(1)))).when(streamObjSpied).updateEpochTransitionNode(any(), any());
StreamOperationContext context = new StreamOperationContext(((AbstractStreamMetadataStore) store).getScope(scope, null), streamObjSpied, 0L);
// the following submitScale should block inside the stubbed updateEpochTransitionNode until the latch completes
CompletableFuture<VersionedMetadata<EpochTransitionRecord>> response = store.submitScale(scope, stream, segmentsToSeal, Collections.singletonList(segment2), scaleTs, null, context, executor);
updateEpochTransitionCalled.join();
// run a second scale end to end while the first scale is blocked before writing its epoch transition record
SimpleEntry<Double, Double> segment2p = new SimpleEntry<>(0.0, 0.5);
List<Long> segmentsToSeal2 = Collections.singletonList(0L);
long scaleTs2 = System.currentTimeMillis();
streamObjSpied.getEpochRecord(0, context).thenCompose(epochRecord -> {
    EpochTransitionRecord record = RecordHelper.computeEpochTransition(epochRecord, segmentsToSeal2, Collections.singletonList(segment2p), scaleTs2);
    return streamObjSpied.getEpochTransition(context)
            .thenCompose(existing -> streamObjSpied.updateEpochTransitionNode(new VersionedMetadata<>(record, existing.getVersion()), context))
            .thenApply(v -> new VersionedMetadata<>(record, v));
}).thenCompose(epochRecord -> store.getVersionedState(scope, stream, context, executor)
        .thenCompose(state -> store.updateVersionedState(scope, stream, State.SCALING, state, context, executor)
                .thenCompose(updatedState -> store.startScale(scope, stream, false, epochRecord, updatedState, context, executor))
                .thenCompose(x -> store.scaleCreateNewEpochs(scope, stream, epochRecord, context, executor))
                .thenCompose(x -> store.scaleSegmentsSealed(scope, stream, segmentsToSeal2.stream().collect(Collectors.toMap(r -> r, r -> 0L)), epochRecord, context, executor))
                .thenCompose(x -> store.completeScale(scope, stream, epochRecord, context, executor))))
        .thenCompose(y -> store.setState(scope, stream, State.ACTIVE, context, executor)).join();
latch.complete(null);
// first scale should fail in attempting to update epoch transition record.
AssertExtensions.assertSuppliedFutureThrows("WriteConflict in start scale", () -> response, e -> Exceptions.unwrap(e) instanceof StoreException.WriteConflictException);
VersionedMetadata<EpochTransitionRecord> versioned = streamObj.getEpochTransition(context).join();
EpochTransitionRecord epochTransitionRecord = versioned.getObject();
assertEquals(EpochTransitionRecord.EMPTY, epochTransitionRecord);
// the concurrent scale has completed and reset the epoch transition record; set the state to SCALING before retrying.
VersionedMetadata<State> state = store.getVersionedState(scope, stream, context, executor).join();
state = store.updateVersionedState(scope, stream, State.SCALING, state, context, executor).join();
// now call startScale with the stale, reset epoch transition record. This should throw an exception.
AssertExtensions.assertFutureThrows("epoch transition was supposed to be invalid", store.startScale(scope, stream, false, versioned, state, context, executor), e -> Exceptions.unwrap(e) instanceof IllegalStateException);
// verify that state is reset to ACTIVE
assertEquals(State.ACTIVE, store.getState(scope, stream, true, context, executor).join());
// endregion
}
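This test hinges on optimistic concurrency: mutations take a VersionedMetadata record and lose with StoreException.WriteConflictException when the version has moved underneath them. Below is a self-contained sketch of that compare-and-swap pattern; the Versioned and VersionedCell types are hypothetical illustrations, not Pravega classes.

import java.util.concurrent.atomic.AtomicReference;
import java.util.function.UnaryOperator;

final class Versioned<T> {
    final T value;
    final int version;
    Versioned(T value, int version) { this.value = value; this.version = version; }
}

final class VersionedCell<T> {
    private final AtomicReference<Versioned<T>> cell;

    VersionedCell(T initial) { this.cell = new AtomicReference<>(new Versioned<>(initial, 0)); }

    Versioned<T> get() { return cell.get(); }

    // Conditional write: succeeds only if the caller still holds the current version,
    // the analogue of passing a VersionedMetadata record into updateEpochTransitionNode.
    boolean update(Versioned<T> expected, T newValue) {
        return cell.compareAndSet(expected, new Versioned<>(newValue, expected.version + 1));
    }

    // Re-read and re-apply until the conditional write wins; a lost race corresponds to
    // the WriteConflictException asserted in the test above.
    T updateWithRetry(UnaryOperator<T> transform) {
        while (true) {
            Versioned<T> current = get();
            if (update(current, transform.apply(current.value))) {
                return get().value;
            }
        }
    }
}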
Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.
The class StreamMetadataStoreTest, method updateTest.
@Test(timeout = 30000)
public void updateTest() throws Exception {
final String scope = "ScopeUpdate";
final String stream = "StreamUpdate";
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
long start = System.currentTimeMillis();
store.createScope(scope, null, executor).get();
store.createStream(scope, stream, configuration, start, null, executor).get();
store.setState(scope, stream, State.ACTIVE, null, executor).get();
final StreamConfiguration configuration2 = StreamConfiguration.builder().scalingPolicy(policy).build();
StreamConfigurationRecord configProperty = store.getConfigurationRecord(scope, stream, null, executor).join().getObject();
assertFalse(configProperty.isUpdating());
// run update configuration multiple times
assertTrue(Futures.await(store.startUpdateConfiguration(scope, stream, configuration2, null, executor)));
store.setState(scope, stream, State.UPDATING, null, executor).join();
configProperty = store.getConfigurationRecord(scope, stream, null, executor).join().getObject();
assertTrue(configProperty.isUpdating());
final StreamConfiguration configuration3 = StreamConfiguration.builder().scalingPolicy(policy).build();
assertFalse(Futures.await(store.startUpdateConfiguration(scope, stream, configuration3, null, executor)));
VersionedMetadata<StreamConfigurationRecord> existing = store.getConfigurationRecord(scope, stream, null, executor).join();
assertTrue(Futures.await(store.completeUpdateConfiguration(scope, stream, existing, null, executor)));
configProperty = store.getConfigurationRecord(scope, stream, null, executor).join().getObject();
assertEquals(configuration2, configProperty.getStreamConfiguration());
assertTrue(Futures.await(store.startUpdateConfiguration(scope, stream, configuration3, null, executor)));
existing = store.getConfigurationRecord(scope, stream, null, executor).join();
assertTrue(Futures.await(store.completeUpdateConfiguration(scope, stream, existing, null, executor)));
store.setState(scope, stream, State.ACTIVE, null, executor).join();
}
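The two-phase pattern above (startUpdateConfiguration, then completeUpdateConfiguration against the versioned record read back in between) recurs throughout these tests. The fragment below is a minimal helper sketch, assuming the same StreamMetadataStore methods the test calls; the helper name is hypothetical, and the UPDATING/ACTIVE state transitions are handled separately, exactly as the test shows.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;

// Assumes the StreamMetadataStore and StreamConfiguration types used in the tests above.
static CompletableFuture<Void> updateConfiguration(StreamMetadataStore store, String scope, String stream,
                                                   StreamConfiguration newConfig, Executor executor) {
    // Phase 1: record the pending configuration. Phase 2: read back the versioned record
    // and complete the update against that exact version.
    return store.startUpdateConfiguration(scope, stream, newConfig, null, executor)
            .thenCompose(v -> store.getConfigurationRecord(scope, stream, null, executor))
            .thenCompose(record -> store.completeUpdateConfiguration(scope, stream, record, null, executor))
            .thenAccept(v -> { });
}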
Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.
The class ZKStreamMetadataStoreTest, method testScaleMetadata.
@Test
public void testScaleMetadata() throws Exception {
String scope = "testScopeScale";
String stream = "testStreamScale";
ScalingPolicy policy = ScalingPolicy.fixed(3);
StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
SimpleEntry<Double, Double> segment1 = new SimpleEntry<>(0.0, 0.5);
SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.5, 1.0);
List<Map.Entry<Double, Double>> newRanges = Arrays.asList(segment1, segment2);
store.createScope(scope, null, executor).get();
store.createStream(scope, stream, configuration, System.currentTimeMillis(), null, executor).get();
store.setState(scope, stream, State.ACTIVE, null, executor).get();
// set minimum number of segments to 1 so that we can also test scale downs
configuration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
store.startUpdateConfiguration(scope, stream, configuration, null, executor).join();
VersionedMetadata<StreamConfigurationRecord> configRecord = store.getConfigurationRecord(scope, stream, null, executor).join();
store.completeUpdateConfiguration(scope, stream, configRecord, null, executor).join();
List<ScaleMetadata> scaleIncidents = store.getScaleMetadata(scope, stream, 0, Long.MAX_VALUE, null, executor).get();
assertTrue(scaleIncidents.size() == 1);
assertTrue(scaleIncidents.get(0).getSegments().size() == 3);
// scale
scale(scope, stream, scaleIncidents.get(0).getSegments(), newRanges);
scaleIncidents = store.getScaleMetadata(scope, stream, 0, Long.MAX_VALUE, null, executor).get();
assertTrue(scaleIncidents.size() == 2);
assertTrue(scaleIncidents.get(0).getSegments().size() == 3);
assertTrue(scaleIncidents.get(1).getSegments().size() == 2);
// scale again
scale(scope, stream, scaleIncidents.get(1).getSegments(), newRanges);
scaleIncidents = store.getScaleMetadata(scope, stream, 0, Long.MAX_VALUE, null, executor).get();
assertTrue(scaleIncidents.size() == 3);
assertTrue(scaleIncidents.get(0).getSegments().size() == 3);
assertTrue(scaleIncidents.get(1).getSegments().size() == 2);
assertTrue(scaleIncidents.get(2).getSegments().size() == 2);
// scale again
scale(scope, stream, scaleIncidents.get(2).getSegments(), newRanges);
scaleIncidents = store.getScaleMetadata(scope, stream, 0, Long.MAX_VALUE, null, executor).get();
assertTrue(scaleIncidents.size() == 4);
assertTrue(scaleIncidents.get(0).getSegments().size() == 3);
assertTrue(scaleIncidents.get(1).getSegments().size() == 2);
assertTrue(scaleIncidents.get(2).getSegments().size() == 2);
assertTrue(scaleIncidents.get(3).getSegments().size() == 2);
}
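The scale history accumulated above can be read back the same way the test does. A small usage sketch reusing the scope, stream, store, and executor variables from the test; only getSegments() is shown because that is the accessor the test exercises.

// Print one line per scale incident: the first entry reflects stream creation (3 segments),
// each later entry a scale down to 2 segments, matching the assertions above.
List<ScaleMetadata> history = store.getScaleMetadata(scope, stream, 0, Long.MAX_VALUE, null, executor).get();
for (int i = 0; i < history.size(); i++) {
    System.out.println("scale record " + i + ": " + history.get(i).getSegments().size() + " segments");
}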
Use of io.pravega.controller.store.stream.records.StreamConfigurationRecord in project pravega by pravega.
The class ZkStreamTest, method testZkStream.
@Test(timeout = 30000)
public void testZkStream() throws Exception {
double keyChunk = 1.0 / 5;
final ScalingPolicy policy = ScalingPolicy.fixed(5);
@Cleanup final StreamMetadataStore store = new ZKStreamMetadataStore(cli, executor);
final String streamName = "test";
store.createScope(SCOPE, null, executor).get();
StreamConfiguration streamConfig = StreamConfiguration.builder().scalingPolicy(policy).build();
store.createStream(SCOPE, streamName, streamConfig, System.currentTimeMillis(), null, executor).get();
store.setState(SCOPE, streamName, State.ACTIVE, null, executor).get();
OperationContext context = store.createStreamContext(SCOPE, streamName, 0L);
// set minimum number of segments to 1 so that we can also test scale downs
streamConfig = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
store.startUpdateConfiguration(SCOPE, streamName, streamConfig, null, executor).join();
VersionedMetadata<StreamConfigurationRecord> configRecord = store.getConfigurationRecord(SCOPE, streamName, null, executor).join();
store.completeUpdateConfiguration(SCOPE, streamName, configRecord, null, executor).join();
List<StreamSegmentRecord> segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
assertEquals(segments.size(), 5);
assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, 1L, 2L, 3L, 4L).contains(x.segmentId())));
long start = segments.get(0).getCreationTime();
assertEquals(store.getConfiguration(SCOPE, streamName, context, executor).get(), streamConfig);
List<Map.Entry<Double, Double>> newRanges;
// existing range 0 = 0 - .2, 1 = .2 - .4, 2 = .4 - .6, 3 = .6 - .8, 4 = .8 - 1.0
// 3, 4 -> 5 = .6 - 1.0
newRanges = Collections.singletonList(new AbstractMap.SimpleEntry<>(3 * keyChunk, 1.0));
long scale1 = start + 10000;
ArrayList<Long> sealedSegments = Lists.newArrayList(3L, 4L);
long five = computeSegmentId(5, 1);
VersionedMetadata<EpochTransitionRecord> versioned = store.submitScale(SCOPE, streamName, sealedSegments, newRanges, scale1, null, context, executor).get();
VersionedMetadata<State> state = store.getVersionedState(SCOPE, streamName, null, executor).join();
state = store.updateVersionedState(SCOPE, streamName, State.SCALING, state, null, executor).join();
versioned = store.startScale(SCOPE, streamName, false, versioned, state, null, executor).join();
store.scaleCreateNewEpochs(SCOPE, streamName, versioned, context, executor).get();
store.scaleSegmentsSealed(SCOPE, streamName, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, context, executor).get();
store.completeScale(SCOPE, streamName, versioned, null, executor).join();
store.setState(SCOPE, streamName, State.ACTIVE, null, executor).join();
segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
assertEquals(segments.size(), 4);
assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, 1L, 2L, five).contains(x.segmentId())));
// 1 -> 6 = .2 - .3, 7 = .3 - .4
// 2,5 -> 8 = .4 - 1.0
newRanges = Arrays.asList(new AbstractMap.SimpleEntry<>(keyChunk, 0.3), new AbstractMap.SimpleEntry<>(0.3, 2 * keyChunk), new AbstractMap.SimpleEntry<>(2 * keyChunk, 1.0));
long scale2 = scale1 + 10000;
ArrayList<Long> sealedSegments1 = Lists.newArrayList(1L, 2L, five);
long six = computeSegmentId(6, 2);
long seven = computeSegmentId(7, 2);
long eight = computeSegmentId(8, 2);
versioned = store.submitScale(SCOPE, streamName, sealedSegments1, newRanges, scale2, null, context, executor).get();
EpochTransitionRecord response = versioned.getObject();
state = store.getVersionedState(SCOPE, streamName, null, executor).join();
state = store.updateVersionedState(SCOPE, streamName, State.SCALING, state, null, executor).join();
versioned = store.startScale(SCOPE, streamName, false, versioned, state, null, executor).join();
store.scaleCreateNewEpochs(SCOPE, streamName, versioned, context, executor).get();
store.scaleSegmentsSealed(SCOPE, streamName, sealedSegments1.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, context, executor).get();
store.completeScale(SCOPE, streamName, versioned, null, executor).join();
store.setState(SCOPE, streamName, State.ACTIVE, null, executor).join();
segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
assertEquals(segments.size(), 4);
assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, six, seven, eight).contains(x.segmentId())));
// 7 -> 9 = .3 - .35, 10 = .35 - .6
// 8 -> 10 = .35 - .6, 11 = .6 - 1.0
newRanges = Arrays.asList(new AbstractMap.SimpleEntry<>(0.3, 0.35), new AbstractMap.SimpleEntry<>(0.35, 3 * keyChunk), new AbstractMap.SimpleEntry<>(3 * keyChunk, 1.0));
long scale3 = scale2 + 10000;
long nine = computeSegmentId(9, 3);
long ten = computeSegmentId(10, 3);
long eleven = computeSegmentId(11, 3);
ArrayList<Long> sealedSegments2 = Lists.newArrayList(seven, eight);
versioned = store.submitScale(SCOPE, streamName, sealedSegments2, newRanges, scale3, null, context, executor).get();
response = versioned.getObject();
state = store.getVersionedState(SCOPE, streamName, null, executor).join();
state = store.updateVersionedState(SCOPE, streamName, State.SCALING, state, null, executor).join();
store.startScale(SCOPE, streamName, false, versioned, state, null, executor).join();
store.scaleCreateNewEpochs(SCOPE, streamName, versioned, context, executor).get();
store.scaleSegmentsSealed(SCOPE, streamName, sealedSegments2.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, context, executor).get();
store.completeScale(SCOPE, streamName, versioned, null, executor).join();
store.setState(SCOPE, streamName, State.ACTIVE, null, executor).join();
segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
assertEquals(segments.size(), 5);
assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, six, nine, ten, eleven).contains(x.segmentId())));
Map<Long, List<Long>> successors = store.getSuccessors(SCOPE, streamName, 0L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
assertTrue(successors.isEmpty());
successors = store.getSuccessors(SCOPE, streamName, 1L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
assertTrue(successors.size() == 2 && successors.containsKey(six) && successors.get(six).containsAll(Collections.singleton(1L)) && successors.containsKey(seven) && successors.get(seven).containsAll(Collections.singleton(1L)));
successors = store.getSuccessors(SCOPE, streamName, 2L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
assertTrue(successors.size() == 1 && successors.containsKey(eight) && successors.get(eight).containsAll(Lists.newArrayList(2L, five)));
successors = store.getSuccessors(SCOPE, streamName, 3L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
assertTrue(successors.size() == 1 && successors.containsKey(five) && successors.get(five).containsAll(Lists.newArrayList(3L, 4L)));
successors = store.getSuccessors(SCOPE, streamName, 4L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
assertTrue(successors.size() == 1 && successors.containsKey(five) && successors.get(five).containsAll(Lists.newArrayList(3L, 4L)));
successors = store.getSuccessors(SCOPE, streamName, five, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
assertTrue(successors.size() == 1 && successors.containsKey(eight) && successors.get(eight).containsAll(Lists.newArrayList(2L, five)));
successors = store.getSuccessors(SCOPE, streamName, six, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
assertTrue(successors.isEmpty());
successors = store.getSuccessors(SCOPE, streamName, seven, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
assertTrue(successors.size() == 2 && successors.containsKey(nine) && successors.get(nine).containsAll(Collections.singleton(seven)) && successors.containsKey(ten) && successors.get(ten).containsAll(Lists.newArrayList(seven, eight)));
successors = store.getSuccessors(SCOPE, streamName, eight, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
assertTrue(successors.size() == 2 && successors.containsKey(eleven) && successors.get(eleven).containsAll(Collections.singleton(eight)) && successors.containsKey(ten) && successors.get(ten).containsAll(Lists.newArrayList(seven, eight)));
successors = store.getSuccessors(SCOPE, streamName, nine, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
assertTrue(successors.isEmpty());
successors = store.getSuccessors(SCOPE, streamName, ten, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
assertTrue(successors.isEmpty());
successors = store.getSuccessors(SCOPE, streamName, eleven, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
assertTrue(successors.isEmpty());
// start -1
Map<Long, Long> historicalSegments = store.getSegmentsAtHead(SCOPE, streamName, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
assertEquals(historicalSegments.size(), 5);
assertTrue(historicalSegments.keySet().containsAll(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)));
// start + 1
List<Long> segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 0, context, executor).get().stream().map(x -> x.segmentId()).collect(Collectors.toList());
assertEquals(segmentsInEpoch.size(), 5);
assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)));
// scale1
segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 1, context, executor).get().stream().map(x -> x.segmentId()).collect(Collectors.toList());
assertEquals(segmentsInEpoch.size(), 4);
assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, 1L, 2L, five)));
// scale2
segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 2, context, executor).get().stream().map(x -> x.segmentId()).collect(Collectors.toList());
assertEquals(segmentsInEpoch.size(), 4);
assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, six, seven, eight)));
// scale3
segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 3, context, executor).get().stream().map(x -> x.segmentId()).collect(Collectors.toList());
assertEquals(segmentsInEpoch.size(), 5);
assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, six, nine, ten, eleven)));
assertFalse(store.isSealed(SCOPE, streamName, context, executor).get());
assertNotEquals(0, store.getActiveSegments(SCOPE, streamName, context, executor).get().size());
store.setSealed(SCOPE, streamName, context, executor).get();
assertTrue(store.isSealed(SCOPE, streamName, context, executor).get());
assertEquals(0, store.getActiveSegments(SCOPE, streamName, context, executor).get().size());
// seal an already sealed stream.
store.setSealed(SCOPE, streamName, context, executor).get();
assertTrue(store.isSealed(SCOPE, streamName, context, executor).get());
assertEquals(0, store.getActiveSegments(SCOPE, streamName, context, executor).get().size());
// seal a non-existent stream.
AssertExtensions.assertFutureThrows("", store.setSealed(SCOPE, "nonExistentStream", null, executor), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
store.markCold(SCOPE, streamName, 0L, System.currentTimeMillis() + 1000, null, executor).get();
assertTrue(store.isCold(SCOPE, streamName, 0L, null, executor).get());
Thread.sleep(1000);
assertFalse(store.isCold(SCOPE, streamName, 0L, null, executor).get());
store.markCold(SCOPE, streamName, 0L, System.currentTimeMillis() + 1000, null, executor).get();
store.removeMarker(SCOPE, streamName, 0L, null, executor).get();
assertFalse(store.isCold(SCOPE, streamName, 0L, null, executor).get());
}
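computeSegmentId(segmentNumber, epoch), used above to derive five, six, and the later ids, composes a segment number and its creation epoch into a single long. Below is a minimal sketch of the assumed packing, with the epoch in the high 32 bits; verify the exact layout against io.pravega.shared.NameUtils in the Pravega sources before relying on it.

// Assumed layout: creation epoch in the high 32 bits, segment number in the low 32 bits.
static long computeSegmentIdSketch(int segmentNumber, int epoch) {
    return ((long) epoch << 32) | (segmentNumber & 0xFFFFFFFFL);
}
// e.g. computeSegmentIdSketch(5, 1) would correspond to the "five" created by the first scale above.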