Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
Class StreamMetadataStoreTest, method sizeTest.
@Test(timeout = 30000)
public void sizeTest() throws Exception {
final String scope = "ScopeSize";
final String stream = "StreamSize";
final ScalingPolicy policy = ScalingPolicy.fixed(2);
final RetentionPolicy retentionPolicy = RetentionPolicy.builder().retentionType(RetentionPolicy.RetentionType.SIZE).retentionParam(100L).build();
final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).retentionPolicy(retentionPolicy).build();
long start = System.currentTimeMillis();
store.createScope(scope, null, executor).get();
store.createStream(scope, stream, configuration, start, null, executor).get();
store.setState(scope, stream, State.ACTIVE, null, executor).get();
bucketStore.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope, stream, executor).get();
Set<String> streams = bucketStore.getStreamsForBucket(BucketStore.ServiceType.RetentionService, 0, executor).get();
assertTrue(streams.contains(String.format("%s/%s", scope, stream)));
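// registering the stream with the RetentionService bucket store makes it visible to the controller's background retention processing.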
// region Size Computation on stream cuts on epoch 0
Map<Long, Long> map1 = new HashMap<>();
map1.put(0L, 10L);
map1.put(1L, 10L);
Long size = store.getSizeTillStreamCut(scope, stream, map1, Optional.empty(), null, executor).join();
assertEquals(20L, (long) size);
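// the cut {segment 0 -> 10, segment 1 -> 10} lies entirely within epoch 0, so its size is simply 10 + 10 = 20 bytes.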
long recordingTime = System.currentTimeMillis();
StreamCutRecord streamCut1 = new StreamCutRecord(recordingTime, size, ImmutableMap.copyOf(map1));
store.addStreamCutToRetentionSet(scope, stream, streamCut1, null, executor).get();
Map<Long, Long> map2 = new HashMap<>();
map2.put(0L, 20L);
map2.put(1L, 20L);
size = store.getSizeTillStreamCut(scope, stream, map2, Optional.empty(), null, executor).join();
assertEquals(40L, (long) size);
StreamCutRecord streamCut2 = new StreamCutRecord(recordingTime + 10, size, ImmutableMap.copyOf(map2));
store.addStreamCutToRetentionSet(scope, stream, streamCut2, null, executor).get();
Map<Long, Long> map3 = new HashMap<>();
map3.put(0L, 30L);
map3.put(1L, 30L);
size = store.getSizeTillStreamCut(scope, stream, map3, Optional.empty(), null, executor).join();
assertEquals(60L, (long) size);
StreamCutRecord streamCut3 = new StreamCutRecord(recordingTime + 20, 60L, ImmutableMap.copyOf(map3));
store.addStreamCutToRetentionSet(scope, stream, streamCut3, null, executor).get();
// endregion
// region Size Computation on multiple epochs
long scaleTs = System.currentTimeMillis();
SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.0, 0.5);
SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.5, 1.0);
List<Long> scale1SealedSegments = Lists.newArrayList(0L, 1L);
VersionedMetadata<EpochTransitionRecord> versioned = store.submitScale(scope, stream, scale1SealedSegments, Arrays.asList(segment2, segment3), scaleTs, null, null, executor).join();
VersionedMetadata<State> state = store.getVersionedState(scope, stream, null, executor).get();
state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).get();
store.startScale(scope, stream, false, versioned, state, null, executor).join();
store.scaleCreateNewEpochs(scope, stream, versioned, null, executor).join();
store.scaleSegmentsSealed(scope, stream, scale1SealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 40L)), versioned, null, executor).join();
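// both sealed segments (0 and 1) are recorded with a final size of 40 bytes each; the cross-epoch size computations below build on these sealed sizes.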
store.completeScale(scope, stream, versioned, null, executor).join();
store.setState(scope, stream, State.ACTIVE, null, executor).get();
// complex stream cut - across two epochs
Map<Long, Long> map4 = new HashMap<>();
map4.put(0L, 40L);
map4.put(computeSegmentId(3, 1), 10L);
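// expected size: 40 (offset in segment 0) + 40 (segment 1 contributes its full sealed size, since its successor segment 3 is in the cut) + 10 (offset in segment 3) = 90.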
size = store.getSizeTillStreamCut(scope, stream, map4, Optional.empty(), null, executor).join();
assertEquals(Long.valueOf(90L), size);
StreamCutRecord streamCut4 = new StreamCutRecord(recordingTime + 30, size, ImmutableMap.copyOf(map4));
store.addStreamCutToRetentionSet(scope, stream, streamCut4, null, executor).get();
// simple stream cut on epoch 2
Map<Long, Long> map5 = new HashMap<>();
map5.put(computeSegmentId(2, 1), 10L);
map5.put(computeSegmentId(3, 1), 10L);
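// this cut lies entirely in epoch 1, so both sealed segments contribute their full 40 bytes and the new segments contribute their offsets: 40 + 40 + 10 + 10 = 100.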
size = store.getSizeTillStreamCut(scope, stream, map5, Optional.empty(), null, executor).join();
assertEquals(100L, (long) size);
StreamCutRecord streamCut5 = new StreamCutRecord(recordingTime + 30, size, ImmutableMap.copyOf(map5));
store.addStreamCutToRetentionSet(scope, stream, streamCut5, null, executor).get();
// endregion
}
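The submitScale, startScale, scaleCreateNewEpochs, scaleSegmentsSealed, completeScale sequence above reappears, with different streams and ranges, in the remaining examples on this page. A condensed sketch of that sequence is shown below; it reuses only the store calls already shown above, while the helper name, parameter names, and parameter type declarations are illustrative assumptions rather than part of the project.
// assumes the same imports and test context as the snippet above
private void runScaleSketch(StreamMetadataStore store, String scope, String stream,
                            List<Long> segmentsToSeal, List<Map.Entry<Double, Double>> newRanges,
                            long sealedSizePerSegment, Executor executor) {
    long scaleTs = System.currentTimeMillis();
    // propose the scale: which segments to seal and which key ranges to create in their place
    VersionedMetadata<EpochTransitionRecord> versioned =
            store.submitScale(scope, stream, segmentsToSeal, newRanges, scaleTs, null, null, executor).join();
    // move the stream into SCALING, using the versioned state for optimistic concurrency
    VersionedMetadata<State> state = store.getVersionedState(scope, stream, null, executor).join();
    state = store.updateVersionedState(scope, stream, State.SCALING, state, null, executor).join();
    versioned = store.startScale(scope, stream, false, versioned, state, null, executor).join();
    // create the segments of the new epoch, then record the final sizes of the sealed segments
    store.scaleCreateNewEpochs(scope, stream, versioned, null, executor).join();
    store.scaleSegmentsSealed(scope, stream,
            segmentsToSeal.stream().collect(Collectors.toMap(x -> x, x -> sealedSizePerSegment)),
            versioned, null, executor).join();
    // complete the scale and return the stream to ACTIVE
    store.completeScale(scope, stream, versioned, null, executor).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
}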
Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
Class StreamTest, method testConcurrentGetSuccessorScale.
private void testConcurrentGetSuccessorScale(StreamMetadataStore store, BiFunction<String, String, Stream> createStream) throws Exception {
final ScalingPolicy policy = ScalingPolicy.fixed(1);
final String streamName = "test";
String scopeName = "test";
store.createScope(scopeName, null, executorService()).join();
Stream stream = spy(createStream.apply(scopeName, streamName));
StreamConfiguration streamConfig = StreamConfiguration.builder().scalingPolicy(policy).build();
store.createStream(scopeName, streamName, streamConfig, System.currentTimeMillis(), null, executorService()).join();
store.setState(scopeName, streamName, State.ACTIVE, null, executorService()).join();
List<Map.Entry<Double, Double>> newRanges;
newRanges = Arrays.asList(new AbstractMap.SimpleEntry<>(0.0, 0.5), new AbstractMap.SimpleEntry<>(0.5, 1.0));
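// replace segment 0's full key range [0.0, 1.0) with two halves: [0.0, 0.5) and [0.5, 1.0).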
long scale = System.currentTimeMillis();
ArrayList<Long> sealedSegments = Lists.newArrayList(0L);
long one = NameUtils.computeSegmentId(1, 1);
long two = NameUtils.computeSegmentId(2, 1);
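// NameUtils.computeSegmentId combines a segment number with the epoch in which the segment is created into a single long id; segments 1 and 2 are the two segments this scale is expected to create in epoch 1.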
OperationContext context = getContext();
VersionedMetadata<EpochTransitionRecord> response = stream.submitScale(sealedSegments, newRanges, scale, null, context).join();
Map<Long, Map.Entry<Double, Double>> newSegments = response.getObject().getNewSegmentsWithRange();
VersionedMetadata<State> state = stream.getVersionedState(context).join();
state = stream.updateVersionedState(state, State.SCALING, context).join();
stream.startScale(false, response, state, context).join();
stream.scaleCreateNewEpoch(response, context).join();
// history table has a partial record at this point.
// now we could have sealed the segments so get successors could be called.
Map<Long, List<Long>> successors = stream.getSuccessorsWithPredecessors(0, context).join().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), Map.Entry::getValue));
assertTrue(successors.containsKey(one) && successors.containsKey(two));
// reset mock so that we can resume scale operation
stream.scaleOldSegmentsSealed(sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), response, context).join();
stream.completeScale(response, context).join();
successors = stream.getSuccessorsWithPredecessors(0, context).join().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), Map.Entry::getValue));
assertTrue(successors.containsKey(one) && successors.containsKey(two));
}
Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
Class TaskTest, method setUp.
@Before
public void setUp() throws Exception {
zkServer = new TestingServerStarter().start();
zkServer.start();
cli = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new RetryOneTime(2000));
cli.start();
streamStore = getStream();
taskMetadataStore = TaskStoreFactory.createZKStore(cli, executor);
segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
streamMetadataTasks = new StreamMetadataTasks(streamStore, StreamStoreFactory.createInMemoryBucketStore(), taskMetadataStore, segmentHelperMock, executor, HOSTNAME, GrpcAuthHelper.getDisabledAuthHelper());
final String stream2 = "stream2";
final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
final ScalingPolicy policy2 = ScalingPolicy.fixed(3);
final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
final StreamConfiguration configuration2 = StreamConfiguration.builder().scalingPolicy(policy2).build();
// region createStream
streamStore.createScope(SCOPE, null, executor).join();
long start = System.currentTimeMillis();
streamStore.createStream(SCOPE, stream1, configuration1, start, null, executor).join();
streamStore.setState(SCOPE, stream1, State.ACTIVE, null, executor).join();
streamStore.createStream(SCOPE, stream2, configuration2, start, null, executor).join();
streamStore.setState(SCOPE, stream2, State.ACTIVE, null, executor).join();
// endregion
// region scaleSegments
AbstractMap.SimpleEntry<Double, Double> segment1 = new AbstractMap.SimpleEntry<>(0.5, 0.75);
AbstractMap.SimpleEntry<Double, Double> segment2 = new AbstractMap.SimpleEntry<>(0.75, 1.0);
List<Long> sealedSegments = Collections.singletonList(1L);
VersionedMetadata<EpochTransitionRecord> versioned = streamStore.submitScale(SCOPE, stream1, sealedSegments, Arrays.asList(segment1, segment2), start + 20, null, null, executor).get();
EpochTransitionRecord response = versioned.getObject();
Map<Long, Map.Entry<Double, Double>> segmentsCreated = response.getNewSegmentsWithRange();
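// the transition record lists the newly created segments by id, mapped to their key ranges ([0.5, 0.75) and [0.75, 1.0)).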
VersionedMetadata<State> state = streamStore.getVersionedState(SCOPE, stream1, null, executor).join();
state = streamStore.updateVersionedState(SCOPE, stream1, State.SCALING, state, null, executor).get();
versioned = streamStore.startScale(SCOPE, stream1, false, versioned, state, null, executor).join();
streamStore.scaleCreateNewEpochs(SCOPE, stream1, versioned, null, executor).get();
streamStore.scaleSegmentsSealed(SCOPE, stream1, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, null, executor).get();
streamStore.completeScale(SCOPE, stream1, versioned, null, executor).join();
streamStore.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
AbstractMap.SimpleEntry<Double, Double> segment3 = new AbstractMap.SimpleEntry<>(0.0, 0.5);
AbstractMap.SimpleEntry<Double, Double> segment4 = new AbstractMap.SimpleEntry<>(0.5, 0.75);
AbstractMap.SimpleEntry<Double, Double> segment5 = new AbstractMap.SimpleEntry<>(0.75, 1.0);
List<Long> sealedSegments1 = Arrays.asList(0L, 1L, 2L);
versioned = streamStore.submitScale(SCOPE, stream2, sealedSegments1, Arrays.asList(segment3, segment4, segment5), start + 20, null, null, executor).get();
response = versioned.getObject();
segmentsCreated = response.getNewSegmentsWithRange();
state = streamStore.getVersionedState(SCOPE, stream2, null, executor).join();
state = streamStore.updateVersionedState(SCOPE, stream2, State.SCALING, state, null, executor).get();
versioned = streamStore.startScale(SCOPE, stream2, false, versioned, state, null, executor).join();
streamStore.scaleCreateNewEpochs(SCOPE, stream2, versioned, null, executor).get();
streamStore.scaleSegmentsSealed(SCOPE, stream2, sealedSegments1.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, null, executor).get();
streamStore.completeScale(SCOPE, stream2, versioned, null, executor).join();
streamStore.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
// endregion
}
Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
Class ScaleRequestHandlerTest, method concurrentDistinctScaleRun.
// concurrent run of scale 1 intermixed with scale 2
private void concurrentDistinctScaleRun(String stream, String funcToWaitOn, boolean isManual, Predicate<Throwable> firstExceptionPredicate, Map<String, Integer> invocationCount) throws Exception {
StreamMetadataStore streamStore1 = getStore();
StreamMetadataStore streamStore1Spied = spy(getStore());
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
streamStore1.createStream(scope, stream, config, System.currentTimeMillis(), null, executor).join();
streamStore1.setState(scope, stream, State.ACTIVE, null, executor).join();
CompletableFuture<Void> wait = new CompletableFuture<>();
CompletableFuture<Void> signal = new CompletableFuture<>();
ScaleOpEvent event = new ScaleOpEvent(scope, stream, Lists.newArrayList(0L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), isManual, System.currentTimeMillis(), System.currentTimeMillis());
if (isManual) {
streamStore1.submitScale(scope, stream, Lists.newArrayList(0L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), System.currentTimeMillis(), null, null, executor).join();
}
StreamMetadataStore streamStore2 = getStore();
ScaleOperationTask scaleRequestHandler1 = new ScaleOperationTask(streamMetadataTasks, streamStore1Spied, executor);
ScaleOperationTask scaleRequestHandler2 = new ScaleOperationTask(streamMetadataTasks, streamStore2, executor);
setMockLatch(streamStore1, streamStore1Spied, funcToWaitOn, signal, wait);
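// setMockLatch (a helper in this test class) stubs funcToWaitOn on the spied store so that the first handler completes the signal future when it reaches that call and then blocks on the wait future, letting the second handler race ahead.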
CompletableFuture<Void> future1 = CompletableFuture.completedFuture(null).thenComposeAsync(v -> scaleRequestHandler1.execute(event), executor);
signal.join();
// let this run to completion. this should succeed
scaleRequestHandler2.execute(event).join();
long one = NameUtils.computeSegmentId(1, 1);
ScaleOpEvent event2 = new ScaleOpEvent(scope, stream, Lists.newArrayList(one), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), isManual, System.currentTimeMillis(), System.currentTimeMillis());
if (isManual) {
streamStore1.submitScale(scope, stream, Lists.newArrayList(one), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), System.currentTimeMillis(), null, null, executor).join();
}
scaleRequestHandler2.execute(event2).join();
// now complete wait latch.
wait.complete(null);
AssertExtensions.assertSuppliedFutureThrows("first scale should fail", () -> future1, firstExceptionPredicate);
verify(streamStore1Spied, times(invocationCount.get("startScale"))).startScale(anyString(), anyString(), anyBoolean(), any(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("scaleCreateNewEpochs"))).scaleCreateNewEpochs(anyString(), anyString(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("scaleSegmentsSealed"))).scaleSegmentsSealed(anyString(), anyString(), any(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("completeScale"))).completeScale(anyString(), anyString(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("updateVersionedState"))).updateVersionedState(anyString(), anyString(), any(), any(), any(), any());
// validate scale done
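// after the two scales completed by scaleRequestHandler2, the epoch transition record should be reset to EMPTY, the active epoch should be 2, and the stream should be back in ACTIVE state.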
VersionedMetadata<EpochTransitionRecord> versioned = streamStore1.getEpochTransition(scope, stream, null, executor).join();
assertEquals(EpochTransitionRecord.EMPTY, versioned.getObject());
assertEquals(4, getVersionNumber(versioned));
assertEquals(2, streamStore1.getActiveEpoch(scope, stream, null, true, executor).join().getEpoch());
assertEquals(State.ACTIVE, streamStore1.getState(scope, stream, true, null, executor).join());
streamStore1.close();
streamStore2.close();
}
Use of io.pravega.controller.store.stream.records.EpochTransitionRecord in project pravega by pravega.
Class ControllerMetadataJsonSerializerTest, method testEpochTransitionRecord.
@Test
public void testEpochTransitionRecord() {
ImmutableSet<Long> segmentsToSeal = ImmutableSet.of(1L, 2L);
ImmutableMap<Long, Map.Entry<Double, Double>> newSegmentsWithRange = ImmutableMap.of(3L, Map.entry(0.1, 0.2), 4L, Map.entry(0.3, 0.4));
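// the record captures the currently active epoch (10), the scale request timestamp (100L), the segments to seal, and the new segments keyed by id with their key ranges.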
EpochTransitionRecord record = new EpochTransitionRecord(10, 100L, segmentsToSeal, newSegmentsWithRange);
testRecordSerialization(record, EpochTransitionRecord.class);
}