
Example 41 with VersionedMetadata

Use of io.pravega.controller.store.VersionedMetadata in project pravega by pravega.

From the class ZkStreamTest, method testZkStream. The test takes a ZooKeeper-backed stream through three scale operations and verifies active segments, successor queries, per-epoch segment lists, sealing behavior, and cold-segment markers.

@Test(timeout = 30000)
public void testZkStream() throws Exception {
    double keyChunk = 1.0 / 5;
    final ScalingPolicy policy = ScalingPolicy.fixed(5);
    @Cleanup final StreamMetadataStore store = new ZKStreamMetadataStore(cli, executor);
    final String streamName = "test";
    store.createScope(SCOPE, null, executor).get();
    StreamConfiguration streamConfig = StreamConfiguration.builder().scalingPolicy(policy).build();
    store.createStream(SCOPE, streamName, streamConfig, System.currentTimeMillis(), null, executor).get();
    store.setState(SCOPE, streamName, State.ACTIVE, null, executor).get();
    OperationContext context = store.createStreamContext(SCOPE, streamName, 0L);
    // set minimum number of segments to 1 so that we can also test scale downs
    streamConfig = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    store.startUpdateConfiguration(SCOPE, streamName, streamConfig, null, executor).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = store.getConfigurationRecord(SCOPE, streamName, null, executor).join();
    store.completeUpdateConfiguration(SCOPE, streamName, configRecord, null, executor).join();
    List<StreamSegmentRecord> segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
    assertEquals(segments.size(), 5);
    assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, 1L, 2L, 3L, 4L).contains(x.segmentId())));
    long start = segments.get(0).getCreationTime();
    assertEquals(store.getConfiguration(SCOPE, streamName, context, executor).get(), streamConfig);
    List<Map.Entry<Double, Double>> newRanges;
    // existing range 0 = 0 - .2, 1 = .2 - .4, 2 = .4 - .6, 3 = .6 - .8, 4 = .8 - 1.0
    // 3, 4 -> 5 = .6 - 1.0
    newRanges = Collections.singletonList(new AbstractMap.SimpleEntry<>(3 * keyChunk, 1.0));
    long scale1 = start + 10000;
    ArrayList<Long> sealedSegments = Lists.newArrayList(3L, 4L);
    long five = computeSegmentId(5, 1);
    VersionedMetadata<EpochTransitionRecord> versioned = store.submitScale(SCOPE, streamName, sealedSegments, newRanges, scale1, null, context, executor).get();
    VersionedMetadata<State> state = store.getVersionedState(SCOPE, streamName, null, executor).join();
    state = store.updateVersionedState(SCOPE, streamName, State.SCALING, state, null, executor).join();
    versioned = store.startScale(SCOPE, streamName, false, versioned, state, null, executor).join();
    store.scaleCreateNewEpochs(SCOPE, streamName, versioned, context, executor).get();
    store.scaleSegmentsSealed(SCOPE, streamName, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, context, executor).get();
    store.completeScale(SCOPE, streamName, versioned, null, executor).join();
    store.setState(SCOPE, streamName, State.ACTIVE, null, executor).join();
    segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
    assertEquals(segments.size(), 4);
    assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, 1L, 2L, five).contains(x.segmentId())));
    // 1 -> 6 = .2 - .3, 7 = .3 - .4
    // 2,5 -> 8 = .4 - 1.0
    newRanges = Arrays.asList(new AbstractMap.SimpleEntry<>(keyChunk, 0.3), new AbstractMap.SimpleEntry<>(0.3, 2 * keyChunk), new AbstractMap.SimpleEntry<>(2 * keyChunk, 1.0));
    long scale2 = scale1 + 10000;
    ArrayList<Long> sealedSegments1 = Lists.newArrayList(1L, 2L, five);
    long six = computeSegmentId(6, 2);
    long seven = computeSegmentId(7, 2);
    long eight = computeSegmentId(8, 2);
    versioned = store.submitScale(SCOPE, streamName, sealedSegments1, newRanges, scale2, null, context, executor).get();
    EpochTransitionRecord response = versioned.getObject();
    state = store.getVersionedState(SCOPE, streamName, null, executor).join();
    state = store.updateVersionedState(SCOPE, streamName, State.SCALING, state, null, executor).join();
    versioned = store.startScale(SCOPE, streamName, false, versioned, state, null, executor).join();
    store.scaleCreateNewEpochs(SCOPE, streamName, versioned, context, executor).get();
    store.scaleSegmentsSealed(SCOPE, streamName, sealedSegments1.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, context, executor).get();
    store.completeScale(SCOPE, streamName, versioned, null, executor).join();
    store.setState(SCOPE, streamName, State.ACTIVE, null, executor).join();
    segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
    assertEquals(segments.size(), 4);
    assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, six, seven, eight).contains(x.segmentId())));
    // 7 -> 9 = .3 - .35, 10 = .35 - .6
    // 8 -> 10 = .35 - .6, 11 = .6 - 1.0
    newRanges = Arrays.asList(new AbstractMap.SimpleEntry<>(0.3, 0.35), new AbstractMap.SimpleEntry<>(0.35, 3 * keyChunk), new AbstractMap.SimpleEntry<>(3 * keyChunk, 1.0));
    long scale3 = scale2 + 10000;
    long nine = computeSegmentId(9, 3);
    long ten = computeSegmentId(10, 3);
    long eleven = computeSegmentId(11, 3);
    ArrayList<Long> sealedSegments2 = Lists.newArrayList(seven, eight);
    versioned = store.submitScale(SCOPE, streamName, sealedSegments2, newRanges, scale3, null, context, executor).get();
    response = versioned.getObject();
    state = store.getVersionedState(SCOPE, streamName, null, executor).join();
    state = store.updateVersionedState(SCOPE, streamName, State.SCALING, state, null, executor).join();
    store.startScale(SCOPE, streamName, false, versioned, state, null, executor).join();
    store.scaleCreateNewEpochs(SCOPE, streamName, versioned, context, executor).get();
    store.scaleSegmentsSealed(SCOPE, streamName, sealedSegments2.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, context, executor).get();
    store.completeScale(SCOPE, streamName, versioned, null, executor).join();
    store.setState(SCOPE, streamName, State.ACTIVE, null, executor).join();
    segments = store.getActiveSegments(SCOPE, streamName, context, executor).get();
    assertEquals(segments.size(), 5);
    assertTrue(segments.stream().allMatch(x -> Lists.newArrayList(0L, six, nine, ten, eleven).contains(x.segmentId())));
    Map<Long, List<Long>> successors = store.getSuccessors(SCOPE, streamName, 0L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());
    successors = store.getSuccessors(SCOPE, streamName, 1L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 2 && successors.containsKey(six) && successors.get(six).containsAll(Collections.singleton(1L)) && successors.containsKey(seven) && successors.get(seven).containsAll(Collections.singleton(1L)));
    successors = store.getSuccessors(SCOPE, streamName, 2L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 1 && successors.containsKey(eight) && successors.get(eight).containsAll(Lists.newArrayList(2L, five)));
    successors = store.getSuccessors(SCOPE, streamName, 3L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 1 && successors.containsKey(five) && successors.get(five).containsAll(Lists.newArrayList(3L, 4L)));
    successors = store.getSuccessors(SCOPE, streamName, 4L, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 1 && successors.containsKey(five) && successors.get(five).containsAll(Lists.newArrayList(3L, 4L)));
    successors = store.getSuccessors(SCOPE, streamName, five, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 1 && successors.containsKey(eight) && successors.get(eight).containsAll(Lists.newArrayList(2L, five)));
    successors = store.getSuccessors(SCOPE, streamName, six, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());
    successors = store.getSuccessors(SCOPE, streamName, seven, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 2 && successors.containsKey(nine) && successors.get(nine).containsAll(Collections.singleton(seven)) && successors.containsKey(ten) && successors.get(ten).containsAll(Lists.newArrayList(seven, eight)));
    successors = store.getSuccessors(SCOPE, streamName, eight, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.size() == 2 && successors.containsKey(eleven) && successors.get(eleven).containsAll(Collections.singleton(eight)) && successors.containsKey(ten) && successors.get(ten).containsAll(Lists.newArrayList(seven, eight)));
    successors = store.getSuccessors(SCOPE, streamName, nine, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());
    successors = store.getSuccessors(SCOPE, streamName, ten, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());
    successors = store.getSuccessors(SCOPE, streamName, eleven, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertTrue(successors.isEmpty());
    // segments at the head of the stream (as of time start - 1)
    Map<Long, Long> historicalSegments = store.getSegmentsAtHead(SCOPE, streamName, context, executor).get().entrySet().stream().collect(Collectors.toMap(x -> x.getKey().segmentId(), x -> x.getValue()));
    assertEquals(historicalSegments.size(), 5);
    assertTrue(historicalSegments.keySet().containsAll(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)));
    // segments in epoch 0 (as of time start + 1)
    List<Long> segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 0, context, executor).get().stream().map(x -> x.segmentId()).collect(Collectors.toList());
    assertEquals(segmentsInEpoch.size(), 5);
    assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)));
    // scale1
    segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 1, context, executor).get().stream().map(x -> x.segmentId()).collect(Collectors.toList());
    assertEquals(segmentsInEpoch.size(), 4);
    assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, 1L, 2L, five)));
    // scale2
    segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 2, context, executor).get().stream().map(x -> x.segmentId()).collect(Collectors.toList());
    assertEquals(segmentsInEpoch.size(), 4);
    assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, six, seven, eight)));
    // scale3
    segmentsInEpoch = store.getSegmentsInEpoch(SCOPE, streamName, 3, context, executor).get().stream().map(x -> x.segmentId()).collect(Collectors.toList());
    assertEquals(segmentsInEpoch.size(), 5);
    assertTrue(segmentsInEpoch.containsAll(Lists.newArrayList(0L, six, nine, ten, eleven)));
    assertFalse(store.isSealed(SCOPE, streamName, context, executor).get());
    assertNotEquals(0, store.getActiveSegments(SCOPE, streamName, context, executor).get().size());
    store.setSealed(SCOPE, streamName, context, executor).get();
    assertTrue(store.isSealed(SCOPE, streamName, context, executor).get());
    assertEquals(0, store.getActiveSegments(SCOPE, streamName, context, executor).get().size());
    // seal an already sealed stream.
    store.setSealed(SCOPE, streamName, context, executor).get();
    assertTrue(store.isSealed(SCOPE, streamName, context, executor).get());
    assertEquals(0, store.getActiveSegments(SCOPE, streamName, context, executor).get().size());
    // seal a non-existent stream.
    AssertExtensions.assertFutureThrows("", store.setSealed(SCOPE, "nonExistentStream", null, executor), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
    store.markCold(SCOPE, streamName, 0L, System.currentTimeMillis() + 1000, null, executor).get();
    assertTrue(store.isCold(SCOPE, streamName, 0L, null, executor).get());
    Thread.sleep(1000);
    assertFalse(store.isCold(SCOPE, streamName, 0L, null, executor).get());
    store.markCold(SCOPE, streamName, 0L, System.currentTimeMillis() + 1000, null, executor).get();
    store.removeMarker(SCOPE, streamName, 0L, null, executor).get();
    assertFalse(store.isCold(SCOPE, streamName, 0L, null, executor).get());
}
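The segment ids asserted above (five, six, seven, and so on) come from NameUtils.computeSegmentId, which combines a segment number with the epoch in which the segment was created. Below is a minimal sketch of that packing, written to be consistent with how this test uses it (segment 5 created in epoch 1, segments 6-8 in epoch 2, and so on); the exact bit layout is an assumption, not a copy of Pravega's source:

// Sketch: pack the creation epoch into the high 32 bits and the segment
// number into the low 32 bits, so segment ids remain unique across scales.
static long computeSegmentIdSketch(int segmentNumber, int epoch) {
    return ((long) epoch << 32) | (segmentNumber & 0xFFFFFFFFL);
}

// For example, computeSegmentIdSketch(5, 1) yields 4294967301L, which is
// distinct from segment number 5 of epoch 0 (plain 5L).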

Example 42 with VersionedMetadata

Use of io.pravega.controller.store.VersionedMetadata in project pravega by pravega.

From the class StreamTestBase, method testSegmentQueriesDuringScale. The test performs getSegment and successor queries during each phase of a scale: after submitScale, after new epochs are created, after old segments are sealed, and after the scale completes.

@Test(timeout = 30000L)
public void testSegmentQueriesDuringScale() {
    OperationContext context = getContext();
    // start scale and perform `getSegment`, `getActiveEpoch` and `getEpoch` during different phases of scale
    int startingSegmentNumber = new Random().nextInt(20);
    Stream stream = createStream("scope", "stream" + startingSegmentNumber, System.currentTimeMillis(), 5, startingSegmentNumber);
    StreamSegmentRecord segment = stream.getSegment(startingSegmentNumber, context).join();
    assertEquals(segment.segmentId(), startingSegmentNumber + 0L);
    assertEquals(segment.getKeyStart(), 0, 0);
    assertEquals(segment.getKeyEnd(), 1.0 / 5, 0);
    long segment5 = computeSegmentId(startingSegmentNumber + 5, 1);
    long segment6 = computeSegmentId(startingSegmentNumber + 6, 1);
    long segment7 = computeSegmentId(startingSegmentNumber + 7, 1);
    long segment8 = computeSegmentId(startingSegmentNumber + 8, 1);
    long segment9 = computeSegmentId(startingSegmentNumber + 9, 1);
    List<Long> newSegments = Lists.newArrayList(segment5, segment6, segment7, segment8, segment9);
    List<StreamSegmentRecord> originalSegments = stream.getActiveSegments(context).join();
    List<Long> segmentsToSeal = originalSegments.stream().map(StreamSegmentRecord::segmentId).collect(Collectors.toList());
    List<Map.Entry<Double, Double>> newRanges = originalSegments.stream().map(x -> new AbstractMap.SimpleEntry<>(x.getKeyStart(), x.getKeyEnd())).collect(Collectors.toList());
    VersionedMetadata<EpochTransitionRecord> etr = stream.getEpochTransition(context).join();
    // submit scale
    etr = stream.submitScale(segmentsToSeal, newRanges, System.currentTimeMillis(), etr, context).join();
    VersionedMetadata<State> state = stream.getVersionedState(context).thenCompose(s -> stream.updateVersionedState(s, State.SCALING, context)).join();
    etr = stream.startScale(true, etr, state, context).join();
    List<StreamSegmentRecord> newCurrentSegments = stream.getActiveSegments(context).join();
    assertEquals(originalSegments, newCurrentSegments);
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.getSegment(segment9, context), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
    Map<StreamSegmentRecord, List<Long>> successorsWithPredecessors = stream.getSuccessorsWithPredecessors(0L, context).join();
    assertTrue(successorsWithPredecessors.isEmpty());
    // scale create new epochs
    stream.scaleCreateNewEpoch(etr, context).join();
    newCurrentSegments = stream.getActiveSegments(context).join();
    assertEquals(originalSegments, newCurrentSegments);
    segment = stream.getSegment(segment9, context).join();
    assertEquals(computeSegmentId(startingSegmentNumber + 9, 1), segment.segmentId());
    assertEquals(segment.getKeyStart(), 1.0 / 5 * 4, 0);
    assertEquals(segment.getKeyEnd(), 1.0, 0);
    successorsWithPredecessors = stream.getSuccessorsWithPredecessors(startingSegmentNumber + 0L, context).join();
    Set<StreamSegmentRecord> successors = successorsWithPredecessors.keySet();
    assertEquals(1, successors.size());
    StreamSegmentRecord five = successors.stream().findAny().get();
    assertEquals(computeSegmentId(startingSegmentNumber + 5, 1), five.segmentId());
    List<Long> predecessors = successorsWithPredecessors.get(five);
    assertEquals(1, predecessors.size());
    assertTrue(predecessors.contains(startingSegmentNumber + 0L));
    // scale old segments sealed
    stream.scaleOldSegmentsSealed(Collections.emptyMap(), etr, context).join();
    newCurrentSegments = stream.getActiveSegments(context).join();
    assertEquals(new HashSet<>(newSegments), newCurrentSegments.stream().map(StreamSegmentRecord::segmentId).collect(Collectors.toSet()));
    segment = stream.getSegment(segment9, context).join();
    assertEquals(computeSegmentId(startingSegmentNumber + 9, 1), segment.segmentId());
    assertEquals(segment.getKeyStart(), 1.0 / 5 * 4, 0);
    assertEquals(segment.getKeyEnd(), 1.0, 0);
    // complete scale
    stream.completeScale(etr, context).join();
    segment = stream.getSegment(segment9, context).join();
    assertEquals(computeSegmentId(startingSegmentNumber + 9, 1), segment.segmentId());
    assertEquals(segment.getKeyStart(), 1.0 / 5 * 4, 0);
    assertEquals(segment.getKeyEnd(), 1.0, 0);
}
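Each phase above threads a VersionedMetadata value (the epoch transition record etr, the state) back into the next call, so the store can reject writers holding a stale version. The shape implied by the calls in these tests is a payload plus the version it was read at; the sketch below is an assumption about that shape (getObject and getVersion mirror the usage here, while the long version field is a stand-in for the real io.pravega.controller.store.Version type):

// Sketch of the versioned-wrapper pattern: a record plus the store version
// at which it was read, enabling compare-and-swap style conditional updates.
final class VersionedMetadataSketch<T> {
    private final T object;
    private final long version; // stand-in for the real Version type

    VersionedMetadataSketch(T object, long version) {
        this.object = object;
        this.version = version;
    }

    T getObject() { return object; }
    long getVersion() { return version; }
}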

Example 43 with VersionedMetadata

Use of io.pravega.controller.store.VersionedMetadata in project pravega by pravega.

From the class StreamTestBase, method scaleInputValidityTest. The test enumerates valid and invalid newRanges inputs to submitScale: empty input, span mismatches, unsorted input, degenerate and overlapping ranges, gaps, and precondition failures.

@Test(timeout = 30000L)
public void scaleInputValidityTest() {
    OperationContext context = getContext();
    int startingSegmentNumber = new Random().nextInt(2000);
    String name = "stream" + startingSegmentNumber;
    PersistentStreamBase stream = createStream("scope", name, System.currentTimeMillis(), 5, startingSegmentNumber);
    long timestamp = System.currentTimeMillis();
    final double keyRangeChunk = 1.0 / 5;
    long s0 = startingSegmentNumber;
    long s1 = 1L + startingSegmentNumber;
    long s2 = 2L + startingSegmentNumber;
    long s3 = 3L + startingSegmentNumber;
    long s4 = 4L + startingSegmentNumber;
    VersionedMetadata<EpochTransitionRecord> etr = stream.getEpochTransition(context).join();
    List<Map.Entry<Double, Double>> newRanges = new ArrayList<>();
    AtomicReference<List<Map.Entry<Double, Double>>> newRangesRef = new AtomicReference<>(newRanges);
    AtomicReference<VersionedMetadata<EpochTransitionRecord>> etrRef = new AtomicReference<>(etr);
    // 1. empty newRanges
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.submitScale(Lists.newArrayList(s0), newRangesRef.get(), timestamp, etrRef.get(), context), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.InputInvalidException);
    // 2. simple mismatch
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, keyRangeChunk));
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.submitScale(Lists.newArrayList(s0, s1), newRangesRef.get(), timestamp, etrRef.get(), context), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.InputInvalidException);
    // 3. simple valid match
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 2 * keyRangeChunk));
    etr = stream.submitScale(Lists.newArrayList(s0, s1), newRangesRef.get(), timestamp, etr, context).join();
    etr = resetScale(etr, stream);
    etrRef.set(etr);
    // 4. valid 2 disjoint merges
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 2 * keyRangeChunk));
    newRanges.add(new AbstractMap.SimpleEntry<>(3 * keyRangeChunk, 1.0));
    etr = stream.submitScale(Lists.newArrayList(s0, s1, s3, s4), newRangesRef.get(), timestamp, etrRef.get(), context).join();
    etr = resetScale(etr, stream);
    etrRef.set(etr);
    // 5. valid 1 merge and 1 disjoint
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(keyRangeChunk, 2 * keyRangeChunk));
    newRanges.add(new AbstractMap.SimpleEntry<>(3 * keyRangeChunk, 1.0));
    etr = stream.submitScale(Lists.newArrayList(s1, s3, s4), newRangesRef.get(), timestamp, etrRef.get(), context).join();
    etr = resetScale(etr, stream);
    etrRef.set(etr);
    // 6. valid 1 merge, 2 splits
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 2 * keyRangeChunk));
    newRanges.add(new AbstractMap.SimpleEntry<>(3 * keyRangeChunk, 0.7));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.7, 0.8));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.8, 0.9));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.9, 1.0));
    etr = stream.submitScale(Lists.newArrayList(s0, s1, s3, s4), newRangesRef.get(), timestamp, etrRef.get(), context).join();
    etr = resetScale(etr, stream);
    etrRef.set(etr);
    // 7. 1 merge, 1 split and 1 invalid split
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 2 * keyRangeChunk));
    newRanges.add(new AbstractMap.SimpleEntry<>(3 * keyRangeChunk, 0.7));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.7, 0.8));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.8, 0.9));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.9, 0.99));
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.submitScale(Lists.newArrayList(s0, s1, s3, s4), newRangesRef.get(), timestamp, etrRef.get(), context), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.InputInvalidException);
    // 8. valid unsorted segments to seal
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 2 * keyRangeChunk));
    newRanges.add(new AbstractMap.SimpleEntry<>(3 * keyRangeChunk, 0.7));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.7, 0.8));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.8, 0.9));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.9, 1.0));
    etr = stream.submitScale(Lists.newArrayList(s4, s0, s1, s3), newRangesRef.get(), timestamp, etrRef.get(), context).join();
    etr = resetScale(etr, stream);
    etrRef.set(etr);
    // 9. valid unsorted new ranges
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(0.9, 1.0));
    newRanges.add(new AbstractMap.SimpleEntry<>(3 * keyRangeChunk, 0.7));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.7, 0.8));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 2 * keyRangeChunk));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.8, 0.9));
    etr = stream.submitScale(Lists.newArrayList(s4, s0, s1, s3), newRangesRef.get(), timestamp, etrRef.get(), context).join();
    etr = resetScale(etr, stream);
    etrRef.set(etr);
    // 10. invalid input range low == high
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 0.2));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.2, 0.2));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.2, 0.4));
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.submitScale(Lists.newArrayList(s0, s1), newRangesRef.get(), timestamp, etrRef.get(), context), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.InputInvalidException);
    // 11. invalid input range low > high
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 0.2));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.3, 0.2));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.2, 0.4));
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.submitScale(Lists.newArrayList(s0, s1), newRangesRef.get(), timestamp, etrRef.get(), context), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.InputInvalidException);
    // 12. invalid overlapping key ranges
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(0.2, 0.4));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.3, 3 * keyRangeChunk));
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.submitScale(Lists.newArrayList(s1, s2), newRangesRef.get(), timestamp, etrRef.get(), context), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.InputInvalidException);
    // 13. invalid overlapping key ranges -- a contains b
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(0.2, 0.4));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.3, 0.33));
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.submitScale(Lists.newArrayList(s1), newRangesRef.get(), timestamp, etrRef.get(), context), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.InputInvalidException);
    // 14. invalid overlapping key ranges -- b contains a (with b.low == a.low)
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(0.2, 0.33));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.2, 0.4));
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.submitScale(Lists.newArrayList(s1), newRangesRef.get(), timestamp, etrRef.get(), context), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.InputInvalidException);
    // 15. invalid overlapping key ranges b.low < a.high
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(0.2, 0.35));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.3, 0.4));
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.submitScale(Lists.newArrayList(s1), newRangesRef.get(), timestamp, etrRef.get(), context), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.InputInvalidException);
    // 16. invalid input: gap between ranges (a.high < b.low), so the sealed segment's range is not fully covered
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(0.2, 0.25));
    newRanges.add(new AbstractMap.SimpleEntry<>(0.3, 0.4));
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.submitScale(Lists.newArrayList(s1), newRangesRef.get(), timestamp, etrRef.get(), context), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.InputInvalidException);
    // perform a real scale, sealing s0 and s1, so that the scale attempt below fails its precondition
    newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 0.4));
    scaleStream(stream, System.currentTimeMillis(), Lists.newArrayList(s0, s1), newRanges, Collections.emptyMap());
    // 17. precondition failure
    newRanges = new ArrayList<>();
    newRangesRef.set(newRanges);
    newRanges.add(new AbstractMap.SimpleEntry<>(0.2, 0.4));
    etrRef.set(stream.getEpochTransition(context).join());
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.submitScale(Lists.newArrayList(s1), newRangesRef.get(), timestamp, etrRef.get(), context), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.PreConditionFailureException);
    etrRef.set(stream.getEpochTransition(context).join());
    // get current number of segments.
    List<Long> segments = stream.getActiveSegments(context).join().stream().map(StreamSegmentRecord::segmentId).collect(Collectors.toList());
    // set minimum number of segments to segments.size.
    stream.startUpdateConfiguration(StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(segments.size())).build(), context).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = stream.getVersionedConfigurationRecord(context).join();
    stream.completeUpdateConfiguration(configRecord, context).join();
    // attempt a scale down which should be rejected in submit scale.
    newRanges = new ArrayList<>();
    newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 1.0));
    newRangesRef.set(newRanges);
    AssertExtensions.assertSuppliedFutureThrows("", () -> stream.submitScale(segments, newRangesRef.get(), timestamp, etrRef.get(), context), e -> Exceptions.unwrap(e) instanceof EpochTransitionOperationExceptions.PreConditionFailureException);
}
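All the invalid cases above surface as EpochTransitionOperationExceptions.InputInvalidException from submitScale. The sketch below is a hypothetical reconstruction of those checks for the simple case where the sealed segments form one contiguous key span; it is not Pravega's implementation, and the real validation also accepts disjoint sealed spans, as in cases 4-6:

// Hypothetical validator: the new ranges must tile the contiguous sealed
// span [sealedLow, sealedHigh] exactly, with no degenerate or overlapping
// entries. Uses java.util.Comparator, java.util.List, java.util.Map.
static boolean isValidScaleInput(List<Map.Entry<Double, Double>> newRanges,
                                 double sealedLow, double sealedHigh) {
    if (newRanges.isEmpty()) {
        return false;                                           // case 1
    }
    List<Map.Entry<Double, Double>> sorted = new ArrayList<>(newRanges);
    sorted.sort(Comparator.comparingDouble(Map.Entry::getKey)); // allows cases 8, 9
    double cursor = sealedLow;
    for (Map.Entry<Double, Double> range : sorted) {
        if (range.getKey() >= range.getValue()) {
            return false;                                       // cases 10, 11
        }
        if (Math.abs(range.getKey() - cursor) > 1e-10) {
            return false;                                       // overlap (12-15) or gap (16)
        }
        cursor = range.getValue();
    }
    return Math.abs(cursor - sealedHigh) < 1e-10;               // cases 2, 7
}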

Example 44 with VersionedMetadata

Use of io.pravega.controller.store.VersionedMetadata in project pravega by pravega.

From the class StreamTestBase, method createStream. A helper that creates a stream with the requested number of segments, activates it, and then lowers the minimum segment count to 1 so that scale-downs can also be tested.

private PersistentStreamBase createStream(String scope, String name, long time, int numOfSegments, int startingSegmentNumber, int chunkSize, int shardSize) {
    OperationContext context = getContext();
    createScope(scope, context);
    PersistentStreamBase stream = getStream(scope, name, chunkSize, shardSize);
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(numOfSegments)).build();
    stream.create(config, time, startingSegmentNumber, context).thenCompose(x -> stream.updateState(State.ACTIVE, context)).join();
    // set minimum number of segments to 1 so that we can also test scale downs
    stream.startUpdateConfiguration(StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(), context).join();
    VersionedMetadata<StreamConfigurationRecord> configRecord = stream.getVersionedConfigurationRecord(context).join();
    stream.completeUpdateConfiguration(configRecord, context).join();
    return stream;
}
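A hedged usage sketch of this helper follows. The chunkSize and shardSize parameters plausibly control how many entries go into each HistoryTimeSeries chunk and each SealedSegmentsMapShard shard in the underlying store (both record types are imported by this class); that reading, and the literal values below, are assumptions for illustration:

// Create a 5-segment stream whose history records are chunked and whose
// sealed-segment sizes are sharded in small groups (illustrative sizes).
PersistentStreamBase stream = createStream(
        "scope",                    // scope to create the stream in
        "stream1",                  // stream name
        System.currentTimeMillis(), // creation time
        5,                          // initial number of segments
        0,                          // starting segment number
        2,                          // chunkSize (assumed: entries per history chunk)
        2);                         // shardSize (assumed: entries per sealed-size shard)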

Example 45 with VersionedMetadata

Use of io.pravega.controller.store.VersionedMetadata in project pravega by pravega.

From the class ZKCounterTest, method testCounter. The test drives ZkInt96Counter through initial range acquisition, concurrent refreshes at a range boundary, and msb rollover near Long.MAX_VALUE.

@Test(timeout = 30000)
public void testCounter() throws Exception {
    ZKStoreHelper storeHelper = spy(new ZKStoreHelper(cli, executor));
    storeHelper.createZNodeIfNotExist("/store/scope").join();
    ZkInt96Counter zkStore = spy(new ZkInt96Counter(storeHelper));
    // the first call should get a new range from the store
    Int96 counter = zkStore.getNextCounter().join();
    // verify that the generated counter is from the new range
    assertEquals(0, counter.getMsb());
    assertEquals(1L, counter.getLsb());
    assertEquals(zkStore.getCounterForTesting(), counter);
    Int96 limit = zkStore.getLimitForTesting();
    assertEquals(ZkInt96Counter.COUNTER_RANGE, limit.getLsb());
    // update the local counter to the end of the current range (limit - 1)
    zkStore.setCounterAndLimitForTesting(limit.getMsb(), limit.getLsb() - 1, limit.getMsb(), limit.getLsb());
    // now call getNextCounter three times concurrently; the first call to execute should increment the counter to the limit.
    // the other two will trigger a range refresh.
    CompletableFuture<Int96> future1 = zkStore.getNextCounter();
    CompletableFuture<Int96> future2 = zkStore.getNextCounter();
    CompletableFuture<Int96> future3 = zkStore.getNextCounter();
    List<Int96> values = Futures.allOfWithResults(Arrays.asList(future1, future2, future3)).join();
    // the second and third calls should trigger a refresh. Verify that refreshRangeIfNeeded was called three times:
    // twice now, and once when the counter was first initialized.
    verify(zkStore, times(3)).refreshRangeIfNeeded();
    verify(zkStore, times(2)).getRefreshFuture();
    assertTrue(values.stream().anyMatch(x -> x.compareTo(new Int96(limit.getMsb(), limit.getLsb())) == 0));
    assertTrue(values.stream().anyMatch(x -> x.compareTo(new Int96(0, limit.getLsb() + 1)) == 0));
    assertTrue(values.stream().anyMatch(x -> x.compareTo(new Int96(0, limit.getLsb() + 2)) == 0));
    // verify that counter and limits are increased
    Int96 newCounter = zkStore.getCounterForTesting();
    Int96 newLimit = zkStore.getLimitForTesting();
    assertEquals(ZkInt96Counter.COUNTER_RANGE * 2, newLimit.getLsb());
    assertEquals(0, newLimit.getMsb());
    assertEquals(ZkInt96Counter.COUNTER_RANGE + 2, newCounter.getLsb());
    assertEquals(0, newCounter.getMsb());
    // set the range in the store to have lsb = Long.MAX_VALUE - 100
    VersionedMetadata<Int96> data = new VersionedMetadata<>(new Int96(0, Long.MAX_VALUE - 100), null);
    doReturn(CompletableFuture.completedFuture(data)).when(storeHelper).getData(eq(ZkInt96Counter.COUNTER_PATH), any());
    // set the local counter and limit to {0, Long.MAX_VALUE - 100}
    zkStore.setCounterAndLimitForTesting(0, Long.MAX_VALUE - 100, 0, Long.MAX_VALUE - 100);
    // now the call to getNextCounter should result in another refresh
    zkStore.getNextCounter().join();
    // verify that post refresh counter and limit have different msb
    Int96 newCounter2 = zkStore.getCounterForTesting();
    Int96 newLimit2 = zkStore.getLimitForTesting();
    assertEquals(1, newLimit2.getMsb());
    assertEquals(ZkInt96Counter.COUNTER_RANGE - 100, newLimit2.getLsb());
    assertEquals(0, newCounter2.getMsb());
    assertEquals(Long.MAX_VALUE - 99, newCounter2.getLsb());
}
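The msb rollover asserted at the end is consistent with 96-bit addition in which the lsb saturates at Long.MAX_VALUE. The arithmetic below is a plausible reconstruction, matching the assertions (a limit of {0, Long.MAX_VALUE - 100} advanced by COUNTER_RANGE becomes {1, COUNTER_RANGE - 100}); Int96's actual add logic may differ:

// Sketch: advance a {msb, lsb} pair by one COUNTER_RANGE block, carrying
// into the msb when the lsb would exceed Long.MAX_VALUE.
static long[] advanceLimit(long msb, long lsb, long counterRange) {
    if (lsb > Long.MAX_VALUE - counterRange) {
        // the part of the block that spills past Long.MAX_VALUE
        return new long[] { msb + 1, counterRange - (Long.MAX_VALUE - lsb) };
    }
    return new long[] { msb, lsb + counterRange };
}

// advanceLimit(0, Long.MAX_VALUE - 100, COUNTER_RANGE) -> {1, COUNTER_RANGE - 100},
// and advanceLimit(0, COUNTER_RANGE, COUNTER_RANGE) -> {0, 2 * COUNTER_RANGE},
// matching both limit assertions in this test.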

Aggregations

VersionedMetadata (io.pravega.controller.store.VersionedMetadata): 45 usages
Map (java.util.Map): 32
Futures (io.pravega.common.concurrent.Futures): 31
List (java.util.List): 31
Collectors (java.util.stream.Collectors): 31
ArrayList (java.util.ArrayList): 30
StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 29
EpochTransitionRecord (io.pravega.controller.store.stream.records.EpochTransitionRecord): 29
StreamConfigurationRecord (io.pravega.controller.store.stream.records.StreamConfigurationRecord): 29
UUID (java.util.UUID): 28
CompletableFuture (java.util.concurrent.CompletableFuture): 28
EpochRecord (io.pravega.controller.store.stream.records.EpochRecord): 27
Exceptions (io.pravega.common.Exceptions): 26
NameUtils (io.pravega.shared.NameUtils): 26
StreamTruncationRecord (io.pravega.controller.store.stream.records.StreamTruncationRecord): 25
HashMap (java.util.HashMap): 25
ImmutableMap (com.google.common.collect.ImmutableMap): 24
NameUtils.computeSegmentId (io.pravega.shared.NameUtils.computeSegmentId): 24
Collections (java.util.Collections): 24
Optional (java.util.Optional): 24