Search in sources :

Example 6 with Data

Use of io.pravega.controller.store.stream.tables.Data in the pravega project.

The following example shows the StreamTest class's testConcurrentGetSuccessorScale method.

/**
 * Verifies that {@code getSuccessorsWithPredecessors} always reads the history table
 * before the segment table, both while a scale operation is only partially recorded
 * (history table holds a partial record) and after the scale has fully completed.
 *
 * Ordering is asserted by stubbing the spied ZKStream's table getters so that
 * fetching the segment table before the history table throws, failing the test.
 */
@Test(timeout = 10000)
public void testConcurrentGetSuccessorScale() throws Exception {
    final ScalingPolicy policy = ScalingPolicy.fixed(1);
    final StreamMetadataStore store = new ZKStreamMetadataStore(cli, executor);
    final String streamName = "test";
    final String scopeName = "test";
    store.createScope(scopeName).get();
    ZKStoreHelper zkStoreHelper = new ZKStoreHelper(cli, executor);
    // FIX: scope was previously populated from streamName; that only worked because
    // scopeName and streamName happen to hold the same value.
    StreamConfiguration streamConfig = StreamConfiguration.builder().scope(scopeName).streamName(streamName).scalingPolicy(policy).build();
    store.createStream(scopeName, streamName, streamConfig, System.currentTimeMillis(), null, executor).get();
    store.setState(scopeName, streamName, State.ACTIVE, null, executor).get();
    // FIX: use the declared variables instead of repeating the "test" literals.
    ZKStream zkStream = spy(new ZKStream(scopeName, streamName, zkStoreHelper));
    List<AbstractMap.SimpleEntry<Double, Double>> newRanges;
    newRanges = Arrays.asList(new AbstractMap.SimpleEntry<>(0.0, 0.5), new AbstractMap.SimpleEntry<>(0.5, 1.0));
    long scale = System.currentTimeMillis();
    ArrayList<Integer> sealedSegments = Lists.newArrayList(0);
    StartScaleResponse response = zkStream.startScale(sealedSegments, newRanges, scale, false).join();
    List<Segment> newSegments = response.getSegmentsCreated();
    zkStream.updateState(State.SCALING).join();
    List<Integer> newSegmentInt = newSegments.stream().map(Segment::getNumber).collect(Collectors.toList());
    zkStream.scaleNewSegmentsCreated(sealedSegments, newSegmentInt, response.getActiveEpoch(), scale).get();
    // History table has a partial record at this point; the segments may already be
    // sealed, so getSuccessors could legitimately be called concurrently with the scale.
    final CompletableFuture<Data<Integer>> segmentTable = zkStream.getSegmentTable();
    final CompletableFuture<Data<Integer>> historyTable = zkStream.getHistoryTable();
    AtomicBoolean historyCalled = new AtomicBoolean(false);
    AtomicBoolean segmentCalled = new AtomicBoolean(false);
    // Mock: if the segment table is fetched before the history table, throw a
    // runtime exception so the test fails.
    doAnswer((Answer<CompletableFuture<Data<Integer>>>) invocation -> {
        if (!historyCalled.get() && segmentCalled.get()) {
            throw new RuntimeException();
        }
        historyCalled.set(true);
        return historyTable;
    }).when(zkStream).getHistoryTable();
    doAnswer((Answer<CompletableFuture<Data<Integer>>>) invocation -> {
        if (!historyCalled.get()) {
            throw new RuntimeException();
        }
        segmentCalled.set(true);
        return segmentTable;
    }).when(zkStream).getSegmentTable();
    Map<Integer, List<Integer>> successors = zkStream.getSuccessorsWithPredecessors(0).get();
    assertTrue(successors.containsKey(1) && successors.containsKey(2));
    // Reset the mocks so the scale operation can resume unimpeded.
    doAnswer((Answer<CompletableFuture<Data<Integer>>>) invocation -> historyTable).when(zkStream).getHistoryTable();
    doAnswer((Answer<CompletableFuture<Data<Integer>>>) invocation -> segmentTable).when(zkStream).getSegmentTable();
    zkStream.scaleOldSegmentsSealed(sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), newSegmentInt, response.getActiveEpoch(), scale).get();
    // Scale is completed; the history table now holds a completed record.
    final CompletableFuture<Data<Integer>> segmentTable2 = zkStream.getSegmentTable();
    final CompletableFuture<Data<Integer>> historyTable2 = zkStream.getHistoryTable();
    // Re-arm the ordering check for the post-scale read path.
    segmentCalled.set(false);
    historyCalled.set(false);
    doAnswer((Answer<CompletableFuture<Data<Integer>>>) invocation -> {
        if (!historyCalled.get() && segmentCalled.get()) {
            throw new RuntimeException();
        }
        historyCalled.set(true);
        return historyTable2;
    }).when(zkStream).getHistoryTable();
    doAnswer((Answer<CompletableFuture<Data<Integer>>>) invocation -> {
        if (!historyCalled.get()) {
            throw new RuntimeException();
        }
        segmentCalled.set(true);
        return segmentTable2;
    }).when(zkStream).getSegmentTable();
    successors = zkStream.getSuccessorsWithPredecessors(0).get();
    assertTrue(successors.containsKey(1) && successors.containsKey(2));
}
Also used : CuratorFrameworkFactory(org.apache.curator.framework.CuratorFrameworkFactory) Arrays(java.util.Arrays) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CompletableFuture(java.util.concurrent.CompletableFuture) Mockito.spy(org.mockito.Mockito.spy) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) ArrayList(java.util.ArrayList) RetryOneTime(org.apache.curator.retry.RetryOneTime) Answer(org.mockito.stubbing.Answer) Lists(com.google.common.collect.Lists) TestingServerStarter(io.pravega.test.common.TestingServerStarter) Data(io.pravega.controller.store.stream.tables.Data) After(org.junit.After) Map(java.util.Map) Mockito.doAnswer(org.mockito.Mockito.doAnswer) TestingServer(org.apache.curator.test.TestingServer) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) Before(org.junit.Before) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) State(io.pravega.controller.store.stream.tables.State) Collectors(java.util.stream.Collectors) Executors(java.util.concurrent.Executors) ExecutionException(java.util.concurrent.ExecutionException) AbstractMap(java.util.AbstractMap) List(java.util.List) CuratorFramework(org.apache.curator.framework.CuratorFramework) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Assert.assertEquals(org.junit.Assert.assertEquals) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Data(io.pravega.controller.store.stream.tables.Data) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CompletableFuture(java.util.concurrent.CompletableFuture) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) ArrayList(java.util.ArrayList) List(java.util.List) Test(org.junit.Test)

Example 7 with Data

Use of io.pravega.controller.store.stream.tables.Data in the pravega project.

The following example shows the ZKStreamMetadataStore class's addUpdateStreamForAutoStreamCut method.

/**
 * Persists (or refreshes) the retention policy for a stream under its bucket-specific
 * retention znode so the auto-stream-cut machinery can discover it. The node is
 * created on first use; on subsequent calls it is overwritten using the version read
 * from the existing data.
 *
 * @param scope           scope of the stream
 * @param stream          name of the stream
 * @param retentionPolicy policy to serialize and store; must not be null
 * @param context         operation context (not used by this implementation)
 * @param executor        executor (not used by this implementation)
 * @return a future that completes once the znode has been created or updated
 */
@Override
public CompletableFuture<Void> addUpdateStreamForAutoStreamCut(final String scope, final String stream, final RetentionPolicy retentionPolicy, final OperationContext context, final Executor executor) {
    Preconditions.checkNotNull(retentionPolicy);
    final int bucket = getBucket(scope, stream);
    final String retentionPath = String.format(ZKStoreHelper.RETENTION_PATH, bucket, encodedScopedStreamName(scope, stream));
    final byte[] serializedPolicy = SerializationUtils.serialize(retentionPolicy);
    return storeHelper.getData(retentionPath)
            .exceptionally(ex -> {
                // A missing node just means no policy has been stored yet; map it to null
                // so the continuation below creates the node. Anything else is fatal.
                if (ex instanceof StoreException.DataNotFoundException) {
                    return null;
                }
                throw new CompletionException(ex);
            })
            .thenCompose(existing -> existing == null
                    ? storeHelper.createZNodeIfNotExist(retentionPath, serializedPolicy)
                    : storeHelper.setData(retentionPath, new Data<>(serializedPolicy, existing.getVersion())));
}
Also used : SneakyThrows(lombok.SneakyThrows) StreamImpl(io.pravega.client.stream.impl.StreamImpl) RetentionPolicy(io.pravega.client.stream.RetentionPolicy) SerializationUtils(org.apache.commons.lang3.SerializationUtils) CompletableFuture(java.util.concurrent.CompletableFuture) StreamNotification(io.pravega.controller.server.retention.BucketChangeListener.StreamNotification) PathChildrenCacheListener(org.apache.curator.framework.recipes.cache.PathChildrenCacheListener) AtomicReference(java.util.concurrent.atomic.AtomicReference) ConcurrentMap(java.util.concurrent.ConcurrentMap) BucketOwnershipListener(io.pravega.controller.server.retention.BucketOwnershipListener) ZKPaths(org.apache.curator.utils.ZKPaths) BucketNotification(io.pravega.controller.server.retention.BucketOwnershipListener.BucketNotification) Data(io.pravega.controller.store.stream.tables.Data) NotificationType(io.pravega.controller.server.retention.BucketChangeListener.StreamNotification.NotificationType) BucketChangeListener(io.pravega.controller.server.retention.BucketChangeListener) Executor(java.util.concurrent.Executor) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) IOException(java.io.IOException) CompletionException(java.util.concurrent.CompletionException) Collectors(java.util.stream.Collectors) Slf4j(lombok.extern.slf4j.Slf4j) Base64(java.util.Base64) List(java.util.List) CuratorFramework(org.apache.curator.framework.CuratorFramework) Config(io.pravega.controller.util.Config) PathChildrenCache(org.apache.curator.framework.recipes.cache.PathChildrenCache) Preconditions(com.google.common.base.Preconditions) ZKHostIndex(io.pravega.controller.store.index.ZKHostIndex) CompletionException(java.util.concurrent.CompletionException)

Aggregations

Data (io.pravega.controller.store.stream.tables.Data)7 CompletableFuture (java.util.concurrent.CompletableFuture)6 ActiveTxnRecord (io.pravega.controller.store.stream.tables.ActiveTxnRecord)5 List (java.util.List)5 Collectors (java.util.stream.Collectors)5 StreamConfiguration (io.pravega.client.stream.StreamConfiguration)4 State (io.pravega.controller.store.stream.tables.State)4 Map (java.util.Map)4 UUID (java.util.UUID)4 SerializationUtils (org.apache.commons.lang3.SerializationUtils)4 Preconditions (com.google.common.base.Preconditions)3 Lists (com.google.common.collect.Lists)3 Exceptions (io.pravega.common.Exceptions)3 Futures (io.pravega.common.concurrent.Futures)3 BitConverter (io.pravega.common.util.BitConverter)3 CompletedTxnRecord (io.pravega.controller.store.stream.tables.CompletedTxnRecord)3 StreamTruncationRecord (io.pravega.controller.store.stream.tables.StreamTruncationRecord)3 TableHelper (io.pravega.controller.store.stream.tables.TableHelper)3 AbstractMap (java.util.AbstractMap)3 ArrayList (java.util.ArrayList)3