
Example 81 with StreamConfiguration

use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.

From the class TaskTest, method setUp:

@Before
public void setUp() throws Exception {
    zkServer = new TestingServerStarter().start();
    zkServer.start();
    cli = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new RetryOneTime(2000));
    cli.start();
    streamStore = getStream();
    taskMetadataStore = TaskStoreFactory.createZKStore(cli, executor);
    segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
    streamMetadataTasks = new StreamMetadataTasks(streamStore, StreamStoreFactory.createInMemoryBucketStore(), taskMetadataStore, segmentHelperMock, executor, HOSTNAME, GrpcAuthHelper.getDisabledAuthHelper());
    final String stream2 = "stream2";
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final ScalingPolicy policy2 = ScalingPolicy.fixed(3);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    final StreamConfiguration configuration2 = StreamConfiguration.builder().scalingPolicy(policy2).build();
    // region createStream
    streamStore.createScope(SCOPE, null, executor).join();
    long start = System.currentTimeMillis();
    streamStore.createStream(SCOPE, stream1, configuration1, start, null, executor).join();
    streamStore.setState(SCOPE, stream1, State.ACTIVE, null, executor).join();
    streamStore.createStream(SCOPE, stream2, configuration2, start, null, executor).join();
    streamStore.setState(SCOPE, stream2, State.ACTIVE, null, executor).join();
    // endregion
    // region scaleSegments
    AbstractMap.SimpleEntry<Double, Double> segment1 = new AbstractMap.SimpleEntry<>(0.5, 0.75);
    AbstractMap.SimpleEntry<Double, Double> segment2 = new AbstractMap.SimpleEntry<>(0.75, 1.0);
    List<Long> sealedSegments = Collections.singletonList(1L);
    VersionedMetadata<EpochTransitionRecord> versioned = streamStore.submitScale(SCOPE, stream1, sealedSegments, Arrays.asList(segment1, segment2), start + 20, null, null, executor).get();
    EpochTransitionRecord response = versioned.getObject();
    Map<Long, Map.Entry<Double, Double>> segmentsCreated = response.getNewSegmentsWithRange();
    VersionedMetadata<State> state = streamStore.getVersionedState(SCOPE, stream1, null, executor).join();
    state = streamStore.updateVersionedState(SCOPE, stream1, State.SCALING, state, null, executor).get();
    versioned = streamStore.startScale(SCOPE, stream1, false, versioned, state, null, executor).join();
    streamStore.scaleCreateNewEpochs(SCOPE, stream1, versioned, null, executor).get();
    streamStore.scaleSegmentsSealed(SCOPE, stream1, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, null, executor).get();
    streamStore.completeScale(SCOPE, stream1, versioned, null, executor).join();
    streamStore.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    AbstractMap.SimpleEntry<Double, Double> segment3 = new AbstractMap.SimpleEntry<>(0.0, 0.5);
    AbstractMap.SimpleEntry<Double, Double> segment4 = new AbstractMap.SimpleEntry<>(0.5, 0.75);
    AbstractMap.SimpleEntry<Double, Double> segment5 = new AbstractMap.SimpleEntry<>(0.75, 1.0);
    List<Long> sealedSegments1 = Arrays.asList(0L, 1L, 2L);
    versioned = streamStore.submitScale(SCOPE, stream2, sealedSegments1, Arrays.asList(segment3, segment4, segment5), start + 20, null, null, executor).get();
    response = versioned.getObject();
    segmentsCreated = response.getNewSegmentsWithRange();
    state = streamStore.getVersionedState(SCOPE, stream2, null, executor).join();
    state = streamStore.updateVersionedState(SCOPE, stream2, State.SCALING, state, null, executor).get();
    versioned = streamStore.startScale(SCOPE, stream2, false, versioned, state, null, executor).join();
    streamStore.scaleCreateNewEpochs(SCOPE, stream2, versioned, null, executor).get();
    streamStore.scaleSegmentsSealed(SCOPE, stream2, sealedSegments1.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, null, executor).get();
    streamStore.completeScale(SCOPE, stream2, versioned, null, executor).join();
    streamStore.setState(SCOPE, stream2, State.ACTIVE, null, executor).get();
    // endregion
}
Also used : Arrays(java.util.Arrays) AssertExtensions(io.pravega.test.common.AssertExtensions) Cleanup(lombok.Cleanup) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) VersionedMetadata(io.pravega.controller.store.VersionedMetadata) TaskMetadataStore(io.pravega.controller.store.task.TaskMetadataStore) Map(java.util.Map) After(org.junit.After) LockFailedException(io.pravega.controller.store.task.LockFailedException) EpochTransitionRecord(io.pravega.controller.store.stream.records.EpochTransitionRecord) CreateStreamStatus(io.pravega.controller.stream.api.grpc.v1.Controller.CreateStreamStatus) CompletionException(java.util.concurrent.CompletionException) UUID(java.util.UUID) EqualsAndHashCode(lombok.EqualsAndHashCode) Collectors(java.util.stream.Collectors) TaggedResource(io.pravega.controller.store.task.TaggedResource) Serializable(java.io.Serializable) List(java.util.List) Slf4j(lombok.extern.slf4j.Slf4j) CuratorFramework(org.apache.curator.framework.CuratorFramework) Assert.assertFalse(org.junit.Assert.assertFalse) Optional(java.util.Optional) Resource(io.pravega.controller.store.task.Resource) StreamMetadataStore(io.pravega.controller.store.stream.StreamMetadataStore) GrpcAuthHelper(io.pravega.controller.server.security.auth.GrpcAuthHelper) CuratorFrameworkFactory(org.apache.curator.framework.CuratorFrameworkFactory) StreamStoreFactory(io.pravega.controller.store.stream.StreamStoreFactory) SegmentHelper(io.pravega.controller.server.SegmentHelper) CompletableFuture(java.util.concurrent.CompletableFuture) ArrayList(java.util.ArrayList) RetryOneTime(org.apache.curator.retry.RetryOneTime) TestingServerStarter(io.pravega.test.common.TestingServerStarter) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) TestingServer(org.apache.curator.test.TestingServer) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) HostMonitorConfigImpl(io.pravega.controller.store.host.impl.HostMonitorConfigImpl) Before(org.junit.Before) SegmentHelperMock(io.pravega.controller.mocks.SegmentHelperMock) TestTasks(io.pravega.controller.task.Stream.TestTasks) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) HostStoreFactory(io.pravega.controller.store.host.HostStoreFactory) ExecutionException(java.util.concurrent.ExecutionException) AbstractMap(java.util.AbstractMap) TaskStoreFactory(io.pravega.controller.store.task.TaskStoreFactory) HostControllerStore(io.pravega.controller.store.host.HostControllerStore) Data(lombok.Data) State(io.pravega.controller.store.stream.State) ExecutorServiceHelpers(io.pravega.common.concurrent.ExecutorServiceHelpers) Assert(org.junit.Assert) Collections(java.util.Collections) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Assert.assertEquals(org.junit.Assert.assertEquals)
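
For comparison with the fixed(2) and fixed(3) policies used in setUp above, here is a minimal self-contained sketch of other options the StreamConfiguration builder accepts. It is not taken from the Pravega sources; the rates and sizes are illustrative only.

import io.pravega.client.stream.RetentionPolicy;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;

public class StreamConfigurationExamples {
    public static void main(String[] args) {
        // Minimal sketch, not from the pravega sources; values are illustrative.
        // Fixed number of segments, as in the test above.
        StreamConfiguration fixedConfig = StreamConfiguration.builder()
                .scalingPolicy(ScalingPolicy.fixed(2))
                .build();

        // Auto-scaling by event rate: target 100 events/sec per segment,
        // scale factor 2, minimum of 3 segments.
        StreamConfiguration autoScaled = StreamConfiguration.builder()
                .scalingPolicy(ScalingPolicy.byEventRate(100, 2, 3))
                .build();

        // Size-based retention of roughly 1 GB.
        StreamConfiguration withRetention = StreamConfiguration.builder()
                .scalingPolicy(ScalingPolicy.fixed(1))
                .retentionPolicy(RetentionPolicy.bySizeBytes(1024L * 1024 * 1024))
                .build();

        System.out.println(fixedConfig);
        System.out.println(autoScaled);
        System.out.println(withRetention);
    }
}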

Example 82 with StreamConfiguration

use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.

From the class TaskTest, method parallelTaskSweeperTest:

@Test(timeout = 30000)
public void parallelTaskSweeperTest() throws InterruptedException, ExecutionException {
    final String deadHost = "deadHost";
    final String deadThreadId1 = UUID.randomUUID().toString();
    final String deadThreadId2 = UUID.randomUUID().toString();
    final String scope = SCOPE;
    final String stream1 = "parallelSweeper1";
    final String stream2 = "parallelSweeper2";
    final StreamConfiguration config1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    final StreamConfiguration config2 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    final Resource resource1 = new Resource(scope, stream1);
    final long timestamp1 = System.currentTimeMillis();
    final TaskData taskData1 = new TaskData("createStream", "1.0", new Serializable[] { scope, stream1, config1, timestamp1, 0L });
    final Resource resource2 = new Resource(scope, stream2);
    final long timestamp2 = System.currentTimeMillis();
    final TaskData taskData2 = new TaskData("createStream", "1.0", new Serializable[] { scope, stream2, config2, timestamp2, 0L });
    for (int i = 0; i < 5; i++) {
        final TaggedResource taggedResource = new TaggedResource(UUID.randomUUID().toString(), resource1);
        taskMetadataStore.putChild(deadHost, taggedResource).join();
    }
    final TaggedResource taggedResource1 = new TaggedResource(deadThreadId1, resource1);
    taskMetadataStore.putChild(deadHost, taggedResource1).join();
    final TaggedResource taggedResource2 = new TaggedResource(deadThreadId2, resource2);
    taskMetadataStore.putChild(deadHost, taggedResource2).join();
    taskMetadataStore.lock(resource1, taskData1, deadHost, deadThreadId1, null, null).join();
    taskMetadataStore.lock(resource2, taskData2, deadHost, deadThreadId2, null, null).join();
    final SweeperThread sweeperThread1 = new SweeperThread(HOSTNAME, executor, taskMetadataStore, streamMetadataTasks, deadHost);
    final SweeperThread sweeperThread2 = new SweeperThread(HOSTNAME, executor, taskMetadataStore, streamMetadataTasks, deadHost);
    sweeperThread1.start();
    sweeperThread2.start();
    sweeperThread1.getResult().join();
    sweeperThread2.getResult().join();
    Optional<TaskData> data = taskMetadataStore.getTask(resource1, deadHost, deadThreadId1).get();
    assertFalse(data.isPresent());
    data = taskMetadataStore.getTask(resource2, deadHost, deadThreadId2).get();
    assertFalse(data.isPresent());
    Optional<TaggedResource> child = taskMetadataStore.getRandomChild(deadHost).get();
    assertFalse(child.isPresent());
    // ensure that the streams were created by the sweeper tasks
    StreamConfiguration config = streamStore.getConfiguration(SCOPE, stream1, null, executor).get();
    assertEquals(config1, config);
    config = streamStore.getConfiguration(SCOPE, stream2, null, executor).get();
    assertEquals(config2, config);
}
Also used : StreamConfiguration(io.pravega.client.stream.StreamConfiguration) TaggedResource(io.pravega.controller.store.task.TaggedResource) Resource(io.pravega.controller.store.task.Resource) Test(org.junit.Test)
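
The sweeper threads above complete the createStream tasks left behind by the dead host, and the assertions then read the resulting configurations back from the store. For readers who only need the public API, a minimal sketch of the equivalent client-side stream creation follows; the controller URI, scope and stream names are placeholders, not values from the test.

import io.pravega.client.ClientConfig;
import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;
import java.net.URI;

public class CreateStreamSketch {
    public static void main(String[] args) {
        // Minimal sketch, not from the pravega sources; the endpoint is a placeholder.
        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create("tcp://localhost:9090"))
                .build();
        StreamConfiguration config = StreamConfiguration.builder()
                .scalingPolicy(ScalingPolicy.fixed(2))
                .build();
        try (StreamManager streamManager = StreamManager.create(clientConfig)) {
            streamManager.createScope("myScope");
            // The returned boolean indicates whether a new stream was created.
            boolean created = streamManager.createStream("myScope", "myStream", config);
            System.out.println("created = " + created);
        }
    }
}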

Example 83 with StreamConfiguration

use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.

From the class ClientAdapterBase, method createStream:

// endregion
// region Stream Operations
@Override
public CompletableFuture<Void> createStream(String streamName, Duration timeout) {
    ensureRunning();
    return CompletableFuture.runAsync(() -> {
        if (this.streamWriters.containsKey(streamName)) {
            throw new CompletionException(new StreamSegmentExistsException(streamName));
        }
        StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(this.testConfig.getSegmentsPerStream())).build();
        if (!getStreamManager().createStream(SCOPE, streamName, config)) {
            throw new CompletionException(new StreamingException(String.format("Unable to create Stream '%s'.", streamName)));
        }
        List<EventStreamWriter<byte[]>> writers = new ArrayList<>(this.writersPerStream);
        if (this.streamWriters.putIfAbsent(streamName, writers) == null) {
            for (int i = 0; i < this.writersPerStream; i++) {
                writers.add(getClientFactory().createEventWriter(streamName, SERIALIZER, WRITER_CONFIG));
            }
        }
        List<TransactionalEventStreamWriter<byte[]>> txnWriters = new ArrayList<>(this.writersPerStream);
        if (this.transactionalWriters.putIfAbsent(streamName, txnWriters) == null) {
            for (int i = 0; i < this.writersPerStream; i++) {
                txnWriters.add(getClientFactory().createTransactionalEventWriter("writer", streamName, SERIALIZER, WRITER_CONFIG));
            }
        }
    }, this.testExecutor);
}
Also used : StreamingException(io.pravega.segmentstore.contracts.StreamingException) StreamSegmentExistsException(io.pravega.segmentstore.contracts.StreamSegmentExistsException) CompletionException(java.util.concurrent.CompletionException) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) ArrayList(java.util.ArrayList) TransactionalEventStreamWriter(io.pravega.client.stream.TransactionalEventStreamWriter) EventStreamWriter(io.pravega.client.stream.EventStreamWriter)
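
createStream above also pre-creates plain and transactional writers for the adapter's pool. As a standalone reference, here is a minimal sketch of creating one writer and emitting one event through the public client API; the endpoint, scope and stream names are placeholders and the stream is assumed to exist already.

import io.pravega.client.ClientConfig;
import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.stream.EventStreamWriter;
import io.pravega.client.stream.EventWriterConfig;
import io.pravega.client.stream.impl.UTF8StringSerializer;
import java.net.URI;

public class WriterSketch {
    public static void main(String[] args) throws Exception {
        // Minimal sketch, not from the pravega sources; endpoint and names are placeholders.
        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create("tcp://localhost:9090"))
                .build();
        try (EventStreamClientFactory clientFactory =
                     EventStreamClientFactory.withScope("myScope", clientConfig);
             EventStreamWriter<String> writer = clientFactory.createEventWriter(
                     "myStream", new UTF8StringSerializer(), EventWriterConfig.builder().build())) {
            // writeEvent returns a CompletableFuture that completes once the event is durably stored.
            writer.writeEvent("routingKey", "hello pravega").get();
        }
    }
}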

Example 84 with StreamConfiguration

use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.

From the class ScaleTest, method main:

public static void main(String[] args) throws Exception {
    try {
        @Cleanup("shutdownNow") val executor = ExecutorServiceHelpers.newScheduledThreadPool(1, "test");
        @Cleanup TestingServer zkTestServer = new TestingServerStarter().start();
        ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
        serviceBuilder.initialize();
        StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
        TableStore tableStore = serviceBuilder.createTableStoreService();
        int port = Config.SERVICE_PORT;
        @Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, port, store, tableStore, serviceBuilder.getLowPriorityExecutor());
        server.startListening();
        // Create controller object for testing against a separate controller process.
        @Cleanup ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), port);
        Controller controller = controllerWrapper.getController();
        final String scope = "scope";
        controllerWrapper.getControllerService().createScope(scope, 0L).get();
        final String streamName = "stream1";
        final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
        Stream stream = new StreamImpl(scope, streamName);
        log.info("Creating stream {}/{}", scope, streamName);
        if (!controller.createStream(scope, streamName, config).get()) {
            log.error("Stream already existed, exiting");
            return;
        }
        // Test 1: scale stream: split one segment into two
        log.info("Scaling stream {}/{}, splitting one segment into two", scope, streamName);
        Map<Double, Double> map = new HashMap<>();
        map.put(0.0, 0.5);
        map.put(0.5, 1.0);
        if (!controller.scaleStream(stream, Collections.singletonList(0L), map, executor).getFuture().get()) {
            log.error("Scale stream: splitting segment into two failed, exiting");
            return;
        }
        // Test 2: scale stream: merge two segments into one
        log.info("Scaling stream {}/{}, merging two segments into one", scope, streamName);
        CompletableFuture<Boolean> scaleResponseFuture = controller.scaleStream(stream, Arrays.asList(1L, 2L), Collections.singletonMap(0.0, 1.0), executor).getFuture();
        if (!scaleResponseFuture.get()) {
            log.error("Scale stream: merging two segments into one failed, exiting");
            return;
        }
        // Test 3: create a transaction, and try scale operation, it should fail with precondition check failure
        CompletableFuture<TxnSegments> txnFuture = controller.createTransaction(stream, 5000);
        TxnSegments transaction = txnFuture.get();
        if (transaction == null) {
            log.error("Create transaction failed, exiting");
            return;
        }
        log.info("Scaling stream {}/{}, splitting one segment into two, while transaction is ongoing", scope, streamName);
        scaleResponseFuture = controller.scaleStream(stream, Collections.singletonList(3L), map, executor).getFuture();
        CompletableFuture<Boolean> future = scaleResponseFuture.whenComplete((r, e) -> {
            if (e != null) {
                log.error("Failed: scale with ongoing transaction.", e);
            } else if (getAndHandleExceptions(controller.checkTransactionStatus(stream, transaction.getTxnId()), RuntimeException::new) != Transaction.Status.OPEN) {
                log.info("Success: scale with ongoing transaction.");
            } else {
                log.error("Failed: scale with ongoing transaction.");
            }
        });
        CompletableFuture<Void> statusFuture = controller.abortTransaction(stream, transaction.getTxnId());
        statusFuture.get();
        future.get();
        log.info("All scaling test PASSED");
        ExecutorServiceHelpers.shutdown(executor);
        System.exit(0);
    } catch (Throwable t) {
        log.error("test failed with {}", t);
        System.exit(-1);
    }
}
Also used : HashMap(java.util.HashMap) Cleanup(lombok.Cleanup) PravegaConnectionListener(io.pravega.segmentstore.server.host.handler.PravegaConnectionListener) ServiceBuilder(io.pravega.segmentstore.server.store.ServiceBuilder) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) Stream(io.pravega.client.stream.Stream) lombok.val(lombok.val) TestingServer(org.apache.curator.test.TestingServer) TxnSegments(io.pravega.client.stream.impl.TxnSegments) TestingServerStarter(io.pravega.test.common.TestingServerStarter) Controller(io.pravega.client.control.impl.Controller) TableStore(io.pravega.segmentstore.contracts.tables.TableStore) StreamSegmentStore(io.pravega.segmentstore.contracts.StreamSegmentStore) StreamImpl(io.pravega.client.stream.impl.StreamImpl)
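
Test 3 above creates a transaction directly through the Controller API so that the scale can be checked against it. For reference, a minimal sketch of the same idea through the public transactional writer API follows; the endpoint and names are placeholders and the stream is assumed to exist.

import io.pravega.client.ClientConfig;
import io.pravega.client.EventStreamClientFactory;
import io.pravega.client.stream.EventWriterConfig;
import io.pravega.client.stream.Transaction;
import io.pravega.client.stream.TransactionalEventStreamWriter;
import io.pravega.client.stream.impl.UTF8StringSerializer;
import java.net.URI;

public class TxnWriterSketch {
    public static void main(String[] args) throws Exception {
        // Minimal sketch, not from the pravega sources; endpoint and names are placeholders.
        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create("tcp://localhost:9090"))
                .build();
        try (EventStreamClientFactory clientFactory =
                     EventStreamClientFactory.withScope("scope", clientConfig);
             TransactionalEventStreamWriter<String> writer = clientFactory.createTransactionalEventWriter(
                     "writer-1", "stream1", new UTF8StringSerializer(), EventWriterConfig.builder().build())) {
            Transaction<String> txn = writer.beginTxn();
            txn.writeEvent("routingKey", "event inside a transaction");
            // Events become visible to readers only when the transaction commits;
            // txn.abort() would discard them instead.
            txn.commit();
        }
    }
}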

Example 85 with StreamConfiguration

use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.

From the class StreamsAndScopesManagementTest, method testCreateSealAndDeleteStreams:

private void testCreateSealAndDeleteStreams(String scope) {
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    for (int j = 1; j <= NUM_STREAMS; j++) {
        final String stream = String.valueOf(j);
        StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(j)).build();
        // Create Stream with nonexistent scope, which should not be successful.
        log.info("Creating a stream in a deliberately nonexistent scope nonexistentScope/{}.", stream);
        assertThrows(RuntimeException.class, () -> streamManager.createStream("nonexistentScope", stream, StreamConfiguration.builder().build()));
        long iniTime = System.nanoTime();
        log.info("Creating stream {}/{}.", scope, stream);
        assertTrue("Creating stream", streamManager.createStream(scope, stream, config));
        controllerPerfStats.get("createStreamMs").add(timeDiffInMs(iniTime));
        // Update the configuration of the stream by doubling the number of segments.
        config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(j * 2)).build();
        iniTime = System.nanoTime();
        assertTrue(streamManager.updateStream(scope, stream, config));
        controllerPerfStats.get("updateStreamMs").add(timeDiffInMs(iniTime));
        // Perform tests on empty and non-empty streams.
        if (j % 2 == 0) {
            log.info("Writing events in stream {}/{}.", scope, stream);
            @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
            writeEvents(clientFactory, stream, NUM_EVENTS);
        }
        // Update the configuration of the stream.
        config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(j * 2)).build();
        assertTrue(streamManager.updateStream(scope, stream, config));
        // Attempting to delete non-empty scope and non-sealed stream.
        assertThrows(RuntimeException.class, () -> streamManager.deleteScope(scope));
        assertThrows(RuntimeException.class, () -> streamManager.deleteStream(scope, stream));
        // Seal and delete stream.
        log.info("Attempting to seal and delete stream {}/{}.", scope, stream);
        iniTime = System.nanoTime();
        assertTrue(streamManager.sealStream(scope, stream));
        controllerPerfStats.get("sealStreamMs").add(timeDiffInMs(iniTime));
        iniTime = System.nanoTime();
        assertTrue(streamManager.deleteStream(scope, stream));
        controllerPerfStats.get("deleteStreamMs").add(timeDiffInMs(iniTime));
        // Seal and delete already sealed/deleted streams.
        log.info("Sealing and deleting an already deleted stream {}/{}.", scope, stream);
        assertThrows(RuntimeException.class, () -> streamManager.sealStream(scope, stream));
        assertFalse(streamManager.deleteStream(scope, stream));
    }
}
Also used : StreamConfiguration(io.pravega.client.stream.StreamConfiguration) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) ClientConfig(io.pravega.client.ClientConfig) Cleanup(lombok.Cleanup)
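
The test above walks each stream through create, update, seal and delete, and checks the error cases along the way. A compact sketch of that lifecycle through StreamManager alone follows; the endpoint, scope and stream names are placeholders.

import io.pravega.client.ClientConfig;
import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;
import java.net.URI;

public class StreamLifecycleSketch {
    public static void main(String[] args) {
        // Minimal sketch, not from the pravega sources; endpoint and names are placeholders.
        ClientConfig clientConfig = ClientConfig.builder()
                .controllerURI(URI.create("tcp://localhost:9090"))
                .build();
        try (StreamManager streamManager = StreamManager.create(clientConfig)) {
            streamManager.createScope("lifecycleScope");
            streamManager.createStream("lifecycleScope", "s1",
                    StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build());
            // A stream must be sealed before it can be deleted.
            streamManager.sealStream("lifecycleScope", "s1");
            streamManager.deleteStream("lifecycleScope", "s1");
            // A scope can only be deleted once it no longer contains streams.
            streamManager.deleteScope("lifecycleScope");
        }
    }
}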

Aggregations

StreamConfiguration (io.pravega.client.stream.StreamConfiguration): 251
Test (org.junit.Test): 207
ScalingPolicy (io.pravega.client.stream.ScalingPolicy): 112
Cleanup (lombok.Cleanup): 86
HashMap (java.util.HashMap): 64
Stream (io.pravega.client.stream.Stream): 63
ArrayList (java.util.ArrayList): 60
CompletableFuture (java.util.concurrent.CompletableFuture): 60
SocketConnectionFactoryImpl (io.pravega.client.connection.impl.SocketConnectionFactoryImpl): 54
List (java.util.List): 52
ReaderGroupManager (io.pravega.client.admin.ReaderGroupManager): 51
ConnectionFactory (io.pravega.client.connection.impl.ConnectionFactory): 49
UUID (java.util.UUID): 48
ClientFactoryImpl (io.pravega.client.stream.impl.ClientFactoryImpl): 47
Map (java.util.Map): 46
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString): 45
ReaderGroupConfig (io.pravega.client.stream.ReaderGroupConfig): 44
ClientConfig (io.pravega.client.ClientConfig): 42
ReaderGroupManagerImpl (io.pravega.client.admin.impl.ReaderGroupManagerImpl): 41
Controller (io.pravega.controller.stream.api.grpc.v1.Controller): 40