Use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.
From the class TaskTest, method setUp:
@Before
public void setUp() throws Exception {
    zkServer = new TestingServerStarter().start();
    zkServer.start();
    cli = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new RetryOneTime(2000));
    cli.start();
    streamStore = getStream();
    taskMetadataStore = TaskStoreFactory.createZKStore(cli, executor);
    segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
    streamMetadataTasks = new StreamMetadataTasks(streamStore, StreamStoreFactory.createInMemoryBucketStore(), taskMetadataStore,
            segmentHelperMock, executor, HOSTNAME, GrpcAuthHelper.getDisabledAuthHelper());
    final String stream2 = "stream2";
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final ScalingPolicy policy2 = ScalingPolicy.fixed(3);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    final StreamConfiguration configuration2 = StreamConfiguration.builder().scalingPolicy(policy2).build();
    // region createStream
    streamStore.createScope(SCOPE, null, executor).join();
    long start = System.currentTimeMillis();
    streamStore.createStream(SCOPE, stream1, configuration1, start, null, executor).join();
    streamStore.setState(SCOPE, stream1, State.ACTIVE, null, executor).join();
    streamStore.createStream(SCOPE, stream2, configuration2, start, null, executor).join();
    streamStore.setState(SCOPE, stream2, State.ACTIVE, null, executor).join();
    // endregion
    // region scaleSegments
    AbstractMap.SimpleEntry<Double, Double> segment1 = new AbstractMap.SimpleEntry<>(0.5, 0.75);
    AbstractMap.SimpleEntry<Double, Double> segment2 = new AbstractMap.SimpleEntry<>(0.75, 1.0);
    List<Long> sealedSegments = Collections.singletonList(1L);
    VersionedMetadata<EpochTransitionRecord> versioned = streamStore.submitScale(SCOPE, stream1, sealedSegments,
            Arrays.asList(segment1, segment2), start + 20, null, null, executor).get();
    EpochTransitionRecord response = versioned.getObject();
    Map<Long, Map.Entry<Double, Double>> segmentsCreated = response.getNewSegmentsWithRange();
    VersionedMetadata<State> state = streamStore.getVersionedState(SCOPE, stream1, null, executor).join();
    state = streamStore.updateVersionedState(SCOPE, stream1, State.SCALING, state, null, executor).get();
    versioned = streamStore.startScale(SCOPE, stream1, false, versioned, state, null, executor).join();
    streamStore.scaleCreateNewEpochs(SCOPE, stream1, versioned, null, executor).get();
    streamStore.scaleSegmentsSealed(SCOPE, stream1, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)),
            versioned, null, executor).get();
    streamStore.completeScale(SCOPE, stream1, versioned, null, executor).join();
    streamStore.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
    AbstractMap.SimpleEntry<Double, Double> segment3 = new AbstractMap.SimpleEntry<>(0.0, 0.5);
    AbstractMap.SimpleEntry<Double, Double> segment4 = new AbstractMap.SimpleEntry<>(0.5, 0.75);
    AbstractMap.SimpleEntry<Double, Double> segment5 = new AbstractMap.SimpleEntry<>(0.75, 1.0);
    List<Long> sealedSegments1 = Arrays.asList(0L, 1L, 2L);
    versioned = streamStore.submitScale(SCOPE, stream2, sealedSegments1, Arrays.asList(segment3, segment4, segment5),
            start + 20, null, null, executor).get();
    response = versioned.getObject();
    segmentsCreated = response.getNewSegmentsWithRange();
    state = streamStore.getVersionedState(SCOPE, stream2, null, executor).join();
    state = streamStore.updateVersionedState(SCOPE, stream2, State.SCALING, state, null, executor).get();
    versioned = streamStore.startScale(SCOPE, stream2, false, versioned, state, null, executor).join();
    streamStore.scaleCreateNewEpochs(SCOPE, stream2, versioned, null, executor).get();
    streamStore.scaleSegmentsSealed(SCOPE, stream2, sealedSegments1.stream().collect(Collectors.toMap(x -> x, x -> 0L)),
            versioned, null, executor).get();
    streamStore.completeScale(SCOPE, stream2, versioned, null, executor).join();
    streamStore.setState(SCOPE, stream2, State.ACTIVE, null, executor).get();
    // endregion
}
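The manual scale sequence above (submitScale, set SCALING, startScale, scaleCreateNewEpochs, scaleSegmentsSealed, completeScale, set ACTIVE) is repeated verbatim for both streams. As a hedged sketch only, the same store calls could be factored into a helper; the method name manuallyScale and its existence are assumptions for illustration, not part of TaskTest.

// Hypothetical helper (not in TaskTest): drives one stream through the same
// manual scale sequence used in setUp above.
private void manuallyScale(String scope, String stream, List<Long> sealedSegments,
                           List<AbstractMap.SimpleEntry<Double, Double>> newRanges,
                           long scaleTs) throws Exception {
    VersionedMetadata<EpochTransitionRecord> versioned =
            streamStore.submitScale(scope, stream, sealedSegments, newRanges, scaleTs, null, null, executor).get();
    VersionedMetadata<State> state = streamStore.getVersionedState(scope, stream, null, executor).join();
    state = streamStore.updateVersionedState(scope, stream, State.SCALING, state, null, executor).get();
    versioned = streamStore.startScale(scope, stream, false, versioned, state, null, executor).join();
    streamStore.scaleCreateNewEpochs(scope, stream, versioned, null, executor).get();
    // Mark every sealed segment with a size of 0, as the test setup does.
    streamStore.scaleSegmentsSealed(scope, stream,
            sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), versioned, null, executor).get();
    streamStore.completeScale(scope, stream, versioned, null, executor).join();
    streamStore.setState(scope, stream, State.ACTIVE, null, executor).get();
}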
Use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.
From the class TaskTest, method parallelTaskSweeperTest:
@Test(timeout = 30000)
public void parallelTaskSweeperTest() throws InterruptedException, ExecutionException {
    final String deadHost = "deadHost";
    final String deadThreadId1 = UUID.randomUUID().toString();
    final String deadThreadId2 = UUID.randomUUID().toString();
    final String scope = SCOPE;
    final String stream1 = "parallelSweeper1";
    final String stream2 = "parallelSweeper2";
    final StreamConfiguration config1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    final StreamConfiguration config2 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    final Resource resource1 = new Resource(scope, stream1);
    final long timestamp1 = System.currentTimeMillis();
    final TaskData taskData1 = new TaskData("createStream", "1.0", new Serializable[] { scope, stream1, config1, timestamp1, 0L });
    final Resource resource2 = new Resource(scope, stream2);
    final long timestamp2 = System.currentTimeMillis();
    final TaskData taskData2 = new TaskData("createStream", "1.0", new Serializable[] { scope, stream2, config2, timestamp2, 0L });
    for (int i = 0; i < 5; i++) {
        final TaggedResource taggedResource = new TaggedResource(UUID.randomUUID().toString(), resource1);
        taskMetadataStore.putChild(deadHost, taggedResource).join();
    }
    final TaggedResource taggedResource1 = new TaggedResource(deadThreadId1, resource1);
    taskMetadataStore.putChild(deadHost, taggedResource1).join();
    final TaggedResource taggedResource2 = new TaggedResource(deadThreadId2, resource2);
    taskMetadataStore.putChild(deadHost, taggedResource2).join();
    taskMetadataStore.lock(resource1, taskData1, deadHost, deadThreadId1, null, null).join();
    taskMetadataStore.lock(resource2, taskData2, deadHost, deadThreadId2, null, null).join();
    final SweeperThread sweeperThread1 = new SweeperThread(HOSTNAME, executor, taskMetadataStore, streamMetadataTasks, deadHost);
    final SweeperThread sweeperThread2 = new SweeperThread(HOSTNAME, executor, taskMetadataStore, streamMetadataTasks, deadHost);
    sweeperThread1.start();
    sweeperThread2.start();
    sweeperThread1.getResult().join();
    sweeperThread2.getResult().join();
    Optional<TaskData> data = taskMetadataStore.getTask(resource1, deadHost, deadThreadId1).get();
    assertFalse(data.isPresent());
    data = taskMetadataStore.getTask(resource2, deadHost, deadThreadId2).get();
    assertFalse(data.isPresent());
    Optional<TaggedResource> child = taskMetadataStore.getRandomChild(deadHost).get();
    assertFalse(child.isPresent());
    // ensure that the streams were created by the sweeper
    StreamConfiguration config = streamStore.getConfiguration(SCOPE, stream1, null, executor).get();
    assertEquals(config1, config);
    config = streamStore.getConfiguration(SCOPE, stream2, null, executor).get();
    assertEquals(config2, config);
}
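The test simulates a crashed host by registering orphaned child resources and lock entries before the sweepers run. A minimal sketch of that setup as a reusable helper, using only the putChild and lock calls shown above; the name simulateDeadHostTask is hypothetical, not part of TaskTest.

// Hypothetical helper (not in TaskTest): records the leftover state a crashed
// host would leave behind for one task, so a sweeper has something to recover.
private void simulateDeadHostTask(String deadHost, String threadId,
                                  Resource resource, TaskData taskData) {
    taskMetadataStore.putChild(deadHost, new TaggedResource(threadId, resource)).join();
    taskMetadataStore.lock(resource, taskData, deadHost, threadId, null, null).join();
}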
Use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.
From the class ClientAdapterBase, method createStream:
// endregion
// region Stream Operations
@Override
public CompletableFuture<Void> createStream(String streamName, Duration timeout) {
    ensureRunning();
    return CompletableFuture.runAsync(() -> {
        if (this.streamWriters.containsKey(streamName)) {
            throw new CompletionException(new StreamSegmentExistsException(streamName));
        }
        StreamConfiguration config = StreamConfiguration.builder()
                .scalingPolicy(ScalingPolicy.fixed(this.testConfig.getSegmentsPerStream()))
                .build();
        if (!getStreamManager().createStream(SCOPE, streamName, config)) {
            throw new CompletionException(new StreamingException(String.format("Unable to create Stream '%s'.", streamName)));
        }
        List<EventStreamWriter<byte[]>> writers = new ArrayList<>(this.writersPerStream);
        if (this.streamWriters.putIfAbsent(streamName, writers) == null) {
            for (int i = 0; i < this.writersPerStream; i++) {
                writers.add(getClientFactory().createEventWriter(streamName, SERIALIZER, WRITER_CONFIG));
            }
        }
        List<TransactionalEventStreamWriter<byte[]>> txnWriters = new ArrayList<>(this.writersPerStream);
        if (this.transactionalWriters.putIfAbsent(streamName, txnWriters) == null) {
            for (int i = 0; i < this.writersPerStream; i++) {
                txnWriters.add(getClientFactory().createTransactionalEventWriter("writer", streamName, SERIALIZER, WRITER_CONFIG));
            }
        }
    }, this.testExecutor);
}
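createStream registers per-stream writer lists but the snippet shows no teardown. As a hedged sketch only, a matching cleanup could close and drop those writers; the method name closeStreamWriters is hypothetical, and whether ClientAdapterBase performs this elsewhere is not shown here.

// Hypothetical counterpart (not shown in ClientAdapterBase): releases the
// writers that createStream registered for a stream.
private void closeStreamWriters(String streamName) {
    List<EventStreamWriter<byte[]>> writers = this.streamWriters.remove(streamName);
    if (writers != null) {
        writers.forEach(EventStreamWriter::close);
    }
    List<TransactionalEventStreamWriter<byte[]>> txnWriters = this.transactionalWriters.remove(streamName);
    if (txnWriters != null) {
        txnWriters.forEach(TransactionalEventStreamWriter::close);
    }
}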
Use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.
From the class ScaleTest, method main:
public static void main(String[] args) throws Exception {
    try {
        @Cleanup("shutdownNow")
        val executor = ExecutorServiceHelpers.newScheduledThreadPool(1, "test");
        @Cleanup
        TestingServer zkTestServer = new TestingServerStarter().start();
        ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
        serviceBuilder.initialize();
        StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
        TableStore tableStore = serviceBuilder.createTableStoreService();
        int port = Config.SERVICE_PORT;
        @Cleanup
        PravegaConnectionListener server = new PravegaConnectionListener(false, port, store, tableStore, serviceBuilder.getLowPriorityExecutor());
        server.startListening();
        // Create a controller object for testing against a separate controller process.
        @Cleanup
        ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), port);
        Controller controller = controllerWrapper.getController();
        final String scope = "scope";
        controllerWrapper.getControllerService().createScope(scope, 0L).get();
        final String streamName = "stream1";
        final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
        Stream stream = new StreamImpl(scope, streamName);
        log.info("Creating stream {}/{}", scope, streamName);
        if (!controller.createStream(scope, streamName, config).get()) {
            log.error("Stream already existed, exiting");
            return;
        }
        // Test 1: scale stream: split one segment into two.
        log.info("Scaling stream {}/{}, splitting one segment into two", scope, streamName);
        Map<Double, Double> map = new HashMap<>();
        map.put(0.0, 0.5);
        map.put(0.5, 1.0);
        if (!controller.scaleStream(stream, Collections.singletonList(0L), map, executor).getFuture().get()) {
            log.error("Scale stream: splitting segment into two failed, exiting");
            return;
        }
        // Test 2: scale stream: merge two segments into one.
        log.info("Scaling stream {}/{}, merging two segments into one", scope, streamName);
        CompletableFuture<Boolean> scaleResponseFuture = controller.scaleStream(stream, Arrays.asList(1L, 2L), Collections.singletonMap(0.0, 1.0), executor).getFuture();
        if (!scaleResponseFuture.get()) {
            log.error("Scale stream: merging two segments into one failed, exiting");
            return;
        }
        // Test 3: create a transaction and try a scale operation; it should fail the precondition check.
        CompletableFuture<TxnSegments> txnFuture = controller.createTransaction(stream, 5000);
        TxnSegments transaction = txnFuture.get();
        if (transaction == null) {
            log.error("Create transaction failed, exiting");
            return;
        }
        log.info("Scaling stream {}/{}, splitting one segment into two, while transaction is ongoing", scope, streamName);
        scaleResponseFuture = controller.scaleStream(stream, Collections.singletonList(3L), map, executor).getFuture();
        CompletableFuture<Boolean> future = scaleResponseFuture.whenComplete((r, e) -> {
            if (e != null) {
                log.error("Failed: scale with ongoing transaction.", e);
            } else if (getAndHandleExceptions(controller.checkTransactionStatus(stream, transaction.getTxnId()), RuntimeException::new) != Transaction.Status.OPEN) {
                log.info("Success: scale with ongoing transaction.");
            } else {
                log.error("Failed: scale with ongoing transaction.");
            }
        });
        CompletableFuture<Void> statusFuture = controller.abortTransaction(stream, transaction.getTxnId());
        statusFuture.get();
        future.get();
        log.info("All scaling tests PASSED");
        ExecutorServiceHelpers.shutdown(executor);
        System.exit(0);
    } catch (Throwable t) {
        log.error("Test failed", t);
        System.exit(-1);
    }
}
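The key-range maps above are assembled by hand: 0.0–0.5 and 0.5–1.0 for the two-way split, and 0.0–1.0 for the merge. As an illustration only, a small utility could generalize this to an even n-way split of the keyspace; evenSplit is a hypothetical name, not part of ScaleTest.

// Hypothetical utility (not in ScaleTest): builds the key-range map for an
// even split of the [0.0, 1.0) keyspace into n new segments, in the shape
// expected by controller.scaleStream above.
private static Map<Double, Double> evenSplit(int n) {
    Map<Double, Double> ranges = new HashMap<>();
    for (int i = 0; i < n; i++) {
        // Each entry maps a range's low key to its high key.
        ranges.put((double) i / n, (double) (i + 1) / n);
    }
    return ranges;
}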
Use of io.pravega.client.stream.StreamConfiguration in project pravega by pravega.
From the class StreamsAndScopesManagementTest, method testCreateSealAndDeleteStreams:
private void testCreateSealAndDeleteStreams(String scope) {
    final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
    for (int j = 1; j <= NUM_STREAMS; j++) {
        final String stream = String.valueOf(j);
        StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(j)).build();
        // Creating a stream in a nonexistent scope should not succeed.
        log.info("Creating a stream in a deliberately nonexistent scope nonexistentScope/{}.", stream);
        assertThrows(RuntimeException.class, () -> streamManager.createStream("nonexistentScope", stream, StreamConfiguration.builder().build()));
        long iniTime = System.nanoTime();
        log.info("Creating stream {}/{}.", scope, stream);
        assertTrue("Creating stream", streamManager.createStream(scope, stream, config));
        controllerPerfStats.get("createStreamMs").add(timeDiffInMs(iniTime));
        // Update the configuration of the stream by doubling the number of segments.
        config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(j * 2)).build();
        iniTime = System.nanoTime();
        assertTrue(streamManager.updateStream(scope, stream, config));
        controllerPerfStats.get("updateStreamMs").add(timeDiffInMs(iniTime));
        // Perform tests on both empty and non-empty streams.
        if (j % 2 == 0) {
            log.info("Writing events in stream {}/{}.", scope, stream);
            @Cleanup
            EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, clientConfig);
            writeEvents(clientFactory, stream, NUM_EVENTS);
        }
        // Update the configuration of the stream.
        config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(j * 2)).build();
        assertTrue(streamManager.updateStream(scope, stream, config));
        // Attempting to delete a non-empty scope or a non-sealed stream should fail.
        assertThrows(RuntimeException.class, () -> streamManager.deleteScope(scope));
        assertThrows(RuntimeException.class, () -> streamManager.deleteStream(scope, stream));
        // Seal and delete the stream.
        log.info("Attempting to seal and delete stream {}/{}.", scope, stream);
        iniTime = System.nanoTime();
        assertTrue(streamManager.sealStream(scope, stream));
        controllerPerfStats.get("sealStreamMs").add(timeDiffInMs(iniTime));
        iniTime = System.nanoTime();
        assertTrue(streamManager.deleteStream(scope, stream));
        controllerPerfStats.get("deleteStreamMs").add(timeDiffInMs(iniTime));
        // Sealing or deleting an already sealed/deleted stream should fail.
        log.info("Sealing and deleting an already deleted stream {}/{}.", scope, stream);
        assertThrows(RuntimeException.class, () -> streamManager.sealStream(scope, stream));
        assertFalse(streamManager.deleteStream(scope, stream));
    }
}
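For reference, the StreamManager lifecycle exercised above (create scope, create stream, update, seal, delete) can also be run standalone against a controller. A minimal sketch, assuming a local controller endpoint; the URI, scope, and stream names below are illustrative assumptions.

import io.pravega.client.ClientConfig;
import io.pravega.client.admin.StreamManager;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;
import java.net.URI;

// Minimal standalone sketch of the stream lifecycle; assumes a controller at
// tcp://localhost:9090 (an assumed endpoint for illustration).
ClientConfig clientConfig = ClientConfig.builder()
        .controllerURI(URI.create("tcp://localhost:9090"))
        .build();
try (StreamManager streamManager = StreamManager.create(clientConfig)) {
    streamManager.createScope("demoScope");
    StreamConfiguration config = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(1))
            .build();
    streamManager.createStream("demoScope", "demoStream", config);
    // A stream must be sealed before it can be deleted.
    streamManager.sealStream("demoScope", "demoStream");
    streamManager.deleteStream("demoScope", "demoStream");
    streamManager.deleteScope("demoScope");
}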