Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
The class EndToEndAutoScaleUpWithTxnTest, method main:
public static void main(String[] args) throws Exception {
    try {
        @Cleanup TestingServer zkTestServer = new TestingServerStarter().start();
        int port = Config.SERVICE_PORT;
        @Cleanup ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), port);
        Controller controller = controllerWrapper.getController();
        controllerWrapper.getControllerService().createScope(NameUtils.INTERNAL_SCOPE_NAME, 0L).get();
        @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
        @Cleanup ClientFactoryImpl internalCF = new ClientFactoryImpl(NameUtils.INTERNAL_SCOPE_NAME, controller, connectionFactory);
        @Cleanup("shutdownNow") val executor = ExecutorServiceHelpers.newScheduledThreadPool(1, "test");
        @Cleanup ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
        serviceBuilder.initialize();
        StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
        TableStore tableStore = serviceBuilder.createTableStoreService();
        @Cleanup AutoScaleMonitor autoScaleMonitor = new AutoScaleMonitor(store, internalCF, AutoScalerConfig.builder().with(AutoScalerConfig.MUTE_IN_SECONDS, 0).with(AutoScalerConfig.COOLDOWN_IN_SECONDS, 0).build());
        @Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, false, "localhost", 12345, store, tableStore, autoScaleMonitor.getStatsRecorder(), autoScaleMonitor.getTableSegmentStatsRecorder(), null, null, null, true, serviceBuilder.getLowPriorityExecutor(), Config.TLS_PROTOCOL_VERSION.toArray(new String[Config.TLS_PROTOCOL_VERSION.size()]));
        server.startListening();
        controllerWrapper.awaitRunning();
        controllerWrapper.getControllerService().createScope("test", 0L).get();
        controller.createStream("test", "test", CONFIG).get();
        @Cleanup MockClientFactory clientFactory = new MockClientFactory("test", controller, internalCF.getConnectionPool());
        // Mock the Pravega service behavior by issuing scale-up and scale-down requests for the stream.
        EventWriterConfig writerConfig = EventWriterConfig.builder().transactionTimeoutTime(30000).build();
        TransactionalEventStreamWriter<String> test = clientFactory.createTransactionalEventWriter("writer", "test", new UTF8StringSerializer(), writerConfig);
        // region Successful commit tests
        Transaction<String> txn1 = test.beginTxn();
        txn1.writeEvent("1");
        txn1.flush();
        Map<Double, Double> map = new HashMap<>();
        map.put(0.0, 1.0 / 3.0);
        map.put(1.0 / 3.0, 2.0 / 3.0);
        map.put(2.0 / 3.0, 1.0);
        Stream stream = new StreamImpl("test", "test");
        // Manually scale segment 0 into three equal key-space ranges while txn1 is still open.
        controller.startScale(stream, Collections.singletonList(0L), map).get();
        Transaction<String> txn2 = test.beginTxn();
        txn2.writeEvent("2");
        txn2.flush();
        txn2.commit();
        txn1.commit();
        Thread.sleep(1000);
        @Cleanup ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
        readerGroupManager.createReaderGroup("readergrp", ReaderGroupConfig.builder().stream("test/test").build());
        final EventStreamReader<String> reader = clientFactory.createReader("1", "readergrp", new JavaSerializer<>(), ReaderConfig.builder().build());
        String event1 = reader.readNextEvent(SECONDS.toMillis(60)).getEvent();
        String event2 = reader.readNextEvent(SECONDS.toMillis(60)).getEvent();
        assert event1.equals("1");
        assert event2.equals("2");
        final AtomicBoolean done = new AtomicBoolean(false);
        startWriter(test, done);
        // Poll the current segments with exponential backoff until auto-scaling has split the stream past epoch 5.
        Retry.withExpBackoff(10, 10, 100, 10000)
             .retryingOn(NotDoneException.class)
             .throwingOn(RuntimeException.class)
             .runAsync(() -> controller.getCurrentSegments("test", "test").thenAccept(streamSegments -> {
                 if (streamSegments.getSegments().stream().anyMatch(x -> NameUtils.getEpoch(x.getSegmentId()) > 5)) {
                     System.err.println("Success");
                     log.info("Success");
                     System.exit(0);
                 } else {
                     throw new NotDoneException();
                 }
             }), executor)
             .exceptionally(e -> {
                 System.err.println("Failure");
                 log.error("Failure");
                 System.exit(1);
                 return null;
             }).get();
    } catch (Throwable e) {
        System.err.println("Test failed with exception: " + e.getMessage());
        log.error("Test failed with exception", e);
        System.exit(-1);
    }
    System.exit(0);
}
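The main method above calls a startWriter helper that is defined elsewhere in EndToEndAutoScaleUpWithTxnTest; its job is to keep transactional write load on the stream so the auto-scaler keeps splitting segments. A minimal sketch of what such a helper could look like, assuming the standard Pravega client API (the body below is an illustration, not the project's actual implementation):

private static void startWriter(TransactionalEventStreamWriter<String> writer, AtomicBoolean done) {
    CompletableFuture.runAsync(() -> {
        while (!done.get()) {
            try {
                // Open a transaction, write a small batch, and commit to keep traffic flowing.
                Transaction<String> txn = writer.beginTxn();
                for (int i = 0; i < 100; i++) {
                    txn.writeEvent("0", "sample event");
                }
                txn.commit();
            } catch (TxnFailedException e) {
                // An aborted transaction is acceptable here; keep generating load.
            }
        }
    });
}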
Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
The class ControllerWatermarkingTest, method watermarkTest:
@Test(timeout = 60000)
public void watermarkTest() throws Exception {
    Controller controller = controllerWrapper.getController();
    String scope = "scope";
    String stream = "stream";
    controller.createScope(scope).join();
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    controller.createStream(scope, stream, config).join();
    String markStream = NameUtils.getMarkStreamForStream(stream);
    Stream streamObj = new StreamImpl(scope, stream);
    // Report two writer positions with increasing timestamps; the watermarking
    // service aggregates these into watermarks on the internal mark stream.
    WriterPosition pos1 = WriterPosition.builder().segments(Collections.singletonMap(new Segment(scope, stream, 0L), 10L)).build();
    WriterPosition pos2 = WriterPosition.builder().segments(Collections.singletonMap(new Segment(scope, stream, 0L), 20L)).build();
    controller.noteTimestampFromWriter("1", streamObj, 1L, pos1).join();
    controller.noteTimestampFromWriter("2", streamObj, 2L, pos2).join();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    @Cleanup RevisionedStreamClient<Watermark> reader = clientFactory.createRevisionedStreamClient(markStream, new WatermarkSerializer(), SynchronizerConfig.builder().build());
    // Wait up to 30 seconds for the first watermark to appear on the mark stream.
    AssertExtensions.assertEventuallyEquals(true, () -> {
        Iterator<Entry<Revision, Watermark>> watermarks = reader.readFrom(reader.fetchOldestRevision());
        return watermarks.hasNext();
    }, 30000);
    Iterator<Entry<Revision, Watermark>> watermarks = reader.readFrom(reader.fetchOldestRevision());
    Watermark watermark = watermarks.next().getValue();
    assertEquals(watermark.getLowerTimeBound(), 1L);
    assertTrue(watermark.getStreamCut().entrySet().stream().anyMatch(x -> x.getKey().getSegmentId() == 0L && x.getValue() == 20L));
    controller.sealStream(scope, stream).join();
    controller.deleteStream(scope, stream).join();
    // Deleting the stream should also remove its mark stream.
    AssertExtensions.assertFutureThrows("Mark Stream should not exist", controller.getCurrentSegments(scope, markStream), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
}
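This test drives watermarks by calling Controller.noteTimestampFromWriter directly. In an application, writers normally report time through the EventStreamWriter API instead; a brief sketch under that assumption (the variable names are illustrative):

EventWriterConfig cfg = EventWriterConfig.builder().automaticallyNoteTime(true).build();
@Cleanup
EventStreamWriter<String> appWriter = clientFactory.createEventWriter(stream, new JavaSerializer<>(), cfg);
appWriter.writeEvent("key", "payload").join();
// With automaticallyNoteTime left disabled, the application would instead call
// appWriter.noteTime(System.currentTimeMillis()) at its own cadence.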
Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
The class StreamCutsTest, method testReaderGroupCuts:
@Test(timeout = 40000)
public void testReaderGroupCuts() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope("test", 0L).get();
    controller.createStream("test", "test", config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter("test", new JavaSerializer<>(), EventWriterConfig.builder().build());
    writer.writeEvent("0", "fpj was here").get();
    writer.writeEvent("0", "fpj was here again").get();
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    groupManager.createReaderGroup("cuts", ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/test").groupRefreshTimeMillis(0).build());
    @Cleanup ReaderGroup readerGroup = groupManager.getReaderGroup("cuts");
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", "cuts", new JavaSerializer<>(), ReaderConfig.builder().initialAllocationDelay(0).build());
    EventRead<String> firstEvent = reader.readNextEvent(5000);
    assertNotNull(firstEvent.getEvent());
    assertEquals("fpj was here", firstEvent.getEvent());
    readerGroup.initiateCheckpoint("cp1", executor);
    EventRead<String> cpEvent = reader.readNextEvent(5000);
    assertEquals("cp1", cpEvent.getCheckpointName());
    EventRead<String> secondEvent = reader.readNextEvent(5000);
    assertNotNull(secondEvent.getEvent());
    assertEquals("fpj was here again", secondEvent.getEvent());
    Map<Stream, StreamCut> cuts = readerGroup.getStreamCuts();
    validateCuts(readerGroup, cuts, Collections.singleton(getQualifiedStreamSegmentName("test", "test", 0L)));
    // Scale the stream to verify that we get more segments in the cut.
    Stream stream = Stream.of("test", "test");
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.5);
    map.put(0.5, 1.0);
    Boolean result = controller.scaleStream(stream, Collections.singletonList(0L), map, executor).getFuture().get();
    assertTrue(result);
    log.info("Finished 1st scaling");
    writer.writeEvent("0", "fpj was here again0").get();
    writer.writeEvent("1", "fpj was here again1").get();
    EventRead<String> eosEvent = reader.readNextEvent(100);
    // Reader does not yet see the data because there has been no checkpoint.
    assertNull(eosEvent.getEvent());
    CompletableFuture<Checkpoint> checkpoint = readerGroup.initiateCheckpoint("cp2", executor);
    cpEvent = reader.readNextEvent(100);
    EventRead<String> event0 = reader.readNextEvent(100);
    EventRead<String> event1 = reader.readNextEvent(100);
    cuts = checkpoint.get(5, TimeUnit.SECONDS).asImpl().getPositions();
    // Validate that the reader did not release the segments before the checkpoint.
    // This is important because it means that once the checkpoint is initiated no segments change readers.
    Set<String> segmentNames = ImmutableSet.of(getQualifiedStreamSegmentName("test", "test", computeSegmentId(0, 0)));
    validateCuts(readerGroup, cuts, segmentNames);
    CompletableFuture<Map<Stream, StreamCut>> futureCuts = readerGroup.generateStreamCuts(executor);
    EventRead<String> emptyEvent = reader.readNextEvent(100);
    cuts = futureCuts.get();
    segmentNames = ImmutableSet.of(getQualifiedStreamSegmentName("test", "test", computeSegmentId(1, 1)), getQualifiedStreamSegmentName("test", "test", computeSegmentId(2, 1)));
    validateCuts(readerGroup, cuts, segmentNames);
    // Scale down to verify that the number of segments drops back.
    map = new HashMap<>();
    map.put(0.0, 1.0);
    ArrayList<Long> toSeal = new ArrayList<>();
    toSeal.add(computeSegmentId(1, 1));
    toSeal.add(computeSegmentId(2, 1));
    result = controller.scaleStream(stream, Collections.unmodifiableList(toSeal), map, executor).getFuture().get();
    assertTrue(result);
    log.info("Finished 2nd scaling");
    writer.writeEvent("0", "fpj was here again2").get();
    // Reader sees that the segment is empty.
    emptyEvent = reader.readNextEvent(100);
    assertNull(emptyEvent.getEvent());
    checkpoint = readerGroup.initiateCheckpoint("cp3", executor);
    cpEvent = reader.readNextEvent(100);
    assertEquals("cp3", cpEvent.getCheckpointName());
    // Reader releases segments here.
    event0 = reader.readNextEvent(5000);
    assertTrue(event0.getEvent().endsWith("2"));
    cuts = readerGroup.getStreamCuts();
    long three = computeSegmentId(3, 2);
    validateCuts(readerGroup, cuts, Collections.singleton(getQualifiedStreamSegmentName("test", "test", three)));
    // Scale up to 4 segments again.
    map = new HashMap<>();
    map.put(0.0, 0.25);
    map.put(0.25, 0.5);
    map.put(0.5, 0.75);
    map.put(0.75, 1.0);
    result = controller.scaleStream(stream, Collections.singletonList(three), map, executor).getFuture().get();
    assertTrue(result);
    log.info("Finished 3rd scaling");
    writer.writeEvent("0", "fpj was here again3").get();
    // Reader sees that the segment is empty.
    emptyEvent = reader.readNextEvent(100);
    assertNull(emptyEvent.getEvent());
    readerGroup.initiateCheckpoint("cp4", executor);
    cpEvent = reader.readNextEvent(1000);
    assertEquals("cp4", cpEvent.getCheckpointName());
    // Reader releases segments here.
    event0 = reader.readNextEvent(5000);
    assertNotNull(event0.getEvent());
    cuts = readerGroup.getStreamCuts();
    segmentNames = new HashSet<>();
    long four = computeSegmentId(4, 3);
    long five = computeSegmentId(5, 3);
    long six = computeSegmentId(6, 3);
    long seven = computeSegmentId(7, 3);
    segmentNames.add(getQualifiedStreamSegmentName("test", "test", four));
    segmentNames.add(getQualifiedStreamSegmentName("test", "test", five));
    segmentNames.add(getQualifiedStreamSegmentName("test", "test", six));
    segmentNames.add(getQualifiedStreamSegmentName("test", "test", seven));
    validateCuts(readerGroup, cuts, Collections.unmodifiableSet(segmentNames));
}
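The stream cuts produced above are not only for validation; a cut can bound a new reader group so it processes just a fixed slice of the stream. A short sketch reusing the cuts map and groupManager from the test (the group name is illustrative):

StreamCut start = cuts.get(Stream.of("test", "test"));
ReaderGroupConfig bounded = ReaderGroupConfig.builder()
        .stream(Stream.of("test", "test"), start, StreamCut.UNBOUNDED)
        .build();
groupManager.createReaderGroup("boundedCuts", bounded);
// Readers in "boundedCuts" start from the generated cut rather than the head of the stream.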
Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
The class ControllerServiceTest, method testControllerService:
@Test(timeout = 80000)
public void testControllerService() throws Exception {
    final String scope1 = "scope1";
    final String scope2 = "scope2";
    controllerWrapper.getControllerService().createScope("scope1", 0L).get();
    controllerWrapper.getControllerService().createScope("scope2", 0L).get();
    Controller controller = controllerWrapper.getController();
    final String streamName1 = "stream1";
    final String streamName2 = "stream2";
    final String streamName3 = "stream3";
    final ScalingPolicy scalingPolicy = ScalingPolicy.fixed(2);
    final StreamConfiguration config1 = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
    final StreamConfiguration config2 = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
    final StreamConfiguration config3 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(3)).build();
    createAStream(scope1, streamName1, controller, config1);
    // Same name in a different scope
    createAStream(scope2, streamName1, controller, config2);
    // Different name in the same scope
    createAStream(scope1, streamName2, controller, config3);
    createAStream(scope1, streamName3, controller, config3);
    final String kvtName1 = "kvtable1";
    final String kvtName2 = "kvtable2";
    final KeyValueTableConfiguration kvtConfig1 = KeyValueTableConfiguration.builder().partitionCount(2).primaryKeyLength(4).secondaryKeyLength(4).build();
    createAKeyValueTable(scope1, kvtName1, controller, kvtConfig1);
    // Same name in a different scope
    createAKeyValueTable(scope2, kvtName1, controller, kvtConfig1);
    // Different name in a different scope
    createAKeyValueTable(scope2, kvtName2, controller, kvtConfig1);
    final String scopeSeal = "scopeSeal";
    final String streamNameSeal = "streamSeal";
    sealAStream(controllerWrapper, controller, scalingPolicy, scopeSeal, streamNameSeal);
    sealASealedStream(controller, scopeSeal, streamNameSeal);
    sealNonExistantStream(controller, scopeSeal);
    streamDuplicationNotAllowed(scope1, streamName1, controller, config1);
    // Update stream config section
    updateStreamName(controller, scope1, scalingPolicy);
    updateScalingPolicy(controller, scope1, streamName1);
    updateTargetRate(controller, scope1, streamName1);
    updateScaleFactor(controller, scope1, streamName1);
    updataMinSegmentes(controller, scope1, streamName1);
    updateConfigOfNonExistantStream(controller);
    // Get the currently active segments
    getActiveSegments(controller, scope1, streamName1);
    getActiveSegmentsForNonExistentStream(controller);
    // Get positions at a given timestamp
    getSegmentsAtTime(controller, scope1, streamName1);
    getSegmentsAtTime(controller, scope1, streamName2);
    getSegmentsForNonExistentStream(controller);
    getSegmentsBeforeCreation(controller, scope1, streamName1);
    getSegmentsAfterCreation(controller, scope1, streamName1);
    readerGroupsTest(controller, scope1, streamName1, streamName2, streamName3);
    updateSubscriberStreamCutTest(controller, scope2, streamName1);
}
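The helpers invoked here (createAStream, sealAStream, and so on) live elsewhere in ControllerServiceTest. As a rough sketch, the create helper presumably reduces to a single Controller call, along these lines (an assumption about the helper's body, not a copy of it):

private static void createAStream(String scope, String streamName, Controller controller, StreamConfiguration config) throws Exception {
    // createStream completes with true when the stream is newly created.
    assertTrue(controller.createStream(scope, streamName, config).get());
}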
Use of io.pravega.client.control.impl.Controller in project pravega by pravega.
The class AppendTest, method appendThroughConditionalClient:
@Test(timeout = 10000)
public void appendThroughConditionalClient() throws Exception {
    String endpoint = "localhost";
    int port = TestUtils.getAvailableListenPort();
    String testString = "Hello world\n";
    String scope = "scope";
    String stream = "appendThroughConditionalClient";
    StreamSegmentStore store = SERVICE_BUILDER.createStreamSegmentService();
    TableStore tableStore = SERVICE_BUILDER.createTableStoreService();
    @Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, port, store, tableStore, SERVICE_BUILDER.getLowPriorityExecutor());
    server.startListening();
    @Cleanup SocketConnectionFactoryImpl clientCF = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(ClientConfig.builder().build(), clientCF);
    @Cleanup Controller controller = new MockController(endpoint, port, connectionPool, true);
    controller.createScope(scope);
    controller.createStream(scope, stream, StreamConfiguration.builder().build());
    ConditionalOutputStreamFactoryImpl segmentClient = new ConditionalOutputStreamFactoryImpl(controller, connectionPool);
    // Look up the single segment backing the new stream.
    Segment segment = Futures.getAndHandleExceptions(controller.getCurrentSegments(scope, stream), RuntimeException::new).getSegments().iterator().next();
    @Cleanup ConditionalOutputStream out = segmentClient.createConditionalOutputStream(segment, DelegationTokenProviderFactory.createWithEmptyToken(), EventWriterConfig.builder().build());
    // The conditional append succeeds because the segment is empty, i.e. at the expected offset 0.
    assertTrue(out.write(ByteBuffer.wrap(testString.getBytes()), 0));
}
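The conditional client makes each append atomic with an offset check: write(data, expectedOffset) succeeds only if the segment is currently exactly expectedOffset bytes long. A sketch of the failure side of that contract, continuing from the test above (the follow-up assertion is an assumption; the test itself only exercises the success path):

// A second write that still claims offset 0 is rejected, because the first
// append has already advanced the segment past offset 0.
boolean accepted = out.write(ByteBuffer.wrap(testString.getBytes()), 0);
assertFalse(accepted);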