Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class SecureControllerRestApiTest, method secureReaderGroupRestApiTest.
@Test
public void secureReaderGroupRestApiTest() throws Exception {
    Invocation.Builder builder;
    Response response;
    restServerURI = CLUSTER.controllerRestUri();
    log.info("REST Server URI: {}", restServerURI);

    // Test REST server status: ping test.
    resourceURl = new StringBuilder(restServerURI).append("/ping").toString();
    webTarget = client.target(resourceURl);
    builder = webTarget.request();
    response = builder.get();
    assertEquals("Ping test", OK.getStatusCode(), response.getStatus());
    log.info("REST Server is running. Ping successful.");

    // Test reader group APIs.
    // Prepare the streams and readers using the admin client.
    final String testScope = RandomStringUtils.randomAlphanumeric(10);
    final String testStream1 = RandomStringUtils.randomAlphanumeric(10);
    final String testStream2 = RandomStringUtils.randomAlphanumeric(10);
    URI controllerUri = new URI(CLUSTER.controllerUri());
    @Cleanup("shutdown")
    InlineExecutor inlineExecutor = new InlineExecutor();
    ClientConfig clientConfig = ClientConfig.builder()
            .controllerURI(controllerUri)
            .credentials(new DefaultCredentials(SecurityConfigDefaults.AUTH_ADMIN_PASSWORD, SecurityConfigDefaults.AUTH_ADMIN_USERNAME))
            .trustStore(TRUSTSTORE_PATH)
            .validateHostName(false)
            .build();
    try (ConnectionPool cp = new ConnectionPoolImpl(clientConfig, new SocketConnectionFactoryImpl(clientConfig));
         StreamManager streamManager = new StreamManagerImpl(createController(controllerUri, inlineExecutor), cp)) {
        log.info("Creating scope: {}", testScope);
        streamManager.createScope(testScope);
        log.info("Creating stream: {}", testStream1);
        StreamConfiguration streamConf1 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
        streamManager.createStream(testScope, testStream1, streamConf1);
        log.info("Creating stream: {}", testStream2);
        StreamConfiguration streamConf2 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
        streamManager.createStream(testScope, testStream2, streamConf2);
    }
    final String readerGroupName1 = RandomStringUtils.randomAlphanumeric(10);
    final String readerGroupName2 = RandomStringUtils.randomAlphanumeric(10);
    final String reader1 = RandomStringUtils.randomAlphanumeric(10);
    final String reader2 = RandomStringUtils.randomAlphanumeric(10);
    try (ClientFactoryImpl clientFactory = new ClientFactoryImpl(testScope, createController(controllerUri, inlineExecutor), clientConfig);
         ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(testScope, ClientConfig.builder()
                 .controllerURI(controllerUri)
                 .credentials(new DefaultCredentials(SecurityConfigDefaults.AUTH_ADMIN_PASSWORD, SecurityConfigDefaults.AUTH_ADMIN_USERNAME))
                 .trustStore(TRUSTSTORE_PATH)
                 .validateHostName(false)
                 .build())) {
        readerGroupManager.createReaderGroup(readerGroupName1, ReaderGroupConfig.builder()
                .stream(Stream.of(testScope, testStream1))
                .stream(Stream.of(testScope, testStream2))
                .build());
        readerGroupManager.createReaderGroup(readerGroupName2, ReaderGroupConfig.builder()
                .stream(Stream.of(testScope, testStream1))
                .stream(Stream.of(testScope, testStream2))
                .build());
        clientFactory.createReader(reader1, readerGroupName1, new JavaSerializer<Long>(), ReaderConfig.builder().build());
        clientFactory.createReader(reader2, readerGroupName1, new JavaSerializer<Long>(), ReaderConfig.builder().build());
    }

    // Test fetching reader groups.
    resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + testScope + "/readergroups").toString();
    response = client.target(resourceURl).request().get();
    assertEquals("Get readergroups status", OK.getStatusCode(), response.getStatus());
    ReaderGroupsList readerGroupsList = response.readEntity(ReaderGroupsList.class);
    assertEquals("Get readergroups size", 2, readerGroupsList.getReaderGroups().size());
    assertTrue(readerGroupsList.getReaderGroups().contains(new ReaderGroupsListReaderGroups().readerGroupName(readerGroupName1)));
    assertTrue(readerGroupsList.getReaderGroups().contains(new ReaderGroupsListReaderGroups().readerGroupName(readerGroupName2)));
    log.info("Get readergroups successful");

    // Test fetching reader group info.
    resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + testScope + "/readergroups/" + readerGroupName1).toString();
    response = client.target(resourceURl).request().get();
    assertEquals("Get readergroup properties status", OK.getStatusCode(), response.getStatus());
    ReaderGroupProperty readerGroupProperty = response.readEntity(ReaderGroupProperty.class);
    assertEquals("Get readergroup name", readerGroupName1, readerGroupProperty.getReaderGroupName());
    assertEquals("Get readergroup scope name", testScope, readerGroupProperty.getScopeName());
    assertEquals("Get readergroup streams size", 2, readerGroupProperty.getStreamList().size());
    assertTrue(readerGroupProperty.getStreamList().contains(Stream.of(testScope, testStream1).getScopedName()));
    assertTrue(readerGroupProperty.getStreamList().contains(Stream.of(testScope, testStream2).getScopedName()));
    assertEquals("Get readergroup onlinereaders size", 2, readerGroupProperty.getOnlineReaderIds().size());
    assertTrue(readerGroupProperty.getOnlineReaderIds().contains(reader1));
    assertTrue(readerGroupProperty.getOnlineReaderIds().contains(reader2));

    // Test reader group or scope not found.
    resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + testScope + "/readergroups/" + "unknownreadergroup").toString();
    response = client.target(resourceURl).request().get();
    assertEquals("Get readergroup properties status", NOT_FOUND.getStatusCode(), response.getStatus());
    resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + "unknownscope" + "/readergroups/" + readerGroupName1).toString();
    response = client.target(resourceURl).request().get();
    assertEquals("Get readergroup properties status", NOT_FOUND.getStatusCode(), response.getStatus());
    log.info("Get readergroup properties successful");
    log.info("Test restApiTests passed successfully!");
}
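The createController(controllerUri, inlineExecutor) helper used above is test-local and not shown in this snippet. A minimal sketch of what such a helper could look like, assuming Pravega's ControllerImpl and ControllerImplConfig; the wiring below is an assumption, not the actual helper from the Pravega test sources:

    // Hypothetical sketch of the test-local createController helper used above.
    private Controller createController(URI controllerUri, ScheduledExecutorService executor) {
        // Build the same admin-credentialed, TLS-enabled client config used elsewhere in the test.
        ClientConfig config = ClientConfig.builder()
                .controllerURI(controllerUri)
                .credentials(new DefaultCredentials(SecurityConfigDefaults.AUTH_ADMIN_PASSWORD, SecurityConfigDefaults.AUTH_ADMIN_USERNAME))
                .trustStore(TRUSTSTORE_PATH)
                .validateHostName(false)
                .build();
        // ControllerImpl is the client-side gRPC stub for the Controller service.
        return new ControllerImpl(ControllerImplConfig.builder().clientConfig(config).build(), executor);
    }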
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class StreamCutsTest, method testReaderGroupCuts.
@Test(timeout = 40000)
public void testReaderGroupCuts() throws Exception {
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(10, 2, 1)).build();
    Controller controller = controllerWrapper.getController();
    controllerWrapper.getControllerService().createScope("test", 0L).get();
    controller.createStream("test", "test", config).get();
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup
    EventStreamWriter<String> writer = clientFactory.createEventWriter("test", new JavaSerializer<>(), EventWriterConfig.builder().build());
    writer.writeEvent("0", "fpj was here").get();
    writer.writeEvent("0", "fpj was here again").get();
    @Cleanup
    ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    groupManager.createReaderGroup("cuts", ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream("test/test").groupRefreshTimeMillis(0).build());
    @Cleanup
    ReaderGroup readerGroup = groupManager.getReaderGroup("cuts");
    @Cleanup
    EventStreamReader<String> reader = clientFactory.createReader("readerId", "cuts", new JavaSerializer<>(), ReaderConfig.builder().initialAllocationDelay(0).build());
    EventRead<String> firstEvent = reader.readNextEvent(5000);
    assertNotNull(firstEvent.getEvent());
    assertEquals("fpj was here", firstEvent.getEvent());
    readerGroup.initiateCheckpoint("cp1", executor);
    EventRead<String> cpEvent = reader.readNextEvent(5000);
    assertEquals("cp1", cpEvent.getCheckpointName());
    EventRead<String> secondEvent = reader.readNextEvent(5000);
    assertNotNull(secondEvent.getEvent());
    assertEquals("fpj was here again", secondEvent.getEvent());
    Map<Stream, StreamCut> cuts = readerGroup.getStreamCuts();
    validateCuts(readerGroup, cuts, Collections.singleton(getQualifiedStreamSegmentName("test", "test", 0L)));

    // Scale the stream to verify that we get more segments in the cut.
    Stream stream = Stream.of("test", "test");
    Map<Double, Double> map = new HashMap<>();
    map.put(0.0, 0.5);
    map.put(0.5, 1.0);
    Boolean result = controller.scaleStream(stream, Collections.singletonList(0L), map, executor).getFuture().get();
    assertTrue(result);
    log.info("Finished 1st scaling");
    writer.writeEvent("0", "fpj was here again0").get();
    writer.writeEvent("1", "fpj was here again1").get();
    EventRead<String> eosEvent = reader.readNextEvent(100);
    // The reader does not yet see the data because there has been no checkpoint.
    assertNull(eosEvent.getEvent());
    CompletableFuture<Checkpoint> checkpoint = readerGroup.initiateCheckpoint("cp2", executor);
    cpEvent = reader.readNextEvent(100);
    EventRead<String> event0 = reader.readNextEvent(100);
    EventRead<String> event1 = reader.readNextEvent(100);
    cuts = checkpoint.get(5, TimeUnit.SECONDS).asImpl().getPositions();
    // Validate that the reader did not release the segments before the checkpoint.
    // This is important because it means that once the checkpoint is initiated no segments change readers.
    Set<String> segmentNames = ImmutableSet.of(getQualifiedStreamSegmentName("test", "test", computeSegmentId(0, 0)));
    validateCuts(readerGroup, cuts, segmentNames);
    CompletableFuture<Map<Stream, StreamCut>> futureCuts = readerGroup.generateStreamCuts(executor);
    EventRead<String> emptyEvent = reader.readNextEvent(100);
    cuts = futureCuts.get();
    segmentNames = ImmutableSet.of(getQualifiedStreamSegmentName("test", "test", computeSegmentId(1, 1)),
            getQualifiedStreamSegmentName("test", "test", computeSegmentId(2, 1)));
    validateCuts(readerGroup, cuts, segmentNames);

    // Scale down to verify that the number of segments drops back.
    map = new HashMap<>();
    map.put(0.0, 1.0);
    ArrayList<Long> toSeal = new ArrayList<>();
    toSeal.add(computeSegmentId(1, 1));
    toSeal.add(computeSegmentId(2, 1));
    result = controller.scaleStream(stream, Collections.unmodifiableList(toSeal), map, executor).getFuture().get();
    assertTrue(result);
    log.info("Finished 2nd scaling");
    writer.writeEvent("0", "fpj was here again2").get();
    // The reader sees that the segment is empty.
    emptyEvent = reader.readNextEvent(100);
    assertNull(emptyEvent.getEvent());
    checkpoint = readerGroup.initiateCheckpoint("cp3", executor);
    cpEvent = reader.readNextEvent(100);
    assertEquals("cp3", cpEvent.getCheckpointName());
    // The reader releases its segments here.
    event0 = reader.readNextEvent(5000);
    assertTrue(event0.getEvent().endsWith("2"));
    cuts = readerGroup.getStreamCuts();
    long three = computeSegmentId(3, 2);
    validateCuts(readerGroup, cuts, Collections.singleton(getQualifiedStreamSegmentName("test", "test", three)));

    // Scale up to 4 segments again.
    map = new HashMap<>();
    map.put(0.0, 0.25);
    map.put(0.25, 0.5);
    map.put(0.5, 0.75);
    map.put(0.75, 1.0);
    result = controller.scaleStream(stream, Collections.singletonList(three), map, executor).getFuture().get();
    assertTrue(result);
    log.info("Finished 3rd scaling");
    writer.writeEvent("0", "fpj was here again3").get();
    // The reader sees that the segment is empty.
    emptyEvent = reader.readNextEvent(100);
    assertNull(emptyEvent.getEvent());
    readerGroup.initiateCheckpoint("cp4", executor);
    cpEvent = reader.readNextEvent(1000);
    assertEquals("cp4", cpEvent.getCheckpointName());
    // The reader releases its segments here.
    event0 = reader.readNextEvent(5000);
    assertNotNull(event0.getEvent());
    cuts = readerGroup.getStreamCuts();
    segmentNames = new HashSet<>();
    long four = computeSegmentId(4, 3);
    long five = computeSegmentId(5, 3);
    long six = computeSegmentId(6, 3);
    long seven = computeSegmentId(7, 3);
    segmentNames.add(getQualifiedStreamSegmentName("test", "test", four));
    segmentNames.add(getQualifiedStreamSegmentName("test", "test", five));
    segmentNames.add(getQualifiedStreamSegmentName("test", "test", six));
    segmentNames.add(getQualifiedStreamSegmentName("test", "test", seven));
    validateCuts(readerGroup, cuts, Collections.unmodifiableSet(segmentNames));
}
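A note on the computeSegmentId(segmentNumber, epoch) calls above: Pravega encodes both the segment number and the scale epoch that created it into a single long segment id. A minimal sketch of that packing, assuming it matches io.pravega.shared.NameUtils.computeSegmentId:

    // Epoch in the high 32 bits, segment number in the low 32 bits.
    // E.g. computeSegmentId(3, 2) above is (2L << 32) + 3: segment number 3, created by the second scale.
    static long computeSegmentId(int segmentNumber, int epoch) {
        return ((long) epoch << 32) + segmentNumber;
    }

This is why each expected cut after a scale pairs fresh segment numbers with the new epoch.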
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class AppendTest, method appendALotOfData.
@Test(timeout = 100000)
public void appendALotOfData() {
    String endpoint = "localhost";
    String scope = "Scope";
    String streamName = "appendALotOfData";
    int port = TestUtils.getAvailableListenPort();
    long heapSize = Runtime.getRuntime().maxMemory();
    long messageSize = Math.min(1024 * 1024, heapSize / 20000);
    ByteBuffer payload = ByteBuffer.allocate((int) messageSize);
    StreamSegmentStore store = SERVICE_BUILDER.createStreamSegmentService();
    TableStore tableStore = SERVICE_BUILDER.createTableStoreService();
    @Cleanup("shutdown")
    InlineExecutor tokenExpiryExecutor = new InlineExecutor();
    @Cleanup
    PravegaConnectionListener server = new PravegaConnectionListener(false, port, store, tableStore, tokenExpiryExecutor);
    server.startListening();
    ClientConfig config = ClientConfig.builder().build();
    SocketConnectionFactoryImpl clientCF = new SocketConnectionFactoryImpl(config);
    @Cleanup
    ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(config, clientCF);
    Controller controller = new MockController(endpoint, port, connectionPool, true);
    @Cleanup
    StreamManagerImpl streamManager = new StreamManagerImpl(controller, connectionPool);
    streamManager.createScope(scope);
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, config);
    streamManager.createStream(scope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build());
    @Cleanup
    EventStreamWriter<ByteBuffer> producer = clientFactory.createEventWriter(streamName, new ByteBufferSerializer(), EventWriterConfig.builder().build());
    @Cleanup
    RawClient rawClient = new RawClient(new PravegaNodeUri(endpoint, port), connectionPool);
    for (int i = 0; i < 10; i++) {
        for (int j = 0; j < 100; j++) {
            producer.writeEvent(payload.slice());
        }
        producer.flush();
        long requestId = rawClient.getFlow().getNextSequenceNumber();
        String scopedName = new Segment(scope, streamName, 0).getScopedName();
        WireCommands.TruncateSegment request = new WireCommands.TruncateSegment(requestId, scopedName, i * 100L * (payload.remaining() + TYPE_PLUS_LENGTH_SIZE), "");
        Reply reply = rawClient.sendRequest(requestId, request).join();
        assertFalse(reply.toString(), reply.isFailure());
    }
    producer.close();
}
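The truncation offset in the loop deserves a word: each event occupies its payload plus a fixed wire header (TYPE_PLUS_LENGTH_SIZE, which I take to be the 4-byte type code plus 4-byte length from WireCommands). A short sketch of the arithmetic under that assumption:

    // Sketch: byte offset of the first event of batch `batchIndex` in segment 0,
    // assuming each event is written as one wire Event: 8-byte header + payload.
    static long batchStartOffset(int batchIndex, int eventsPerBatch, int payloadBytes) {
        final int wireHeaderBytes = 8;  // assumed value of WireCommands.TYPE_PLUS_LENGTH_SIZE
        return (long) batchIndex * eventsPerBatch * (payloadBytes + wireHeaderBytes);
    }

After iteration i, (i + 1) * 100 events have been appended, so truncating at batchStartOffset(i, 100, payload.remaining()) keeps only the most recent batch and bounds the test's storage footprint.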
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class ControllerWatermarkingTest, method watermarkTest.
@Test(timeout = 60000)
public void watermarkTest() throws Exception {
    Controller controller = controllerWrapper.getController();
    String scope = "scope";
    String stream = "stream";
    controller.createScope(scope).join();
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    controller.createStream(scope, stream, config).join();
    String markStream = NameUtils.getMarkStreamForStream(stream);
    Stream streamObj = new StreamImpl(scope, stream);
    WriterPosition pos1 = WriterPosition.builder().segments(Collections.singletonMap(new Segment(scope, stream, 0L), 10L)).build();
    WriterPosition pos2 = WriterPosition.builder().segments(Collections.singletonMap(new Segment(scope, stream, 0L), 20L)).build();
    controller.noteTimestampFromWriter("1", streamObj, 1L, pos1).join();
    controller.noteTimestampFromWriter("2", streamObj, 2L, pos2).join();
    @Cleanup
    ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup
    ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    @Cleanup
    RevisionedStreamClient<Watermark> reader = clientFactory.createRevisionedStreamClient(markStream, new WatermarkSerializer(), SynchronizerConfig.builder().build());
    AssertExtensions.assertEventuallyEquals(true, () -> {
        Iterator<Entry<Revision, Watermark>> watermarks = reader.readFrom(reader.fetchOldestRevision());
        return watermarks.hasNext();
    }, 30000);
    Iterator<Entry<Revision, Watermark>> watermarks = reader.readFrom(reader.fetchOldestRevision());
    Watermark watermark = watermarks.next().getValue();
    assertEquals(watermark.getLowerTimeBound(), 1L);
    assertTrue(watermark.getStreamCut().entrySet().stream().anyMatch(x -> x.getKey().getSegmentId() == 0L && x.getValue() == 20L));
    controller.sealStream(scope, stream).join();
    controller.deleteStream(scope, stream).join();
    AssertExtensions.assertFutureThrows("Mark Stream should not exist", controller.getCurrentSegments(scope, markStream), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
}
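NameUtils.getMarkStreamForStream maps a user stream to the internal stream that stores its watermarks. To the best of my knowledge the mapping simply prefixes the name, roughly:

    // Sketch of the mark-stream naming convention (assumed; see io.pravega.shared.NameUtils
    // for the authoritative version).
    static String getMarkStreamForStream(String stream) {
        return "_MARK" + stream;  // e.g. "stream" -> "_MARKstream"
    }

which is why the final assertion can check that the mark stream disappears along with the user stream.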
Use of io.pravega.client.stream.impl.ClientFactoryImpl in project pravega by pravega.
The class LargeEventTest, method testReadWriteWithSegmentStoreRestart.
@Test(timeout = 60000)
public void testReadWriteWithSegmentStoreRestart() throws ExecutionException, InterruptedException {
    String readerGroupName = "testLargeEventFailoverReaderGroup";
    String streamName = "SegmentStoreRestart";
    StreamConfiguration config = getStreamConfiguration(NUM_READERS);
    createScopeStream(SCOPE_NAME, streamName, config);
    int events = 1;
    AtomicInteger generation = new AtomicInteger(0);
    merge(eventsWrittenToPravega, generateEventData(NUM_WRITERS, events * generation.getAndIncrement(), events, LARGE_EVENT_SIZE));
    eventsReadFromPravega = readWriteCycle(streamName, readerGroupName, eventsWrittenToPravega);
    validateEventReads(eventsReadFromPravega, eventsWrittenToPravega);
    // Passing in this restart callback will override the default behavior of closing the connection.
    Runnable restart = () -> {
        // Reset the server, in effect clearing the AppendProcessor and PravegaRequestProcessor.
        this.server.close();
        this.server = new PravegaConnectionListener(false, servicePort, store, tableStore, serviceBuilder.getLowPriorityExecutor());
        this.server.startListening();
    };
    restart.run();
    Map<Integer, List<ByteBuffer>> data = generateEventData(NUM_WRITERS, events * generation.getAndIncrement(), events, LARGE_EVENT_SIZE);
    merge(eventsWrittenToPravega, data);
    eventsReadFromPravega = readWriteCycle(streamName, readerGroupName, data);
    validateEventReads(eventsReadFromPravega, eventsWrittenToPravega);
    // Clear objects necessary for read-write validation.
    stopReadFlag = new AtomicBoolean(false);
    eventsReadFromPravega.clear();
    eventReadCount.set(0);
    // Generate new data.
    data = generateEventData(NUM_WRITERS, events * generation.getAndIncrement(), events, LARGE_EVENT_SIZE);
    merge(eventsWrittenToPravega, data);
    AtomicInteger sendCount = new AtomicInteger(0);
    Supplier<Boolean> predicate = () -> sendCount.getAndIncrement() == CLOSE_WRITE_COUNT;
    // Now try the restart *during* a large event write.
    AtomicReference<Boolean> latch = new AtomicReference<>(true);
    try (ConnectionExporter connectionFactory = new ConnectionExporter(ClientConfig.builder().build(), latch, restart, predicate);
         ClientFactoryImpl clientFactory = new ClientFactoryImpl(SCOPE_NAME, controller, connectionFactory);
         ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(SCOPE_NAME, controller, clientFactory)) {
        // Start writing events to the stream.
        val writers = createEventWriters(streamName, NUM_WRITERS, clientFactory, data);
        Futures.allOf(writers).get();
        // Create a ReaderGroup.
        createReaderGroup(readerGroupName, readerGroupManager, streamName);
        // Create Readers.
        val readers = createEventReaders(NUM_READERS, clientFactory, readerGroupName, eventsReadFromPravega);
        stopReadFlag.set(true);
        Futures.allOf(readers).get();
        readerGroupManager.deleteReaderGroup(readerGroupName);
    }
    validateEventReads(eventsReadFromPravega, eventsWrittenToPravega);
    validateCleanUp(streamName);
}
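ConnectionExporter is a test-local ConnectionFactory wrapper that is not shown in this snippet; it watches outgoing connection activity and fires the restart callback once the predicate trips, so the segment store bounces in the middle of a large-event write. A hedged sketch of the idea follows; the class name and exact wiring are assumptions, not the actual Pravega test class:

    // Hypothetical delegating factory: runs `onTrigger` once, when `predicate` first returns true.
    class TriggeringConnectionFactory implements ConnectionFactory {
        private final ConnectionFactory delegate;
        private final AtomicReference<Boolean> latch;
        private final Runnable onTrigger;
        private final Supplier<Boolean> predicate;

        TriggeringConnectionFactory(ConnectionFactory delegate, AtomicReference<Boolean> latch,
                                    Runnable onTrigger, Supplier<Boolean> predicate) {
            this.delegate = delegate;
            this.latch = latch;
            this.onTrigger = onTrigger;
            this.predicate = predicate;
        }

        @Override
        public CompletableFuture<ClientConnection> establishConnection(PravegaNodeUri endpoint, ReplyProcessor rp) {
            // Fire the restart exactly once; the latch guards against repeats.
            if (predicate.get() && latch.getAndSet(false)) {
                onTrigger.run();
            }
            return delegate.establishConnection(endpoint, rp);
        }

        @Override
        public ScheduledExecutorService getInternalExecutor() {
            return delegate.getInternalExecutor();
        }

        @Override
        public void close() {
            delegate.close();
        }
    }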