use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
the class LargeEventTest method largeEventSimpleTest.
/**
* Invokes the largeEventSimpleTest and ensures we are able to produce events.
* The test fails in case of exceptions while writing to the stream.
*/
@Test
public void largeEventSimpleTest() {
Service conService = Utils.createPravegaControllerService(null);
List<URI> ctlURIs = conService.getServiceDetails();
URI controllerUri = ctlURIs.get(0);
log.info("Invoking create stream with Controller URI: {}", controllerUri);
@Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(Utils.buildClientConfig(controllerUri));
@Cleanup ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(Utils.buildClientConfig(controllerUri)).build(), connectionFactory.getInternalExecutor());
assertTrue(controller.createScope(STREAM_SCOPE).join());
assertTrue(controller.createStream(STREAM_SCOPE, STREAM_NAME, config).join());
@Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(STREAM_SCOPE, Utils.buildClientConfig(controllerUri));
log.info("Invoking Writer test with Controller URI: {}", controllerUri);
@Cleanup EventStreamWriter<ByteBuffer> writer = clientFactory.createEventWriter(STREAM_NAME, new ByteBufferSerializer(), EventWriterConfig.builder().build());
byte[] payload = new byte[Serializer.MAX_EVENT_SIZE];
for (int i = 0; i < NUM_EVENTS; i++) {
log.debug("Producing event: {} ", i);
// any exceptions while writing the event will fail the test.
writer.writeEvent("", ByteBuffer.wrap(payload));
writer.flush();
}
log.info("Invoking Reader test.");
ReaderGroupManager groupManager = ReaderGroupManager.withScope(STREAM_SCOPE, Utils.buildClientConfig(controllerUri));
groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().stream(Stream.of(STREAM_SCOPE, STREAM_NAME)).build());
@Cleanup EventStreamReader<ByteBuffer> reader = clientFactory.createReader(UUID.randomUUID().toString(), READER_GROUP, new ByteBufferSerializer(), ReaderConfig.builder().build());
int readCount = 0;
EventRead<ByteBuffer> event = null;
do {
event = reader.readNextEvent(10_000);
log.debug("Read event: {}.", event.getEvent());
if (event.getEvent() != null) {
readCount++;
}
// Keep reading until all the written events are read, else the test will time out.
} while ((event.getEvent() != null || event.isCheckpoint()) && readCount < NUM_EVENTS);
assertEquals("Read count should be equal to write count", NUM_EVENTS, readCount);
}
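The payload above is exactly Serializer.MAX_EVENT_SIZE (8 MiB), the largest event a writer with a default configuration accepts. Newer Pravega client versions can also write larger events when the writer opts in; a minimal sketch, assuming the enableLargeEvents option is available in the client version under test:
// Assumed: EventWriterConfig exposes enableLargeEvents in the client version in use.
EventWriterConfig largeEventConfig = EventWriterConfig.builder()
        .enableLargeEvents(true)
        .build();
@Cleanup EventStreamWriter<ByteBuffer> largeWriter = clientFactory.createEventWriter(STREAM_NAME, new ByteBufferSerializer(), largeEventConfig);
// A payload above MAX_EVENT_SIZE would be rejected by a default writer but is accepted here.
largeWriter.writeEvent("", ByteBuffer.wrap(new byte[2 * Serializer.MAX_EVENT_SIZE]));
largeWriter.flush();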
use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
the class ReadWithAutoScaleTest method scaleTestsWithReader.
@Test
public void scaleTestsWithReader() {
URI controllerUri = getControllerURI();
Controller controller = getController();
testState = new TestState(true);
final AtomicBoolean stopWriteFlag = new AtomicBoolean(false);
final AtomicBoolean stopReadFlag = new AtomicBoolean(false);
@Cleanup EventStreamClientFactory clientFactory = getClientFactory();
// 1. Start writing events to the Stream.
List<CompletableFuture<Void>> writers = new ArrayList<>();
writers.add(startWritingIntoTxn(clientFactory.createTransactionalEventWriter("initWriter", STREAM_NAME, new JavaSerializer<>(), EventWriterConfig.builder().transactionTimeoutTime(25000).build()), stopWriteFlag));
// 2. Start a reader group with 2 readers (the stream is configured with 2 segments).
// 2.1 Create a reader group.
log.info("Creating Reader group : {}", READER_GROUP_NAME);
@Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE, Utils.buildClientConfig(controllerUri));
readerGroupManager.createReaderGroup(READER_GROUP_NAME, ReaderGroupConfig.builder().stream(Stream.of(SCOPE, STREAM_NAME)).build());
// 2.2 Create readers.
CompletableFuture<Void> reader1 = startReading(clientFactory.createReader("reader1", READER_GROUP_NAME, new JavaSerializer<>(), ReaderConfig.builder().build()), stopReadFlag);
CompletableFuture<Void> reader2 = startReading(clientFactory.createReader("reader2", READER_GROUP_NAME, new JavaSerializer<>(), ReaderConfig.builder().build()), stopReadFlag);
// 3. Now increase the number of transactional writers to trigger a scale operation.
log.info("Increasing the number of writers to 6");
for (int i = 0; i < 5; i++) {
writers.add(startWritingIntoTxn(clientFactory.createTransactionalEventWriter("writer-" + i, STREAM_NAME, new JavaSerializer<>(), EventWriterConfig.builder().transactionTimeoutTime(25000).build()), stopWriteFlag));
}
// 4. Wait until the scale operation is triggered (else time out) and
// validate the data read by the readers, ensuring that all events are read and there are no duplicates.
CompletableFuture<Void> testResult = Retry.withExpBackoff(10, 10, 40, ofSeconds(10).toMillis()).retryingOn(ScaleOperationNotDoneException.class).throwingOn(RuntimeException.class).runAsync(() -> controller.getCurrentSegments(SCOPE, STREAM_NAME).thenAccept(x -> {
int currentNumOfSegments = x.getSegments().size();
if (currentNumOfSegments == 2) {
log.info("The current number of segments is equal to 2, ScaleOperation did not happen");
// Scaling operation did not happen, retry operation.
throw new ScaleOperationNotDoneException();
} else if (currentNumOfSegments > 2) {
// scale operation successful.
log.info("Current Number of segments is {}", currentNumOfSegments);
stopWriteFlag.set(true);
} else {
Assert.fail("Current number of Segments reduced to less than 2. Failure of test");
}
}), scaleExecutorService).thenCompose(v -> Futures.allOf(writers)).thenRun(this::waitForTxnsToComplete).thenCompose(v -> {
stopReadFlag.set(true);
log.info("All writers have stopped. Setting Stop_Read_Flag. Event Written Count:{}, Event Read " + "Count: {}", testState.writtenEvents, testState.readEvents);
return CompletableFuture.allOf(reader1, reader2);
}).thenRun(this::validateResults);
Futures.getAndHandleExceptions(testResult.whenComplete((r, e) -> {
recordResult(testResult, "ScaleUpWithTxnWithReaderGroup");
}), RuntimeException::new);
readerGroupManager.deleteReaderGroup(READER_GROUP_NAME);
}
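For the scale-up above to be triggered by adding writers, the stream under test must be created with an auto-scaling policy rather than a fixed segment count. A minimal sketch of such a configuration (the concrete target rate and scale factor are illustrative assumptions, not values taken from the test setup):
// Illustrative only: a stream that starts with 2 segments and scales up by event rate.
StreamConfiguration autoScalingConfig = StreamConfiguration.builder()
        .scalingPolicy(ScalingPolicy.byEventRate(1, 2, 2)) // target events/sec per segment, scale factor, min segments
        .build();
controller.createStream(SCOPE, STREAM_NAME, autoScalingConfig).join();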
use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
the class StreamCutsTest method streamCutsTest.
/**
* This test verifies the correct operation of readers using StreamCuts. Concretely, the test creates two streams
* with different numbers of segments and writes some events (TOTAL_EVENTS / 2) to them. Then, the test creates a
* list of StreamCuts that encompasses both streams every CUT_SIZE events. The test asserts that new groups of
* readers can be initialized at these sequential StreamCut intervals and that only CUT_SIZE events are read. Also,
* the test checks the correctness of different combinations of StreamCuts that have not been created sequentially.
* After creating StreamCuts and testing the correctness of reads, the test also checks resetting a reader group to
* a specific initial read point. The whole process is repeated twice, before and after scaling the streams, to test
* whether StreamCuts work correctly under scaling events (thus writing TOTAL_EVENTS in total). Finally, the test
* checks reading different StreamCut combinations in both streams for all events (before and after scaling).
*/
@Test
public void streamCutsTest() {
final ClientConfig clientConfig = Utils.buildClientConfig(controllerURI);
@Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(SCOPE, clientConfig);
@Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE, clientConfig);
readerGroupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().stream(Stream.of(SCOPE, STREAM_ONE)).stream(Stream.of(SCOPE, STREAM_TWO)).build());
@Cleanup ReaderGroup readerGroup = readerGroupManager.getReaderGroup(READER_GROUP);
// Perform the write of events, the slice-by-slice StreamCuts test and the StreamCut combinations test.
log.info("Write, slice by slice and combinations test before scaling.");
final int parallelismBeforeScale = RG_PARALLELISM_ONE + RG_PARALLELISM_TWO;
List<Map<Stream, StreamCut>> slicesBeforeScale = writeEventsAndCheckSlices(clientFactory, readerGroup, readerGroupManager, parallelismBeforeScale);
// Now, we perform a manual scale on both streams and wait until it occurs.
CompletableFuture<Boolean> scaleStreamOne = scaleStream(SCOPE, STREAM_ONE, RG_PARALLELISM_ONE * 2, executor);
checkScaleStatus(scaleStreamOne);
// Perform the same test again on the stream segments after scaling.
final int parallelSegmentsAfterScale = RG_PARALLELISM_ONE * 2 + RG_PARALLELISM_TWO;
final String newReaderGroupName = READER_GROUP + "new";
final Map<Stream, StreamCut> streamCutBeforeScale = slicesBeforeScale.get(slicesBeforeScale.size() - 1);
readerGroupManager.createReaderGroup(newReaderGroupName, ReaderGroupConfig.builder().stream(Stream.of(SCOPE, STREAM_ONE)).stream(Stream.of(SCOPE, STREAM_TWO)).startingStreamCuts(streamCutBeforeScale).build());
@Cleanup ReaderGroup newReaderGroup = readerGroupManager.getReaderGroup(newReaderGroupName);
log.info("Checking slices again starting from {}.", streamCutBeforeScale);
List<Map<Stream, StreamCut>> slicesAfterScale = writeEventsAndCheckSlices(clientFactory, newReaderGroup, readerGroupManager, parallelSegmentsAfterScale);
// Perform combinations including StreamCuts before and after the scale event.
slicesAfterScale.remove(0);
slicesBeforeScale.addAll(slicesAfterScale);
log.info("Performing combinations in the whole stream.");
combineSlicesAndVerify(readerGroupManager, clientFactory, parallelSegmentsAfterScale, slicesBeforeScale);
log.info("All events correctly read from StreamCut slices on multiple Streams. StreamCuts test passed.");
}
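The slice-verification helpers used above (writeEventsAndCheckSlices, combineSlicesAndVerify) are not shown on this page. The core idea they exercise, bounding a reader group between two StreamCuts, looks roughly like the following sketch (the group and reader names, the String event type and the use of the first two slices are illustrative):
// Illustrative sketch: read only the events that lie between two consecutive StreamCut slices.
Map<Stream, StreamCut> startCut = slicesBeforeScale.get(0);
Map<Stream, StreamCut> endCut = slicesBeforeScale.get(1);
readerGroupManager.createReaderGroup("sliceGroup", ReaderGroupConfig.builder()
        .stream(Stream.of(SCOPE, STREAM_ONE))
        .stream(Stream.of(SCOPE, STREAM_TWO))
        .startingStreamCuts(startCut)
        .endingStreamCuts(endCut)
        .build());
@Cleanup EventStreamReader<String> sliceReader = clientFactory.createReader("sliceReader", "sliceGroup", new JavaSerializer<>(), ReaderConfig.builder().build());
int sliceEventCount = 0;
EventRead<String> sliceEvent = sliceReader.readNextEvent(10_000);
while (sliceEvent.getEvent() != null || sliceEvent.isCheckpoint()) {
    if (sliceEvent.getEvent() != null) {
        sliceEventCount++;
    }
    sliceEvent = sliceReader.readNextEvent(10_000);
}
// sliceEventCount should now equal the number of events between the two cuts (CUT_SIZE in this test).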
use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
the class TestUtils method readNextEventMessages.
/**
* Returns the specified number of unread messages from the given {@code scope}/{@code stream}.
*
* @param scope the scope
* @param stream the stream
* @param numMessages the number of event messages to read
* @param readerClientConfig the {@link ClientConfig} object to use to connect to the server
* @param readerGroup the name of the reader group
* @return the event messages
* @throws NullPointerException if {@code scope}, {@code stream} or {@code readerClientConfig} is null
* @throws IllegalArgumentException if {@code numMessages} < 1
* @throws RuntimeException if any exception is thrown by the client
*/
public static List<String> readNextEventMessages(@NonNull String scope, @NonNull String stream, int numMessages, @NonNull ClientConfig readerClientConfig, @NonNull String readerGroup) {
Preconditions.checkArgument(numMessages > 0);
@Cleanup EventStreamClientFactory readerClientFactory = EventStreamClientFactory.withScope(scope, readerClientConfig);
log.debug("Created the readerClientFactory");
ReaderGroupConfig readerGroupConfig = ReaderGroupConfig.builder().stream(Stream.of(scope, stream)).disableAutomaticCheckpoints().build();
@Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(scope, readerClientConfig);
readerGroupManager.createReaderGroup(readerGroup, readerGroupConfig);
log.debug("Created reader group with name {}", readerGroup);
@Cleanup EventStreamReader<String> reader = readerClientFactory.createReader("readerId", readerGroup, new JavaSerializer<String>(), ReaderConfig.builder().initialAllocationDelay(0).build());
log.debug("Created an event reader");
// Keeping the read timeout large so that there is ample time for reading the event even in
// case of abnormal delays in test environments.
List<String> result = new ArrayList<>();
for (int i = 0; i < numMessages; i++) {
result.add(reader.readNextEvent(20000).getEvent());
}
log.info("Done reading {} events", numMessages);
return result;
}
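A possible call site for this helper, with illustrative scope, stream and reader-group names (the controller URI is also an assumption of this sketch):
ClientConfig clientConfig = ClientConfig.builder().controllerURI(URI.create("tcp://localhost:9090")).build();
List<String> messages = readNextEventMessages("testScope", "testStream", 3, clientConfig, "testReaderGroup");
messages.forEach(m -> log.info("Read message: {}", m));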
use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
the class ControllerRestApiTest method restApiTests.
@Test
public void restApiTests() {
Invocation.Builder builder;
Response response;
restServerURI = SETUP_UTILS.getControllerRestUri().toString();
log.info("REST Server URI: {}", restServerURI);
// TEST REST server status, ping test
resourceURl = new StringBuilder(restServerURI).append("/ping").toString();
webTarget = client.target(resourceURl);
builder = webTarget.request();
response = builder.get();
assertEquals("Ping test", OK.getStatusCode(), response.getStatus());
log.info("REST Server is running. Ping successful.");
final String scope1 = RandomStringUtils.randomAlphanumeric(10);
final String stream1 = RandomStringUtils.randomAlphanumeric(10);
// TEST CreateScope POST http://controllerURI:Port/v1/scopes
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes").toString();
webTarget = client.target(resourceURl);
final CreateScopeRequest createScopeRequest = new CreateScopeRequest();
createScopeRequest.setScopeName(scope1);
builder = webTarget.request(MediaType.APPLICATION_JSON_TYPE);
response = builder.post(Entity.json(createScopeRequest));
assertEquals("Create scope status", CREATED.getStatusCode(), response.getStatus());
Assert.assertEquals("Create scope response", scope1, response.readEntity(ScopeProperty.class).getScopeName());
log.info("Create scope: {} successful ", scope1);
// Create another scope for empty stream test later.
final String scope2 = RandomStringUtils.randomAlphanumeric(10);
final CreateScopeRequest createScopeRequest1 = new CreateScopeRequest();
createScopeRequest1.setScopeName(scope2);
builder = webTarget.request(MediaType.APPLICATION_JSON_TYPE);
response = builder.post(Entity.json(createScopeRequest1));
assertEquals("Create scope status", CREATED.getStatusCode(), response.getStatus());
Assert.assertEquals("Create scope response", scope2, response.readEntity(ScopeProperty.class).getScopeName());
// TEST CreateStream POST http://controllerURI:Port/v1/scopes/{scopeName}/streams
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1 + "/streams").toString();
webTarget = client.target(resourceURl);
CreateStreamRequest createStreamRequest = new CreateStreamRequest();
ScalingConfig scalingConfig = new ScalingConfig();
scalingConfig.setType(ScalingConfig.TypeEnum.FIXED_NUM_SEGMENTS);
scalingConfig.setTargetRate(2);
scalingConfig.scaleFactor(2);
scalingConfig.minSegments(2);
RetentionConfig retentionConfig = new RetentionConfig();
retentionConfig.setType(RetentionConfig.TypeEnum.LIMITED_DAYS);
retentionConfig.setValue(123L);
TagsList tagsList = new TagsList();
tagsList.add("testTag");
createStreamRequest.setStreamName(stream1);
createStreamRequest.setScalingPolicy(scalingConfig);
createStreamRequest.setRetentionPolicy(retentionConfig);
createStreamRequest.setStreamTags(tagsList);
createStreamRequest.setTimestampAggregationTimeout(1000L);
createStreamRequest.setRolloverSizeBytes(1024L);
builder = webTarget.request(MediaType.APPLICATION_JSON_TYPE);
response = builder.post(Entity.json(createStreamRequest));
assertEquals("Create stream status", CREATED.getStatusCode(), response.getStatus());
final StreamProperty streamPropertyResponse = response.readEntity(StreamProperty.class);
assertEquals("Scope name in response", scope1, streamPropertyResponse.getScopeName());
assertEquals("Stream name in response", stream1, streamPropertyResponse.getStreamName());
assertEquals("TimestampAggregationTimeout in response", 1000L, (long) streamPropertyResponse.getTimestampAggregationTimeout());
assertEquals("RolloverSizeBytes in response", 1024L, (long) streamPropertyResponse.getRolloverSizeBytes());
log.info("Create stream: {} successful", stream1);
// Test listScopes GET http://controllerURI:Port/v1/scopes
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes").toString();
webTarget = client.target(resourceURl);
builder = webTarget.request();
response = builder.get();
assertEquals("List scopes", OK.getStatusCode(), response.getStatus());
log.info("List scopes successful");
// Test listStream GET /v1/scopes/scope1/streams
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1 + "/streams").toString();
webTarget = client.target(resourceURl);
builder = webTarget.request();
response = builder.get();
assertEquals("List streams", OK.getStatusCode(), response.getStatus());
Assert.assertEquals("List streams size", 1, response.readEntity(StreamsList.class).getStreams().size());
log.info("List streams successful");
// Test listStream GET /v1/scopes/scope1/streams for tags
response = client.target(resourceURl).queryParam("filter_type", "tag").queryParam("filter_value", "testTag").request().get();
assertEquals("List streams", OK.getStatusCode(), response.getStatus());
Assert.assertEquals("List streams size", 1, response.readEntity(StreamsList.class).getStreams().size());
response = client.target(resourceURl).queryParam("filter_type", "tag").queryParam("filter_value", "randomTag").request().get();
assertEquals("List streams", OK.getStatusCode(), response.getStatus());
Assert.assertEquals("List streams size", 0, response.readEntity(StreamsList.class).getStreams().size());
log.info("List streams with tag successful");
response = client.target(resourceURl).queryParam("filter_type", "showInternalStreams").request().get();
assertEquals("List streams", OK.getStatusCode(), response.getStatus());
assertTrue(response.readEntity(StreamsList.class).getStreams().get(0).getStreamName().startsWith("_MARK"));
log.info("List streams with showInternalStreams successful");
// Test for the case when the scope is empty.
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope2 + "/streams").toString();
response = client.target(resourceURl).request().get();
assertEquals("List streams", OK.getStatusCode(), response.getStatus());
Assert.assertEquals("List streams size", 0, response.readEntity(StreamsList.class).getStreams().size());
// Test getScope
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1).toString();
response = client.target(resourceURl).request().get();
assertEquals("Get scope status", OK.getStatusCode(), response.getStatus());
assertEquals("Get scope scope1 response", scope1, response.readEntity(ScopeProperty.class).getScopeName());
log.info("Get scope successful");
// Test updateStream
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1 + "/streams/" + stream1).toString();
UpdateStreamRequest updateStreamRequest = new UpdateStreamRequest();
ScalingConfig scalingConfig1 = new ScalingConfig();
scalingConfig1.setType(ScalingConfig.TypeEnum.FIXED_NUM_SEGMENTS);
scalingConfig1.setTargetRate(2);
// update existing scaleFactor from 2 to 3
scalingConfig1.scaleFactor(3);
// update existing minSegments from 2 to 4
scalingConfig1.minSegments(4);
updateStreamRequest.setScalingPolicy(scalingConfig1);
updateStreamRequest.setRetentionPolicy(retentionConfig);
updateStreamRequest.setTimestampAggregationTimeout(2000L);
updateStreamRequest.setRolloverSizeBytes(2048L);
response = client.target(resourceURl).request(MediaType.APPLICATION_JSON_TYPE).put(Entity.json(updateStreamRequest));
assertEquals("Update stream status", OK.getStatusCode(), response.getStatus());
assertEquals("Verify updated property", 4, response.readEntity(StreamProperty.class).getScalingPolicy().getMinSegments().intValue());
log.info("Update stream successful");
// Test scaling event list GET /v1/scopes/scope1/streams/stream1/scaling-events
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1 + "/streams/" + stream1 + "/scaling-events").toString();
response = client.target(resourceURl).queryParam("from", 0L).queryParam("to", System.currentTimeMillis()).request().get();
List<ScaleMetadata> scaleMetadataListResponse = response.readEntity(new GenericType<List<ScaleMetadata>>() {
});
assertEquals(2, scaleMetadataListResponse.size());
assertEquals(2, scaleMetadataListResponse.get(0).getSegments().size());
assertEquals(4, scaleMetadataListResponse.get(1).getSegments().size());
// Test getStream
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1 + "/streams/" + stream1).toString();
response = client.target(resourceURl).request().get();
assertEquals("Get stream status", OK.getStatusCode(), response.getStatus());
StreamProperty responseProperty = response.readEntity(StreamProperty.class);
assertEquals("Get stream stream1 response", stream1, responseProperty.getStreamName());
assertEquals("Get stream stream1 response TimestampAggregationTimeout", (long) responseProperty.getTimestampAggregationTimeout(), 2000L);
assertEquals("Get stream stream1 RolloverSizeBytes", (long) responseProperty.getRolloverSizeBytes(), 2048L);
log.info("Get stream successful");
// Test updateStreamState
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1 + "/streams/" + stream1 + "/state").toString();
StreamState streamState = new StreamState();
streamState.setStreamState(StreamState.StreamStateEnum.SEALED);
response = client.target(resourceURl).request(MediaType.APPLICATION_JSON_TYPE).put(Entity.json(streamState));
assertEquals("UpdateStreamState status", OK.getStatusCode(), response.getStatus());
assertEquals("UpdateStreamState status in response", streamState.getStreamState(), response.readEntity(StreamState.class).getStreamState());
log.info("Update stream state successful");
// Test deleteStream
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1 + "/streams/" + stream1).toString();
response = client.target(resourceURl).request().delete();
assertEquals("DeleteStream status", NO_CONTENT.getStatusCode(), response.getStatus());
log.info("Delete stream successful");
// Test deleteScope
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1).toString();
response = client.target(resourceURl).request().delete();
assertEquals("Get scope status", NO_CONTENT.getStatusCode(), response.getStatus());
log.info("Delete Scope successful");
// Test reader groups APIs.
// Prepare the streams and readers using the admin client.
final String testScope = RandomStringUtils.randomAlphanumeric(10);
final String testStream1 = RandomStringUtils.randomAlphanumeric(10);
final String testStream2 = RandomStringUtils.randomAlphanumeric(10);
URI controllerUri = SETUP_UTILS.getControllerUri();
@Cleanup("shutdown") InlineExecutor inlineExecutor = new InlineExecutor();
ClientConfig clientConfig = ClientConfig.builder().build();
try (ConnectionPool cp = new ConnectionPoolImpl(clientConfig, new SocketConnectionFactoryImpl(clientConfig));
StreamManager streamManager = new StreamManagerImpl(createController(controllerUri, inlineExecutor), cp)) {
log.info("Creating scope: {}", testScope);
streamManager.createScope(testScope);
log.info("Creating stream: {}", testStream1);
StreamConfiguration streamConf1 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
streamManager.createStream(testScope, testStream1, streamConf1);
log.info("Creating stream: {}", testStream2);
StreamConfiguration streamConf2 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
streamManager.createStream(testScope, testStream2, streamConf2);
}
final String readerGroupName1 = RandomStringUtils.randomAlphanumeric(10);
final String readerGroupName2 = RandomStringUtils.randomAlphanumeric(10);
final String reader1 = RandomStringUtils.randomAlphanumeric(10);
final String reader2 = RandomStringUtils.randomAlphanumeric(10);
try (ClientFactoryImpl clientFactory = new ClientFactoryImpl(testScope, createController(controllerUri, inlineExecutor), clientConfig);
ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(testScope, ClientConfig.builder().controllerURI(controllerUri).build())) {
readerGroupManager.createReaderGroup(readerGroupName1, ReaderGroupConfig.builder().stream(Stream.of(testScope, testStream1)).stream(Stream.of(testScope, testStream2)).build());
readerGroupManager.createReaderGroup(readerGroupName2, ReaderGroupConfig.builder().stream(Stream.of(testScope, testStream1)).stream(Stream.of(testScope, testStream2)).build());
clientFactory.createReader(reader1, readerGroupName1, new JavaSerializer<Long>(), ReaderConfig.builder().build());
clientFactory.createReader(reader2, readerGroupName1, new JavaSerializer<Long>(), ReaderConfig.builder().build());
}
// Test fetching readergroups.
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + testScope + "/readergroups").toString();
response = client.target(resourceURl).request().get();
assertEquals("Get readergroups status", OK.getStatusCode(), response.getStatus());
ReaderGroupsList readerGroupsList = response.readEntity(ReaderGroupsList.class);
assertEquals("Get readergroups size", 2, readerGroupsList.getReaderGroups().size());
assertTrue(readerGroupsList.getReaderGroups().contains(new ReaderGroupsListReaderGroups().readerGroupName(readerGroupName1)));
assertTrue(readerGroupsList.getReaderGroups().contains(new ReaderGroupsListReaderGroups().readerGroupName(readerGroupName2)));
log.info("Get readergroups successful");
// Test fetching readergroup info.
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + testScope + "/readergroups/" + readerGroupName1).toString();
response = client.target(resourceURl).request().get();
assertEquals("Get readergroup properties status", OK.getStatusCode(), response.getStatus());
ReaderGroupProperty readerGroupProperty = response.readEntity(ReaderGroupProperty.class);
assertEquals("Get readergroup name", readerGroupName1, readerGroupProperty.getReaderGroupName());
assertEquals("Get readergroup scope name", testScope, readerGroupProperty.getScopeName());
assertEquals("Get readergroup streams size", 2, readerGroupProperty.getStreamList().size());
assertTrue(readerGroupProperty.getStreamList().contains(Stream.of(testScope, testStream1).getScopedName()));
assertTrue(readerGroupProperty.getStreamList().contains(Stream.of(testScope, testStream2).getScopedName()));
assertEquals("Get readergroup onlinereaders size", 2, readerGroupProperty.getOnlineReaderIds().size());
assertTrue(readerGroupProperty.getOnlineReaderIds().contains(reader1));
assertTrue(readerGroupProperty.getOnlineReaderIds().contains(reader2));
// Test readergroup or scope not found.
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + testScope + "/readergroups/" + "unknownreadergroup").toString();
response = client.target(resourceURl).request().get();
assertEquals("Get readergroup properties status", NOT_FOUND.getStatusCode(), response.getStatus());
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + "unknownscope" + "/readergroups/" + readerGroupName1).toString();
response = client.target(resourceURl).request().get();
assertEquals("Get readergroup properties status", NOT_FOUND.getStatusCode(), response.getStatus());
log.info("Get readergroup properties successful");
log.info("Test restApiTests passed successfully!");
}
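The REST test above relies on fields (client, restServerURI, resourceURl, webTarget) that are initialized outside the method shown here; the JAX-RS client is presumably set up along these lines (a sketch, not the actual test fixture):
// Assumed setup for the JAX-RS client (javax.ws.rs.client) used in restApiTests().
Client client = ClientBuilder.newClient();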