use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
the class ControllerRestApiTest method restApiTests.
@Test(timeout = 300000)
public void restApiTests() {
Service conService = Utils.createPravegaControllerService(null);
List<URI> ctlURIs = conService.getServiceDetails();
URI controllerRESTUri = ctlURIs.get(1);
Invocation.Builder builder;
Response response;
restServerURI = "http://" + controllerRESTUri.getHost() + ":" + controllerRESTUri.getPort();
log.info("REST Server URI: {}", restServerURI);
// TEST REST server status, ping test
resourceURl = new StringBuilder(restServerURI).append("/ping").toString();
webTarget = client.target(resourceURl);
builder = webTarget.request();
response = builder.get();
assertEquals("Ping test", OK.getStatusCode(), response.getStatus());
log.info("REST Server is running. Ping successful.");
final String scope1 = RandomStringUtils.randomAlphanumeric(10);
final String stream1 = RandomStringUtils.randomAlphanumeric(10);
// TEST CreateScope POST http://controllerURI:Port/v1/scopes
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes").toString();
webTarget = client.target(resourceURl);
final CreateScopeRequest createScopeRequest = new CreateScopeRequest();
createScopeRequest.setScopeName(scope1);
builder = webTarget.request(MediaType.APPLICATION_JSON_TYPE);
response = builder.post(Entity.json(createScopeRequest));
assertEquals("Create scope status", CREATED.getStatusCode(), response.getStatus());
Assert.assertEquals("Create scope response", scope1, response.readEntity(ScopeProperty.class).getScopeName());
log.info("Create scope: {} successful ", scope1);
// TEST CreateStream POST http://controllerURI:Port/v1/scopes/{scopeName}/streams
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1 + "/streams").toString();
webTarget = client.target(resourceURl);
CreateStreamRequest createStreamRequest = new CreateStreamRequest();
ScalingConfig scalingConfig = new ScalingConfig();
scalingConfig.setType(ScalingConfig.TypeEnum.FIXED_NUM_SEGMENTS);
scalingConfig.setTargetRate(2);
scalingConfig.scaleFactor(2);
scalingConfig.minSegments(2);
RetentionConfig retentionConfig = new RetentionConfig();
retentionConfig.setType(RetentionConfig.TypeEnum.LIMITED_DAYS);
retentionConfig.setValue(123L);
createStreamRequest.setStreamName(stream1);
createStreamRequest.setScalingPolicy(scalingConfig);
createStreamRequest.setRetentionPolicy(retentionConfig);
builder = webTarget.request(MediaType.APPLICATION_JSON_TYPE);
response = builder.post(Entity.json(createStreamRequest));
assertEquals("Create stream status", CREATED.getStatusCode(), response.getStatus());
final StreamProperty streamPropertyResponse = response.readEntity(StreamProperty.class);
assertEquals("Scope name in response", scope1, streamPropertyResponse.getScopeName());
assertEquals("Stream name in response", stream1, streamPropertyResponse.getStreamName());
log.info("Create stream: {} successful", stream1);
// Test listScopes GET http://controllerURI:Port/v1/scopes
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes").toString();
webTarget = client.target(resourceURl);
builder = webTarget.request();
response = builder.get();
assertEquals("List scopes", OK.getStatusCode(), response.getStatus());
log.info("List scopes successful");
// Test listStreams GET /v1/scopes/{scopeName}/streams
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1 + "/streams").toString();
webTarget = client.target(resourceURl);
builder = webTarget.request();
response = builder.get();
assertEquals("List streams", OK.getStatusCode(), response.getStatus());
Assert.assertEquals("List streams size", 1, response.readEntity(StreamsList.class).getStreams().size());
log.info("List streams successful");
// Test getScope
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1).toString();
response = client.target(resourceURl).request().get();
assertEquals("Get scope status", OK.getStatusCode(), response.getStatus());
assertEquals("Get scope scope1 response", scope1, response.readEntity(ScopeProperty.class).getScopeName());
log.info("Get scope successful");
// Test updateStream
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1 + "/streams/" + stream1).toString();
UpdateStreamRequest updateStreamRequest = new UpdateStreamRequest();
ScalingConfig scalingConfig1 = new ScalingConfig();
scalingConfig1.setType(ScalingConfig.TypeEnum.FIXED_NUM_SEGMENTS);
scalingConfig1.setTargetRate(2);
// update existing scaleFactor from 2 to 3
scalingConfig1.scaleFactor(3);
// update existing minSegments from 2 to 4
scalingConfig1.minSegments(4);
updateStreamRequest.setScalingPolicy(scalingConfig1);
updateStreamRequest.setRetentionPolicy(retentionConfig);
response = client.target(resourceURl).request(MediaType.APPLICATION_JSON_TYPE).put(Entity.json(updateStreamRequest));
assertEquals("Update stream status", OK.getStatusCode(), response.getStatus());
assertEquals("Verify updated property", 4, response.readEntity(StreamProperty.class).getScalingPolicy().getMinSegments().intValue());
log.info("Update stream successful");
// Test getStream
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1 + "/streams/" + stream1).toString();
response = client.target(resourceURl).request().get();
assertEquals("Get stream status", OK.getStatusCode(), response.getStatus());
assertEquals("Get stream stream1 response", stream1, response.readEntity(StreamProperty.class).getStreamName());
log.info("Get stream successful");
// Test updateStreamState
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1 + "/streams/" + stream1 + "/state").toString();
StreamState streamState = new StreamState();
streamState.setStreamState(StreamState.StreamStateEnum.SEALED);
response = client.target(resourceURl).request(MediaType.APPLICATION_JSON_TYPE).put(Entity.json(streamState));
assertEquals("UpdateStreamState status", OK.getStatusCode(), response.getStatus());
assertEquals("UpdateStreamState status in response", streamState.getStreamState(), response.readEntity(StreamState.class).getStreamState());
log.info("Update stream state successful");
// Test deleteStream
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1 + "/streams/" + stream1).toString();
response = client.target(resourceURl).request().delete();
assertEquals("DeleteStream status", NO_CONTENT.getStatusCode(), response.getStatus());
log.info("Delete stream successful");
// Test deleteScope
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + scope1).toString();
response = client.target(resourceURl).request().delete();
assertEquals("Get scope status", NO_CONTENT.getStatusCode(), response.getStatus());
log.info("Delete Scope successful");
// Test reader groups APIs.
// Prepare the streams and readers using the admin client.
final String testScope = RandomStringUtils.randomAlphanumeric(10);
final String testStream1 = RandomStringUtils.randomAlphanumeric(10);
final String testStream2 = RandomStringUtils.randomAlphanumeric(10);
URI controllerUri = ctlURIs.get(0);
try (StreamManager streamManager = new StreamManagerImpl(ClientConfig.builder().controllerURI(controllerUri).build())) {
log.info("Creating scope: {}", testScope);
streamManager.createScope(testScope);
log.info("Creating stream: {}", testStream1);
StreamConfiguration streamConf1 = StreamConfiguration.builder().scope(testScope).streamName(testStream1).scalingPolicy(ScalingPolicy.fixed(1)).build();
streamManager.createStream(testScope, testStream1, streamConf1);
log.info("Creating stream: {}", testStream2);
StreamConfiguration streamConf2 = StreamConfiguration.builder().scope(testScope).streamName(testStream2).scalingPolicy(ScalingPolicy.fixed(1)).build();
streamManager.createStream(testScope, testStream2, streamConf2);
}
final String readerGroupName1 = RandomStringUtils.randomAlphanumeric(10);
final String readerGroupName2 = RandomStringUtils.randomAlphanumeric(10);
final String reader1 = RandomStringUtils.randomAlphanumeric(10);
final String reader2 = RandomStringUtils.randomAlphanumeric(10);
@Cleanup("shutdown") InlineExecutor executor = new InlineExecutor();
Controller controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(ClientConfig.builder().controllerURI(controllerUri).build()).build(), executor);
try (ClientFactory clientFactory = new ClientFactoryImpl(testScope, controller);
ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(testScope, ClientConfig.builder().controllerURI(controllerUri).build())) {
final ReaderGroupConfig config = ReaderGroupConfig.builder().stream(Stream.of(testScope, testStream1)).stream(Stream.of(testScope, testStream2)).build();
readerGroupManager.createReaderGroup(readerGroupName1, config);
readerGroupManager.createReaderGroup(readerGroupName2, config);
clientFactory.createReader(reader1, readerGroupName1, new JavaSerializer<Long>(), ReaderConfig.builder().build());
clientFactory.createReader(reader2, readerGroupName1, new JavaSerializer<Long>(), ReaderConfig.builder().build());
}
// Verify the reader group info using REST APIs.
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + testScope + "/readergroups").toString();
response = client.target(resourceURl).request().get();
assertEquals("Get readergroups status", OK.getStatusCode(), response.getStatus());
ReaderGroupsList readerGroupsList = response.readEntity(ReaderGroupsList.class);
assertEquals("Get readergroups size", 2, readerGroupsList.getReaderGroups().size());
assertTrue(readerGroupsList.getReaderGroups().contains(new ReaderGroupsListReaderGroups().readerGroupName(readerGroupName1)));
assertTrue(readerGroupsList.getReaderGroups().contains(new ReaderGroupsListReaderGroups().readerGroupName(readerGroupName2)));
log.info("Get readergroups successful");
// Test fetching readergroup info.
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + testScope + "/readergroups/" + readerGroupName1).toString();
response = client.target(resourceURl).request().get();
assertEquals("Get readergroup properties status", OK.getStatusCode(), response.getStatus());
ReaderGroupProperty readerGroupProperty = response.readEntity(ReaderGroupProperty.class);
assertEquals("Get readergroup name", readerGroupName1, readerGroupProperty.getReaderGroupName());
assertEquals("Get readergroup scope name", testScope, readerGroupProperty.getScopeName());
assertEquals("Get readergroup streams size", 2, readerGroupProperty.getStreamList().size());
assertTrue(readerGroupProperty.getStreamList().contains(testStream1));
assertTrue(readerGroupProperty.getStreamList().contains(testStream2));
assertEquals("Get readergroup onlinereaders size", 2, readerGroupProperty.getOnlineReaderIds().size());
assertTrue(readerGroupProperty.getOnlineReaderIds().contains(reader1));
assertTrue(readerGroupProperty.getOnlineReaderIds().contains(reader2));
// Test readergroup or scope not found.
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + testScope + "/readergroups/" + "unknownreadergroup").toString();
response = client.target(resourceURl).request().get();
assertEquals("Get readergroup properties status", NOT_FOUND.getStatusCode(), response.getStatus());
resourceURl = new StringBuilder(restServerURI).append("/v1/scopes/" + "unknownscope" + "/readergroups/" + readerGroupName1).toString();
response = client.target(resourceURl).request().get();
assertEquals("Get readergroup properties status", NOT_FOUND.getStatusCode(), response.getStatus());
log.info("Get readergroup properties successful");
log.info("Test restApiTests passed successfully!");
}
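Note: the method above relies on instance fields (client, restServerURI, resourceURl, webTarget) that are initialized outside the snippet. A minimal setup sketch using the standard javax.ws.rs.client API is shown below; the class name and setup/teardown method placement are assumptions for illustration, not taken from the original test class.
import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.WebTarget;

public class RestClientSetupSketch {
    // Fields assumed by restApiTests above (names copied from the snippet).
    private Client client;
    private String restServerURI;
    private String resourceURl;
    private WebTarget webTarget;

    // Assumed setup: build a plain JAX-RS client before the test runs.
    public void setUp() {
        client = ClientBuilder.newClient();
    }

    // Assumed teardown: release the client's connection resources afterwards.
    public void tearDown() {
        client.close();
    }
}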
use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
the class PravegaTest method simpleTest.
/**
* Invoke the simpleTest, ensure we are able to produce events.
* The test fails in case of exceptions while writing to the stream.
*
* @throws InterruptedException if interrupted
*/
@Test(timeout = 10 * 60 * 1000)
public void simpleTest() throws InterruptedException {
Service conService = Utils.createPravegaControllerService(null);
List<URI> ctlURIs = conService.getServiceDetails();
URI controllerUri = ctlURIs.get(0);
@Cleanup ClientFactory clientFactory = ClientFactory.withScope(STREAM_SCOPE, ClientConfig.builder().controllerURI(controllerUri).build());
log.info("Invoking Writer test with Controller URI: {}", controllerUri);
@Cleanup EventStreamWriter<Serializable> writer = clientFactory.createEventWriter(STREAM_NAME, new JavaSerializer<>(), EventWriterConfig.builder().build());
for (int i = 0; i < NUM_EVENTS; i++) {
String event = "Publish " + i + "\n";
log.debug("Producing event: {} ", event);
writer.writeEvent("", event);
writer.flush();
Thread.sleep(500);
}
log.info("Invoking Reader test.");
ReaderGroupManager groupManager = ReaderGroupManager.withScope(STREAM_SCOPE, ClientConfig.builder().controllerURI(controllerUri).build());
groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().stream(Stream.of(STREAM_SCOPE, STREAM_NAME)).build());
EventStreamReader<String> reader = clientFactory.createReader(UUID.randomUUID().toString(), READER_GROUP, new JavaSerializer<>(), ReaderConfig.builder().build());
for (int i = 0; i < NUM_EVENTS; i++) {
try {
String event = reader.readNextEvent(6000).getEvent();
if (event != null) {
log.debug("Read event: {} ", event);
}
} catch (ReinitializationRequiredException e) {
log.error("Unexpected request to reinitialize {}", e);
System.exit(0);
}
}
reader.close();
groupManager.deleteReaderGroup(READER_GROUP);
}
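simpleTest assumes that STREAM_SCOPE and STREAM_NAME already exist and that constants such as READER_GROUP and NUM_EVENTS are defined elsewhere in the class. Below is a minimal setup sketch, reusing the StreamManagerImpl pattern from restApiTests above; the helper method name and its placement are assumptions.
import java.net.URI;
import io.pravega.client.ClientConfig;
import io.pravega.client.admin.StreamManager;
import io.pravega.client.admin.impl.StreamManagerImpl;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;

// Hypothetical helper: create the scope and a single-segment stream before the test runs.
static void createScopeAndStream(URI controllerUri, String scope, String stream) {
    try (StreamManager streamManager = new StreamManagerImpl(ClientConfig.builder().controllerURI(controllerUri).build())) {
        streamManager.createScope(scope);
        StreamConfiguration conf = StreamConfiguration.builder()
                .scope(scope)
                .streamName(stream)
                .scalingPolicy(ScalingPolicy.fixed(1))
                .build();
        streamManager.createStream(scope, stream, conf);
    }
}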
use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
the class ReadWithAutoScaleTest method scaleTestsWithReader.
// timeout of 10 mins.
@Test(timeout = 10 * 60 * 1000)
public void scaleTestsWithReader() {
URI controllerUri = getControllerURI();
ControllerImpl controller = getController();
ConcurrentLinkedQueue<Long> eventsReadFromPravega = new ConcurrentLinkedQueue<>();
final AtomicBoolean stopWriteFlag = new AtomicBoolean(false);
final AtomicBoolean stopReadFlag = new AtomicBoolean(false);
// data used by each of the writers.
final AtomicLong eventData = new AtomicLong();
// used by readers to maintain a count of events.
final AtomicLong eventReadCount = new AtomicLong();
@Cleanup ClientFactory clientFactory = getClientFactory();
// 1. Start writing events to the Stream.
CompletableFuture<Void> writer1 = startNewTxnWriter(eventData, clientFactory, stopWriteFlag);
// 2. Start a reader group with 2 readers (The stream is configured with 2 segments.)
// 2.1 Create a reader group.
log.info("Creating Reader group : {}", READER_GROUP_NAME);
@Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE, controllerUri);
readerGroupManager.createReaderGroup(READER_GROUP_NAME, ReaderGroupConfig.builder().stream(Stream.of(SCOPE, STREAM_NAME)).build());
// 2.2 Create readers.
CompletableFuture<Void> reader1 = startReader("reader1", clientFactory, READER_GROUP_NAME, eventsReadFromPravega, eventData, eventReadCount, stopReadFlag);
CompletableFuture<Void> reader2 = startReader("reader2", clientFactory, READER_GROUP_NAME, eventsReadFromPravega, eventData, eventReadCount, stopReadFlag);
// 3 Now increase the number of TxnWriters to trigger scale operation.
log.info("Increasing the number of writers to 6");
CompletableFuture<Void> writer2 = startNewTxnWriter(eventData, clientFactory, stopWriteFlag);
CompletableFuture<Void> writer3 = startNewTxnWriter(eventData, clientFactory, stopWriteFlag);
CompletableFuture<Void> writer4 = startNewTxnWriter(eventData, clientFactory, stopWriteFlag);
CompletableFuture<Void> writer5 = startNewTxnWriter(eventData, clientFactory, stopWriteFlag);
CompletableFuture<Void> writer6 = startNewTxnWriter(eventData, clientFactory, stopWriteFlag);
// 4 Wait until the scale operation is triggered (else time out)
// validate the data read by the readers ensuring all the events are read and there are no duplicates.
CompletableFuture<Void> testResult = Retry.withExpBackoff(10, 10, 40, ofSeconds(10).toMillis()).retryingOn(ScaleOperationNotDoneException.class).throwingOn(RuntimeException.class).runAsync(() -> controller.getCurrentSegments(SCOPE, STREAM_NAME).thenAccept(x -> {
int currentNumOfSegments = x.getSegments().size();
if (currentNumOfSegments == 2) {
log.info("The current number of segments is equal to 2, ScaleOperation did not happen");
// Scaling operation did not happen, retry operation.
throw new ScaleOperationNotDoneException();
} else if (currentNumOfSegments > 2) {
// scale operation successful.
log.info("Current Number of segments is {}", currentNumOfSegments);
stopWriteFlag.set(true);
} else {
Assert.fail("Current number of Segments reduced to less than 2. Failure of test");
}
}), EXECUTOR_SERVICE).thenCompose(v -> CompletableFuture.allOf(writer1, writer2, writer3, writer4, writer5, writer6)).thenCompose(v -> {
stopReadFlag.set(true);
log.info("All writers have stopped. Setting Stop_Read_Flag. Event Written Count:{}, Event Read " + "Count: {}", eventData.get(), eventsReadFromPravega.size());
return CompletableFuture.allOf(reader1, reader2);
}).thenRun(() -> validateResults(eventData.get(), eventsReadFromPravega));
Futures.getAndHandleExceptions(testResult.whenComplete((r, e) -> {
recordResult(testResult, "ScaleUpWithTxnWithReaderGroup");
}), RuntimeException::new);
readerGroupManager.deleteReaderGroup(READER_GROUP_NAME);
}
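scaleTestsWithReader only passes if the stream can scale beyond its initial 2 segments, so it must have been created with an event-rate (or data-rate) scaling policy rather than a fixed one. A hedged configuration sketch along those lines follows; the concrete target rate and scale factor values are assumptions, not the values used by the original test setup.
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;

// Hypothetical auto-scaling stream configuration: start with 2 segments and let Pravega
// split segments once the per-segment event rate exceeds the target.
StreamConfiguration autoScaleConfig = StreamConfiguration.builder()
        .scope(SCOPE)
        .streamName(STREAM_NAME)
        .scalingPolicy(ScalingPolicy.byEventRate(1, 2, 2))
        .build();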
use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
the class ReaderCheckpointTest method readerCheckpointTest.
@Test
public void readerCheckpointTest() {
@Cleanup ReaderGroupManager readerGroupManager = ReaderGroupManager.withScope(SCOPE, controllerURI);
ReaderGroup readerGroup = readerGroupManager.createReaderGroup(READER_GROUP_NAME, ReaderGroupConfig.builder().stream(io.pravega.client.stream.Stream.of(SCOPE, STREAM)).build());
int startInclusive = 1;
int endExclusive = 100;
log.info("Write events with range [{},{})", startInclusive, endExclusive);
writeEvents(IntStream.range(startInclusive, endExclusive).boxed().collect(Collectors.toList()));
readEventsAndVerify(startInclusive, endExclusive);
// initiate checkpoint100
Checkpoint checkPoint100 = createCheckPointAndVerify(readerGroup, "batch100");
// write and read events 100 to 200
startInclusive = 100;
endExclusive = 200;
log.info("Write events with range [{},{})", startInclusive, endExclusive);
writeEvents(IntStream.range(startInclusive, endExclusive).boxed().collect(Collectors.toList()));
readEventsAndVerify(startInclusive, endExclusive);
// reset back to checkpoint 100
readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromCheckpoint(checkPoint100).build());
readEventsAndVerify(100, endExclusive);
// initiate checkpoint200
Checkpoint checkPoint200 = createCheckPointAndVerify(readerGroup, "batch200");
// write and read events 200 to 300
startInclusive = 200;
endExclusive = 300;
log.info("Write events with range [{},{})", startInclusive, endExclusive);
writeEvents(IntStream.range(startInclusive, endExclusive).boxed().collect(Collectors.toList()));
readEventsAndVerify(startInclusive, endExclusive);
// reset back to checkpoint 200
readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromCheckpoint(checkPoint200).build());
readEventsAndVerify(200, endExclusive);
// reset back to checkpoint 100
readerGroup.resetReaderGroup(ReaderGroupConfig.builder().startFromCheckpoint(checkPoint100).build());
readEventsAndVerify(100, endExclusive);
// clean up
readerGroupManager.deleteReaderGroup(READER_GROUP_NAME);
}
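readerCheckpointTest relies on a createCheckPointAndVerify helper that is not shown here. Below is a rough sketch of how such a checkpoint could be initiated through the ReaderGroup API; it is an assumption about what the helper might do, not its actual implementation, and the returned future only completes once the group's readers process the checkpoint via their readNextEvent calls.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import io.pravega.client.stream.Checkpoint;
import io.pravega.client.stream.ReaderGroup;

// Hypothetical checkpoint helper: initiate a named checkpoint and block until it completes.
private Checkpoint createCheckPoint(ReaderGroup readerGroup, String checkpointName) {
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    try {
        CompletableFuture<Checkpoint> future = readerGroup.initiateCheckpoint(checkpointName, executor);
        return future.join();
    } finally {
        executor.shutdown();
    }
}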
use of io.pravega.client.admin.ReaderGroupManager in project pravega by pravega.
the class RetentionTest method retentionTest.
@Test
public void retentionTest() throws Exception {
ConnectionFactory connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
ControllerImpl controller = new ControllerImpl(ControllerImplConfig.builder().clientConfig(ClientConfig.builder().controllerURI(controllerURI).build()).build(), connectionFactory.getInternalExecutor());
ClientFactory clientFactory = new ClientFactoryImpl(SCOPE, controller);
log.info("Invoking Writer test with Controller URI: {}", controllerURI);
// create a writer
EventStreamWriter<Serializable> writer = clientFactory.createEventWriter(STREAM, new JavaSerializer<>(), EventWriterConfig.builder().build());
// write an event
String writeEvent = "event";
writer.writeEvent(writeEvent);
writer.flush();
log.debug("Writing event: {} ", writeEvent);
// sleep for 5 minutes to give the retention policy time to truncate the stream
Exceptions.handleInterrupted(() -> Thread.sleep(5 * 60 * 1000));
// create a reader
ReaderGroupManager groupManager = ReaderGroupManager.withScope(SCOPE, controllerURI);
groupManager.createReaderGroup(READER_GROUP, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(Stream.of(SCOPE, STREAM)).build());
EventStreamReader<String> reader = clientFactory.createReader(UUID.randomUUID().toString(), READER_GROUP, new JavaSerializer<>(), ReaderConfig.builder().build());
// expectation is that the stream has been truncated by now, so the read should find it empty
try {
String readEvent = reader.readNextEvent(6000).getEvent();
log.debug("Reading event: {} ", readEvent);
assertEquals(null, readEvent);
} catch (ReinitializationRequiredException e) {
log.error("Unexpected request to reinitialize {}", e);
Assert.fail("Unexpected request to reinitialize.Test failed.");
}
log.debug("The stream is already truncated.Simple retention test passed.");
}
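retentionTest only observes truncation if the stream was created with a retention policy shorter than the sleep above. A hedged sketch of such a stream configuration follows; the exact retention duration is an assumption for illustration.
import java.time.Duration;
import io.pravega.client.stream.RetentionPolicy;
import io.pravega.client.stream.ScalingPolicy;
import io.pravega.client.stream.StreamConfiguration;

// Hypothetical stream configuration with time-based retention, so that data older than the
// retention window is truncated while the test sleeps.
StreamConfiguration retentionStreamConfig = StreamConfiguration.builder()
        .scope(SCOPE)
        .streamName(STREAM)
        .scalingPolicy(ScalingPolicy.fixed(1))
        .retentionPolicy(RetentionPolicy.byTime(Duration.ofMinutes(2)))
        .build();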