Example usage of io.pravega.client.stream.impl.StreamImpl in the pravega project: class ControllerServiceTest, method getSegmentsAfterCreation.
/**
 * Verifies that fetching the stream's segments at a timestamp slightly in the future
 * succeeds immediately after stream creation (the returned segment map is non-empty).
 *
 * @param controller the controller client used to query segments
 * @param scope      the scope containing the stream
 * @param streamName the name of the stream to query
 * @throws InterruptedException if waiting on the segment future is interrupted
 * @throws ExecutionException   if the segment query fails
 */
private static void getSegmentsAfterCreation(Controller controller, final String scope, final String streamName) throws InterruptedException, ExecutionException {
// NOTE(review): the offset is 3600 *milliseconds* (~3.6 s), not one hour — confirm whether seconds were intended.
CompletableFuture<Map<Segment, Long>> segments = controller.getSegmentsAtTime(new StreamImpl(scope, streamName), System.currentTimeMillis() + 3600);
assertFalse("FAILURE: Fetching positions at given time in future after stream creation failed", segments.get().isEmpty());
}
Example usage of io.pravega.client.stream.impl.StreamImpl in the pravega project: class MetricsTest, method metricsTimeBasedCacheEvictionTest.
// Verifies that per-segment read-byte counters are evicted from the metrics cache after the
// eviction interval: a second reader group re-reading the same data reports the same byte
// count (not double), proving the counter restarted from zero after eviction.
@Test(timeout = 120000)
public void metricsTimeBasedCacheEvictionTest() throws Exception {
ClientConfig clientConfig = ClientConfig.builder().build();
// Setup: create the scope and stream, then release the pool/manager immediately.
try (ConnectionPool cp = new ConnectionPoolImpl(clientConfig, new SocketConnectionFactoryImpl(clientConfig));
StreamManager streamManager = new StreamManagerImpl(controller, cp)) {
boolean createScopeStatus = streamManager.createScope(scope);
log.info("Create scope status {}", createScopeStatus);
boolean createStreamStatus = streamManager.createStream(scope, STREAM_NAME, config);
log.info("Create stream status {}", createStreamStatus);
}
try (ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory)) {
@Cleanup EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM_NAME, new UTF8StringSerializer(), EventWriterConfig.builder().build());
String event = "12345";
// Expected total bytes read: per-event payload plus 8 bytes of framing.
// NOTE(review): assumes exactly 8 bytes of per-event overhead on the wire — confirm against the serializer/wire protocol.
long bytesWritten = TOTAL_NUM_EVENTS * (8 + event.length());
writeEvents(event, writer1);
// First reader group: read everything and check the segment-0 read-bytes counter.
String readerGroupName1 = readerGroupName + "1";
log.info("Creating Reader group : {}", readerGroupName1);
readerGroupManager.createReaderGroup(readerGroupName1, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).automaticCheckpointIntervalMillis(2000).build());
EventStreamReader<String> reader1 = clientFactory.createReader(readerName, readerGroupName1, new UTF8StringSerializer(), ReaderConfig.builder().build());
readAllEvents(reader1);
// Tags identify segment 0 of epoch 0 of the test stream.
final String[] streamTags = segmentTags(scope + "/" + STREAM_NAME + "/0.#epoch.0");
assertEquals(bytesWritten, (long) MetricRegistryUtils.getCounter(SEGMENT_READ_BYTES, streamTags).count());
// Wait for cache eviction to happen
// NOTE(review): assumes the configured eviction duration is under 5 s — confirm against the test's metrics config.
Thread.sleep(5000);
// Second reader group re-reads the same events after eviction.
String readerGroupName2 = readerGroupName + "2";
log.info("Creating Reader group : {}", readerGroupName2);
readerGroupManager.createReaderGroup(readerGroupName2, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).automaticCheckpointIntervalMillis(2000).build());
EventStreamReader<String> reader2 = clientFactory.createReader(readerName, readerGroupName2, new UTF8StringSerializer(), ReaderConfig.builder().build());
readAllEvents(reader2);
// Metric is evicted from Cache, after cache eviction duration
// Count starts from 0, rather than adding up to previously read bytes, as cache is evicted.
assertEquals(bytesWritten, (long) MetricRegistryUtils.getCounter(SEGMENT_READ_BYTES, streamTags).count());
// Scale: seal segment 0 and replace it with one segment covering the whole key range.
Map<Double, Double> map = new HashMap<>();
map.put(0.0, 1.0);
// Seal segment 0, create segment 1
CompletableFuture<Boolean> scaleStatus = controller.scaleStream(new StreamImpl(scope, STREAM_NAME), Collections.singletonList(0L), map, executorService()).getFuture();
Assert.assertTrue(scaleStatus.get());
// Write and read a fresh batch on the post-scale segment (epoch 1) and verify its counter.
@Cleanup EventStreamWriter<String> writer2 = clientFactory.createEventWriter(STREAM_NAME, new UTF8StringSerializer(), EventWriterConfig.builder().build());
writeEvents(event, writer2);
readAllEvents(reader1);
final String[] streamTags2nd = segmentTags(scope + "/" + STREAM_NAME + "/1.#epoch.1");
assertEquals(bytesWritten, (long) MetricRegistryUtils.getCounter(SEGMENT_READ_BYTES, streamTags2nd).count());
// Teardown: delete both reader groups, then seal and delete the stream and scope.
readerGroupManager.deleteReaderGroup(readerGroupName1);
readerGroupManager.deleteReaderGroup(readerGroupName2);
CompletableFuture<Boolean> sealStreamStatus = controller.sealStream(scope, STREAM_NAME);
log.info("Sealing stream {}", STREAM_NAME);
assertTrue(sealStreamStatus.get());
CompletableFuture<Boolean> deleteStreamStatus = controller.deleteStream(scope, STREAM_NAME);
log.info("Deleting stream {}", STREAM_NAME);
assertTrue(deleteStreamStatus.get());
CompletableFuture<Boolean> deleteScopeStatus = controller.deleteScope(scope);
log.info("Deleting scope {}", scope);
assertTrue(deleteScopeStatus.get());
}
log.info("Metrics Time based Cache Eviction test succeeds");
}
Example usage of io.pravega.client.stream.impl.StreamImpl in the pravega project: class BucketServiceTest, method testRetentionService.
// Verifies retention-service bucket ownership limits, and that adding/removing a stream in
// the bucket store propagates to the owning BucketService's known-stream set.
@Test(timeout = 10000)
public void testRetentionService() {
Map<Integer, BucketService> bucketServices = retentionService.getBucketServices();
assertNotNull(bucketServices);
// The service is configured with exactly 3 buckets (ids 0..2).
assertEquals(3, bucketServices.size());
// This host can take ownership of every valid bucket id.
assertTrue(retentionService.takeBucketOwnership(0, hostId, executor).join());
assertTrue(retentionService.takeBucketOwnership(1, hostId, executor).join());
assertTrue(retentionService.takeBucketOwnership(2, hostId, executor).join());
// Bucket id 3 is out of range and must be rejected.
AssertExtensions.assertThrows("", () -> retentionService.takeBucketOwnership(3, hostId, executor).join(), e -> e instanceof IllegalArgumentException);
retentionService.tryTakeOwnership(0).join();
String scope = "scope";
String streamName = "stream";
Stream stream = new StreamImpl(scope, streamName);
bucketStore.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope, streamName, executor).join();
// verify that at least one of the buckets got the notification
int bucketId = BucketStore.getBucket(scope, streamName, 3);
// NOTE(review): 'streams' is never asserted on — this call only confirms the read succeeds.
Set<String> streams = bucketStore.getStreamsForBucket(BucketStore.ServiceType.RetentionService, bucketId, executor).join();
BucketService bucketService = bucketServices.get(bucketId);
// Poll at 1 s intervals until the bucket service observes the newly added stream.
AtomicBoolean added = new AtomicBoolean(false);
RetryHelper.loopWithDelay(() -> !added.get(), () -> CompletableFuture.completedFuture(null).thenAccept(x -> added.set(bucketService.getKnownStreams().size() > 0)), Duration.ofSeconds(1).toMillis(), executor).join();
assertTrue(bucketService.getKnownStreams().contains(stream));
// Removing the stream from the bucket store should empty the known-stream set again.
bucketStore.removeStreamFromBucketStore(BucketStore.ServiceType.RetentionService, scope, streamName, executor).join();
AtomicBoolean removed = new AtomicBoolean(false);
RetryHelper.loopWithDelay(() -> !removed.get(), () -> CompletableFuture.completedFuture(null).thenAccept(x -> removed.set(bucketService.getKnownStreams().size() == 0)), Duration.ofSeconds(1).toMillis(), executor).join();
assertEquals(0, bucketService.getKnownStreams().size());
}
Example usage of io.pravega.client.stream.impl.StreamImpl in the pravega project: class ZkStoreBucketServiceTest, method testBucketOwnership.
// Verifies that another host cannot steal bucket ownership, and that a bucket-store update
// made over a second ZK connection (while the primary connection is down) is still
// propagated to the owning BucketService once the primary connection is restored.
@Test(timeout = 10000)
public void testBucketOwnership() throws Exception {
// verify that ownership is not taken up by another host
assertFalse(retentionService.takeBucketOwnership(0, "", executor).join());
// Introduce connection failure error
zkClient.getZookeeperClient().close();
String scope = "scope1";
String streamName = "stream1";
// Second, independent Curator client (no-retry policy) so the bucket-store update succeeds
// while the primary client's connection is down. try-with-resources guarantees the client
// is closed even if join() or an assertion throws (the original leaked it on failure).
try (CuratorFramework zkClient2 = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), 10000, 1000, (r, e, s) -> false)) {
zkClient2.start();
BucketStore bucketStore2 = StreamStoreFactory.createZKBucketStore(zkClient2, executor);
bucketStore2.addStreamToBucketStore(BucketStore.ServiceType.RetentionService, scope, streamName, executor).join();
}
// restart the primary client's connection
zkClient.getZookeeperClient().start();
Stream stream = new StreamImpl(scope, streamName);
// verify that at least one of the buckets got the notification
Map<Integer, BucketService> bucketServices = retentionService.getBucketServices();
int bucketId = BucketStore.getBucket(scope, streamName, 3);
BucketService bucketService = bucketServices.get(bucketId);
// Poll at 1 s intervals until the bucket service observes the stream added above.
AtomicBoolean added = new AtomicBoolean(false);
RetryHelper.loopWithDelay(() -> !added.get(), () -> CompletableFuture.completedFuture(null).thenAccept(x -> added.set(bucketService.getKnownStreams().size() > 0)), Duration.ofSeconds(1).toMillis(), executor).join();
assertTrue(bucketService.getKnownStreams().contains(stream));
}
Example usage of io.pravega.client.stream.impl.StreamImpl in the pravega project: class AutoScaleTest, method setup.
/**
 * Creates the test scope and the three streams (scale-up, scale-down, and transactional
 * scale-up) used by the auto-scale tests, and pre-splits the scale-down stream into two
 * segments so that a later merge can be observed.
 *
 * @throws InterruptedException if interrupted
 * @throws ExecutionException if error in create stream
 */
@Before
public void setup() throws InterruptedException, ExecutionException {
Controller controller = getController();
executorService = ExecutorServiceHelpers.newScheduledThreadPool(5, "AutoScaleTest-main");
// All streams below live in this scope.
Boolean scopeCreated = controller.createScope(SCOPE).get();
log.debug("create scope status {}", scopeCreated);
// Stream the auto-scaler is expected to scale up.
Boolean upStreamCreated = controller.createStream(SCOPE, SCALE_UP_STREAM_NAME, CONFIG_UP).get();
log.debug("create stream status for scale up stream {}", upStreamCreated);
// Stream the auto-scaler is expected to scale down.
Boolean downStreamCreated = controller.createStream(SCOPE, SCALE_DOWN_STREAM_NAME, CONFIG_DOWN).get();
log.debug("create stream status for scaledown stream {}", downStreamCreated);
log.debug("scale down stream starting segments:" + controller.getCurrentSegments(SCOPE, SCALE_DOWN_STREAM_NAME).get().getSegments().size());
// Manually split segment 0 into two equal halves so the scale-down test has segments to merge.
Map<Double, Double> splitRanges = new HashMap<>();
splitRanges.put(0.0, 0.5);
splitRanges.put(0.5, 1.0);
Boolean scaled = controller.scaleStream(new StreamImpl(SCOPE, SCALE_DOWN_STREAM_NAME), Collections.singletonList(0L), splitRanges, executorService).getFuture().get();
assertTrue(scaled);
// Transactional stream used by the txn scale-up test.
Boolean txnStreamCreated = controller.createStream(SCOPE, SCALE_UP_TXN_STREAM_NAME, CONFIG_TXN).get();
log.debug("create stream status for txn stream {}", txnStreamCreated);
}
Aggregations