use of io.pravega.segmentstore.contracts.tables.TableStore in project pravega by pravega.
the class ControllerMetricsTest method setUp.
@Before
public void setUp() throws Exception {
    MetricsConfig metricsConfig = MetricsConfig.builder()
            .with(MetricsConfig.ENABLE_STATISTICS, true)
            .with(MetricsConfig.ENABLE_STATSD_REPORTER, false)
            .build();
    metricsConfig.setDynamicCacheEvictionDuration(Duration.ofMinutes(5));
    MetricsProvider.initialize(metricsConfig);
    statsProvider = MetricsProvider.getMetricsProvider();
    statsProvider.startWithoutExporting();
    log.info("Metrics Stats provider is started");
    zkTestServer = new TestingServerStarter().start();
    serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
    serviceBuilder.initialize();
    StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
    TableStore tableStore = serviceBuilder.createTableStoreService();
    server = new PravegaConnectionListener(false, servicePort, store, tableStore, serviceBuilder.getLowPriorityExecutor());
    server.startListening();
    controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), false, false, controllerPort, serviceHost, servicePort, containerCount, -1);
    controllerWrapper.awaitRunning();
}
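A matching @After would typically release these resources in reverse order of creation. The following is a minimal sketch, assuming the fields shown in setUp and the close()/shutdown semantics of the Pravega test helpers; it is illustrative, not the class's actual tearDown:

@After
public void tearDown() throws Exception {
    // Sketch only: close resources in reverse order of creation. The real
    // tearDown in the Pravega test suite may differ in order and error handling.
    controllerWrapper.close();
    server.close();
    serviceBuilder.close();
    zkTestServer.close();
    statsProvider.close();
}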
use of io.pravega.segmentstore.contracts.tables.TableStore in project pravega by pravega.
the class ReadTest method testEventPositions.
/**
 * This test performs concurrent writes, reads, and position checks on a Stream. Readers and checkers exercise the
 * lazy construction of PositionImpl objects while the internal segmentOffsetUpdates list in EventStreamReaderImpl
 * is being updated as new events are read. The test generates enough events for the segmentOffsetUpdates list in
 * EventStreamReaderImpl to be filled and cleaned at least once. It verifies the thread safety of the new
 * optimization in EventStreamReaderImpl that avoids generating segmentOffset maps on every event read, and it
 * checks the correctness of the segment offsets returned by PositionImpl.
 */
@Test(timeout = 60000)
public void testEventPositions() {
    String endpoint = "localhost";
    String streamName = "eventPositions";
    String readerGroup = "groupPositions";
    String scope = "scopePositions";
    // Generate enough events for the internal segment offset update buffer in
    // EventStreamReaderImpl to be emptied and filled again.
    int eventsToWrite = 2000;
    BlockingQueue<Entry<Integer, PositionImpl>> readEventsPositions = new ArrayBlockingQueue<>(eventsToWrite);
    @Cleanup("shutdown")
    ScheduledExecutorService readersWritersAndCheckers = ExecutorServiceHelpers.newScheduledThreadPool(4, "readers-writers-checkers");
    AtomicInteger finishedProcesses = new AtomicInteger(0);
    int port = TestUtils.getAvailableListenPort();
    StreamSegmentStore store = SERVICE_BUILDER.createStreamSegmentService();
    TableStore tableStore = SERVICE_BUILDER.createTableStoreService();
    @Cleanup
    PravegaConnectionListener server = new PravegaConnectionListener(false, port, store, tableStore, NoOpScheduledExecutor.get());
    server.startListening();
    @Cleanup
    MockStreamManager streamManager = new MockStreamManager(scope, endpoint, port);
    @Cleanup
    MockClientFactory clientFactory = streamManager.getClientFactory();
    ReaderGroupConfig groupConfig = ReaderGroupConfig.builder()
            .groupRefreshTimeMillis(1000)
            .stream(Stream.of(scope, streamName))
            .build();
    streamManager.createScope(scope);
    // Create a Stream with 2 segments.
    streamManager.createStream(scope, streamName, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).build());
    streamManager.createReaderGroup(readerGroup, groupConfig);
    JavaSerializer<String> serializer = new JavaSerializer<>();
    @Cleanup
    EventStreamWriter<String> producer = clientFactory.createEventWriter(streamName, serializer, EventWriterConfig.builder().build());
    @Cleanup
    EventStreamReader<String> reader1 = clientFactory.createReader("reader1", readerGroup, serializer, ReaderConfig.builder().build());
    @Cleanup
    EventStreamReader<String> reader2 = clientFactory.createReader("reader2", readerGroup, serializer, ReaderConfig.builder().build());
    // Leave some time for the readers to rebalance the segments and acquire one each.
    Exceptions.handleInterrupted(() -> Thread.sleep(2000));
    // Start writers and readers in parallel.
    CompletableFuture<Void> reader1Future = CompletableFuture.runAsync(() -> {
        readAndQueueEvents(reader1, eventsToWrite, readEventsPositions);
        finishedProcesses.incrementAndGet();
    }, readersWritersAndCheckers);
    CompletableFuture<Void> reader2Future = CompletableFuture.runAsync(() -> {
        readAndQueueEvents(reader2, eventsToWrite, readEventsPositions);
        finishedProcesses.incrementAndGet();
    }, readersWritersAndCheckers);
    CompletableFuture<Void> writerFuture = CompletableFuture.runAsync(() -> {
        for (int i = 0; i < eventsToWrite; i++) {
            producer.writeEvent("segment1", "a");
            producer.writeEvent("segment2", "b");
            Exceptions.handleInterrupted(() -> Thread.sleep(1));
        }
        finishedProcesses.incrementAndGet();
    }, readersWritersAndCheckers);
    // This process accesses the positions read by the reader threads, which means that this thread is
    // concurrently reading the shared segmentOffsetUpdates list while the readers are appending data to it.
    CompletableFuture<Void> checkOffsets = CompletableFuture.runAsync(() -> {
        // Each event is a 1-char string payload; Java-serialized (8 bytes) plus the
        // 8-byte type+length header, every event occupies 16 bytes in its segment.
        int sizeOfEvent = 16;
        while (finishedProcesses.get() < 2) {
            Entry<Integer, PositionImpl> element;
            try {
                element = readEventsPositions.poll(1, TimeUnit.MINUTES);
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
            int numberOfSegments = element.getValue().getOwnedSegmentsWithOffsets().size();
            assertEquals("Reader owning too many segments.", 1, numberOfSegments);
            // The segment offset should increase by sizeOfEvent with every event read.
            long segmentPositionOffset = element.getValue().getOwnedSegmentsWithOffsets().values().iterator().next();
            assertEquals("Wrong event position", sizeOfEvent * element.getKey(), segmentPositionOffset);
        }
        finishedProcesses.incrementAndGet();
    }, readersWritersAndCheckers);
    // Wait for all futures to complete.
    CompletableFuture.allOf(writerFuture, reader1Future, reader2Future, checkOffsets).join();
    // Any failure while reading, writing, or checking positions will make this assertion fail.
    assertEquals(4, finishedProcesses.get());
    ExecutorServiceHelpers.shutdown(readersWritersAndCheckers);
}
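Both reader futures call a readAndQueueEvents helper that is not part of this snippet. A minimal sketch of what such a helper could look like, assuming the EventStreamReader#readNextEvent and EventRead#getPosition client APIs; the structure is illustrative, not the actual Pravega test code:

private void readAndQueueEvents(EventStreamReader<String> reader, int eventsToWrite, BlockingQueue<Entry<Integer, PositionImpl>> queue) {
    int eventCount = 1;
    while (eventCount <= eventsToWrite) {
        EventRead<String> event = reader.readNextEvent(1000);
        if (event.getEvent() != null && !event.isCheckpoint()) {
            // Pair the running event count with the position at which the event was
            // read, so the checker thread can validate the per-segment offsets.
            queue.offer(new AbstractMap.SimpleEntry<>(eventCount, (PositionImpl) event.getPosition()));
            eventCount++;
        }
    }
}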
use of io.pravega.segmentstore.contracts.tables.TableStore in project pravega by pravega.
the class ReadTest method readConditionalData.
@Test(timeout = 10000)
public void readConditionalData() throws SegmentSealedException, EndOfSegmentException, SegmentTruncatedException {
    String endpoint = "localhost";
    String scope = "scope";
    String stream = "readConditionalData";
    int port = TestUtils.getAvailableListenPort();
    byte[] testString = "Hello world\n".getBytes();
    StreamSegmentStore store = SERVICE_BUILDER.createStreamSegmentService();
    TableStore tableStore = SERVICE_BUILDER.createTableStoreService();
    @Cleanup
    PravegaConnectionListener server = new PravegaConnectionListener(false, port, store, tableStore, SERVICE_BUILDER.getLowPriorityExecutor());
    server.startListening();
    @Cleanup
    SocketConnectionFactoryImpl clientCF = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup
    ConnectionPoolImpl connectionPool = new ConnectionPoolImpl(ClientConfig.builder().build(), clientCF);
    @Cleanup
    Controller controller = new MockController(endpoint, port, connectionPool, true);
    controller.createScope(scope);
    controller.createStream(scope, stream, StreamConfiguration.builder().build());
    ConditionalOutputStreamFactoryImpl segmentproducerClient = new ConditionalOutputStreamFactoryImpl(controller, connectionPool);
    SegmentInputStreamFactoryImpl segmentConsumerClient = new SegmentInputStreamFactoryImpl(controller, connectionPool);
    Segment segment = Futures.getAndHandleExceptions(controller.getCurrentSegments(scope, stream), RuntimeException::new).getSegments().iterator().next();
    @Cleanup
    ConditionalOutputStream out = segmentproducerClient.createConditionalOutputStream(segment, DelegationTokenProviderFactory.createWithEmptyToken(), EventWriterConfig.builder().build());
    // The first conditional append succeeds: the segment is empty, so expected offset 0 matches.
    assertTrue(out.write(ByteBuffer.wrap(testString), 0));
    @Cleanup
    EventSegmentReader in = segmentConsumerClient.createEventReaderForSegment(segment);
    ByteBuffer result = in.read();
    assertEquals(ByteBuffer.wrap(testString), result);
    assertNull(in.read(100));
    // A second append at expected offset 0 fails, since the segment has grown past that offset.
    assertFalse(out.write(ByteBuffer.wrap(testString), 0));
    // Appending at the correct expected offset (payload length plus the event header) succeeds.
    assertTrue(out.write(ByteBuffer.wrap(testString), testString.length + WireCommands.TYPE_PLUS_LENGTH_SIZE));
    result = in.read();
    assertEquals(ByteBuffer.wrap(testString), result);
    assertNull(in.read(100));
}
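The expected offset in the second successful write follows from the wire format: each appended event is framed by a type-plus-length header of WireCommands.TYPE_PLUS_LENGTH_SIZE bytes (8 bytes: a 4-byte type plus a 4-byte length), so an event occupies its payload length plus 8 bytes in the segment. A sketch of the arithmetic, extended with a hypothetical third append that is not in the original test:

// "Hello world\n" is 12 bytes; with the 8-byte header each event spans 20 bytes.
long eventSize = testString.length + WireCommands.TYPE_PLUS_LENGTH_SIZE;
// Illustrative extension: a third conditional append would be expected at
// offset 2 * eventSize, i.e. 40, since two events already occupy the segment.
assertTrue(out.write(ByteBuffer.wrap(testString), 2 * eventSize));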
use of io.pravega.segmentstore.contracts.tables.TableStore in project pravega by pravega.
the class ReadWriteTest method setup.
@Before
public void setup() throws Exception {
    final int controllerPort = TestUtils.getAvailableListenPort();
    final String serviceHost = "localhost";
    final int servicePort = TestUtils.getAvailableListenPort();
    final int containerCount = 4;
    // 1. Start ZK
    this.zkTestServer = new TestingServerStarter().start();
    // 2. Start Pravega SegmentStore service.
    serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
    serviceBuilder.initialize();
    StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
    TableStore tableStore = serviceBuilder.createTableStoreService();
    this.server = new PravegaConnectionListener(false, servicePort, store, tableStore, serviceBuilder.getLowPriorityExecutor());
    this.server.startListening();
    // 3. Start Pravega Controller service
    this.controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), false, controllerPort, serviceHost, servicePort, containerCount);
    this.controllerWrapper.awaitRunning();
    this.controller = controllerWrapper.getController();
    this.writerPool = ExecutorServiceHelpers.newScheduledThreadPool(NUM_WRITERS, "WriterPool");
    this.readerPool = ExecutorServiceHelpers.newScheduledThreadPool(NUM_READERS, "ReaderPool");
}
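The writer and reader pools are sized to the test's NUM_WRITERS and NUM_READERS constants. As an illustration of how a test method might fan work out onto writerPool, here is a sketch in which clientFactory, streamName, and writeEvents are hypothetical names standing in for the test's actual fields and helpers:

List<CompletableFuture<Void>> writers = new ArrayList<>();
for (int i = 0; i < NUM_WRITERS; i++) {
    // writeEvents is a hypothetical helper that writes one writer's batch of events.
    writers.add(CompletableFuture.runAsync(() -> writeEvents(clientFactory, streamName), writerPool));
}
// Futures.allOf (io.pravega.common.concurrent.Futures) joins the whole collection.
Futures.allOf(writers).join();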
use of io.pravega.segmentstore.contracts.tables.TableStore in project pravega by pravega.
the class ReaderGroupNotificationTest method setUp.
@Before
public void setUp() throws Exception {
    zkTestServer = new TestingServerStarter().start();
    serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
    serviceBuilder.initialize();
    StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
    TableStore tableStore = serviceBuilder.createTableStoreService();
    server = new PravegaConnectionListener(false, servicePort, store, tableStore, this.serviceBuilder.getLowPriorityExecutor());
    server.startListening();
    controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), false, controllerPort, serviceHost, servicePort, containerCount);
    controllerWrapper.awaitRunning();
    listenerLatch.reset();
    listenerInvoked.set(false);
}
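The latch and flag reset here are used by the notification tests in this class. A hedged sketch of how such a test might register a segment notification listener, assuming the ReaderGroup#getSegmentNotifier API; readerGroupManager, "notifier-group", and executor are assumed names, not fields shown in this snippet:

ReaderGroup group = readerGroupManager.getReaderGroup("notifier-group");
group.getSegmentNotifier(executor).registerListener(notification -> {
    // Record that the listener fired and unblock the waiting test thread.
    listenerInvoked.set(true);
    listenerLatch.release();
});
Exceptions.handleInterrupted(() -> listenerLatch.await());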