use of io.pravega.client.stream.impl.UTF8StringSerializer in project pravega by pravega.
the class RestoreBackUpDataRecoveryTest method writeEvents.
// Writes the required number of events to the given stream without using transactions.
private void writeEvents(String streamName, ClientFactoryImpl clientFactory) {
    EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new UTF8StringSerializer(), EventWriterConfig.builder().build());
    for (int i = 0; i < TOTAL_NUM_EVENTS; i++) {
        writer.writeEvent("", EVENT);
    }
    writer.flush();
    writer.close();
}
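For contrast, a transactional variant of this helper might look like the sketch below. It is an illustrative assumption, not part of the original test: the method name writeTransactionalEvents is invented here, while createTransactionalEventWriter, beginTxn and commit are the same client APIs used in the transaction examples further down.
// Hypothetical sketch (not in RestoreBackUpDataRecoveryTest): writes the same events inside a single transaction so they become visible atomically on commit.
private void writeTransactionalEvents(String streamName, ClientFactoryImpl clientFactory) throws TxnFailedException {
    TransactionalEventStreamWriter<String> txnWriter = clientFactory.createTransactionalEventWriter("writer", streamName, new UTF8StringSerializer(), EventWriterConfig.builder().transactionTimeoutTime(10000).build());
    Transaction<String> txn = txnWriter.beginTxn();
    for (int i = 0; i < TOTAL_NUM_EVENTS; i++) {
        txn.writeEvent("", EVENT);
    }
    txn.commit();
    txnWriter.close();
}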
use of io.pravega.client.stream.impl.UTF8StringSerializer in project pravega by pravega.
the class MetricsTest method metricsTimeBasedCacheEvictionTest.
@Test(timeout = 120000)
public void metricsTimeBasedCacheEvictionTest() throws Exception {
    ClientConfig clientConfig = ClientConfig.builder().build();
    try (ConnectionPool cp = new ConnectionPoolImpl(clientConfig, new SocketConnectionFactoryImpl(clientConfig));
         StreamManager streamManager = new StreamManagerImpl(controller, cp)) {
        boolean createScopeStatus = streamManager.createScope(scope);
        log.info("Create scope status {}", createScopeStatus);
        boolean createStreamStatus = streamManager.createStream(scope, STREAM_NAME, config);
        log.info("Create stream status {}", createStreamStatus);
    }
    try (ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
         ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
         ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl(scope, controller, clientFactory)) {
        @Cleanup EventStreamWriter<String> writer1 = clientFactory.createEventWriter(STREAM_NAME, new UTF8StringSerializer(), EventWriterConfig.builder().build());
        String event = "12345";
        long bytesWritten = TOTAL_NUM_EVENTS * (8 + event.length());
        writeEvents(event, writer1);
        String readerGroupName1 = readerGroupName + "1";
        log.info("Creating Reader group : {}", readerGroupName1);
        readerGroupManager.createReaderGroup(readerGroupName1, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).automaticCheckpointIntervalMillis(2000).build());
        EventStreamReader<String> reader1 = clientFactory.createReader(readerName, readerGroupName1, new UTF8StringSerializer(), ReaderConfig.builder().build());
        readAllEvents(reader1);
        final String[] streamTags = segmentTags(scope + "/" + STREAM_NAME + "/0.#epoch.0");
        assertEquals(bytesWritten, (long) MetricRegistryUtils.getCounter(SEGMENT_READ_BYTES, streamTags).count());
        // Wait for cache eviction to happen.
        Thread.sleep(5000);
        String readerGroupName2 = readerGroupName + "2";
        log.info("Creating Reader group : {}", readerGroupName2);
        readerGroupManager.createReaderGroup(readerGroupName2, ReaderGroupConfig.builder().stream(Stream.of(scope, STREAM_NAME)).automaticCheckpointIntervalMillis(2000).build());
        EventStreamReader<String> reader2 = clientFactory.createReader(readerName, readerGroupName2, new UTF8StringSerializer(), ReaderConfig.builder().build());
        readAllEvents(reader2);
        // The metric is evicted from the cache after the cache eviction duration, so the
        // count starts from 0 again rather than adding to the previously read bytes.
        assertEquals(bytesWritten, (long) MetricRegistryUtils.getCounter(SEGMENT_READ_BYTES, streamTags).count());
        Map<Double, Double> map = new HashMap<>();
        map.put(0.0, 1.0);
        // Seal segment 0, create segment 1.
        CompletableFuture<Boolean> scaleStatus = controller.scaleStream(new StreamImpl(scope, STREAM_NAME), Collections.singletonList(0L), map, executorService()).getFuture();
        Assert.assertTrue(scaleStatus.get());
        @Cleanup EventStreamWriter<String> writer2 = clientFactory.createEventWriter(STREAM_NAME, new UTF8StringSerializer(), EventWriterConfig.builder().build());
        writeEvents(event, writer2);
        readAllEvents(reader1);
        final String[] streamTags2nd = segmentTags(scope + "/" + STREAM_NAME + "/1.#epoch.1");
        assertEquals(bytesWritten, (long) MetricRegistryUtils.getCounter(SEGMENT_READ_BYTES, streamTags2nd).count());
        readerGroupManager.deleteReaderGroup(readerGroupName1);
        readerGroupManager.deleteReaderGroup(readerGroupName2);
        CompletableFuture<Boolean> sealStreamStatus = controller.sealStream(scope, STREAM_NAME);
        log.info("Sealing stream {}", STREAM_NAME);
        assertTrue(sealStreamStatus.get());
        CompletableFuture<Boolean> deleteStreamStatus = controller.deleteStream(scope, STREAM_NAME);
        log.info("Deleting stream {}", STREAM_NAME);
        assertTrue(deleteStreamStatus.get());
        CompletableFuture<Boolean> deleteScopeStatus = controller.deleteScope(scope);
        log.info("Deleting scope {}", scope);
        assertTrue(deleteScopeStatus.get());
    }
    log.info("Metrics Time based Cache Eviction test succeeds");
}
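The test above relies on the helper methods writeEvents(event, writer) and readAllEvents(reader), which are not part of this snippet. A minimal sketch of what they might look like follows; the actual helpers in MetricsTest may differ, so treat this as an assumption.
// Hedged sketch: write TOTAL_NUM_EVENTS copies of the event, then read them back one at a time.
private void writeEvents(String event, EventStreamWriter<String> writer) {
    for (int i = 0; i < TOTAL_NUM_EVENTS; i++) {
        writer.writeEvent(event);
    }
    writer.flush();
}

private void readAllEvents(EventStreamReader<String> reader) {
    for (int i = 0; i < TOTAL_NUM_EVENTS; i++) {
        reader.readNextEvent(10000);
    }
}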
use of io.pravega.client.stream.impl.UTF8StringSerializer in project pravega by pravega.
the class EndToEndTruncationTest method testSimpleOffsetTruncation.
/**
* This test checks the basic operation of truncation with offsets. The test first writes two events on a Stream
* (1 segment) and then truncates the Stream after the first event. We verify that a new reader first gets a
* TruncatedDataException and then it reads only the second event written, as the first has been truncated.
*
* @throws ReinitializationRequiredException If a checkpoint or reset is performed on the reader group.
*/
@Test(timeout = 30000)
public void testSimpleOffsetTruncation() throws ReinitializationRequiredException {
    final String scope = "truncationTests";
    final String streamName = "testSimpleOffsetTruncation";
    final String readerGroupName = "RGTestSimpleOffsetTruncation";
    StreamConfiguration streamConfiguration = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    @Cleanup StreamManager streamManager = StreamManager.create(PRAVEGA.getControllerURI());
    streamManager.createScope(scope);
    streamManager.createStream(scope, streamName, streamConfiguration);
    @Cleanup EventStreamClientFactory clientFactory = EventStreamClientFactory.withScope(scope, ClientConfig.builder().controllerURI(PRAVEGA.getControllerURI()).build());
    @Cleanup ReaderGroupManager groupManager = ReaderGroupManager.withScope(scope, PRAVEGA.getControllerURI());
    groupManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder().disableAutomaticCheckpoints().stream(scope + "/" + streamName).build());
    @Cleanup ReaderGroup readerGroup = groupManager.getReaderGroup(readerGroupName);
    // Write two events to the Stream.
    writeEvents(clientFactory, streamName, 2);
    // Read only the first one.
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader(readerGroupName + "1", readerGroupName, new UTF8StringSerializer(), ReaderConfig.builder().build());
    assertEquals("0", reader.readNextEvent(5000).getEvent());
    reader.close();
    // Create a Checkpoint, get StreamCut and truncate the Stream at that point.
    Checkpoint cp = readerGroup.initiateCheckpoint("myCheckpoint", executorService()).join();
    StreamCut streamCut = cp.asImpl().getPositions().values().iterator().next();
    assertTrue(streamManager.truncateStream(scope, streamName, streamCut));
    // Verify that a new reader reads from event 1 onwards.
    final String newReaderGroupName = readerGroupName + "new";
    groupManager.createReaderGroup(newReaderGroupName, ReaderGroupConfig.builder().stream(Stream.of(scope, streamName)).build());
    @Cleanup final EventStreamReader<String> newReader = clientFactory.createReader(newReaderGroupName + "2", newReaderGroupName, new UTF8StringSerializer(), ReaderConfig.builder().build());
    assertEquals("Expected read event: ", "1", newReader.readNextEvent(5000).getEvent());
    assertNull(newReader.readNextEvent(5000).getEvent());
}
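The call writeEvents(clientFactory, streamName, 2) above uses a helper that is not shown in this snippet. A minimal sketch is given below under the assumption that each event's payload is its index ("0", "1", ...), which is what the assertions expect; the real helper in EndToEndTruncationTest may differ.
// Hedged sketch of the writeEvents helper: event payloads are the indices "0", "1", ...
private void writeEvents(EventStreamClientFactory clientFactory, String streamName, int totalEvents) {
    @Cleanup EventStreamWriter<String> writer = clientFactory.createEventWriter(streamName, new UTF8StringSerializer(), EventWriterConfig.builder().build());
    for (int i = 0; i < totalEvents; i++) {
        writer.writeEvent(String.valueOf(i));
        writer.flush();
    }
}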
use of io.pravega.client.stream.impl.UTF8StringSerializer in project pravega by pravega.
the class EndToEndTxnWithTest method testTxnWithErrors.
@Test(timeout = 30000)
public void testTxnWithErrors() throws Exception {
    String scope = "scope";
    String stream = "testTxnWithErrors";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    Controller controller = PRAVEGA.getLocalController();
    controller.createScope(scope).get();
    controller.createStream(scope, stream, config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl(scope, controller, connectionFactory);
    @Cleanup TransactionalEventStreamWriter<String> test = clientFactory.createTransactionalEventWriter("writer", stream, new UTF8StringSerializer(), EventWriterConfig.builder().transactionTimeoutTime(10000).build());
    Transaction<String> transaction = test.beginTxn();
    transaction.writeEvent("0", "txntest1");
    // Abort the transaction to simulate a txn abort due to a missing ping request.
    controller.abortTransaction(Stream.of(scope, stream), transaction.getTxnId()).join();
    // Check the status of the transaction.
    assertEventuallyEquals(Transaction.Status.ABORTED, () -> controller.checkTransactionStatus(Stream.of(scope, stream), transaction.getTxnId()).join(), 10000);
    transaction.writeEvent("0", "txntest2");
    // Verify that commit fails with TxnFailedException.
    assertThrows("TxnFailedException should be thrown", () -> transaction.commit(), t -> t instanceof TxnFailedException);
}
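For contrast with the forced abort above, a sketch of the happy path is shown here: without an external abortTransaction call, the commit succeeds and the transaction eventually reports COMMITTED. The event payload "txntest3" is illustrative; everything else reuses the objects defined in the test, and this fragment is not part of the original method.
// Illustrative happy-path sketch (not in the original test).
Transaction<String> okTxn = test.beginTxn();
okTxn.writeEvent("0", "txntest3");
okTxn.flush();
okTxn.commit();
assertEventuallyEquals(Transaction.Status.COMMITTED, () -> controller.checkTransactionStatus(Stream.of(scope, stream), okTxn.getTxnId()).join(), 10000);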
use of io.pravega.client.stream.impl.UTF8StringSerializer in project pravega by pravega.
the class EndToEndTxnWithTest method testGetTxnWithScale.
@Test(timeout = 20000)
public void testGetTxnWithScale() throws Exception {
    String streamName = "testGetTxnWithScale";
    final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    final Serializer<String> serializer = new UTF8StringSerializer();
    final EventWriterConfig writerConfig = EventWriterConfig.builder().transactionTimeoutTime(10000).build();
    final Controller controller = PRAVEGA.getLocalController();
    controller.createScope("test").get();
    controller.createStream("test", streamName, config).get();
    @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
    @Cleanup ClientFactoryImpl clientFactory = new ClientFactoryImpl("test", controller, connectionFactory);
    @Cleanup EventStreamWriter<String> streamWriter = clientFactory.createEventWriter(streamName, serializer, writerConfig);
    streamWriter.writeEvent("key", "e").join();
    @Cleanup TransactionalEventStreamWriter<String> txnWriter = clientFactory.createTransactionalEventWriter(streamName, serializer, writerConfig);
    Transaction<String> txn = txnWriter.beginTxn();
    txn.writeEvent("key", "1");
    txn.flush();
    // The txn is not yet committed here.
    UUID txnId = txn.getTxnId();
    // Scale up the stream.
    scaleUpStream(streamName);
    // Write an event using the stream writer.
    streamWriter.writeEvent("key", "e").join();
    Transaction<String> txn1 = txnWriter.getTxn(txnId);
    txn1.writeEvent("key", "2");
    txn1.flush();
    // Commit the transaction.
    txn1.commit();
    assertEventuallyEquals(Transaction.Status.COMMITTED, txn1::checkStatus, 5000);
    String group = "testGetTxnWithScale-group";
    @Cleanup ReaderGroupManager groupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
    groupManager.createReaderGroup(group, ReaderGroupConfig.builder().disableAutomaticCheckpoints().groupRefreshTimeMillis(0).stream("test/" + streamName).build());
    @Cleanup EventStreamReader<String> reader = clientFactory.createReader("readerId", group, new UTF8StringSerializer(), ReaderConfig.builder().build());
    EventRead<String> event = reader.readNextEvent(5000);
    assertEquals("e", event.getEvent());
    assertNull(reader.readNextEvent(100).getEvent());
    groupManager.getReaderGroup(group).initiateCheckpoint("cp1", executorService());
    event = reader.readNextEvent(5000);
    assertEquals("Checkpoint event expected", "cp1", event.getCheckpointName());
    event = reader.readNextEvent(5000);
    assertEquals("second event post scale up", "e", event.getEvent());
    assertNull(reader.readNextEvent(100).getEvent());
    groupManager.getReaderGroup(group).initiateCheckpoint("cp2", executorService());
    event = reader.readNextEvent(5000);
    assertEquals("Checkpoint event expected", "cp2", event.getCheckpointName());
    event = reader.readNextEvent(5000);
    assertEquals("txn events", "1", event.getEvent());
    event = reader.readNextEvent(5000);
    assertEquals("txn events", "2", event.getEvent());
}
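The test calls scaleUpStream(streamName), a helper not included in this snippet. Based on the controller.scaleStream usage shown in the metrics test above, a hedged sketch might look like the following; the scope name "test", the two key ranges and the exception signature are assumptions, and the real helper in EndToEndTxnWithTest may differ.
// Hedged sketch: seal segment 0 and replace it with two successor segments covering the key space.
private void scaleUpStream(String streamName) throws InterruptedException, ExecutionException {
    Map<Double, Double> keyRanges = new HashMap<>();
    keyRanges.put(0.0, 0.5);
    keyRanges.put(0.5, 1.0);
    Boolean scaleStatus = PRAVEGA.getLocalController().scaleStream(new StreamImpl("test", streamName), Collections.singletonList(0L), keyRanges, executorService()).getFuture().get();
    assertTrue(scaleStatus);
}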