Example usage of io.pravega.segmentstore.contracts.tables.TableStore from the Pravega project: class TableBasedMetadataStoreMockTests, method testExceptionDuringRemoveWithSpy.
/**
 * Verifies that a failure thrown by {@link TableStore#remove} during a transaction commit
 * does not corrupt the metadata store: the surviving updates are visible, the failed delete
 * is left pending (dbObject != -1), and a subsequent retry without the fault completes the delete.
 *
 * Fix applied: removed the unused local {@code td} (TransactionData builder result) that was
 * never referenced in this test — leftover from a sibling test.
 */
@Test
public void testExceptionDuringRemoveWithSpy() throws Exception {
// Spy a real in-memory TableStore so only remove() is faulted; everything else behaves normally.
TableStore mockTableStore = spy(new InMemoryTableStore(executorService()));
@Cleanup TableBasedMetadataStore tableBasedMetadataStore = new TableBasedMetadataStore("test", mockTableStore, ChunkedSegmentStorageConfig.DEFAULT_CONFIG, executorService());
// Step 1 - set up keys
try (val txn = tableBasedMetadataStore.beginTransaction(false, "TEST")) {
txn.create(new MockStorageMetadata("key1", "A"));
txn.create(new MockStorageMetadata("key2", "B"));
txn.create(new MockStorageMetadata("key3", "C"));
txn.commit().join();
}
// Step 2 - Make remove() fail with an arbitrary (non-storage) exception via a pre-failed future.
Exception e = new ArithmeticException();
CompletableFuture<Void> f = new CompletableFuture<>();
f.completeExceptionally(e);
when(mockTableStore.remove(anyString(), any(), any())).thenReturn(f);
// Step 3 - modify some keys and delete a key. The delete hits the faulted remove().
try (val txn = tableBasedMetadataStore.beginTransaction(false, "TEST")) {
Assert.assertEquals("A", ((MockStorageMetadata) txn.get("key1").get()).getValue());
Assert.assertEquals("B", ((MockStorageMetadata) txn.get("key2").get()).getValue());
Assert.assertEquals("C", ((MockStorageMetadata) txn.get("key3").get()).getValue());
txn.update(new MockStorageMetadata("key1", "a"));
txn.update(new MockStorageMetadata("key2", "b"));
txn.delete("key3");
txn.commit().join();
}
// Step 4 - Validate results and then retry delete with no mocking.
try (val txn = tableBasedMetadataStore.beginTransaction(false, "TEST")) {
Assert.assertEquals("a", ((MockStorageMetadata) txn.get("key1").get()).getValue());
Assert.assertEquals("b", ((MockStorageMetadata) txn.get("key2").get()).getValue());
Assert.assertEquals(null, txn.get("key3").get());
// Raw read bypasses the transaction view: the key still exists with a live dbObject,
// proving the failed remove left the tombstone un-finalized.
val direct = tableBasedMetadataStore.read("key3").get();
Assert.assertNotNull(direct);
Assert.assertNotEquals(-1L, direct.getDbObject());
txn.delete("key3");
// stop mocking - let remove() call through to the real InMemoryTableStore.
when(mockTableStore.remove(anyString(), any(), any())).thenCallRealMethod();
txn.commit().join();
}
// Step 5 - Validate results: delete is now fully applied (dbObject == -1 marks removal).
try (val txn = tableBasedMetadataStore.beginTransaction(true, "TEST")) {
Assert.assertEquals("a", ((MockStorageMetadata) txn.get("key1").get()).getValue());
Assert.assertEquals("b", ((MockStorageMetadata) txn.get("key2").get()).getValue());
Assert.assertEquals(null, txn.get("key3").get());
val direct = tableBasedMetadataStore.read("key3").get();
Assert.assertNotNull(direct);
Assert.assertEquals(-1L, direct.getDbObject());
}
}
Example usage of io.pravega.segmentstore.contracts.tables.TableStore from the Pravega project: class TableBasedMetadataStoreMockTests, method testRandomExceptionDuringWrite.
/**
 * Verifies that writing a TransactionData with a null dbObject surfaces as a
 * {@link StorageMetadataException} whose cause is an {@link IllegalStateException}.
 */
@Test
public void testRandomExceptionDuringWrite() {
TableStore storeMock = mock(TableStore.class);
@Cleanup TableBasedMetadataStore metadataStore = new TableBasedMetadataStore("test", storeMock, ChunkedSegmentStorageConfig.DEFAULT_CONFIG, executorService());
// Segment "already exists" so initialization proceeds without creating anything.
when(storeMock.createSegment(any(), any(), any())).thenReturn(Futures.failedFuture(new CompletionException(new StreamSegmentExistsException("test"))));
// Make it throw IllegalStateException: dbObject(null) is the invalid state.
val transactionData = BaseMetadataStore.TransactionData.builder().key("foo").version(1L).dbObject(null).build();
AssertExtensions.assertFutureThrows("write should throw an exception",
        metadataStore.writeAll(Collections.singleton(transactionData)),
        ex -> ex instanceof StorageMetadataException && ex.getCause() instanceof IllegalStateException);
}
Example usage of io.pravega.segmentstore.contracts.tables.TableStore from the Pravega project: class TableBasedMetadataStoreMockTests, method testIllegalStateExceptionDuringRead.
/**
 * Verifies that an {@link IllegalStateException} thrown synchronously by the underlying
 * table store's get() propagates out of {@code read()} as-is.
 */
@Test
public void testIllegalStateExceptionDuringRead() {
TableStore storeMock = mock(TableStore.class);
@Cleanup TableBasedMetadataStore metadataStore = new TableBasedMetadataStore("test", storeMock, ChunkedSegmentStorageConfig.DEFAULT_CONFIG, executorService());
// Segment "already exists" so initialization proceeds without creating anything.
when(storeMock.createSegment(any(), any(), any())).thenReturn(Futures.failedFuture(new CompletionException(new StreamSegmentExistsException("test"))));
// get() throws synchronously rather than returning a failed future.
when(storeMock.get(anyString(), any(), any())).thenThrow(new IllegalStateException());
AssertExtensions.assertFutureThrows("read should throw an exception",
        metadataStore.read("test"),
        ex -> ex instanceof IllegalStateException);
}
Example usage of io.pravega.segmentstore.contracts.tables.TableStore from the Pravega project: class StreamMetadataTest, method testMetadataOperations.
/**
 * End-to-end exercise of Controller stream-metadata operations against an in-memory
 * segment store: create/seal streams, reject duplicates, update scaling policies,
 * and query active segments / segments-at-timestamp. Steps are order-dependent.
 */
@Test(timeout = 60000)
public void testMetadataOperations() throws Exception {
// Bring up ZK, an in-memory segment store, a Pravega listener, and a Controller wrapper.
@Cleanup TestingServer zkTestServer = new TestingServerStarter().start();
ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
serviceBuilder.initialize();
StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
int servicePort = TestUtils.getAvailableListenPort();
TableStore tableStore = serviceBuilder.createTableStoreService();
@Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, servicePort, store, tableStore, serviceBuilder.getLowPriorityExecutor());
server.startListening();
int controllerPort = TestUtils.getAvailableListenPort();
@Cleanup ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), false, controllerPort, "localhost", servicePort, 4);
Controller controller = controllerWrapper.getController();
// Fixture names: three scopes, with streamName1 reused across scopes in CS3.
final String scope1 = "scope1";
final String streamName1 = "stream1";
final String scopeSeal = "scopeSeal";
final String streamNameSeal = "streamSeal";
final String scope2 = "scope2";
final String streamName2 = "stream2";
assertEquals(CreateScopeStatus.Status.SUCCESS, controllerWrapper.getControllerService().createScope(scope1, 0L).get().getStatus());
final ScalingPolicy scalingPolicy = ScalingPolicy.fixed(2);
final StreamConfiguration config1 = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
// create stream and seal stream
// CS1:create a stream :given a streamName, scope and config
assertTrue(controller.createStream(scope1, streamName1, config1).get());
// Seal a stream given a streamName and scope.
controllerWrapper.getControllerService().createScope(scopeSeal, 0L).get();
final StreamConfiguration configSeal = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
assertTrue(controller.createStream(scopeSeal, streamNameSeal, configSeal).get());
controller.getCurrentSegments(scopeSeal, streamNameSeal).get();
assertTrue(controller.sealStream(scopeSeal, streamNameSeal).get());
assertTrue("FAILURE: No active segments should be present in a sealed stream", controller.getCurrentSegments(scopeSeal, streamNameSeal).get().getSegments().isEmpty());
// Seal an already sealed stream - sealing is idempotent and must succeed again.
assertTrue(controller.sealStream(scopeSeal, streamNameSeal).get());
assertTrue("FAILURE: No active segments should be present in a sealed stream", controller.getCurrentSegments(scopeSeal, streamNameSeal).get().getSegments().isEmpty());
assertFutureThrows("FAILURE: Seal operation on a non-existent stream returned ", controller.sealStream(scopeSeal, "nonExistentStream"), t -> true);
// CS2:stream duplication not allowed - createStream on an existing stream returns false.
assertFalse(controller.createStream(scope1, streamName1, config1).get());
// CS3:create a stream with same stream name in different scopes
controllerWrapper.getControllerService().createScope(scope2, 0L).get();
final StreamConfiguration config2 = StreamConfiguration.builder().scalingPolicy(scalingPolicy).build();
assertTrue(controller.createStream(scope2, streamName1, config2).get());
// CS4:create a stream with different stream name and config in same scope
final StreamConfiguration config3 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(3)).build();
assertTrue(controller.createStream(scope1, streamName2, config3).get());
// update stream config(update Stream)
// AS3:update the type of scaling policy (fixed -> byDataRate)
final StreamConfiguration config6 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byDataRate(100, 2, 2)).build();
assertTrue(controller.updateStream(scope1, streamName1, config6).get());
// AS4:update the target rate of scaling policy
final StreamConfiguration config7 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byDataRate(200, 2, 2)).build();
assertTrue(controller.updateStream(scope1, streamName1, config7).get());
// AS5:update the scale factor of scaling policy
final StreamConfiguration config8 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byDataRate(200, 4, 2)).build();
assertTrue(controller.updateStream(scope1, streamName1, config8).get());
// AS6:update the minNumsegments of scaling policy
final StreamConfiguration config9 = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byDataRate(200, 4, 3)).build();
assertTrue(controller.updateStream(scope1, streamName1, config9).get());
// the number of segments in the stream should now be 3 (asserted further below).
// AS7:Update configuration of non-existent stream.
final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(2)).build();
CompletableFuture<Boolean> updateStatus = controller.updateStream("scope", "streamName", config);
assertFutureThrows("FAILURE: Updating the configuration of a non-existent stream", updateStatus, t -> true);
// get currently active segments
// GCS1:get active segments of the stream
assertFalse(controller.getCurrentSegments(scope1, streamName1).get().getSegments().isEmpty());
// GCS2:Get active segments for a non-existent stream.
assertFutureThrows("Active segments cannot be fetched for non existent stream", controller.getCurrentSegments("scope", "streamName"), t -> true);
// get positions at a given time stamp
// PS1:get positions at a given time stamp:given stream, time stamp, count
// stream1 was created with 2 segments; segments-at-time reflects creation-time layout.
Stream stream1 = new StreamImpl(scope1, streamName1);
CompletableFuture<Map<Segment, Long>> segments = controller.getSegmentsAtTime(stream1, System.currentTimeMillis());
assertEquals(2, segments.get().size());
// PS2:get positions of a stream with different count (stream2 has 3 segments)
Stream stream2 = new StreamImpl(scope1, streamName2);
segments = controller.getSegmentsAtTime(stream2, System.currentTimeMillis());
assertEquals(3, segments.get().size());
// PS4:get positions at a given timestamp for non-existent stream.
Stream stream = new StreamImpl("scope", "streamName");
assertFutureThrows("Fetching segments at given time stamp for non existent stream ", controller.getSegmentsAtTime(stream, System.currentTimeMillis()), t -> true);
// PS5:Get position at time before stream creation - still resolves to the initial 2 segments,
// while the CURRENT segment count is 3 after the minNumSegments update above.
segments = controller.getSegmentsAtTime(stream1, System.currentTimeMillis() - 36000);
assertEquals(segments.join().size(), 2);
assertEquals(controller.getCurrentSegments(scope1, streamName1).get().getSegments().size(), 3);
// PS6:Get positions at a time in future after stream creation
segments = controller.getSegmentsAtTime(stream1, System.currentTimeMillis() + 3600);
assertTrue(!segments.get().isEmpty());
}
Example usage of io.pravega.segmentstore.contracts.tables.TableStore from the Pravega project: class EndToEndTransactionOrderTest, method setUp.
/**
 * Test fixture setup: starts ZK, a Controller wrapper, an in-memory segment store with
 * auto-scale monitoring, then creates the "test" scope/stream, a reader group, and a reader.
 * Assigns instance fields declared on the enclosing class; startup order matters
 * (controller must be running before scope/stream creation).
 */
@Before
public void setUp() throws Exception {
zkTestServer = new TestingServerStarter().start();
int port = Config.SERVICE_PORT;
controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), false, controllerPort, serviceHost, servicePort, Config.HOST_STORE_CONTAINER_COUNT);
controller = controllerWrapper.getController();
connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
// Client factory on the internal scope - used by the auto-scale monitor below.
internalCF = new ClientFactoryImpl(NameUtils.INTERNAL_SCOPE_NAME, controller, connectionFactory);
ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
serviceBuilder.initialize();
StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
TableStore tableStore = serviceBuilder.createTableStoreService();
controllerWrapper.getControllerService().createScope(NameUtils.INTERNAL_SCOPE_NAME, 0L).get();
// Zero mute/cooldown so scale events fire immediately during the test.
autoScaleMonitor = new AutoScaleMonitor(store, internalCF, AutoScalerConfig.builder().with(AutoScalerConfig.MUTE_IN_SECONDS, 0).with(AutoScalerConfig.COOLDOWN_IN_SECONDS, 0).build());
server = new PravegaConnectionListener(false, false, "localhost", servicePort, store, tableStore, autoScaleMonitor.getStatsRecorder(), autoScaleMonitor.getTableSegmentStatsRecorder(), null, null, null, true, serviceBuilder.getLowPriorityExecutor(), SecurityConfigDefaults.TLS_PROTOCOL_VERSION);
server.startListening();
controllerWrapper.awaitRunning();
controllerWrapper.getControllerService().createScope("test", 0L).get();
controller.createStream("test", "test", config).get();
clientFactory = new MockClientFactory("test", controller, internalCF.getConnectionPool());
readerGroupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
// Reader group auto-checkpoints every 2s and refreshes group state every 1s.
readerGroupManager.createReaderGroup("readergrp", ReaderGroupConfig.builder().automaticCheckpointIntervalMillis(2000).groupRefreshTimeMillis(1000).stream("test/test").build());
reader = clientFactory.createReader("1", "readergrp", new IntegerSerializer(), ReaderConfig.builder().build());
}
Aggregations