Example usage of io.pravega.segmentstore.contracts.tables.TableStore in the Pravega project:
the testGetTableEntries method of the PravegaRequestProcessorTest class.
/**
 * Verifies that {@code PravegaRequestProcessor.readTableEntries} correctly iterates a Table
 * Segment's entries over the wire protocol: a full read (suggestedEntryCount == entry count),
 * a partial read (suggestedEntryCount &lt; entry count), and a resumed read using the
 * continuation token returned by the partial read.
 */
@Test
public void testGetTableEntries() throws Exception {
// Set up PravegaRequestProcessor instance to execute requests against
val rnd = new Random(0);
String tableSegmentName = "testGetTableEntries";
@Cleanup ServiceBuilder serviceBuilder = newInlineExecutionInMemoryBuilder(getBuilderConfig());
serviceBuilder.initialize();
StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
TableStore tableStore = serviceBuilder.createTableStoreService();
// The connection is mocked so we can capture and inspect the WireCommands sent back.
ServerConnection connection = mock(ServerConnection.class);
// InOrder enforces that replies arrive in the same order the requests were issued.
InOrder order = inOrder(connection);
val recorderMock = mock(TableSegmentStatsRecorder.class);
PravegaRequestProcessor processor = new PravegaRequestProcessor(store, tableStore, new TrackedConnection(connection), SegmentStatsRecorder.noOp(), recorderMock, new PassingTokenVerifier(), false);
// Generate keys. All three entries share the same value so the value check below is uniform.
ArrayList<ArrayView> keys = generateKeys(3, rnd);
ArrayView testValue = generateValue(rnd);
TableEntry e1 = TableEntry.unversioned(keys.get(0), testValue);
TableEntry e2 = TableEntry.unversioned(keys.get(1), testValue);
TableEntry e3 = TableEntry.unversioned(keys.get(2), testValue);
// Create a table segment and add data.
processor.createTableSegment(new WireCommands.CreateTableSegment(1, tableSegmentName, false, 0, "", 0));
order.verify(connection).send(new WireCommands.SegmentCreated(1, tableSegmentName));
verify(recorderMock).createTableSegment(eq(tableSegmentName), any());
processor.updateTableEntries(new WireCommands.UpdateTableEntries(2, tableSegmentName, "", getTableEntries(asList(e1, e2, e3)), WireCommands.NULL_TABLE_SEGMENT_OFFSET));
verify(recorderMock).updateEntries(eq(tableSegmentName), eq(3), eq(false), any());
// 1. Now read the table entries where suggestedEntryCount is equal to number of entries in the Table Store.
// Empty buffers mean: start iteration from the beginning, no prefix filter.
WireCommands.TableIteratorArgs args = new WireCommands.TableIteratorArgs(Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER);
processor.readTableEntries(new WireCommands.ReadTableEntries(3, tableSegmentName, "", 3, args));
// Capture the WireCommands sent.
// Two sends are expected at this point: TableEntriesUpdated (from the update above,
// ordered after SegmentCreated) followed by TableEntriesRead (from this read).
ArgumentCaptor<WireCommand> wireCommandsCaptor = ArgumentCaptor.forClass(WireCommand.class);
order.verify(connection, times(2)).send(wireCommandsCaptor.capture());
verify(recorderMock).iterateEntries(eq(tableSegmentName), eq(3), any());
// Verify the WireCommands.
List<Long> keyVersions = ((WireCommands.TableEntriesUpdated) wireCommandsCaptor.getAllValues().get(0)).getUpdatedVersions();
WireCommands.TableEntriesRead getTableEntriesIteratorsResp = (WireCommands.TableEntriesRead) wireCommandsCaptor.getAllValues().get(1);
// The versions reported by the iterator must match those assigned by the update.
assertTrue(getTableEntriesIteratorsResp.getEntries().getEntries().stream().map(e -> e.getKey().getKeyVersion()).collect(Collectors.toList()).containsAll(keyVersions));
// Verify if the value is correct.
assertTrue(getTableEntriesIteratorsResp.getEntries().getEntries().stream().allMatch(e -> {
ByteBuf buf = e.getValue().getData();
byte[] bytes = new byte[buf.readableBytes()];
// getBytes (not readBytes) leaves the buffer's readerIndex untouched.
buf.getBytes(buf.readerIndex(), bytes);
return testValue.equals(new ByteArraySegment(bytes));
}));
// 2. Now read the table keys where suggestedEntryCount is less than the number of entries in the Table Store.
processor.readTableEntries(new WireCommands.ReadTableEntries(3, tableSegmentName, "", 1, args));
// Capture the WireCommands sent.
ArgumentCaptor<WireCommands.TableEntriesRead> tableEntriesCaptor = ArgumentCaptor.forClass(WireCommands.TableEntriesRead.class);
order.verify(connection, times(1)).send(tableEntriesCaptor.capture());
// Verify the WireCommands.
getTableEntriesIteratorsResp = tableEntriesCaptor.getAllValues().get(0);
// Only one entry should come back since suggestedEntryCount was 1.
assertEquals(1, getTableEntriesIteratorsResp.getEntries().getEntries().size());
assertTrue(keyVersions.contains(getTableEntriesIteratorsResp.getEntries().getEntries().get(0).getKey().getKeyVersion()));
// Get the last state.
// The continuation token lets the next read resume where this one stopped.
ByteBuf state = getTableEntriesIteratorsResp.getContinuationToken();
args = new WireCommands.TableIteratorArgs(state, Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER, Unpooled.EMPTY_BUFFER);
// 3. Now read the remaining table entries by providing a higher suggestedEntryCount and the state to the iterator.
processor.readTableEntries(new WireCommands.ReadTableEntries(3, tableSegmentName, "", 3, args));
// Capture the WireCommands sent.
tableEntriesCaptor = ArgumentCaptor.forClass(WireCommands.TableEntriesRead.class);
order.verify(connection, times(1)).send(tableEntriesCaptor.capture());
verify(recorderMock).iterateEntries(eq(tableSegmentName), eq(1), any());
// Verify the WireCommands.
getTableEntriesIteratorsResp = tableEntriesCaptor.getAllValues().get(0);
// The remaining two entries complete the iteration.
assertEquals(2, getTableEntriesIteratorsResp.getEntries().getEntries().size());
assertTrue(keyVersions.containsAll(getTableEntriesIteratorsResp.getEntries().getEntries().stream().map(e -> e.getKey().getKeyVersion()).collect(Collectors.toList())));
}
Example usage of io.pravega.segmentstore.contracts.tables.TableStore in the Pravega project:
the testEndToEnd method of the TableServiceTests class.
// endregion
/**
* Tests an End-to-End scenario for a {@link TableStore} implementation using a real implementation of {@link StreamSegmentStore}
* (without any mocks or manual event triggering or other test aids). Features tested:
* - Table Segment creation and deletion.
* - Conditional and unconditional updates.
* - Conditional and unconditional removals.
* - Recovering of Table Segments after failover.
*
* This tests both Hash Table Segments and Fixed-Key-Length Table Segments.
*/
@Test
public void testEndToEnd() throws Exception {
val rnd = new Random(0);
// Run the whole scenario against both table segment flavors.
val segmentTypes = new SegmentType[] { SegmentType.builder().tableSegment().build(), SegmentType.builder().fixedKeyLengthTableSegment().build() };
ArrayList<String> segmentNames;
// keyInfo is the test's in-memory "expected state"; each phase mutates it in lockstep
// with the store and then check() compares the two.
HashMap<BufferView, EntryData> keyInfo;
// Phase 1: Create some segments and update some data (unconditionally).
log.info("Starting Phase 1");
// Each phase uses a fresh builder; closing it and creating a new one forces a recovery.
try (val builder = createBuilder()) {
val tableStore = builder.createTableStoreService();
// Create the Table Segments.
segmentNames = createSegments(tableStore, segmentTypes);
log.info("Created Segments: {}.", String.join(", ", segmentNames));
// Generate the keys and map them to segments.
keyInfo = generateKeysForSegments(segmentNames, rnd);
// Unconditional updates.
val updates = generateUpdates(keyInfo, false, rnd);
val updateVersions = executeUpdates(updates, tableStore);
acceptUpdates(updates, updateVersions, keyInfo);
log.info("Finished unconditional updates.");
// Check.
check(keyInfo, tableStore);
log.info("Finished Phase 1");
}
// Phase 2: Force a recovery and remove all data (unconditionally)
log.info("Starting Phase 2");
try (val builder = createBuilder()) {
val tableStore = builder.createTableStoreService();
// Check (after recovery)
check(keyInfo, tableStore);
// Unconditional removals.
val removals = generateRemovals(keyInfo, false);
executeRemovals(removals, tableStore);
acceptRemovals(removals, keyInfo);
// Check.
check(keyInfo, tableStore);
log.info("Finished Phase 2");
}
// Phase 3: Force a recovery and conditionally update and remove data
log.info("Starting Phase 3");
try (val builder = createBuilder()) {
val tableStore = builder.createTableStoreService();
// Check (after recovery).
check(keyInfo, tableStore);
// Conditional update.
val updates = generateUpdates(keyInfo, true, rnd);
val updateVersions = executeUpdates(updates, tableStore);
acceptUpdates(updates, updateVersions, keyInfo);
// Offset-conditioned updates: -1 appears to act as a sentinel for "no offset condition"
// here — confirm against the TableStore contract.
val offsetConditionedUpdates = generateUpdates(keyInfo, true, rnd);
val offsetUpdateVersions = executeOffsetConditionalUpdates(offsetConditionedUpdates, -1L, tableStore);
acceptUpdates(offsetConditionedUpdates, offsetUpdateVersions, keyInfo);
log.info("Finished conditional updates.");
// Check.
check(keyInfo, tableStore);
// Conditional remove.
val removals = generateRemovals(keyInfo, true);
executeRemovals(removals, tableStore);
acceptRemovals(removals, keyInfo);
val offsetConditionedRemovals = generateRemovals(keyInfo, true);
executeOffsetConditonalRemovals(offsetConditionedRemovals, -1L, tableStore);
acceptRemovals(offsetConditionedRemovals, keyInfo);
log.info("Finished conditional removes.");
// Check.
check(keyInfo, tableStore);
log.info("Finished Phase 3");
}
// Phase 4: Force a recovery and conditionally remove all data
log.info("Starting Phase 4");
try (val builder = createBuilder()) {
val tableStore = builder.createTableStoreService();
// Check (after recovery)
check(keyInfo, tableStore);
// Conditional update again.
val updates = generateUpdates(keyInfo, true, rnd);
val updateVersions = executeUpdates(updates, tableStore);
acceptUpdates(updates, updateVersions, keyInfo);
log.info("Finished conditional updates.");
// Check.
check(keyInfo, tableStore);
// Delete all.
val deletions = segmentNames.stream().map(s -> tableStore.deleteSegment(s, false, TIMEOUT)).collect(Collectors.toList());
Futures.allOf(deletions).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
log.info("Finished Phase 4");
}
}
Example usage of io.pravega.segmentstore.contracts.tables.TableStore in the Pravega project:
the executeOffsetConditonalRemovals method of the TableServiceTests class.
/**
 * Issues an offset-conditioned removal for each segment's keys and waits for all of them
 * to complete (or for {@code TIMEOUT} to elapse).
 *
 * @param removals           Segment name -> keys to remove from that segment.
 * @param tableSegmentOffset Offset condition passed through to {@code TableStore.remove}.
 * @param tableStore         The store to remove from.
 */
private void executeOffsetConditonalRemovals(HashMap<String, ArrayList<TableKey>> removals, long tableSegmentOffset, TableStore tableStore) throws Exception {
    // Kick off one removal per segment. The segment name is not needed after the call
    // is issued, so collect just the resulting futures rather than a name-keyed map.
    val pendingRemovals = removals.entrySet().stream()
            .map(entry -> tableStore.remove(entry.getKey(), entry.getValue(), tableSegmentOffset, TIMEOUT))
            .collect(Collectors.toList());
    // Block until every removal has finished.
    Futures.allOf(pendingRemovals).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
}
Example usage of io.pravega.segmentstore.contracts.tables.TableStore in the Pravega project:
the executeRemovals method of the TableServiceTests class.
/**
 * Issues an unconditional removal for each segment's keys and waits for all of them
 * to complete (or for {@code TIMEOUT} to elapse).
 *
 * @param removals   Segment name -> keys to remove from that segment.
 * @param tableStore The store to remove from.
 */
private void executeRemovals(HashMap<String, ArrayList<TableKey>> removals, TableStore tableStore) throws Exception {
    // Launch one removal call per segment; only the futures are needed afterwards,
    // so there is no reason to keep them keyed by segment name.
    val pendingRemovals = removals.entrySet().stream()
            .map(entry -> tableStore.remove(entry.getKey(), entry.getValue(), TIMEOUT))
            .collect(Collectors.toList());
    // Block until every removal has finished.
    Futures.allOf(pendingRemovals).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
}
Example usage of io.pravega.segmentstore.contracts.tables.TableStore in the Pravega project:
the main method of the EndToEndAutoScaleUpWithTxnTest class.
/**
 * Standalone end-to-end test: boots an in-process ZooKeeper, Controller and in-memory
 * Segment Store, writes transactional events while manually triggering a stream scale,
 * verifies the events are read back in commit order, then keeps writing until auto-scaling
 * has advanced the stream past epoch 5. Exits with code 0 on success, 1 on scale failure,
 * -1 on any setup/runtime exception.
 */
public static void main(String[] args) throws Exception {
    try {
        // --- Bootstrap: ZK, Controller, and an in-memory Segment Store + Table Store. ---
        @Cleanup TestingServer zkTestServer = new TestingServerStarter().start();
        int port = Config.SERVICE_PORT;
        @Cleanup ControllerWrapper controllerWrapper = new ControllerWrapper(zkTestServer.getConnectString(), port);
        Controller controller = controllerWrapper.getController();
        controllerWrapper.getControllerService().createScope(NameUtils.INTERNAL_SCOPE_NAME, 0L).get();
        @Cleanup ConnectionFactory connectionFactory = new SocketConnectionFactoryImpl(ClientConfig.builder().build());
        @Cleanup ClientFactoryImpl internalCF = new ClientFactoryImpl(NameUtils.INTERNAL_SCOPE_NAME, controller, connectionFactory);
        @Cleanup("shutdownNow") val executor = ExecutorServiceHelpers.newScheduledThreadPool(1, "test");
        @Cleanup ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
        serviceBuilder.initialize();
        StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
        TableStore tableStore = serviceBuilder.createTableStoreService();
        // Zero mute/cooldown so the auto-scaler reacts immediately to load.
        @Cleanup AutoScaleMonitor autoScaleMonitor = new AutoScaleMonitor(store, internalCF, AutoScalerConfig.builder().with(AutoScalerConfig.MUTE_IN_SECONDS, 0).with(AutoScalerConfig.COOLDOWN_IN_SECONDS, 0).build());
        @Cleanup PravegaConnectionListener server = new PravegaConnectionListener(false, false, "localhost", 12345, store, tableStore, autoScaleMonitor.getStatsRecorder(), autoScaleMonitor.getTableSegmentStatsRecorder(), null, null, null, true, serviceBuilder.getLowPriorityExecutor(), Config.TLS_PROTOCOL_VERSION.toArray(new String[Config.TLS_PROTOCOL_VERSION.size()]));
        server.startListening();
        controllerWrapper.awaitRunning();
        controllerWrapper.getControllerService().createScope("test", 0L).get();
        controller.createStream("test", "test", CONFIG).get();
        @Cleanup MockClientFactory clientFactory = new MockClientFactory("test", controller, internalCF.getConnectionPool());
        // Mocking pravega service by putting scale up and scale down requests for the stream
        EventWriterConfig writerConfig = EventWriterConfig.builder().transactionTimeoutTime(30000).build();
        TransactionalEventStreamWriter<String> test = clientFactory.createTransactionalEventWriter("writer", "test", new UTF8StringSerializer(), writerConfig);
        // region Successful commit tests
        // txn1 stays open across the scale operation below; committing it afterwards
        // exercises transactions that straddle an epoch change.
        Transaction<String> txn1 = test.beginTxn();
        txn1.writeEvent("1");
        txn1.flush();
        // Manually scale segment 0 into three equal key ranges.
        Map<Double, Double> map = new HashMap<>();
        map.put(0.0, 1.0 / 3.0);
        map.put(1.0 / 3.0, 2.0 / 3.0);
        map.put(2.0 / 3.0, 1.0);
        Stream stream = new StreamImpl("test", "test");
        controller.startScale(stream, Collections.singletonList(0L), map).get();
        Transaction<String> txn2 = test.beginTxn();
        txn2.writeEvent("2");
        txn2.flush();
        // Commit out of creation order: txn2 first, then txn1. Read order below must
        // still follow commit order ("1" before "2" is asserted after txn1 commits first
        // in event-visibility terms — see assertions).
        txn2.commit();
        txn1.commit();
        Thread.sleep(1000);
        @Cleanup ReaderGroupManager readerGroupManager = new ReaderGroupManagerImpl("test", controller, clientFactory);
        readerGroupManager.createReaderGroup("readergrp", ReaderGroupConfig.builder().stream("test/test").build());
        final EventStreamReader<String> reader = clientFactory.createReader("1", "readergrp", new JavaSerializer<>(), ReaderConfig.builder().build());
        String event1 = reader.readNextEvent(SECONDS.toMillis(60)).getEvent();
        String event2 = reader.readNextEvent(SECONDS.toMillis(60)).getEvent();
        // NOTE: these require the JVM to run with -ea; otherwise they are no-ops.
        assert event1.equals("1");
        assert event2.equals("2");
        // Drive continuous write load so the auto-scaler splits segments, and poll with
        // exponential backoff until some segment reaches an epoch greater than 5.
        final AtomicBoolean done = new AtomicBoolean(false);
        startWriter(test, done);
        Retry.withExpBackoff(10, 10, 100, 10000).retryingOn(NotDoneException.class).throwingOn(RuntimeException.class).runAsync(() -> controller.getCurrentSegments("test", "test").thenAccept(streamSegments -> {
            if (streamSegments.getSegments().stream().anyMatch(x -> NameUtils.getEpoch(x.getSegmentId()) > 5)) {
                System.err.println("Success");
                log.info("Success");
                System.exit(0);
            } else {
                throw new NotDoneException();
            }
        }), executor).exceptionally(e -> {
            System.err.println("Failure");
            log.error("Failure");
            System.exit(1);
            return null;
        }).get();
    } catch (Throwable e) {
        System.err.print("Test failed with exception: " + e.getMessage());
        // Fixed: the previous call was log.error("... {}", e). Since 'e' is statically a
        // Throwable, the (String, Throwable) overload is selected, so the "{}" was never
        // substituted and appeared literally in the log line. Passing the message without
        // a placeholder logs the message plus the full stack trace as intended.
        log.error("Test failed with exception", e);
        System.exit(-1);
    }
    System.exit(0);
}
End of aggregated TableStore usage examples.