Use of io.pravega.client.connection.impl.ConnectionPool in project pravega by pravega.
Example: the SegmentHelper class, method removeTableKeys.
/**
 * This method sends a WireCommand to remove table keys.
 *
 * @param tableName Qualified table name.
 * @param keys List of {@link TableSegmentKey}s to be removed. Only if all the elements in the list have version
 * as {@link TableSegmentKeyVersion#NO_VERSION} then an unconditional update/removal is performed.
 * Else an atomic conditional update (removal) is performed.
 * @param delegationToken The token to be presented to the Segment Store.
 * @param clientRequestId Request id.
 * @return A CompletableFuture that will complete normally when the provided keys are deleted.
 * If the operation failed, the future will be failed with the causing exception. If the exception can be
 * retried then the future will be failed with {@link WireCommandFailedException}.
 */
public CompletableFuture<Void> removeTableKeys(final String tableName, final List<TableSegmentKey> keys, String delegationToken, final long clientRequestId) {
    final Controller.NodeUri uri = getTableUri(tableName);
    final WireCommandType type = WireCommandType.REMOVE_TABLE_KEYS;
    // Translate client-facing keys into wire-protocol keys. A method reference replaces the
    // original multi-statement lambda that only called convertToWireCommand and returned it.
    final List<WireCommands.TableKey> keyList = keys.stream()
            .map(this::convertToWireCommand)
            .collect(Collectors.toList());
    RawClient connection = new RawClient(ModelHelper.encode(uri), connectionPool);
    // Each RawClient carries its own flow; its id is used as the wire-level request id.
    final long requestId = connection.getFlow().asLong();
    WireCommands.RemoveTableKeys request = new WireCommands.RemoveTableKeys(requestId, tableName, delegationToken, keyList, WireCommands.NULL_TABLE_SEGMENT_OFFSET);
    // handleReply validates the reply type and converts failures into (possibly retryable) exceptions.
    return sendRequest(connection, clientRequestId, request)
            .thenAccept(rpl -> handleReply(clientRequestId, rpl, connection, tableName, WireCommands.RemoveTableKeys.class, type));
}
Use of io.pravega.client.connection.impl.ConnectionPool in project pravega by pravega.
Example: the SegmentHelper class, method mergeTxnSegments.
/**
 * Merges the given transaction segments into the target stream segment as a single batch operation.
 *
 * @param scope           Stream scope.
 * @param stream          Stream name.
 * @param targetSegmentId Segment that the transactions are merged into.
 * @param sourceSegmentId Segment the transactions were opened against; must share the same
 *                        segment number as the target.
 * @param txId            Transaction ids whose segments are merged.
 * @param delegationToken Token presented to the Segment Store.
 * @param clientRequestId Request id used for tracing/reply handling.
 * @return A future completing with the new target write offsets reported by the Segment Store.
 */
public CompletableFuture<List<Long>> mergeTxnSegments(final String scope, final String stream, final long targetSegmentId, final long sourceSegmentId, final List<UUID> txId, final String delegationToken, final long clientRequestId) {
    Preconditions.checkArgument(getSegmentNumber(targetSegmentId) == getSegmentNumber(sourceSegmentId));
    final WireCommandType commandType = WireCommandType.MERGE_SEGMENTS_BATCH;
    final Controller.NodeUri nodeUri = getSegmentUri(scope, stream, sourceSegmentId);
    final String targetSegmentName = getQualifiedStreamSegmentName(scope, stream, targetSegmentId);
    // Resolve each transaction id to its fully-qualified transaction segment name.
    final List<String> txnSegmentNames = txId.stream()
            .map(id -> getTransactionName(scope, stream, sourceSegmentId, id))
            .collect(Collectors.toList());
    final RawClient client = new RawClient(ModelHelper.encode(nodeUri), connectionPool);
    final long flowRequestId = client.getFlow().asLong();
    final WireCommands.MergeSegmentsBatch mergeRequest =
            new WireCommands.MergeSegmentsBatch(flowRequestId, targetSegmentName, txnSegmentNames, delegationToken);
    return sendRequest(client, clientRequestId, mergeRequest)
            .thenApply(reply -> {
                // Validate the reply (throws on failure), then extract the merged offsets.
                handleReply(clientRequestId, reply, client, targetSegmentName, WireCommands.MergeSegmentsBatch.class, commandType);
                return ((WireCommands.SegmentsBatchMerged) reply).getNewTargetWriteOffset();
            });
}
Use of io.pravega.client.connection.impl.ConnectionPool in project pravega by pravega.
Example: the SegmentHelper class, method abortTransaction.
/**
 * Aborts a transaction by deleting its transaction segment on the owning Segment Store.
 *
 * @param scope           Stream scope.
 * @param stream          Stream name.
 * @param segmentId       Segment the transaction was opened against.
 * @param txId            Transaction id.
 * @param delegationToken Token presented to the Segment Store.
 * @param clientRequestId Request id used for tracing/reply handling.
 * @return A future completing with a SUCCESS TxnStatus once the segment has been deleted.
 */
public CompletableFuture<TxnStatus> abortTransaction(final String scope, final String stream, final long segmentId, final UUID txId, final String delegationToken, final long clientRequestId) {
    final String txnSegmentName = getTransactionName(scope, stream, segmentId, txId);
    final WireCommandType commandType = WireCommandType.DELETE_SEGMENT;
    final Controller.NodeUri nodeUri = getSegmentUri(scope, stream, segmentId);
    final RawClient client = new RawClient(ModelHelper.encode(nodeUri), connectionPool);
    final long flowRequestId = client.getFlow().asLong();
    final WireCommands.DeleteSegment deleteRequest =
            new WireCommands.DeleteSegment(flowRequestId, txnSegmentName, delegationToken);
    return sendRequest(client, clientRequestId, deleteRequest)
            // handleReply validates the reply and raises on failure; on success report SUCCESS.
            .thenAccept(reply -> handleReply(clientRequestId, reply, client, txnSegmentName, WireCommands.DeleteSegment.class, commandType))
            .thenApply(ignored -> TxnStatus.newBuilder().setStatus(TxnStatus.Status.SUCCESS).build());
}
Use of io.pravega.client.connection.impl.ConnectionPool in project pravega by pravega.
Example: the ControllerEventProcessorsTest class, method testTruncate.
@Test(timeout = 10000L)
public void testTruncate() throws CheckpointStoreException, InterruptedException {
// Verifies that ControllerEventProcessors periodically truncates all four internal streams
// (request, commit, abort, KVT) and keeps retrying after transient failures in both
// startTruncation and the checkpoint store.
// Mock out every collaborator; only truncation-related interactions get real stubbed behavior.
LocalController controller = mock(LocalController.class);
CheckpointStore checkpointStore = mock(CheckpointStore.class);
StreamMetadataStore streamStore = mock(StreamMetadataStore.class);
BucketStore bucketStore = mock(BucketStore.class);
ConnectionPool connectionPool = mock(ConnectionPool.class);
StreamMetadataTasks streamMetadataTasks = mock(StreamMetadataTasks.class);
StreamTransactionMetadataTasks streamTransactionMetadataTasks = mock(StreamTransactionMetadataTasks.class);
KVTableMetadataStore kvtStore = mock(KVTableMetadataStore.class);
TableMetadataTasks kvtTasks = mock(TableMetadataTasks.class);
ControllerEventProcessorConfig config = ControllerEventProcessorConfigImpl.withDefault();
EventProcessorSystem system = mock(EventProcessorSystem.class);
// Reader positions: maps of segment(+range) -> offset, wrapped in PositionImpl below.
Map<SegmentWithRange, Long> map1 = new HashMap<>();
map1.put(new SegmentWithRange(new Segment("scope", "stream", 0L), 0.0, 0.33), 10L);
map1.put(new SegmentWithRange(new Segment("scope", "stream", 1L), 0.33, 0.66), 10L);
map1.put(new SegmentWithRange(new Segment("scope", "stream", 2L), 0.66, 1.0), 20L);
Map<SegmentWithRange, Long> map2 = new HashMap<>();
map2.put(new SegmentWithRange(new Segment("scope", "stream", 0L), 0.0, 0.33), 20L);
map2.put(new SegmentWithRange(new Segment("scope", "stream", 2L), 0.66, 1.0), 10L);
Map<SegmentWithRange, Long> map3 = new HashMap<>();
map3.put(new SegmentWithRange(new Segment("scope", "stream", 3L), 0.0, 0.33), 0L);
map3.put(new SegmentWithRange(new Segment("scope", "stream", 4L), 0.33, 0.66), 10L);
map3.put(new SegmentWithRange(new Segment("scope", "stream", 5L), 0.66, 1.0), 20L);
PositionImpl position1 = new PositionImpl(map1);
// NOTE(review): position2/position3 are built but only position1 is used for r1/r2/r3 below —
// confirm whether r2/r3 were meant to use position2/position3.
PositionImpl position2 = new PositionImpl(map2);
PositionImpl position3 = new PositionImpl(map3);
// Let startup succeed: processor group creation, scope creation, internal stream creation.
doReturn(getProcessor()).when(system).createEventProcessorGroup(any(), any(), any());
doReturn(CompletableFuture.completedFuture(null)).when(controller).createScope(anyString());
doReturn(CompletableFuture.completedFuture(null)).when(controller).createInternalStream(anyString(), anyString(), any());
doNothing().when(streamMetadataTasks).initializeStreamWriters(any(), anyString());
doNothing().when(streamTransactionMetadataTasks).initializeStreamWriters(any(EventStreamClientFactory.class), any(ControllerEventProcessorConfig.class));
AtomicBoolean requestCalled = new AtomicBoolean(false);
AtomicBoolean commitCalled = new AtomicBoolean(false);
// Each future completes once truncation of the corresponding internal stream succeeds.
CompletableFuture<Void> requestStreamTruncationFuture = new CompletableFuture<>();
CompletableFuture<Void> kvtStreamTruncationFuture = new CompletableFuture<>();
CompletableFuture<Void> abortStreamTruncationFuture = new CompletableFuture<>();
CompletableFuture<Void> commitStreamTruncationFuture = new CompletableFuture<>();
// Stub startTruncation per stream: request stream throws once, commit stream first returns
// false (not started), abort/KVT streams succeed immediately. All paths must eventually
// complete, proving the truncation loop retries failures.
doAnswer(x -> {
String argument = x.getArgument(1);
if (argument.equals(config.getRequestStreamName())) {
// let one of the processors throw the exception. this should still be retried in the next cycle.
if (!requestCalled.get()) {
requestCalled.set(true);
throw new RuntimeException("inducing sporadic failure");
} else {
requestStreamTruncationFuture.complete(null);
}
} else if (argument.equals(config.getCommitStreamName())) {
// let one of the processors throw the exception. this should still be retried in the next cycle.
if (commitCalled.get()) {
commitStreamTruncationFuture.complete(null);
} else {
commitCalled.set(true);
return CompletableFuture.completedFuture(false);
}
} else if (argument.equals(config.getAbortStreamName())) {
abortStreamTruncationFuture.complete(null);
} else if (argument.equals(config.getKvtStreamName())) {
kvtStreamTruncationFuture.complete(null);
}
return CompletableFuture.completedFuture(true);
}).when(streamMetadataTasks).startTruncation(anyString(), anyString(), any(), any());
Set<String> processes = Sets.newHashSet("p1", "p2", "p3");
// first throw checkpoint store exception
AtomicBoolean signal = new AtomicBoolean(false);
CountDownLatch cd = new CountDownLatch(4);
// getProcesses fails with CheckpointStoreException for all 4 streams until 'signal' is set,
// then returns the three registered processes.
doAnswer(x -> {
// this ensures that the call to truncate has been invoked for all 4 internal streams.
cd.countDown();
cd.await();
if (!signal.get()) {
throw new CheckpointStoreException("CheckpointStoreException");
} else {
return processes;
}
}).when(checkpointStore).getProcesses();
// Each process reports a single reader checkpointed at position1.
Map<String, PositionImpl> r1 = Collections.singletonMap("r1", position1);
doReturn(r1).when(checkpointStore).getPositions(eq("p1"), anyString());
Map<String, PositionImpl> r2 = Collections.singletonMap("r2", position1);
doReturn(r2).when(checkpointStore).getPositions(eq("p2"), anyString());
Map<String, PositionImpl> r3 = Collections.singletonMap("r3", position1);
doReturn(r3).when(checkpointStore).getPositions(eq("p3"), anyString());
@Cleanup ControllerEventProcessors processors = new ControllerEventProcessors("host1", config, controller, checkpointStore, streamStore, bucketStore, connectionPool, streamMetadataTasks, streamTransactionMetadataTasks, kvtStore, kvtTasks, system, executorService());
// set truncation interval
processors.setTruncationInterval(100L);
processors.startAsync();
processors.awaitRunning();
ControllerEventProcessors processorsSpied = spy(processors);
processorsSpied.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks, kvtTasks);
// wait for all 4 countdown exceptions to have been thrown.
cd.await();
// While getProcesses keeps failing, truncate is attempted but never progresses to
// reading positions or starting truncation.
verify(processorsSpied, atLeast(4)).truncate(any(), any(), any());
verify(checkpointStore, atLeast(4)).getProcesses();
verify(checkpointStore, never()).getPositions(anyString(), anyString());
verify(streamMetadataTasks, never()).startTruncation(anyString(), anyString(), any(), any());
// Allow getProcesses to succeed; truncation of all 4 streams must eventually complete.
signal.set(true);
CompletableFuture.allOf(requestStreamTruncationFuture, commitStreamTruncationFuture, abortStreamTruncationFuture, kvtStreamTruncationFuture).join();
// verify that truncate method is being called periodically.
verify(processorsSpied, atLeastOnce()).truncate(config.getRequestStreamName(), config.getRequestReaderGroupName(), streamMetadataTasks);
verify(processorsSpied, atLeastOnce()).truncate(config.getCommitStreamName(), config.getCommitReaderGroupName(), streamMetadataTasks);
verify(processorsSpied, atLeastOnce()).truncate(config.getAbortStreamName(), config.getAbortReaderGroupName(), streamMetadataTasks);
verify(processorsSpied, atLeastOnce()).truncate(config.getKvtStreamName(), config.getKvtReaderGroupName(), streamMetadataTasks);
// Positions of every process must have been consulted for every reader group.
for (int i = 1; i <= 3; i++) {
verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getRequestReaderGroupName());
verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getCommitReaderGroupName());
verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getAbortReaderGroupName());
verify(checkpointStore, atLeastOnce()).getPositions("p" + i, config.getKvtReaderGroupName());
}
}
Use of io.pravega.client.connection.impl.ConnectionPool in project pravega by pravega.
Example: the ControllerEventProcessorsTest class, method testBootstrap.
@Test(timeout = 30000L)
public void testBootstrap() throws Exception {
// Verifies that bootstrap retries createScope and createInternalStream on failure and only
// reports completion once the scope and all four internal streams have been created.
LocalController controller = mock(LocalController.class);
CheckpointStore checkpointStore = mock(CheckpointStore.class);
StreamMetadataStore streamStore = mock(StreamMetadataStore.class);
BucketStore bucketStore = mock(BucketStore.class);
ConnectionPool connectionPool = mock(ConnectionPool.class);
StreamMetadataTasks streamMetadataTasks = mock(StreamMetadataTasks.class);
StreamTransactionMetadataTasks streamTransactionMetadataTasks = mock(StreamTransactionMetadataTasks.class);
KVTableMetadataStore kvtStore = mock(KVTableMetadataStore.class);
TableMetadataTasks kvtTasks = mock(TableMetadataTasks.class);
ControllerEventProcessorConfig config = ControllerEventProcessorConfigImpl.withDefault();
EventProcessorSystem system = mock(EventProcessorSystem.class);
doAnswer(x -> null).when(streamMetadataTasks).initializeStreamWriters(any(), any());
doAnswer(x -> null).when(streamTransactionMetadataTasks).initializeStreamWriters(any(EventStreamClientFactory.class), any(ControllerEventProcessorConfig.class));
// For each createScope call: a 'signal' future completes when the call arrives, and a
// 'response' future is handed back so the test controls when/how the call finishes.
// Two pairs: one failed attempt plus one successful retry.
LinkedBlockingQueue<CompletableFuture<Boolean>> createScopeResponses = new LinkedBlockingQueue<>();
LinkedBlockingQueue<CompletableFuture<Void>> createScopeSignals = new LinkedBlockingQueue<>();
List<CompletableFuture<Boolean>> createScopeResponsesList = new LinkedList<>();
List<CompletableFuture<Void>> createScopeSignalsList = new LinkedList<>();
for (int i = 0; i < 2; i++) {
CompletableFuture<Boolean> responseFuture = new CompletableFuture<>();
CompletableFuture<Void> signalFuture = new CompletableFuture<>();
createScopeResponsesList.add(responseFuture);
createScopeResponses.add(responseFuture);
createScopeSignalsList.add(signalFuture);
createScopeSignals.add(signalFuture);
}
// return a future from latches queue
doAnswer(x -> {
createScopeSignals.take().complete(null);
return createScopeResponses.take();
}).when(controller).createScope(anyString());
// Same signal/response pattern for createInternalStream: 8 pairs cover one failed round
// and one successful round across the 4 internal streams.
LinkedBlockingQueue<CompletableFuture<Boolean>> createStreamResponses = new LinkedBlockingQueue<>();
LinkedBlockingQueue<CompletableFuture<Void>> createStreamSignals = new LinkedBlockingQueue<>();
List<CompletableFuture<Boolean>> createStreamResponsesList = new LinkedList<>();
List<CompletableFuture<Void>> createStreamSignalsList = new LinkedList<>();
for (int i = 0; i < 8; i++) {
CompletableFuture<Boolean> responseFuture = new CompletableFuture<>();
CompletableFuture<Void> signalFuture = new CompletableFuture<>();
createStreamResponsesList.add(responseFuture);
createStreamResponses.add(responseFuture);
createStreamSignalsList.add(signalFuture);
createStreamSignals.add(signalFuture);
}
// return a future from latches queue
doAnswer(x -> {
createStreamSignals.take().complete(null);
return createStreamResponses.take();
}).when(controller).createInternalStream(anyString(), anyString(), any());
@Cleanup ControllerEventProcessors processors = new ControllerEventProcessors("host1", config, controller, checkpointStore, streamStore, bucketStore, connectionPool, streamMetadataTasks, streamTransactionMetadataTasks, kvtStore, kvtTasks, system, executorService());
// call bootstrap on ControllerEventProcessors
processors.bootstrap(streamTransactionMetadataTasks, streamMetadataTasks, kvtTasks);
// wait on create scope being called.
createScopeSignalsList.get(0).join();
verify(controller, times(1)).createScope(any());
// complete scopeFuture1 exceptionally. this should result in a retry.
createScopeResponsesList.get(0).completeExceptionally(new RuntimeException());
// wait on second scope signal being called
createScopeSignalsList.get(1).join();
verify(controller, times(2)).createScope(any());
// so far no create stream should have been invoked
verify(controller, times(0)).createInternalStream(anyString(), anyString(), any());
// complete scopeFuture2 successfully
createScopeResponsesList.get(1).complete(true);
// create streams should be called now
// since we call four create streams, we will wait on the first four signal futures
createStreamSignalsList.get(0).join();
createStreamSignalsList.get(1).join();
createStreamSignalsList.get(2).join();
createStreamSignalsList.get(3).join();
verify(controller, times(4)).createInternalStream(anyString(), anyString(), any());
// fail first four requests
createStreamResponsesList.get(0).completeExceptionally(new RuntimeException());
createStreamResponsesList.get(1).completeExceptionally(new RuntimeException());
createStreamResponsesList.get(2).completeExceptionally(new RuntimeException());
createStreamResponsesList.get(3).completeExceptionally(new RuntimeException());
// this should result in a retry for four create streams. wait on next four signals
createStreamSignalsList.get(4).join();
createStreamSignalsList.get(5).join();
createStreamSignalsList.get(6).join();
createStreamSignalsList.get(7).join();
verify(controller, times(8)).createInternalStream(anyString(), anyString(), any());
// complete successfully
createStreamResponsesList.get(4).complete(true);
createStreamResponsesList.get(5).complete(true);
createStreamResponsesList.get(6).complete(true);
createStreamResponsesList.get(7).complete(true);
// Bootstrap must eventually observe all creations succeeding and mark itself complete.
AssertExtensions.assertEventuallyEquals(true, () -> processors.getBootstrapCompleted().get(), 10000);
}
Aggregations