use of io.pravega.controller.mocks.SegmentHelperMock in project pravega by pravega.
the class StreamTransactionMetadataTasksTest method failOverTests.
@Test
public void failOverTests() throws CheckpointStoreException, InterruptedException {
// Create mock writer objects.
EventStreamWriterMock<CommitEvent> commitWriter = new EventStreamWriterMock<>();
EventStreamWriterMock<AbortEvent> abortWriter = new EventStreamWriterMock<>();
EventStreamReader<CommitEvent> commitReader = commitWriter.getReader();
EventStreamReader<AbortEvent> abortReader = abortWriter.getReader();
consumer = new ControllerService(streamStore, hostStore, streamMetadataTasks, txnTasks, segmentHelperMock, executor, null);
// Create test scope and stream.
final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
final StreamConfiguration configuration1 = StreamConfiguration.builder().scope(SCOPE).streamName(STREAM).scalingPolicy(policy1).build();
Assert.assertEquals(Controller.CreateScopeStatus.Status.SUCCESS, consumer.createScope(SCOPE).join().getStatus());
Assert.assertEquals(Controller.CreateStreamStatus.Status.SUCCESS, streamMetadataTasks.createStream(SCOPE, STREAM, configuration1, System.currentTimeMillis()).join());
// Set up txn task for creating transactions from a failedHost.
StreamTransactionMetadataTasks failedTxnTasks = new StreamTransactionMetadataTasks(streamStore, hostStore, segmentHelperMock, executor, "failedHost", connectionFactory, false, "");
failedTxnTasks.initializeStreamWriters("commitStream", new EventStreamWriterMock<>(), "abortStream", new EventStreamWriterMock<>());
// Create 3 transactions from failedHost.
VersionedTransactionData tx1 = failedTxnTasks.createTxn(SCOPE, STREAM, 10000, 10000, null).join().getKey();
VersionedTransactionData tx2 = failedTxnTasks.createTxn(SCOPE, STREAM, 10000, 10000, null).join().getKey();
VersionedTransactionData tx3 = failedTxnTasks.createTxn(SCOPE, STREAM, 10000, 10000, null).join().getKey();
// Ping another txn from failedHost.
UUID txnId = UUID.randomUUID();
streamStore.createTransaction(SCOPE, STREAM, txnId, 10000, 30000, 30000, null, executor).join();
PingTxnStatus pingStatus = failedTxnTasks.pingTxn(SCOPE, STREAM, txnId, 10000, null).join();
VersionedTransactionData tx4 = streamStore.getTransactionData(SCOPE, STREAM, txnId, null, executor).join();
// Validate versions of all txns.
Assert.assertEquals(0, tx1.getVersion());
Assert.assertEquals(0, tx2.getVersion());
Assert.assertEquals(0, tx3.getVersion());
Assert.assertEquals(1, tx4.getVersion());
Assert.assertEquals(PingTxnStatus.Status.OK, pingStatus.getStatus());
// Validate the txn index.
Assert.assertEquals(1, streamStore.listHostsOwningTxn().join().size());
// Change state of one txn to COMMITTING.
TxnStatus txnStatus2 = streamStore.sealTransaction(SCOPE, STREAM, tx2.getId(), true, Optional.empty(), null, executor).thenApply(AbstractMap.SimpleEntry::getKey).join();
Assert.assertEquals(TxnStatus.COMMITTING, txnStatus2);
// Change state of another txn to ABORTING.
TxnStatus txnStatus3 = streamStore.sealTransaction(SCOPE, STREAM, tx3.getId(), false, Optional.empty(), null, executor).thenApply(AbstractMap.SimpleEntry::getKey).join();
Assert.assertEquals(TxnStatus.ABORTING, txnStatus3);
// Create transaction tasks for sweeping txns from failedHost.
txnTasks = new StreamTransactionMetadataTasks(streamStore, hostStore, segmentHelperMock, executor, "host", connectionFactory, false, "");
TxnSweeper txnSweeper = new TxnSweeper(streamStore, txnTasks, 100, executor);
// Before initialization, txnSweeper.sweepFailedProcesses should fail with an IllegalStateException.
AssertExtensions.assertThrows("IllegalStateException before initialization", txnSweeper.sweepFailedProcesses(() -> Collections.singleton("host")), ex -> ex instanceof IllegalStateException);
// Initialize stream writers.
txnTasks.initializeStreamWriters("commitStream", commitWriter, "abortStream", abortWriter);
// Validate that txnTasks is ready.
assertTrue(txnTasks.isReady());
// Sweep txns that were being managed by failedHost.
txnSweeper.sweepFailedProcesses(() -> Collections.singleton("host")).join();
// Validate that sweeping completes correctly.
Assert.assertEquals(0, streamStore.listHostsOwningTxn().join().size());
Assert.assertEquals(TxnStatus.ABORTING, streamStore.transactionStatus(SCOPE, STREAM, tx1.getId(), null, executor).join());
Assert.assertEquals(TxnStatus.COMMITTING, streamStore.transactionStatus(SCOPE, STREAM, tx2.getId(), null, executor).join());
Assert.assertEquals(TxnStatus.ABORTING, streamStore.transactionStatus(SCOPE, STREAM, tx3.getId(), null, executor).join());
Assert.assertEquals(TxnStatus.ABORTING, streamStore.transactionStatus(SCOPE, STREAM, tx4.getId(), null, executor).join());
// Create commit and abort event processors.
ConnectionFactory connectionFactory = Mockito.mock(ConnectionFactory.class);
BlockingQueue<CommitEvent> processedCommitEvents = new LinkedBlockingQueue<>();
BlockingQueue<AbortEvent> processedAbortEvents = new LinkedBlockingQueue<>();
createEventProcessor("commitRG", "commitStream", commitReader, commitWriter, () -> new CommitEventProcessor(streamStore, streamMetadataTasks, hostStore, executor, segmentHelperMock, connectionFactory, processedCommitEvents));
createEventProcessor("abortRG", "abortStream", abortReader, abortWriter, () -> new ConcurrentEventProcessor<>(new AbortRequestHandler(streamStore, streamMetadataTasks, hostStore, executor, segmentHelperMock, connectionFactory, processedAbortEvents), executor));
// Wait until the commit event is processed and ensure that the txn state is COMMITTED.
CommitEvent commitEvent = processedCommitEvents.take();
assertEquals(tx2.getId(), commitEvent.getTxid());
assertEquals(TxnStatus.COMMITTED, streamStore.transactionStatus(SCOPE, STREAM, tx2.getId(), null, executor).join());
// Wait until the three abort events are processed and ensure that the txn states are ABORTED.
Predicate<AbortEvent> predicate = event -> event.getTxid().equals(tx1.getId()) || event.getTxid().equals(tx3.getId()) || event.getTxid().equals(tx4.getId());
AbortEvent abortEvent1 = processedAbortEvents.take();
assertTrue(predicate.test(abortEvent1));
AbortEvent abortEvent2 = processedAbortEvents.take();
assertTrue(predicate.test(abortEvent2));
AbortEvent abortEvent3 = processedAbortEvents.take();
assertTrue(predicate.test(abortEvent3));
assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx1.getId(), null, executor).join());
assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx3.getId(), null, executor).join());
assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx4.getId(), null, executor).join());
}
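The fields referenced in the test above (streamStore, hostStore, segmentHelperMock, connectionFactory, executor) are created in the test class's setup, which is not shown on this page. A minimal sketch of how such a fixture could be assembled, reusing the same factory calls that appear in the StreamMetadataTasksTest setup below and omitting imports like the listings above, might look like this (a hedged sketch, not the class's actual @Before method):
// Hypothetical fixture sketch; the actual StreamTransactionMetadataTasksTest setup may differ.
ScheduledExecutorService executor = Executors.newScheduledThreadPool(10); // thread count is arbitrary for this sketch
StreamMetadataStore streamStore = StreamStoreFactory.createInMemoryStore(1, executor);
HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
SegmentHelper segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
ConnectionFactory connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());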
use of io.pravega.controller.mocks.SegmentHelperMock in project pravega by pravega.
the class StreamMetadataTasksTest method setup.
@Before
public void setup() throws Exception {
zkServer = new TestingServerStarter().start();
zkServer.start();
zkClient = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new ExponentialBackoffRetry(200, 10, 5000));
zkClient.start();
StreamMetadataStore streamStore = StreamStoreFactory.createInMemoryStore(1, executor);
// create a partial mock.
streamStorePartialMock = spy(streamStore);
doReturn(CompletableFuture.completedFuture(false)).when(streamStorePartialMock).isTransactionOngoing(anyString(), anyString(), any(), any()); // mock only the isTransactionOngoing call.
TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(zkClient, executor);
HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
SegmentHelper segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
streamMetadataTasks = spy(new StreamMetadataTasks(streamStorePartialMock, hostStore, taskMetadataStore, segmentHelperMock, executor, "host", connectionFactory, authEnabled, "key"));
streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStorePartialMock, hostStore, segmentHelperMock, executor, "host", connectionFactory, authEnabled, "key");
this.streamRequestHandler = new StreamRequestHandler(
        new AutoScaleTask(streamMetadataTasks, streamStorePartialMock, executor),
        new ScaleOperationTask(streamMetadataTasks, streamStorePartialMock, executor),
        new UpdateStreamTask(streamMetadataTasks, streamStorePartialMock, executor),
        new SealStreamTask(streamMetadataTasks, streamStorePartialMock, executor),
        new DeleteStreamTask(streamMetadataTasks, streamStorePartialMock, executor),
        new TruncateStreamTask(streamMetadataTasks, streamStorePartialMock, executor),
        executor);
consumer = new ControllerService(streamStorePartialMock, hostStore, streamMetadataTasks, streamTransactionMetadataTasks, segmentHelperMock, executor, null);
final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
final StreamConfiguration configuration1 = StreamConfiguration.builder().scope(SCOPE).streamName(stream1).scalingPolicy(policy1).build();
streamStorePartialMock.createScope(SCOPE).join();
long start = System.currentTimeMillis();
streamStorePartialMock.createStream(SCOPE, stream1, configuration1, start, null, executor).get();
streamStorePartialMock.setState(SCOPE, stream1, State.ACTIVE, null, executor).get();
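// Manually drive a scale of stream1 in the store: seal segment 1, replace it with two new ranges, and record the scale as complete.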
AbstractMap.SimpleEntry<Double, Double> segment1 = new AbstractMap.SimpleEntry<>(0.5, 0.75);
AbstractMap.SimpleEntry<Double, Double> segment2 = new AbstractMap.SimpleEntry<>(0.75, 1.0);
List<Integer> sealedSegments = Collections.singletonList(1);
StartScaleResponse response = streamStorePartialMock.startScale(SCOPE, stream1, sealedSegments, Arrays.asList(segment1, segment2), start + 20, false, null, executor).get();
List<Segment> segmentsCreated = response.getSegmentsCreated();
streamStorePartialMock.setState(SCOPE, stream1, State.SCALING, null, executor).get();
streamStorePartialMock.scaleNewSegmentsCreated(SCOPE, stream1, sealedSegments, segmentsCreated, response.getActiveEpoch(), start + 20, null, executor).get();
streamStorePartialMock.scaleSegmentsSealed(SCOPE, stream1, sealedSegments.stream().collect(Collectors.toMap(x -> x, x -> 0L)), segmentsCreated, response.getActiveEpoch(), start + 20, null, executor).get();
}
use of io.pravega.controller.mocks.SegmentHelperMock in project pravega by pravega.
the class TaskTest method testStreamTaskSweeping.
@Test(timeout = 10000)
public void testStreamTaskSweeping() {
final String stream = "testPartialCreationStream";
final String deadHost = "deadHost";
final int initialSegments = 2;
final ScalingPolicy policy1 = ScalingPolicy.fixed(initialSegments);
final StreamConfiguration configuration1 = StreamConfiguration.builder().scope(SCOPE).streamName(stream1).scalingPolicy(policy1).build();
final ArrayList<Integer> sealSegments = new ArrayList<>();
sealSegments.add(0);
final ArrayList<AbstractMap.SimpleEntry<Double, Double>> newRanges = new ArrayList<>();
newRanges.add(new AbstractMap.SimpleEntry<>(0.0, 0.25));
newRanges.add(new AbstractMap.SimpleEntry<>(0.25, 0.5));
// Create objects.
StreamMetadataTasks mockStreamTasks = new StreamMetadataTasks(streamStore, hostStore, taskMetadataStore, segmentHelperMock, executor, deadHost, Mockito.mock(ConnectionFactory.class), false, "");
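// Index-only mode presumably makes createStream record only the task index entry for deadHost, without fully executing the task, so the sweeper below has a partial task to process.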
mockStreamTasks.setCreateIndexOnlyMode();
TaskSweeper sweeper = new TaskSweeper(taskMetadataStore, HOSTNAME, executor, streamMetadataTasks);
// Create stream test.
completePartialTask(mockStreamTasks.createStream(SCOPE, stream, configuration1, System.currentTimeMillis()), deadHost, sweeper);
Assert.assertEquals(initialSegments, streamStore.getActiveSegments(SCOPE, stream, null, executor).join().size());
List<StreamConfiguration> streams = streamStore.listStreamsInScope(SCOPE).join();
Assert.assertTrue(streams.stream().allMatch(x -> !x.getStreamName().equals(stream)));
}
use of io.pravega.controller.mocks.SegmentHelperMock in project pravega by pravega.
the class ControllerEventProcessorTest method testCommitEventProcessorFailedWrite.
@Test(timeout = 10000)
public void testCommitEventProcessorFailedWrite() {
UUID txnId = UUID.randomUUID();
VersionedTransactionData txnData = streamStore.createTransaction(SCOPE, STREAM, txnId, 10000, 10000, 10000, null, executor).join();
CommitEventProcessor commitEventProcessor = spy(new CommitEventProcessor(streamStore, streamMetadataTasks, hostStore, executor, segmentHelperMock, null));
EventProcessor.Writer<CommitEvent> successWriter = event -> CompletableFuture.completedFuture(null);
EventProcessor.Writer<CommitEvent> failedWriter = event -> {
CompletableFuture<Void> future = new CompletableFuture<>();
future.completeExceptionally(new RuntimeException("Error"));
return future;
};
// Simulate a failed write followed by a successful one.
when(commitEventProcessor.getSelfWriter()).thenReturn(failedWriter).thenReturn(successWriter);
// Invoke process with an epoch greater than the transaction's epoch.
commitEventProcessor.process(new CommitEvent(SCOPE, STREAM, txnData.getEpoch() + 1, txnData.getId()), null);
verify(commitEventProcessor, times(2)).getSelfWriter();
}
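The stubbing on getSelfWriter relies on Mockito's consecutive-return feature: each chained thenReturn answers one subsequent call, so the processor first sees the failing writer and then the succeeding one, which matches the times(2) verification. A self-contained illustration of that mechanism (hypothetical names, unrelated to Pravega, requiring only mockito-core on the classpath):
import static org.mockito.Mockito.*;
import java.util.function.Supplier;

public class ConsecutiveStubbingExample {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        Supplier<String> supplier = mock(Supplier.class);
        // The first call returns "fail", the second returns "ok".
        when(supplier.get()).thenReturn("fail").thenReturn("ok");
        System.out.println(supplier.get()); // prints "fail"
        System.out.println(supplier.get()); // prints "ok"
        // Both invocations are recorded on the mock.
        verify(supplier, times(2)).get();
    }
}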