use of io.pravega.controller.store.host.HostControllerStore in project pravega by pravega.
the class TimeoutServiceTest method testPingOwnershipTransfer.
@Test(timeout = 30000)
public void testPingOwnershipTransfer() throws Exception {
StreamMetadataStore streamStore2 = StreamStoreFactory.createZKStore(client, executor);
HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
TaskMetadataStore taskMetadataStore = TaskStoreFactory.createStore(storeClient, executor);
ConnectionFactoryImpl connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
@Cleanup StreamMetadataTasks streamMetadataTasks2 = new StreamMetadataTasks(streamStore2, hostStore, taskMetadataStore, new SegmentHelper(), executor, "2", connectionFactory, false, "");
@Cleanup StreamTransactionMetadataTasks streamTransactionMetadataTasks2 = new StreamTransactionMetadataTasks(streamStore2, hostStore, SegmentHelperMock.getSegmentHelperMock(), executor, "2", TimeoutServiceConfig.defaultConfig(), new LinkedBlockingQueue<>(5), connectionFactory, false, "");
streamTransactionMetadataTasks2.initializeStreamWriters("commitStream", new EventStreamWriterMock<>(), "abortStream", new EventStreamWriterMock<>());
// Create TimeoutService
TimerWheelTimeoutService timeoutService2 = (TimerWheelTimeoutService) streamTransactionMetadataTasks2.getTimeoutService();
ControllerService controllerService2 = new ControllerService(streamStore2, hostStore, streamMetadataTasks2, streamTransactionMetadataTasks2, new SegmentHelper(), executor, null);
TxnId txnId = controllerService.createTransaction(SCOPE, STREAM, LEASE, SCALE_GRACE_PERIOD).thenApply(x -> ModelHelper.decode(x.getKey())).join();
VersionedTransactionData txnData = streamStore.getTransactionData(SCOPE, STREAM, ModelHelper.encode(txnId), null, executor).join();
Assert.assertEquals(txnData.getVersion(), 0);
Optional<Throwable> result = timeoutService.getTaskCompletionQueue().poll((long) (0.75 * LEASE), TimeUnit.MILLISECONDS);
Assert.assertNull(result);
TxnState txnState = controllerService.checkTransactionStatus(SCOPE, STREAM, txnId).join();
Assert.assertEquals(TxnState.State.OPEN, txnState.getState());
// increasing lease -> total effective lease = 3 * LEASE
PingTxnStatus pingStatus = controllerService2.pingTransaction(SCOPE, STREAM, txnId, 2 * LEASE).join();
Assert.assertEquals(PingTxnStatus.Status.OK, pingStatus.getStatus());
txnData = streamStore.getTransactionData(SCOPE, STREAM, ModelHelper.encode(txnId), null, executor).join();
Assert.assertEquals(txnData.getVersion(), 1);
// The first timeout service (timeoutService) should believe that LEASE has expired and should get a non-empty completion task
result = timeoutService.getTaskCompletionQueue().poll((long) (1.3 * LEASE + RETRY_DELAY), TimeUnit.MILLISECONDS);
Assert.assertNotNull(result);
// The first timeout service may have attempted to abort the txn, but that attempt would have failed, so the txn remains open
txnState = controllerService.checkTransactionStatus(SCOPE, STREAM, txnId).join();
Assert.assertEquals(TxnState.State.OPEN, txnState.getState());
// timeoutService2 should still be waiting for the extended lease to expire, so its completion queue should be empty
result = timeoutService2.getTaskCompletionQueue().poll(0L, TimeUnit.MILLISECONDS);
Assert.assertNull(result);
result = timeoutService2.getTaskCompletionQueue().poll(2 * LEASE + RETRY_DELAY, TimeUnit.MILLISECONDS);
Assert.assertNotNull(result);
// Now the txn should have moved to ABORTING because timeoutService2 has initiated the abort
txnState = controllerService.checkTransactionStatus(SCOPE, STREAM, txnId).join();
Assert.assertEquals(TxnState.State.ABORTING, txnState.getState());
}
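
Every snippet on this page obtains its HostControllerStore the same way: an in-memory store built from a dummy host-monitor configuration, which is then handed to the metadata tasks and the ControllerService. A minimal, self-contained sketch of just that setup follows; the factory call is taken verbatim from the snippets, while the import paths (in particular the package of HostMonitorConfigImpl) are assumptions for illustration.

import io.pravega.controller.store.host.HostControllerStore;
import io.pravega.controller.store.host.HostStoreFactory;
// Package assumed for this sketch; adjust to wherever HostMonitorConfigImpl lives in your tree.
import io.pravega.controller.store.host.impl.HostMonitorConfigImpl;

public class HostStoreSetupSketch {
    public static void main(String[] args) {
        // In-memory host store backed by a dummy host-monitor config, as in the tests above.
        HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
        // In the snippets above this store is passed to StreamMetadataTasks,
        // StreamTransactionMetadataTasks and ControllerService; here we only confirm creation.
        System.out.println("Created host store: " + hostStore.getClass().getSimpleName());
    }
}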
use of io.pravega.controller.store.host.HostControllerStore in project pravega by pravega.
the class ControllerClusterListenerTest method clusterListenerTest.
@Test(timeout = 60000L)
public void clusterListenerTest() throws InterruptedException {
String hostName = "localhost";
Host host = new Host(hostName, 10, "host1");
// Create task sweeper.
TaskMetadataStore taskStore = TaskStoreFactory.createInMemoryStore(executor);
TaskSweeper taskSweeper = new TaskSweeper(taskStore, host.getHostId(), executor, new TestTasks(taskStore, executor, host.getHostId()));
// Create txn sweeper.
StreamMetadataStore streamStore = StreamStoreFactory.createInMemoryStore(executor);
HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
ConnectionFactory connectionFactory = mock(ConnectionFactory.class);
StreamTransactionMetadataTasks txnTasks = new StreamTransactionMetadataTasks(streamStore, hostStore, segmentHelper, executor, host.getHostId(), connectionFactory, false, "");
txnTasks.initializeStreamWriters("commitStream", new EventStreamWriterMock<>(), "abortStream", new EventStreamWriterMock<>());
TxnSweeper txnSweeper = new TxnSweeper(streamStore, txnTasks, 100, executor);
// Create ControllerClusterListener.
ControllerClusterListener clusterListener = new ControllerClusterListener(host, clusterZK, executor, Lists.newArrayList(taskSweeper, txnSweeper));
clusterListener.startAsync();
clusterListener.awaitRunning();
validateAddedNode(host.getHostId());
// Add a new host
Host host1 = new Host(hostName, 20, "host2");
clusterZK.registerHost(host1);
validateAddedNode(host1.getHostId());
clusterZK.deregisterHost(host1);
validateRemovedNode(host1.getHostId());
clusterListener.stopAsync();
clusterListener.awaitTerminated();
validateRemovedNode(host.getHostId());
}
use of io.pravega.controller.store.host.HostControllerStore in project pravega by pravega.
the class ControllerClusterListenerTest method clusterListenerStarterTest.
@Test(timeout = 60000L)
public void clusterListenerStarterTest() throws InterruptedException, ExecutionException {
String hostName = "localhost";
Host host = new Host(hostName, 10, "originalhost");
// The following futures are used as latches; the test waits on them by calling
// Futures.await at various points in the test case.
// Future completed when the task sweeper is ready and its first sweep happens.
CompletableFuture<Void> taskSweep = new CompletableFuture<>();
// Future completed the first time the task sweeper handles a failed host (taskSweeper.handleFailedProcess).
CompletableFuture<Void> taskHostSweep1 = new CompletableFuture<>();
// Future completed the second time the task sweeper handles a failed host.
CompletableFuture<Void> taskHostSweep2 = new CompletableFuture<>();
// Future for txn sweeper to get ready.
CompletableFuture<Void> txnSweep = new CompletableFuture<>();
// Future completed when a txn host sweep is attempted while the txn sweeper is not yet ready.
CompletableFuture<Void> txnHostSweepIgnore = new CompletableFuture<>();
// Future completed when the txn sweeper handles a failed host after initialization.
CompletableFuture<Void> txnHostSweep2 = new CompletableFuture<>();
// Create task sweeper.
TaskMetadataStore taskStore = TaskStoreFactory.createZKStore(curatorClient, executor);
TaskSweeper taskSweeper = spy(new TaskSweeper(taskStore, host.getHostId(), executor, new TestTasks(taskStore, executor, host.getHostId())));
when(taskSweeper.sweepFailedProcesses(any(Supplier.class))).thenAnswer(invocation -> {
    if (!taskSweep.isDone()) {
        // we complete the future when this method is called for the first time.
        taskSweep.complete(null);
    }
    return CompletableFuture.completedFuture(null);
});
when(taskSweeper.handleFailedProcess(anyString())).thenAnswer(invocation -> {
    if (!taskHostSweep1.isDone()) {
        // we complete this future when task sweeper for a failed host is called for the first time.
        taskHostSweep1.complete(null);
    } else if (!taskHostSweep2.isDone()) {
        // we complete this future when task sweeper for a failed host is called for the second time.
        taskHostSweep2.complete(null);
    }
    return CompletableFuture.completedFuture(null);
});
// Create txn sweeper.
StreamMetadataStore streamStore = StreamStoreFactory.createInMemoryStore(executor);
HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
ConnectionFactory connectionFactory = mock(ConnectionFactory.class);
// Create StreamTransactionMetadataTasks but do not initialize it with writers; it will not be
// ready until the writers are supplied.
StreamTransactionMetadataTasks txnTasks = new StreamTransactionMetadataTasks(streamStore, hostStore, segmentHelper, executor, host.getHostId(), connectionFactory, false, "");
TxnSweeper txnSweeper = spy(new TxnSweeper(streamStore, txnTasks, 100, executor));
// Any attempt to sweep txns for a failed host should be ignored while the sweeper reports that it is not ready.
doAnswer(invocation -> {
    txnHostSweepIgnore.complete(null);
    return false;
}).when(txnSweeper).isReady();
when(txnSweeper.sweepFailedProcesses(any())).thenAnswer(invocation -> {
    if (!txnSweep.isDone()) {
        txnSweep.complete(null);
    }
    return CompletableFuture.completedFuture(null);
});
when(txnSweeper.handleFailedProcess(anyString())).thenAnswer(invocation -> {
    if (!txnHostSweep2.isDone()) {
        txnHostSweep2.complete(null);
    }
    return CompletableFuture.completedFuture(null);
});
// Create ControllerClusterListener.
ControllerClusterListener clusterListener = new ControllerClusterListener(host, clusterZK, executor, Lists.newArrayList(taskSweeper, txnSweeper));
clusterListener.startAsync();
clusterListener.awaitRunning();
log.info("cluster started");
// ensure that task sweep happens after cluster listener becomes ready.
assertTrue(Futures.await(taskSweep, 3000));
log.info("task sweeper completed");
// ensure only tasks are swept
verify(taskSweeper, times(1)).sweepFailedProcesses(any(Supplier.class));
verify(txnSweeper, times(0)).sweepFailedProcesses(any());
verify(taskSweeper, times(0)).handleFailedProcess(anyString());
verify(txnSweeper, times(0)).handleFailedProcess(anyString());
validateAddedNode(host.getHostId());
log.info("adding new host");
// now add and remove a new host
Host newHost = new Host(hostName, 20, "newHost1");
clusterZK.registerHost(newHost);
validateAddedNode(newHost.getHostId());
clusterZK.deregisterHost(newHost);
validateRemovedNode(newHost.getHostId());
log.info("deregistering new host");
assertTrue(Futures.await(taskHostSweep1, 3000));
assertTrue(Futures.await(txnHostSweepIgnore, 10000));
log.info("task sweep for new host done");
// verify that a full task sweep is not performed again.
verify(taskSweeper, times(1)).sweepFailedProcesses(any(Supplier.class));
// verify that host specific sweep happens once.
verify(taskSweeper, atLeast(1)).handleFailedProcess(anyString());
// verify that txns are not yet swept as txnsweeper is not yet ready.
verify(txnSweeper, times(0)).sweepFailedProcesses(any());
verify(txnSweeper, times(0)).handleFailedProcess(anyString());
// verify that the txn sweeper's readiness was checked; at this point it would have been found not ready
verify(txnSweeper, atLeast(1)).isReady();
// Reset the mock to call real method on txnsweeper.isReady.
doCallRealMethod().when(txnSweeper).isReady();
// Complete txn sweeper initialization by adding event writers.
txnTasks.initializeStreamWriters("commitStream", new EventStreamWriterMock<>(), "abortStream", new EventStreamWriterMock<>());
txnSweeper.awaitInitialization();
assertTrue(Futures.await(txnSweep, 3000));
// verify that, after initialization, txns are swept and the host-specific txn sweep is also performed.
verify(txnSweeper, times(1)).sweepFailedProcesses(any());
// now add another host
newHost = new Host(hostName, 20, "newHost2");
clusterZK.registerHost(newHost);
validateAddedNode(newHost.getHostId());
clusterZK.deregisterHost(newHost);
log.info("removing newhost2");
validateRemovedNode(newHost.getHostId());
assertTrue(Futures.await(taskHostSweep2, 3000));
assertTrue(Futures.await(txnHostSweep2, 3000));
verify(taskSweeper, atLeast(2)).handleFailedProcess(anyString());
verify(txnSweeper, atLeast(1)).handleFailedProcess(anyString());
clusterListener.stopAsync();
clusterListener.awaitTerminated();
}
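
The stubbing pattern in the test above, where a sweeper is spied on and a CompletableFuture is completed the first time a method is invoked so the test can block on it as a latch, can be shown in isolation. Below is a minimal sketch assuming plain Mockito; the Worker class is hypothetical, stands in for the sweepers, and is not part of Pravega.

import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

public class SweepLatchSketch {

    // Hypothetical stand-in for a sweeper; it exists only for this sketch.
    static class Worker {
        CompletableFuture<Void> sweep() {
            return CompletableFuture.completedFuture(null);
        }
    }

    public static void main(String[] args) throws Exception {
        CompletableFuture<Void> firstSweep = new CompletableFuture<>();
        Worker worker = spy(new Worker());
        // Complete the latch the first time sweep() is called, then return a finished future,
        // mirroring how taskSweeper.sweepFailedProcesses(...) is stubbed in the test above.
        doAnswer(invocation -> {
            if (!firstSweep.isDone()) {
                firstSweep.complete(null);
            }
            return CompletableFuture.completedFuture(null);
        }).when(worker).sweep();

        worker.sweep();
        // Blocks until the stub has been hit at least once, analogous to Futures.await(taskSweep, 3000).
        firstSweep.get(3, TimeUnit.SECONDS);
        System.out.println("sweep observed");
    }
}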
use of io.pravega.controller.store.host.HostControllerStore in project pravega by pravega.
the class ControllerServiceWithZKStreamTest method setup.
@Before
public void setup() {
try {
    zkServer = new TestingServerStarter().start();
} catch (Exception e) {
    log.error("Error starting ZK server", e);
}
zkClient = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new ExponentialBackoffRetry(200, 10, 5000));
zkClient.start();
StreamMetadataStore streamStore = StreamStoreFactory.createZKStore(zkClient, executor);
TaskMetadataStore taskMetadataStore = TaskStoreFactory.createZKStore(zkClient, executor);
HostControllerStore hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
SegmentHelper segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().controllerURI(URI.create("tcp://localhost")).build());
streamMetadataTasks = new StreamMetadataTasks(streamStore, hostStore, taskMetadataStore, segmentHelperMock, executor, "host", connectionFactory, false, "");
this.streamRequestHandler = new StreamRequestHandler(new AutoScaleTask(streamMetadataTasks, streamStore, executor),
        new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
        new UpdateStreamTask(streamMetadataTasks, streamStore, executor),
        new SealStreamTask(streamMetadataTasks, streamStore, executor),
        new DeleteStreamTask(streamMetadataTasks, streamStore, executor),
        new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
        executor);
streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executor));
streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, hostStore, segmentHelperMock, executor, "host", connectionFactory, false, "");
consumer = new ControllerService(streamStore, hostStore, streamMetadataTasks, streamTransactionMetadataTasks, segmentHelperMock, executor, null);
}
use of io.pravega.controller.store.host.HostControllerStore in project pravega by pravega.
the class ZKControllerServiceImplTest method setup.
@Override
public void setup() throws Exception {
final StreamMetadataStore streamStore;
final HostControllerStore hostStore;
final TaskMetadataStore taskMetadataStore;
final SegmentHelper segmentHelper;
zkServer = new TestingServerStarter().start();
zkServer.start();
zkClient = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new ExponentialBackoffRetry(200, 10, 5000));
zkClient.start();
storeClient = StoreClientFactory.createZKStoreClient(zkClient);
executorService = ExecutorServiceHelpers.newScheduledThreadPool(20, "testpool");
taskMetadataStore = TaskStoreFactory.createStore(storeClient, executorService);
hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
streamStore = StreamStoreFactory.createZKStore(zkClient, executorService);
segmentHelper = SegmentHelperMock.getSegmentHelperMock();
ConnectionFactoryImpl connectionFactory = new ConnectionFactoryImpl(ClientConfig.builder().build());
streamMetadataTasks = new StreamMetadataTasks(streamStore, hostStore, taskMetadataStore, segmentHelper, executorService, "host", connectionFactory, false, "");
this.streamRequestHandler = new StreamRequestHandler(new AutoScaleTask(streamMetadataTasks, streamStore, executorService),
        new ScaleOperationTask(streamMetadataTasks, streamStore, executorService),
        new UpdateStreamTask(streamMetadataTasks, streamStore, executorService),
        new SealStreamTask(streamMetadataTasks, streamStore, executorService),
        new DeleteStreamTask(streamMetadataTasks, streamStore, executorService),
        new TruncateStreamTask(streamMetadataTasks, streamStore, executorService),
        executorService);
streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executorService));
streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, hostStore, segmentHelper, executorService, "host", connectionFactory, false, "");
streamTransactionMetadataTasks.initializeStreamWriters("commitStream", new EventStreamWriterMock<>(), "abortStream", new EventStreamWriterMock<>());
cluster = new ClusterZKImpl(zkClient, ClusterType.CONTROLLER);
final CountDownLatch latch = new CountDownLatch(1);
cluster.addListener((type, host) -> latch.countDown());
cluster.registerHost(new Host("localhost", 9090, null));
latch.await();
ControllerService controller = new ControllerService(streamStore, hostStore, streamMetadataTasks, streamTransactionMetadataTasks, new SegmentHelper(), executorService, cluster);
controllerService = new ControllerServiceImpl(controller, "", false);
}