Example usage of io.pravega.controller.task.EventHelper in the pravega project (pravega/pravega):
the testCreateKeyValueTable method of the TableMetadataTasksTest class.
// Verifies that createKeyValueTable succeeds end-to-end (segments, creation time and
// configuration are persisted), and that a failing EventHelper surfaces a RuntimeException.
@Test(timeout = 30000)
public void testCreateKeyValueTable() throws ExecutionException, InterruptedException {
    Assert.assertTrue(isScopeCreated);
    long creationTime = System.currentTimeMillis();
    KeyValueTableConfiguration kvtConfig = KeyValueTableConfiguration.builder()
            .partitionCount(2).primaryKeyLength(4).secondaryKeyLength(4).build();
    CompletableFuture<Controller.CreateKeyValueTableStatus.Status> createOperationFuture =
            kvtMetadataTasks.createKeyValueTable(SCOPE, kvtable1, kvtConfig, creationTime, 0L);
    // Drain the posted create event so the operation future can complete.
    assertTrue(Futures.await(processEvent((TableMetadataTasksTest.WriterMock) requestEventWriter)));
    assertEquals(CreateKeyValueTableStatus.Status.SUCCESS, createOperationFuture.join());
    // JUnit convention: assertEquals(expected, actual) — expected first, so failure
    // messages read correctly (the original had the arguments swapped on these checks).
    List<KVTSegmentRecord> segmentsList = kvtStore.getActiveSegments(SCOPE, kvtable1, null, executor).get();
    assertEquals(kvtConfig.getPartitionCount(), segmentsList.size());
    long storedCreationTime = kvtStore.getCreationTime(SCOPE, kvtable1, null, executor).get();
    assertEquals(creationTime, storedCreationTime);
    KeyValueTableConfiguration storedConfig = kvtStore.getConfiguration(SCOPE, kvtable1, null, executor).get();
    assertEquals(kvtConfig.getPartitionCount(), storedConfig.getPartitionCount());
    // check retry failures: an EventHelper that always fails must propagate a RuntimeException.
    EventHelper mockHelper = EventHelperMock.getFailingEventHelperMock();
    TableMetadataTasks kvtFailingMetaTasks = spy(new TableMetadataTasks(kvtStore, segmentHelperMock,
            executor, executor, "host", GrpcAuthHelper.getDisabledAuthHelper(), mockHelper));
    AssertExtensions.assertFutureThrows("addIndexAndSubmitTask throws exception",
            kvtFailingMetaTasks.createKeyValueTable(SCOPE, kvtable1, kvtConfig, creationTime, 0L),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
}
Example usage of io.pravega.controller.task.EventHelper in the pravega project (pravega/pravega):
the updateReaderGroup method of the StreamMetadataTasks class.
/**
 * Updates the configuration of an existing Reader Group.
 *
 * The operation is retried (up to READER_GROUP_OPERATION_MAX_RETRIES) while the failure
 * unwraps to a RetryableException. The happy path: verify the group exists, verify the
 * supplied config's generation and reader-group id match the stored record, then submit an
 * UpdateReaderGroupEvent (with generation bumped by 1) and wait until the update is applied
 * before reporting SUCCESS with the new generation.
 *
 * @param scope Reader Group scope.
 * @param rgName Reader Group name.
 * @param config New Reader Group config.
 * @param requestId request id.
 * @return future with the update status: RG_NOT_FOUND if the group does not exist,
 *         INVALID_CONFIG on a generation or reader-group-id mismatch, FAILURE if another
 *         update is already in progress, SUCCESS otherwise.
 */
public CompletableFuture<UpdateReaderGroupResponse> updateReaderGroup(final String scope, final String rgName, final ReaderGroupConfig config, long requestId) {
final OperationContext context = streamMetadataStore.createRGContext(scope, rgName, requestId);
return RetryHelper.withRetriesAsync(() -> {
// 1. check if Reader Group exists...
return streamMetadataStore.checkReaderGroupExists(scope, rgName, context, executor).thenCompose(exists -> {
if (!exists) {
UpdateReaderGroupResponse response = UpdateReaderGroupResponse.newBuilder().setStatus(UpdateReaderGroupResponse.Status.RG_NOT_FOUND).setGeneration(config.getGeneration()).build();
return CompletableFuture.completedFuture(response);
}
// 2. check for generation && ID match with existing config
return streamMetadataStore.getReaderGroupConfigRecord(scope, rgName, context, executor).thenCompose(rgConfigRecord -> {
if (rgConfigRecord.getObject().getGeneration() != config.getGeneration()) {
// Stale generation in the caller's config: reject rather than clobber a newer update.
UpdateReaderGroupResponse response = UpdateReaderGroupResponse.newBuilder().setStatus(UpdateReaderGroupResponse.Status.INVALID_CONFIG).setGeneration(config.getGeneration()).build();
return CompletableFuture.completedFuture(response);
}
if (!rgConfigRecord.getObject().isUpdating()) {
return streamMetadataStore.getReaderGroupId(scope, rgName, context, executor).thenCompose(rgId -> {
if (!config.getReaderGroupId().equals(rgId)) {
// The config belongs to a different (e.g. recreated) reader group instance.
UpdateReaderGroupResponse response = UpdateReaderGroupResponse.newBuilder().setStatus(UpdateReaderGroupResponse.Status.INVALID_CONFIG).setGeneration(config.getGeneration()).build();
return CompletableFuture.completedFuture(response);
}
// Streams the group no longer subscribes to, and whether the group is
// transitioning between subscriber and non-subscriber mode.
ImmutableSet<String> removeStreams = getStreamsToBeUnsubscribed(rgConfigRecord.getObject(), config);
boolean isTransition = isTransitionToOrFromSubscriber(rgConfigRecord.getObject(), config);
UpdateReaderGroupEvent event = new UpdateReaderGroupEvent(scope, rgName, requestId, rgId, rgConfigRecord.getObject().getGeneration() + 1, isTransition, removeStreams);
// 3. Create Reader Group Metadata and submit event; then poll (checkDone) until the
// update completes, and report the newly-stored generation back to the caller.
return eventHelperFuture.thenCompose(eventHelper -> eventHelper.addIndexAndSubmitTask(event, () -> streamMetadataStore.startRGConfigUpdate(scope, rgName, config, context, executor)).thenCompose(x -> eventHelper.checkDone(() -> isRGUpdated(scope, rgName, executor, context)).thenCompose(y -> streamMetadataStore.getReaderGroupConfigRecord(scope, rgName, context, executor).thenApply(configRecord -> {
UpdateReaderGroupResponse response = UpdateReaderGroupResponse.newBuilder().setStatus(UpdateReaderGroupResponse.Status.SUCCESS).setGeneration(configRecord.getObject().getGeneration()).build();
return response;
}))));
});
} else {
// A concurrent update holds the updating flag; fail this request instead of queueing.
log.error(requestId, "Reader group config update failed as another update was in progress.");
UpdateReaderGroupResponse response = UpdateReaderGroupResponse.newBuilder().setStatus(UpdateReaderGroupResponse.Status.FAILURE).setGeneration(config.getGeneration()).build();
return CompletableFuture.completedFuture(response);
}
});
});
}, e -> Exceptions.unwrap(e) instanceof RetryableException, READER_GROUP_OPERATION_MAX_RETRIES, executor);
}
Example usage of io.pravega.controller.task.EventHelper in the pravega project (pravega/pravega):
the initializeStreamWriters method of the StreamMetadataTasks class.
// Lazily creates the EventHelper (backed by a writer on the given stream) exactly once.
// The helper is built under the lock, but eventHelperFuture is completed outside it so
// that downstream continuations never run while the lock is held.
public void initializeStreamWriters(final EventStreamClientFactory clientFactory, final String streamName) {
    EventHelper created = null;
    synchronized (lock) {
        if (toSetEventHelper) {
            this.eventHelper = new EventHelper(
                    clientFactory.createEventWriter(streamName,
                            ControllerEventProcessors.CONTROLLER_EVENT_SERIALIZER,
                            EventWriterConfig.builder()
                                    .enableConnectionPooling(true)
                                    .retryAttempts(Integer.MAX_VALUE)
                                    .build()),
                    this.executor, this.eventExecutor, this.context.getHostId(),
                    ((AbstractStreamMetadataStore) this.streamMetadataStore).getHostTaskIndex());
            toSetEventHelper = false;
            created = eventHelper;
        }
    }
    if (created != null) {
        eventHelperFuture.complete(created);
    }
}
Example usage of io.pravega.controller.task.EventHelper in the pravega project (pravega/pravega):
the getControllerService method of the ZKControllerServiceImplTest class.
// Builds a fully wired, ZooKeeper-backed ControllerService for tests: ZK server/client,
// stream/KVT/bucket stores, metadata tasks with mocked event helpers, request handlers,
// and a single-host cluster registration.
@Override
public ControllerService getControllerService() throws Exception {
    final HostControllerStore hostStore;
    final SegmentHelper segmentHelper = SegmentHelperMock.getSegmentHelperMock();
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    zkServer = new TestingServerStarter().start();
    zkServer.start();
    zkClient = CuratorFrameworkFactory.newClient(zkServer.getConnectString(), new ExponentialBackoffRetry(200, 10, 5000));
    zkClient.start();
    storeClient = StoreClientFactory.createZKStoreClient(zkClient);
    executorService = ExecutorServiceHelpers.newScheduledThreadPool(20, "testpool");
    taskMetadataStore = TaskStoreFactoryForTests.createStore(storeClient, executorService);
    hostStore = HostStoreFactory.createInMemoryStore(HostMonitorConfigImpl.dummyConfig());
    streamStore = StreamStoreFactory.createZKStore(zkClient, executorService);
    // Create the KVT store exactly once; the original built a second, redundant store
    // right after deriving tableEventHelper from the first one, discarding that instance.
    kvtStore = KVTableStoreFactory.createZKStore(zkClient, executorService);
    EventHelper tableEventHelper = EventHelperMock.getEventHelperMock(executorService, "host",
            ((AbstractKVTableMetadataStore) kvtStore).getHostTaskIndex());
    this.kvtMetadataTasks = new TableMetadataTasks(kvtStore, segmentHelper, executorService, executorService,
            "host", GrpcAuthHelper.getDisabledAuthHelper(), tableEventHelper);
    this.tableRequestHandler = new TableRequestHandler(
            new CreateTableTask(this.kvtStore, this.kvtMetadataTasks, executorService),
            new DeleteTableTask(this.kvtStore, this.kvtMetadataTasks, executorService),
            this.kvtStore, executorService);
    BucketStore bucketStore = StreamStoreFactory.createZKBucketStore(zkClient, executorService);
    EventHelper helperMock = EventHelperMock.getEventHelperMock(executorService, "host",
            ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper,
            executorService, "host", GrpcAuthHelper.getDisabledAuthHelper(), helperMock);
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper,
            executorService, "host", GrpcAuthHelper.getDisabledAuthHelper());
    this.streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executorService),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executorService),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executorService),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executorService),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executorService),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executorService),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, executorService),
            executorService);
    streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executorService));
    streamTransactionMetadataTasks.initializeStreamWriters(new EventStreamWriterMock<>(), new EventStreamWriterMock<>());
    tableEventHelper.setRequestEventWriter(new ControllerEventTableWriterMock(tableRequestHandler, executorService));
    cluster = new ClusterZKImpl(zkClient, ClusterType.CONTROLLER);
    // Block until the local host registration is observed by the cluster listener.
    final CountDownLatch latch = new CountDownLatch(1);
    cluster.addListener((type, host) -> latch.countDown());
    cluster.registerHost(new Host("localhost", 9090, null));
    latch.await();
    return new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks,
            streamTransactionMetadataTasks, segmentHelper, executorService, cluster, requestTracker);
}
Example usage of io.pravega.controller.task.EventHelper in the pravega project (pravega/pravega):
the getControllerService method of the InMemoryControllerServiceImplTest class.
// Builds an in-memory ControllerService for tests: in-memory stores, metadata tasks wired
// with mocked event helpers, request handlers, and a mocked single-member cluster.
@Override
public ControllerService getControllerService() {
    executorService = ExecutorServiceHelpers.newScheduledThreadPool(20, "testpool");
    taskMetadataStore = TaskStoreFactoryForTests.createInMemoryStore(executorService);
    streamStore = StreamStoreFactory.createInMemoryStore();
    BucketStore bucketStore = StreamStoreFactory.createInMemoryBucketStore();
    StreamMetrics.initialize();
    TransactionMetrics.initialize();
    segmentHelper = SegmentHelperMock.getSegmentHelperMockForTables(executorService);
    EventHelper streamEventHelper = EventHelperMock.getEventHelperMock(executorService, "host",
            ((AbstractStreamMetadataStore) streamStore).getHostTaskIndex());
    streamMetadataTasks = new StreamMetadataTasks(streamStore, bucketStore, taskMetadataStore, segmentHelper,
            executorService, "host", GrpcAuthHelper.getDisabledAuthHelper(), streamEventHelper);
    streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelper,
            executorService, "host", GrpcAuthHelper.getDisabledAuthHelper());
    this.kvtStore = KVTableStoreFactory.createInMemoryStore(streamStore, executorService);
    EventHelper kvtEventHelper = EventHelperMock.getEventHelperMock(executorService, "host",
            ((AbstractKVTableMetadataStore) kvtStore).getHostTaskIndex());
    this.kvtMetadataTasks = new TableMetadataTasks(kvtStore, segmentHelper, executorService, executorService,
            "host", GrpcAuthHelper.getDisabledAuthHelper(), kvtEventHelper);
    this.tableRequestHandler = new TableRequestHandler(
            new CreateTableTask(this.kvtStore, this.kvtMetadataTasks, executorService),
            new DeleteTableTask(this.kvtStore, this.kvtMetadataTasks, executorService),
            this.kvtStore, executorService);
    this.streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executorService),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executorService),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executorService),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executorService),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executorService),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executorService),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executorService),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtMetadataTasks, executorService),
            executorService);
    streamMetadataTasks.setRequestEventWriter(new ControllerEventStreamWriterMock(streamRequestHandler, executorService));
    streamTransactionMetadataTasks.initializeStreamWriters(new EventStreamWriterMock<>(), new EventStreamWriterMock<>());
    kvtEventHelper.setRequestEventWriter(new ControllerEventTableWriterMock(tableRequestHandler, executorService));
    Cluster clusterMock = mock(Cluster.class);
    when(clusterMock.getClusterMembers()).thenReturn(Collections.singleton(new Host("localhost", 9090, null)));
    // NOTE(review): the ControllerService receives a *fresh* bucket store and a plain
    // segment-helper mock, not the bucketStore/segmentHelper instances wired into
    // streamMetadataTasks above — presumably intentional for this test, but worth confirming
    // against the ZK-backed sibling test, which passes the shared instances.
    return new ControllerService(kvtStore, kvtMetadataTasks, streamStore,
            StreamStoreFactory.createInMemoryBucketStore(), streamMetadataTasks,
            streamTransactionMetadataTasks, SegmentHelperMock.getSegmentHelperMock(),
            executorService, clusterMock, requestTracker);
}
Aggregations