Use of org.opendaylight.controller.cluster.datastore.utils.ActorContext in project controller by opendaylight.
The class IntegrationTestKit, method verifyShardState.
public static void verifyShardState(final AbstractDataStore datastore, final String shardName,
        final Consumer<OnDemandShardState> verifier) throws Exception {
    ActorContext actorContext = datastore.getActorContext();

    Future<ActorRef> future = actorContext.findLocalShardAsync(shardName);
    ActorRef shardActor = Await.result(future, Duration.create(10, TimeUnit.SECONDS));

    // Poll the shard's on-demand state for up to 5 seconds, re-running the
    // verifier until it stops throwing AssertionError.
    AssertionError lastError = null;
    Stopwatch sw = Stopwatch.createStarted();
    while (sw.elapsed(TimeUnit.SECONDS) <= 5) {
        OnDemandShardState shardState = (OnDemandShardState) actorContext
                .executeOperation(shardActor, GetOnDemandRaftState.INSTANCE);
        try {
            verifier.accept(shardState);
            return;
        } catch (AssertionError e) {
            lastError = e;
            Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
        }
    }

    throw lastError;
}
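For context, a test would pass a Consumer whose assertions are retried by the loop above; a minimal hypothetical sketch (the "cars" shard name is illustrative, and it assumes OnDemandShardState exposes getLeader() via its OnDemandRaftState base class):

// Hypothetical usage: poll the "cars" shard until it reports a leader,
// relying on verifyShardState's built-in retry window.
IntegrationTestKit.verifyShardState(dataStore, "cars",
        state -> assertNotNull("shard has no leader", state.getLeader()));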
Use of org.opendaylight.controller.cluster.datastore.utils.ActorContext in project controller by opendaylight.
The class ClusterAdminRpcService, method makeLeaderLocal.
@Override
public Future<RpcResult<Void>> makeLeaderLocal(final MakeLeaderLocalInput input) {
    final String shardName = input.getShardName();
    if (Strings.isNullOrEmpty(shardName)) {
        return newFailedRpcResultFuture("A valid shard name must be specified");
    }

    DataStoreType dataStoreType = input.getDataStoreType();
    if (dataStoreType == null) {
        return newFailedRpcResultFuture("A valid DataStoreType must be specified");
    }

    ActorContext actorContext = dataStoreType == DataStoreType.Config
            ? configDataStore.getActorContext() : operDataStore.getActorContext();

    LOG.info("Moving leader to local node {} for shard {}, datastoreType {}",
            actorContext.getCurrentMemberName().getName(), shardName, dataStoreType);

    final scala.concurrent.Future<ActorRef> localShardReply = actorContext.findLocalShardAsync(shardName);
    final scala.concurrent.Promise<Object> makeLeaderLocalAsk = akka.dispatch.Futures.promise();
    localShardReply.onComplete(new OnComplete<ActorRef>() {
        @Override
        public void onComplete(final Throwable failure, final ActorRef actorRef) throws Throwable {
            if (failure != null) {
                LOG.warn("No local shard found for {} datastoreType {} - cannot request leadership "
                        + "transfer to local shard.", shardName, dataStoreType, failure);
                makeLeaderLocalAsk.failure(failure);
            } else {
                // Ask the local shard to take leadership, bounded by makeLeaderLocalTimeout.
                makeLeaderLocalAsk.completeWith(actorContext.executeOperationAsync(actorRef,
                        MakeLeaderLocal.INSTANCE, makeLeaderLocalTimeout));
            }
        }
    }, actorContext.getClientDispatcher());

    final SettableFuture<RpcResult<Void>> future = SettableFuture.create();
    makeLeaderLocalAsk.future().onComplete(new OnComplete<Object>() {
        @Override
        public void onComplete(final Throwable failure, final Object success) throws Throwable {
            if (failure != null) {
                LOG.error("Leadership transfer failed for shard {}.", shardName, failure);
                future.set(RpcResultBuilder.<Void>failed().withError(ErrorType.APPLICATION,
                        "leadership transfer failed", failure).build());
                return;
            }

            LOG.debug("Leadership transfer complete");
            future.set(RpcResultBuilder.<Void>success().build());
        }
    }, actorContext.getClientDispatcher());

    return future;
}
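As a usage illustration, an RPC consumer could request the transfer as follows; a sketch assuming the MakeLeaderLocalInputBuilder generated from the cluster-admin YANG model and a clusterAdminRpcService reference obtained elsewhere:

// Hypothetical invocation: ask the local node to take leadership of the
// "cars" shard in the config datastore.
MakeLeaderLocalInput input = new MakeLeaderLocalInputBuilder()
        .setShardName("cars")
        .setDataStoreType(DataStoreType.Config)
        .build();
Future<RpcResult<Void>> result = clusterAdminRpcService.makeLeaderLocal(input);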
Use of org.opendaylight.controller.cluster.datastore.utils.ActorContext in project controller by opendaylight.
The class ClusterAdminRpcService, method sendMessageToManagerForConfiguredShards.
private <T> void sendMessageToManagerForConfiguredShards(DataStoreType dataStoreType,
        List<Entry<ListenableFuture<T>, ShardResultBuilder>> shardResultData,
        Function<String, Object> messageSupplier) {
    ActorContext actorContext = dataStoreType == DataStoreType.Config
            ? configDataStore.getActorContext() : operDataStore.getActorContext();
    Set<String> allShardNames = actorContext.getConfiguration().getAllShardNames();

    LOG.debug("Sending message to all shards {} for data store {}", allShardNames,
            actorContext.getDataStoreName());

    for (String shardName : allShardNames) {
        ListenableFuture<T> future = this.ask(actorContext.getShardManager(),
                messageSupplier.apply(shardName), SHARD_MGR_TIMEOUT);
        shardResultData.add(new SimpleEntry<>(future,
                new ShardResultBuilder().setShardName(shardName).setDataStoreType(dataStoreType)));
    }
}
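For illustration, a caller supplies a per-shard message factory and collects one future per configured shard; a hedged sketch in which the GetShardRole message and its reply type stand in for whatever the concrete RPC actually sends:

// Hypothetical call site: fan one message out to every configured shard.
List<Entry<ListenableFuture<GetShardRoleReply>, ShardResultBuilder>> shardResultData = new ArrayList<>();
sendMessageToManagerForConfiguredShards(DataStoreType.Config, shardResultData,
        shardName -> new GetShardRole(shardName));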
Use of org.opendaylight.controller.cluster.datastore.utils.ActorContext in project controller by opendaylight.
The class DistributedShardFrontendTest, method testClientTransaction.
@Test
public void testClientTransaction() throws Exception {
    final DistributedDataStore distributedDataStore = mock(DistributedDataStore.class);
    final ActorContext context = mock(ActorContext.class);
    doReturn(context).when(distributedDataStore).getActorContext();
    doReturn(SchemaContextHelper.full()).when(context).getSchemaContext();

    final DistributedShardFrontend rootShard = new DistributedShardFrontend(distributedDataStore, client, ROOT);
    try (DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(Collections.singletonList(ROOT))) {
        shardedDOMDataTree.registerDataTreeShard(ROOT, rootShard, producer);
    }

    final DataStoreClient outerListClient = mock(DataStoreClient.class);
    final ClientTransaction outerListClientTransaction = mock(ClientTransaction.class);
    final ClientLocalHistory outerListClientHistory = mock(ClientLocalHistory.class);
    final DOMDataTreeWriteCursor outerListCursor = mock(DOMDataTreeWriteCursor.class);

    doNothing().when(outerListCursor).close();
    doNothing().when(outerListCursor).write(any(), any());
    doNothing().when(outerListCursor).merge(any(), any());
    doNothing().when(outerListCursor).delete(any());
    doReturn(outerListCursor).when(outerListClientTransaction).openCursor();
    doReturn(outerListClientTransaction).when(outerListClient).createTransaction();
    doReturn(outerListClientHistory).when(outerListClient).createLocalHistory();
    doReturn(outerListClientTransaction).when(outerListClientHistory).createTransaction();
    doReturn(commitCohort).when(outerListClientTransaction).ready();
    doNothing().when(outerListClientHistory).close();
    doNothing().when(outerListClient).close();

    final DistributedShardFrontend outerListShard = new DistributedShardFrontend(
            distributedDataStore, outerListClient, OUTER_LIST_ID);
    try (DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(
            Collections.singletonList(OUTER_LIST_ID))) {
        shardedDOMDataTree.registerDataTreeShard(OUTER_LIST_ID, outerListShard, producer);
    }

    final DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(Collections.singletonList(ROOT));
    final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(false);
    final DOMDataTreeWriteCursor txCursor = tx.createCursor(ROOT);
    assertNotNull(txCursor);

    txCursor.write(TestModel.TEST_PATH.getLastPathArgument(), createCrossShardContainer());

    // check the lower shard got the correct modification
    verify(outerListCursor, times(2)).write(pathArgumentCaptor.capture(), nodeCaptor.capture());

    final YangInstanceIdentifier.PathArgument expectedYid = new NodeIdentifier(TestModel.ID_QNAME);
    final YangInstanceIdentifier.PathArgument actualIdYid = pathArgumentCaptor.getAllValues().get(0);
    assertEquals(expectedYid, actualIdYid);

    final YangInstanceIdentifier.PathArgument expectedInnerYid = new NodeIdentifier(TestModel.INNER_LIST_QNAME);
    final YangInstanceIdentifier.PathArgument actualInnerListYid = pathArgumentCaptor.getAllValues().get(1);
    assertEquals(expectedInnerYid, actualInnerListYid);

    final LeafNode<Integer> actualIdNode = (LeafNode<Integer>) nodeCaptor.getAllValues().get(0);
    assertEquals(ImmutableNodes.leafNode(TestModel.ID_QNAME, 1), actualIdNode);

    final MapNode actualInnerListNode = (MapNode) nodeCaptor.getAllValues().get(1);
    assertEquals(createInnerMapNode(1), actualInnerListNode);

    txCursor.close();
    tx.submit().checkedGet();

    verify(commitCohort, times(2)).canCommit();
    verify(commitCohort, times(2)).preCommit();
    verify(commitCohort, times(2)).commit();
}
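The verification steps above capture arguments with two Mockito captors declared elsewhere in the test class; a plausible form of those declarations, inferred from their usage (the @Captor-based initialization is an assumption):

// Assumed declarations backing pathArgumentCaptor and nodeCaptor above.
@Captor
private ArgumentCaptor<YangInstanceIdentifier.PathArgument> pathArgumentCaptor;
@Captor
private ArgumentCaptor<NormalizedNode<?, ?>> nodeCaptor;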
Use of org.opendaylight.controller.cluster.datastore.utils.ActorContext in project controller by opendaylight.
The class DistributedShardedDOMDataTreeTest, method testSingleNodeWritesAndRead.
@Test
public void testSingleNodeWritesAndRead() throws Exception {
    initEmptyDatastores();

    final DistributedShardRegistration shardRegistration = waitOnAsyncTask(
            leaderShardFactory.createDistributedShard(TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME)),
            DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

    leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorContext(),
            ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));

    final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
    final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(true);
    final DOMDataTreeWriteCursor cursor = tx.createCursor(TEST_ID);
    Assert.assertNotNull(cursor);

    final YangInstanceIdentifier nameId =
            YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(TestModel.NAME_QNAME).build();
    final LeafNode<String> valueToCheck = ImmutableLeafNodeBuilder.<String>create()
            .withNodeIdentifier(new NodeIdentifier(TestModel.NAME_QNAME)).withValue("Test Value").build();

    LOG.debug("Writing data {} at {}, cursor {}", valueToCheck, nameId.getLastPathArgument(), cursor);
    cursor.write(nameId.getLastPathArgument(), valueToCheck);
    cursor.close();

    LOG.debug("Got to pre submit");
    tx.submit().checkedGet();

    final DOMDataTreeListener mockedDataTreeListener = mock(DOMDataTreeListener.class);
    doNothing().when(mockedDataTreeListener).onDataTreeChanged(anyCollection(), anyMap());

    leaderShardFactory.registerListener(mockedDataTreeListener, Collections.singletonList(TEST_ID),
            true, Collections.emptyList());

    verify(mockedDataTreeListener, timeout(1000).times(1))
            .onDataTreeChanged(captorForChanges.capture(), captorForSubtrees.capture());
    final List<Collection<DataTreeCandidate>> capturedValue = captorForChanges.getAllValues();

    final java.util.Optional<NormalizedNode<?, ?>> dataAfter =
            capturedValue.get(0).iterator().next().getRootNode().getDataAfter();
    final NormalizedNode<?, ?> expected = ImmutableContainerNodeBuilder.create()
            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME)).withChild(valueToCheck).build();
    assertEquals(expected, dataAfter.get());

    verifyNoMoreInteractions(mockedDataTreeListener);

    final String shardName = ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier());
    LOG.debug("Creating distributed datastore client for shard {}", shardName);

    final ActorContext actorContext = leaderDistributedDataStore.getActorContext();
    final Props distributedDataStoreClientProps = SimpleDataStoreClientActor.props(
            actorContext.getCurrentMemberName(), "Shard-" + shardName, actorContext, shardName);
    final ActorRef clientActor = leaderSystem.actorOf(distributedDataStoreClientProps);
    final DataStoreClient distributedDataStoreClient =
            SimpleDataStoreClientActor.getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);

    final ClientLocalHistory localHistory = distributedDataStoreClient.createLocalHistory();
    final ClientTransaction tx2 = localHistory.createTransaction();
    final CheckedFuture<Optional<NormalizedNode<?, ?>>,
            org.opendaylight.mdsal.common.api.ReadFailedException> read = tx2.read(YangInstanceIdentifier.EMPTY);
    final Optional<NormalizedNode<?, ?>> optional = read.checkedGet();

    tx2.abort();
    localHistory.close();
    shardRegistration.close().toCompletableFuture().get();
}
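The snippet reads the shard root into optional but never asserts on it; a natural follow-up check, assuming the earlier write leaves data under the root, would be:

// Hypothetical assertion on the read performed above.
Assert.assertTrue("Expected data under the root after the write", optional.isPresent());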