Use of org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient in project controller by opendaylight.
The class DistributedShardedDOMDataTree, method createShardFrontend:
private void createShardFrontend(final DOMDataTreeIdentifier prefix) {
    LOG.debug("{}: Creating CDS shard for prefix: {}", memberName, prefix);
    final String shardName = ClusterUtils.getCleanShardName(prefix.getRootIdentifier());
    final AbstractDataStore distributedDataStore =
            prefix.getDatastoreType().equals(org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION)
                    ? distributedConfigDatastore : distributedOperDatastore;

    try (DOMDataTreeProducer producer = localCreateProducer(Collections.singletonList(prefix))) {
        final Entry<DataStoreClient, ActorRef> entry =
                createDatastoreClient(shardName, distributedDataStore.getActorContext());

        final DistributedShardFrontend shard =
                new DistributedShardFrontend(distributedDataStore, entry.getKey(), prefix);

        final DOMDataTreeShardRegistration<DOMDataTreeShard> reg =
                shardedDOMDataTree.registerDataTreeShard(prefix, shard, producer);

        synchronized (shards) {
            shards.store(prefix, reg);
        }
    } catch (final DOMDataTreeShardingConflictException e) {
        LOG.error("{}: Prefix {} is already occupied by another shard",
                distributedConfigDatastore.getActorContext().getClusterWrapper().getCurrentMemberName(), prefix, e);
    } catch (DOMDataTreeProducerException e) {
        LOG.error("Unable to close producer", e);
    } catch (DOMDataTreeShardCreationFailedException e) {
        LOG.error("Unable to create datastore client for shard {}", prefix, e);
    }
}
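The createDatastoreClient helper invoked above is not part of this excerpt. A minimal sketch of what it plausibly does, assuming it wraps the SimpleDataStoreClientActor pattern used in the later examples here (the helper's name and return shape come from the call site; the body, the actorSystem field, and the use of java.util.AbstractMap.SimpleEntry are assumptions):

// Hypothetical reconstruction based on the SimpleDataStoreClientActor usage shown in
// the examples below; not the verbatim implementation from the controller project.
private Entry<DataStoreClient, ActorRef> createDatastoreClient(final String shardName,
        final ActorContext actorContext) throws DOMDataTreeShardCreationFailedException {
    LOG.debug("{}: Creating distributed datastore client for shard {}", memberName, shardName);
    final Props props = SimpleDataStoreClientActor.props(
            actorContext.getCurrentMemberName(), "Shard-" + shardName, actorContext, shardName);
    final ActorRef clientActor = actorSystem.actorOf(props);
    try {
        // Block until the client actor has resolved its backend, as the later examples do.
        final DataStoreClient client =
                SimpleDataStoreClientActor.getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
        return new SimpleEntry<>(client, clientActor);
    } catch (RuntimeException e) {
        // Tear the actor down if the client could not be materialized.
        clientActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
        throw new DOMDataTreeShardCreationFailedException("Unable to create datastore client", e);
    }
}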
Use of org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient in project controller by opendaylight.
The class DistributedShardFrontendTest, method testClientTransaction:
@Test
public void testClientTransaction() throws Exception {
    final DistributedDataStore distributedDataStore = mock(DistributedDataStore.class);
    final ActorContext context = mock(ActorContext.class);
    doReturn(context).when(distributedDataStore).getActorContext();
    doReturn(SchemaContextHelper.full()).when(context).getSchemaContext();

    final DistributedShardFrontend rootShard =
            new DistributedShardFrontend(distributedDataStore, client, ROOT);

    try (DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(Collections.singletonList(ROOT))) {
        shardedDOMDataTree.registerDataTreeShard(ROOT, rootShard, producer);
    }

    final DataStoreClient outerListClient = mock(DataStoreClient.class);
    final ClientTransaction outerListClientTransaction = mock(ClientTransaction.class);
    final ClientLocalHistory outerListClientHistory = mock(ClientLocalHistory.class);
    final DOMDataTreeWriteCursor outerListCursor = mock(DOMDataTreeWriteCursor.class);

    doNothing().when(outerListCursor).close();
    doNothing().when(outerListCursor).write(any(), any());
    doNothing().when(outerListCursor).merge(any(), any());
    doNothing().when(outerListCursor).delete(any());
    doReturn(outerListCursor).when(outerListClientTransaction).openCursor();
    doReturn(outerListClientTransaction).when(outerListClient).createTransaction();
    doReturn(outerListClientHistory).when(outerListClient).createLocalHistory();
    doReturn(outerListClientTransaction).when(outerListClientHistory).createTransaction();
    doReturn(commitCohort).when(outerListClientTransaction).ready();
    doNothing().when(outerListClientHistory).close();
    doNothing().when(outerListClient).close();

    final DistributedShardFrontend outerListShard =
            new DistributedShardFrontend(distributedDataStore, outerListClient, OUTER_LIST_ID);

    try (DOMDataTreeProducer producer =
            shardedDOMDataTree.createProducer(Collections.singletonList(OUTER_LIST_ID))) {
        shardedDOMDataTree.registerDataTreeShard(OUTER_LIST_ID, outerListShard, producer);
    }

    final DOMDataTreeProducer producer = shardedDOMDataTree.createProducer(Collections.singletonList(ROOT));
    final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(false);
    final DOMDataTreeWriteCursor txCursor = tx.createCursor(ROOT);
    assertNotNull(txCursor);

    txCursor.write(TestModel.TEST_PATH.getLastPathArgument(), createCrossShardContainer());

    // Check that the lower shard received the correct modifications.
    verify(outerListCursor, times(2)).write(pathArgumentCaptor.capture(), nodeCaptor.capture());

    final YangInstanceIdentifier.PathArgument expectedYid = new NodeIdentifier(TestModel.ID_QNAME);
    final YangInstanceIdentifier.PathArgument actualIdYid = pathArgumentCaptor.getAllValues().get(0);
    assertEquals(expectedYid, actualIdYid);

    final YangInstanceIdentifier.PathArgument expectedInnerYid = new NodeIdentifier(TestModel.INNER_LIST_QNAME);
    final YangInstanceIdentifier.PathArgument actualInnerListYid = pathArgumentCaptor.getAllValues().get(1);
    assertEquals(expectedInnerYid, actualInnerListYid);

    final LeafNode<Integer> actualIdNode = (LeafNode<Integer>) nodeCaptor.getAllValues().get(0);
    assertEquals(ImmutableNodes.leafNode(TestModel.ID_QNAME, 1), actualIdNode);

    final MapNode actualInnerListNode = (MapNode) nodeCaptor.getAllValues().get(1);
    assertEquals(createInnerMapNode(1), actualInnerListNode);

    txCursor.close();
    tx.submit().checkedGet();

    verify(commitCohort, times(2)).canCommit();
    verify(commitCohort, times(2)).preCommit();
    verify(commitCohort, times(2)).commit();
}
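The test relies on two helpers, createCrossShardContainer() and createInnerMapNode(int), that are not shown in this excerpt. A plausible sketch consistent with the assertions above (an outer-list entry keyed by id = 1 that carries an inner list), assuming the yangtools ImmutableNodes utilities and the TestModel constants already used in the test; the project's actual bodies may differ:

// Hypothetical helpers inferred from the assertions; not the project's verbatim code.
private static MapNode createInnerMapNode(final int id) {
    // inner-list with a single entry keyed by name; the "name-<id>" key value is an assumption.
    return ImmutableNodes.mapNodeBuilder(TestModel.INNER_LIST_QNAME)
            .withChild(ImmutableNodes.mapEntry(TestModel.INNER_LIST_QNAME, TestModel.NAME_QNAME, "name-" + id))
            .build();
}

private static ContainerNode createCrossShardContainer() {
    final MapEntryNode outerListEntry = ImmutableNodes.mapEntryBuilder(
                TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, 1)
            .withChild(createInnerMapNode(1))
            .build();

    // Container rooted at TEST_QNAME whose outer list spans into the OUTER_LIST_ID shard.
    return ImmutableContainerNodeBuilder.create()
            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
            .withChild(ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME)
                    .withChild(outerListEntry)
                    .build())
            .build();
}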
Use of org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient in project controller by opendaylight.
The class DistributedShardedDOMDataTreeTest, method testSingleNodeWritesAndRead:
@Test
public void testSingleNodeWritesAndRead() throws Exception {
    initEmptyDatastores();

    final DistributedShardRegistration shardRegistration = waitOnAsyncTask(
            leaderShardFactory.createDistributedShard(TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME)),
            DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

    leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorContext(),
            ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));

    final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
    final DOMDataTreeCursorAwareTransaction tx = producer.createTransaction(true);
    final DOMDataTreeWriteCursor cursor = tx.createCursor(TEST_ID);
    Assert.assertNotNull(cursor);

    final YangInstanceIdentifier nameId =
            YangInstanceIdentifier.builder(TestModel.TEST_PATH).node(TestModel.NAME_QNAME).build();
    final LeafNode<String> valueToCheck = ImmutableLeafNodeBuilder.<String>create()
            .withNodeIdentifier(new NodeIdentifier(TestModel.NAME_QNAME)).withValue("Test Value").build();

    LOG.debug("Writing data {} at {}, cursor {}", nameId.getLastPathArgument(), valueToCheck, cursor);
    cursor.write(nameId.getLastPathArgument(), valueToCheck);
    cursor.close();

    LOG.debug("Got to pre submit");
    tx.submit().checkedGet();

    final DOMDataTreeListener mockedDataTreeListener = mock(DOMDataTreeListener.class);
    doNothing().when(mockedDataTreeListener).onDataTreeChanged(anyCollection(), anyMap());

    leaderShardFactory.registerListener(mockedDataTreeListener, Collections.singletonList(TEST_ID),
            true, Collections.emptyList());

    verify(mockedDataTreeListener, timeout(1000).times(1))
            .onDataTreeChanged(captorForChanges.capture(), captorForSubtrees.capture());
    final List<Collection<DataTreeCandidate>> capturedValue = captorForChanges.getAllValues();

    final java.util.Optional<NormalizedNode<?, ?>> dataAfter =
            capturedValue.get(0).iterator().next().getRootNode().getDataAfter();

    final NormalizedNode<?, ?> expected = ImmutableContainerNodeBuilder.create()
            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME)).withChild(valueToCheck).build();
    assertEquals(expected, dataAfter.get());

    verifyNoMoreInteractions(mockedDataTreeListener);

    final String shardName = ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier());
    LOG.debug("Creating distributed datastore client for shard {}", shardName);

    final ActorContext actorContext = leaderDistributedDataStore.getActorContext();
    final Props distributedDataStoreClientProps =
            SimpleDataStoreClientActor.props(actorContext.getCurrentMemberName(),
                    "Shard-" + shardName, actorContext, shardName);

    final ActorRef clientActor = leaderSystem.actorOf(distributedDataStoreClientProps);
    final DataStoreClient distributedDataStoreClient =
            SimpleDataStoreClientActor.getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);

    final ClientLocalHistory localHistory = distributedDataStoreClient.createLocalHistory();
    final ClientTransaction tx2 = localHistory.createTransaction();
    final CheckedFuture<Optional<NormalizedNode<?, ?>>, org.opendaylight.mdsal.common.api.ReadFailedException> read =
            tx2.read(YangInstanceIdentifier.EMPTY);

    final Optional<NormalizedNode<?, ?>> optional = read.checkedGet();
    tx2.abort();
    localHistory.close();

    shardRegistration.close().toCompletableFuture().get();
}
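Note that this test spawns a client actor but never stops it, and it uses the client for a single read. A condensed sketch of the same DataStoreClient read pattern with the explicit cleanup that the next example performs (names follow the snippets above and below; the surrounding harness is assumed):

// Condensed sketch of the read-only DataStoreClient pattern, assuming the same
// SimpleDataStoreClientActor APIs used in the surrounding examples.
final Props props = SimpleDataStoreClientActor.props(
        actorContext.getCurrentMemberName(), "Shard-" + shardName, actorContext, shardName);
final ActorRef clientActor = leaderSystem.actorOf(props);
final DataStoreClient client =
        SimpleDataStoreClientActor.getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
try {
    final ClientLocalHistory history = client.createLocalHistory();
    final ClientTransaction readTx = history.createTransaction();
    final Optional<NormalizedNode<?, ?>> result = readTx.read(YangInstanceIdentifier.EMPTY).checkedGet();
    readTx.abort();       // read-only use: abort instead of ready()/commit
    history.close();
} finally {
    client.close();
    clientActor.tell(PoisonPill.getInstance(), ActorRef.noSender());  // stop the client actor
}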
Use of org.opendaylight.controller.cluster.databroker.actors.dds.DataStoreClient in project controller by opendaylight.
The class MdsalLowLevelTestProvider, method unsubscribeDdtl:
@Override
@SuppressWarnings("checkstyle:IllegalCatch")
public Future<RpcResult<UnsubscribeDdtlOutput>> unsubscribeDdtl() {
    LOG.debug("Received unsubscribe-ddtl.");

    if (idIntsDdtl == null || ddtlReg == null) {
        final RpcError error = RpcResultBuilder.newError(
                ErrorType.RPC, "Ddtl missing.", "No DOMDataTreeListener registered.");
        return Futures.immediateFuture(
                RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withRpcError(error).build());
    }

    try {
        idIntsDdtl.tryFinishProcessing().get(120, TimeUnit.SECONDS);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        final RpcError error = RpcResultBuilder.newError(
                ErrorType.RPC, "resource-denied-transport",
                "Unable to finish notification processing in 120 seconds.",
                "clustering-it", "clustering-it", e);
        return Futures.immediateFuture(
                RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withRpcError(error).build());
    }

    ddtlReg.close();
    ddtlReg = null;

    if (!idIntsDdtl.hasTriggered()) {
        final RpcError error = RpcResultBuilder.newError(ErrorType.APPLICATION, "No notification received.",
                "id-ints listener has not received any notifications.");
        return Futures.immediateFuture(
                RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withRpcError(error).build());
    }

    final String shardName = ClusterUtils.getCleanShardName(ProduceTransactionsHandler.ID_INTS_YID);
    LOG.debug("Creating distributed datastore client for shard {}", shardName);

    final ActorContext actorContext = configDataStore.getActorContext();
    final Props distributedDataStoreClientProps =
            SimpleDataStoreClientActor.props(actorContext.getCurrentMemberName(),
                    "Shard-" + shardName, actorContext, shardName);

    final ActorRef clientActor = actorSystem.actorOf(distributedDataStoreClientProps);
    final DataStoreClient distributedDataStoreClient;
    try {
        distributedDataStoreClient =
                SimpleDataStoreClientActor.getDistributedDataStoreClient(clientActor, 30, TimeUnit.SECONDS);
    } catch (RuntimeException e) {
        LOG.error("Failed to get actor for {}", distributedDataStoreClientProps, e);
        clientActor.tell(PoisonPill.getInstance(), noSender());
        final RpcError error = RpcResultBuilder.newError(ErrorType.APPLICATION,
                "Unable to create ds client for read.", "Unable to create ds client for read.");
        return Futures.immediateFuture(
                RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withRpcError(error).build());
    }

    final ClientLocalHistory localHistory = distributedDataStoreClient.createLocalHistory();
    final ClientTransaction tx = localHistory.createTransaction();
    final CheckedFuture<Optional<NormalizedNode<?, ?>>, org.opendaylight.mdsal.common.api.ReadFailedException> read =
            tx.read(YangInstanceIdentifier.of(ProduceTransactionsHandler.ID_INT));

    tx.abort();
    localHistory.close();

    try {
        final Optional<NormalizedNode<?, ?>> optional = read.checkedGet();
        if (!optional.isPresent()) {
            LOG.warn("Final read from client is empty.");
            final RpcError error = RpcResultBuilder.newError(ErrorType.APPLICATION,
                    "Read failed.", "Final read from id-ints is empty.");
            return Futures.immediateFuture(
                    RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withRpcError(error).build());
        }

        return Futures.immediateFuture(RpcResultBuilder.success(
                new UnsubscribeDdtlOutputBuilder().setCopyMatches(idIntsDdtl.checkEqual(optional.get()))).build());
    } catch (org.opendaylight.mdsal.common.api.ReadFailedException e) {
        LOG.error("Unable to read data to verify ddtl data.", e);
        final RpcError error = RpcResultBuilder.newError(ErrorType.APPLICATION,
                "Read failed.", "Final read from id-ints failed.");
        return Futures.immediateFuture(
                RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withRpcError(error).build());
    } finally {
        distributedDataStoreClient.close();
        clientActor.tell(PoisonPill.getInstance(), noSender());
    }
}
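The same failed-RpcResult construction recurs five times in this method. A small extraction that would consolidate it, shown as a sketch (the failDdtl helper name is hypothetical, not part of the project):

// Hypothetical helper consolidating the repeated failure pattern above.
private static Future<RpcResult<UnsubscribeDdtlOutput>> failDdtl(final RpcError error) {
    return Futures.immediateFuture(
            RpcResultBuilder.<UnsubscribeDdtlOutput>failed().withRpcError(error).build());
}

Each early return in unsubscribeDdtl would then collapse to return failDdtl(error);.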