Use of org.opendaylight.controller.cluster.sharding.DistributedShardFactory.DistributedShardRegistration in project controller by opendaylight.
The class DistributedShardedDOMDataTreeRemotingTest, method testMultipleShardRegistrations.
@Test
public void testMultipleShardRegistrations() throws Exception {
    LOG.info("testMultipleShardRegistrations starting");
    initEmptyDatastores();

    // Register four distributed shards at distinct prefixes, each replicated to both members.
    final DistributedShardRegistration reg1 = waitOnAsyncTask(
            leaderShardFactory.createDistributedShard(TEST_ID,
                    Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
            DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
    final DistributedShardRegistration reg2 = waitOnAsyncTask(
            leaderShardFactory.createDistributedShard(
                    new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION,
                            TestModel.OUTER_CONTAINER_PATH),
                    Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
            DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
    final DistributedShardRegistration reg3 = waitOnAsyncTask(
            leaderShardFactory.createDistributedShard(
                    new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION,
                            TestModel.INNER_LIST_PATH),
                    Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
            DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
    final DistributedShardRegistration reg4 = waitOnAsyncTask(
            leaderShardFactory.createDistributedShard(
                    new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION,
                            TestModel.JUNK_PATH),
                    Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
            DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

    // Wait until each backend shard has elected a leader.
    leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
    leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
    leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
    leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));

    // Check that the leader has local shards.
    assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
    assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH)));
    assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH)));
    assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));

    // Check that the follower has local shards.
    assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
    assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH)));
    assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH)));
    assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.JUNK_PATH)));

    LOG.debug("Closing registrations");
    reg1.close().toCompletableFuture().get();
    reg2.close().toCompletableFuture().get();
    reg3.close().toCompletableFuture().get();
    reg4.close().toCompletableFuture().get();

    // Closing a registration shuts the backend shard down on every replica.
    waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
    waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
    waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
    waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));
    LOG.debug("All leader shards gone");

    waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
    waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.OUTER_CONTAINER_PATH));
    waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.INNER_LIST_PATH));
    waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TestModel.JUNK_PATH));
    LOG.debug("All follower shards gone");

    LOG.info("testMultipleShardRegistrations ending");
}
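The four per-prefix waits and assertions above repeat one pattern and could be driven from a list of paths. A minimal sketch of that condensation; the paths list is introduced here for illustration, everything else comes from the test above:

    final List<YangInstanceIdentifier> paths = Arrays.asList(TestModel.TEST_PATH,
            TestModel.OUTER_CONTAINER_PATH, TestModel.INNER_LIST_PATH, TestModel.JUNK_PATH);
    for (final YangInstanceIdentifier path : paths) {
        final String shardName = ClusterUtils.getCleanShardName(path);
        // wait for a leader, then verify both members host a local replica
        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(), shardName);
        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(), shardName));
        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(), shardName));
    }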
Use of org.opendaylight.controller.cluster.sharding.DistributedShardFactory.DistributedShardRegistration in project controller by opendaylight.
The class DistributedShardedDOMDataTreeRemotingTest, method testMultipleRegistrationsAtOnePrefix.
@Test
public void testMultipleRegistrationsAtOnePrefix() throws Exception {
    LOG.info("testMultipleRegistrationsAtOnePrefix starting");
    initEmptyDatastores();

    // Repeatedly create and destroy a shard at the same prefix to verify
    // the prefix can be re-registered once its registration is closed.
    for (int i = 0; i < 5; i++) {
        LOG.info("Round {}", i);
        final DistributedShardRegistration reg1 = waitOnAsyncTask(
                leaderShardFactory.createDistributedShard(TEST_ID,
                        Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));
        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));

        // The leader should see exactly one peer: the follower replica.
        final Set<String> peers = new HashSet<>();
        IntegrationTestKit.verifyShardState(leaderConfigDatastore,
                ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()),
                onDemandShardState -> peers.addAll(onDemandShardState.getPeerAddresses().values()));
        assertEquals(1, peers.size());

        waitOnAsyncTask(reg1.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
        waitUntilShardIsDown(leaderConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
        waitUntilShardIsDown(followerConfigDatastore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
    }

    LOG.info("testMultipleRegistrationsAtOnePrefix ending");
}
Use of org.opendaylight.controller.cluster.sharding.DistributedShardFactory.DistributedShardRegistration in project controller by opendaylight.
The class DistributedShardedDOMDataTreeRemotingTest, method testProducerRegistrations.
@Test
public void testProducerRegistrations() throws Exception {
    LOG.info("testProducerRegistrations starting");
    initEmptyDatastores();
    leaderTestKit.waitForMembersUp("member-2");

    // TODO refactor shard creation and verification to own method
    final DistributedShardRegistration shardRegistration = waitOnAsyncTask(
            leaderShardFactory.createDistributedShard(TEST_ID,
                    Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
            DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
    leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));

    final ActorRef leaderShardManager = leaderConfigDatastore.getActorContext().getShardManager();
    assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));
    assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));

    final Set<String> peers = new HashSet<>();
    IntegrationTestKit.verifyShardState(leaderConfigDatastore,
            ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()),
            onDemandShardState -> peers.addAll(onDemandShardState.getPeerAddresses().values()));
    assertEquals(1, peers.size());

    // A producer attached to a prefix on one node must block producer
    // creation for the same prefix on the other node, and vice versa.
    final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
    try {
        followerShardFactory.createProducer(Collections.singleton(TEST_ID));
        fail("Producer should be already registered on the other node");
    } catch (final IllegalArgumentException e) {
        assertTrue(e.getMessage().contains("is attached to producer"));
    }
    producer.close();

    final DOMDataTreeProducer followerProducer =
            followerShardFactory.createProducer(Collections.singleton(TEST_ID));
    try {
        leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
        fail("Producer should be already registered on the other node");
    } catch (final IllegalArgumentException e) {
        assertTrue(e.getMessage().contains("is attached to producer"));
    }
    followerProducer.close();

    // Try to create a shard on an already-registered prefix on the follower.
    try {
        waitOnAsyncTask(
                followerShardFactory.createDistributedShard(TEST_ID,
                        Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
        fail("This prefix already should have a shard registration that was forwarded from the other node");
    } catch (final DOMDataTreeShardingConflictException e) {
        assertTrue(e.getMessage().contains("is already occupied by another shard"));
    }

    shardRegistration.close().toCompletableFuture().get();
    LOG.info("testProducerRegistrations ending");
}
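The TODO in the snippet above suggests extracting shard creation and verification into its own method. A minimal sketch of such a helper, reusing only calls and fields that appear in the tests; the name createAndVerifyShard is invented here:

    // Hypothetical helper following the TODO above: create a shard on both
    // members, wait for leadership, and verify local replicas exist.
    private DistributedShardRegistration createAndVerifyShard(final DOMDataTreeIdentifier prefix)
            throws Exception {
        final DistributedShardRegistration registration = waitOnAsyncTask(
                leaderShardFactory.createDistributedShard(prefix,
                        Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        final String shardName = ClusterUtils.getCleanShardName(prefix.getRootIdentifier());
        leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(), shardName);

        // both members should host a local replica of the new shard
        assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(), shardName));
        assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(), shardName));
        return registration;
    }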
Use of org.opendaylight.controller.cluster.sharding.DistributedShardFactory.DistributedShardRegistration in project controller by opendaylight.
The class DistributedShardedDOMDataTreeTest, method testMultipleShardLevels.
// Top-level shard at the TEST element, with subshards on each outer-list map entry.
@Test
@Ignore
public void testMultipleShardLevels() throws Exception {
    initEmptyDatastores();

    final DistributedShardRegistration testShardReg = waitOnAsyncTask(
            leaderShardFactory.createDistributedShard(TEST_ID, SINGLE_MEMBER),
            DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

    // Create one subshard per outer-list entry.
    final ArrayList<DistributedShardRegistration> registrations = new ArrayList<>();
    final int listSize = 5;
    for (int i = 0; i < listSize; i++) {
        final YangInstanceIdentifier entryYID = getOuterListIdFor(i);
        final CompletionStage<DistributedShardRegistration> future =
                leaderShardFactory.createDistributedShard(
                        new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, entryYID),
                        SINGLE_MEMBER);
        registrations.add(waitOnAsyncTask(future,
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION));
    }

    // Write an empty outer list through a producer rooted at the datastore root.
    final DOMDataTreeIdentifier rootId =
            new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY);
    final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singletonList(rootId));

    DOMDataTreeCursorAwareTransaction transaction = producer.createTransaction(false);
    DOMDataTreeWriteCursor cursor = transaction.createCursor(rootId);
    assertNotNull(cursor);

    final MapNode outerList = ImmutableMapNodeBuilder.create()
            .withNodeIdentifier(new NodeIdentifier(TestModel.OUTER_LIST_QNAME)).build();
    final ContainerNode testNode = ImmutableContainerNodeBuilder.create()
            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
            .withChild(outerList).build();
    cursor.write(testNode.getIdentifier(), testNode);
    cursor.close();
    transaction.submit().checkedGet();

    final DOMDataTreeListener mockedDataTreeListener = mock(DOMDataTreeListener.class);
    doNothing().when(mockedDataTreeListener).onDataTreeChanged(anyCollection(), anyMap());

    // Populate the outer list; each entry lands in its own subshard.
    final MapNode wholeList = ImmutableMapNodeBuilder.create(outerList)
            .withValue(createOuterEntries(listSize, "testing-values")).build();
    transaction = producer.createTransaction(false);
    cursor = transaction.createCursor(TEST_ID);
    assertNotNull(cursor);
    cursor.write(wholeList.getIdentifier(), wholeList);
    cursor.close();
    transaction.submit().checkedGet();

    // The listener should see the aggregated state of the parent shard and its subshards.
    leaderShardFactory.registerListener(mockedDataTreeListener, Collections.singletonList(TEST_ID),
            true, Collections.emptyList());
    verify(mockedDataTreeListener, timeout(35000).atLeast(2))
            .onDataTreeChanged(captorForChanges.capture(), captorForSubtrees.capture());
    verifyNoMoreInteractions(mockedDataTreeListener);

    final List<Map<DOMDataTreeIdentifier, NormalizedNode<?, ?>>> allSubtrees = captorForSubtrees.getAllValues();
    final Map<DOMDataTreeIdentifier, NormalizedNode<?, ?>> lastSubtree = allSubtrees.get(allSubtrees.size() - 1);
    final NormalizedNode<?, ?> actual = lastSubtree.get(TEST_ID);
    assertNotNull(actual);

    final NormalizedNode<?, ?> expected = ImmutableContainerNodeBuilder.create()
            .withNodeIdentifier(new NodeIdentifier(TestModel.TEST_QNAME))
            .withChild(ImmutableMapNodeBuilder.create(outerList)
                    .withValue(createOuterEntries(listSize, "testing-values")).build())
            .build();

    for (final DistributedShardRegistration registration : registrations) {
        waitOnAsyncTask(registration.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
    }
    waitOnAsyncTask(testShardReg.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
    assertEquals(expected, actual);
}
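The snippet relies on a createOuterEntries helper that the excerpt does not show. A hypothetical reconstruction, assuming outer-list entries are keyed by TestModel.ID_QNAME and carry a name leaf; the real helper may differ:

    // Hypothetical reconstruction of createOuterEntries; mapEntryBuilder adds
    // the id key leaf itself, the name leaf is an assumption made here.
    private static Collection<MapEntryNode> createOuterEntries(final int amount, final String value) {
        final Collection<MapEntryNode> entries = new ArrayList<>();
        for (int i = 0; i < amount; i++) {
            entries.add(ImmutableNodes.mapEntryBuilder(
                            TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i)
                    .withChild(ImmutableNodes.leafNode(TestModel.NAME_QNAME, value))
                    .build());
        }
        return entries;
    }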
Use of org.opendaylight.controller.cluster.sharding.DistributedShardFactory.DistributedShardRegistration in project controller by opendaylight.
The class DistributedShardedDOMDataTreeTest, method testMultipleRegistrationsAtOnePrefix.
@Test
public void testMultipleRegistrationsAtOnePrefix() throws Exception {
    initEmptyDatastores();

    // Single-node variant: repeatedly register and close a shard at the same prefix.
    for (int i = 0; i < 10; i++) {
        LOG.debug("Round {}", i);
        final DistributedShardRegistration reg1 = waitOnAsyncTask(
                leaderShardFactory.createDistributedShard(TEST_ID,
                        Lists.newArrayList(AbstractTest.MEMBER_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

        leaderTestKit.waitUntilLeader(leaderDistributedDataStore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
        assertNotNull(findLocalShard(leaderDistributedDataStore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH)));

        waitOnAsyncTask(reg1.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
        waitUntilShardIsDown(leaderDistributedDataStore.getActorContext(),
                ClusterUtils.getCleanShardName(TestModel.TEST_PATH));
    }
}
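Taken together, the examples share one registration lifecycle. A distilled sketch, where shardFactory, prefix, and members stand in for the test fixtures above:

    final DistributedShardRegistration reg = waitOnAsyncTask(
            shardFactory.createDistributedShard(prefix, members),
            DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
    try {
        // wait for shard leadership, then use the shard:
        // producers, cursor-aware transactions, listeners
    } finally {
        // closing the registration tears the backend shard down on every replica
        waitOnAsyncTask(reg.close(), DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
    }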