Use of org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException in project controller by opendaylight.
In class DistributedShardedDOMDataTree, method createShardFrontend.
private void createShardFrontend(final DOMDataTreeIdentifier prefix) {
    LOG.debug("{}: Creating CDS shard for prefix: {}", memberName, prefix);
    final String shardName = ClusterUtils.getCleanShardName(prefix.getRootIdentifier());
    final AbstractDataStore distributedDataStore =
            prefix.getDatastoreType().equals(org.opendaylight.mdsal.common.api.LogicalDatastoreType.CONFIGURATION)
                    ? distributedConfigDatastore : distributedOperDatastore;

    try (DOMDataTreeProducer producer = localCreateProducer(Collections.singletonList(prefix))) {
        final Entry<DataStoreClient, ActorRef> entry =
                createDatastoreClient(shardName, distributedDataStore.getActorContext());

        final DistributedShardFrontend shard =
                new DistributedShardFrontend(distributedDataStore, entry.getKey(), prefix);

        final DOMDataTreeShardRegistration<DOMDataTreeShard> reg =
                shardedDOMDataTree.registerDataTreeShard(prefix, shard, producer);

        synchronized (shards) {
            shards.store(prefix, reg);
        }
    } catch (final DOMDataTreeShardingConflictException e) {
        LOG.error("{}: Prefix {} is already occupied by another shard",
                distributedConfigDatastore.getActorContext().getClusterWrapper().getCurrentMemberName(), prefix, e);
    } catch (DOMDataTreeProducerException e) {
        LOG.error("Unable to close producer", e);
    } catch (DOMDataTreeShardCreationFailedException e) {
        LOG.error("Unable to create datastore client for shard {}", prefix, e);
    }
}
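The conflict exception above comes from the registerDataTreeShard call on the mdsal sharding service, which throws when the prefix is already claimed by another shard. Below is a minimal stand-alone sketch of that core pattern; shardingService (a DOMDataTreeShardingService), dataTreeService (a DOMDataTreeService used only to obtain a producer), myShard (some DOMDataTreeShard implementation) and LOG are hypothetical names, not part of the project code.

import java.util.Collections;

import org.opendaylight.mdsal.common.api.LogicalDatastoreType;
import org.opendaylight.mdsal.dom.api.DOMDataTreeIdentifier;
import org.opendaylight.mdsal.dom.api.DOMDataTreeProducer;
import org.opendaylight.mdsal.dom.api.DOMDataTreeProducerException;
import org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

// ...

// Shard the whole configuration datastore root (any other prefix works the same way).
final DOMDataTreeIdentifier prefix =
        new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, YangInstanceIdentifier.EMPTY);

// The registration needs a producer bound to the prefix so the subtree can be handed
// over to the new shard; closing it via try-with-resources mirrors the method above.
try (DOMDataTreeProducer producer = dataTreeService.createProducer(Collections.singleton(prefix))) {
    // Keep the returned registration if the shard should be removable later.
    shardingService.registerDataTreeShard(prefix, myShard, producer);
} catch (DOMDataTreeShardingConflictException e) {
    // Another shard already owns this prefix - back off or reuse the existing registration.
    LOG.warn("Prefix {} is already occupied by another shard", prefix, e);
} catch (DOMDataTreeProducerException e) {
    LOG.warn("Unable to close producer for {}", prefix, e);
}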
Use of org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException in project controller by opendaylight.
In class DistributedShardedDOMDataTreeRemotingTest, method testProducerRegistrations.
@Test
public void testProducerRegistrations() throws Exception {
    LOG.info("testProducerRegistrations starting");
    initEmptyDatastores();

    leaderTestKit.waitForMembersUp("member-2");

    // TODO refactor shard creation and verification to own method
    final DistributedShardRegistration shardRegistration = waitOnAsyncTask(
            leaderShardFactory.createDistributedShard(
                    TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
            DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);

    leaderTestKit.waitUntilLeader(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()));

    final ActorRef leaderShardManager = leaderConfigDatastore.getActorContext().getShardManager();

    assertNotNull(findLocalShard(leaderConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));

    assertNotNull(findLocalShard(followerConfigDatastore.getActorContext(),
            ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier())));

    final Set<String> peers = new HashSet<>();
    IntegrationTestKit.verifyShardState(leaderConfigDatastore,
            ClusterUtils.getCleanShardName(TEST_ID.getRootIdentifier()),
            onDemandShardState -> peers.addAll(onDemandShardState.getPeerAddresses().values()));
    assertEquals(peers.size(), 1);

    final DOMDataTreeProducer producer = leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
    try {
        followerShardFactory.createProducer(Collections.singleton(TEST_ID));
        fail("Producer should be already registered on the other node");
    } catch (final IllegalArgumentException e) {
        assertTrue(e.getMessage().contains("is attached to producer"));
    }

    producer.close();

    final DOMDataTreeProducer followerProducer = followerShardFactory.createProducer(Collections.singleton(TEST_ID));
    try {
        leaderShardFactory.createProducer(Collections.singleton(TEST_ID));
        fail("Producer should be already registered on the other node");
    } catch (final IllegalArgumentException e) {
        assertTrue(e.getMessage().contains("is attached to producer"));
    }

    followerProducer.close();

    // try to create a shard on an already registered prefix on follower
    try {
        waitOnAsyncTask(
                followerShardFactory.createDistributedShard(
                        TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION);
        fail("This prefix already should have a shard registration that was forwarded from the other node");
    } catch (final DOMDataTreeShardingConflictException e) {
        assertTrue(e.getMessage().contains("is already occupied by another shard"));
    }

    shardRegistration.close().toCompletableFuture().get();

    LOG.info("testProducerRegistrations ending");
}
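The try/fail/catch blocks above are the classic JUnit 4 idiom for expected exceptions. If JUnit 4.13 or newer is on the test classpath, the final sharding-conflict check could equally be written with org.junit.Assert.assertThrows; the fragment below is only a sketch and reuses the followerShardFactory and TEST_ID fixtures set up by the test above.

// Expect the conflict directly instead of the fail()/catch pattern.
final DOMDataTreeShardingConflictException conflict = assertThrows(
        DOMDataTreeShardingConflictException.class,
        () -> waitOnAsyncTask(
                followerShardFactory.createDistributedShard(
                        TEST_ID, Lists.newArrayList(AbstractTest.MEMBER_NAME, AbstractTest.MEMBER_2_NAME)),
                DistributedShardedDOMDataTree.SHARD_FUTURE_TIMEOUT_DURATION));
assertTrue(conflict.getMessage().contains("is already occupied by another shard"));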
Use of org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException in project controller by opendaylight.
In class PrefixShardHandler, method onCreatePrefixShard.
public ListenableFuture<RpcResult<Void>> onCreatePrefixShard(final CreatePrefixShardInput input) {
    final SettableFuture<RpcResult<Void>> future = SettableFuture.create();
    final CompletionStage<DistributedShardRegistration> completionStage;
    final YangInstanceIdentifier identifier = serializer.toYangInstanceIdentifier(input.getPrefix());

    try {
        completionStage = shardFactory.createDistributedShard(
                new DOMDataTreeIdentifier(LogicalDatastoreType.CONFIGURATION, identifier),
                input.getReplicas().stream().map(MemberName::forName).collect(Collectors.toList()));

        completionStage.thenAccept(registration -> {
            LOG.debug("Shard[{}] created successfully.", identifier);
            registrations.put(identifier, registration);

            final ListenableFuture<Void> ensureFuture = ensureListExists();
            Futures.addCallback(ensureFuture, new FutureCallback<Void>() {
                @Override
                public void onSuccess(@Nullable final Void result) {
                    LOG.debug("Initial list write successful.");
                    future.set(RpcResultBuilder.<Void>success().build());
                }

                @Override
                public void onFailure(final Throwable throwable) {
                    LOG.warn("Shard[{}] creation failed:", identifier, throwable);

                    final RpcError error = RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION,
                            "create-shard-failed", "Shard creation failed", "cluster-test-app", "", throwable);
                    future.set(RpcResultBuilder.<Void>failed().withRpcError(error).build());
                }
            }, MoreExecutors.directExecutor());
        });
        completionStage.exceptionally(throwable -> {
            LOG.warn("Shard[{}] creation failed:", identifier, throwable);

            final RpcError error = RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION,
                    "create-shard-failed", "Shard creation failed", "cluster-test-app", "", throwable);
            future.set(RpcResultBuilder.<Void>failed().withRpcError(error).build());
            return null;
        });
    } catch (final DOMDataTreeShardingConflictException e) {
        LOG.warn("Unable to register shard for: {}.", identifier);

        final RpcError error = RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION,
                "create-shard-failed", "Sharding conflict", "cluster-test-app", "", e);
        future.set(RpcResultBuilder.<Void>failed().withRpcError(error).build());
    }

    return future;
}
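The success and failure paths above are attached as two independent stages of the same CompletionStage: thenAccept runs only on success and exceptionally only on failure. Below is a sketch of the same wiring folded into a single whenComplete callback from the standard java.util.concurrent API, reusing the identifier, registrations and future objects defined above; it is shown only as an alternative arrangement, not the project's code.

completionStage.whenComplete((registration, throwable) -> {
    if (throwable != null) {
        // Failure branch, equivalent to the exceptionally stage above.
        LOG.warn("Shard[{}] creation failed:", identifier, throwable);
        final RpcError error = RpcResultBuilder.newError(RpcError.ErrorType.APPLICATION,
                "create-shard-failed", "Shard creation failed", "cluster-test-app", "", throwable);
        future.set(RpcResultBuilder.<Void>failed().withRpcError(error).build());
        return;
    }

    // Success branch, equivalent to the thenAccept stage above.
    LOG.debug("Shard[{}] created successfully.", identifier);
    registrations.put(identifier, registration);
    // The ensureListExists() follow-up and its FutureCallback would be attached here,
    // exactly as in the thenAccept branch above.
});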
Use of org.opendaylight.mdsal.dom.api.DOMDataTreeShardingConflictException in project controller by opendaylight.
In class DistributedShardedDOMDataTree, method initDefaultShard.
@SuppressWarnings("checkstyle:IllegalCatch")
private void initDefaultShard(final LogicalDatastoreType logicalDatastoreType) throws ExecutionException, InterruptedException {
final PrefixedShardConfigWriter writer = writerMap.get(logicalDatastoreType);
if (writer.checkDefaultIsPresent()) {
LOG.debug("{}: Default shard for {} is already present in the config. Possibly saved in snapshot.", memberName, logicalDatastoreType);
} else {
try {
// Currently the default shard configuration is present in the out-of-box modules.conf and is
// expected to be present. So look up the local default shard here and create the frontend.
// TODO we don't have to do it for config and operational default shard separately. Just one of them
// should be enough
final ActorContext actorContext = logicalDatastoreType == LogicalDatastoreType.CONFIGURATION ? distributedConfigDatastore.getActorContext() : distributedOperDatastore.getActorContext();
final Optional<ActorRef> defaultLocalShardOptional = actorContext.findLocalShard(ClusterUtils.getCleanShardName(YangInstanceIdentifier.EMPTY));
if (defaultLocalShardOptional.isPresent()) {
LOG.debug("{}: Default shard for {} is already started, creating just frontend", memberName, logicalDatastoreType);
createShardFrontend(new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY));
}
// The local shard isn't present - we assume that means the local member isn't in the replica list
// and will be dynamically created later via an explicit add-shard-replica request. This is the
// bootstrapping mechanism to add a new node into an existing cluster. The following code to create
// the default shard as a prefix shard is problematic in this scenario so it is commented out. Since
// the default shard is a module-based shard by default, it makes sense to always treat it as such,
// ie bootstrap it in the same manner as the special prefix-configuration and EOS shards.
// final Collection<MemberName> names = distributedConfigDatastore.getActorContext().getConfiguration()
// .getUniqueMemberNamesForAllShards();
// Await.result(FutureConverters.toScala(createDistributedShard(
// new DOMDataTreeIdentifier(logicalDatastoreType, YangInstanceIdentifier.EMPTY), names)),
// SHARD_FUTURE_TIMEOUT_DURATION);
// } catch (DOMDataTreeShardingConflictException e) {
// LOG.debug("{}: Default shard for {} already registered, possibly due to other node doing it faster",
// memberName, logicalDatastoreType);
} catch (Exception e) {
LOG.error("{}: Default shard initialization for {} failed", memberName, logicalDatastoreType, e);
throw new RuntimeException(e);
}
}
}