Use of org.opendaylight.controller.cluster.datastore.messages.BatchedModifications in project controller by opendaylight.
The class TransactionProxyTest, method testReadyWithReadWrite.
@Test
public void testReadyWithReadWrite() throws Exception {
    ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
    final NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);

    doReturn(readDataReply(null)).when(mockActorContext).executeOperationAsync(
            eq(actorSelection(actorRef)), eqReadData(), any(Timeout.class));

    expectBatchedModificationsReady(actorRef, true);

    TransactionProxy transactionProxy = new TransactionProxy(mockComponentFactory, READ_WRITE);

    transactionProxy.read(TestModel.TEST_PATH);
    transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);

    DOMStoreThreePhaseCommitCohort ready = transactionProxy.ready();

    assertTrue(ready instanceof SingleCommitCohortProxy);

    verifyCohortFutures((SingleCommitCohortProxy) ready, new CommitTransactionReply().toSerializable());

    List<BatchedModifications> batchedModifications = captureBatchedModifications(actorRef);
    assertEquals("Captured BatchedModifications count", 1, batchedModifications.size());

    verifyBatchedModifications(batchedModifications.get(0), true, true,
            new WriteModification(TestModel.TEST_PATH, nodeToWrite));

    assertEquals("getTotalMessageCount", 1, batchedModifications.get(0).getTotalMessagesSent());
}
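This test exercises the single-message ready path: one BatchedModifications message carries the write, the ready flag, and the total message count. As a rough sketch of what the front-end assembles before sending (the constructor arguments are an assumption; the setters and WriteModification mirror the usages shown on this page):

// Hypothetical assembly of the message the test captures; the constructor
// shape is assumed, the setters match sendBatchedModifications() below.
BatchedModifications batched = new BatchedModifications(transactionId, version); // assumed signature
batched.addModification(new WriteModification(TestModel.TEST_PATH, nodeToWrite));
batched.setReady(true);           // last (and only) message of this transaction
batched.setDoCommitOnReady(true); // single cohort: commit immediately after ready
batched.setTotalMessagesSent(1);  // what the final assertion verifies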
Use of org.opendaylight.controller.cluster.datastore.messages.BatchedModifications in project controller by opendaylight.
The class RemoteTransactionContextTest, method testLimiterOnOverflowFailure.
/**
 * OperationLimiter gives up throttling at some point -- {@link RemoteTransactionContext} needs to deal with that
 * case, too.
 */
@Test
public void testLimiterOnOverflowFailure() throws TimeoutException, InterruptedException {
    txContext.executeModification(DELETE);
    txContext.executeModification(DELETE);
    txContext.executeModification(DELETE);
    txContext.executeModification(DELETE);
    assertEquals(0, limiter.availablePermits());
    txContext.executeModification(DELETE);
    // Last acquire should have failed ...
    assertEquals(0, limiter.availablePermits());

    Future<Object> future = txContext.sendBatchedModifications();
    assertEquals(0, limiter.availablePermits());

    BatchedModifications msg = kit.expectMsgClass(BatchedModifications.class);
    // ... so we are sending 5 modifications ...
    assertEquals(5, msg.getModifications().size());
    assertEquals(1, msg.getTotalMessagesSent());
    sendReply(new Failure(new NullPointerException()));
    assertFuture(future, new OnComplete<Object>() {
        @Override
        public void onComplete(final Throwable failure, final Object success) {
            assertTrue(failure instanceof NullPointerException);
            // ... but they account for only 4 permits.
            assertEquals(4, limiter.availablePermits());
        }
    });

    kit.expectNoMsg();
}
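The asymmetry the test asserts (5 modifications in the message, only 4 permits released) comes from the limiter giving up: the fifth acquire times out, so that modification is queued without a backing permit. A self-contained sketch of that give-up behavior, assuming a plain java.util.concurrent.Semaphore stands in for OperationLimiter (which is OpenDaylight's own class):

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

// Illustrative stand-in for OperationLimiter: acquire with a timeout and
// carry on without a permit when throttling gives up.
final class GiveUpLimiter {
    private final Semaphore semaphore;
    private final long timeoutMillis;
    private int heldPermits; // permits actually acquired, to be released later

    GiveUpLimiter(final int permits, final long timeoutMillis) {
        this.semaphore = new Semaphore(permits);
        this.timeoutMillis = timeoutMillis;
    }

    void acquire() throws InterruptedException {
        // On timeout we proceed anyway; only successful acquires are counted,
        // which is why the test sees 4 released permits for 5 modifications.
        if (semaphore.tryAcquire(timeoutMillis, TimeUnit.MILLISECONDS)) {
            heldPermits++;
        }
    }

    void releaseAll() {
        semaphore.release(heldPermits);
        heldPermits = 0;
    }

    int availablePermits() {
        return semaphore.availablePermits();
    }
}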
Use of org.opendaylight.controller.cluster.datastore.messages.BatchedModifications in project controller by opendaylight.
The class Shard, method handleNonRaftCommand.
@Override
protected void handleNonRaftCommand(final Object message) {
    try (MessageTracker.Context context = appendEntriesReplyTracker.received(message)) {
        final Optional<Error> maybeError = context.error();
        if (maybeError.isPresent()) {
            LOG.trace("{} : AppendEntriesReply failed to arrive at the expected interval {}", persistenceId(),
                    maybeError.get());
        }

        store.resetTransactionBatch();

        if (message instanceof RequestEnvelope) {
            handleRequestEnvelope((RequestEnvelope) message);
        } else if (MessageAssembler.isHandledMessage(message)) {
            handleRequestAssemblerMessage(message);
        } else if (message instanceof ConnectClientRequest) {
            handleConnectClient((ConnectClientRequest) message);
        } else if (CreateTransaction.isSerializedType(message)) {
            handleCreateTransaction(message);
        } else if (message instanceof BatchedModifications) {
            handleBatchedModifications((BatchedModifications) message);
        } else if (message instanceof ForwardedReadyTransaction) {
            handleForwardedReadyTransaction((ForwardedReadyTransaction) message);
        } else if (message instanceof ReadyLocalTransaction) {
            handleReadyLocalTransaction((ReadyLocalTransaction) message);
        } else if (CanCommitTransaction.isSerializedType(message)) {
            handleCanCommitTransaction(CanCommitTransaction.fromSerializable(message));
        } else if (CommitTransaction.isSerializedType(message)) {
            handleCommitTransaction(CommitTransaction.fromSerializable(message));
        } else if (AbortTransaction.isSerializedType(message)) {
            handleAbortTransaction(AbortTransaction.fromSerializable(message));
        } else if (CloseTransactionChain.isSerializedType(message)) {
            closeTransactionChain(CloseTransactionChain.fromSerializable(message));
        } else if (message instanceof RegisterChangeListener) {
            changeSupport.onMessage((RegisterChangeListener) message, isLeader(), hasLeader());
        } else if (message instanceof RegisterDataTreeChangeListener) {
            treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader(), hasLeader());
        } else if (message instanceof UpdateSchemaContext) {
            updateSchemaContext((UpdateSchemaContext) message);
        } else if (message instanceof PeerAddressResolved) {
            PeerAddressResolved resolved = (PeerAddressResolved) message;
            setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress());
        } else if (TX_COMMIT_TIMEOUT_CHECK_MESSAGE.equals(message)) {
            commitTimeoutCheck();
        } else if (message instanceof DatastoreContext) {
            onDatastoreContext((DatastoreContext) message);
        } else if (message instanceof RegisterRoleChangeListener) {
            roleChangeNotifier.get().forward(message, context());
        } else if (message instanceof FollowerInitialSyncUpStatus) {
            shardMBean.setFollowerInitialSyncStatus(((FollowerInitialSyncUpStatus) message).isInitialSyncDone());
            context().parent().tell(message, self());
        } else if (GET_SHARD_MBEAN_MESSAGE.equals(message)) {
            sender().tell(getShardMBean(), self());
        } else if (message instanceof GetShardDataTree) {
            sender().tell(store.getDataTree(), self());
        } else if (message instanceof ServerRemoved) {
            context().parent().forward(message, context());
        } else if (ShardTransactionMessageRetrySupport.TIMER_MESSAGE_CLASS.isInstance(message)) {
            messageRetrySupport.onTimerMessage(message);
        } else if (message instanceof DataTreeCohortActorRegistry.CohortRegistryCommand) {
            store.processCohortRegistryCommand(getSender(),
                    (DataTreeCohortActorRegistry.CohortRegistryCommand) message);
        } else if (message instanceof PersistAbortTransactionPayload) {
            final TransactionIdentifier txId = ((PersistAbortTransactionPayload) message).getTransactionId();
            persistPayload(txId, AbortTransactionPayload.create(txId), true);
        } else if (message instanceof MakeLeaderLocal) {
            onMakeLeaderLocal();
        } else if (RESUME_NEXT_PENDING_TRANSACTION.equals(message)) {
            store.resumeNextPendingTransaction();
        } else if (!responseMessageSlicer.handleMessage(message)) {
            super.handleNonRaftCommand(message);
        }
    }
}
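Every message is first handed to appendEntriesReplyTracker inside a try-with-resources block, so late AppendEntriesReply arrivals can be logged before the dispatch chain runs. An illustrative stand-in for that interval check (MessageTracker itself is an OpenDaylight utility; this is only the idea):

import java.util.Optional;
import java.util.concurrent.TimeUnit;

// Illustrative stand-in for MessageTracker: flag messages that arrive later
// than the expected interval since the previous one.
final class IntervalTracker {
    private final long expectedNanos;
    private long lastReceived = System.nanoTime();

    IntervalTracker(final long expectedMillis) {
        this.expectedNanos = TimeUnit.MILLISECONDS.toNanos(expectedMillis);
    }

    Optional<String> received() {
        final long now = System.nanoTime();
        final long elapsed = now - lastReceived;
        lastReceived = now;
        return elapsed > expectedNanos
                ? Optional.of("message arrived " + TimeUnit.NANOSECONDS.toMillis(elapsed)
                        + " ms after the previous one")
                : Optional.empty();
    }
}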
Use of org.opendaylight.controller.cluster.datastore.messages.BatchedModifications in project controller by opendaylight.
The class RemoteTransactionContext, method sendBatchedModifications.
protected Future<Object> sendBatchedModifications(boolean ready, boolean doCommitOnReady) {
    Future<Object> sent = null;
    if (ready || (batchedModifications != null && !batchedModifications.getModifications().isEmpty())) {
        if (batchedModifications == null) {
            batchedModifications = newBatchedModifications();
        }

        LOG.debug("Tx {} sending {} batched modifications, ready: {}", getIdentifier(),
                batchedModifications.getModifications().size(), ready);

        batchedModifications.setReady(ready);
        batchedModifications.setDoCommitOnReady(doCommitOnReady);
        batchedModifications.setTotalMessagesSent(++totalBatchedModificationsSent);

        final BatchedModifications toSend = batchedModifications;
        final int permitsToRelease = batchPermits;
        batchPermits = 0;

        if (ready) {
            batchedModifications = null;
        } else {
            batchedModifications = newBatchedModifications();

            final Throwable failure = failedModification;
            if (failure != null) {
                // We have observed a modification failure, it does not make sense to send this batch. This speeds
                // up the time when the application could be blocked due to messages timing out and operation
                // limiter kicking in.
                LOG.debug("Tx {} modifications previously failed, not sending a non-ready batch", getIdentifier());
                limiter.release(permitsToRelease);
                return Futures.failed(failure);
            }
        }

        sent = actorContext.executeOperationAsync(getActor(), toSend.toSerializable(),
                actorContext.getTransactionCommitOperationTimeout());
        sent.onComplete(new OnComplete<Object>() {
            @Override
            public void onComplete(Throwable failure, Object success) {
                if (failure != null) {
                    LOG.debug("Tx {} modifications failed", getIdentifier(), failure);
                    failedModification = failure;
                } else {
                    LOG.debug("Tx {} modifications completed with {}", getIdentifier(), success);
                }
                limiter.release(permitsToRelease);
            }
        }, actorContext.getClientDispatcher());
    }
    return sent;
}
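The no-argument sendBatchedModifications() used in testLimiterOnOverflowFailure above presumably delegates with both flags cleared, flushing the current batch without readying the transaction. This delegation is an assumption consistent with that test, not code shown on this page:

// Assumed delegation for the no-argument overload: flush the pending batch,
// do not mark the transaction ready, do not request commit-on-ready.
protected Future<Object> sendBatchedModifications() {
    return sendBatchedModifications(false, false);
}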
Use of org.opendaylight.controller.cluster.datastore.messages.BatchedModifications in project controller by opendaylight.
The class Shard, method handleBatchedModifications.
private void handleBatchedModifications(final BatchedModifications batched) {
    // This message is sent to prepare the modifications transaction directly on the Shard as an
    // optimization to avoid the extra overhead of a separate ShardTransaction actor. On the last
    // BatchedModifications message, the caller sets the ready flag in the message indicating
    // modifications are complete. The reply contains the cohort actor path (this actor) for the caller
    // to initiate the 3-phase commit. This also avoids the overhead of sending an additional
    // ReadyTransaction message.
    //
    // If we're not the leader then forward to the leader. This is a safety measure - we shouldn't
    // normally get here if we're not the leader as the front-end (TransactionProxy) should determine
    // the primary/leader shard. However with timing and caching on the front-end, there's a small
    // window where it could have a stale leader during leadership transitions.
    //
    boolean isLeaderActive = isLeaderActive();
    if (isLeader() && isLeaderActive) {
        handleBatchedModificationsLocal(batched, getSender());
    } else {
        ActorSelection leader = getLeader();
        if (!isLeaderActive || leader == null) {
            messageRetrySupport.addMessageToRetry(batched, getSender(),
                    "Could not process BatchedModifications " + batched.getTransactionId());
        } else {
            // If this is not the first batch and leadership changed in between batched messages,
            // we need to reconstruct previous BatchedModifications from the transaction
            // DataTreeModification, honoring the max batched modification count, and forward all the
            // previous BatchedModifications to the new leader.
            Collection<BatchedModifications> newModifications = commitCoordinator
                    .createForwardedBatchedModifications(batched, datastoreContext.getShardBatchedModificationCount());

            LOG.debug("{}: Forwarding {} BatchedModifications to leader {}", persistenceId(),
                    newModifications.size(), leader);

            for (BatchedModifications bm : newModifications) {
                leader.forward(bm, getContext());
            }
        }
    }
}
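The forwarding branch rebuilds the transaction's earlier batches while honoring the configured maximum per message. A simplified sketch of that splitting step (not the actual ShardCommitCoordinator.createForwardedBatchedModifications implementation; constructor and getter shapes beyond those visible on this page are assumptions):

import java.util.ArrayList;
import java.util.List;

// Simplified re-batching sketch: split accumulated modifications into messages
// of at most maxBatchSize, number them cumulatively, and carry the ready and
// commit-on-ready flags only on the final message, as the protocol comments
// above describe.
static List<BatchedModifications> rebatch(final List<Modification> modifications, final int maxBatchSize,
        final BatchedModifications original) {
    final List<BatchedModifications> batches = new ArrayList<>();
    BatchedModifications current = null;
    for (Modification mod : modifications) {
        if (current == null || current.getModifications().size() >= maxBatchSize) {
            current = new BatchedModifications(original.getTransactionId(), original.getVersion());
            batches.add(current);
        }
        current.addModification(mod);
    }
    for (int i = 0; i < batches.size(); i++) {
        batches.get(i).setTotalMessagesSent(i + 1);
    }
    if (current != null) {
        current.setReady(original.isReady());
        current.setDoCommitOnReady(original.isDoCommitOnReady());
    }
    return batches;
}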