Use of com.hazelcast.cluster.impl.VectorClock in the project hazelcast by hazelcast.
Example: the createFactory method of the ClusterDataSerializerHook class.
@Override
public DataSerializableFactory createFactory() {
    // One constructor function per serializable type, indexed by its type-ID constant.
    // The argument passed to each lambda (the type ID) is not needed to build the instance.
    ConstructorFunction<Integer, IdentifiedDataSerializable>[] ctors = new ConstructorFunction[LEN];
    ctors[AUTH_FAILURE] = typeId -> new AuthenticationFailureOp();
    ctors[ADDRESS] = typeId -> new Address();
    ctors[MEMBER] = typeId -> new MemberImpl();
    ctors[HEARTBEAT] = typeId -> new HeartbeatOp();
    ctors[CONFIG_CHECK] = typeId -> new ConfigCheck();
    ctors[MEMBER_HANDSHAKE] = typeId -> new MemberHandshake();
    ctors[MEMBER_INFO_UPDATE] = typeId -> new MembersUpdateOp();
    ctors[FINALIZE_JOIN] = typeId -> new FinalizeJoinOp();
    ctors[BEFORE_JOIN_CHECK_FAILURE] = typeId -> new BeforeJoinCheckFailureOp();
    ctors[CHANGE_CLUSTER_STATE] = typeId -> new CommitClusterStateOp();
    ctors[CONFIG_MISMATCH] = typeId -> new ConfigMismatchOp();
    ctors[CLUSTER_MISMATCH] = typeId -> new ClusterMismatchOp();
    ctors[SPLIT_BRAIN_MERGE_VALIDATION] = typeId -> new SplitBrainMergeValidationOp();
    ctors[JOIN_REQUEST_OP] = typeId -> new JoinRequestOp();
    ctors[LOCK_CLUSTER_STATE] = typeId -> new LockClusterStateOp();
    ctors[MASTER_CLAIM] = typeId -> new JoinMastershipClaimOp();
    ctors[WHOIS_MASTER] = typeId -> new WhoisMasterOp();
    ctors[MERGE_CLUSTERS] = typeId -> new MergeClustersOp();
    ctors[POST_JOIN] = typeId -> new OnJoinOp();
    ctors[ROLLBACK_CLUSTER_STATE] = typeId -> new RollbackClusterStateOp();
    ctors[MASTER_RESPONSE] = typeId -> new MasterResponseOp();
    ctors[SHUTDOWN_NODE] = typeId -> new ShutdownNodeOp();
    ctors[TRIGGER_MEMBER_LIST_PUBLISH] = typeId -> new TriggerMemberListPublishOp();
    ctors[CLUSTER_STATE_TRANSACTION_LOG_RECORD] = typeId -> new ClusterStateTransactionLogRecord();
    ctors[MEMBER_INFO] = typeId -> new MemberInfo();
    ctors[JOIN_MESSAGE] = typeId -> new JoinMessage();
    ctors[JOIN_REQUEST] = typeId -> new JoinRequest();
    ctors[MIGRATION_INFO] = typeId -> new MigrationInfo();
    ctors[MEMBER_VERSION] = typeId -> new MemberVersion();
    ctors[CLUSTER_STATE_CHANGE] = typeId -> new ClusterStateChange();
    ctors[SPLIT_BRAIN_JOIN_MESSAGE] = typeId -> new SplitBrainJoinMessage();
    ctors[VERSION] = typeId -> new Version();
    ctors[FETCH_MEMBER_LIST_STATE] = typeId -> new FetchMembersViewOp();
    ctors[EXPLICIT_SUSPICION] = typeId -> new ExplicitSuspicionOp();
    ctors[MEMBERS_VIEW] = typeId -> new MembersView();
    ctors[TRIGGER_EXPLICIT_SUSPICION] = typeId -> new TriggerExplicitSuspicionOp();
    ctors[MEMBERS_VIEW_METADATA] = typeId -> new MembersViewMetadata();
    ctors[HEARTBEAT_COMPLAINT] = typeId -> new HeartbeatComplaintOp();
    ctors[PROMOTE_LITE_MEMBER] = typeId -> new PromoteLiteMemberOp();
    ctors[VECTOR_CLOCK] = typeId -> new VectorClock();
    ctors[ENDPOINT_QUALIFIER] = typeId -> new EndpointQualifier();
    return new ArrayDataSerializableFactory(ctors);
}
Use of com.hazelcast.cluster.impl.VectorClock in the project hazelcast by hazelcast.
Example: the replicate method of the CRDTReplicationTask class.
/**
 * Performs replication of a {@link CRDTReplicationAwareService} to the
 * given target. The service may optimise the returned operation based on
 * the target member and the previous successful replication operations.
 * <p>
 * Returns early without replicating if the current thread has been
 * interrupted or if the service produces no replication operation
 * (nothing new to replicate for this target). Replication failures are
 * logged and swallowed; a later run of this task will retry.
 *
 * @param service the service to replicate
 * @param target the target to replicate to
 * @see CRDTReplicationAwareService
 */
private void replicate(CRDTReplicationAwareService service, Member target) {
    if (Thread.currentThread().isInterrupted()) {
        return;
    }
    final int targetIndex = getDataMemberListIndex(target);
    final Map<String, VectorClock> lastSuccessfullyReplicatedClocks =
            replicationMigrationService.getReplicatedVectorClocks(service.getName(), target.getUuid());
    final OperationService operationService = nodeEngine.getOperationService();
    final CRDTReplicationContainer replicationOperation =
            service.prepareReplicationOperation(lastSuccessfullyReplicatedClocks, targetIndex);
    if (replicationOperation == null) {
        // Guard the message construction, consistent with the isFineEnabled()
        // check below — avoids string concatenation when finest is disabled.
        if (logger.isFinestEnabled()) {
            logger.finest("Skipping replication of " + service.getName() + " for target " + target);
        }
        return;
    }
    try {
        if (logger.isFinestEnabled()) {
            logger.finest("Replicating " + service.getName() + " to " + target);
        }
        operationService.invokeOnTarget(null, replicationOperation.getOperation(), target.getAddress()).joinInternal();
        // Record the clocks only after a successful (synchronous) invocation so
        // a failed replication is retried from the previous clocks next time.
        replicationMigrationService.setReplicatedVectorClocks(service.getName(), target.getUuid(),
                replicationOperation.getVectorClocks());
    } catch (Exception e) {
        // Best-effort: log and continue; the periodic task will retry later.
        if (logger.isFineEnabled()) {
            logger.fine("Failed replication of " + service.getName() + " for target " + target, e);
        } else {
            logger.info("Failed replication of " + service.getName() + " for target " + target);
        }
    }
}
Use of com.hazelcast.cluster.impl.VectorClock in the project hazelcast by hazelcast.
Example: the prepareMigrationOperation method of the PNCounterService class.
@Override
public CRDTReplicationContainer prepareMigrationOperation(int maxConfiguredReplicaCount) {
    // Snapshot of the vector clock for every counter included in the migration.
    final HashMap<String, VectorClock> clockSnapshots = new HashMap<>();
    // Counters to migrate; named distinctly from the this.counters field to avoid shadowing.
    final HashMap<String, PNCounterImpl> migratedCounters = new HashMap<>();
    final Config config = nodeEngine.getConfig();
    for (Entry<String, PNCounterImpl> entry : this.counters.entrySet()) {
        final String name = entry.getKey();
        // Only counters whose configured replica count is below the maximum are migrated.
        if (config.findPNCounterConfig(name).getReplicaCount() < maxConfiguredReplicaCount) {
            final PNCounterImpl counter = entry.getValue();
            migratedCounters.put(name, counter);
            clockSnapshots.put(name, counter.getCurrentVectorClock());
        }
    }
    if (migratedCounters.isEmpty()) {
        return null;
    }
    return new CRDTReplicationContainer(new PNCounterReplicationOperation(migratedCounters), clockSnapshots);
}
Use of com.hazelcast.cluster.impl.VectorClock in the project hazelcast by hazelcast.
Example: the prepareReplicationOperation method of the PNCounterService class.
@Override
public CRDTReplicationContainer prepareReplicationOperation(Map<String, VectorClock> previouslyReplicatedVectorClocks, int targetIndex) {
    // Diamond operator for consistency with prepareMigrationOperation.
    // Current vector clocks for ALL counters this target replicates (not only changed ones),
    // so the caller can record them as the latest successfully replicated state.
    final HashMap<String, VectorClock> currentVectorClocks = new HashMap<>();
    // Counters with changes since the last successful replication to this target.
    // Named distinctly from the this.counters field to avoid shadowing.
    final HashMap<String, PNCounterImpl> changedCounters = new HashMap<>();
    final Config config = nodeEngine.getConfig();
    for (Entry<String, PNCounterImpl> counterEntry : this.counters.entrySet()) {
        final String counterName = counterEntry.getKey();
        final PNCounterImpl counter = counterEntry.getValue();
        // Skip counters that are not replicated on the target (its index exceeds the replica count).
        if (targetIndex >= config.findPNCounterConfig(counterName).getReplicaCount()) {
            continue;
        }
        final VectorClock counterCurrentVectorClock = counter.getCurrentVectorClock();
        final VectorClock counterPreviousVectorClock = previouslyReplicatedVectorClocks.get(counterName);
        // Replicate only counters that advanced since the last successful replication
        // (or that were never replicated to this target).
        if (counterPreviousVectorClock == null || counterCurrentVectorClock.isAfter(counterPreviousVectorClock)) {
            changedCounters.put(counterName, counter);
        }
        currentVectorClocks.put(counterName, counterCurrentVectorClock);
    }
    return changedCounters.isEmpty() ? null : new CRDTReplicationContainer(new PNCounterReplicationOperation(changedCounters), currentVectorClocks);
}
Use of com.hazelcast.cluster.impl.VectorClock in the project hazelcast by hazelcast.
Example: the get method of the PNCounterImpl class.
/**
 * Returns the current value of the counter.
 * <p>
 * The method can throw a {@link ConsistencyLostException} when the state
 * of this CRDT is not causally related to the observed timestamps. This
 * means that it cannot provide the session guarantees of RYW (read your
 * writes) and monotonic read.
 *
 * @param observedTimestamps the vector clock last observed by the client of
 * this counter
 * @return the current counter value with the current counter vector clock
 * @throws ConsistencyLostException if this replica cannot provide the
 * session guarantees
 */
public CRDTTimestampedLong get(VectorClock observedTimestamps) {
    checkSessionConsistency(observedTimestamps);
    stateReadLock.lock();
    try {
        // PN-counter value = sum over all replicas of (increments - decrements).
        long total = 0;
        for (long[] addsAndRemoves : state.values()) {
            total += addsAndRemoves[0] - addsAndRemoves[1];
        }
        // Return a copy of the state vector clock so callers cannot observe later mutations.
        return new CRDTTimestampedLong(total, new VectorClock(stateVectorClock));
    } finally {
        stateReadLock.unlock();
    }
}
Aggregations