Example 1 with MOVE

Use of org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType.MOVE in project ozone by apache.

The example below is from the class ReplicationManager, method deleteSrcDnForMove.

/**
 * If the container is in inflightMove, handle the move.
 * This function assumes replication has already been completed.
 *
 * @param cif the ContainerInfo of the container being moved
 * @param replicaSet a Set of replicas, which may contain excess replicas
 */
private void deleteSrcDnForMove(final ContainerInfo cif, final Set<ContainerReplica> replicaSet) {
    final ContainerID cid = cif.containerID();
    MoveDataNodePair movePair = moveScheduler.getMoveDataNodePair(cid);
    if (movePair == null) {
        return;
    }
    final DatanodeDetails srcDn = movePair.getSrc();
    ContainerReplicaCount replicaCount = getContainerReplicaCount(cif, replicaSet);
    if (replicaSet.stream().noneMatch(r -> r.getDatanodeDetails().equals(srcDn))) {
        // if the target is present but the source disappears somehow,
        // we can consider the move successful.
        compleleteMoveFutureWithResult(cid, MoveResult.COMPLETED);
        moveScheduler.completeMove(cid.getProtobuf());
        return;
    }
    int replicationFactor = cif.getReplicationConfig().getRequiredNodes();
    ContainerPlacementStatus currentCPS = getPlacementStatus(replicaSet, replicationFactor);
    Set<ContainerReplica> newReplicaSet = new HashSet<>(replicaSet);
    newReplicaSet.removeIf(r -> r.getDatanodeDetails().equals(srcDn));
    ContainerPlacementStatus newCPS = getPlacementStatus(newReplicaSet, replicationFactor);
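    // Delete the source replica only if the container currently has more
    // replicas than required and removing the source would not weaken the
    // placement (e.g. rack awareness) compared to the current state.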
    if (replicaCount.isOverReplicated() && isPlacementStatusActuallyEqual(currentCPS, newCPS)) {
        sendDeleteCommand(cif, srcDn, true);
    } else {
        // if the source and target datanodes are both in the replica set,
        // but we cannot delete the source datanode for now (e.g.,
        // there are only 3 replicas, or the placement policy would not be
        // satisfied after the deletion, etc.),
        // we just complete the future without sending a delete command.
        LOG.info("Cannot remove source replica after successfully replicating to the target datanode");
        compleleteMoveFutureWithResult(cid, MoveResult.DELETE_FAIL_POLICY);
        moveScheduler.completeMove(cid.getProtobuf());
    }
}
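
The helpers getPlacementStatus and isPlacementStatusActuallyEqual are not part of this snippet. As a rough illustration of the deletion decision above, here is a minimal sketch of the kind of comparison isPlacementStatusActuallyEqual could perform, assuming ContainerPlacementStatus exposes isPolicySatisfied() and actualPlacementCount(); treat the method names and the exact rule as assumptions, not the project's actual implementation.

private boolean isPlacementStatusActuallyEqual(ContainerPlacementStatus currentCPS, ContainerPlacementStatus newCPS) {
    // Sketch only: deleting the source replica is treated as safe when the
    // placement after removal is no worse than the placement before removal.
    if (currentCPS.isPolicySatisfied()) {
        // the current placement satisfies the policy, so the new one must too
        return newCPS.isPolicySatisfied();
    }
    // neither placement satisfies the policy; require that removing the source
    // does not reduce the number of placement groups (e.g. racks) actually used
    return currentCPS.actualPlacementCount() == newCPS.actualPlacementCount();
}

In short, the source replica is deleted only when the container is over-replicated and removing that replica leaves the placement no worse off; otherwise the move future completes with MoveResult.DELETE_FAIL_POLICY.
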
Also used : ConfigGroup(org.apache.hadoop.hdds.conf.ConfigGroup) ScmConfigKeys(org.apache.hadoop.hdds.scm.ScmConfigKeys) HddsProtos(org.apache.hadoop.hdds.protocol.proto.HddsProtos) NodeStatus(org.apache.hadoop.hdds.scm.node.NodeStatus) DeleteContainerCommand(org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand) LoggerFactory(org.slf4j.LoggerFactory) ConfigurationSource(org.apache.hadoop.hdds.conf.ConfigurationSource) EventPublisher(org.apache.hadoop.hdds.server.events.EventPublisher) Duration(java.time.Duration) Map(java.util.Map) SCMHAManager(org.apache.hadoop.hdds.scm.ha.SCMHAManager) ReplicateContainerCommand(org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand) HddsConfigKeys(org.apache.hadoop.hdds.HddsConfigKeys) ConfigType(org.apache.hadoop.hdds.conf.ConfigType) Predicate(java.util.function.Predicate) MOVE(org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType.MOVE) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) ExitUtil(org.apache.hadoop.util.ExitUtil) UUID(java.util.UUID) Collectors(java.util.stream.Collectors) ContainerPlacementStatus(org.apache.hadoop.hdds.scm.ContainerPlacementStatus) CommandForDatanode(org.apache.hadoop.ozone.protocol.commands.CommandForDatanode) List(java.util.List) StorageUnit(org.apache.hadoop.hdds.conf.StorageUnit) PlacementPolicy(org.apache.hadoop.hdds.scm.PlacementPolicy) Config(org.apache.hadoop.hdds.conf.Config) MoveDataNodePair(org.apache.hadoop.hdds.scm.container.common.helpers.MoveDataNodePair) SCMServiceManager(org.apache.hadoop.hdds.scm.ha.SCMServiceManager) SCMHAInvocationHandler(org.apache.hadoop.hdds.scm.ha.SCMHAInvocationHandler) InvalidStateTransitionException(org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException) Proxy(java.lang.reflect.Proxy) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) HealthState(org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState) CURRENT_VERSION(org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION) Preconditions(org.apache.ratis.util.Preconditions) Replicate(org.apache.hadoop.hdds.scm.metadata.Replicate) NodeOperationalState(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState) CompletableFuture(java.util.concurrent.CompletableFuture) SCMContext(org.apache.hadoop.hdds.scm.ha.SCMContext) SCMRatisServer(org.apache.hadoop.hdds.scm.ha.SCMRatisServer) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) LinkedHashMap(java.util.LinkedHashMap) LifeCycleState(org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState) NotLeaderException(org.apache.ratis.protocol.exceptions.NotLeaderException) SCMService(org.apache.hadoop.hdds.scm.ha.SCMService) NodeNotFoundException(org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException) CloseContainerCommand(org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand) GeneratedMessage(com.google.protobuf.GeneratedMessage) LinkedList(java.util.LinkedList) StorageContainerManager(org.apache.hadoop.hdds.scm.server.StorageContainerManager) DBTransactionBuffer(org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) ReentrantLock(java.util.concurrent.locks.ReentrantLock) State(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) IOException(java.io.IOException) SCMEvents(org.apache.hadoop.hdds.scm.events.SCMEvents) 
NodeState(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) Lock(java.util.concurrent.locks.Lock) OZONE(org.apache.hadoop.hdds.conf.ConfigTag.OZONE) Table(org.apache.hadoop.hdds.utils.db.Table) SCM(org.apache.hadoop.hdds.conf.ConfigTag.SCM) Clock(java.time.Clock) ReplicationManagerMetrics(org.apache.hadoop.hdds.scm.container.replication.ReplicationManagerMetrics) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Comparator(java.util.Comparator) TableIterator(org.apache.hadoop.hdds.utils.db.TableIterator) Collections(java.util.Collections) SCMCommand(org.apache.hadoop.ozone.protocol.commands.SCMCommand)
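
The MOVE request type shown in the imports is what ties the MoveScheduler used above to SCM HA: its state-changing calls are replicated through Ratis by wrapping the local implementation in a dynamic proxy. Below is a minimal sketch of that wiring; the SCMHAInvocationHandler constructor arguments, and the moveSchedulerImpl and scmRatisServer variables, are assumptions made for illustration.

// Sketch only: route MoveScheduler mutations through Ratis under RequestType.MOVE.
// moveSchedulerImpl and scmRatisServer are hypothetical local variables, and the
// constructor signature is inferred from the imports above, not verified.
final SCMHAInvocationHandler invocationHandler =
    new SCMHAInvocationHandler(MOVE, moveSchedulerImpl, scmRatisServer);
final MoveScheduler moveScheduler = (MoveScheduler) Proxy.newProxyInstance(
    SCMHAInvocationHandler.class.getClassLoader(),
    new Class<?>[] { MoveScheduler.class },
    invocationHandler);
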

Aggregations

VisibleForTesting (com.google.common.annotations.VisibleForTesting)1 GeneratedMessage (com.google.protobuf.GeneratedMessage)1 IOException (java.io.IOException)1 Proxy (java.lang.reflect.Proxy)1 Clock (java.time.Clock)1 Duration (java.time.Duration)1 ArrayList (java.util.ArrayList)1 Collections (java.util.Collections)1 Comparator (java.util.Comparator)1 HashSet (java.util.HashSet)1 Iterator (java.util.Iterator)1 LinkedHashMap (java.util.LinkedHashMap)1 LinkedList (java.util.LinkedList)1 List (java.util.List)1 Map (java.util.Map)1 Set (java.util.Set)1 UUID (java.util.UUID)1 CompletableFuture (java.util.concurrent.CompletableFuture)1 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)1 TimeUnit (java.util.concurrent.TimeUnit)1