Example 26 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

Class FsDatasetImpl, method convertTemporaryToRbw.

// FsDatasetSpi
@Override
public ReplicaInPipeline convertTemporaryToRbw(final ExtendedBlock b) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        final long blockId = b.getBlockId();
        final long expectedGs = b.getGenerationStamp();
        final long visible = b.getNumBytes();
        LOG.info("Convert " + b + " from Temporary to RBW, visible length=" + visible);
        final ReplicaInfo temp;
        {
            // get replica
            final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), blockId);
            if (r == null) {
                throw new ReplicaNotFoundException(ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
            }
            // check the replica's state
            if (r.getState() != ReplicaState.TEMPORARY) {
                throw new ReplicaAlreadyExistsException("r.getState() != ReplicaState.TEMPORARY, r=" + r);
            }
            temp = r;
        }
        // check generation stamp
        if (temp.getGenerationStamp() != expectedGs) {
            throw new ReplicaAlreadyExistsException("temp.getGenerationStamp() != expectedGs = " + expectedGs + ", temp=" + temp);
        }
        // TODO: check writer?
        // set writer to the current thread
        // temp.setWriter(Thread.currentThread());
        // check length
        final long numBytes = temp.getNumBytes();
        if (numBytes < visible) {
            throw new IOException(numBytes + " = numBytes < visible = " + visible + ", temp=" + temp);
        }
        // check volume
        final FsVolumeImpl v = (FsVolumeImpl) temp.getVolume();
        if (v == null) {
            throw new IOException("r.getVolume() = null, temp=" + temp);
        }
        final ReplicaInPipeline rbw = v.convertTemporaryToRbw(b, temp);
        if (rbw.getState() != ReplicaState.RBW) {
            throw new IOException("Expected replica state: " + ReplicaState.RBW + " obtained " + rbw.getState() + " for converting block " + b.getBlockId());
        }
        // overwrite the RBW in the volume map
        volumeMap.add(b.getBlockPoolId(), rbw.getReplicaInfo());
        return rbw;
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline)
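
Below is a minimal, self-contained sketch (hypothetical types, not the Hadoop API) of the check ordering the method above performs before promoting a temporary replica to RBW: the replica must exist, still be TEMPORARY, carry the expected generation stamp, and be at least as long as the visible length.

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class ConvertTemporaryToRbwSketch {
    enum State { TEMPORARY, RBW }

    static class SimpleReplica {
        final long blockId;
        long genStamp;
        long numBytes;
        State state;
        SimpleReplica(long blockId, long genStamp, long numBytes, State state) {
            this.blockId = blockId;
            this.genStamp = genStamp;
            this.numBytes = numBytes;
            this.state = state;
        }
    }

    private final Map<Long, SimpleReplica> replicaMap = new ConcurrentHashMap<>();

    SimpleReplica convertTemporaryToRbw(long blockId, long expectedGs, long visible)
            throws IOException {
        SimpleReplica r = replicaMap.get(blockId);
        if (r == null) {                      // 1. the replica must exist
            throw new IOException("replica not found: " + blockId);
        }
        if (r.state != State.TEMPORARY) {     // 2. it must still be TEMPORARY
            throw new IOException("unexpected state: " + r.state);
        }
        if (r.genStamp != expectedGs) {       // 3. generation stamps must match
            throw new IOException("generation stamp mismatch for block " + blockId);
        }
        if (r.numBytes < visible) {           // 4. on-disk length must cover the visible length
            throw new IOException("replica shorter than visible length " + visible);
        }
        r.state = State.RBW;                  // all checks passed: promote to RBW
        return r;
    }
}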

Example 27 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

Class FsDatasetImpl, method updateReplicaUnderRecovery.

// FsDatasetSpi
@Override
public Replica updateReplicaUnderRecovery(final ExtendedBlock oldBlock, final long recoveryId, final long newBlockId, final long newlength) throws IOException {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        //get replica
        final String bpid = oldBlock.getBlockPoolId();
        final ReplicaInfo replica = volumeMap.get(bpid, oldBlock.getBlockId());
        LOG.info("updateReplica: " + oldBlock + ", recoveryId=" + recoveryId + ", length=" + newlength + ", replica=" + replica);
        //check replica
        if (replica == null) {
            throw new ReplicaNotFoundException(oldBlock);
        }
        //check replica state
        if (replica.getState() != ReplicaState.RUR) {
            throw new IOException("replica.getState() != " + ReplicaState.RUR + ", replica=" + replica);
        }
        //check replica's byte on disk
        if (replica.getBytesOnDisk() != oldBlock.getNumBytes()) {
            throw new IOException("THIS IS NOT SUPPOSED TO HAPPEN:" + " replica.getBytesOnDisk() != block.getNumBytes(), block=" + oldBlock + ", replica=" + replica);
        }
        //check replica files before update
        checkReplicaFiles(replica);
        //update replica
        final ReplicaInfo finalized = updateReplicaUnderRecovery(oldBlock.getBlockPoolId(), replica, recoveryId, newBlockId, newlength);
        boolean copyTruncate = newBlockId != oldBlock.getBlockId();
        if (!copyTruncate) {
            assert finalized.getBlockId() == oldBlock.getBlockId() && finalized.getGenerationStamp() == recoveryId && finalized.getNumBytes() == newlength : "Replica information mismatched: oldBlock=" + oldBlock + ", recoveryId=" + recoveryId + ", newlength=" + newlength + ", newBlockId=" + newBlockId + ", finalized=" + finalized;
        } else {
            assert finalized.getBlockId() == oldBlock.getBlockId() && finalized.getGenerationStamp() == oldBlock.getGenerationStamp() && finalized.getNumBytes() == oldBlock.getNumBytes() : "Finalized and old information mismatched: oldBlock=" + oldBlock + ", genStamp=" + oldBlock.getGenerationStamp() + ", len=" + oldBlock.getNumBytes() + ", finalized=" + finalized;
        }
        //check replica files after update
        checkReplicaFiles(finalized);
        return finalized;
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException)
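
The two assertions above encode the copy-on-truncate distinction. The hypothetical sketch below (illustrative names only, not Hadoop code) restates that rule: when recovery assigns a new block id, the replica returned for the old block keeps its original id, generation stamp and length; otherwise the old replica itself is finalized at the recovery id and the new length.

class UpdateRecoverySketch {
    // Hypothetical block metadata; run with -ea so the asserts are actually checked.
    record BlockMeta(long blockId, long genStamp, long numBytes) {}

    static void checkFinalized(BlockMeta oldBlock, BlockMeta finalized,
                               long recoveryId, long newBlockId, long newLength) {
        boolean copyTruncate = newBlockId != oldBlock.blockId();
        if (!copyTruncate) {
            // In-place recovery: the same block is stamped with the recovery id and truncated.
            assert finalized.blockId() == oldBlock.blockId()
                && finalized.genStamp() == recoveryId
                && finalized.numBytes() == newLength
                : "replica information mismatched: " + finalized;
        } else {
            // Copy-on-truncate: the old replica is left as it was; the copy carries the new id.
            assert finalized.blockId() == oldBlock.blockId()
                && finalized.genStamp() == oldBlock.genStamp()
                && finalized.numBytes() == oldBlock.numBytes()
                : "finalized and old information mismatched: " + finalized;
        }
    }
}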

Example 28 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

Class FsDatasetImpl, method removeVolumes.

/**
   * Removes a set of volumes from FsDataset.
   * @param storageLocsToRemove a set of
   * {@link StorageLocation}s, one for each volume to remove.
   * @param clearFailure set true to clear the failure information about the removed volumes.
   */
@Override
public void removeVolumes(final Collection<StorageLocation> storageLocsToRemove, boolean clearFailure) {
    Collection<StorageLocation> storageLocationsToRemove = new ArrayList<>(storageLocsToRemove);
    Map<String, List<ReplicaInfo>> blkToInvalidate = new HashMap<>();
    List<String> storageToRemove = new ArrayList<>();
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
            Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
            final StorageLocation sdLocation = sd.getStorageLocation();
            LOG.info("Checking removing StorageLocation " + sdLocation + " with id " + sd.getStorageUuid());
            if (storageLocationsToRemove.contains(sdLocation)) {
                LOG.info("Removing StorageLocation " + sdLocation + " with id " + sd.getStorageUuid() + " from FsDataset.");
                // Disable the volume from the service.
                asyncDiskService.removeVolume(sd.getStorageUuid());
                volumes.removeVolume(sdLocation, clearFailure);
                volumes.waitVolumeRemoved(5000, datasetLockCondition);
                // Remove the replica information for the blocks on this volume from the
                // volume map; unlike updating the volumeMap in addVolume(), this does
                // not scan disks.
                for (String bpid : volumeMap.getBlockPoolList()) {
                    List<ReplicaInfo> blocks = new ArrayList<>();
                    for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator(); it.hasNext(); ) {
                        ReplicaInfo block = it.next();
                        final StorageLocation blockStorageLocation = block.getVolume().getStorageLocation();
                        LOG.info("checking for block " + block.getBlockId() + " with storageLocation " + blockStorageLocation);
                        if (blockStorageLocation.equals(sdLocation)) {
                            blocks.add(block);
                            it.remove();
                        }
                    }
                    blkToInvalidate.put(bpid, blocks);
                }
                storageToRemove.add(sd.getStorageUuid());
                storageLocationsToRemove.remove(sdLocation);
            }
        }
        // Now, lets remove this from the failed volume list.
        if (clearFailure) {
            for (StorageLocation storageLocToRemove : storageLocationsToRemove) {
                volumes.removeVolumeFailureInfo(storageLocToRemove);
            }
        }
        setupAsyncLazyPersistThreads();
    }
    // Call this outside the lock.
    for (Map.Entry<String, List<ReplicaInfo>> entry : blkToInvalidate.entrySet()) {
        String bpid = entry.getKey();
        List<ReplicaInfo> blocks = entry.getValue();
        for (ReplicaInfo block : blocks) {
            invalidate(bpid, block);
        }
    }
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        for (String storageUuid : storageToRemove) {
            storageMap.remove(storageUuid);
        }
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), List (java.util.List), Map (java.util.Map), DataStorage (org.apache.hadoop.hdfs.server.datanode.DataStorage), Storage (org.apache.hadoop.hdfs.server.common.Storage), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation)
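
Note the shape of removeVolumes: replicas to invalidate are collected while the dataset lock is held, but the invalidation itself happens after the lock is released. The following generic sketch (hypothetical names, not Hadoop code) shows that collect-under-lock, act-outside-lock pattern.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.LongPredicate;

class RemoveVolumeSketch {
    private final ReentrantLock datasetLock = new ReentrantLock();
    private final Map<String, List<Long>> replicasByPool = new HashMap<>();

    void removeVolume(String volumeId, LongPredicate isOnRemovedVolume) {
        Map<String, List<Long>> toInvalidate = new HashMap<>();
        datasetLock.lock();
        try {
            // Phase 1 (under the lock): unhook matching replicas from the in-memory map
            // and remember them, without doing any slow work yet.
            for (Map.Entry<String, List<Long>> e : replicasByPool.entrySet()) {
                List<Long> removed = new ArrayList<>();
                for (Iterator<Long> it = e.getValue().iterator(); it.hasNext(); ) {
                    long blockId = it.next();
                    if (isOnRemovedVolume.test(blockId)) {
                        removed.add(blockId);
                        it.remove();
                    }
                }
                toInvalidate.put(e.getKey(), removed);
            }
        } finally {
            datasetLock.unlock();
        }
        // Phase 2 (outside the lock): invalidation may be slow or take other locks,
        // so it must not run while the dataset lock is held.
        toInvalidate.forEach((pool, blocks) ->
            blocks.forEach(b -> System.out.println(
                "invalidate " + pool + ":" + b + " (volume " + volumeId + ")")));
    }
}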

Example 29 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

Class FsDatasetImpl, method getBlockReports.

@Override
public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
    Map<DatanodeStorage, BlockListAsLongs> blockReportsMap = new HashMap<DatanodeStorage, BlockListAsLongs>();
    Map<String, BlockListAsLongs.Builder> builders = new HashMap<String, BlockListAsLongs.Builder>();
    List<FsVolumeImpl> curVolumes = null;
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        curVolumes = volumes.getVolumes();
        for (FsVolumeSpi v : curVolumes) {
            builders.put(v.getStorageID(), BlockListAsLongs.builder(maxDataLength));
        }
        Set<String> missingVolumesReported = new HashSet<>();
        for (ReplicaInfo b : volumeMap.replicas(bpid)) {
            String volStorageID = b.getVolume().getStorageID();
            if (!builders.containsKey(volStorageID)) {
                if (!missingVolumesReported.contains(volStorageID)) {
                    LOG.warn("Storage volume: " + volStorageID + " missing for the" + " replica block: " + b + ". Probably being removed!");
                    missingVolumesReported.add(volStorageID);
                }
                continue;
            }
            switch(b.getState()) {
                case FINALIZED:
                case RBW:
                case RWR:
                    builders.get(b.getVolume().getStorageID()).add(b);
                    break;
                case RUR:
                    ReplicaInfo orig = b.getOriginalReplica();
                    builders.get(b.getVolume().getStorageID()).add(orig);
                    break;
                case TEMPORARY:
                    break;
                default:
                    assert false : "Illegal ReplicaInfo state.";
            }
        }
    }
    for (FsVolumeImpl v : curVolumes) {
        blockReportsMap.put(v.toDatanodeStorage(), builders.get(v.getStorageID()).build());
    }
    return blockReportsMap;
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), HashSet (java.util.HashSet), ReplicaBuilder (org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi)
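
A simplified sketch of the grouping logic above, with hypothetical types rather than the Hadoop API: one report per storage, FINALIZED/RBW/RWR replicas reported directly, replicas under recovery reported via their original replica, and temporary replicas skipped.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class BlockReportSketch {
    enum State { FINALIZED, RBW, RWR, RUR, TEMPORARY }

    // originalBlockId is only meaningful for RUR replicas in this sketch.
    record Replica(long blockId, String storageId, State state, Long originalBlockId) {}

    static Map<String, List<Long>> buildReports(Iterable<Replica> replicas) {
        Map<String, List<Long>> reports = new HashMap<>();
        for (Replica r : replicas) {
            List<Long> report = reports.computeIfAbsent(r.storageId(), k -> new ArrayList<>());
            switch (r.state()) {
                case FINALIZED, RBW, RWR -> report.add(r.blockId());
                case RUR -> report.add(r.originalBlockId());  // report the pre-recovery replica
                case TEMPORARY -> { /* first-time write in progress: not reported */ }
            }
        }
        return reports;
    }
}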

Example 30 with ReplicaInfo

Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.

Class FsDatasetImpl, method stopAllDataxceiverThreads.

void stopAllDataxceiverThreads(FsVolumeImpl volume) {
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        for (String blockPoolId : volumeMap.getBlockPoolList()) {
            Collection<ReplicaInfo> replicas = volumeMap.replicas(blockPoolId);
            for (ReplicaInfo replicaInfo : replicas) {
                if ((replicaInfo.getState() == ReplicaState.TEMPORARY || replicaInfo.getState() == ReplicaState.RBW) && replicaInfo.getVolume().equals(volume)) {
                    ReplicaInPipeline replicaInPipeline = (ReplicaInPipeline) replicaInfo;
                    replicaInPipeline.interruptThread();
                }
            }
        }
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline)
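
A hypothetical, self-contained sketch of the same idea (illustrative types, not the Hadoop API): scan all replicas and interrupt the writer thread of any replica that is still being written (TEMPORARY or RBW) on the volume being taken out of service.

import java.util.List;

class StopWritersSketch {
    enum State { TEMPORARY, RBW, FINALIZED }

    record OpenReplica(State state, String volumeId, Thread writer) {}

    static void stopWritersOnVolume(List<OpenReplica> replicas, String volumeId) {
        for (OpenReplica r : replicas) {
            boolean beingWritten =
                r.state() == State.TEMPORARY || r.state() == State.RBW;
            if (beingWritten && r.volumeId().equals(volumeId) && r.writer() != null) {
                r.writer().interrupt();  // ask the writer to stop; it must handle the interrupt
            }
        }
    }
}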

Aggregations

ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo): 48 usages
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 27 usages
IOException (java.io.IOException): 19 usages
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 16 usages
File (java.io.File): 11 usages
ReplicaInPipeline (org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline): 10 usages
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 10 usages
RandomAccessFile (java.io.RandomAccessFile): 7 usages
ReplicaBuilder (org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder): 7 usages
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 7 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 usages
ReplicaAlreadyExistsException (org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException): 5 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 4 usages
ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler): 4 usages
FileInputStream (java.io.FileInputStream): 3 usages
FileNotFoundException (java.io.FileNotFoundException): 3 usages
ArrayList (java.util.ArrayList): 3 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3 usages
Test (org.junit.Test): 3 usages
HashMap (java.util.HashMap): 2 usages