Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method removeVolumes.
/**
 * Removes a set of volumes from FsDataset.
 * @param storageLocsToRemove a set of
 * {@link StorageLocation}s for each volume.
 * @param clearFailure set true to clear failure information.
 */
@Override
public void removeVolumes(
    final Collection<StorageLocation> storageLocsToRemove,
    boolean clearFailure) {
  Collection<StorageLocation> storageLocationsToRemove =
      new ArrayList<>(storageLocsToRemove);
  Map<String, List<ReplicaInfo>> blkToInvalidate = new HashMap<>();
  List<String> storageToRemove = new ArrayList<>();
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
      Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
      final StorageLocation sdLocation = sd.getStorageLocation();
      LOG.info("Checking removing StorageLocation " + sdLocation
          + " with id " + sd.getStorageUuid());
      if (storageLocationsToRemove.contains(sdLocation)) {
        LOG.info("Removing StorageLocation " + sdLocation + " with id "
            + sd.getStorageUuid() + " from FsDataset.");
        // Disable the volume from the service.
        asyncDiskService.removeVolume(sd.getStorageUuid());
        volumes.removeVolume(sdLocation, clearFailure);
        volumes.waitVolumeRemoved(5000, datasetLockCondition);
        // Remove all replica info for the blocks on the volume. Unlike
        // updating the volumeMap in addVolume(), this operation does
        // not scan disks.
        for (String bpid : volumeMap.getBlockPoolList()) {
          List<ReplicaInfo> blocks = new ArrayList<>();
          for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
              it.hasNext(); ) {
            ReplicaInfo block = it.next();
            final StorageLocation blockStorageLocation =
                block.getVolume().getStorageLocation();
            LOG.info("checking for block " + block.getBlockId()
                + " with storageLocation " + blockStorageLocation);
            if (blockStorageLocation.equals(sdLocation)) {
              blocks.add(block);
              it.remove();
            }
          }
          blkToInvalidate.put(bpid, blocks);
        }
        storageToRemove.add(sd.getStorageUuid());
        storageLocationsToRemove.remove(sdLocation);
      }
    }
    // Now, let's remove this from the failed volume list.
    if (clearFailure) {
      for (StorageLocation storageLocToRemove : storageLocationsToRemove) {
        volumes.removeVolumeFailureInfo(storageLocToRemove);
      }
    }
    setupAsyncLazyPersistThreads();
  }
  // Call this outside the lock.
  for (Map.Entry<String, List<ReplicaInfo>> entry : blkToInvalidate.entrySet()) {
    String bpid = entry.getKey();
    List<ReplicaInfo> blocks = entry.getValue();
    for (ReplicaInfo block : blocks) {
      invalidate(bpid, block);
    }
  }
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    for (String storageUuid : storageToRemove) {
      storageMap.remove(storageUuid);
    }
  }
}
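As a usage illustration (not part of the snippet above): a minimal sketch of invoking removeVolumes on an existing dataset instance. The class name, the dataset parameter, and the /data/dn1 path are assumptions for illustration; in a live DataNode this code path is normally reached through hot-swap reconfiguration of dfs.datanode.data.dir rather than called directly.

import java.io.IOException;
import java.util.Collections;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl;

class RemoveVolumeSketch {
  // Removes a single volume; the second argument (true) also clears any
  // failure information recorded for that volume.
  static void removeOneVolume(FsDatasetImpl dataset) throws IOException {
    StorageLocation locToRemove = StorageLocation.parse("/data/dn1");
    dataset.removeVolumes(Collections.singleton(locToRemove), true);
  }
}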
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method getBlockReports.
@Override
public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
  Map<DatanodeStorage, BlockListAsLongs> blockReportsMap =
      new HashMap<DatanodeStorage, BlockListAsLongs>();
  Map<String, BlockListAsLongs.Builder> builders =
      new HashMap<String, BlockListAsLongs.Builder>();
  List<FsVolumeImpl> curVolumes = null;
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    curVolumes = volumes.getVolumes();
    for (FsVolumeSpi v : curVolumes) {
      builders.put(v.getStorageID(), BlockListAsLongs.builder(maxDataLength));
    }
    Set<String> missingVolumesReported = new HashSet<>();
    for (ReplicaInfo b : volumeMap.replicas(bpid)) {
      String volStorageID = b.getVolume().getStorageID();
      if (!builders.containsKey(volStorageID)) {
        if (!missingVolumesReported.contains(volStorageID)) {
          LOG.warn("Storage volume: " + volStorageID + " missing for the"
              + " replica block: " + b + ". Probably being removed!");
          missingVolumesReported.add(volStorageID);
        }
        continue;
      }
      switch (b.getState()) {
        case FINALIZED:
        case RBW:
        case RWR:
          builders.get(b.getVolume().getStorageID()).add(b);
          break;
        case RUR:
          ReplicaInfo orig = b.getOriginalReplica();
          builders.get(b.getVolume().getStorageID()).add(orig);
          break;
        case TEMPORARY:
          break;
        default:
          assert false : "Illegal ReplicaInfo state.";
      }
    }
  }
  for (FsVolumeImpl v : curVolumes) {
    blockReportsMap.put(v.toDatanodeStorage(),
        builders.get(v.getStorageID()).build());
  }
  return blockReportsMap;
}
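A hedged consumer sketch follows: iterating the per-storage report that getBlockReports returns. The dataset variable and the bpid value are placeholders, not part of the snippet above.

import java.util.Map;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl;

class BlockReportSketch {
  // Prints how many replicas each storage reports for one block pool.
  static void printReportSizes(FsDatasetImpl dataset, String bpid) {
    Map<DatanodeStorage, BlockListAsLongs> reports =
        dataset.getBlockReports(bpid);
    for (Map.Entry<DatanodeStorage, BlockListAsLongs> e : reports.entrySet()) {
      System.out.println(e.getKey().getStorageID() + " -> "
          + e.getValue().getNumberOfBlocks() + " blocks");
    }
  }
}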
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method stopAllDataxceiverThreads.
void stopAllDataxceiverThreads(FsVolumeImpl volume) {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    for (String blockPoolId : volumeMap.getBlockPoolList()) {
      Collection<ReplicaInfo> replicas = volumeMap.replicas(blockPoolId);
      for (ReplicaInfo replicaInfo : replicas) {
        if ((replicaInfo.getState() == ReplicaState.TEMPORARY
            || replicaInfo.getState() == ReplicaState.RBW)
            && replicaInfo.getVolume().equals(volume)) {
          ReplicaInPipeline replicaInPipeline = (ReplicaInPipeline) replicaInfo;
          replicaInPipeline.interruptThread();
        }
      }
    }
  }
}
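For orientation, a sketch of how a caller might reach this method. Since stopAllDataxceiverThreads is declared without a visibility modifier, it is package-private, so this hypothetical helper would have to live in the same package; the helper name and class are assumptions for illustration.

// Hypothetical helper; placed in this package because
// stopAllDataxceiverThreads is package-private.
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;

class QuiesceSketch {
  // Interrupt in-flight writer threads (TEMPORARY/RBW replicas) on a
  // volume before it is taken out of service.
  static void quiesceVolume(FsDatasetImpl dataset, FsVolumeImpl volume) {
    dataset.stopAllDataxceiverThreads(volume);
  }
}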
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method finalizeBlock.
//
// REMIND - mjc - eventually we should have a timeout system
// in place to clean up block files left by abandoned clients.
// We should have some timer in place, so that if a blockfile
// is created but non-valid, and has been idle for >48 hours,
// we can GC it safely.
//
/**
 * Complete the block write!
 */
@Override // FsDatasetSpi
public void finalizeBlock(ExtendedBlock b) throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    if (Thread.interrupted()) {
      // Don't allow data modifications from interrupted threads.
      throw new IOException("Cannot finalize block from Interrupted Thread");
    }
    ReplicaInfo replicaInfo = getReplicaInfo(b);
    if (replicaInfo.getState() == ReplicaState.FINALIZED) {
      // Block has been opened for append but never modified.
      return;
    }
    finalizeReplica(b.getBlockPoolId(), replicaInfo);
  }
}
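A minimal caller sketch, assuming an ExtendedBlock for a replica this DataNode already holds; the block pool id and block id below are placeholders. Note the interrupted-thread guard above: finalizing from an interrupted thread fails fast rather than risking a partial state change.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl;

class FinalizeSketch {
  static void finalizeOne(FsDatasetImpl dataset) throws IOException {
    // Placeholder pool id / block id; a real caller gets these from the
    // write pipeline that just finished streaming the block.
    ExtendedBlock b =
        new ExtendedBlock("BP-1234-10.0.0.1-1400000000000", 1073741825L);
    dataset.finalizeBlock(b);
  }
}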
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method setPinning.
@Override
public void setPinning(ExtendedBlock block) throws IOException {
  if (!blockPinningEnabled) {
    return;
  }
  ReplicaInfo r = getBlockReplica(block);
  r.setPinning(localFS);
}
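Since the method returns early unless block pinning is enabled, a caller sketch should note the configuration dependency; the block identifiers below are placeholders, and dfs.datanode.block-pinning.enabled is the standard HDFS key that controls blockPinningEnabled.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl;

class PinningSketch {
  static void pinOne(FsDatasetImpl dataset) throws IOException {
    // The DataNode must have been started with
    // dfs.datanode.block-pinning.enabled=true; otherwise setPinning
    // is a no-op.
    ExtendedBlock b =
        new ExtendedBlock("BP-1234-10.0.0.1-1400000000000", 1073741825L);
    dataset.setPinning(b);
  }
}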