Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method convertTemporaryToRbw.
// FsDatasetSpi
@Override
public ReplicaInPipeline convertTemporaryToRbw(final ExtendedBlock b)
    throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    final long blockId = b.getBlockId();
    final long expectedGs = b.getGenerationStamp();
    final long visible = b.getNumBytes();
    LOG.info("Convert " + b + " from Temporary to RBW, visible length="
        + visible);
    final ReplicaInfo temp;
    {
      // get replica
      final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), blockId);
      if (r == null) {
        throw new ReplicaNotFoundException(
            ReplicaNotFoundException.NON_EXISTENT_REPLICA + b);
      }
      // check the replica's state
      if (r.getState() != ReplicaState.TEMPORARY) {
        throw new ReplicaAlreadyExistsException(
            "r.getState() != ReplicaState.TEMPORARY, r=" + r);
      }
      temp = r;
    }
    // check generation stamp
    if (temp.getGenerationStamp() != expectedGs) {
      throw new ReplicaAlreadyExistsException(
          "temp.getGenerationStamp() != expectedGs = " + expectedGs
              + ", temp=" + temp);
    }
    // TODO: check writer?
    // set writer to the current thread
    // temp.setWriter(Thread.currentThread());
    // check length
    final long numBytes = temp.getNumBytes();
    if (numBytes < visible) {
      throw new IOException(numBytes + " = numBytes < visible = " + visible
          + ", temp=" + temp);
    }
    // check volume
    final FsVolumeImpl v = (FsVolumeImpl) temp.getVolume();
    if (v == null) {
      throw new IOException("r.getVolume() = null, temp=" + temp);
    }
    final ReplicaInPipeline rbw = v.convertTemporaryToRbw(b, temp);
    if (rbw.getState() != ReplicaState.RBW) {
      throw new IOException("Expected replica state: " + ReplicaState.RBW
          + " obtained " + rbw.getState() + " for converting block "
          + b.getBlockId());
    }
    // overwrite the RBW in the volume map
    volumeMap.add(b.getBlockPoolId(), rbw.getReplicaInfo());
    return rbw;
  }
}
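For orientation, here is a small caller-side sketch of how the conversion above might be exercised. It assumes access to an FsDatasetImpl instance and a block whose replica was previously created in the TEMPORARY state; the helper name promoteToRbw is hypothetical, and imports are omitted as in the snippets on this page.

// Illustrative only: promote a temporary replica to RBW so a client write
// pipeline can take it over. convertTemporaryToRbw() throws if the replica
// is missing, is not TEMPORARY, has a stale generation stamp, or is shorter
// than b.getNumBytes().
static ReplicaInPipeline promoteToRbw(FsDatasetImpl dataset, ExtendedBlock b)
    throws IOException {
  ReplicaInPipeline rbw = dataset.convertTemporaryToRbw(b);
  assert rbw.getState() == ReplicaState.RBW;
  return rbw;
}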
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method updateReplicaUnderRecovery.
// FsDatasetSpi
@Override
public Replica updateReplicaUnderRecovery(final ExtendedBlock oldBlock,
    final long recoveryId, final long newBlockId, final long newlength)
    throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    // get replica
    final String bpid = oldBlock.getBlockPoolId();
    final ReplicaInfo replica = volumeMap.get(bpid, oldBlock.getBlockId());
    LOG.info("updateReplica: " + oldBlock + ", recoveryId=" + recoveryId
        + ", length=" + newlength + ", replica=" + replica);
    // check replica
    if (replica == null) {
      throw new ReplicaNotFoundException(oldBlock);
    }
    // check replica state
    if (replica.getState() != ReplicaState.RUR) {
      throw new IOException("replica.getState() != " + ReplicaState.RUR
          + ", replica=" + replica);
    }
    // check replica's byte on disk
    if (replica.getBytesOnDisk() != oldBlock.getNumBytes()) {
      throw new IOException("THIS IS NOT SUPPOSED TO HAPPEN:"
          + " replica.getBytesOnDisk() != block.getNumBytes(), block="
          + oldBlock + ", replica=" + replica);
    }
    // check replica files before update
    checkReplicaFiles(replica);
    // update replica
    final ReplicaInfo finalized = updateReplicaUnderRecovery(
        oldBlock.getBlockPoolId(), replica, recoveryId, newBlockId, newlength);
    boolean copyTruncate = newBlockId != oldBlock.getBlockId();
    if (!copyTruncate) {
      assert finalized.getBlockId() == oldBlock.getBlockId()
          && finalized.getGenerationStamp() == recoveryId
          && finalized.getNumBytes() == newlength
          : "Replica information mismatched: oldBlock=" + oldBlock
              + ", recoveryId=" + recoveryId + ", newlength=" + newlength
              + ", newBlockId=" + newBlockId + ", finalized=" + finalized;
    } else {
      assert finalized.getBlockId() == oldBlock.getBlockId()
          && finalized.getGenerationStamp() == oldBlock.getGenerationStamp()
          && finalized.getNumBytes() == oldBlock.getNumBytes()
          : "Finalized and old information mismatched: oldBlock=" + oldBlock
              + ", genStamp=" + oldBlock.getGenerationStamp()
              + ", len=" + oldBlock.getNumBytes() + ", finalized=" + finalized;
    }
    // check replica files after update
    checkReplicaFiles(finalized);
    return finalized;
  }
}
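A hedged sketch of the recovery path that reaches this method: once the recovery coordinator has agreed on a new length, the replica (already marked RUR) is updated under the new recovery generation stamp. The helper recoverToLength is hypothetical; passing the old block id keeps the call on the in-place (non copy-truncate) branch of the assertions above.

// Illustrative only: finalize a replica under recovery to an agreed length.
static Replica recoverToLength(FsDatasetImpl dataset, ExtendedBlock oldBlock,
    long recoveryId, long agreedLength) throws IOException {
  return dataset.updateReplicaUnderRecovery(
      oldBlock, recoveryId, oldBlock.getBlockId(), agreedLength);
}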
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method removeVolumes.
/**
 * Removes a set of volumes from FsDataset.
 * @param storageLocsToRemove a set of
 * {@link StorageLocation}s for each volume.
 * @param clearFailure set true to clear failure information.
 */
@Override
public void removeVolumes(
    final Collection<StorageLocation> storageLocsToRemove,
    boolean clearFailure) {
  Collection<StorageLocation> storageLocationsToRemove =
      new ArrayList<>(storageLocsToRemove);
  Map<String, List<ReplicaInfo>> blkToInvalidate = new HashMap<>();
  List<String> storageToRemove = new ArrayList<>();
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
      Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
      final StorageLocation sdLocation = sd.getStorageLocation();
      LOG.info("Checking removing StorageLocation " + sdLocation
          + " with id " + sd.getStorageUuid());
      if (storageLocationsToRemove.contains(sdLocation)) {
        LOG.info("Removing StorageLocation " + sdLocation + " with id "
            + sd.getStorageUuid() + " from FsDataset.");
        // Disable the volume from the service.
        asyncDiskService.removeVolume(sd.getStorageUuid());
        volumes.removeVolume(sdLocation, clearFailure);
        volumes.waitVolumeRemoved(5000, datasetLockCondition);
        // Remove the replica information for blocks on this volume;
        // unlike addVolume(), this does not scan the disks.
        for (String bpid : volumeMap.getBlockPoolList()) {
          List<ReplicaInfo> blocks = new ArrayList<>();
          for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
              it.hasNext(); ) {
            ReplicaInfo block = it.next();
            final StorageLocation blockStorageLocation =
                block.getVolume().getStorageLocation();
            LOG.info("checking for block " + block.getBlockId()
                + " with storageLocation " + blockStorageLocation);
            if (blockStorageLocation.equals(sdLocation)) {
              blocks.add(block);
              it.remove();
            }
          }
          blkToInvalidate.put(bpid, blocks);
        }
        storageToRemove.add(sd.getStorageUuid());
        storageLocationsToRemove.remove(sdLocation);
      }
    }
    // Now, let's remove this from the failed volume list.
    if (clearFailure) {
      for (StorageLocation storageLocToRemove : storageLocationsToRemove) {
        volumes.removeVolumeFailureInfo(storageLocToRemove);
      }
    }
    setupAsyncLazyPersistThreads();
  }
  // Call this outside the lock.
  for (Map.Entry<String, List<ReplicaInfo>> entry : blkToInvalidate.entrySet()) {
    String bpid = entry.getKey();
    List<ReplicaInfo> blocks = entry.getValue();
    for (ReplicaInfo block : blocks) {
      invalidate(bpid, block);
    }
  }
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    for (String storageUuid : storageToRemove) {
      storageMap.remove(storageUuid);
    }
  }
}
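A hedged usage sketch for the removal above, assuming the directory has already been dropped from the DataNode's configured data directories (hot-swap style reconfiguration). The helper dropDataDir and the example path are hypothetical; StorageLocation.parse() is the usual way to build a StorageLocation from a raw directory string and may throw IOException.

// Illustrative only: remove a single data directory from the dataset and
// clear any recorded failure for it.
static void dropDataDir(FsDatasetImpl dataset, String dir) throws IOException {
  Collection<StorageLocation> toRemove = new ArrayList<>();
  toRemove.add(StorageLocation.parse(dir)); // e.g. "/data/disk3/dfs" (placeholder)
  dataset.removeVolumes(toRemove, true /* clearFailure */);
}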
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method getBlockReports.
@Override
public Map<DatanodeStorage, BlockListAsLongs> getBlockReports(String bpid) {
  Map<DatanodeStorage, BlockListAsLongs> blockReportsMap =
      new HashMap<DatanodeStorage, BlockListAsLongs>();
  Map<String, BlockListAsLongs.Builder> builders =
      new HashMap<String, BlockListAsLongs.Builder>();
  List<FsVolumeImpl> curVolumes = null;
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    curVolumes = volumes.getVolumes();
    for (FsVolumeSpi v : curVolumes) {
      builders.put(v.getStorageID(), BlockListAsLongs.builder(maxDataLength));
    }
    Set<String> missingVolumesReported = new HashSet<>();
    for (ReplicaInfo b : volumeMap.replicas(bpid)) {
      String volStorageID = b.getVolume().getStorageID();
      if (!builders.containsKey(volStorageID)) {
        if (!missingVolumesReported.contains(volStorageID)) {
          LOG.warn("Storage volume: " + volStorageID + " missing for the"
              + " replica block: " + b + ". Probably being removed!");
          missingVolumesReported.add(volStorageID);
        }
        continue;
      }
      switch (b.getState()) {
        case FINALIZED:
        case RBW:
        case RWR:
          builders.get(b.getVolume().getStorageID()).add(b);
          break;
        case RUR:
          ReplicaInfo orig = b.getOriginalReplica();
          builders.get(b.getVolume().getStorageID()).add(orig);
          break;
        case TEMPORARY:
          break;
        default:
          assert false : "Illegal ReplicaInfo state.";
      }
    }
  }
  for (FsVolumeImpl v : curVolumes) {
    blockReportsMap.put(v.toDatanodeStorage(),
        builders.get(v.getStorageID()).build());
  }
  return blockReportsMap;
}
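A hedged sketch of consuming the result, one BlockListAsLongs per storage, roughly in the spirit of the DataNode's block-report sender. The helper logReportSizes is hypothetical; getStorageID() on DatanodeStorage and getNumberOfBlocks() on BlockListAsLongs are existing accessors.

// Illustrative only: summarize how many replicas each storage would report
// for a given block pool.
static void logReportSizes(FsDatasetImpl dataset, String bpid) {
  Map<DatanodeStorage, BlockListAsLongs> reports = dataset.getBlockReports(bpid);
  for (Map.Entry<DatanodeStorage, BlockListAsLongs> e : reports.entrySet()) {
    System.out.println(e.getKey().getStorageID() + " reports "
        + e.getValue().getNumberOfBlocks() + " replicas for " + bpid);
  }
}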
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
The class FsDatasetImpl, method stopAllDataxceiverThreads.
void stopAllDataxceiverThreads(FsVolumeImpl volume) {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    for (String blockPoolId : volumeMap.getBlockPoolList()) {
      Collection<ReplicaInfo> replicas = volumeMap.replicas(blockPoolId);
      for (ReplicaInfo replicaInfo : replicas) {
        if ((replicaInfo.getState() == ReplicaState.TEMPORARY
            || replicaInfo.getState() == ReplicaState.RBW)
            && replicaInfo.getVolume().equals(volume)) {
          ReplicaInPipeline replicaInPipeline = (ReplicaInPipeline) replicaInfo;
          replicaInPipeline.interruptThread();
        }
      }
    }
  }
}
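A hedged sketch of when this is typically invoked: before a volume is removed or marked failed, writers of in-flight replicas on it are interrupted so their pipelines fail fast rather than keep writing to a disappearing disk. The helper quiesceVolume is hypothetical and would need to live in the same package, since stopAllDataxceiverThreads is package-private.

// Illustrative only: interrupt writers of TEMPORARY/RBW replicas on a volume
// that is about to be taken out of service; actual volume removal (e.g. via
// removeVolumes(...) shown above) would follow separately.
static void quiesceVolume(FsDatasetImpl dataset, FsVolumeImpl volume) {
  dataset.stopAllDataxceiverThreads(volume);
}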