Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.
From the class FsDatasetImpl, method createRbw.
@Override // FsDatasetSpi
public ReplicaHandler createRbw(StorageType storageType, ExtendedBlock b,
    boolean allowLazyPersist) throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
    if (replicaInfo != null) {
      throw new ReplicaAlreadyExistsException("Block " + b +
          " already exists in state " + replicaInfo.getState() +
          " and thus cannot be created.");
    }
    // create a new block
    FsVolumeReference ref = null;
    // Use ramdisk only if block size is a multiple of OS page size.
    // This simplifies reservation for partially used replicas
    // significantly.
    if (allowLazyPersist && lazyWriter != null &&
        b.getNumBytes() % cacheManager.getOsPageSize() == 0 &&
        reserveLockedMemory(b.getNumBytes())) {
      try {
        // First try to place the block on a transient volume.
        ref = volumes.getNextTransientVolume(b.getNumBytes());
        datanode.getMetrics().incrRamDiskBlocksWrite();
      } catch (DiskOutOfSpaceException de) {
        // Ignore the exception since we just fall back to persistent storage.
      } finally {
        if (ref == null) {
          cacheManager.release(b.getNumBytes());
        }
      }
    }
    if (ref == null) {
      ref = volumes.getNextVolume(storageType, b.getNumBytes());
    }
    FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
    if (allowLazyPersist && !v.isTransientStorage()) {
      datanode.getMetrics().incrRamDiskBlocksWriteFallback();
    }
    ReplicaInPipeline newReplicaInfo;
    try {
      newReplicaInfo = v.createRbw(b);
      if (newReplicaInfo.getReplicaInfo().getState() != ReplicaState.RBW) {
        throw new IOException("CreateRBW returned a replica of state " +
            newReplicaInfo.getReplicaInfo().getState() +
            " for block " + b.getBlockId());
      }
    } catch (IOException e) {
      IOUtils.cleanup(null, ref);
      throw e;
    }
    volumeMap.add(b.getBlockPoolId(), newReplicaInfo.getReplicaInfo());
    return new ReplicaHandler(newReplicaInfo, ref);
  }
}
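The ReplicaHandler returned here bundles the new replica with the FsVolumeReference that pins its volume; the caller must close the handler so the reference is released. A minimal caller sketch, assuming a dataset field of type FsDatasetSpi and an ExtendedBlock named block (both placeholders, not from the snippet above):

// Hypothetical caller; `dataset` and `block` are placeholders.
ReplicaHandler handler =
    dataset.createRbw(StorageType.DEFAULT, block, false);
try {
  ReplicaInPipeline replica = handler.getReplica();
  // ... stream the client's bytes into the RBW replica ...
} finally {
  // Closing the handler also closes the underlying FsVolumeReference.
  IOUtils.cleanup(null, handler);
}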
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.
From the class FsDatasetImpl, method recoverAppend.
@Override // FsDatasetSpi
public ReplicaHandler recoverAppend(ExtendedBlock b, long newGS,
    long expectedBlockLen) throws IOException {
  LOG.info("Recover failed append to " + b);
  while (true) {
    try {
      try (AutoCloseableLock lock = datasetLock.acquire()) {
        ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
        FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
        ReplicaInPipeline replica;
        try {
          // change the replica's state/gs etc.
          if (replicaInfo.getState() == ReplicaState.FINALIZED) {
            replica = append(b.getBlockPoolId(), replicaInfo, newGS,
                b.getNumBytes());
          } else {
            // RBW
            replicaInfo.bumpReplicaGS(newGS);
            replica = (ReplicaInPipeline) replicaInfo;
          }
        } catch (IOException e) {
          IOUtils.cleanup(null, ref);
          throw e;
        }
        return new ReplicaHandler(replica, ref);
      }
    } catch (MustStopExistingWriter e) {
      e.getReplicaInPipeline().stopWriter(
          datanode.getDnConf().getXceiverStopTimeout());
    }
  }
}
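Note the ownership discipline around the FsVolumeReference: it is obtained before the replica is mutated, released explicitly if that mutation throws, and otherwise handed off to the ReplicaHandler, whose close() releases it. A stripped-down sketch of that hand-off pattern (mutateReplica is a hypothetical name for the step that may fail):

FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
ReplicaInPipeline replica;
try {
  replica = mutateReplica(replicaInfo);  // any step that may throw
} catch (IOException e) {
  IOUtils.cleanup(null, ref);  // release the pin; nobody else will
  throw e;
}
// From here on the handler owns the reference and must be closed.
return new ReplicaHandler(replica, ref);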
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.
From the class FsDatasetImpl, method getStorageReports.
@Override // FsDatasetSpi
public StorageReport[] getStorageReports(String bpid) throws IOException {
  List<StorageReport> reports;
  synchronized (statsLock) {
    List<FsVolumeImpl> curVolumes = volumes.getVolumes();
    reports = new ArrayList<>(curVolumes.size());
    for (FsVolumeImpl volume : curVolumes) {
      try (FsVolumeReference ref = volume.obtainReference()) {
        StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
            false,
            volume.getCapacity(),
            volume.getDfsUsed(),
            volume.getAvailable(),
            volume.getBlockPoolUsed(bpid),
            volume.getNonDfsUsed());
        reports.add(sr);
      } catch (ClosedChannelException e) {
        continue;
      }
    }
  }
  return reports.toArray(new StorageReport[reports.size()]);
}
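Because FsVolumeReference extends Closeable, try-with-resources is the natural way to pin a volume for a short read-only operation, as getStorageReports does; a ClosedChannelException signals that the volume was removed concurrently and can simply be skipped. A minimal sketch, assuming volume is any FsVolumeImpl:

try (FsVolumeReference ref = volume.obtainReference()) {
  // The volume cannot be removed while the reference is held.
  long capacity = volume.getCapacity();
  long used = volume.getDfsUsed();
  // ... build a report from the stable readings ...
} catch (ClosedChannelException e) {
  // Volume was removed between getVolumes() and obtainReference(); skip it.
}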
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.
From the class FsDatasetImpl, method append.
@Override // FsDatasetSpi
public ReplicaHandler append(ExtendedBlock b, long newGS,
    long expectedBlockLen) throws IOException {
  try (AutoCloseableLock lock = datasetLock.acquire()) {
    // check the validity of the parameter
    if (newGS < b.getGenerationStamp()) {
      throw new IOException("The new generation stamp " + newGS +
          " should be greater than the replica " + b + "'s generation stamp");
    }
    ReplicaInfo replicaInfo = getReplicaInfo(b);
    LOG.info("Appending to " + replicaInfo);
    if (replicaInfo.getState() != ReplicaState.FINALIZED) {
      throw new ReplicaNotFoundException(
          ReplicaNotFoundException.UNFINALIZED_REPLICA + b);
    }
    if (replicaInfo.getNumBytes() != expectedBlockLen) {
      throw new IOException("Corrupted replica " + replicaInfo +
          " with a length of " + replicaInfo.getNumBytes() +
          " expected length is " + expectedBlockLen);
    }
    FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
    ReplicaInPipeline replica = null;
    try {
      replica = append(b.getBlockPoolId(), replicaInfo, newGS, b.getNumBytes());
    } catch (IOException e) {
      IOUtils.cleanup(null, ref);
      throw e;
    }
    return new ReplicaHandler(replica, ref);
  }
}
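As with createRbw, the volume reference is acquired before the replica is converted for appending and travels inside the returned ReplicaHandler on success. A hedged caller sketch, assuming dataset and blk are placeholders and relying on ReplicaHandler being Closeable so try-with-resources releases the reference:

// The new generation stamp must not be lower than the current one.
long newGS = blk.getGenerationStamp() + 1;
try (ReplicaHandler handler =
         dataset.append(blk, newGS, blk.getNumBytes())) {
  ReplicaInPipeline out = handler.getReplica();
  // ... write the appended bytes through `out` ...
}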
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference in project hadoop by apache.
From the class FsDatasetImpl, method addVolume.
@Override
public void addVolume(final StorageLocation location,
    final List<NamespaceInfo> nsInfos) throws IOException {
  // Prepare volume in DataStorage
  final DataStorage.VolumeBuilder builder;
  try {
    builder = dataStorage.prepareVolume(datanode, location, nsInfos);
  } catch (IOException e) {
    volumes.addVolumeFailureInfo(new VolumeFailureInfo(location, Time.now()));
    throw e;
  }
  final Storage.StorageDirectory sd = builder.getStorageDirectory();
  StorageType storageType = location.getStorageType();
  final FsVolumeImpl fsVolume = createFsVolume(sd.getStorageUuid(), sd, location);
  final ReplicaMap tempVolumeMap = new ReplicaMap(new AutoCloseableLock());
  ArrayList<IOException> exceptions = Lists.newArrayList();
  for (final NamespaceInfo nsInfo : nsInfos) {
    String bpid = nsInfo.getBlockPoolID();
    try {
      fsVolume.addBlockPool(bpid, this.conf, this.timer);
      fsVolume.getVolumeMap(bpid, tempVolumeMap, ramDiskReplicaTracker);
    } catch (IOException e) {
      LOG.warn("Caught exception when adding " + fsVolume +
          ". Will throw later.", e);
      exceptions.add(e);
    }
  }
  if (!exceptions.isEmpty()) {
    try {
      sd.unlock();
    } catch (IOException e) {
      exceptions.add(e);
    }
    throw MultipleIOException.createIOException(exceptions);
  }
  final FsVolumeReference ref = fsVolume.obtainReference();
  setupAsyncLazyPersistThread(fsVolume);
  builder.build();
  activateVolume(tempVolumeMap, sd, storageType, ref);
  LOG.info("Added volume - " + location + ", StorageType: " + storageType);
}
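addVolume attempts setup for every block pool before failing, collecting each IOException and throwing them together. A stripped-down sketch of this aggregation idiom, using Hadoop's org.apache.hadoop.io.MultipleIOException (setUpBlockPool is a hypothetical helper standing in for the per-pool work):

List<IOException> exceptions = Lists.newArrayList();
for (NamespaceInfo nsInfo : nsInfos) {
  try {
    setUpBlockPool(nsInfo.getBlockPoolID());  // may fail independently
  } catch (IOException e) {
    exceptions.add(e);  // remember the failure, keep going
  }
}
if (!exceptions.isEmpty()) {
  // Wraps a single exception as-is, or many into one MultipleIOException.
  throw MultipleIOException.createIOException(exceptions);
}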