Use of org.apache.hadoop.hdfs.server.datanode.LocalReplica in project hadoop by apache.
The append method of the class FsVolumeImpl.
public ReplicaInPipeline append(String bpid, ReplicaInfo replicaInfo,
    long newGS, long estimateBlockLen) throws IOException {
  // Only the delta beyond the bytes already on disk needs to be reserved.
  long bytesReserved = estimateBlockLen - replicaInfo.getNumBytes();
  if (getAvailable() < bytesReserved) {
    throw new DiskOutOfSpaceException(
        "Insufficient space for appending to " + replicaInfo);
  }
  assert replicaInfo.getVolume() == this :
      "The volume of the replica should be the same as this volume";

  // Construct an RBW replica with the new generation stamp.
  File newBlkFile = new File(getRbwDir(bpid), replicaInfo.getBlockName());
  LocalReplicaInPipeline newReplicaInfo = new ReplicaBuilder(ReplicaState.RBW)
      .setBlockId(replicaInfo.getBlockId())
      .setLength(replicaInfo.getNumBytes())
      .setGenerationStamp(newGS)
      .setFsVolume(this)
      .setDirectoryToUse(newBlkFile.getParentFile())
      .setWriterThread(Thread.currentThread())
      .setBytesToReserve(bytesReserved)
      .buildLocalReplicaInPipeline();

  // Load the last partial chunk checksum and the on-disk data length.
  LocalReplica localReplica = (LocalReplica) replicaInfo;
  byte[] lastChunkChecksum = loadLastPartialChunkChecksum(
      localReplica.getBlockFile(), localReplica.getMetaFile());
  newReplicaInfo.setLastChecksumAndDataLen(
      replicaInfo.getNumBytes(), lastChunkChecksum);

  // Rename the block file and its meta file into the rbw directory.
  newReplicaInfo.moveReplicaFrom(replicaInfo, newBlkFile);
  reserveSpaceForReplica(bytesReserved);
  return newReplicaInfo;
}
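For context, the sketch below illustrates what the "load last checksum and datalen" step involves: finding the checksum that covers the last partial chunk of the block in the meta file, so the appending writer can continue the running checksum. The header length, chunk size, and checksum width here are hard-coded assumptions for illustration only; the real FsVolumeImpl.loadLastPartialChunkChecksum reads them from BlockMetadataHeader and the block's DataChecksum instead.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class LastChunkChecksumSketch {
  // Hypothetical constants, assumed for this sketch.
  static final int HEADER_LEN = 7;           // assumed meta-file header size
  static final int BYTES_PER_CHECKSUM = 512; // assumed chunk size
  static final int CHECKSUM_SIZE = 4;        // CRC32 width

  static byte[] loadLastPartialChunkChecksum(File blockFile, File metaFile)
      throws IOException {
    long onDiskLen = blockFile.length();
    // A block whose length is an exact multiple of the chunk size has no
    // partial last chunk, so there is no checksum to carry forward.
    if (onDiskLen % BYTES_PER_CHECKSUM == 0) {
      return null;
    }
    // The checksum of the partial chunk sits directly after the checksums
    // of all full chunks, which follow the meta-file header.
    long offset = HEADER_LEN
        + (onDiskLen / BYTES_PER_CHECKSUM) * CHECKSUM_SIZE;
    byte[] checksum = new byte[CHECKSUM_SIZE];
    try (RandomAccessFile raf = new RandomAccessFile(metaFile, "r")) {
      raf.seek(offset);
      raf.readFully(checksum);
    }
    return checksum;
  }
}

The returned bytes are what append hands to setLastChecksumAndDataLen together with the current data length, seeding the new RBW replica's in-progress checksum state.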