Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder in project hadoop by apache.
The class FsVolumeImpl, method addFinalizedBlock.
/**
 * Moves the replica's files into the finalized directory of the given
 * block pool and returns the resulting FINALIZED replica.
 *
 * @param bpid block pool id the block belongs to
 * @param b the block being finalized
 * @param replicaInfo the replica whose files are being finalized
 * @param bytesReserved Space that was reserved during
 * block creation. Now that the block is being finalized we
 * can free up this space.
 * @return the finalized replica, backed by the destination directory
 * @throws IOException if the block files cannot be moved
 */
ReplicaInfo addFinalizedBlock(String bpid, Block b, ReplicaInfo replicaInfo, long bytesReserved) throws IOException {
releaseReservedSpace(bytesReserved);
File dest = getBlockPoolSlice(bpid).addFinalizedBlock(b, replicaInfo);
return new ReplicaBuilder(ReplicaState.FINALIZED)
    .setBlock(replicaInfo)
    .setFsVolume(this)
    .setDirectoryToUse(dest.getParentFile())
    .build();
}
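For reference, here is a minimal sketch (not from the Hadoop sources; names and values are hypothetical) of the same FINALIZED construction driven from explicit block metadata rather than an existing ReplicaInfo. It uses only setters that appear in the snippets on this page: setBlock(replicaInfo) above copies the block id, generation stamp and length in one call, whereas here each field is set individually.
static ReplicaInfo buildFinalized(long blockId, long genStamp, long length,
    FsVolumeSpi volume, File blockDir) throws IOException {
  // Assumption: blockDir already contains the finalized block and meta files.
  return new ReplicaBuilder(ReplicaState.FINALIZED)
      .setBlockId(blockId)
      .setGenerationStamp(genStamp)
      .setLength(length)
      .setFsVolume(volume)
      .setDirectoryToUse(blockDir)
      .build();
}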
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder in project hadoop by apache.
The class FsVolumeImpl, method updateRURCopyOnTruncate.
public ReplicaInPipeline updateRURCopyOnTruncate(ReplicaInfo rur, String bpid, long newBlockId, long recoveryId, long newlength) throws IOException {
rur.breakHardLinksIfNeeded();
File[] copiedReplicaFiles = copyReplicaWithNewBlockIdAndGS(rur, bpid, newBlockId, recoveryId);
// The helper returns the pair as {metaFile, blockFile}.
File blockFile = copiedReplicaFiles[1];
File metaFile = copiedReplicaFiles[0];
LocalReplica.truncateBlock(rur.getVolume(), blockFile, metaFile, rur.getNumBytes(), newlength, fileIoProvider);
LocalReplicaInPipeline newReplicaInfo = new ReplicaBuilder(ReplicaState.RBW)
    .setBlockId(newBlockId)
    .setGenerationStamp(recoveryId)
    .setFsVolume(this)
    .setDirectoryToUse(blockFile.getParentFile())
    .setBytesToReserve(newlength)
    .buildLocalReplicaInPipeline();
// In theory, this rbw replica needs to reload the last chunk checksum,
// but it is immediately converted to finalized state within the same
// lock, so no need to update it.
return newReplicaInfo;
}
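The two files in copiedReplicaFiles follow the standard HDFS on-disk naming, where the meta file name embeds the generation stamp. A sketch of that pairing (path and ids hypothetical), using FsDatasetUtil.getMetaFile as seen in the convertTemporaryToRbw snippet below:
File dir = new File("/data/dn/current/rbw");             // hypothetical path
File blockFile = new File(dir, "blk_1073741825");        // block data
File metaFile = FsDatasetUtil.getMetaFile(blockFile, 1001L);
// metaFile is blk_1073741825_1001.meta, holding the block's checksums.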
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder in project hadoop by apache.
The class FsVolumeImpl, method createTemporary.
public ReplicaInPipeline createTemporary(ExtendedBlock b) throws IOException {
// create a temporary file to hold block in the designated volume
File f = createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
LocalReplicaInPipeline newReplicaInfo = new ReplicaBuilder(ReplicaState.TEMPORARY)
    .setBlockId(b.getBlockId())
    .setGenerationStamp(b.getGenerationStamp())
    .setDirectoryToUse(f.getParentFile())
    .setBytesToReserve(b.getLocalBlock().getNumBytes())
    .setFsVolume(this)
    .buildLocalReplicaInPipeline();
return newReplicaInfo;
}
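As a usage sketch (pool id and numbers hypothetical), a caller hands createTemporary an ExtendedBlock, which bundles the block pool id with the block's id, length and generation stamp:
// volume is an FsVolumeImpl obtained from the dataset; values are made up.
ExtendedBlock b = new ExtendedBlock("BP-1234-127.0.0.1-1450", 1073741825L, 0L, 1001L);
ReplicaInPipeline temp = volume.createTemporary(b);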
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder in project hadoop by apache.
The class FsVolumeImpl, method convertTemporaryToRbw.
public ReplicaInPipeline convertTemporaryToRbw(ExtendedBlock b, ReplicaInfo temp) throws IOException {
final long blockId = b.getBlockId();
final long expectedGs = b.getGenerationStamp();
final long visible = b.getNumBytes();
final long numBytes = temp.getNumBytes();
// move block files to the rbw directory
BlockPoolSlice bpslice = getBlockPoolSlice(b.getBlockPoolId());
final File dest = FsDatasetImpl.moveBlockFiles(b.getLocalBlock(), temp, bpslice.getRbwDir());
// create RBW
final LocalReplicaInPipeline rbw = new ReplicaBuilder(ReplicaState.RBW)
    .setBlockId(blockId)
    .setLength(numBytes)
    .setGenerationStamp(expectedGs)
    .setFsVolume(this)
    .setDirectoryToUse(dest.getParentFile())
    .setWriterThread(Thread.currentThread())
    .setBytesToReserve(0)
    .buildLocalReplicaInPipeline();
rbw.setBytesAcked(visible);
// load last checksum and datalen
final File destMeta = FsDatasetUtil.getMetaFile(dest, b.getGenerationStamp());
byte[] lastChunkChecksum = loadLastPartialChunkChecksum(dest, destMeta);
rbw.setLastChecksumAndDataLen(numBytes, lastChunkChecksum);
return rbw;
}
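The distinction between visible and numBytes above matters to readers during a write pipeline. A hedged usage sketch (variable names hypothetical) of the two lengths a converted replica reports:
ReplicaInPipeline rbw = volume.convertTemporaryToRbw(b, temp);
long acked = rbw.getBytesAcked();    // bytes acknowledged to the client (visible)
long onDisk = rbw.getBytesOnDisk();  // bytes actually written to the volume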
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder in project hadoop by apache.
The class FsVolumeImpl, method createRbw.
public ReplicaInPipeline createRbw(ExtendedBlock b) throws IOException {
File f = createRbwFile(b.getBlockPoolId(), b.getLocalBlock());
LocalReplicaInPipeline newReplicaInfo = new ReplicaBuilder(ReplicaState.RBW)
    .setBlockId(b.getBlockId())
    .setGenerationStamp(b.getGenerationStamp())
    .setFsVolume(this)
    .setDirectoryToUse(f.getParentFile())
    .setBytesToReserve(b.getNumBytes())
    .buildLocalReplicaInPipeline();
return newReplicaInfo;
}
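Taken together, the snippets show one builder serving two shapes of result: build() returns the general ReplicaInfo (used for the FINALIZED case in addFinalizedBlock), while buildLocalReplicaInPipeline() returns a LocalReplicaInPipeline for the write-path states (TEMPORARY and RBW). A condensed sketch of the pattern, with hypothetical inputs:
ReplicaBuilder builder = new ReplicaBuilder(ReplicaState.RBW)
    .setBlockId(blockId)
    .setGenerationStamp(genStamp)
    .setFsVolume(volume)
    .setDirectoryToUse(dir)
    .setBytesToReserve(expectedLen);
// Pipeline states use the typed variant; FINALIZED uses build() instead.
LocalReplicaInPipeline inPipeline = builder.buildLocalReplicaInPipeline();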