use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class FSNamesystem method updatePipelineInternal.
private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock,
    ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs,
    boolean logRetryCache) throws IOException {
  assert hasWriteLock();
  // check the validity of the block and lease holder name
  final INodeFile pendingFile = checkUCBlock(oldBlock, clientName);
  final String src = pendingFile.getFullPathName();
  final BlockInfo lastBlock = pendingFile.getLastBlock();
  assert !lastBlock.isComplete();
  // check new GS & length: this is not expected
  if (newBlock.getGenerationStamp() <= lastBlock.getGenerationStamp()) {
    final String msg = "Update " + oldBlock + " but the new block " + newBlock
        + " does not have a larger generation stamp than the last block "
        + lastBlock;
    LOG.warn(msg);
    throw new IOException(msg);
  }
  if (newBlock.getNumBytes() < lastBlock.getNumBytes()) {
    final String msg = "Update " + oldBlock + " (size=" + oldBlock.getNumBytes()
        + ") to a smaller size block " + newBlock + " (size="
        + newBlock.getNumBytes() + ")";
    LOG.warn(msg);
    throw new IOException(msg);
  }
  // Update old block with the new generation stamp and new length
  lastBlock.setNumBytes(newBlock.getNumBytes());
  lastBlock.setGenerationStampAndVerifyReplicas(newBlock.getGenerationStamp());
  // find the DatanodeDescriptor objects
  final DatanodeStorageInfo[] storages = blockManager.getDatanodeManager()
      .getDatanodeStorageInfos(newNodes, newStorageIDs,
          "src=%s, oldBlock=%s, newBlock=%s, clientName=%s",
          src, oldBlock, newBlock, clientName);
  lastBlock.getUnderConstructionFeature().setExpectedLocations(lastBlock,
      storages, lastBlock.getBlockType());
  FSDirWriteFileOp.persistBlocks(dir, src, pendingFile, logRetryCache);
}
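
The two guard clauses above do the real work of a pipeline update: the recovered block must carry a strictly larger generation stamp, and it may not be shorter than the length already recorded for the last block. A minimal standalone sketch of that validation, using plain longs instead of ExtendedBlock and BlockInfo (the method name and the use of IllegalArgumentException are illustrative, not part of Hadoop):

// Illustrative only: mirrors the two checks in updatePipelineInternal.
static void validatePipelineUpdate(long oldGenStamp, long oldNumBytes,
    long newGenStamp, long newNumBytes) {
  if (newGenStamp <= oldGenStamp) {
    // Pipeline recovery always bumps the generation stamp, so an equal or
    // smaller stamp means the request is stale or out of order.
    throw new IllegalArgumentException("generation stamp " + newGenStamp
        + " is not larger than " + oldGenStamp);
  }
  if (newNumBytes < oldNumBytes) {
    // Bytes already acknowledged to the client must never be lost, so the
    // block is not allowed to shrink.
    throw new IllegalArgumentException("new length " + newNumBytes
        + " is smaller than " + oldNumBytes);
  }
}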
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class FileUnderConstructionFeature method cleanZeroSizeBlock.
/**
 * When deleting a file in the current fs directory, and the file is contained
 * in a snapshot, we should delete the last block if it's under construction
 * and its size is 0.
 */
void cleanZeroSizeBlock(final INodeFile f,
    final BlocksMapUpdateInfo collectedBlocks) {
  final BlockInfo[] blocks = f.getBlocks();
  if (blocks != null && blocks.length > 0
      && !blocks[blocks.length - 1].isComplete()) {
    BlockInfo lastUC = blocks[blocks.length - 1];
    if (lastUC.getNumBytes() == 0) {
      // this is a 0-sized block; no need to check its UC state here
      collectedBlocks.addDeleteBlock(lastUC);
      f.removeLastBlock(lastUC);
    }
  }
}
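
The guard condition can be read as a single predicate over the file's block list. A hedged sketch of that predicate (the helper name is made up; it assumes BlockInfo from org.apache.hadoop.hdfs.server.blockmanagement is on the classpath):

// Illustrative predicate: true when the last block is still under
// construction and has not received any data yet.
static boolean lastBlockIsRemovable(BlockInfo[] blocks) {
  if (blocks == null || blocks.length == 0) {
    return false;
  }
  BlockInfo last = blocks[blocks.length - 1];
  return !last.isComplete() && last.getNumBytes() == 0;
}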
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class FileUnderConstructionFeature method updateLengthOfLastBlock.
/**
 * Update the length for the last block.
 *
 * @param lastBlockLength
 *          The length of the last block reported from client
 * @throws IOException
 */
void updateLengthOfLastBlock(INodeFile f, long lastBlockLength)
    throws IOException {
  BlockInfo lastBlock = f.getLastBlock();
  assert (lastBlock != null) : "The last block for path "
      + f.getFullPathName() + " is null when updating its length";
  assert !lastBlock.isComplete() : "The last block for path "
      + f.getFullPathName()
      + " is not under-construction when updating its length";
  lastBlock.setNumBytes(lastBlockLength);
}
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class INodeFile method computeQuotaDeltaForTruncate.
/**
 * Compute the quota usage change for a truncate op.
 * @param newLength the length for truncation
 * TODO: properly handle striped blocks (HDFS-7622)
 **/
void computeQuotaDeltaForTruncate(long newLength, BlockStoragePolicy bsps,
    QuotaCounts delta) {
  final BlockInfo[] blocks = getBlocks();
  if (blocks.length == 0) {
    return;
  }

  long size = 0;
  for (BlockInfo b : blocks) {
    size += b.getNumBytes();
  }

  BlockInfo[] sblocks = null;
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    FileDiff diff = sf.getDiffs().getLast();
    sblocks = diff != null ? diff.getBlocks() : null;
  }

  for (int i = blocks.length - 1; i >= 0 && size > newLength;
       size -= blocks[i].getNumBytes(), --i) {
    BlockInfo bi = blocks[i];
    long truncatedBytes;
    if (size - newLength < bi.getNumBytes()) {
      // Record a full block as the last block will be copied during recovery
      truncatedBytes = bi.getNumBytes() - getPreferredBlockSize();
    } else {
      truncatedBytes = bi.getNumBytes();
    }

    // The block also exists in the latest snapshot, so its bytes are not
    // released for the existing file
    if (sblocks != null && i < sblocks.length && bi.equals(sblocks[i])) {
      truncatedBytes -= bi.getNumBytes();
    }

    delta.addStorageSpace(-truncatedBytes * bi.getReplication());
    if (bsps != null) {
      List<StorageType> types = bsps.chooseStorageTypes(bi.getReplication());
      for (StorageType t : types) {
        if (t.supportTypeQuota()) {
          delta.addTypeSpace(t, -truncatedBytes);
        }
      }
    }
  }
}
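
The loop walks blocks from the tail while the accumulated size still exceeds the new length: blocks lying entirely beyond the new length are released in full, while the block containing the truncation point is still charged as a full block because truncate recovery copies it. A standalone sketch of that arithmetic with hypothetical sizes (three 128 MB blocks, replication 3, no snapshot, truncated to 200 MB); this is plain-long arithmetic that mirrors the loop, not Hadoop API:

// Illustrative arithmetic only: same loop as computeQuotaDeltaForTruncate,
// with BlockInfo replaced by a plain array of block lengths.
public class TruncateQuotaSketch {
  public static void main(String[] args) {
    final long mb = 1024L * 1024;
    long[] blocks = {128 * mb, 128 * mb, 128 * mb}; // hypothetical file
    long preferredBlockSize = 128 * mb;
    short replication = 3;
    long newLength = 200 * mb;

    long size = 0;
    for (long b : blocks) {
      size += b;
    }

    long storageSpaceDelta = 0;
    for (int i = blocks.length - 1; i >= 0 && size > newLength;
         size -= blocks[i], --i) {
      long truncatedBytes;
      if (size - newLength < blocks[i]) {
        // Block containing the truncation point: still charged as one full
        // block, because truncate recovery copies the last block.
        truncatedBytes = blocks[i] - preferredBlockSize;
      } else {
        // Block lies entirely beyond the new length and is removed.
        truncatedBytes = blocks[i];
      }
      storageSpaceDelta += -truncatedBytes * replication;
    }
    // Prints -402653184 (-128 MB * 3): only the fully removed block frees
    // quota; the partially truncated block stays charged as a full block.
    System.out.println(storageSpaceDelta);
  }
}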
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class INodeFile method storagespaceConsumedStriped.
// TODO: support EC with heterogeneous storage
public final QuotaCounts storagespaceConsumedStriped() {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  for (BlockInfo b : blocks) {
    Preconditions.checkState(b.isStriped());
    long blockSize = b.isComplete()
        ? ((BlockInfoStriped) b).spaceConsumed()
        : getPreferredBlockSize() * ((BlockInfoStriped) b).getTotalBlockNum();
    counts.addStorageSpace(blockSize);
  }
  return counts;
}
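
A complete striped block is charged its exact spaceConsumed(), while a block still under construction is charged the worst case: preferred block size times the total number of data plus parity blocks. A hedged arithmetic sketch for a hypothetical RS-6-3 policy with a 128 MB preferred block size (the class name and all numbers are illustrative only):

// Illustrative only: worst-case quota charge for an under-construction
// striped block group, matching the else-branch above.
public class StripedChargeSketch {
  public static void main(String[] args) {
    long preferredBlockSize = 128L * 1024 * 1024; // 128 MB
    int dataBlocks = 6;    // RS-6-3: 6 data blocks per block group
    int parityBlocks = 3;  // RS-6-3: 3 parity blocks per block group
    int totalBlockNum = dataBlocks + parityBlocks;

    long ucCharge = preferredBlockSize * totalBlockNum;
    // Prints 1207959552 (1152 MB): the under-construction block group is
    // charged as if all 9 internal blocks were full.
    System.out.println(ucCharge);
  }
}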