Use of org.apache.hadoop.hdfs.protocol.BlockType in project hadoop by apache.

Class BlockManager, method isPlacementPolicySatisfied:
boolean isPlacementPolicySatisfied(BlockInfo storedBlock) {
  List<DatanodeDescriptor> liveNodes = new ArrayList<>();
  Collection<DatanodeDescriptor> corruptNodes =
      corruptReplicas.getNodes(storedBlock);
  for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
    final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
    // Nodes under maintenance should be counted as valid replicas from
    // rack policy point of view.
    if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()
        && ((corruptNodes == null) || !corruptNodes.contains(cur))) {
      liveNodes.add(cur);
    }
  }
  DatanodeInfo[] locs = liveNodes.toArray(new DatanodeInfo[liveNodes.size()]);
  BlockType blockType = storedBlock.getBlockType();
  BlockPlacementPolicy placementPolicy = placementPolicies.getPolicy(blockType);
  int numReplicas = blockType == STRIPED
      ? ((BlockInfoStriped) storedBlock).getRealTotalBlockNum()
      : storedBlock.getReplication();
  return placementPolicy.verifyBlockPlacement(locs, numReplicas)
      .isPlacementPolicySatisfied();
}
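
The only BlockType-specific step above is computing the expected replica count: a striped (erasure-coded) block needs its real total number of data and parity blocks, while a contiguous block needs the file's replication factor. The following self-contained sketch isolates that branch; the enum and record here are illustrative stand-ins, not Hadoop's BlockInfo classes.

  public class PlacementCheckSketch {
    // Illustrative stand-in for org.apache.hadoop.hdfs.protocol.BlockType.
    enum BlockType { CONTIGUOUS, STRIPED }

    // Hypothetical stand-in for the BlockInfo fields read above.
    record Block(BlockType type, short replication, int realTotalBlockNum) {}

    // A striped block is fully placed when all of its data and parity blocks
    // have a home; a contiguous block when it reaches the file's replication
    // factor. This mirrors the numReplicas ternary above.
    static int expectedReplicas(Block b) {
      return b.type() == BlockType.STRIPED
          ? b.realTotalBlockNum()
          : b.replication();
    }

    public static void main(String[] args) {
      Block replicated = new Block(BlockType.CONTIGUOUS, (short) 3, 0);
      Block erasureCoded = new Block(BlockType.STRIPED, (short) 1, 9); // e.g. RS(6,3)
      System.out.println(expectedReplicas(replicated));   // 3
      System.out.println(expectedReplicas(erasureCoded)); // 9
    }
  }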
Use of org.apache.hadoop.hdfs.protocol.BlockType in project hadoop by apache.

Class FSDirWriteFileOp, method addFileForEditLog:
static INodeFile addFileForEditLog(FSDirectory fsd, long id,
    INodesInPath existing, byte[] localName, PermissionStatus permissions,
    List<AclEntry> aclEntries, List<XAttr> xAttrs, short replication,
    long modificationTime, long atime, long preferredBlockSize,
    boolean underConstruction, String clientName, String clientMachine,
    byte storagePolicyId) {
  final INodeFile newNode;
  Preconditions.checkNotNull(existing);
  assert fsd.hasWriteLock();
  try {
    // check if the file has an EC policy
    boolean isStriped = false;
    ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp
        .unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), existing);
    if (ecPolicy != null) {
      isStriped = true;
    }
    final BlockType blockType = isStriped ?
        BlockType.STRIPED : BlockType.CONTIGUOUS;
    final Short replicationFactor = (!isStriped ? replication : null);
    final Byte ecPolicyID = (isStriped ? ecPolicy.getId() : null);
    if (underConstruction) {
      newNode = newINodeFile(id, permissions, modificationTime,
          modificationTime, replicationFactor, ecPolicyID, preferredBlockSize,
          storagePolicyId, blockType);
      newNode.toUnderConstruction(clientName, clientMachine);
    } else {
      newNode = newINodeFile(id, permissions, modificationTime, atime,
          replicationFactor, ecPolicyID, preferredBlockSize, storagePolicyId,
          blockType);
    }
    newNode.setLocalName(localName);
    INodesInPath iip = fsd.addINode(existing, newNode,
        permissions.getPermission());
    if (iip != null) {
      if (aclEntries != null) {
        AclStorage.updateINodeAcl(newNode, aclEntries, CURRENT_STATE_ID);
      }
      if (xAttrs != null) {
        XAttrStorage.updateINodeXAttrs(newNode, xAttrs, CURRENT_STATE_ID);
      }
      return newNode;
    }
  } catch (IOException e) {
    NameNode.stateChangeLog.warn(
        "DIR* FSDirectory.unprotectedAddFile: exception when add "
            + existing.getPath() + " to the file system", e);
    if (e instanceof FSLimitException.MaxDirectoryItemsExceededException) {
      NameNode.stateChangeLog.warn("Please increase "
          + "dfs.namenode.fs-limits.max-directory-items and make it "
          + "consistent across all NameNodes.");
    }
  }
  return null;
}
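
The BlockType decision in addFileForEditLog enforces that a file is either replicated or erasure-coded, never both: replicationFactor is non-null only for CONTIGUOUS files, and ecPolicyID only for STRIPED ones. A minimal sketch of that invariant, using illustrative names rather than Hadoop APIs:

  public class FileTypeFieldsSketch {
    enum BlockType { CONTIGUOUS, STRIPED }

    // Hypothetical holder mirroring the nullable fields chosen above.
    record FileAttrs(BlockType blockType, Short replication, Byte ecPolicyId) {}

    static FileAttrs forNewFile(boolean hasEcPolicy, short replication,
        byte ecPolicyId) {
      BlockType type = hasEcPolicy ? BlockType.STRIPED : BlockType.CONTIGUOUS;
      // Exactly one of the two fields is non-null, matching the
      // (!isStriped ? replication : null) / (isStriped ? ecPolicy.getId() : null)
      // pattern in the method above.
      return new FileAttrs(type,
          hasEcPolicy ? null : replication,
          hasEcPolicy ? ecPolicyId : null);
    }

    public static void main(String[] args) {
      System.out.println(forNewFile(false, (short) 3, (byte) 0)); // replicated file
      System.out.println(forNewFile(true, (short) 0, (byte) 1));  // EC file
    }
  }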
Use of org.apache.hadoop.hdfs.protocol.BlockType in project hadoop by apache.

Class FSDirWriteFileOp, method storeAllocatedBlock:
/**
* Part II of getAdditionalBlock().
* Should repeat the same analysis of the file state as in Part 1,
* but under the write lock.
* If the conditions still hold, then allocate a new block with
* the new targets, add it to the INode and to the BlocksMap.
*/
static LocatedBlock storeAllocatedBlock(FSNamesystem fsn, String src,
    long fileId, String clientName, ExtendedBlock previous,
    DatanodeStorageInfo[] targets) throws IOException {
  long offset;
  // Run the full analysis again, since things could have changed
  // while chooseTarget() was executing.
  LocatedBlock[] onRetryBlock = new LocatedBlock[1];
  INodesInPath iip = fsn.dir.resolvePath(null, src, fileId);
  FileState fileState = analyzeFileState(fsn, iip, fileId, clientName,
      previous, onRetryBlock);
  final INodeFile pendingFile = fileState.inode;
  src = fileState.path;
  if (onRetryBlock[0] != null) {
    if (onRetryBlock[0].getLocations().length > 0) {
      // This is a retry. Just return the last block if it has locations.
      return onRetryBlock[0];
    } else {
      // add new chosen targets to already allocated block and return
      BlockInfo lastBlockInFile = pendingFile.getLastBlock();
      lastBlockInFile.getUnderConstructionFeature()
          .setExpectedLocations(lastBlockInFile, targets,
              pendingFile.getBlockType());
      offset = pendingFile.computeFileSize();
      return makeLocatedBlock(fsn, lastBlockInFile, targets, offset);
    }
  }
  // commit the last block and complete it if it has minimum replicas
  fsn.commitOrCompleteLastBlock(pendingFile, fileState.iip,
      ExtendedBlock.getLocalBlock(previous));
  // allocate new block, record block locations in INode.
  final BlockType blockType = pendingFile.getBlockType();
  Block newBlock = fsn.createNewBlock(blockType);
  INodesInPath inodesInPath = INodesInPath.fromINode(pendingFile);
  saveAllocatedBlock(fsn, src, inodesInPath, newBlock, targets, blockType);
  persistNewBlock(fsn, src, pendingFile);
  offset = pendingFile.computeFileSize();
  // Return located block
  return makeLocatedBlock(fsn, fsn.getStoredBlock(newBlock), targets, offset);
}
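
The onRetryBlock branching is the subtle part: a retried addBlock call must not allocate a second block, so it either returns the previously allocated block unchanged (it already has locations) or re-binds the newly chosen targets to it. A minimal sketch of that decision, with illustrative stand-ins rather than Hadoop classes:

  public class RetryBranchSketch {
    // Hypothetical stand-in for a located block: an id plus datanode locations.
    record LocatedBlock(String id, String[] locations) {}

    // Mirrors the retry branching in storeAllocatedBlock above.
    static LocatedBlock handleRetry(LocatedBlock retryBlock, String[] newTargets) {
      if (retryBlock.locations().length > 0) {
        return retryBlock;                    // retry already has locations
      }
      return new LocatedBlock(retryBlock.id(), newTargets); // re-bind targets
    }

    public static void main(String[] args) {
      LocatedBlock bare = new LocatedBlock("blk_1", new String[0]);
      System.out.println(
          handleRetry(bare, new String[] { "dn1", "dn2" }).locations().length); // 2
    }
  }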
Use of org.apache.hadoop.hdfs.protocol.BlockType in project hadoop by apache.

Class FSNamesystem, method getAdditionalDatanode:
/** @see ClientProtocol#getAdditionalDatanode */
LocatedBlock getAdditionalDatanode(String src, long fileId,
    final ExtendedBlock blk, final DatanodeInfo[] existings,
    final String[] storageIDs, final Set<Node> excludes,
    final int numAdditionalNodes, final String clientName)
    throws IOException {
  //check if the feature is enabled
  dtpReplaceDatanodeOnFailure.checkEnabled();
  Node clientnode = null;
  String clientMachine;
  final long preferredblocksize;
  final byte storagePolicyID;
  final List<DatanodeStorageInfo> chosen;
  final BlockType blockType;
  checkOperation(OperationCategory.READ);
  FSPermissionChecker pc = getPermissionChecker();
  readLock();
  try {
    checkOperation(OperationCategory.READ);
    //check safe mode
    checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + blk);
    final INodesInPath iip = dir.resolvePath(pc, src, fileId);
    src = iip.getPath();
    //check lease
    final INodeFile file = checkLease(iip, clientName, fileId);
    clientMachine = file.getFileUnderConstructionFeature().getClientMachine();
    clientnode = blockManager.getDatanodeManager()
        .getDatanodeByHost(clientMachine);
    preferredblocksize = file.getPreferredBlockSize();
    storagePolicyID = file.getStoragePolicyID();
    blockType = file.getBlockType();
    //find datanode storages
    final DatanodeManager dm = blockManager.getDatanodeManager();
    chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs,
        "src=%s, fileId=%d, blk=%s, clientName=%s, clientMachine=%s",
        src, fileId, blk, clientName, clientMachine));
  } finally {
    readUnlock("getAdditionalDatanode");
  }
  if (clientnode == null) {
    clientnode = FSDirWriteFileOp.getClientNode(blockManager, clientMachine);
  }
  // choose new datanodes.
  final DatanodeStorageInfo[] targets =
      blockManager.chooseTarget4AdditionalDatanode(src, numAdditionalNodes,
          clientnode, chosen, excludes, preferredblocksize, storagePolicyID,
          blockType);
  final LocatedBlock lb = BlockManager.newLocatedBlock(blk, targets, -1, false);
  blockManager.setBlockToken(lb, BlockTokenIdentifier.AccessMode.COPY);
  return lb;
}
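
Note the locking pattern: the method only takes the namesystem read lock to snapshot the facts it needs (preferred block size, storage policy, block type, current replica storages), then releases it before the potentially expensive target selection. A generic sketch of that snapshot-then-compute pattern using only the standard library; the class and field names here are illustrative, not Hadoop's:

  import java.util.concurrent.locks.ReentrantReadWriteLock;

  public class SnapshotThenChooseSketch {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private long preferredBlockSize = 128L * 1024 * 1024;

    // Immutable snapshot of state read under the lock.
    record Snapshot(long preferredBlockSize) {}

    Snapshot readSnapshot() {
      lock.readLock().lock();
      try {
        return new Snapshot(preferredBlockSize); // cheap reads only
      } finally {
        lock.readLock().unlock();
      }
    }

    void chooseTargets(Snapshot s) {
      // Potentially expensive placement decision, done without holding the lock.
    }

    public static void main(String[] args) {
      SnapshotThenChooseSketch s = new SnapshotThenChooseSketch();
      s.chooseTargets(s.readSnapshot()); // snapshot under lock, choose outside it
    }
  }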
Use of org.apache.hadoop.hdfs.protocol.BlockType in project hadoop by apache.

Class TestFSImage, method testBlockTypeProtoDefaultsToContiguous:
@Test
public void testBlockTypeProtoDefaultsToContiguous() throws Exception {
  INodeSection.INodeFile.Builder builder = INodeSection.INodeFile.newBuilder();
  INodeSection.INodeFile inodeFile = builder.build();
  BlockType defaultBlockType = PBHelperClient.convert(inodeFile.getBlockType());
  assertEquals(defaultBlockType, BlockType.CONTIGUOUS);
}
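
The test works because a protobuf enum field that was never set decodes to the value with tag 0, and in the HDFS proto definition CONTIGUOUS occupies that slot. A tiny illustrative sketch of the tag-to-enum mapping (not the generated protobuf code):

  public class ProtoDefaultSketch {
    enum BlockType { CONTIGUOUS, STRIPED }

    // An unset protobuf enum field reads back as tag 0; CONTIGUOUS holds
    // tag 0 here, so an empty INodeFile message converts to CONTIGUOUS.
    static BlockType fromProtoTag(int tag) {
      return tag == 1 ? BlockType.STRIPED : BlockType.CONTIGUOUS;
    }

    public static void main(String[] args) {
      System.out.println(fromProtoTag(0)); // CONTIGUOUS, the unset default
    }
  }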