Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.
The class TestINodeFile, method checkEquals.
private static void checkEquals(LocatedBlocks l1, LocatedBlocks l2) {
  List<LocatedBlock> list1 = l1.getLocatedBlocks();
  List<LocatedBlock> list2 = l2.getLocatedBlocks();
  assertEquals(list1.size(), list2.size());
  for (int i = 0; i < list1.size(); i++) {
    LocatedBlock b1 = list1.get(i);
    LocatedBlock b2 = list2.get(i);
    assertEquals(b1.getBlock(), b2.getBlock());
    assertEquals(b1.getBlockSize(), b2.getBlockSize());
  }
}
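For context, a hedged sketch of how a helper like this might be driven: fetching the same file's block list through two equivalent paths and asserting they match. The client setup, path, and inode id below are illustrative assumptions, not the actual test body.

// Illustrative only: compare block lists resolved via two equivalent paths.
// conf and inodeId are assumed to come from the surrounding test setup.
DFSClient client = new DFSClient(NameNode.getAddress(conf), conf);
LocatedBlocks viaPath = client.getLocatedBlocks("/test/file", 0, Long.MAX_VALUE);
LocatedBlocks viaInode = client.getLocatedBlocks(
    "/.reserved/.inodes/" + inodeId, 0, Long.MAX_VALUE);
checkEquals(viaPath, viaInode);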
Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.
The class JsonUtilClient, method toLocatedBlock.
/** Convert a Json map to LocatedBlock. */
static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
  if (m == null) {
    return null;
  }
  final ExtendedBlock b = toExtendedBlock((Map<?, ?>) m.get("block"));
  final DatanodeInfo[] locations =
      toDatanodeInfoArray(getList(m, "locations"));
  final long startOffset = ((Number) m.get("startOffset")).longValue();
  final boolean isCorrupt = (Boolean) m.get("isCorrupt");
  final DatanodeInfo[] cachedLocations =
      toDatanodeInfoArray(getList(m, "cachedLocations"));
  final StorageType[] storageTypes =
      toStorageTypeArray(getList(m, "storageTypes"));
  final LocatedBlock locatedblock = new LocatedBlock(b, locations, null,
      storageTypes, startOffset, isCorrupt, cachedLocations);
  locatedblock.setBlockToken(toBlockToken((Map<?, ?>) m.get("blockToken")));
  return locatedblock;
}
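The map is expected to carry the WebHDFS JSON fields for a located block. A minimal sketch of the expected shape follows, with invented values and placeholder nested maps; note the method is package-private, so a real caller would live in the same package.

// Sketch of the expected input shape; blockMap, locationMaps, tokenMap etc.
// are placeholders for the corresponding parsed JSON objects.
Map<String, Object> m = new HashMap<>();
m.put("block", blockMap);                 // ExtendedBlock fields
m.put("locations", locationMaps);         // list of DatanodeInfo maps
m.put("startOffset", 0L);
m.put("isCorrupt", false);
m.put("cachedLocations", cachedLocationMaps);
m.put("storageTypes", storageTypeNames);  // e.g. ["DISK", "SSD"]
m.put("blockToken", tokenMap);
LocatedBlock lb = toLocatedBlock(m);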
Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.
The class PBHelperClient, method convertLocatedBlockProto.
public static LocatedBlock convertLocatedBlockProto(LocatedBlockProto proto) {
  if (proto == null) {
    return null;
  }
  List<DatanodeInfoProto> locs = proto.getLocsList();
  DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
  for (int i = 0; i < locs.size(); i++) {
    targets[i] = convert(locs.get(i));
  }
  final StorageType[] storageTypes =
      convertStorageTypes(proto.getStorageTypesList(), locs.size());
  final int storageIDsCount = proto.getStorageIDsCount();
  final String[] storageIDs;
  if (storageIDsCount == 0) {
    storageIDs = null;
  } else {
    Preconditions.checkState(storageIDsCount == locs.size());
    storageIDs = proto.getStorageIDsList().toArray(new String[storageIDsCount]);
  }
  byte[] indices = null;
  if (proto.hasBlockIndices()) {
    indices = proto.getBlockIndices().toByteArray();
  }
  // Set values from the isCached list, re-using the references from targets
  List<DatanodeInfo> cachedLocs = new ArrayList<>(locs.size());
  List<Boolean> isCachedList = proto.getIsCachedList();
  for (int i = 0; i < isCachedList.size(); i++) {
    if (isCachedList.get(i)) {
      cachedLocs.add(targets[i]);
    }
  }
  final LocatedBlock lb;
  if (indices == null) {
    lb = new LocatedBlock(PBHelperClient.convert(proto.getB()), targets,
        storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt(),
        cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
  } else {
    lb = new LocatedStripedBlock(PBHelperClient.convert(proto.getB()), targets,
        storageIDs, storageTypes, indices, proto.getOffset(),
        proto.getCorrupt(), cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
    List<TokenProto> tokenProtos = proto.getBlockTokensList();
    Token<BlockTokenIdentifier>[] blockTokens = convertTokens(tokenProtos);
    ((LocatedStripedBlock) lb).setBlockTokens(blockTokens);
  }
  lb.setBlockToken(convert(proto.getBlockToken()));
  return lb;
}
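For orientation, a hedged round-trip sketch; PBHelperClient also exposes a converter in the opposite direction, here assumed to be convertLocatedBlock(LocatedBlock) as in recent Hadoop lines. Verify the exact name against your Hadoop version.

// Round-trip sketch: LocatedBlock -> protobuf -> LocatedBlock.
// The serializing method name is an assumption, not confirmed by this page.
LocatedBlockProto proto = PBHelperClient.convertLocatedBlock(lb);
LocatedBlock parsed = PBHelperClient.convertLocatedBlockProto(proto);
assert parsed.getBlock().equals(lb.getBlock());
assert parsed.getStartOffset() == lb.getStartOffset();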
Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.
The class PBHelperClient, method convertLocatedBlock.
public static List<LocatedBlock> convertLocatedBlock(List<LocatedBlockProto> lb) {
  if (lb == null) {
    return null;
  }
  final int len = lb.size();
  List<LocatedBlock> result = new ArrayList<>(len);
  for (LocatedBlockProto aLb : lb) {
    result.add(convertLocatedBlockProto(aLb));
  }
  return result;
}
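This list form is typically applied to the repeated block field inside a block-locations response; a one-line sketch, where blocksProto and its accessor name are assumptions:

// Sketch: convert all blocks of an assumed LocatedBlocksProto in one call.
List<LocatedBlock> blocks =
    PBHelperClient.convertLocatedBlock(blocksProto.getBlocksList());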
Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.
The class FSDirAppendOp, method appendFile.
/**
 * Append to an existing file.
 * <p>
 * The method returns the last block of the file if it is a partial block,
 * which can still be used for writing more data. The client uses the
 * returned block locations to form the data pipeline for this block.<br>
 * The {@link LocatedBlock} will be null if the last block is full.
 * The client then allocates a new block with the next call using
 * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#addBlock}.
 * <p>
 * For a description of parameters and exceptions thrown, see
 * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#append}.
 *
 * @param fsn namespace
 * @param srcArg path name
 * @param pc permission checker to check fs permission
 * @param holder client name
 * @param clientMachine client machine info
 * @param newBlock if the data is appended to a new block
 * @param logRetryCache whether to record RPC ids in editlog for retry cache
 *                      rebuilding
 * @return the last block with status
 */
static LastBlockWithStatus appendFile(final FSNamesystem fsn,
    final String srcArg, final FSPermissionChecker pc, final String holder,
    final String clientMachine, final boolean newBlock,
    final boolean logRetryCache) throws IOException {
  assert fsn.hasWriteLock();
  final LocatedBlock lb;
  final FSDirectory fsd = fsn.getFSDirectory();
  final INodesInPath iip;
  fsd.writeLock();
  try {
    iip = fsd.resolvePath(pc, srcArg, DirOp.WRITE);
    // Verify that the destination does not exist as a directory already
    final INode inode = iip.getLastINode();
    final String path = iip.getPath();
    if (inode != null && inode.isDirectory()) {
      throw new FileAlreadyExistsException("Cannot append to directory "
          + path + "; already exists as a directory.");
    }
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.WRITE);
    }
    if (inode == null) {
      throw new FileNotFoundException("Failed to append to non-existent file "
          + path + " for client " + clientMachine);
    }
    final INodeFile file = INodeFile.valueOf(inode, path, true);
    // Appending to a file with striped blocks is not supported
    if (file.isStriped()) {
      throw new UnsupportedOperationException(
          "Cannot append to files with striped block " + path);
    }
    BlockManager blockManager = fsd.getBlockManager();
    final BlockStoragePolicy lpPolicy =
        blockManager.getStoragePolicy("LAZY_PERSIST");
    if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) {
      throw new UnsupportedOperationException(
          "Cannot append to lazy persist file " + path);
    }
    // Opening an existing file for append - may need to recover lease.
    fsn.recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, path, holder,
        clientMachine, false);
    final BlockInfo lastBlock = file.getLastBlock();
    // Check that the block has at least minimum replication.
    if (lastBlock != null) {
      if (lastBlock.getBlockUCState() == BlockUCState.COMMITTED) {
        throw new RetriableException(
            new NotReplicatedYetException("append: lastBlock=" + lastBlock
                + " of src=" + path + " is COMMITTED but not yet COMPLETE."));
      } else if (lastBlock.isComplete()
          && !blockManager.isSufficientlyReplicated(lastBlock)) {
        throw new IOException("append: lastBlock=" + lastBlock + " of src="
            + path + " is not sufficiently replicated yet.");
      }
    }
    lb = prepareFileForAppend(fsn, iip, holder, clientMachine, newBlock,
        true, logRetryCache);
  } catch (IOException ie) {
    NameNode.stateChangeLog.warn("DIR* NameSystem.append: " + ie.getMessage());
    throw ie;
  } finally {
    fsd.writeUnlock();
  }
  HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, iip);
  if (lb != null) {
    NameNode.stateChangeLog.debug(
        "DIR* NameSystem.appendFile: file {} for {} at {} block {} block"
            + " size {}", srcArg, holder, clientMachine, lb.getBlock(),
        lb.getBlock().getNumBytes());
  }
  return new LastBlockWithStatus(lb, stat);
}
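From the client's side, the contract spelled out in the javadoc above plays out roughly like the following sketch; the path and payload are invented for illustration.

// Hypothetical client view of append: if the file's last block is partial,
// the write pipeline resumes on that block; if it was full, the returned
// block is null and the first write allocates a fresh block via
// ClientProtocol#addBlock.
FileSystem fs = FileSystem.get(conf);
try (FSDataOutputStream out = fs.append(new Path("/dir/file"))) {
  out.write("more data".getBytes(StandardCharsets.UTF_8));
}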