Use of org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto in project hadoop by apache.
The convertLocatedBlockProto method of the class PBHelperClient.
public static LocatedBlock convertLocatedBlockProto(LocatedBlockProto proto) {
  if (proto == null) {
    return null;
  }
  // Convert the replica locations.
  List<DatanodeInfoProto> locs = proto.getLocsList();
  DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
  for (int i = 0; i < locs.size(); i++) {
    targets[i] = convert(locs.get(i));
  }

  final StorageType[] storageTypes =
      convertStorageTypes(proto.getStorageTypesList(), locs.size());

  // Storage IDs are optional; an empty list maps back to null.
  final int storageIDsCount = proto.getStorageIDsCount();
  final String[] storageIDs;
  if (storageIDsCount == 0) {
    storageIDs = null;
  } else {
    Preconditions.checkState(storageIDsCount == locs.size());
    storageIDs = proto.getStorageIDsList().toArray(new String[storageIDsCount]);
  }

  // A present blockIndices field marks an erasure-coded (striped) block.
  byte[] indices = null;
  if (proto.hasBlockIndices()) {
    indices = proto.getBlockIndices().toByteArray();
  }

  // Set values from the isCached list, re-using the DatanodeInfo
  // references already converted into targets.
  List<DatanodeInfo> cachedLocs = new ArrayList<>(locs.size());
  List<Boolean> isCachedList = proto.getIsCachedList();
  for (int i = 0; i < isCachedList.size(); i++) {
    if (isCachedList.get(i)) {
      cachedLocs.add(targets[i]);
    }
  }

  final LocatedBlock lb;
  if (indices == null) {
    lb = new LocatedBlock(PBHelperClient.convert(proto.getB()), targets,
        storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt(),
        cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
  } else {
    lb = new LocatedStripedBlock(PBHelperClient.convert(proto.getB()), targets,
        storageIDs, storageTypes, indices, proto.getOffset(), proto.getCorrupt(),
        cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
    List<TokenProto> tokenProtos = proto.getBlockTokensList();
    Token<BlockTokenIdentifier>[] blockTokens = convertTokens(tokenProtos);
    ((LocatedStripedBlock) lb).setBlockTokens(blockTokens);
  }
  lb.setBlockToken(convert(proto.getBlockToken()));

  return lb;
}
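For orientation, here is a minimal, hypothetical smoke test for this converter: it hand-builds a one-replica LocatedBlockProto and runs it through convertLocatedBlockProto. The builder names follow standard protobuf conventions for the getters used above; the specific fields of DatanodeIDProto, ExtendedBlockProto, and TokenProto (TokenProto lives in SecurityProtos, ByteString in the possibly shaded protobuf runtime) are assumptions that may differ between Hadoop versions.

// Hypothetical smoke test; builder names inferred from the getters above.
static LocatedBlock buildAndConvert() {
  DatanodeInfoProto loc = DatanodeInfoProto.newBuilder()
      .setId(DatanodeIDProto.newBuilder()           // assumed node identity fields
          .setIpAddr("127.0.0.1").setHostName("localhost")
          .setDatanodeUuid("dn-uuid-1")
          .setXferPort(9866).setInfoPort(9864).setIpcPort(9867))
      .build();
  TokenProto emptyToken = TokenProto.newBuilder()   // an empty block token
      .setIdentifier(ByteString.EMPTY).setPassword(ByteString.EMPTY)
      .setKind("").setService("").build();
  LocatedBlockProto proto = LocatedBlockProto.newBuilder()
      .setB(ExtendedBlockProto.newBuilder()         // the block itself
          .setPoolId("BP-1").setBlockId(1L).setGenerationStamp(1L))
      .setOffset(0L)
      .addLocs(loc)
      .setCorrupt(false)
      .setBlockToken(emptyToken)
      .addIsCached(true)                            // the single replica is cached
      .build();
  // No storageIDs and no blockIndices are set, so the converter returns
  // storageIDs = null and a plain LocatedBlock, not a LocatedStripedBlock.
  return PBHelperClient.convertLocatedBlockProto(proto);
}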
Use of org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto in project hadoop by apache.
The addBlock method of the class ClientNamenodeProtocolServerSideTranslatorPB.
@Override
public AddBlockResponseProto addBlock(RpcController controller,
    AddBlockRequestProto req) throws ServiceException {
  try {
    List<DatanodeInfoProto> excl = req.getExcludeNodesList();
    List<String> favor = req.getFavoredNodesList();
    EnumSet<AddBlockFlag> flags =
        PBHelperClient.convertAddBlockFlags(req.getFlagsList());
    LocatedBlock result = server.addBlock(req.getSrc(), req.getClientName(),
        req.hasPrevious() ? PBHelperClient.convert(req.getPrevious()) : null,
        (excl == null || excl.size() == 0) ? null : PBHelperClient.convert(
            excl.toArray(new DatanodeInfoProto[excl.size()])),
        req.getFileId(),
        (favor == null || favor.size() == 0) ? null : favor.toArray(
            new String[favor.size()]),
        flags);
    return AddBlockResponseProto.newBuilder()
        .setBlock(PBHelperClient.convertLocatedBlock(result)).build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
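As a sketch of the other side of this RPC, here is how the AddBlockRequestProto that this method unpacks might be assembled. The setter names are inferred from the getters above by the usual protobuf conventions (getExcludeNodesList implies addExcludeNodes, and so on), and the single-argument PBHelperClient.convert overloads are assumed, so treat this as illustrative rather than the actual client translator.

// Hypothetical client-side sketch (flags omitted for brevity); setter names
// are inferred from the getters used in the translator above.
static AddBlockRequestProto buildAddBlockRequest(String src, String clientName,
    long fileId, ExtendedBlock previous, DatanodeInfo[] excludeNodes,
    String[] favoredNodes) {
  AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
      .setSrc(src)
      .setClientName(clientName)
      .setFileId(fileId);
  if (previous != null) {        // last allocated block, if any
    req.setPrevious(PBHelperClient.convert(previous));
  }
  if (excludeNodes != null) {    // datanodes the client wants to avoid
    for (DatanodeInfo dn : excludeNodes) {
      req.addExcludeNodes(PBHelperClient.convert(dn));
    }
  }
  if (favoredNodes != null) {    // hosts the client would prefer
    for (String host : favoredNodes) {
      req.addFavoredNodes(host);
    }
  }
  return req.build();
}

Note the convention visible on both sides: protobuf repeated fields cannot be null, so the client simply leaves them empty and the server maps empty lists back to null before calling into the NameNode.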
Use of org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto in project hadoop by apache.
The getAdditionalDatanode method of the class ClientNamenodeProtocolServerSideTranslatorPB.
@Override
public GetAdditionalDatanodeResponseProto getAdditionalDatanode(
    RpcController controller, GetAdditionalDatanodeRequestProto req)
    throws ServiceException {
  try {
    List<DatanodeInfoProto> existingList = req.getExistingsList();
    List<String> existingStorageIDsList = req.getExistingStorageUuidsList();
    List<DatanodeInfoProto> excludesList = req.getExcludesList();
    LocatedBlock result = server.getAdditionalDatanode(req.getSrc(),
        req.getFileId(), PBHelperClient.convert(req.getBlk()),
        PBHelperClient.convert(existingList.toArray(
            new DatanodeInfoProto[existingList.size()])),
        existingStorageIDsList.toArray(
            new String[existingStorageIDsList.size()]),
        PBHelperClient.convert(excludesList.toArray(
            new DatanodeInfoProto[excludesList.size()])),
        req.getNumAdditionalNodes(), req.getClientName());
    return GetAdditionalDatanodeResponseProto.newBuilder()
        .setBlock(PBHelperClient.convertLocatedBlock(result)).build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
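The same request-building pattern applies here; below is a hedged sketch, again with setter names inferred from the getters above rather than taken from the real client translator. The point to notice is that existings and existingStorageUuids are parallel lists: entry i of each describes the same existing replica, which is why the server passes them through as paired arrays.

// Hypothetical request builder; setter names inferred by protobuf convention.
static GetAdditionalDatanodeRequestProto buildRequest(String src, long fileId,
    ExtendedBlock blk, DatanodeInfo[] existings, String[] existingStorageUuids,
    DatanodeInfo[] excludes, int numAdditionalNodes, String clientName) {
  GetAdditionalDatanodeRequestProto.Builder req =
      GetAdditionalDatanodeRequestProto.newBuilder()
          .setSrc(src)
          .setFileId(fileId)
          .setBlk(PBHelperClient.convert(blk))
          .setNumAdditionalNodes(numAdditionalNodes)
          .setClientName(clientName);
  for (int i = 0; i < existings.length; i++) {
    req.addExistings(PBHelperClient.convert(existings[i]));
    req.addExistingStorageUuids(existingStorageUuids[i]); // parallel to existings
  }
  for (DatanodeInfo dn : excludes) {                      // datanodes to skip
    req.addExcludes(PBHelperClient.convert(dn));
  }
  return req.build();
}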