use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class PBHelperClient method convert.
public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
  if (fs == null)
    return null;
  FileType fType = FileType.IS_FILE;
  if (fs.isDir()) {
    fType = FileType.IS_DIR;
  } else if (fs.isSymlink()) {
    fType = FileType.IS_SYMLINK;
  }
  HdfsFileStatusProto.Builder builder = HdfsFileStatusProto.newBuilder()
      .setLength(fs.getLen())
      .setFileType(fType)
      .setBlockReplication(fs.getReplication())
      .setBlocksize(fs.getBlockSize())
      .setModificationTime(fs.getModificationTime())
      .setAccessTime(fs.getAccessTime())
      .setPermission(convert(fs.getPermission()))
      .setOwner(fs.getOwner())
      .setGroup(fs.getGroup())
      .setFileId(fs.getFileId())
      .setChildrenNum(fs.getChildrenNum())
      .setPath(getByteString(fs.getLocalNameInBytes()))
      .setStoragePolicy(fs.getStoragePolicy());
  if (fs.isSymlink()) {
    builder.setSymlink(getByteString(fs.getSymlinkInBytes()));
  }
  if (fs.getFileEncryptionInfo() != null) {
    builder.setFileEncryptionInfo(convert(fs.getFileEncryptionInfo()));
  }
  if (fs instanceof HdfsLocatedFileStatus) {
    final HdfsLocatedFileStatus lfs = (HdfsLocatedFileStatus) fs;
    LocatedBlocks locations = lfs.getBlockLocations();
    if (locations != null) {
      builder.setLocations(convert(locations));
    }
  }
  if (fs.getErasureCodingPolicy() != null) {
    builder.setEcPolicy(convertErasureCodingPolicy(fs.getErasureCodingPolicy()));
  }
  return builder.build();
}
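Note that the locations field is only populated when the server hands in an HdfsLocatedFileStatus. On the receiving side, the companion PBHelperClient.convert overload for LocatedBlocksProto can decode that field again; the snippet below is a hedged sketch, where the proto variable and the printout are assumptions for illustration rather than part of the Hadoop source above.

// Hypothetical decode of an HdfsFileStatusProto received over RPC; `proto`
// is assumed to come from a ClientNamenodeProtocol response message.
if (proto.hasLocations()) {
  // PBHelperClient.convert(LocatedBlocksProto) rebuilds the LocatedBlocks view.
  LocatedBlocks blocks = PBHelperClient.convert(proto.getLocations());
  System.out.println("located block count: " + blocks.locatedBlockCount());
}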
use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class FSDirStatAndListingOp method getListing.
/**
* Get a partial listing of the indicated directory
*
* We will stop when any of the following conditions is met:
* 1) this.lsLimit files have been added
* 2) needLocation is true AND enough files have been added such
* that at least this.lsLimit block locations are in the response
*
* @param fsd FSDirectory
* @param iip the INodesInPath instance containing all the INodes along the
* path
* @param startAfter the name to start listing after
 * @param needLocation whether block locations should be returned
 * @param includeStoragePolicy whether the storage policy should be returned
* @return a partial listing starting after startAfter
*/
private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
    byte[] startAfter, boolean needLocation, boolean includeStoragePolicy)
    throws IOException {
  if (FSDirectory.isExactReservedName(iip.getPathComponents())) {
    return getReservedListing(fsd);
  }
  fsd.readLock();
  try {
    if (iip.isDotSnapshotDir()) {
      return getSnapshotsListing(fsd, iip, startAfter);
    }
    final int snapshot = iip.getPathSnapshotId();
    final INode targetNode = iip.getLastINode();
    if (targetNode == null) {
      return null;
    }
    byte parentStoragePolicy = includeStoragePolicy
        ? targetNode.getStoragePolicyID()
        : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
    if (!targetNode.isDirectory()) {
      // target INode
      return new DirectoryListing(new HdfsFileStatus[] {
          createFileStatus(fsd, iip, null, parentStoragePolicy, needLocation) }, 0);
    }
    final INodeDirectory dirInode = targetNode.asDirectory();
    final ReadOnlyList<INode> contents = dirInode.getChildrenList(snapshot);
    int startChild = INodeDirectory.nextChild(contents, startAfter);
    int totalNumChildren = contents.size();
    int numOfListing = Math.min(totalNumChildren - startChild, fsd.getLsLimit());
    int locationBudget = fsd.getLsLimit();
    int listingCnt = 0;
    HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
    for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
      INode child = contents.get(startChild + i);
      byte childStoragePolicy = (includeStoragePolicy && !child.isSymlink())
          ? getStoragePolicyID(child.getLocalStoragePolicyID(), parentStoragePolicy)
          : parentStoragePolicy;
      listing[i] = createFileStatus(fsd, iip, child, childStoragePolicy, needLocation);
      listingCnt++;
      if (listing[i] instanceof HdfsLocatedFileStatus) {
        // Once we hit lsLimit locations, stop.
        // This helps to prevent excessively large response payloads.
        // Approximate #locations with locatedBlockCount() * repl_factor
        LocatedBlocks blks = ((HdfsLocatedFileStatus) listing[i]).getBlockLocations();
        locationBudget -= (blks == null) ? 0
            : blks.locatedBlockCount() * listing[i].getReplication();
      }
    }
    // truncate return array if necessary
    if (listingCnt < numOfListing) {
      listing = Arrays.copyOf(listing, listingCnt);
    }
    return new DirectoryListing(listing, totalNumChildren - startChild - listingCnt);
  } finally {
    fsd.readUnlock();
  }
}
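The location budget above is why a single response may cover only part of a large directory. A caller pages through the remainder by passing the last returned name back in as startAfter. Below is a minimal client-side sketch, assuming a DFSClient handle named client and a directory "/big/dir"; both names are illustrative, not part of the source above.

// Hypothetical pagination loop over DFSClient.listPaths; each page honors
// the server-side lsLimit / location budget enforced in getListing above.
byte[] startAfter = HdfsFileStatus.EMPTY_NAME;
DirectoryListing page;
do {
  page = client.listPaths("/big/dir", startAfter, true); // needLocation = true
  if (page == null) {
    break; // directory does not exist
  }
  for (HdfsFileStatus status : page.getPartialListing()) {
    // located entries are HdfsLocatedFileStatus instances carrying LocatedBlocks
  }
  startAfter = page.getLastName();
} while (page.hasMore());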
use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class DFSTestUtil method verifyFileReplicasOnStorageType.
/**
 * Helper function that verifies that the blocks of a file are placed on the
 * expected storage type.
 *
 * @param fs the file system containing the file
 * @param client the DFS client used to access the file
 * @param path the path of the file to verify
 * @param storageType the expected storage type
 * @return true if the file exists and all of its blocks are located on the
 *         expected storage type, false otherwise.
 */
public static boolean verifyFileReplicasOnStorageType(FileSystem fs, DFSClient client,
    Path path, StorageType storageType) throws IOException {
  if (!fs.exists(path)) {
    LOG.info("verifyFileReplicasOnStorageType: file " + path + " does not exist");
    return false;
  }
  long fileLength = client.getFileInfo(path.toString()).getLen();
  LocatedBlocks locatedBlocks =
      client.getLocatedBlocks(path.toString(), 0, fileLength);
  for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
    if (locatedBlock.getStorageTypes()[0] != storageType) {
      LOG.info("verifyFileReplicasOnStorageType: for file " + path + ". Expect blk"
          + locatedBlock + " on Type: " + storageType + ". Actual Type: "
          + locatedBlock.getStorageTypes()[0]);
      return false;
    }
  }
  return true;
}
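A typical use in a test is to write a file under a directory that has a storage policy set and then assert on the placement. The following is a hedged JUnit-style sketch, assuming a MiniDFSCluster whose datanodes expose ARCHIVE volumes; the path, file size, and seed are illustrative.

// Hypothetical usage; the COLD policy places all replicas on ARCHIVE storage.
DistributedFileSystem fs = cluster.getFileSystem();
Path dir = new Path("/cold");
Path file = new Path(dir, "data.bin");
fs.mkdirs(dir);
fs.setStoragePolicy(dir, "COLD");
DFSTestUtil.createFile(fs, file, 1024L, (short) 3, 0xBEEFL);
assertTrue(DFSTestUtil.verifyFileReplicasOnStorageType(
    fs, fs.getClient(), file, StorageType.ARCHIVE));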
use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class DFSTestUtil method allBlockReplicasCorrupt.
/*
 * Check whether all replicas of the given block in the given file are marked corrupt.
 */
public static boolean allBlockReplicasCorrupt(MiniDFSCluster cluster, Path file,
    int blockNo) throws IOException {
  try (DFSClient client = new DFSClient(
      new InetSocketAddress("localhost", cluster.getNameNodePort()),
      cluster.getConfiguration(0))) {
    LocatedBlocks blocks;
    blocks = client.getNamenode().getBlockLocations(file.toString(), 0, Long.MAX_VALUE);
    return blocks.get(blockNo).isCorrupt();
  }
}
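Because the NameNode only marks a block corrupt after it learns about bad replicas, tests usually poll this helper rather than call it once. A small sketch using GenericTestUtils.waitFor is shown below; the cluster and file variables are assumed to be the test's own fixtures and the timeouts are illustrative.

// Hypothetical wait loop: retry until the NameNode has marked every replica
// of block 0 of the file as corrupt, checking every 500 ms for up to 30 s.
GenericTestUtils.waitFor(() -> {
  try {
    return DFSTestUtil.allBlockReplicasCorrupt(cluster, file, 0);
  } catch (IOException e) {
    return false;
  }
}, 500, 30000);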
use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class TestLeaseRecovery2 method testHardLeaseRecovery.
/**
 * This test stops the client from renewing its lease and sets the hard lease
 * expiration period to a short 1 second, so that lease expiration is triggered
 * while the client is still alive.
 *
 * The test makes sure that lease recovery completes and that the client
 * fails if it continues to write to the file.
 *
 * @throws Exception
 */
@Test
public void testHardLeaseRecovery() throws Exception {
  // create a file
  String filestr = "/hardLeaseRecovery";
  AppendTestUtil.LOG.info("filestr=" + filestr);
  Path filepath = new Path(filestr);
  FSDataOutputStream stm = dfs.create(filepath, true, BUF_SIZE,
      REPLICATION_NUM, BLOCK_SIZE);
  assertTrue(dfs.dfs.exists(filestr));
  // write bytes into the file.
  int size = AppendTestUtil.nextInt(FILE_SIZE);
  AppendTestUtil.LOG.info("size=" + size);
  stm.write(buffer, 0, size);
  // hflush file
  AppendTestUtil.LOG.info("hflush");
  stm.hflush();
  // kill the lease renewal thread
  AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
  dfs.dfs.getLeaseRenewer().interruptAndJoin();
  // set the hard limit to be 1 second
  cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD);
  // wait for lease recovery to complete
  LocatedBlocks locatedBlocks;
  do {
    Thread.sleep(SHORT_LEASE_PERIOD);
    locatedBlocks = dfs.dfs.getLocatedBlocks(filestr, 0L, size);
  } while (locatedBlocks.isUnderConstruction());
  assertEquals(size, locatedBlocks.getFileLength());
  // make sure that the writer thread gets killed
  try {
    stm.write('b');
    stm.close();
    fail("Writer thread should have been killed");
  } catch (IOException e) {
    e.printStackTrace();
  }
  // verify data
  AppendTestUtil.LOG.info("File size is good. Now validating sizes from datanodes...");
  AppendTestUtil.checkFullFile(dfs, filepath, size, buffer, filestr);
}
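The do/while loop above detects the end of lease recovery by re-fetching LocatedBlocks until isUnderConstruction() becomes false. A hedged alternative is to poll DistributedFileSystem.isFileClosed() with GenericTestUtils.waitFor; this sketch reuses the test's dfs and filepath fixtures and is not part of the original test, and the timeouts are illustrative.

// Hypothetical alternative wait: lease recovery is done once the file is closed.
GenericTestUtils.waitFor(() -> {
  try {
    return dfs.isFileClosed(filepath);
  } catch (IOException e) {
    return false;
  }
}, 1000, 30000);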