Use of org.apache.hadoop.fs.FileStatus in the Apache Hadoop project.
From the class AbstractContractGetFileStatusTest, method verifyFileStats.
/**
 * Walk a located-status iterator and, for each entry, fetch the status of the
 * same path directly and check that the core attributes agree. A mismatch
 * indicates that the metadata returned by a directory listing differs from
 * the metadata returned by an explicit getFileStatus() call on the same path.
 *
 * Timestamps are deliberately not compared.
 * @param results iterator to scan
 * @return the number of entries in the result set
 * @throws IOException any IO problem
 */
private int verifyFileStats(RemoteIterator<LocatedFileStatus> results) throws IOException {
  describe("verifying file statuses");
  int entries = 0;
  while (results.hasNext()) {
    LocatedFileStatus listed = results.next();
    // Re-fetch the status directly and compare it with the listing entry.
    FileStatus direct = getFileSystem().getFileStatus(listed.getPath());
    assertEquals("isDirectory", direct.isDirectory(), listed.isDirectory());
    assertEquals("isFile", direct.isFile(), listed.isFile());
    assertEquals("getLen", direct.getLen(), listed.getLen());
    assertEquals("getOwner", direct.getOwner(), listed.getOwner());
    entries++;
  }
  return entries;
}
Use of org.apache.hadoop.fs.FileStatus in the Apache Hadoop project.
From the class RollingFileSystemSinkTestBase, method readLogFile.
/**
 * Read the log files at the target path and return the contents as a single
 * string. This method will assert that the correct number of files is found.
 *
 * @param path the target path
 * @param then when the test method began. Used to find the log directory in
 * the case that the test run crosses the top of the hour.
 * @param count the number of log files to expect
 * @return the concatenated contents of the log files that were read
 * @throws IOException if the file system cannot be accessed or a log read fails
 * @throws URISyntaxException if {@code path} is not a valid URI
 */
protected String readLogFile(String path, String then, int count) throws IOException, URISyntaxException {
  // NOTE(review): "00" appears to pad the timestamp to match the log
  // directory naming produced by DATE_FORMAT — confirm against the sink.
  final String now = DATE_FORMAT.format(new Date()) + "00";
  final String logFile = getLogFilename();
  FileSystem fs = FileSystem.get(new URI(path), new Configuration());
  StringBuilder metrics = new StringBuilder();
  boolean found = false;
  for (FileStatus status : fs.listStatus(new Path(path))) {
    Path logDir = status.getPath();
    // Only directories stamped with the hour the test started ("then") or
    // the current hour ("now") can contain this run's logs; anything else
    // can be ignored.
    if (now.equals(logDir.getName()) || then.equals(logDir.getName())) {
      readLogData(fs, findMostRecentLogFile(fs, new Path(logDir, logFile)), metrics);
      assertFileCount(fs, logDir, count);
      found = true;
    }
  }
  assertTrue("No valid log directories found", found);
  return metrics.toString();
}
Use of org.apache.hadoop.fs.FileStatus in the Apache Hadoop project.
From the class StripedFileTestUtil, method verifyLength.
/**
 * Assert that the on-filesystem length of {@code srcPath} matches the
 * expected length.
 */
static void verifyLength(FileSystem fs, Path srcPath, int fileLength) throws IOException {
  long actualLength = fs.getFileStatus(srcPath).getLen();
  assertEquals("File length should be the same", fileLength, actualLength);
}
Use of org.apache.hadoop.fs.FileStatus in the Apache Hadoop project.
From the class TestSafeMode, method testSafeModeWhenZeroBlockLocations.
/**
 * Verify safe-mode behaviour with and without known block locations:
 * getFileBlockLocations() must keep working in manually-entered safe mode
 * while locations exist, and must raise SafeModeException (possibly wrapped
 * in a RemoteException) once the cluster is restarted with no datanodes.
 */
@Test
public void testSafeModeWhenZeroBlockLocations() throws IOException {
  try {
    Path file1 = new Path("/tmp/testManualSafeMode/file1");
    Path file2 = new Path("/tmp/testManualSafeMode/file2");
    System.out.println("Created file1 and file2.");
    // create two files with one block each.
    DFSTestUtil.createFile(fs, file1, 1000, (short) 1, 0);
    DFSTestUtil.createFile(fs, file2, 2000, (short) 1, 0);
    checkGetBlockLocationsWorks(fs, file1);
    NameNode namenode = cluster.getNameNode();
    // manually set safemode.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    assertTrue("should still be in SafeMode", namenode.isInSafeMode());
    // getBlock locations should still work since block locations exists
    checkGetBlockLocationsWorks(fs, file1);
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    assertFalse("should not be in SafeMode", namenode.isInSafeMode());
    // Now 2nd part of the tests where there aren't block locations
    cluster.shutdownDataNodes();
    cluster.shutdownNameNode(0);
    // now bring up just the NameNode.
    cluster.restartNameNode();
    cluster.waitActive();
    System.out.println("Restarted cluster with just the NameNode");
    namenode = cluster.getNameNode();
    assertTrue("No datanode is started. Should be in SafeMode", namenode.isInSafeMode());
    FileStatus stat = fs.getFileStatus(file1);
    try {
      fs.getFileBlockLocations(stat, 0, 1000);
      // assertTrue(msg, false) is an anti-pattern; fail explicitly instead.
      throw new AssertionError("Should have got safemode exception");
    } catch (SafeModeException e) {
      // expected: block locations are unavailable while in safe mode
    } catch (RemoteException re) {
      // The RPC layer may wrap the SafeModeException; anything else fails.
      if (!re.getClassName().equals(SafeModeException.class.getName())) {
        throw new AssertionError("Should have got safemode exception");
      }
    }
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    assertFalse("Should not be in safemode", namenode.isInSafeMode());
    checkGetBlockLocationsWorks(fs, file1);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Use of org.apache.hadoop.fs.FileStatus in the Apache Hadoop project.
From the class TestReplaceDatanodeOnFailure, method testBestEffort.
/**
 * With ReplaceDatanodeOnFailure policy ALWAYS and bestEffort enabled, a
 * single-datanode cluster must be able to write a file and then append to
 * it: datanode replacement cannot succeed (there is no spare datanode), but
 * best-effort mode must not abort the pipeline with an exception.
 */
@Test
public void testBestEffort() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  //always replace a datanode but do not throw exception
  ReplaceDatanodeOnFailure.write(Policy.ALWAYS, true, conf);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    final DistributedFileSystem fs = cluster.getFileSystem();
    final Path f = new Path(DIR, "testIgnoreReplaceFailure");
    final byte[] bytes = new byte[1000];
    {
      LOG.info("write " + bytes.length + " bytes to " + f);
      // try-with-resources: the stream is closed even if write() throws,
      // fixing a leak on the exception path in the original code.
      try (FSDataOutputStream out = fs.create(f, REPLICATION)) {
        out.write(bytes);
      }
      final FileStatus status = fs.getFileStatus(f);
      Assert.assertEquals(REPLICATION, status.getReplication());
      Assert.assertEquals(bytes.length, status.getLen());
    }
    {
      LOG.info("append another " + bytes.length + " bytes to " + f);
      try (FSDataOutputStream out = fs.append(f)) {
        out.write(bytes);
      }
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Aggregations