Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
The class TestSetTimes, method testTimes.
/**
 * Tests mod & access time in DFS.
 */
@Test
public void testTimes() throws IOException {
  Configuration conf = new HdfsConfiguration();
  // 2s
  final int MAX_IDLE_TIME = 2000;
  conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  final int nnport = cluster.getNameNodePort();
  InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ", numDatanodes, info.length);
  FileSystem fileSys = cluster.getFileSystem();
  int replicas = 1;
  assertTrue(fileSys instanceof DistributedFileSystem);
  try {
    //
    // create file and record atime/mtime
    //
    System.out.println("Creating testdir1 and testdir1/test1.dat.");
    Path dir1 = new Path("testdir1");
    Path file1 = new Path(dir1, "test1.dat");
    FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
    FileStatus stat = fileSys.getFileStatus(file1);
    long atimeBeforeClose = stat.getAccessTime();
    String adate = dateForm.format(new Date(atimeBeforeClose));
    System.out.println("atime on " + file1 + " before close is "
        + adate + " (" + atimeBeforeClose + ")");
    assertTrue(atimeBeforeClose != 0);
    stm.close();
    stat = fileSys.getFileStatus(file1);
    long atime1 = stat.getAccessTime();
    long mtime1 = stat.getModificationTime();
    adate = dateForm.format(new Date(atime1));
    String mdate = dateForm.format(new Date(mtime1));
    System.out.println("atime on " + file1 + " is " + adate + " (" + atime1 + ")");
    System.out.println("mtime on " + file1 + " is " + mdate + " (" + mtime1 + ")");
    assertTrue(atime1 != 0);
    //
    // record dir times
    //
    stat = fileSys.getFileStatus(dir1);
    long mdir1 = stat.getAccessTime();
    assertTrue(mdir1 == 0);
    // set the access time to be one day in the past
    long atime2 = atime1 - (24L * 3600L * 1000L);
    fileSys.setTimes(file1, -1, atime2);
    // check new access time on file
    stat = fileSys.getFileStatus(file1);
    long atime3 = stat.getAccessTime();
    String adate3 = dateForm.format(new Date(atime3));
    System.out.println("new atime on " + file1 + " is " + adate3 + " (" + atime3 + ")");
    assertTrue(atime2 == atime3);
    assertTrue(mtime1 == stat.getModificationTime());
    // set the modification time to be 1 hour in the past
    long mtime2 = mtime1 - (3600L * 1000L);
    fileSys.setTimes(file1, mtime2, -1);
    // check new modification time on file
    stat = fileSys.getFileStatus(file1);
    long mtime3 = stat.getModificationTime();
    String mdate3 = dateForm.format(new Date(mtime3));
    System.out.println("new mtime on " + file1 + " is " + mdate3 + " (" + mtime3 + ")");
    assertTrue(atime2 == stat.getAccessTime());
    assertTrue(mtime2 == mtime3);
    long mtime4 = Time.now() - (3600L * 1000L);
    long atime4 = Time.now();
    fileSys.setTimes(dir1, mtime4, atime4);
    // check new modification and access times on the directory
    stat = fileSys.getFileStatus(dir1);
    assertTrue("Not matching the modification times",
        mtime4 == stat.getModificationTime());
    assertTrue("Not matching the access times", atime4 == stat.getAccessTime());
    Path nonExistingDir = new Path(dir1, "/nonExistingDir/");
    try {
      fileSys.setTimes(nonExistingDir, mtime4, atime4);
      fail("Expecting FileNotFoundException");
    } catch (FileNotFoundException e) {
      assertTrue(e.getMessage().contains(
          "File/Directory " + nonExistingDir.toString() + " does not exist."));
    }
    // shutdown cluster and restart
    cluster.shutdown();
    try {
      Thread.sleep(2 * MAX_IDLE_TIME);
    } catch (InterruptedException e) {
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    // verify that access times and modification times persist after a
    // cluster restart.
    System.out.println("Verifying times after cluster restart");
    stat = fileSys.getFileStatus(file1);
    assertTrue(atime2 == stat.getAccessTime());
    assertTrue(mtime3 == stat.getModificationTime());
    cleanupFile(fileSys, file1);
    cleanupFile(fileSys, dir1);
  } catch (IOException e) {
    info = client.datanodeReport(DatanodeReportType.ALL);
    printDatanodeReport(info);
    throw e;
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
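The core API exercised above is FileStatus.getAccessTime()/getModificationTime() together with FileSystem.setTimes(path, mtime, atime), where -1 leaves a field unchanged. A minimal standalone sketch of that pattern follows; the path and the one-hour offset are illustrative, not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetTimesSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Illustrative path; any existing file works.
    Path file = new Path("/tmp/example.dat");
    FileStatus stat = fs.getFileStatus(file);
    long mtime = stat.getModificationTime();  // epoch millis
    long atime = stat.getAccessTime();        // epoch millis
    // Move the modification time one hour back; -1 leaves the access time untouched.
    fs.setTimes(file, mtime - 3600L * 1000L, -1);
    // FileStatus is a snapshot, so re-read it to observe the change.
    stat = fs.getFileStatus(file);
    System.out.println("mtime now " + stat.getModificationTime()
        + ", atime unchanged: " + (stat.getAccessTime() == atime));
    fs.close();
  }
}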
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
The class TestSetrepIncreasing, method setrep.
static void setrep(int fromREP, int toREP, boolean simulatedStorage) throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "" + fromREP);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(10).build();
  FileSystem fs = cluster.getFileSystem();
  assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
  try {
    Path root = TestDFSShell.mkdir(fs, new Path("/test/setrep" + fromREP + "-" + toREP));
    Path f = TestDFSShell.writeFile(fs, new Path(root, "foo"));
    // Verify setrep for changing replication
    {
      String[] args = { "-setrep", "-w", "" + toREP, "" + f };
      FsShell shell = new FsShell();
      shell.setConf(conf);
      try {
        assertEquals(0, shell.run(args));
      } catch (Exception e) {
        assertTrue("-setrep " + e, false);
      }
    }
    // get fs again since the old one may be closed
    fs = cluster.getFileSystem();
    FileStatus file = fs.getFileStatus(f);
    long len = file.getLen();
    for (BlockLocation locations : fs.getFileBlockLocations(file, 0, len)) {
      assertTrue(locations.getHosts().length == toREP);
    }
    TestDFSShell.show("done setrep waiting: " + root);
  } finally {
    try {
      fs.close();
    } catch (Exception e) {
    }
    cluster.shutdown();
  }
}
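The verification loop above relies on FileStatus plus getFileBlockLocations to confirm where replicas actually live. A small sketch of the same calls outside the MiniDFSCluster test, assuming a running HDFS and an existing file at the illustrative path /data/foo:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicationCheckSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path f = new Path("/data/foo");  // illustrative path
    FileStatus status = fs.getFileStatus(f);
    // Target replication factor recorded in the file's metadata.
    System.out.println("replication: " + status.getReplication());
    // Hosts that actually hold a replica of each block.
    for (BlockLocation loc : fs.getFileBlockLocations(status, 0, status.getLen())) {
      System.out.println("block hosts: " + String.join(",", loc.getHosts()));
    }
    fs.close();
  }
}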
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
The class TestHASafeMode, method testOpenFileWhenNNAndClientCrashAfterAddBlock.
/** Test NN crash and client crash/stuck immediately after block allocation */
@Test(timeout = 100000)
public void testOpenFileWhenNNAndClientCrashAfterAddBlock() throws Exception {
  cluster.getConfiguration(0).set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "1.0f");
  String testData = "testData";
  // to make sure we write the full block before creating the dummy block at the NN.
  cluster.getConfiguration(0).setInt("io.bytes.per.checksum", testData.length());
  cluster.restartNameNode(0);
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    cluster.transitionToStandby(1);
    DistributedFileSystem dfs = cluster.getFileSystem(0);
    String pathString = "/tmp1.txt";
    Path filePath = new Path(pathString);
    FSDataOutputStream create = dfs.create(filePath, FsPermission.getDefault(),
        true, 1024, (short) 3, testData.length(), null);
    create.write(testData.getBytes());
    create.hflush();
    long fileId = ((DFSOutputStream) create.getWrappedStream()).getFileId();
    FileStatus fileStatus = dfs.getFileStatus(filePath);
    DFSClient client = DFSClientAdapter.getClient(dfs);
    // add one dummy block at the NN, but do not write it to a DataNode
    ExtendedBlock previousBlock = DFSClientAdapter.getPreviousBlock(client, fileId);
    DFSClientAdapter.getNamenode(client).addBlock(pathString, client.getClientName(),
        new ExtendedBlock(previousBlock), new DatanodeInfo[0],
        DFSClientAdapter.getFileId((DFSOutputStream) create.getWrappedStream()), null, null);
    cluster.restartNameNode(0, true);
    cluster.restartDataNode(0);
    cluster.transitionToActive(0);
    // let the block reports be processed.
    Thread.sleep(2000);
    FSDataInputStream is = dfs.open(filePath);
    is.close();
    // initiate recovery
    dfs.recoverLease(filePath);
    assertTrue("Recovery also should be success", dfs.recoverLease(filePath));
  } finally {
    cluster.shutdown();
  }
}
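The final assertion depends on recoverLease() already reporting success on the second call; in general the call may have to be retried until the lease is released. A hedged sketch of the usual polling pattern, with an arbitrary interval and timeout that are not taken from the test:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  // Polls recoverLease() until it reports the file closed, or the timeout expires.
  static boolean waitForLeaseRecovery(DistributedFileSystem dfs, Path p, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!dfs.recoverLease(p)) {
      if (System.currentTimeMillis() > deadline) {
        return false;
      }
      Thread.sleep(1000);
    }
    return true;
  }
}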
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
The class TestOfflineImageViewer, method testFileDistributionCalculator.
@Test
public void testFileDistributionCalculator() throws IOException {
  ByteArrayOutputStream output = new ByteArrayOutputStream();
  PrintStream o = new PrintStream(output);
  new FileDistributionCalculator(new Configuration(), 0, 0, false, o)
      .visit(new RandomAccessFile(originalFsimage, "r"));
  o.close();
  String outputString = output.toString();
  Pattern p = Pattern.compile("totalFiles = (\\d+)\n");
  Matcher matcher = p.matcher(outputString);
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  int totalFiles = Integer.parseInt(matcher.group(1));
  assertEquals(NUM_DIRS * FILES_PER_DIR + filesECCount + 1, totalFiles);
  p = Pattern.compile("totalDirectories = (\\d+)\n");
  matcher = p.matcher(outputString);
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  int totalDirs = Integer.parseInt(matcher.group(1));
  // totalDirs includes root directory
  assertEquals(dirCount + 1, totalDirs);
  FileStatus maxFile = Collections.max(writtenFiles.values(), new Comparator<FileStatus>() {

    @Override
    public int compare(FileStatus first, FileStatus second) {
      return first.getLen() < second.getLen() ? -1
          : ((first.getLen() == second.getLen()) ? 0 : 1);
    }
  });
  p = Pattern.compile("maxFileSize = (\\d+)\n");
  matcher = p.matcher(output.toString("UTF-8"));
  assertTrue(matcher.find() && matcher.groupCount() == 1);
  assertEquals(maxFile.getLen(), Long.parseLong(matcher.group(1)));
}
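As a side note, the anonymous Comparator above simply orders FileStatus objects by length; on Java 8+ the same ordering can be expressed more compactly. A small sketch, where the collection stands in for the test's writtenFiles.values():

import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import org.apache.hadoop.fs.FileStatus;

public final class MaxFileSketch {
  // Equivalent to the test's anonymous comparator: pick the FileStatus with the largest length.
  static FileStatus largest(Collection<FileStatus> files) {
    return Collections.max(files, Comparator.comparingLong(FileStatus::getLen));
  }
}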
Use of org.apache.hadoop.fs.FileStatus in project hadoop by apache.
The class TestOfflineImageViewer, method testWebImageViewer.
@Test
public void testWebImageViewer() throws Exception {
  WebImageViewer viewer = new WebImageViewer(NetUtils.createSocketAddr("localhost:0"));
  try {
    viewer.initServer(originalFsimage.getAbsolutePath());
    int port = viewer.getPort();
    // create a WebHdfsFileSystem instance
    URI uri = new URI("webhdfs://localhost:" + String.valueOf(port));
    Configuration conf = new Configuration();
    WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) FileSystem.get(uri, conf);
    // verify the number of directories
    FileStatus[] statuses = webhdfs.listStatus(new Path("/"));
    assertEquals(dirCount, statuses.length);
    // verify the number of files in the directory
    statuses = webhdfs.listStatus(new Path("/dir0"));
    assertEquals(FILES_PER_DIR, statuses.length);
    // compare a file
    FileStatus status = webhdfs.listStatus(new Path("/dir0/file0"))[0];
    FileStatus expected = writtenFiles.get("/dir0/file0");
    compareFile(expected, status);
    // LISTSTATUS operation to an empty directory
    statuses = webhdfs.listStatus(new Path("/emptydir"));
    assertEquals(0, statuses.length);
    // LISTSTATUS operation to an invalid path
    URL url = new URL("http://localhost:" + port + "/webhdfs/v1/invalid/?op=LISTSTATUS");
    verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND, url);
    // LISTSTATUS operation to an invalid prefix
    url = new URL("http://localhost:" + port + "/foo");
    verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND, url);
    // Verify the Erasure Coded empty file status
    Path emptyECFilePath = new Path("/ec/EmptyECFile.txt");
    FileStatus actualEmptyECFileStatus =
        webhdfs.getFileStatus(new Path(emptyECFilePath.toString()));
    FileStatus expectedEmptyECFileStatus = writtenFiles.get(emptyECFilePath.toString());
    System.out.println(webhdfs.getFileStatus(new Path(emptyECFilePath.toString())));
    compareFile(expectedEmptyECFileStatus, actualEmptyECFileStatus);
    // Verify the Erasure Coded small file status
    Path smallECFilePath = new Path("/ec/SmallECFile.txt");
    FileStatus actualSmallECFileStatus =
        webhdfs.getFileStatus(new Path(smallECFilePath.toString()));
    FileStatus expectedSmallECFileStatus = writtenFiles.get(smallECFilePath.toString());
    compareFile(expectedSmallECFileStatus, actualSmallECFileStatus);
    // GETFILESTATUS operation
    status = webhdfs.getFileStatus(new Path("/dir0/file0"));
    compareFile(expected, status);
    // GETFILESTATUS operation to an invalid path
    url = new URL("http://localhost:" + port + "/webhdfs/v1/invalid/?op=GETFILESTATUS");
    verifyHttpResponseCode(HttpURLConnection.HTTP_NOT_FOUND, url);
    // invalid operation
    url = new URL("http://localhost:" + port + "/webhdfs/v1/?op=INVALID");
    verifyHttpResponseCode(HttpURLConnection.HTTP_BAD_REQUEST, url);
    // invalid method
    url = new URL("http://localhost:" + port + "/webhdfs/v1/?op=LISTSTATUS");
    HttpURLConnection connection = (HttpURLConnection) url.openConnection();
    connection.setRequestMethod("POST");
    connection.connect();
    assertEquals(HttpURLConnection.HTTP_BAD_METHOD, connection.getResponseCode());
  } finally {
    // shutdown the viewer
    viewer.close();
  }
}
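The WebHDFS access shown above is not specific to the image viewer; any WebHDFS endpoint returns the same FileStatus metadata through the ordinary FileSystem API. A minimal sketch, assuming an illustrative endpoint at localhost:50070 and a directory /dir0:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsListSketch {
  public static void main(String[] args) throws Exception {
    // webhdfs:// URIs resolve to WebHdfsFileSystem through the normal FileSystem factory.
    FileSystem webhdfs =
        FileSystem.get(URI.create("webhdfs://localhost:50070"), new Configuration());
    for (FileStatus status : webhdfs.listStatus(new Path("/dir0"))) {
      System.out.println(status.getPath() + " len=" + status.getLen()
          + " mtime=" + status.getModificationTime());
    }
    webhdfs.close();
  }
}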