Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by Apache.
The class TestSetTimes, method testTimes.
/**
 * Tests mod & access time in DFS.
 */
@Test
public void testTimes() throws IOException {
  Configuration conf = new HdfsConfiguration();
  // 2s
  final int MAX_IDLE_TIME = 2000;
  conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  final int nnport = cluster.getNameNodePort();
  InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ", numDatanodes, info.length);
  FileSystem fileSys = cluster.getFileSystem();
  int replicas = 1;
  assertTrue(fileSys instanceof DistributedFileSystem);
  try {
    //
    // create file and record atime/mtime
    //
    System.out.println("Creating testdir1 and testdir1/test1.dat.");
    Path dir1 = new Path("testdir1");
    Path file1 = new Path(dir1, "test1.dat");
    FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
    FileStatus stat = fileSys.getFileStatus(file1);
    long atimeBeforeClose = stat.getAccessTime();
    String adate = dateForm.format(new Date(atimeBeforeClose));
    System.out.println("atime on " + file1 + " before close is " + adate + " (" + atimeBeforeClose + ")");
    assertTrue(atimeBeforeClose != 0);
    stm.close();
    stat = fileSys.getFileStatus(file1);
    long atime1 = stat.getAccessTime();
    long mtime1 = stat.getModificationTime();
    adate = dateForm.format(new Date(atime1));
    String mdate = dateForm.format(new Date(mtime1));
    System.out.println("atime on " + file1 + " is " + adate + " (" + atime1 + ")");
    System.out.println("mtime on " + file1 + " is " + mdate + " (" + mtime1 + ")");
    assertTrue(atime1 != 0);
    //
    // record dir times
    //
    stat = fileSys.getFileStatus(dir1);
    long mdir1 = stat.getAccessTime();
    assertTrue(mdir1 == 0);
    // set the access time to be one day in the past
    long atime2 = atime1 - (24L * 3600L * 1000L);
    fileSys.setTimes(file1, -1, atime2);
    // check new access time on file
    stat = fileSys.getFileStatus(file1);
    long atime3 = stat.getAccessTime();
    String adate3 = dateForm.format(new Date(atime3));
    System.out.println("new atime on " + file1 + " is " + adate3 + " (" + atime3 + ")");
    assertTrue(atime2 == atime3);
    assertTrue(mtime1 == stat.getModificationTime());
    // set the modification time to be 1 hour in the past
    long mtime2 = mtime1 - (3600L * 1000L);
    fileSys.setTimes(file1, mtime2, -1);
    // check new modification time on file
    stat = fileSys.getFileStatus(file1);
    long mtime3 = stat.getModificationTime();
    String mdate3 = dateForm.format(new Date(mtime3));
    System.out.println("new mtime on " + file1 + " is " + mdate3 + " (" + mtime3 + ")");
    assertTrue(atime2 == stat.getAccessTime());
    assertTrue(mtime2 == mtime3);
    long mtime4 = Time.now() - (3600L * 1000L);
    long atime4 = Time.now();
    fileSys.setTimes(dir1, mtime4, atime4);
    // check new modification and access times on the directory
    stat = fileSys.getFileStatus(dir1);
    assertTrue("Not matching the modification times", mtime4 == stat.getModificationTime());
    assertTrue("Not matching the access times", atime4 == stat.getAccessTime());
    Path nonExistingDir = new Path(dir1, "/nonExistingDir/");
    try {
      fileSys.setTimes(nonExistingDir, mtime4, atime4);
      fail("Expecting FileNotFoundException");
    } catch (FileNotFoundException e) {
      assertTrue(e.getMessage().contains("File/Directory " + nonExistingDir.toString() + " does not exist."));
    }
    // shutdown cluster and restart
    cluster.shutdown();
    try {
      Thread.sleep(2 * MAX_IDLE_TIME);
    } catch (InterruptedException e) {
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport).format(false).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    // verify that access times and modification times persist after a
    // cluster restart.
    System.out.println("Verifying times after cluster restart");
    stat = fileSys.getFileStatus(file1);
    assertTrue(atime2 == stat.getAccessTime());
    assertTrue(mtime3 == stat.getModificationTime());
    cleanupFile(fileSys, file1);
    cleanupFile(fileSys, dir1);
  } catch (IOException e) {
    info = client.datanodeReport(DatanodeReportType.ALL);
    printDatanodeReport(info);
    throw e;
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
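The catch block above hands the DatanodeInfo array from client.datanodeReport(DatanodeReportType.ALL) to a printDatanodeReport helper that is not part of this excerpt. A minimal sketch of what such a helper could look like, assuming it only needs to dump each node's standard report string (the body below is a reconstruction, not the actual TestSetTimes code):

// Hypothetical reconstruction of the diagnostic helper called in the catch block above.
// DatanodeInfo#getDatanodeReport() returns the familiar multi-line per-node summary
// (transfer address, capacity, DFS used, remaining, last contact, ...).
private void printDatanodeReport(DatanodeInfo[] info) {
  System.out.println("-------------------------------------------------");
  for (DatanodeInfo node : info) {
    System.out.println(node.getDatanodeReport());
    System.out.println();
  }
}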
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by Apache.
The class TestJsonUtil, method testToDatanodeInfoWithName.
@Test
public void testToDatanodeInfoWithName() throws Exception {
  Map<String, Object> response = new HashMap<String, Object>();
  // Older servers (1.x, 0.23, etc.) sends 'name' instead of ipAddr
  // and xferPort.
  String name = "127.0.0.1:1004";
  response.put("name", name);
  response.put("hostName", "localhost");
  response.put("storageID", "fake-id");
  response.put("infoPort", 1338L);
  response.put("ipcPort", 1339L);
  response.put("capacity", 1024L);
  response.put("dfsUsed", 512L);
  response.put("remaining", 512L);
  response.put("blockPoolUsed", 512L);
  response.put("lastUpdate", 0L);
  response.put("xceiverCount", 4096L);
  response.put("networkLocation", "foo.bar.baz");
  response.put("adminState", "NORMAL");
  response.put("cacheCapacity", 123L);
  response.put("cacheUsed", 321L);
  DatanodeInfo di = JsonUtilClient.toDatanodeInfo(response);
  Assert.assertEquals(name, di.getXferAddr());
  // The encoded result should contain name, ipAddr and xferPort.
  Map<String, Object> r = JsonUtil.toJsonMap(di);
  Assert.assertEquals(name, r.get("name"));
  Assert.assertEquals("127.0.0.1", r.get("ipAddr"));
  // In this test, it is Integer instead of Long since json was not actually
  // involved in constructing the map.
  Assert.assertEquals(1004, (int) (Integer) r.get("xferPort"));
  // Invalid names
  String[] badNames = { "127.0.0.1", "127.0.0.1:", ":", "127.0.0.1:sweet", ":123" };
  for (String badName : badNames) {
    response.put("name", badName);
    checkDecodeFailure(response);
  }
  // Missing both name and ipAddr
  response.remove("name");
  checkDecodeFailure(response);
  // Only missing xferPort
  response.put("ipAddr", "127.0.0.1");
  checkDecodeFailure(response);
}
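checkDecodeFailure is a private helper of TestJsonUtil that the loop above relies on but that is not included in this excerpt. A plausible minimal sketch, assuming JsonUtilClient.toDatanodeInfo rejects a malformed map by throwing (the exact exception type the real helper catches is an assumption here):

// Hypothetical reconstruction: decoding the given map must fail.
private void checkDecodeFailure(Map<String, Object> map) {
  try {
    JsonUtilClient.toDatanodeInfo(map);
    Assert.fail("Exception not thrown against bad input.");
  } catch (Exception e) {
    // expected: the name/ipAddr/xferPort fields are missing or malformed
  }
}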
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by Apache.
The class FSNamesystem, method reportBadBlocks.
/**
 * Client is reporting some bad block locations.
 */
void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
  checkOperation(OperationCategory.WRITE);
  writeLock();
  try {
    checkOperation(OperationCategory.WRITE);
    for (int i = 0; i < blocks.length; i++) {
      ExtendedBlock blk = blocks[i].getBlock();
      DatanodeInfo[] nodes = blocks[i].getLocations();
      String[] storageIDs = blocks[i].getStorageIDs();
      for (int j = 0; j < nodes.length; j++) {
        NameNode.stateChangeLog.info("*DIR* reportBadBlocks for block: {} on" + " datanode: {}", blk, nodes[j].getXferAddr());
        blockManager.findAndMarkBlockAsCorrupt(blk, nodes[j], storageIDs == null ? null : storageIDs[j], "client machine reported it");
      }
    }
  } finally {
    writeUnlock("reportBadBlocks");
  }
}
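This is the NameNode-side handler; the matching client entry point is ClientProtocol#reportBadBlocks. A small sketch of how a client might drive it, assuming the file has at least one block and reusing the getNamenode()/getBlockLocations pattern from the TestFileAppend4 excerpt below (the method name reportFirstBlockAsBad is illustrative only):

// Illustrative sketch: report every DatanodeInfo location of a file's first block as bad.
// The LocatedBlock carries both the ExtendedBlock and the DatanodeInfo[] locations that
// the loop in reportBadBlocks above iterates over.
void reportFirstBlockAsBad(DFSClient client, String path) throws IOException {
  LocatedBlocks lbs = client.getNamenode().getBlockLocations(path, 0, 1);
  LocatedBlock first = lbs.get(0);
  client.getNamenode().reportBadBlocks(new LocatedBlock[] { first });
}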
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by Apache.
The class FSNamesystem, method datanodeReport.
DatanodeInfo[] datanodeReport(final DatanodeReportType type) throws AccessControlException, StandbyException {
  checkSuperuserPrivilege();
  checkOperation(OperationCategory.UNCHECKED);
  readLock();
  try {
    checkOperation(OperationCategory.UNCHECKED);
    final DatanodeManager dm = getBlockManager().getDatanodeManager();
    final List<DatanodeDescriptor> results = dm.getDatanodeListForReport(type);
    DatanodeInfo[] arr = new DatanodeInfo[results.size()];
    for (int i = 0; i < arr.length; i++) {
      arr[i] = new DatanodeInfoBuilder().setFrom(results.get(i)).build();
    }
    return arr;
  } finally {
    readUnlock("datanodeReport");
  }
}
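On the client side this report is reachable through DFSClient#datanodeReport, as the TestSetTimes excerpt above already shows. A small usage sketch, assuming the goal is just to print a one-line summary per reported node (the method name summarizeLiveDatanodes is illustrative):

// Illustrative sketch: print transfer address, admin state and remaining space
// for every DatanodeInfo in a LIVE report.
void summarizeLiveDatanodes(DFSClient client) throws IOException {
  DatanodeInfo[] live = client.datanodeReport(DatanodeReportType.LIVE);
  for (DatanodeInfo node : live) {
    System.out.println(node.getXferAddr()
        + " adminState=" + node.getAdminState()
        + " remaining=" + node.getRemaining() + " bytes");
  }
}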
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by Apache.
The class TestFileAppend4, method testAppendInsufficientLocations.
/**
 * Test that an append with no locations fails with an exception
 * showing insufficient locations.
 */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
  Configuration conf = new Configuration();
  // lower heartbeat interval for fast recognition of DN
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  DistributedFileSystem fileSystem = null;
  try {
    // create a file with replication 2
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();
    // Check for replications
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
    // Shut down all DNs that have the last block location for the file
    LocatedBlocks lbs = fileSystem.dfs.getNamenode().getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
    List<DataNode> dnsOfCluster = cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
    for (DataNode dn : dnsOfCluster) {
      for (DatanodeInfo loc : dnsWithLocations) {
        if (dn.getDatanodeId().equals(loc)) {
          dn.shutdown();
          DFSTestUtil.waitForDatanodeDeath(dn);
        }
      }
    }
    // Wait till 0 replication is recognized
    DFSTestUtil.waitReplication(fileSystem, f, (short) 0);
    // Append to the file; at this point there are still live DNs in the cluster,
    // but none of them have the block.
    try {
      fileSystem.append(f);
      fail("Append should fail because insufficient locations");
    } catch (IOException e) {
      LOG.info("Expected exception: ", e);
    }
    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
    assertTrue("File should remain closed", !inode.isUnderConstruction());
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
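The nested loop that matches each running DataNode against the DatanodeInfo locations of the last block is the core DatanodeInfo usage in this test. Pulled out as a stand-alone helper for readability, using only calls that already appear in the excerpt (the helper name is illustrative):

// Illustrative helper: shut down every DataNode that appears among the given
// DatanodeInfo locations. DatanodeID#equals performs the match, exactly as in
// the loop above.
static void shutdownDatanodesAt(MiniDFSCluster cluster, DatanodeInfo[] locations) throws Exception {
  for (DataNode dn : cluster.getDataNodes()) {
    for (DatanodeInfo loc : locations) {
      if (dn.getDatanodeId().equals(loc)) {
        dn.shutdown();
        DFSTestUtil.waitForDatanodeDeath(dn);
      }
    }
  }
}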