Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class TestReplaceDatanodeOnFailure, method testDefaultPolicy.
/** Test DEFAULT ReplaceDatanodeOnFailure policy. */
@Test
public void testDefaultPolicy() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  final ReplaceDatanodeOnFailure p = ReplaceDatanodeOnFailure.get(conf);
  final DatanodeInfo[] infos = new DatanodeInfo[5];
  final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
  datanodes[0] = new DatanodeInfo[0];
  for (int i = 0; i < infos.length; ) {
    infos[i] = DFSTestUtil.getLocalDatanodeInfo(9867 + i);
    i++;
    datanodes[i] = new DatanodeInfo[i];
    System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length);
  }
  final boolean[] isAppend = { true, true, false, false };
  final boolean[] isHflushed = { true, false, true, false };
  for (short replication = 1; replication <= infos.length; replication++) {
    for (int nExistings = 0; nExistings < datanodes.length; nExistings++) {
      final DatanodeInfo[] existings = datanodes[nExistings];
      Assert.assertEquals(nExistings, existings.length);
      for (int i = 0; i < isAppend.length; i++) {
        for (int j = 0; j < isHflushed.length; j++) {
          final int half = replication / 2;
          final boolean enoughReplica = replication <= nExistings;
          final boolean noReplica = nExistings == 0;
          final boolean replicationL3 = replication < 3;
          final boolean existingsLEhalf = nExistings <= half;
          final boolean isAH = isAppend[i] || isHflushed[j];
          final boolean expected;
          if (enoughReplica || noReplica || replicationL3) {
            expected = false;
          } else {
            expected = isAH || existingsLEhalf;
          }
          final boolean computed = p.satisfy(replication, existings, isAppend[i], isHflushed[j]);
          try {
            Assert.assertEquals(expected, computed);
          } catch (AssertionError e) {
            final String s = "replication=" + replication + "\nnExistings =" + nExistings
                + "\nisAppend =" + isAppend[i] + "\nisHflushed =" + isHflushed[j];
            throw new RuntimeException(s, e);
          }
        }
      }
    }
  }
}
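For reference, the expected value computed inside the four nested loops collapses to a single predicate. The helper below is only a restatement of the expectation coded in the test, not the production ReplaceDatanodeOnFailure implementation, and the method name is invented for illustration:

// Illustrative restatement of the expectation in testDefaultPolicy: with
// r = replication and n = number of existing datanodes in the pipeline, a
// replacement datanode is expected only when r >= 3, at least one but fewer
// than r replicas remain, and either the stream was appended/hflushed or no
// more than half of the replicas are left.
static boolean defaultPolicyExpectation(short r, int n, boolean isAppend, boolean isHflushed) {
  if (n == 0 || n >= r || r < 3) {
    return false; // nothing to replace, already enough replicas, or pipeline too small
  }
  return isAppend || isHflushed || n <= r / 2;
}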
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class TestReplication, method checkFile.
/* check that at least two replicas of a block share a rack, and that the replicas are not all on one rack */
private void checkFile(FileSystem fileSys, Path name, int repl) throws IOException {
  Configuration conf = fileSys.getConf();
  ClientProtocol namenode = NameNodeProxies.createProxy(conf, fileSys.getUri(),
      ClientProtocol.class).getProxy();
  waitForBlockReplication(name.toString(), namenode, Math.min(numDatanodes, repl), -1);
  LocatedBlocks locations = namenode.getBlockLocations(name.toString(), 0, Long.MAX_VALUE);
  FileStatus stat = fileSys.getFileStatus(name);
  BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat, 0L, Long.MAX_VALUE);
  // verify that rack locations match
  assertTrue(blockLocations.length == locations.locatedBlockCount());
  for (int i = 0; i < blockLocations.length; i++) {
    LocatedBlock blk = locations.get(i);
    DatanodeInfo[] datanodes = blk.getLocations();
    String[] topologyPaths = blockLocations[i].getTopologyPaths();
    assertTrue(topologyPaths.length == datanodes.length);
    for (int j = 0; j < topologyPaths.length; j++) {
      boolean found = false;
      for (int k = 0; k < racks.length; k++) {
        if (topologyPaths[j].startsWith(racks[k])) {
          found = true;
          break;
        }
      }
      assertTrue(found);
    }
  }
  boolean isOnSameRack = true, isNotOnSameRack = true;
  for (LocatedBlock blk : locations.getLocatedBlocks()) {
    DatanodeInfo[] datanodes = blk.getLocations();
    if (datanodes.length <= 1)
      break;
    if (datanodes.length == 2) {
      isNotOnSameRack = !(datanodes[0].getNetworkLocation()
          .equals(datanodes[1].getNetworkLocation()));
      break;
    }
    isOnSameRack = false;
    isNotOnSameRack = false;
    for (int i = 0; i < datanodes.length - 1; i++) {
      LOG.info("datanode " + i + ": " + datanodes[i]);
      boolean onRack = false;
      for (int j = i + 1; j < datanodes.length; j++) {
        if (datanodes[i].getNetworkLocation()
            .equals(datanodes[j].getNetworkLocation())) {
          onRack = true;
        }
      }
      if (onRack) {
        isOnSameRack = true;
      }
      if (!onRack) {
        isNotOnSameRack = true;
      }
      if (isOnSameRack && isNotOnSameRack)
        break;
    }
    if (!isOnSameRack || !isNotOnSameRack)
      break;
  }
  assertTrue(isOnSameRack);
  assertTrue(isNotOnSameRack);
}
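The two flags driven by the nested loops above can be read as follows: isOnSameRack becomes true when at least two replicas of a block report the same network location, and isNotOnSameRack when the replicas span more than one rack. The helper below is a hypothetical equivalent for a single block's locations (not part of the test, and assuming java.util.HashMap is imported):

// Illustrative only: derive the same two per-block flags by counting replicas per rack.
static boolean[] rackFlags(DatanodeInfo[] datanodes) {
  HashMap<String, Integer> replicasPerRack = new HashMap<>();
  for (DatanodeInfo dn : datanodes) {
    replicasPerRack.merge(dn.getNetworkLocation(), 1, Integer::sum);
  }
  boolean twoShareARack = false;
  for (int count : replicasPerRack.values()) {
    if (count > 1) {
      twoShareARack = true; // corresponds to isOnSameRack
    }
  }
  boolean spansRacks = replicasPerRack.size() > 1; // corresponds to isNotOnSameRack
  return new boolean[] { twoShareARack, spansRacks };
}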
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class BlockReaderTestUtil, method getBlockReader.
/**
 * Get a BlockReader for the given block.
 */
public static BlockReader getBlockReader(final DistributedFileSystem fs,
    LocatedBlock testBlock, int offset, long lenToRead) throws IOException {
  InetSocketAddress targetAddr = null;
  ExtendedBlock block = testBlock.getBlock();
  DatanodeInfo[] nodes = testBlock.getLocations();
  targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
  return new BlockReaderFactory(fs.getClient().getConf())
      .setInetSocketAddress(targetAddr)
      .setBlock(block)
      .setFileName(targetAddr.toString() + ":" + block.getBlockId())
      .setBlockToken(testBlock.getBlockToken())
      .setStartOffset(offset)
      .setLength(lenToRead)
      .setVerifyChecksum(true)
      .setClientName("BlockReaderTestUtil")
      .setDatanodeInfo(nodes[0])
      .setClientCacheContext(ClientContext.getFromConf(fs.getConf()))
      .setCachingStrategy(CachingStrategy.newDefaultStrategy())
      .setConfiguration(fs.getConf())
      .setAllowShortCircuitLocalReads(true)
      .setTracer(FsTracer.get(fs.getConf()))
      .setRemotePeerFactory(new RemotePeerFactory() {
        @Override
        public Peer newConnectedPeer(InetSocketAddress addr,
            Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
            throws IOException {
          Peer peer = null;
          Socket sock = NetUtils.getDefaultSocketFactory(fs.getConf()).createSocket();
          try {
            sock.connect(addr, HdfsConstants.READ_TIMEOUT);
            sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
            peer = DFSUtilClient.peerFromSocket(sock);
          } finally {
            if (peer == null) {
              IOUtils.closeQuietly(sock);
            }
          }
          return peer;
        }
      })
      .build();
}
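A rough usage sketch for this helper, not part of the utility class: it assumes fs is an open DistributedFileSystem, the file path /test_file and the 1024-byte read length are illustrative, and the code runs inside a method declared to throw IOException.

// Illustrative caller: fetch the first located block of a file and read its first bytes.
LocatedBlock firstBlock =
    fs.getClient().getLocatedBlocks("/test_file", 0, Long.MAX_VALUE).get(0);
BlockReader reader = BlockReaderTestUtil.getBlockReader(fs, firstBlock, 0, 1024);
byte[] buf = new byte[1024];
int nRead = reader.read(buf, 0, buf.length); // number of bytes actually returned
reader.close();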
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class TestBlockReaderFactory, method testShortCircuitReadFromServerWithoutShm.
/**
 * Test that a client which supports short-circuit reads using
 * shared memory can fall back to not using shared memory when
 * the server doesn't support it.
 */
@Test
public void testShortCircuitReadFromServerWithoutShm() throws Exception {
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration clientConf = createShortCircuitConf(
      "testShortCircuitReadFromServerWithoutShm", sockDir);
  Configuration serverConf = new Configuration(clientConf);
  serverConf.setInt(DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS, 0);
  DFSInputStream.tcpReadsDisabledForTesting = true;
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
  cluster.waitActive();
  clientConf.set(DFS_CLIENT_CONTEXT,
      "testShortCircuitReadFromServerWithoutShm_clientContext");
  final DistributedFileSystem fs =
      (DistributedFileSystem) FileSystem.get(cluster.getURI(0), clientConf);
  final String TEST_FILE = "/test_file";
  final int TEST_FILE_LEN = 4000;
  final int SEED = 0xFADEC;
  DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
  byte[] contents = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
  byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
  Assert.assertTrue(Arrays.equals(contents, expected));
  final ShortCircuitCache cache = fs.getClient().getClientContext().getShortCircuitCache();
  final DatanodeInfo datanode = new DatanodeInfoBuilder()
      .setNodeID(cluster.getDataNodes().get(0).getDatanodeId()).build();
  cache.getDfsClientShmManager().visit(new Visitor() {
    @Override
    public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
        throws IOException {
      Assert.assertEquals(1, info.size());
      PerDatanodeVisitorInfo vinfo = info.get(datanode);
      Assert.assertTrue(vinfo.disabled);
      Assert.assertEquals(0, vinfo.full.size());
      Assert.assertEquals(0, vinfo.notFull.size());
    }
  });
  cluster.shutdown();
  sockDir.close();
}
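A side note on why the info.get(datanode) lookup above finds the client's entry: DatanodeInfo delegates equals() and hashCode() to DatanodeID, so a freshly built DatanodeInfoBuilder instance wrapping the same datanode ID matches the key stored by the shared-memory manager. A small illustrative check, continuing the test's cluster and datanode variables (not part of the original test):

// Illustrative only: DatanodeInfo equality is based on the underlying DatanodeID.
DatanodeInfo sameKey = new DatanodeInfoBuilder()
    .setNodeID(cluster.getDataNodes().get(0).getDatanodeId())
    .build();
Assert.assertEquals(datanode, sameKey);
Assert.assertEquals(datanode.hashCode(), sameKey.hashCode());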
Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
The class TestSetTimes, method testTimesAtClose.
/**
 * Tests mod time change at close in DFS.
 */
@Test
public void testTimesAtClose() throws IOException {
  Configuration conf = new HdfsConfiguration();
  // 2s
  final int MAX_IDLE_TIME = 2000;
  int replicas = 1;
  // parameter initialization
  conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ", numDatanodes, info.length);
  FileSystem fileSys = cluster.getFileSystem();
  assertTrue(fileSys instanceof DistributedFileSystem);
  try {
    // create a new file and write to it
    Path file1 = new Path("/simple.dat");
    FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
    System.out.println("Created and wrote file simple.dat");
    FileStatus statBeforeClose = fileSys.getFileStatus(file1);
    long mtimeBeforeClose = statBeforeClose.getModificationTime();
    String mdateBeforeClose = dateForm.format(new Date(mtimeBeforeClose));
    System.out.println("mtime on " + file1 + " before close is "
        + mdateBeforeClose + " (" + mtimeBeforeClose + ")");
    assertTrue(mtimeBeforeClose != 0);
    // close file after writing
    stm.close();
    System.out.println("Closed file.");
    FileStatus statAfterClose = fileSys.getFileStatus(file1);
    long mtimeAfterClose = statAfterClose.getModificationTime();
    String mdateAfterClose = dateForm.format(new Date(mtimeAfterClose));
    System.out.println("mtime on " + file1 + " after close is "
        + mdateAfterClose + " (" + mtimeAfterClose + ")");
    assertTrue(mtimeAfterClose != 0);
    assertTrue(mtimeBeforeClose != mtimeAfterClose);
    cleanupFile(fileSys, file1);
  } catch (IOException e) {
    info = client.datanodeReport(DatanodeReportType.ALL);
    printDatanodeReport(info);
    throw e;
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
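Since every example on this page revolves around DatanodeInfo, it may help to see what a printDatanodeReport-style helper could look like; the real helper used in the catch block above is defined elsewhere in TestSetTimes, so this is only a sketch:

// Illustrative sketch: dump a human-readable report for each DatanodeInfo
// returned by DFSClient#datanodeReport.
static void printReport(DatanodeInfo[] datanodes) {
  for (DatanodeInfo dn : datanodes) {
    System.out.println(dn.getDatanodeReport()); // name, capacity, DFS used, last contact, ...
  }
}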