Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hbase by apache.
Class TestBlockReorder, method testBlockLocationReorder.
/**
 * Test that we can add a hook, and that this hook works when we try to read the file in HDFS.
 */
@Test
public void testBlockLocationReorder() throws Exception {
  Path p = new Path("hello");
  Assert.assertTrue((short) cluster.getDataNodes().size() > 1);
  final int repCount = 2;
  // Let's write the file
  FSDataOutputStream fop = dfs.create(p, (short) repCount);
  final double toWrite = 875.5613;
  fop.writeDouble(toWrite);
  fop.close();
  // Let's check we can read it when everybody's there
  long start = System.currentTimeMillis();
  FSDataInputStream fin = dfs.open(p);
  Assert.assertTrue(toWrite == fin.readDouble());
  long end = System.currentTimeMillis();
  LOG.info("readtime= " + (end - start));
  fin.close();
  Assert.assertTrue((end - start) < 30 * 1000);
  // Let's kill the first location. But actually the first location returned will change.
  // The first thing to do is to get the location, then the port.
  FileStatus f = dfs.getFileStatus(p);
  BlockLocation[] lbs;
  do {
    lbs = dfs.getFileBlockLocations(f, 0, 1);
  } while (lbs.length != 1 && lbs[0].getLength() != repCount);
  final String name = lbs[0].getNames()[0];
  Assert.assertTrue(name.indexOf(':') > 0);
  String portS = name.substring(name.indexOf(':') + 1);
  final int port = Integer.parseInt(portS);
  LOG.info("port= " + port);
  int ipcPort = -1;
  // Let's find the DN to kill. cluster.getDataNodes(int) is not indexed by port, so we need
  // to iterate ourselves.
  boolean ok = false;
  final String lookup = lbs[0].getHosts()[0];
  StringBuilder sb = new StringBuilder();
  for (DataNode dn : cluster.getDataNodes()) {
    final String dnName = getHostName(dn);
    sb.append(dnName).append(' ');
    if (lookup.equals(dnName)) {
      ok = true;
      LOG.info("killing datanode " + name + " / " + lookup);
      ipcPort = dn.ipcServer.getListenerAddress().getPort();
      dn.shutdown();
      LOG.info("killed datanode " + name + " / " + lookup);
      break;
    }
  }
  Assert.assertTrue("didn't find the server to kill, was looking for " + lookup + " found " + sb, ok);
  LOG.info("ipc port= " + ipcPort);
  // Add the hook, with an implementation checking that we don't use the port we've just killed.
  Assert.assertTrue(HFileSystem.addLocationsOrderInterceptor(conf, new HFileSystem.ReorderBlocks() {
    @Override
    public void reorderBlocks(Configuration c, LocatedBlocks lbs, String src) {
      for (LocatedBlock lb : lbs.getLocatedBlocks()) {
        if (lb.getLocations().length > 1) {
          DatanodeInfo[] infos = lb.getLocations();
          if (infos[0].getHostName().equals(lookup)) {
            LOG.info("HFileSystem bad host, inverting");
            DatanodeInfo tmp = infos[0];
            infos[0] = infos[1];
            infos[1] = tmp;
          }
        }
      }
    }
  }));
  final int retries = 10;
  ServerSocket ss = null;
  ServerSocket ssI;
  try {
    // We take the dead datanode's ports so that a client connecting to them hangs and
    // times out later instead of failing immediately.
    ss = new ServerSocket(port);
    ssI = new ServerSocket(ipcPort);
  } catch (BindException be) {
    LOG.warn("Got bind exception trying to set up socket on " + port + " or " + ipcPort
        + ", this means that the datanode has not closed the socket or someone else took it."
        + " It may happen, skipping this test for this time.", be);
    if (ss != null) {
      ss.close();
    }
    return;
  }
  // So we try 'retries' times; with the reorder in place a read never takes more than a few milliseconds.
  for (int i = 0; i < retries; i++) {
    start = System.currentTimeMillis();
    fin = dfs.open(p);
    Assert.assertTrue(toWrite == fin.readDouble());
    fin.close();
    end = System.currentTimeMillis();
    LOG.info("HFileSystem readtime= " + (end - start));
    Assert.assertFalse("We took too much time to read", (end - start) > 60000);
  }
  ss.close();
  ssI.close();
}
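Aside: the interceptor registered above is the general-purpose hook HBase exposes for reordering block locations. A minimal sketch (not the project's code) of a hook that always promotes one preferred host could look like the following; preferredHost and conf are assumed to come from the caller, and the imports are the same as in the test above.

// Hypothetical sketch: move a preferred replica host to the front of every located block.
// Only HFileSystem.addLocationsOrderInterceptor and the ReorderBlocks callback are taken from
// the snippet above; 'conf' and 'preferredHost' are assumptions for illustration.
final String preferredHost = "datanode-1.example.org";
HFileSystem.addLocationsOrderInterceptor(conf, new HFileSystem.ReorderBlocks() {
  @Override
  public void reorderBlocks(Configuration c, LocatedBlocks lbs, String src) {
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
      DatanodeInfo[] infos = lb.getLocations();
      for (int i = 1; i < infos.length; i++) {
        if (infos[i].getHostName().equals(preferredHost)) {
          // The DFS client tries locations in array order, so swapping into slot 0 is enough.
          DatanodeInfo tmp = infos[0];
          infos[0] = infos[i];
          infos[i] = tmp;
          break;
        }
      }
    }
  }
});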
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
Class MiniDFSCluster, method restartDataNode.
/**
 * Restart a datanode, on the same port if requested
 * @param dnprop the datanode to restart
 * @param keepPort whether to use the same port
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop, boolean keepPort) throws IOException {
  Configuration conf = dnprop.conf;
  String[] args = dnprop.dnArgs;
  SecureResources secureResources = dnprop.secureResources;
  // save cloned config
  Configuration newconf = new HdfsConfiguration(conf);
  if (keepPort) {
    InetSocketAddress addr = dnprop.datanode.getXferAddress();
    conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + addr.getPort());
    conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort);
  }
  final DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
  final DataNodeProperties dnp = new DataNodeProperties(newDn, newconf, args, secureResources, newDn.getIpcPort());
  dataNodes.add(dnp);
  numDataNodes++;
  setDataNodeStorageCapacities(dataNodes.lastIndexOf(dnp), newDn, storageCap.toArray(new long[][] {}));
  return true;
}
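A hedged usage sketch of restartDataNode: pair it with stopDataNode(int) to bounce a node and, with keepPort = true, bring it back on the same transfer and IPC ports so clients holding cached block locations can reconnect. The variable names and the waitActive() call below are illustrative assumptions, not code from MiniDFSCluster itself.

// Sketch only: assumes a running MiniDFSCluster named 'cluster' created by the test.
MiniDFSCluster.DataNodeProperties dnProps = cluster.stopDataNode(0); // take the first datanode down
// ... exercise the cluster while the node is absent ...
Assert.assertTrue(cluster.restartDataNode(dnProps, true));           // true = reuse the old xfer/IPC ports
cluster.waitActive();                                                // block until the datanode re-registers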
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
Class MiniDFSCluster, method injectBlocks.
/**
 * This method is valid only if the data nodes have simulated data
 * @param dataNodeIndex - data node in which to inject - the index is the same as for getDataNodes()
 * @param blocksToInject - the blocks
 * @param bpid - (optional) the block pool id to use for injecting blocks.
 *               If not supplied then it is queried from the in-process NameNode.
 * @throws IOException
 *               if the dataset is not a SimulatedFSDataset
 *               if any of the blocks already exist in the data node
 */
public void injectBlocks(int dataNodeIndex, Iterable<Block> blocksToInject, String bpid) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  if (bpid == null) {
    bpid = getNamesystem().getBlockPoolId();
  }
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(bpid, blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
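The method above only works against SimulatedFSDataset, so a caller has to opt the cluster into simulated storage first. A sketch of that flow, with made-up block ids, lengths, and generation stamps purely for illustration:

// Sketch: build a cluster with simulated storage and inject two fabricated blocks into datanode 0.
Configuration conf = new HdfsConfiguration();
SimulatedFSDataset.setFactory(conf);                        // datanodes use in-memory simulated storage
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
  cluster.waitActive();
  String bpid = cluster.getNamesystem().getBlockPoolId();
  List<Block> blocks = new ArrayList<>();
  blocks.add(new Block(1001L, 4096L, 0L));                  // fabricated block id / length / generation stamp
  blocks.add(new Block(1002L, 4096L, 0L));
  cluster.injectBlocks(0, blocks, bpid);                    // passing null for bpid triggers the NameNode lookup
} finally {
  cluster.shutdown();
}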
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
Class MiniDFSCluster, method corruptBlockOnDataNodesHelper.
private int corruptBlockOnDataNodesHelper(ExtendedBlock block, boolean deleteBlockFile) throws IOException {
  int blocksCorrupted = 0;
  for (DataNode dn : getDataNodes()) {
    try {
      MaterializedReplica replica = getFsDatasetTestUtils(dn).getMaterializedReplica(block);
      if (deleteBlockFile) {
        replica.deleteData();
      } else {
        replica.corruptData();
      }
      blocksCorrupted++;
    } catch (ReplicaNotFoundException e) {
      // Ignore.
    }
  }
  return blocksCorrupted;
}
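For reference, this private helper is reached through MiniDFSCluster's public corruption entry points; the sketch below assumes the corruptBlockOnDataNodes(ExtendedBlock) wrapper (deleteBlockFile = false) and the DFSTestUtil helpers, with fs and cluster coming from the surrounding test.

// Sketch: corrupt every replica of a test file's first block and check how many were touched.
Path testFile = new Path("/corruption-test");
DFSTestUtil.createFile(fs, testFile, 1024L, (short) 3, 0L); // small file, replication factor 3
ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, testFile);
int corrupted = cluster.corruptBlockOnDataNodes(blk);       // assumed public wrapper over the helper above
Assert.assertEquals(3, corrupted);                          // one corrupted replica per datanode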
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
Class MiniDFSCluster, method stopDataNodeForUpgrade.
/*
 * Shut down a particular datanode
 * @param i node index
 * @return null if the node index is out of range, else the properties of the
 * removed node
 */
public synchronized DataNodeProperties stopDataNodeForUpgrade(int i) throws IOException {
  if (i < 0 || i >= dataNodes.size()) {
    return null;
  }
  DataNodeProperties dnprop = dataNodes.remove(i);
  DataNode dn = dnprop.datanode;
  LOG.info("MiniDFSCluster Stopping DataNode " + dn.getDisplayName()
      + " from a total of " + (dataNodes.size() + 1) + " datanodes.");
  dn.shutdownDatanode(true);
  numDataNodes--;
  return dnprop;
}