Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class TestFileAppend, the method testConcurrentAppendRead.
@Test(timeout = 10000)
public void testConcurrentAppendRead() throws IOException, TimeoutException, InterruptedException {
// Create a finalized replica and append to it
// Read block data and checksum. Verify checksum.
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
conf.setInt("dfs.min.replication", 1);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
DataNode dn = cluster.getDataNodes().get(0);
FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
// create a file with 1 byte of data.
long initialFileLength = 1;
DistributedFileSystem fs = cluster.getFileSystem();
Path fileName = new Path("/appendCorruptBlock");
DFSTestUtil.createFile(fs, fileName, initialFileLength, (short) 1, 0);
DFSTestUtil.waitReplication(fs, fileName, (short) 1);
Assert.assertTrue("File not created", fs.exists(fileName));
// Call FsDatasetImpl#append to append the block file,
// which converts it to a rbw replica.
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
long newGS = block.getGenerationStamp() + 1;
ReplicaHandler replicaHandler = dataSet.append(block, newGS, initialFileLength);
// write data to block file
ReplicaBeingWritten rbw = (ReplicaBeingWritten) replicaHandler.getReplica();
ReplicaOutputStreams outputStreams = rbw.createStreams(false, DEFAULT_CHECKSUM);
OutputStream dataOutput = outputStreams.getDataOut();
byte[] appendBytes = new byte[1];
dataOutput.write(appendBytes, 0, 1);
dataOutput.flush();
dataOutput.close();
// update checksum file
final int smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
FsDatasetUtil.computeChecksum(rbw.getMetaFile(), rbw.getMetaFile(), rbw.getBlockFile(), smallBufferSize, conf);
// read the block
// the DataNode BlockSender should read from the rbw replica's in-memory
// checksum, rather than on-disk checksum. Otherwise it will see a
// checksum mismatch error.
final byte[] readBlock = DFSTestUtil.readFileBuffer(fs, fileName);
assertEquals("should have read only one byte!", 1, readBlock.length);
} finally {
cluster.shutdown();
}
}
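The test drives the append at the FsDatasetSpi level. For contrast, here is a minimal, hedged sketch of the same append performed through the public client API; the path is reused from the test above, everything else is illustrative and not part of TestFileAppend.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class ClientAppendSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        try (FileSystem fs = FileSystem.get(conf)) {
            // Path reused from the test above; any existing file works.
            Path file = new Path("/appendCorruptBlock");
            // FileSystem#append reopens the file's last block, which the DataNode
            // then tracks as an rbw (replica being written) replica -- the state
            // the test above creates directly with FsDatasetSpi#append.
            try (FSDataOutputStream out = fs.append(file)) {
                out.write(new byte[] { 0x01 });
                // hflush makes the new byte visible to concurrent readers
                // without closing the stream.
                out.hflush();
            }
        }
    }
}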
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class TestFileAppend, the method testBreakHardlinksIfNeeded.
@Test
public void testBreakHardlinksIfNeeded() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
try {
// create a new file, write to it and close it.
Path file1 = new Path("/filestatus.dat");
FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
writeFile(stm);
stm.close();
// Get a handle to the datanode
DataNode[] dn = cluster.listDataNodes();
assertTrue("There should be only one datanode but found " + dn.length, dn.length == 1);
LocatedBlocks locations = client.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
List<LocatedBlock> blocks = locations.getLocatedBlocks();
final FsDatasetSpi<?> fsd = dn[0].getFSDataset();
// Create hardlinks for every other block
for (int i = 0; i < blocks.size(); i = i + 2) {
ExtendedBlock b = blocks.get(i).getBlock();
final File f = FsDatasetTestUtil.getBlockFile(fsd, b.getBlockPoolId(), b.getLocalBlock());
File link = new File(f.toString() + ".link");
System.out.println("Creating hardlink for File " + f + " to " + link);
HardLink.createHardLink(f, link);
}
// Detach all blocks. This should remove hardlinks (if any)
for (int i = 0; i < blocks.size(); i++) {
ExtendedBlock b = blocks.get(i).getBlock();
System.out.println("breakHardlinksIfNeeded detaching block " + b);
assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned true", FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
}
// The blocks were already detached above, so these calls should return false
for (int i = 0; i < blocks.size(); i++) {
ExtendedBlock b = blocks.get(i).getBlock();
System.out.println("breakHardlinksIfNeeded re-attempting to " + "detach block " + b);
assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned false", FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
}
} finally {
client.close();
fs.close();
cluster.shutdown();
}
}
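A hedged sketch of how the hardlink bookkeeping behind breakHardlinksIfNeeded can be observed directly. The helper class, its method name, and the blockFile argument are illustrative; only HardLink.createHardLink and HardLink.getLinkCount are taken from the Hadoop API.
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.fs.HardLink;

final class HardlinkProbe {
    // blockFile stands in for a block file resolved as in the test above.
    static void probe(File blockFile) throws IOException {
        File link = new File(blockFile.toString() + ".link");
        HardLink.createHardLink(blockFile, link);
        // With the extra link in place the block file has a link count >= 2,
        // which is the condition breakHardlinksIfNeeded looks for before
        // copying the block to a fresh, single-linked file.
        System.out.println("links before detach: " + HardLink.getLinkCount(blockFile));
    }
}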
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class TestFileCreation, the method testLeaseExpireHardLimit.
/**
* Create a file, write something, hflush it, but do not close it.
* Then shorten the lease period and wait for lease recovery.
* Finally, read the block directly from each DataNode and verify the content.
*/
@Test
public void testLeaseExpireHardLimit() throws Exception {
System.out.println("testLeaseExpireHardLimit start");
final long leasePeriod = 1000;
final int DATANODE_NUM = 3;
Configuration conf = new HdfsConfiguration();
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
DistributedFileSystem dfs = null;
try {
cluster.waitActive();
dfs = cluster.getFileSystem();
// create a new file.
final String f = DIR + "foo";
final Path fpath = new Path(f);
HdfsDataOutputStream out = create(dfs, fpath, DATANODE_NUM);
out.write("something".getBytes());
out.hflush();
int actualRepl = out.getCurrentBlockReplication();
assertTrue(f + " should be replicated to " + DATANODE_NUM + " datanodes.", actualRepl == DATANODE_NUM);
// set the soft and hard limit to be 1 second so that the
// namenode triggers lease recovery
cluster.setLeasePeriod(leasePeriod, leasePeriod);
// wait for the lease to expire
try {
Thread.sleep(5 * leasePeriod);
} catch (InterruptedException e) {
}
LocatedBlocks locations = dfs.dfs.getNamenode().getBlockLocations(f, 0, Long.MAX_VALUE);
assertEquals(1, locations.locatedBlockCount());
LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
int successcount = 0;
for (DatanodeInfo datanodeinfo : locatedblock.getLocations()) {
DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
ExtendedBlock blk = locatedblock.getBlock();
try (BufferedReader in = new BufferedReader(new InputStreamReader(datanode.getFSDataset().getBlockInputStream(blk, 0)))) {
assertEquals("something", in.readLine());
successcount++;
}
}
System.out.println("successcount=" + successcount);
assertTrue(successcount > 0);
} finally {
IOUtils.closeStream(dfs);
cluster.shutdown();
}
System.out.println("testLeaseExpireHardLimit successful");
}
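The test forces recovery by shrinking both lease limits and sleeping. A hedged alternative, outside the test, is to ask the NameNode for lease recovery explicitly; the helper name and the polling loop below are illustrative, only DistributedFileSystem#recoverLease is taken from the API.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class LeaseRecoveryHelper {
    // Returns once the abandoned file has been closed and its last block finalized.
    static void recoverLeaseBlocking(DistributedFileSystem dfs, Path path)
            throws Exception {
        // recoverLease returns true when recovery is complete, and false while
        // the DataNodes are still finalizing the last block.
        while (!dfs.recoverLease(path)) {
            Thread.sleep(1000);
        }
    }
}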
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class TestFileCreation, the method checkFileCreation.
/**
* Test if file creation and disk space consumption work correctly
* @param netIf the local interface, if any, clients should use to access DNs
* @param useDnHostname whether the client should contact DNs by hostname
*/
public void checkFileCreation(String netIf, boolean useDnHostname) throws IOException {
Configuration conf = new HdfsConfiguration();
if (netIf != null) {
conf.set(HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
}
conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
if (useDnHostname) {
// Since the mini cluster only listens on the loopback we have to
// ensure the hostname used to access DNs maps to the loopback. We
// do this by telling the DN to advertise localhost as its hostname
// instead of the default hostname.
conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
}
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).checkDataNodeHostConfig(true).build();
FileSystem fs = cluster.getFileSystem();
try {
//
// check that / exists
//
Path path = new Path("/");
System.out.println("Path : \"" + path.toString() + "\"");
System.out.println(fs.getFileStatus(path).isDirectory());
assertTrue("/ should be a directory", fs.getFileStatus(path).isDirectory());
//
// Create a directory inside /, then try to overwrite it
//
Path dir1 = new Path("/test_dir");
fs.mkdirs(dir1);
System.out.println("createFile: Creating " + dir1.getName() + " for overwrite of existing directory.");
try {
// Create path, overwrite=true
fs.create(dir1, true);
fs.close();
assertTrue("Did not prevent directory from being overwritten.", false);
} catch (FileAlreadyExistsException e) {
// expected
}
//
// create a new file in home directory. Do not close it.
//
Path file1 = new Path("filestatus.dat");
Path parent = file1.getParent();
fs.mkdirs(parent);
DistributedFileSystem dfs = (DistributedFileSystem) fs;
dfs.setQuota(file1.getParent(), 100L, blockSize * 5);
FSDataOutputStream stm = createFile(fs, file1, 1);
// verify that file exists in FS namespace
assertTrue(file1 + " should be a file", fs.getFileStatus(file1).isFile());
System.out.println("Path : \"" + file1 + "\"");
// write to file
writeFile(stm);
stm.close();
// verify that file size has changed to the full size
long len = fs.getFileStatus(file1).getLen();
assertTrue(file1 + " should be of size " + fileSize + " but found to be of size " + len, len == fileSize);
// verify the disk space the file occupied
long diskSpace = dfs.getContentSummary(file1.getParent()).getLength();
assertEquals(file1 + " should take " + fileSize + " bytes disk space " + "but found to take " + diskSpace + " bytes", fileSize, diskSpace);
// can't check capacities for real storage since the OS file system may be changing under us.
if (simulatedStorage) {
DataNode dn = cluster.getDataNodes().get(0);
FsDatasetSpi<?> dataset = DataNodeTestUtils.getFSDataset(dn);
assertEquals(fileSize, dataset.getDfsUsed());
assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY - fileSize, dataset.getRemaining());
}
} finally {
cluster.shutdown();
}
}
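checkFileCreation is a parameterized helper rather than a test in its own right. A hedged sketch of how @Test methods might drive it follows; the method names are illustrative and not taken from the class.
@Test
public void testFileCreation() throws IOException {
    // default: no preferred local interface, contact DataNodes by IP
    checkFileCreation(null, false);
}

@Test
public void testFileCreationByHostname() throws IOException {
    // the mini cluster advertises "localhost", so hostname access stays on loopback
    checkFileCreation(null, true);
}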
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class TestWriteToReplica, the method testReplicaMapAfterDatanodeRestart.
/**
* Checks that the replica map is preserved across a quick datanode restart
* (one completed in less than 5 minutes).
* @throws Exception
*/
@Test
public void testReplicaMapAfterDatanodeRestart() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build();
try {
cluster.waitActive();
NameNode nn1 = cluster.getNameNode(0);
NameNode nn2 = cluster.getNameNode(1);
assertNotNull("cannot create nn1", nn1);
assertNotNull("cannot create nn2", nn2);
// check number of volumes in fsdataset
DataNode dn = cluster.getDataNodes().get(0);
FsDatasetImpl dataSet = (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn);
List<FsVolumeSpi> volumes = null;
try (FsDatasetSpi.FsVolumeReferences referredVols = dataSet.getFsVolumeReferences()) {
// number of volumes should be 2 - [data1, data2]
assertEquals("number of volumes is wrong", 2, referredVols.size());
volumes = new ArrayList<>(referredVols.size());
for (FsVolumeSpi vol : referredVols) {
volumes.add(vol);
}
}
ArrayList<String> bpList = new ArrayList<>(Arrays.asList(cluster.getNamesystem(0).getBlockPoolId(), cluster.getNamesystem(1).getBlockPoolId()));
Assert.assertTrue("Cluster should have 2 block pools", bpList.size() == 2);
createReplicas(bpList, volumes, cluster.getFsDatasetTestUtils(dn));
ReplicaMap oldReplicaMap = new ReplicaMap(new AutoCloseableLock());
oldReplicaMap.addAll(dataSet.volumeMap);
cluster.restartDataNode(0);
cluster.waitActive();
dn = cluster.getDataNodes().get(0);
dataSet = (FsDatasetImpl) dn.getFSDataset();
testEqualityOfReplicaMap(oldReplicaMap, dataSet.volumeMap, bpList);
} finally {
cluster.shutdown();
}
}
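The helper testEqualityOfReplicaMap is not shown in this snippet. A hedged sketch of what such a comparison could look like, assuming the usual ReplicaMap accessors (replicas(bpid) and get(bpid, blockId)); the exact checks in the real helper may differ.
private static void testEqualityOfReplicaMap(ReplicaMap oldMap, ReplicaMap newMap,
        List<String> bpidList) {
    for (String bpid : bpidList) {
        for (ReplicaInfo oldReplica : oldMap.replicas(bpid)) {
            ReplicaInfo newReplica = newMap.get(bpid, oldReplica.getBlockId());
            Assert.assertNotNull("replica missing after restart: " + oldReplica, newReplica);
            // A quick restart must preserve the replica's identity and length.
            Assert.assertEquals(oldReplica.getGenerationStamp(), newReplica.getGenerationStamp());
            Assert.assertEquals(oldReplica.getNumBytes(), newReplica.getNumBytes());
        }
    }
}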