Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
The class TestFileCreation, method testFileCreationWithOverwrite.
/**
 * 1. Check the blocks of old file are cleaned after creating with overwrite
 * 2. Restart NN, check the file
 * 3. Save new checkpoint and restart NN, check the file
 */
@Test(timeout = 120000)
public void testFileCreationWithOverwrite() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt("dfs.blocksize", blockSize);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    dfs.mkdirs(new Path("/foo/dir"));
    String file = "/foo/dir/file";
    Path filePath = new Path(file);
    // Case 1: Create file with overwrite, check the blocks of old file
    // are cleaned after creating with overwrite
    NameNode nn = cluster.getNameNode();
    FSNamesystem fsn = NameNodeAdapter.getNamesystem(nn);
    BlockManager bm = fsn.getBlockManager();
    FSDataOutputStream out = dfs.create(filePath);
    byte[] oldData = AppendTestUtil.randomBytes(seed, fileSize);
    try {
      out.write(oldData);
    } finally {
      out.close();
    }
    LocatedBlocks oldBlocks =
        NameNodeAdapter.getBlockLocations(nn, file, 0, fileSize);
    assertBlocks(bm, oldBlocks, true);
    out = dfs.create(filePath, true);
    byte[] newData = AppendTestUtil.randomBytes(seed, fileSize);
    try {
      out.write(newData);
    } finally {
      out.close();
    }
    dfs.deleteOnExit(filePath);
    LocatedBlocks newBlocks =
        NameNodeAdapter.getBlockLocations(nn, file, 0, fileSize);
    assertBlocks(bm, newBlocks, true);
    assertBlocks(bm, oldBlocks, false);
    FSDataInputStream in = dfs.open(filePath);
    byte[] result = null;
    try {
      result = readAll(in);
    } finally {
      in.close();
    }
    Assert.assertArrayEquals(newData, result);
    // Case 2: Restart NN, check the file
    cluster.restartNameNode();
    nn = cluster.getNameNode();
    in = dfs.open(filePath);
    try {
      result = readAll(in);
    } finally {
      in.close();
    }
    Assert.assertArrayEquals(newData, result);
    // Case 3: Save new checkpoint and restart NN, check the file
    NameNodeAdapter.enterSafeMode(nn, false);
    NameNodeAdapter.saveNamespace(nn);
    cluster.restartNameNode();
    nn = cluster.getNameNode();
    in = dfs.open(filePath);
    try {
      result = readAll(in);
    } finally {
      in.close();
    }
    Assert.assertArrayEquals(newData, result);
  } finally {
    if (dfs != null) {
      dfs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
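The assertBlocks and readAll helpers are defined elsewhere in TestFileCreation and are not reproduced here. A minimal sketch of what they could look like, assuming BlockManager#getStoredBlock returns null once a block has been removed from the NameNode's blocks map; the bodies below are illustrative, not copied from the Hadoop source, and rely on the test class's existing imports plus java.io.ByteArrayOutputStream.

// Sketch only: check whether every block reported in a LocatedBlocks result
// is still (or no longer) present in the NameNode's BlockManager.
private void assertBlocks(BlockManager bm, LocatedBlocks lbs, boolean exist) {
  for (LocatedBlock locatedBlock : lbs.getLocatedBlocks()) {
    // getStoredBlock resolves the block against the NameNode's blocks map
    if (exist) {
      assertTrue(
          bm.getStoredBlock(locatedBlock.getBlock().getLocalBlock()) != null);
    } else {
      assertTrue(
          bm.getStoredBlock(locatedBlock.getBlock().getLocalBlock()) == null);
    }
  }
}

// Sketch only: drain an input stream into a byte array for comparison.
private byte[] readAll(FSDataInputStream in) throws IOException {
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  byte[] buffer = new byte[1024];
  int n;
  while ((n = in.read(buffer)) > 0) {
    out.write(buffer, 0, n);
  }
  return out.toByteArray();
}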
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
The class TestFileCreation, method testFileCreationError2.
/**
 * Test that the filesystem removes the last block from a file if its
 * lease expires.
 */
@Test
public void testFileCreationError2() throws IOException {
  long leasePeriod = 1000;
  System.out.println("testFileCreationError2 start");
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem dfs = null;
  try {
    cluster.waitActive();
    dfs = cluster.getFileSystem();
    DFSClient client = dfs.dfs;
    // create a new file.
    Path file1 = new Path("/filestatus.dat");
    createFile(dfs, file1, 1);
    System.out.println("testFileCreationError2: "
        + "Created file filestatus.dat with one replica.");
    LocatedBlocks locations = client.getNamenode().getBlockLocations(
        file1.toString(), 0, Long.MAX_VALUE);
    System.out.println("testFileCreationError2: " + "The file has "
        + locations.locatedBlockCount() + " blocks.");
    // add one block to the file
    LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
        client.clientName, null, null, HdfsConstants.GRANDFATHER_INODE_ID,
        null, null);
    System.out.println("testFileCreationError2: " + "Added block "
        + location.getBlock());
    locations = client.getNamenode().getBlockLocations(file1.toString(), 0,
        Long.MAX_VALUE);
    int count = locations.locatedBlockCount();
    System.out.println("testFileCreationError2: " + "The file now has "
        + count + " blocks.");
    // set the soft and hard limit to be 1 second so that the
    // namenode triggers lease recovery
    cluster.setLeasePeriod(leasePeriod, leasePeriod);
    // wait for the lease to expire
    try {
      Thread.sleep(5 * leasePeriod);
    } catch (InterruptedException e) {
    }
    // verify that the last block was synchronized.
    locations = client.getNamenode().getBlockLocations(file1.toString(), 0,
        Long.MAX_VALUE);
    System.out.println("testFileCreationError2: " + "locations = "
        + locations.locatedBlockCount());
    assertEquals(0, locations.locatedBlockCount());
    System.out.println("testFileCreationError2 successful");
  } finally {
    IOUtils.closeStream(dfs);
    cluster.shutdown();
  }
}
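createFile is another TestFileCreation helper that the aggregator does not show. A plausible sketch, assuming it only creates the file with the requested replication factor and returns the open stream (the exact parameters in the real helper may differ); FileSystem#create(Path, boolean, int, short, long) is a stable public API.

// Sketch only: create a file with the given replication factor and return
// the open output stream; the caller decides what, if anything, to write.
static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
  System.out.println("createFile: Created " + name + " with " + repl
      + " replica.");
  return fileSys.create(name, true,
      fileSys.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize);
}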
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
The class TestFileCreation, method testFileCreationNamenodeRestart.
/**
 * Test that file leases are persisted across namenode restarts.
 */
@Test
public void testFileCreationNamenodeRestart()
    throws IOException, NoSuchFieldException, IllegalAccessException {
  Configuration conf = new HdfsConfiguration();
  // 2s
  final int MAX_IDLE_TIME = 2000;
  conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem fs = null;
  try {
    cluster.waitActive();
    fs = cluster.getFileSystem();
    final int nnport = cluster.getNameNodePort();
    // create a new file.
    Path file1 = new Path("/filestatus.dat");
    HdfsDataOutputStream stm = create(fs, file1, 1);
    System.out.println("testFileCreationNamenodeRestart: "
        + "Created file " + file1);
    assertEquals(file1 + " should be replicated to 1 datanode.", 1,
        stm.getCurrentBlockReplication());
    // write two full blocks.
    writeFile(stm, numBlocks * blockSize);
    stm.hflush();
    assertEquals(file1 + " should still be replicated to 1 datanode.", 1,
        stm.getCurrentBlockReplication());
    // rename file while keeping it open.
    Path fileRenamed = new Path("/filestatusRenamed.dat");
    fs.rename(file1, fileRenamed);
    System.out.println("testFileCreationNamenodeRestart: "
        + "Renamed file " + file1 + " to " + fileRenamed);
    file1 = fileRenamed;
    // create another new file.
    Path file2 = new Path("/filestatus2.dat");
    FSDataOutputStream stm2 = createFile(fs, file2, 1);
    System.out.println("testFileCreationNamenodeRestart: "
        + "Created file " + file2);
    // create yet another new file with full path name.
    // rename it while open
    Path file3 = new Path("/user/home/fullpath.dat");
    FSDataOutputStream stm3 = createFile(fs, file3, 1);
    System.out.println("testFileCreationNamenodeRestart: "
        + "Created file " + file3);
    Path file4 = new Path("/user/home/fullpath4.dat");
    FSDataOutputStream stm4 = createFile(fs, file4, 1);
    System.out.println("testFileCreationNamenodeRestart: "
        + "Created file " + file4);
    fs.mkdirs(new Path("/bin"));
    fs.rename(new Path("/user/home"), new Path("/bin"));
    Path file3new = new Path("/bin/home/fullpath.dat");
    System.out.println("testFileCreationNamenodeRestart: "
        + "Renamed file " + file3 + " to " + file3new);
    Path file4new = new Path("/bin/home/fullpath4.dat");
    System.out.println("testFileCreationNamenodeRestart: "
        + "Renamed file " + file4 + " to " + file4new);
    // restart cluster with the same namenode port as before.
    // This ensures that leases are persisted in fsimage.
    cluster.shutdown(false, false);
    try {
      Thread.sleep(2 * MAX_IDLE_TIME);
    } catch (InterruptedException e) {
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
        .format(false).build();
    cluster.waitActive();
    // restart cluster yet again. This triggers the code to read in
    // persistent leases from fsimage.
    cluster.shutdown(false, false);
    try {
      Thread.sleep(5000);
    } catch (InterruptedException e) {
    }
    cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
        .format(false).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    // instruct the dfsclient to use a new filename when it requests
    // new blocks for files that were renamed.
    DFSOutputStream dfstream = (DFSOutputStream) (stm.getWrappedStream());
    Field f = DFSOutputStream.class.getDeclaredField("src");
    Field modifiersField = Field.class.getDeclaredField("modifiers");
    modifiersField.setAccessible(true);
    modifiersField.setInt(f, f.getModifiers() & ~Modifier.FINAL);
    f.setAccessible(true);
    f.set(dfstream, file1.toString());
    dfstream = (DFSOutputStream) (stm3.getWrappedStream());
    f.set(dfstream, file3new.toString());
    dfstream = (DFSOutputStream) (stm4.getWrappedStream());
    f.set(dfstream, file4new.toString());
    // write 1 byte to file. This should succeed because the
    // namenode should have persisted leases.
    byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
    stm.write(buffer);
    stm.close();
    stm2.write(buffer);
    stm2.close();
    stm3.close();
    stm4.close();
    // verify that new block is associated with this file
    DFSClient client = fs.dfs;
    LocatedBlocks locations = client.getNamenode().getBlockLocations(
        file1.toString(), 0, Long.MAX_VALUE);
    System.out.println("locations = " + locations.locatedBlockCount());
    assertTrue("Error blocks were not cleaned up for file " + file1,
        locations.locatedBlockCount() == 3);
    // verify filestatus2.dat
    locations = client.getNamenode().getBlockLocations(file2.toString(), 0,
        Long.MAX_VALUE);
    System.out.println("locations = " + locations.locatedBlockCount());
    assertTrue("Error blocks were not cleaned up for file " + file2,
        locations.locatedBlockCount() == 1);
  } finally {
    IOUtils.closeStream(fs);
    cluster.shutdown();
  }
}
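The create and writeFile helpers used by this test are also defined in TestFileCreation but omitted by the aggregator. A hedged sketch of what they might look like, assuming the test's blockSize and seed fields and that create asks for SYNC_BLOCK semantics so the data written before the restarts is durable; the exact signatures in the real test may differ.

// Sketch only: create a file as an HdfsDataOutputStream with the given
// replication factor; FileSystem#create(Path, FsPermission, EnumSet,
// int, short, long, Progressable) is a public API.
static HdfsDataOutputStream create(DistributedFileSystem fs, Path f, int repl)
    throws IOException {
  return (HdfsDataOutputStream) fs.create(f, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      fs.getConf().getInt("io.file.buffer.size", 4096),
      (short) repl, (long) blockSize, null);
}

// Sketch only: write `size` bytes of random data to an open stream.
static void writeFile(FSDataOutputStream stm, int size) throws IOException {
  byte[] buffer = AppendTestUtil.randomBytes(seed, size);
  stm.write(buffer, 0, size);
}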
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
The class TestFileCreation, method testFileCreationError1.
/**
 * Test that file data does not become corrupted even in the face of errors.
 */
@Test
public void testFileCreationError1() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  cluster.waitActive();
  InetSocketAddress addr = new InetSocketAddress("localhost",
      cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  try {
    // create a new file.
    Path file1 = new Path("/filestatus.dat");
    FSDataOutputStream stm = createFile(fs, file1, 1);
    // verify that file exists in FS namespace
    assertTrue(file1 + " should be a file",
        fs.getFileStatus(file1).isFile());
    System.out.println("Path : \"" + file1 + "\"");
    // kill the datanode
    cluster.shutdownDataNodes();
    // wait for the datanode to be declared dead
    while (true) {
      DatanodeInfo[] info =
          client.datanodeReport(HdfsConstants.DatanodeReportType.LIVE);
      if (info.length == 0) {
        break;
      }
      System.out.println("testFileCreationError1: waiting for datanode "
          + " to die.");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException e) {
      }
    }
    // write 1 byte to file.
    // This should fail because all datanodes are dead.
    byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
    try {
      stm.write(buffer);
      stm.close();
    } catch (Exception e) {
      System.out.println("Encountered expected exception");
    }
    // verify that no blocks are associated with this file
    // bad block allocations were cleaned up earlier.
    LocatedBlocks locations = client.getNamenode().getBlockLocations(
        file1.toString(), 0, Long.MAX_VALUE);
    System.out.println("locations = " + locations.locatedBlockCount());
    assertTrue("Error blocks were not cleaned up",
        locations.locatedBlockCount() == 0);
  } finally {
    cluster.shutdown();
    client.close();
  }
}
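All of these tests only count blocks via locatedBlockCount(), but a LocatedBlocks object carries more detail that is handy when a block-count assertion fails. An illustrative helper (the printBlocks name is made up, not part of the Hadoop test code) that dumps what the NameNode returned:

// Illustrative only: walk a LocatedBlocks result and print each block's id,
// length, and the datanodes currently holding it.
static void printBlocks(LocatedBlocks locations) {
  System.out.println("file length = " + locations.getFileLength()
      + ", under construction = " + locations.isUnderConstruction());
  for (LocatedBlock lb : locations.getLocatedBlocks()) {
    System.out.println("  block " + lb.getBlock()
        + " size=" + lb.getBlockSize()
        + " locations=" + Arrays.toString(lb.getLocations()));
  }
}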
Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
The class TestFileAppend3, method testTC12.
/**
 * TC12: Append to partial CRC chunk
 */
private void testTC12(boolean appendToNewBlock) throws Exception {
  final Path p = new Path("/TC12/foo" + (appendToNewBlock ? "0" : "1"));
  System.out.println("p=" + p);
  // a. Create file with a block size of 64KB
  //    and a default io.bytes.per.checksum of 512 bytes.
  //    Write 25687 bytes of data. Close file.
  final int len1 = 25687;
  {
    FSDataOutputStream out =
        fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
    AppendTestUtil.write(out, 0, len1);
    out.close();
  }
  // b. Reopen file in "append" mode. Append another 5877 bytes of data.
  //    Close file.
  final int len2 = 5877;
  {
    FSDataOutputStream out = appendToNewBlock
        ? fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK),
            4096, null)
        : fs.append(p);
    AppendTestUtil.write(out, len1, len2);
    out.close();
  }
  // c. Reopen file and read 25687+5877 bytes of data from file. Close file.
  AppendTestUtil.check(fs, p, len1 + len2);
  if (appendToNewBlock) {
    LocatedBlocks blks = fs.dfs.getLocatedBlocks(p.toString(), 0);
    Assert.assertEquals(2, blks.getLocatedBlocks().size());
    Assert.assertEquals(len1, blks.getLocatedBlocks().get(0).getBlockSize());
    Assert.assertEquals(len2, blks.getLocatedBlocks().get(1).getBlockSize());
    AppendTestUtil.check(fs, p, 0, len1);
    AppendTestUtil.check(fs, p, len1, len2);
  }
}
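testTC12 is a private parameterized helper rather than a test method itself. In TestFileAppend3 it is driven by small @Test wrappers, roughly like the sketch below (the wrapper names here are assumptions, not verified against the source):

// Sketch only: JUnit entry points that exercise both append variants.
@Test
public void testTC12() throws Exception {
  // append into the existing partial block of the file
  testTC12(false);
}

@Test
public void testTC12ForAppend2() throws Exception {
  // CreateFlag.NEW_BLOCK forces the appended data into a fresh block,
  // which is why the second branch can assert two blocks of len1 and len2
  testTC12(true);
}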