use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestFileAppend4 method testAppendInsufficientLocations.
/**
* Test that an append with no locations fails with an exception
* showing insufficient locations.
*/
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
  Configuration conf = new Configuration();
  // lower heartbeat interval for fast recognition of DN failure
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  DistributedFileSystem fileSystem = null;
  try {
    // create a file with replication 2
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();
    // Check for replication
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
    // Shut down all DNs that have the last block location for the file
    LocatedBlocks lbs = fileSystem.dfs.getNamenode()
        .getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
    List<DataNode> dnsOfCluster = cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
    for (DataNode dn : dnsOfCluster) {
      for (DatanodeInfo loc : dnsWithLocations) {
        if (dn.getDatanodeId().equals(loc)) {
          dn.shutdown();
          DFSTestUtil.waitForDatanodeDeath(dn);
        }
      }
    }
    // Wait till 0 replication is recognized
    DFSTestUtil.waitReplication(fileSystem, f, (short) 0);
    // Append to the file; at this point none of the remaining live DNs
    // have the block.
    try {
      fileSystem.append(f);
      fail("Append should fail because of insufficient locations");
    } catch (IOException e) {
      LOG.info("Expected exception: ", e);
    }
    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
    assertTrue("File should remain closed", !inode.isUnderConstruction());
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
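The condition this test exercises, append() throwing an IOException when no live datanode holds the file's last block, also has to be handled in client code. Below is a minimal, hypothetical sketch of guarding an append against that failure; the path name and the decision to simply log and retry later are illustrative assumptions, not part of the test, and a reachable HDFS configured via fs.defaultFS is assumed.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GuardedAppend {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();     // picks up core-site.xml / hdfs-site.xml
    FileSystem fs = FileSystem.get(conf);
    Path target = new Path("/data/app.log");      // hypothetical path

    FSDataOutputStream out = null;
    try {
      // append() fails with an IOException when the NameNode cannot build a
      // write pipeline from live replicas of the file's last block; the test
      // above forces this by shutting down every DN that holds that block.
      out = fs.append(target);
      out.write("one more record\n".getBytes(StandardCharsets.UTF_8));
      out.hflush();                               // make the new bytes visible to readers
    } catch (IOException e) {
      System.err.println("Append rejected, will retry later: " + e.getMessage());
    } finally {
      if (out != null) {
        out.close();
      }
      fs.close();
    }
  }
}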
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestFileAppendRestart method testAppendRestart.
/**
* Regression test for HDFS-2991. Creates and appends to files
* where blocks start/end on block boundaries.
*/
@Test
public void testAppendRestart() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Turn off persistent IPC, so that the DFSClient can survive NN restart
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
  MiniDFSCluster cluster = null;
  FSDataOutputStream stream = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    FileSystem fs = cluster.getFileSystem();
    File editLog = new File(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(0),
        NNStorage.getInProgressEditsFileName(1));
    EnumMap<FSEditLogOpCodes, Holder<Integer>> counts;

    Path p1 = new Path("/block-boundaries");
    writeAndAppend(fs, p1, BLOCK_SIZE, BLOCK_SIZE);
    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to reopen file
    // OP_ADD_BLOCK for second block
    // OP_CLOSE to close file
    assertEquals(1, (int) counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(1, (int) counts.get(FSEditLogOpCodes.OP_APPEND).held);
    assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_CLOSE).held);

    Path p2 = new Path("/not-block-boundaries");
    writeAndAppend(fs, p2, BLOCK_SIZE / 2, BLOCK_SIZE);
    counts = FSImageTestUtil.countEditLogOpTypes(editLog);
    // OP_ADD to create file
    // OP_ADD_BLOCK for first block
    // OP_CLOSE to close file
    // OP_APPEND to re-establish the lease
    // OP_UPDATE_BLOCKS from the updatePipeline call (increments genstamp of last block)
    // OP_ADD_BLOCK at the start of the second block
    // OP_CLOSE to close file
    // Total: 2 OP_ADDs, 1 OP_UPDATE_BLOCKS, 2 OP_ADD_BLOCKs, and 2 OP_CLOSEs
    // in addition to the ones above
    assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_ADD).held);
    assertEquals(2, (int) counts.get(FSEditLogOpCodes.OP_APPEND).held);
    assertEquals(1, (int) counts.get(FSEditLogOpCodes.OP_UPDATE_BLOCKS).held);
    assertEquals(2 + 2, (int) counts.get(FSEditLogOpCodes.OP_ADD_BLOCK).held);
    assertEquals(2 + 2, (int) counts.get(FSEditLogOpCodes.OP_CLOSE).held);

    cluster.restartNameNode();
    AppendTestUtil.check(fs, p1, 2 * BLOCK_SIZE);
    AppendTestUtil.check(fs, p2, 3 * BLOCK_SIZE / 2);
  } finally {
    IOUtils.closeStream(stream);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
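The writeAndAppend(fs, path, firstLen, appendLen) helper is not shown on this page; the real method lives in TestFileAppendRestart. As an assumption about its shape only, it would do roughly the following: create the file, write firstLen bytes, close, reopen with append(), write appendLen more bytes, and close again, which is what produces the OP_ADD / OP_APPEND / OP_ADD_BLOCK / OP_CLOSE sequence counted above.

// Hypothetical reconstruction of the writeAndAppend helper used above;
// the actual implementation in TestFileAppendRestart may differ.
private void writeAndAppend(FileSystem fs, Path p, int lengthForCreate, int lengthForAppend)
    throws IOException {
  byte[] data = AppendTestUtil.initBuffer(lengthForCreate + lengthForAppend);
  // First write: logs OP_ADD (create), OP_ADD_BLOCK, OP_CLOSE
  FSDataOutputStream out = fs.create(p);
  try {
    out.write(data, 0, lengthForCreate);
  } finally {
    out.close();
  }
  // Append: logs OP_APPEND, possibly OP_UPDATE_BLOCKS and/or OP_ADD_BLOCK, then OP_CLOSE
  out = fs.append(p);
  try {
    out.write(data, lengthForCreate, lengthForAppend);
  } finally {
    out.close();
  }
  // Sanity check the total length
  assertEquals(lengthForCreate + lengthForAppend, fs.getFileStatus(p).getLen());
}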
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestFileAppendRestart method testAppendWithPipelineRecovery.
/**
 * Test appending to a file when one of the datanodes in the existing
 * pipeline is down.
 */
@Test
public void testAppendWithPipelineRecovery() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  FSDataOutputStream out = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .manageDataDfsDirs(true)
        .manageNameDfsDirs(true)
        .numDataNodes(4)
        .racks(new String[] { "/rack1", "/rack1", "/rack2", "/rack2" })
        .build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    Path path = new Path("/test1");
    out = fs.create(path, true, BLOCK_SIZE, (short) 3, BLOCK_SIZE);
    AppendTestUtil.write(out, 0, 1024);
    out.close();
    cluster.stopDataNode(3);
    out = fs.append(path);
    AppendTestUtil.write(out, 1024, 1024);
    out.close();
    cluster.restartNameNode(true);
    AppendTestUtil.check(fs, path, 2048);
  } finally {
    IOUtils.closeStream(out);
    if (null != cluster) {
      cluster.shutdown();
    }
  }
}
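On the client side, how an append behaves when a pipeline datanode fails is governed by the replace-datanode-on-failure settings. Below is a small, hedged sketch of appending with those settings spelled out explicitly; the chosen values and the target path are illustrative assumptions, not taken from the test.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendWithPipelineRecovery {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // When a DN in the write pipeline dies, let the client ask the NameNode
    // for a replacement DN according to the DEFAULT policy (one reasonable
    // choice; other policies are NEVER and ALWAYS).
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");

    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/test1");   // same path name as the test, purely for illustration
    FSDataOutputStream out = fs.append(path);
    try {
      out.write("appended after a pipeline failure\n".getBytes(StandardCharsets.UTF_8));
      out.hflush();
    } finally {
      out.close();
      fs.close();
    }
  }
}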
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestFileAppend method testComplexFlush.
/**
* Test that file data can be flushed.
* @throws IOException an exception might be thrown
*/
@Test
public void testComplexFlush() throws IOException {
  Configuration conf = new HdfsConfiguration();
  fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    // create a new file.
    Path file1 = new Path("/complexFlush.dat");
    FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
    System.out.println("Created file complexFlush.dat");
    int start = 0;
    for (start = 0; (start + 29) < AppendTestUtil.FILE_SIZE; ) {
      stm.write(fileContents, start, 29);
      stm.hflush();
      start += 29;
    }
    stm.write(fileContents, start, AppendTestUtil.FILE_SIZE - start);
    // need to make sure we completely write out all full blocks before
    // the checkFile() call (see FSOutputSummer#flush)
    stm.flush();
    // verify that full blocks are sane
    checkFile(fs, file1, 1);
    stm.close();
    // verify that entire file is good
    AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
  } catch (IOException e) {
    System.out.println("Exception: " + e);
    throw e;
  } catch (Throwable e) {
    System.out.println("Throwable: " + e);
    e.printStackTrace();
    throw new IOException("Throwable: " + e);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
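The test mixes flush() and hflush(), which have different guarantees on HDFS: flush() only drains the client-side checksum buffer (see FSOutputSummer#flush), hflush() pushes the data through the pipeline so new readers can see it, and hsync() additionally asks the datanodes to sync it to disk. A minimal sketch of the three calls on a plain writer follows; the file name is an illustrative assumption.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlushSemantics {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/flush-demo.dat");   // hypothetical path
    FSDataOutputStream out = fs.create(p);
    try {
      out.write("record 1\n".getBytes(StandardCharsets.UTF_8));
      out.flush();    // only drains client-side buffers; no durability guarantee
      out.write("record 2\n".getBytes(StandardCharsets.UTF_8));
      out.hflush();   // data now visible to new readers, but not necessarily on disk
      out.write("record 3\n".getBytes(StandardCharsets.UTF_8));
      out.hsync();    // additionally asks the datanodes to sync the data to disk
    } finally {
      out.close();    // completes the file and finalizes its length
      fs.close();
    }
  }
}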
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestFileAppend method testAppendAfterSoftLimit.
/** Tests appending after soft-limit expires. */
@Test
public void testAppendAfterSoftLimit() throws IOException, InterruptedException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  // Set a small soft limit for the lease
  final long softLimit = 1L;
  final long hardLimit = 9999999L;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.setLeasePeriod(softLimit, hardLimit);
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  FileSystem fs2 = new DistributedFileSystem();
  fs2.initialize(fs.getUri(), conf);
  final Path testPath = new Path("/testAppendAfterSoftLimit");
  final byte[] fileContents = AppendTestUtil.initBuffer(32);
  // create a new file without closing it
  FSDataOutputStream out = fs.create(testPath);
  out.write(fileContents);
  // Wait for longer than the soft limit
  Thread.sleep(250);
  try {
    FSDataOutputStream appendStream2 = fs2.append(testPath);
    appendStream2.write(fileContents);
    appendStream2.close();
    assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen());
  } finally {
    fs.close();
    fs2.close();
    cluster.shutdown();
  }
}
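The test shows that a second DistributedFileSystem instance may append once the first writer's lease soft limit has expired. In client code that deliberately takes over a file from a dead writer, a common pattern is to call recoverLease() and poll until it reports the file is closed before appending. A hedged sketch of that pattern follows; the path name and the polling interval are illustrative assumptions, and hdfs:// is assumed as the default filesystem so the cast succeeds.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class TakeOverWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem fs =
        (DistributedFileSystem) new Path("/").getFileSystem(conf);
    Path p = new Path("/testAppendAfterSoftLimit");   // path reused from the test, for illustration

    // Ask the NameNode to recover the previous writer's lease; poll until
    // recoverLease() returns true, meaning the file has been closed.
    while (!fs.recoverLease(p)) {
      Thread.sleep(1000);   // polling interval is an arbitrary choice
    }

    FSDataOutputStream out = fs.append(p);
    try {
      out.write(new byte[] { 1, 2, 3 });
      out.hflush();
    } finally {
      out.close();
      fs.close();
    }
  }
}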