use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestFileAppend method testAppend2AfterSoftLimit.
/** Tests appending after soft-limit expires. */
@Test
public void testAppend2AfterSoftLimit() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  // Set small soft-limit for lease
  final long softLimit = 1L;
  final long hardLimit = 9999999L;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.setLeasePeriod(softLimit, hardLimit);
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  DistributedFileSystem fs2 = new DistributedFileSystem();
  fs2.initialize(fs.getUri(), conf);
  final Path testPath = new Path("/testAppendAfterSoftLimit");
  final byte[] fileContents = AppendTestUtil.initBuffer(32);
  // create a new file without closing
  FSDataOutputStream out = fs.create(testPath);
  out.write(fileContents);
  // Wait for > soft-limit
  Thread.sleep(250);
  try {
    FSDataOutputStream appendStream2 = fs2.append(testPath,
        EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
    appendStream2.write(fileContents);
    appendStream2.close();
    assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen());
    // make sure we now have 1 block since the first writer was revoked
    LocatedBlocks blks = fs.getClient().getLocatedBlocks(testPath.toString(), 0L);
    assertEquals(1, blks.getLocatedBlocks().size());
    for (LocatedBlock blk : blks.getLocatedBlocks()) {
      assertEquals(fileContents.length, blk.getBlockSize());
    }
  } finally {
    fs.close();
    fs2.close();
    cluster.shutdown();
  }
}
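The core call in this example is the DistributedFileSystem#append overload that takes CreateFlag values. Below is a minimal sketch of just the second writer's side, assuming an already-existing file and a reachable NameNode; the URI, path, and data are illustrative, not taken from the test.

// Sketch: append from a second client once the first writer's lease
// soft-limit has expired. The URI, path, and data here are assumptions.
Configuration conf = new HdfsConfiguration();
DistributedFileSystem dfs = new DistributedFileSystem();
dfs.initialize(URI.create("hdfs://localhost:8020"), conf);
Path path = new Path("/testAppendAfterSoftLimit");
byte[] data = AppendTestUtil.initBuffer(32);
try {
  // NEW_BLOCK asks the NameNode to place the appended bytes in a fresh block
  FSDataOutputStream out = dfs.append(path,
      EnumSet.of(CreateFlag.APPEND, CreateFlag.NEW_BLOCK), 4096, null);
  out.write(data);
  out.close();
} finally {
  dfs.close();
}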
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestFileAppend method testSimpleFlush.
/**
* Test a simple flush on a simple HDFS file.
* @throws IOException an exception might be thrown
*/
@Test
public void testSimpleFlush() throws IOException {
  Configuration conf = new HdfsConfiguration();
  fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    // create a new file.
    Path file1 = new Path("/simpleFlush.dat");
    FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
    System.out.println("Created file simpleFlush.dat");
    // write to file
    int mid = AppendTestUtil.FILE_SIZE / 2;
    stm.write(fileContents, 0, mid);
    stm.hflush();
    System.out.println("Wrote and Flushed first part of file.");
    // write the remainder of the file
    stm.write(fileContents, mid, AppendTestUtil.FILE_SIZE - mid);
    System.out.println("Written second part of file");
    stm.hflush();
    stm.hflush();
    System.out.println("Wrote and Flushed second part of file.");
    // verify that full blocks are sane
    checkFile(fs, file1, 1);
    stm.close();
    System.out.println("Closed file.");
    // verify that entire file is good
    AppendTestUtil.checkFullFile(fs, file1, AppendTestUtil.FILE_SIZE, fileContents, "Read 2");
  } catch (IOException e) {
    System.out.println("Exception :" + e);
    throw e;
  } catch (Throwable e) {
    System.out.println("Throwable :" + e);
    e.printStackTrace();
    throw new IOException("Throwable : " + e);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
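Stripped down to the FSDataOutputStream calls, the pattern this test exercises is write-then-hflush. A minimal sketch, assuming `fs` is an already-initialized FileSystem and the two data arrays are prepared by the caller (all names here are illustrative):

// Sketch of the write/hflush pattern; `fs`, `firstHalf`, `secondHalf` are assumed.
Path p = new Path("/simpleFlush.dat");
FSDataOutputStream out = fs.create(p);
out.write(firstHalf);
// hflush makes the written bytes visible to new readers, but does not
// guarantee they are on disk at the datanodes (hsync provides that).
out.hflush();
out.write(secondHalf);
out.hflush();
out.hflush();  // a repeated hflush with no new data is harmless
out.close();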
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestFileAppend method testBreakHardlinksIfNeeded.
@Test
public void testBreakHardlinksIfNeeded() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  try {
    // create a new file, write to it and close it.
    Path file1 = new Path("/filestatus.dat");
    FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
    writeFile(stm);
    stm.close();
    // Get a handle to the datanode
    DataNode[] dn = cluster.listDataNodes();
    assertTrue("There should be only one datanode but found " + dn.length, dn.length == 1);
    LocatedBlocks locations = client.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    List<LocatedBlock> blocks = locations.getLocatedBlocks();
    final FsDatasetSpi<?> fsd = dn[0].getFSDataset();
    // Create hard links for a few of the blocks
    for (int i = 0; i < blocks.size(); i = i + 2) {
      ExtendedBlock b = blocks.get(i).getBlock();
      final File f = FsDatasetTestUtil.getBlockFile(fsd, b.getBlockPoolId(), b.getLocalBlock());
      File link = new File(f.toString() + ".link");
      System.out.println("Creating hardlink for File " + f + " to " + link);
      HardLink.createHardLink(f, link);
    }
    // Detach all blocks. This should remove hardlinks (if any)
    for (int i = 0; i < blocks.size(); i++) {
      ExtendedBlock b = blocks.get(i).getBlock();
      System.out.println("breakHardlinksIfNeeded detaching block " + b);
      assertTrue("breakHardlinksIfNeeded(" + b + ") should have returned true",
          FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
    }
    // The blocks were already detached above, so a second pass should
    // return false
    for (int i = 0; i < blocks.size(); i++) {
      ExtendedBlock b = blocks.get(i).getBlock();
      System.out.println("breakHardlinksIfNeeded re-attempting to " + "detach block " + b);
      assertFalse("breakHardlinksIfNeeded(" + b + ") should have returned false",
          FsDatasetTestUtil.breakHardlinksIfNeeded(fsd, b));
    }
  } finally {
    client.close();
    fs.close();
    cluster.shutdown();
  }
}
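For reference, the block-location lookup this test relies on can be done directly against the NameNode through DFSClient. A small sketch, with the address and path assumed rather than taken from a running MiniDFSCluster:

// Sketch: enumerate a file's blocks via DFSClient (address and path are assumptions).
InetSocketAddress nnAddr = new InetSocketAddress("localhost", 8020);
DFSClient client = new DFSClient(nnAddr, new HdfsConfiguration());
try {
  LocatedBlocks locs = client.getNamenode()
      .getBlockLocations("/filestatus.dat", 0, Long.MAX_VALUE);
  for (LocatedBlock blk : locs.getLocatedBlocks()) {
    System.out.println(blk.getBlock() + " size=" + blk.getBlockSize());
  }
} finally {
  client.close();
}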
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestFileAppend2 method testAppendLessThanChecksumChunk.
/**
* Make sure when the block length after appending is less than 512 bytes, the
* checksum re-calculation and overwrite are performed correctly.
*/
@Test
public void testAppendLessThanChecksumChunk() throws Exception {
  final byte[] buf = new byte[1024];
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(1).build();
  cluster.waitActive();
  try (DistributedFileSystem fs = cluster.getFileSystem()) {
    final int len1 = 200;
    final int len2 = 300;
    final Path p = new Path("/foo");
    FSDataOutputStream out = fs.create(p);
    out.write(buf, 0, len1);
    out.close();
    out = fs.append(p);
    out.write(buf, 0, len2);
    // flush but leave open
    out.hflush();
    // read data to verify the replica's content and checksum are correct
    FSDataInputStream in = fs.open(p);
    final int length = in.read(0, buf, 0, len1 + len2);
    assertTrue(length > 0);
    in.close();
    out.close();
  } finally {
    cluster.shutdown();
  }
}
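The verification step uses FSDataInputStream's positional read, which takes an explicit file offset and does not move the stream's current position. A minimal sketch, assuming an open FileSystem `fs`, a path `p`, and the two lengths from the test:

// Sketch: positional read of the first len1 + len2 bytes; `fs` and `p` are assumed.
try (FSDataInputStream in = fs.open(p)) {
  byte[] data = new byte[len1 + len2];
  // read(position, buffer, offset, length) may return fewer bytes than requested;
  // use readFully(position, buffer, offset, length) when an exact count is required.
  int n = in.read(0, data, 0, data.length);
}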
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestFileConcurrentReader method testUnfinishedBlockPacketBufferOverrun.
/**
* test case: if the BlockSender decides there is only one packet to send,
* the previous computation of the pktSize based on transferToAllowed
* would result in too small a buffer to do the buffer-copy needed
* for partial chunks.
*/
@Test(timeout = 30000)
public void testUnfinishedBlockPacketBufferOverrun() throws IOException {
  // check that / exists
  Path path = new Path("/");
  System.out.println("Path : \"" + path.toString() + "\"");
  // create a new file in the root, write data, do not close
  Path file1 = new Path("/unfinished-block");
  final FSDataOutputStream stm = TestFileCreation.createFile(fileSystem, file1, 1);
  // write partial block and sync
  final int bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
  final int partialBlockSize = bytesPerChecksum - 1;
  writeFileAndSync(stm, partialBlockSize);
  // Make sure a client can read it before it is closed
  checkCanRead(fileSystem, file1, partialBlockSize);
  stm.close();
}
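writeFileAndSync and checkCanRead are private helpers of TestFileConcurrentReader and are not shown here. A rough, hypothetical sketch of what the write-and-sync step amounts to, under the same assumptions (write fewer bytes than one checksum chunk, hflush, and leave the stream open):

// Hypothetical reconstruction of the write-and-sync helper, for illustration only.
private void writeFileAndSync(FSDataOutputStream stm, int size) throws IOException {
  byte[] buffer = AppendTestUtil.initBuffer(size);  // any test data of length `size`
  stm.write(buffer, 0, size);
  stm.hflush();  // push the partial chunk to the datanodes without closing the block
}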