use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestFileAppend3 method doSmallAppends.
// Do small appends.
void doSmallAppends(Path file, DistributedFileSystem fs, int iterations) throws IOException {
  for (int i = 0; i < iterations; i++) {
    FSDataOutputStream stm;
    try {
      stm = fs.append(file);
    } catch (IOException e) {
      // If another thread is already appending, skip this time.
      continue;
    }
    // Failure in write or close will be terminal.
    AppendTestUtil.write(stm, 0, 123);
    stm.close();
  }
}
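Because the catch block simply skips an iteration when another writer already holds the lease, the method is meant to be driven from several threads against the same file. A minimal sketch of such a driver within the same test class; the thread count, iteration count, and helper name below are illustrative assumptions, not values from the original test.

// Sketch only: drive doSmallAppends from several threads against one file.
// The thread count and iteration count are assumptions for this illustration.
void doConcurrentSmallAppends(final Path file, final DistributedFileSystem fs) throws InterruptedException {
  Thread[] writers = new Thread[4];
  for (int t = 0; t < writers.length; t++) {
    writers[t] = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          // Lease conflicts between the threads are skipped inside doSmallAppends.
          doSmallAppends(file, fs, 10);
        } catch (IOException e) {
          // Failure in write or close is terminal for this writer thread.
          throw new RuntimeException(e);
        }
      }
    });
    writers[t].start();
  }
  for (Thread w : writers) {
    w.join();
  }
}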
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestFileAppend4 method recoverFile.
/*
 * Recover the file lease.
 * Try to open the file in append mode. Doing this, we get hold of the file
 * that the crashed writer was writing to. Once we have it, close it; this
 * allows a subsequent reader to see data up to the last sync.
 * NOTE: This is the same algorithm that HBase uses for file recovery.
 * @param fs
 * @throws Exception
 */
private void recoverFile(final FileSystem fs) throws Exception {
  LOG.info("Recovering File Lease");
  // Set the soft limit to 1 second so that the
  // namenode triggers lease recovery upon an append request.
  cluster.setLeasePeriod(1000, HdfsConstants.LEASE_HARDLIMIT_PERIOD);
  // Try recovery.
  int tries = 60;
  boolean recovered = false;
  FSDataOutputStream out = null;
  while (!recovered && tries-- > 0) {
    try {
      out = fs.append(file1);
      LOG.info("Successfully opened for append");
      recovered = true;
    } catch (IOException e) {
      LOG.info("Failed open for append, waiting on lease recovery");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ex) {
        // Ignore it and try again.
      }
    }
  }
  if (out != null) {
    out.close();
  }
  if (!recovered) {
    fail("Recovery should take < 1 min");
  }
  LOG.info("Past out lease recovery");
}
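The append-and-close loop above is one way to force lease recovery; HDFS also exposes the same operation directly through DistributedFileSystem#recoverLease. A minimal sketch of that alternative, assuming the FileSystem handle can be cast to DistributedFileSystem and that file1 is the path being recovered; the method name and retry values are assumptions for this illustration.

// Sketch only: force lease recovery via DistributedFileSystem#recoverLease
// instead of the append-and-close loop. Assumes fs is a DistributedFileSystem.
private void recoverLeaseDirectly(final FileSystem fs) throws Exception {
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  int tries = 60;
  boolean recovered = false;
  while (!recovered && tries-- > 0) {
    // recoverLease returns true once the lease has been released
    // and the last block of the file has been finalized.
    recovered = dfs.recoverLease(file1);
    if (!recovered) {
      Thread.sleep(1000);
    }
  }
  if (!recovered) {
    fail("Lease recovery should take < 1 min");
  }
}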
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestFileAppend4 method testUpdateNeededReplicationsForAppendedFile.
/**
 * Test the update of NeededReplications for the appended block.
 */
@Test(timeout = 60000)
public void testUpdateNeededReplicationsForAppendedFile() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  DistributedFileSystem fileSystem = null;
  try {
    // Create a file with replication factor 2 on a single-datanode cluster.
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();
    // Append to the file.
    FSDataOutputStream append = fileSystem.append(f);
    append.write("/testAppend".getBytes());
    append.close();
    // Start a new datanode.
    cluster.startDataNodes(conf, 1, true, null, null);
    // Check that the appended block reaches the expected replication.
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
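DFSTestUtil.waitReplication blocks until every block of the file is actually hosted on the expected number of datanodes. A minimal sketch of that check done by hand, useful for seeing the difference between the requested replication factor and the on-disk replica count; the helper name below is an assumption made for this illustration.

// Sketch only: a manual version of what DFSTestUtil.waitReplication asserts,
// counting the datanodes that actually hold each block of the file.
// The helper name is an assumption, not part of the original test.
static void assertActualReplication(FileSystem fs, Path path, short expected) throws IOException {
  FileStatus stat = fs.getFileStatus(path);
  BlockLocation[] blocks = fs.getFileBlockLocations(stat, 0, stat.getLen());
  for (BlockLocation block : blocks) {
    // Each block should be hosted on the expected number of datanodes.
    Assert.assertEquals(expected, block.getHosts().length);
  }
}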
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestBlockToken method testBlockTokenInLastLocatedBlock.
/**
 * This test writes a file and gets the block locations without closing the
 * file, and tests the block token in the last block. The block token is
 * verified by ensuring it is of the correct kind.
 *
 * @throws IOException
 * @throws InterruptedException
 */
private void testBlockTokenInLastLocatedBlock(boolean enableProtobuf) throws IOException, InterruptedException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE, enableProtobuf);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  try {
    FileSystem fs = cluster.getFileSystem();
    String fileName = "/testBlockTokenInLastLocatedBlock";
    Path filePath = new Path(fileName);
    FSDataOutputStream out = fs.create(filePath, (short) 1);
    out.write(new byte[1000]);
    // Ensure that the first block is written out (see FSOutputSummer#flush).
    out.flush();
    LocatedBlocks locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName, 0, 1000);
    while (locatedBlocks.getLastLocatedBlock() == null) {
      Thread.sleep(100);
      locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName, 0, 1000);
    }
    Token<BlockTokenIdentifier> token = locatedBlocks.getLastLocatedBlock().getBlockToken();
    Assert.assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
    out.close();
  } finally {
    cluster.shutdown();
  }
}
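Beyond checking the token kind, the identifier embedded in the block token can be decoded for closer inspection via Token#decodeIdentifier. A minimal sketch of that extra check, which is not part of the original test; what is done with the decoded identifier here is purely illustrative.

// Sketch only: decode the BlockTokenIdentifier carried by the last block's token.
// The non-null assertion and the use made of the identifier are illustrative additions.
Token<BlockTokenIdentifier> lastToken = locatedBlocks.getLastLocatedBlock().getBlockToken();
BlockTokenIdentifier identifier = lastToken.decodeIdentifier();
Assert.assertNotNull(identifier);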
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestMover method testWithinSameNode.
private void testWithinSameNode(Configuration conf) throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testScheduleWithinSameNode/file";
    Path dir = new Path("/testScheduleWithinSameNode");
    dfs.mkdirs(dir);
    // Write to DISK.
    dfs.setStoragePolicy(dir, "HOT");
    final FSDataOutputStream out = dfs.create(new Path(file));
    out.writeChars("testScheduleWithinSameNode");
    out.close();
    // Verify the storage types before movement.
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    StorageType[] storageTypes = lb.getStorageTypes();
    for (StorageType storageType : storageTypes) {
      Assert.assertTrue(StorageType.DISK == storageType);
    }
    // Move to ARCHIVE.
    dfs.setStoragePolicy(dir, "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", dir.toString() });
    Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
    // Wait till the namenode is notified about the new block locations.
    waitForLocatedBlockWithArchiveStorageType(dfs, file, 3);
  } finally {
    cluster.shutdown();
  }
}
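The helper waitForLocatedBlockWithArchiveStorageType is referenced above but not shown in this excerpt. A plausible shape for such a polling check is sketched below; the method body, retry count, and sleep interval are assumptions for illustration, not the actual implementation from TestMover.

// Sketch only: poll the namenode until the first block of the file reports the
// expected number of ARCHIVE replicas. Retry count and interval are assumptions.
private void waitForLocatedBlockWithArchiveStorageType(final DistributedFileSystem dfs, final String file, final int expectedArchiveCount) throws Exception {
  int retries = 30;
  while (retries-- > 0) {
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    int archiveCount = 0;
    for (StorageType type : lb.getStorageTypes()) {
      if (type == StorageType.ARCHIVE) {
        archiveCount++;
      }
    }
    if (archiveCount == expectedArchiveCount) {
      return;
    }
    Thread.sleep(1000);
  }
  Assert.fail("Block did not reach " + expectedArchiveCount + " ARCHIVE replicas in time");
}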