Use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.
The class TestBlockManager, method testNeededReconstructionWhileAppending:
@Test(timeout = 60000)
public void testNeededReconstructionWhileAppending() throws IOException {
  Configuration conf = new HdfsConfiguration();
  String src = "/test-file";
  Path file = new Path(src);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  try {
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    NamenodeProtocols namenode = cluster.getNameNodeRpc();
    DFSOutputStream out = null;
    try {
      out = (DFSOutputStream) (fs.create(file).getWrappedStream());
      out.write(1);
      out.hflush();
      out.close();
      FSDataInputStream in = null;
      ExtendedBlock oldBlock = null;
      try {
        in = fs.open(file);
        oldBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
      } finally {
        IOUtils.closeStream(in);
      }
      String clientName =
          ((DistributedFileSystem) fs).getClient().getClientName();
      namenode.append(src, clientName,
          new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
      // Re-opening for append bumps the generation stamp; rebuild the block
      // with the new stamp before updating the pipeline.
      LocatedBlock newLocatedBlock =
          namenode.updateBlockForPipeline(oldBlock, clientName);
      ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
          oldBlock.getBlockId(), oldBlock.getNumBytes(),
          newLocatedBlock.getBlock().getGenerationStamp());
      namenode.updatePipeline(clientName, oldBlock, newBlock,
          newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());
      BlockInfo bi = bm.getStoredBlock(newBlock.getLocalBlock());
      // A block that is still being appended to must not be queued for reconstruction.
      assertFalse(bm.isNeededReconstruction(bi,
          bm.countNodes(bi, cluster.getNamesystem().isInStartupSafeMode())));
    } finally {
      IOUtils.closeStream(out);
    }
  } finally {
    cluster.shutdown();
  }
}
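The test above drives the append through raw NamenodeProtocols calls so it can manipulate the pipeline by hand. Regular client code reaches the same DFSOutputStream by unwrapping the stream returned from FileSystem; a minimal sketch, assuming a running MiniDFSCluster and a hypothetical /example path:

FileSystem fs = cluster.getFileSystem();
// Hypothetical path used only for illustration.
Path example = new Path("/example");
try (FSDataOutputStream out = fs.create(example)) {
  out.write(1);
  // getWrappedStream() exposes the DFSOutputStream that manages the block pipeline.
  DFSOutputStream dfsOut = (DFSOutputStream) out.getWrappedStream();
  // hflush() makes the written byte visible to new readers.
  dfsOut.hflush();
}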
Use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.
The class TestDataNodeRollingUpgrade, method testDatanodePeersXceiver:
@Test(timeout = 600000)
// Test DatanodeXceiver has correct peer-dataxceiver pairs for sending OOB message
public void testDatanodePeersXceiver() throws Exception {
  try {
    startCluster();
    // Create files in DFS.
    String testFile1 = "/" + GenericTestUtils.getMethodName() + ".01.dat";
    String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat";
    String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat";
    DFSClient client1 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
    DFSClient client2 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
    DFSClient client3 = new DFSClient(DFSUtilClient.getNNAddress(conf), conf);
    DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
    DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);
    DFSOutputStream s3 = (DFSOutputStream) client3.create(testFile3, true);
    byte[] toWrite = new byte[1024 * 1024 * 8];
    Random rb = new Random(1111);
    rb.nextBytes(toWrite);
    s1.write(toWrite, 0, 1024 * 1024 * 8);
    s1.flush();
    s2.write(toWrite, 0, 1024 * 1024 * 8);
    s2.flush();
    s3.write(toWrite, 0, 1024 * 1024 * 8);
    s3.flush();
    assertTrue(dn0.getXferServer().getNumPeersXceiver() == dn0.getXferServer()
        .getNumPeersXceiver());
    s1.close();
    s2.close();
    s3.close();
    assertTrue(dn0.getXferServer().getNumPeersXceiver() == dn0.getXferServer()
        .getNumPeersXceiver());
    client1.close();
    client2.close();
    client3.close();
  } finally {
    shutdownCluster();
  }
}
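The writes above use plain flush(), which only pushes buffered bytes toward the client's internal packet queue and gives no durability guarantee; that is enough here because the test only needs open xceiver connections. When data must actually reach the datanodes, DFSOutputStream offers hflush() and hsync(). A minimal sketch reusing client1 and toWrite from the test, with a hypothetical file name:

// Hypothetical file name for illustration.
DFSOutputStream s = (DFSOutputStream) client1.create("/oob-demo.dat", true);
s.write(toWrite, 0, toWrite.length);
// hflush(): the data has reached every datanode in the write pipeline.
s.hflush();
// hsync(): additionally forces the datanodes to sync the block data to disk.
s.hsync();
s.close();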
Use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.
The class TestDataNodeVolumeMetrics, method testVolumeMetrics:
@Test
public void testVolumeMetrics() throws Exception {
  MiniDFSCluster cluster = setupClusterForVolumeMetrics();
  try {
    FileSystem fs = cluster.getFileSystem();
    final Path fileName = new Path("/test.dat");
    final long fileLen = Integer.MAX_VALUE + 1L;
    DFSTestUtil.createFile(fs, fileName, false, BLOCK_SIZE, fileLen,
        fs.getDefaultBlockSize(fileName), REPL, 1L, true);
    try (FSDataOutputStream out = fs.append(fileName)) {
      out.writeBytes("hello world");
      ((DFSOutputStream) out.getWrappedStream()).hsync();
    }
    verifyDataNodeVolumeMetrics(fs, cluster, fileName);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
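The hsync() above is called without flags, so the datanodes sync the data but the NameNode's recorded file length is only updated when the stream is closed. When an up-to-date length is needed before close (as the quota test further below relies on), SyncFlag.UPDATE_LENGTH can be passed; a minimal sketch, assuming the same fs and fileName:

try (FSDataOutputStream out = fs.append(fileName)) {
  out.writeBytes("more data");
  // UPDATE_LENGTH also publishes the new file length to the NameNode.
  ((DFSOutputStream) out.getWrappedStream())
      .hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
}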
Use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.
The class TestStorageMover, method testMigrateOpenFileToArchival:
/**
 * Move an open file into archival storage.
 */
@Test
public void testMigrateOpenFileToArchival() throws Exception {
  LOG.info("testMigrateOpenFileToArchival");
  final Path fooDir = new Path("/foo");
  Map<Path, BlockStoragePolicy> policyMap = Maps.newHashMap();
  policyMap.put(fooDir, COLD);
  NamespaceScheme nsScheme =
      new NamespaceScheme(Arrays.asList(fooDir), null, BLOCK_SIZE, null, policyMap);
  ClusterScheme clusterScheme = new ClusterScheme(DEFAULT_CONF, NUM_DATANODES,
      REPL, genStorageTypes(NUM_DATANODES), null);
  MigrationTest test = new MigrationTest(clusterScheme, nsScheme);
  test.setupCluster();
  // create an open file
  banner("writing to file /foo/bar");
  final Path barFile = new Path(fooDir, "bar");
  DFSTestUtil.createFile(test.dfs, barFile, BLOCK_SIZE, (short) 1, 0L);
  FSDataOutputStream out = test.dfs.append(barFile);
  out.writeBytes("hello, ");
  ((DFSOutputStream) out.getWrappedStream()).hsync();
  try {
    banner("start data migration");
    // set /foo to COLD
    test.setStoragePolicy();
    test.migrate(ExitStatus.SUCCESS);
    // make sure the under construction block has not been migrated
    LocatedBlocks lbs = test.dfs.getClient().getLocatedBlocks(
        barFile.toString(), BLOCK_SIZE);
    LOG.info("Locations: " + lbs);
    List<LocatedBlock> blks = lbs.getLocatedBlocks();
    Assert.assertEquals(1, blks.size());
    Assert.assertEquals(1, blks.get(0).getLocations().length);
    banner("finish the migration, continue writing");
    // make sure the writing can continue
    out.writeBytes("world!");
    ((DFSOutputStream) out.getWrappedStream()).hsync();
    IOUtils.cleanup(LOG, out);
    lbs = test.dfs.getClient().getLocatedBlocks(barFile.toString(), BLOCK_SIZE);
    LOG.info("Locations: " + lbs);
    blks = lbs.getLocatedBlocks();
    Assert.assertEquals(1, blks.size());
    Assert.assertEquals(1, blks.get(0).getLocations().length);
    banner("finish writing, starting reading");
    // check the content of /foo/bar
    FSDataInputStream in = test.dfs.open(barFile);
    byte[] buf = new byte[13];
    // read from offset 1024
    in.readFully(BLOCK_SIZE, buf, 0, buf.length);
    IOUtils.cleanup(LOG, in);
    Assert.assertEquals("hello, world!", new String(buf));
  } finally {
    test.shutdownCluster();
  }
}
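The COLD policy here is applied through the MigrationTest harness (test.setStoragePolicy()); outside that harness, the same thing can be done directly on the file system. A minimal sketch, assuming dfs is the DistributedFileSystem for the cluster (test.dfs in the snippet) and /foo exists:

// "COLD" maps every replica of files under /foo to ARCHIVE storage.
dfs.setStoragePolicy(new Path("/foo"), "COLD");
// The Mover tool (hdfs mover) then migrates existing replicas to match the policy.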
Use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.
The class TestDiskspaceQuotaUpdate, method testUpdateQuotaForFSync:
/**
 * Test if the quota can be correctly updated when the file length is updated
 * through fsync.
 */
@Test(timeout = 60000)
public void testUpdateQuotaForFSync() throws Exception {
  final Path foo =
      new Path(getParent(GenericTestUtils.getMethodName()), "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(getDFS(), bar, BLOCKSIZE, REPLICATION, 0L);
  getDFS().setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  FSDataOutputStream out = getDFS().append(bar);
  out.write(new byte[BLOCKSIZE / 4]);
  // UPDATE_LENGTH makes the NameNode pick up the new length of the
  // under-construction file.
  ((DFSOutputStream) out.getWrappedStream())
      .hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
  INodeDirectory fooNode =
      getFSDirectory().getINode4Write(foo.toString()).asDirectory();
  QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  long ns = quota.getNameSpace();
  long ds = quota.getStorageSpace();
  // foo and bar
  assertEquals(2, ns);
  // While the file is under construction, its last block is charged as a
  // full block, so usage is two full blocks times the replication factor.
  assertEquals(BLOCKSIZE * 2 * REPLICATION, ds);
  out.write(new byte[BLOCKSIZE / 4]);
  out.close();
  fooNode = getFSDirectory().getINode4Write(foo.toString()).asDirectory();
  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns);
  // After close, only the actual length (1.5 blocks) is charged.
  assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);
  // append another block
  DFSTestUtil.appendFile(getDFS(), bar, BLOCKSIZE);
  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  // foo and bar
  assertEquals(2, ns);
  assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
}
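The assertions above read the consumed quota straight from the NameNode's INode structures; a similar view of consumption is available through the public API. A minimal sketch, assuming the foo directory from the test:

ContentSummary summary = getDFS().getContentSummary(foo);
// Namespace usage: one directory (foo) plus one file (bar).
long nsUsed = summary.getDirectoryCount() + summary.getFileCount();
// Storage usage in bytes, including replication.
long dsUsed = summary.getSpaceConsumed();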