use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestAbandonBlock method testQuotaUpdatedWhenBlockAbandoned.
/** Make sure that the quota is decremented correctly when a block is abandoned */
@Test
public void testQuotaUpdatedWhenBlockAbandoned() throws IOException {
// Set the disk space quota to 3MB.
fs.setQuota(new Path("/"), HdfsConstants.QUOTA_DONT_SET, 3 * 1024 * 1024);
// Start writing a file with 2 replicas to ensure each datanode has one.
// Block Size is 1MB.
String src = FILE_NAME_PREFIX + "test_quota1";
FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short) 2, 1024 * 1024);
for (int i = 0; i < 1024; i++) {
fout.writeByte(123);
}
// Shut down one datanode, causing the block to be abandoned.
cluster.getDataNodes().get(0).shutdown();
// Close the file; a new block will be allocated with a 2MB pending size.
try {
fout.close();
} catch (QuotaExceededException e) {
fail("Unexpected quota exception when closing fout");
}
}
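For context, here is a minimal client-side sketch of the quota API the test exercises. The path, sizes, and helper name are illustrative assumptions, not taken from the test; only setQuota, QUOTA_DONT_SET, and DSQuotaExceededException come from the Hadoop API itself.
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

static void writeUnderSpaceQuota(DistributedFileSystem dfs) throws IOException {
  // Cap raw disk usage under /data at 10MB; leave the namespace quota unset.
  dfs.setQuota(new Path("/data"), HdfsConstants.QUOTA_DONT_SET, 10L * 1024 * 1024);
  try (FSDataOutputStream out = dfs.create(new Path("/data/file"))) {
    out.write(new byte[4096]);
  } catch (DSQuotaExceededException e) {
    // Quota is charged at block allocation, so a violation surfaces from
    // write() or close() rather than from setQuota() itself.
    System.err.println("Space quota exceeded: " + e.getMessage());
  }
}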
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class FileAppendTest4 method testAppend.
/**
* Comprehensive test for append
* @throws IOException an exception might be thrown
*/
@Test
public void testAppend() throws IOException {
final int maxOldFileLen = 2 * BLOCK_SIZE + 1;
final int maxFlushedBytes = BLOCK_SIZE;
byte[] contents = AppendTestUtil.initBuffer(maxOldFileLen + 2 * maxFlushedBytes);
for (int oldFileLen = 0; oldFileLen <= maxOldFileLen; oldFileLen++) {
for (int flushedBytes1 = 0; flushedBytes1 <= maxFlushedBytes; flushedBytes1++) {
for (int flushedBytes2 = 0; flushedBytes2 <= maxFlushedBytes; flushedBytes2++) {
final int fileLen = oldFileLen + flushedBytes1 + flushedBytes2;
// create the initial file of oldFileLen
final Path p = new Path("foo" + oldFileLen + "_" + flushedBytes1 + "_" + flushedBytes2);
LOG.info("Creating file " + p);
FSDataOutputStream out = fs.create(p, false, conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), REPLICATION, BLOCK_SIZE);
out.write(contents, 0, oldFileLen);
out.close();
// append flushedBytes1 bytes to the file
out = fs.append(p);
out.write(contents, oldFileLen, flushedBytes1);
out.hflush();
// write another flushedBytes2 bytes to the file
out.write(contents, oldFileLen + flushedBytes1, flushedBytes2);
out.close();
// validate the file content
AppendTestUtil.checkFullFile(fs, p, fileLen, contents, p.toString());
fs.delete(p, false);
}
}
}
}
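The test above cycles through create/close/append/hflush for many length combinations; stripped down to a single pass, the underlying FSDataOutputStream idiom looks roughly like this sketch (the path, split point, and helper name are illustrative assumptions):
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static void createThenAppend(FileSystem fs, byte[] data) throws IOException {
  Path p = new Path("/tmp/appendDemo");
  int half = data.length / 2;
  // Write the first half and close, fixing the initial file length.
  try (FSDataOutputStream out = fs.create(p, false)) {
    out.write(data, 0, half);
  }
  // Reopen for append. hflush() makes the appended bytes visible to new
  // readers without closing the stream; durability needs hsync() or close().
  try (FSDataOutputStream out = fs.append(p)) {
    out.write(data, half, data.length - half);
    out.hflush();
  }
}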
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestBlockStoragePolicy method testGetFileStoragePolicyAfterRestartNN.
@Test
public void testGetFileStoragePolicyAfterRestartNN() throws Exception {
// HDFS-8219
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
cluster.waitActive();
final DistributedFileSystem fs = cluster.getFileSystem();
try {
final String file = "/testScheduleWithinSameNode/file";
Path dir = new Path("/testScheduleWithinSameNode");
// 1. Create dir
fs.mkdirs(dir);
// 2. Set dir policy to COLD
fs.setStoragePolicy(dir, "COLD");
// 3. Create file
final FSDataOutputStream out = fs.create(new Path(file));
out.writeChars("testScheduleWithinSameNode");
out.close();
// 4. Change dir policy to HOT
fs.setStoragePolicy(dir, "HOT");
HdfsFileStatus status = fs.getClient().getFileInfo(file);
// 5. Get the file policy; it should match the parent policy (HOT).
Assert.assertTrue("File storage policy should be HOT", status.getStoragePolicy() == HOT);
// 6. Restart the NameNode so it reloads the edit log.
cluster.restartNameNode(true);
// 7. Get the file policy again; it should still match the parent policy.
status = fs.getClient().getFileInfo(file);
Assert.assertTrue("File storage policy should be HOT", status.getStoragePolicy() == HOT);
} finally {
cluster.shutdown();
}
}
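A minimal sketch of the storage-policy API the test exercises, assuming a Hadoop release where FileSystem#getStoragePolicy is available (the directory name and helper name are illustrative):
import java.io.IOException;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

static void showPolicy(DistributedFileSystem dfs) throws IOException {
  Path dir = new Path("/cold-data");
  dfs.mkdirs(dir);
  // Files created under the directory inherit its policy unless they
  // set one of their own.
  dfs.setStoragePolicy(dir, "COLD");
  BlockStoragePolicySpi policy = dfs.getStoragePolicy(dir);
  System.out.println(dir + " -> " + policy.getName());
}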
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestBlocksScheduledCounter method testScheduledBlocksCounterShouldDecrementOnAbandonBlock.
/**
* Abandon block should decrement the scheduledBlocks count for the dataNode.
*/
@Test
public void testScheduledBlocksCounterShouldDecrementOnAbandonBlock() throws Exception {
cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(2).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DatanodeManager datanodeManager = cluster.getNamesystem().getBlockManager().getDatanodeManager();
ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
datanodeManager.fetchDatanodes(dnList, dnList, false);
for (DatanodeDescriptor descriptor : dnList) {
assertEquals("Blocks scheduled should be 0 for " + descriptor.getName(), 0, descriptor.getBlocksScheduled());
}
cluster.getDataNodes().get(0).shutdown();
// Open a file and write a few bytes.
FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"), (short) 2);
for (int i = 0; i < 1024; i++) {
out.write(i);
}
// flush to make sure a block is allocated.
out.hflush();
DatanodeDescriptor abandonedDn = datanodeManager.getDatanode(cluster.getDataNodes().get(0).getDatanodeId());
assertEquals("for the abandoned dn scheduled counts should be 0", 0, abandonedDn.getBlocksScheduled());
for (DatanodeDescriptor descriptor : dnList) {
if (descriptor.equals(abandonedDn)) {
continue;
}
assertEquals("Blocks scheduled should be 1 for " + descriptor.getName(), 1, descriptor.getBlocksScheduled());
}
// close the file and the counter should go to zero.
out.close();
for (DatanodeDescriptor descriptor : dnList) {
assertEquals("Blocks scheduled should be 0 for " + descriptor.getName(), 0, descriptor.getBlocksScheduled());
}
}
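Both counter tests hflush() before asserting; the reason is that the NameNode only schedules a block once data actually heads down the write pipeline. A minimal sketch of that idiom (path, replication, and helper name are illustrative assumptions):
import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static FSDataOutputStream createWithAllocatedBlock(FileSystem fs) throws IOException {
  // create(Path, short) creates the file with the given replication factor
  // and default buffer and block sizes.
  FSDataOutputStream out = fs.create(new Path("/demo"), (short) 2);
  out.write(1);
  // hflush() pushes the buffered byte down the pipeline, guaranteeing the
  // NameNode has allocated (and scheduled) a block before we assert on it.
  out.hflush();
  return out;
}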
use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
the class TestBlocksScheduledCounter method testBlocksScheduledCounter.
@Test
public void testBlocksScheduledCounter() throws IOException {
cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
cluster.waitActive();
fs = cluster.getFileSystem();
// Open a file and write a few bytes.
FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
for (int i = 0; i < 1024; i++) {
out.write(i);
}
// flush to make sure a block is allocated.
out.hflush();
ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
final DatanodeManager dm = cluster.getNamesystem().getBlockManager().getDatanodeManager();
dm.fetchDatanodes(dnList, dnList, false);
DatanodeDescriptor dn = dnList.get(0);
assertEquals(1, dn.getBlocksScheduled());
// close the file and the counter should go to zero.
out.close();
assertEquals(0, dn.getBlocksScheduled());
}
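Both tests pass the same list for the live and dead arguments of DatanodeManager#fetchDatanodes, collecting every descriptor into one list. Kept separate, the idiom looks like this sketch (the helper name is an assumption; DatanodeManager and DatanodeDescriptor are NameNode-internal classes, reachable only from tests or the NameNode itself):
import java.util.ArrayList;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;

static int totalBlocksScheduled(DatanodeManager dm) {
  ArrayList<DatanodeDescriptor> live = new ArrayList<>();
  ArrayList<DatanodeDescriptor> dead = new ArrayList<>();
  // Third argument: whether to strip decommissioned nodes from the results.
  dm.fetchDatanodes(live, dead, false);
  int total = 0;
  for (DatanodeDescriptor dn : live) {
    total += dn.getBlocksScheduled();
  }
  return total;
}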