Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by Apache: class TestFileCreation, method checkFileCreation.
/**
 * Test that file creation and disk space consumption work correctly.
 * @param netIf the local interface, if any, clients should use to access DNs
 * @param useDnHostname whether the client should contact DNs by hostname
 */
public void checkFileCreation(String netIf, boolean useDnHostname) throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (netIf != null) {
    conf.set(HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
  }
  conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
  if (useDnHostname) {
    // Since the mini cluster only listens on the loopback we have to
    // ensure the hostname used to access DNs maps to the loopback. We
    // do this by telling the DN to advertise localhost as its hostname
    // instead of the default hostname.
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
  }
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .checkDataNodeHostConfig(true)
      .build();
  FileSystem fs = cluster.getFileSystem();
  try {
    //
    // check that / exists
    //
    Path path = new Path("/");
    System.out.println("Path : \"" + path.toString() + "\"");
    System.out.println(fs.getFileStatus(path).isDirectory());
    assertTrue("/ should be a directory", fs.getFileStatus(path).isDirectory());

    //
    // Create a directory inside /, then try to overwrite it
    //
    Path dir1 = new Path("/test_dir");
    fs.mkdirs(dir1);
    System.out.println("createFile: Creating " + dir1.getName()
        + " for overwrite of existing directory.");
    try {
      // Create path, overwrite=true
      fs.create(dir1, true);
      fs.close();
      assertTrue("Did not prevent directory from being overwritten.", false);
    } catch (FileAlreadyExistsException e) {
      // expected
    }

    //
    // create a new file in home directory. Do not close it.
    //
    Path file1 = new Path("filestatus.dat");
    Path parent = file1.getParent();
    fs.mkdirs(parent);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    dfs.setQuota(file1.getParent(), 100L, blockSize * 5);
    FSDataOutputStream stm = createFile(fs, file1, 1);

    // verify that file exists in FS namespace
    assertTrue(file1 + " should be a file", fs.getFileStatus(file1).isFile());
    System.out.println("Path : \"" + file1 + "\"");

    // write to file
    writeFile(stm);
    stm.close();

    // verify that file size has changed to the full size
    long len = fs.getFileStatus(file1).getLen();
    assertTrue(file1 + " should be of size " + fileSize
        + " but found to be of size " + len, len == fileSize);

    // verify the disk space the file occupied
    long diskSpace = dfs.getContentSummary(file1.getParent()).getLength();
    assertEquals(file1 + " should take " + fileSize + " bytes disk space "
        + "but found to take " + diskSpace + " bytes", fileSize, diskSpace);

    // can't check capacities for real storage since the OS file system may be changing under us.
    if (simulatedStorage) {
      DataNode dn = cluster.getDataNodes().get(0);
      FsDatasetSpi<?> dataset = DataNodeTestUtils.getFSDataset(dn);
      assertEquals(fileSize, dataset.getDfsUsed());
      assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY - fileSize, dataset.getRemaining());
    }
  } finally {
    cluster.shutdown();
  }
}
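For context, a minimal caller sketch. The test method names and JUnit annotations below are hypothetical and are not taken from the snippet above; they only show how the two parameters would typically be exercised.

// Hypothetical callers; assumes JUnit 4 and that checkFileCreation is in scope.
@Test
public void testFileCreation() throws IOException {
  // default behavior: no local interface, contact DataNodes by IP
  checkFileCreation(null, false);
}

@Test
public void testFileCreationUsingHostname() throws IOException {
  // exercise the useDnHostname=true branch configured above
  checkFileCreation(null, true);
}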
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by Apache: class TestFileCreation, method createFile.
// creates a file but does not close it
public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
  System.out.println("createFile: Created " + name + " with " + repl + " replica.");
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short) repl, blockSize);
  return stm;
}
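A minimal usage sketch with a hypothetical path and payload. The helper deliberately leaves the stream open, so the caller decides when to write and close.

// Hypothetical usage of the helper above; fs is an open FileSystem from the test cluster.
FSDataOutputStream out = createFile(fs, new Path("/demo/usage.dat"), 1);
out.write(new byte[] { 1, 2, 3 }); // illustrative payload
out.close();                       // caller is responsible for closing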
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by Apache: class TestFileCreation, method testFileCreationSyncOnClose.
/**
 * Test creating a file whose data gets synced when the file is closed.
 */
@Test
public void testFileCreationSyncOnClose() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFS_DATANODE_SYNCONCLOSE_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path[] p = { new Path("/foo"), new Path("/bar") };

    // write 2 files at the same time
    FSDataOutputStream[] out = { fs.create(p[0]), fs.create(p[1]) };
    int i = 0;
    for (; i < 100; i++) {
      out[0].write(i);
      out[1].write(i);
    }
    out[0].close();
    for (; i < 200; i++) {
      out[1].write(i);
    }
    out[1].close();

    // verify
    FSDataInputStream[] in = { fs.open(p[0]), fs.open(p[1]) };
    for (i = 0; i < 100; i++) {
      assertEquals(i, in[0].read());
    }
    for (i = 0; i < 200; i++) {
      assertEquals(i, in[1].read());
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
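For reference, a hedged sketch of enabling the same DataNode behavior through the raw property name. The mapping of DFS_DATANODE_SYNCONCLOSE_KEY to this property is an assumption; verify it against your Hadoop version.

// Assumption: the constant used above resolves to the property below.
Configuration conf = new HdfsConfiguration();
conf.setBoolean("dfs.datanode.synconclose", true); // DataNodes sync block data to disk when a block is closed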
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by Apache: class TestHFlush, method testPipelineHeartbeat.
/** This creates a slow writer and checks
 * whether pipeline heartbeats work correctly.
 */
@Test
public void testPipelineHeartbeat() throws Exception {
  final int DATANODE_NUM = 2;
  final int fileLen = 6;
  Configuration conf = new HdfsConfiguration();
  final int timeout = 2000;
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
  final Path p = new Path("/pipelineHeartbeat/foo");
  System.out.println("p=" + p);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
  try {
    DistributedFileSystem fs = cluster.getFileSystem();
    byte[] fileContents = AppendTestUtil.initBuffer(fileLen);

    // create a new file.
    FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, DATANODE_NUM);
    stm.write(fileContents, 0, 1);
    Thread.sleep(timeout);
    stm.hflush();
    System.out.println("Wrote 1 byte and hflush " + p);

    // write another byte
    Thread.sleep(timeout);
    stm.write(fileContents, 1, 1);
    stm.hflush();
    stm.write(fileContents, 2, 1);
    Thread.sleep(timeout);
    stm.hflush();
    stm.write(fileContents, 3, 1);
    Thread.sleep(timeout);
    stm.write(fileContents, 4, 1);
    stm.hflush();
    stm.write(fileContents, 5, 1);
    Thread.sleep(timeout);
    stm.close();

    // verify that entire file is good
    AppendTestUtil.checkFullFile(fs, p, fileLen, fileContents, "Failed to slowly write to a file");
  } finally {
    cluster.shutdown();
  }
}
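A minimal sketch of the hflush visibility contract the slow writer relies on: after hflush() returns, a new reader can see the flushed bytes even though the file is still open for writing. The path below is hypothetical and fs is assumed to be an open FileSystem.

// Hedged sketch, not part of the test above.
FSDataOutputStream out = fs.create(new Path("/hflush-demo"));
out.write(new byte[] { 1, 2, 3 });
out.hflush(); // make the bytes visible to new readers without closing the file
FSDataInputStream in = fs.open(new Path("/hflush-demo"));
System.out.println(in.read()); // expected to print 1
in.close();
out.close();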
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by Apache: class TestFileCreationDelete, method testFileCreationDeleteParent.
@Test
public void testFileCreationDeleteParent() throws IOException {
  Configuration conf = new HdfsConfiguration();
  // 2s
  final int MAX_IDLE_TIME = 2000;
  conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

  // create cluster
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = null;
  try {
    cluster.waitActive();
    fs = cluster.getFileSystem();

    // create file1.
    Path dir = new Path("/foo");
    Path file1 = new Path(dir, "file1");
    FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file1);
    TestFileCreation.writeFile(stm1, 1000);
    stm1.hflush();

    // create file2.
    Path file2 = new Path("/file2");
    FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
    System.out.println("testFileCreationDeleteParent: " + "Created file " + file2);
    TestFileCreation.writeFile(stm2, 1000);
    stm2.hflush();

    // rm dir
    fs.delete(dir, true);

    // restart cluster.
    // This ensures that leases are persisted in fsimage.
    cluster.shutdown();
    try {
      Thread.sleep(2 * MAX_IDLE_TIME);
    } catch (InterruptedException e) {
    }
    cluster = new MiniDFSCluster.Builder(conf).format(false).build();
    cluster.waitActive();

    // restart cluster yet again. This triggers the code to read in
    // persistent leases from fsimage.
    cluster.shutdown();
    try {
      Thread.sleep(5000);
    } catch (InterruptedException e) {
    }
    cluster = new MiniDFSCluster.Builder(conf).format(false).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();

    assertTrue(!fs.exists(file1));
    assertTrue(fs.exists(file2));
  } finally {
    fs.close();
    cluster.shutdown();
  }
}