use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.
the class TestRenameWithSnapshots method testAppendFileAfterRenameInSnapshot.
/**
 * Similar to testRenameUCFileInSnapshot, but do the renaming first and then
 * append to the file without closing it. Unit test for HDFS-5425.
 */
@Test
public void testAppendFileAfterRenameInSnapshot() throws Exception {
  final Path test = new Path("/test");
  final Path foo = new Path(test, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
  // rename bar --> bar2
  final Path bar2 = new Path(foo, "bar2");
  hdfs.rename(bar, bar2);
  // append to the file and keep it under construction
  FSDataOutputStream out = hdfs.append(bar2);
  out.writeByte(0);
  // sync so the NameNode persists the new length while the file stays open
  ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  // save namespace and restart the cluster, then verify the fsimage
  restartClusterAndCheckImage(true);
}
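The cast-and-hsync pattern above recurs in the snippets below. The following is a minimal self-contained sketch of the same idea; the class name, helper name, and path are illustrative assumptions, not part of the test.

import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class AppendAndSyncSketch {
  // Hypothetical helper: append one byte and persist the new file length.
  static void appendAndSync(DistributedFileSystem fs, Path path) throws IOException {
    try (FSDataOutputStream out = fs.append(path)) {
      out.writeByte(0);
      // UPDATE_LENGTH asks the NameNode to record the new length, so the
      // appended data is visible to readers before the file is closed.
      ((DFSOutputStream) out.getWrappedStream())
          .hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    }
  }
}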
use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.
the class TestOpenFilesWithSnapshot method doWriteAndAbort.
private void doWriteAndAbort(DistributedFileSystem fs, Path path) throws IOException {
  fs.mkdirs(path);
  fs.allowSnapshot(path);
  DFSTestUtil.createFile(fs, new Path("/test/test1"), 100, (short) 2, 100024L);
  DFSTestUtil.createFile(fs, new Path("/test/test2"), 100, (short) 2, 100024L);
  Path file = new Path("/test/test/test2");
  FSDataOutputStream out = fs.create(file);
  // write 2 MiB in 4-byte chunks
  for (int i = 0; i < 2; i++) {
    long count = 0;
    while (count < 1048576) {
      out.writeBytes("hell");
      count += 4;
    }
  }
  // persist the length on the NameNode, then abort without closing,
  // leaving the file under construction
  ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream());
  Path file2 = new Path("/test/test/test3");
  FSDataOutputStream out2 = fs.create(file2);
  for (int i = 0; i < 2; i++) {
    long count = 0;
    while (count < 1048576) {
      out2.writeBytes("hell");
      count += 4;
    }
  }
  ((DFSOutputStream) out2.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  DFSTestUtil.abortStream((DFSOutputStream) out2.getWrappedStream());
  fs.createSnapshot(path, "s1");
}
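The two write loops above are identical; as a sketch, they could be factored into a small helper (the name fillBytes is an illustrative assumption):

// Hypothetical helper: write roughly `total` bytes in 4-byte chunks.
private static void fillBytes(FSDataOutputStream out, long total) throws IOException {
  long written = 0;
  while (written < total) {
    out.writeBytes("hell");  // writeBytes emits 4 one-byte characters here
    written += 4;
  }
}

Each loop above would then reduce to fillBytes(out, 2L * 1048576).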
use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.
the class HdfsDataOutputStream method hsync.
/**
* Sync buffered data to DataNodes (flush to disk devices).
*
* @param syncFlags
* Indicate the detailed semantic and actions of the hsync.
* @throws IOException
* @see FSDataOutputStream#hsync()
*/
public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
  OutputStream wrappedStream = getWrappedStream();
  if (wrappedStream instanceof CryptoOutputStream) {
    // flush encrypted buffers down into the underlying DFSOutputStream first
    wrappedStream.flush();
    wrappedStream = ((CryptoOutputStream) wrappedStream).getWrappedStream();
  }
  ((DFSOutputStream) wrappedStream).hsync(syncFlags);
}
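A hedged usage sketch of this overload, assuming `fs` is a DistributedFileSystem and reusing the imports from the sketch above; the path is illustrative, and the instanceof guard is defensive rather than required:

FSDataOutputStream out = fs.create(new Path("/example/file"));  // illustrative path
out.writeBytes("payload");
if (out instanceof HdfsDataOutputStream) {
  // Flush to the DataNodes' disks and persist the new length on the NameNode.
  ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
}
out.close();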
use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.
the class TestDataNodeVolumeMetrics method testVolumeMetricsWithVolumeDepartureArrival.
@Test
public void testVolumeMetricsWithVolumeDepartureArrival() throws Exception {
  MiniDFSCluster cluster = setupClusterForVolumeMetrics();
  try {
    FileSystem fs = cluster.getFileSystem();
    final Path fileName = new Path("/test.dat");
    final long fileLen = Integer.MAX_VALUE + 1L;
    DFSTestUtil.createFile(fs, fileName, false, BLOCK_SIZE, fileLen,
        fs.getDefaultBlockSize(fileName), REPL, 1L, true);
    try (FSDataOutputStream out = fs.append(fileName)) {
      out.writeBytes("hello world");
      ((DFSOutputStream) out.getWrappedStream()).hsync();
    }
    ArrayList<DataNode> dns = cluster.getDataNodes();
    assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
    final String dataDir = cluster.getDataDirectory();
    final File dn1Vol2 = new File(dataDir, "data2");
    // simulate a volume failure and check the metrics in the degraded state
    DataNodeTestUtils.injectDataDirFailure(dn1Vol2);
    verifyDataNodeVolumeMetrics(fs, cluster, fileName);
    // restore the volume, re-add it via reconfiguration, and check again
    DataNodeTestUtils.restoreDataDirFromFailure(dn1Vol2);
    DataNodeTestUtils.reconfigureDataNode(dns.get(0), dn1Vol2);
    verifyDataNodeVolumeMetrics(fs, cluster, fileName);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
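The departure/arrival steps above read as a reusable cycle; a sketch under the assumption that the helper name is hypothetical (in the test, metric verification runs between the steps):

// Hypothetical helper wrapping the fail/restore/reconfigure cycle above.
static void failAndRecoverVolume(DataNode dn, File volume) throws Exception {
  DataNodeTestUtils.injectDataDirFailure(volume);       // volume departs
  DataNodeTestUtils.restoreDataDirFromFailure(volume);  // disk comes back
  DataNodeTestUtils.reconfigureDataNode(dn, volume);    // volume re-arrives
}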
use of org.apache.hadoop.hdfs.DFSOutputStream in project hadoop by apache.
the class TestDataNodeMetrics method testRoundTripAckMetric.
/**
* Tests that round-trip acks in a datanode write pipeline are correctly
* measured.
*/
@Test
public void testRoundTripAckMetric() throws Exception {
  final int datanodeCount = 2;
  final int interval = 1;
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY, "" + interval);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(datanodeCount).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    // Open a file and get the head of the pipeline
    Path testFile = new Path("/testRoundTripAckMetric.txt");
    FSDataOutputStream fsout = fs.create(testFile, (short) datanodeCount);
    DFSOutputStream dout = (DFSOutputStream) fsout.getWrappedStream();
    // Slow down the writes to catch the write pipeline
    dout.setChunksPerPacket(5);
    dout.setArtificialSlowdown(3000);
    fsout.write(new byte[10000]);
    DatanodeInfo[] pipeline = null;
    int count = 0;
    while (pipeline == null && count < 5) {
      pipeline = dout.getPipeline();
      System.out.println("Waiting for pipeline to be created.");
      Thread.sleep(1000);
      count++;
    }
    assertNotNull("Pipeline was not created in time", pipeline);
    // Get the head node that should be receiving downstream acks
    DatanodeInfo headInfo = pipeline[0];
    DataNode headNode = null;
    for (DataNode datanode : cluster.getDataNodes()) {
      if (datanode.getDatanodeId().equals(headInfo)) {
        headNode = datanode;
        break;
      }
    }
    assertNotNull("Could not find the head of the datanode write pipeline", headNode);
    // Close the file and wait for the metrics to roll over
    fsout.close();
    Thread.sleep((interval + 1) * 1000);
    // Check that the ack was received
    MetricsRecordBuilder dnMetrics = getMetrics(headNode.getMetrics().name());
    assertTrue("Expected non-zero number of acks",
        getLongCounter("PacketAckRoundTripTimeNanosNumOps", dnMetrics) > 0);
    assertQuantileGauges("PacketAckRoundTripTimeNanos" + interval + "s", dnMetrics);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
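The one-second polling loop above could also be written with GenericTestUtils.waitFor from org.apache.hadoop.test; this is a suggested alternative, not what the test does:

// Poll every 500 ms, time out after 5 s; `dout` is the DFSOutputStream above.
GenericTestUtils.waitFor(() -> dout.getPipeline() != null, 500, 5000);
DatanodeInfo[] pipeline = dout.getPipeline();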