Use of java.io.DataOutputStream in project hadoop by apache.
In the class TestDataTransferProtocol, the method testPacketHeader:
@Test
public void testPacketHeader() throws IOException {
  PacketHeader hdr = new PacketHeader(
      4,     // size of packet
      1024,  // OffsetInBlock
      100,   // sequencenumber
      false, // lastPacketInBlock
      4096,  // chunk length
      false);
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  hdr.write(new DataOutputStream(baos));

  // Read back using DataInput
  PacketHeader readBack = new PacketHeader();
  ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
  readBack.readFields(new DataInputStream(bais));
  assertEquals(hdr, readBack);

  // Read back using ByteBuffer
  readBack = new PacketHeader();
  readBack.readFields(ByteBuffer.wrap(baos.toByteArray()));
  assertEquals(hdr, readBack);

  // The header (seqno 100) is sane after last-seen seqno 99,
  // but not after seqno 100 itself.
  assertTrue(hdr.sanityCheck(99));
  assertFalse(hdr.sanityCheck(100));
}
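
The test exercises the write/readFields round trip that DataOutputStream and DataInputStream provide. A minimal standalone sketch of the same pattern, using only java.io; the field values and comments below are illustrative, not PacketHeader's actual wire format:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class RoundTripSketch {
  public static void main(String[] args) throws IOException {
    // Write primitives through a DataOutputStream backed by a byte array.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    out.writeInt(4);      // packet length (illustrative)
    out.writeLong(1024L); // offset in block (illustrative)
    out.writeLong(100L);  // sequence number (illustrative)
    out.flush();

    // Read the same fields back, in the same order they were written.
    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    System.out.println(in.readInt());  // 4
    System.out.println(in.readLong()); // 1024
    System.out.println(in.readLong()); // 100
  }
}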
Use of java.io.DataOutputStream in project hadoop by apache.
In the class TestDFSOutputStream, the method testCongestionBackoff:
@Test
public void testCongestionBackoff() throws IOException {
  DfsClientConf dfsClientConf = mock(DfsClientConf.class);
  DFSClient client = mock(DFSClient.class);
  when(client.getConf()).thenReturn(dfsClientConf);
  when(client.getTracer()).thenReturn(FsTracer.get(new Configuration()));
  client.clientRunning = true;
  DataStreamer stream = new DataStreamer(mock(HdfsFileStatus.class),
      mock(ExtendedBlock.class), client, "foo", null, null, null, null,
      null, null);

  // Make every flush on the block stream fail so the streamer takes
  // its error-handling path.
  DataOutputStream blockStream = mock(DataOutputStream.class);
  doThrow(new IOException()).when(blockStream).flush();
  Whitebox.setInternalState(stream, "blockStream", blockStream);
  Whitebox.setInternalState(stream, "stage",
      BlockConstructionStage.PIPELINE_CLOSE);

  @SuppressWarnings("unchecked")
  LinkedList<DFSPacket> dataQueue = (LinkedList<DFSPacket>)
      Whitebox.getInternalState(stream, "dataQueue");
  @SuppressWarnings("unchecked")
  ArrayList<DatanodeInfo> congestedNodes = (ArrayList<DatanodeInfo>)
      Whitebox.getInternalState(stream, "congestedNodes");
  congestedNodes.add(mock(DatanodeInfo.class));

  DFSPacket packet = mock(DFSPacket.class);
  when(packet.getTraceParents()).thenReturn(new SpanId[] {});
  dataQueue.add(packet);
  stream.run();

  // The streamer should clear its congested-node list even though the
  // pipeline flush failed.
  Assert.assertTrue(congestedNodes.isEmpty());
}
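
The key trick above is stubbing flush() on a mocked DataOutputStream so the code under test hits its failure path. A minimal sketch of that stubbing in isolation, assuming Mockito is on the classpath:

import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;

import java.io.DataOutputStream;
import java.io.IOException;

public class FlushFailureSketch {
  public static void main(String[] args) {
    // DataOutputStream is a non-final concrete class, so Mockito can mock it.
    DataOutputStream blockStream = mock(DataOutputStream.class);
    try {
      doThrow(new IOException("simulated pipeline failure"))
          .when(blockStream).flush();
      blockStream.flush(); // throws the stubbed exception
    } catch (IOException expected) {
      System.out.println("caught: " + expected.getMessage());
    }
  }
}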
Use of java.io.DataOutputStream in project hadoop by apache.
In the class TestDFSRemove, the method createFile:
static void createFile(FileSystem fs, Path f) throws IOException {
  DataOutputStream a_out = fs.create(f);
  a_out.writeBytes("something");
  a_out.close();
}
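
FileSystem.create returns an FSDataOutputStream, which extends java.io.DataOutputStream and implements Closeable, so the helper could equivalently use try-with-resources to close the stream even when the write fails. A sketch:

static void createFile(FileSystem fs, Path f) throws IOException {
  // try-with-resources closes the stream on both the success
  // and failure paths.
  try (DataOutputStream out = fs.create(f)) {
    out.writeBytes("something");
  }
}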
Use of java.io.DataOutputStream in project hadoop by apache.
In the class TestDFSRename, the method testRename:
@Test
public void testRename() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    assertTrue(fs.mkdirs(dir));
    {
      // test lease
      Path a = new Path(dir, "a");
      Path aa = new Path(dir, "aa");
      Path b = new Path(dir, "b");
      createFile(fs, a);
      // should not have any lease
      assertEquals(0, countLease(cluster));
      DataOutputStream aa_out = fs.create(aa);
      aa_out.writeBytes("something");
      // should have 1 lease
      assertEquals(1, countLease(cluster));
      list(fs, "rename0");
      fs.rename(a, b);
      list(fs, "rename1");
      aa_out.writeBytes(" more");
      aa_out.close();
      list(fs, "rename2");
      // should not have any lease
      assertEquals(0, countLease(cluster));
    }
    {
      // test non-existent destination
      Path dstPath = new Path("/c/d");
      assertFalse(fs.exists(dstPath));
      assertFalse(fs.rename(dir, dstPath));
    }
    {
      // dst cannot be a file or directory under src
      // test rename /a/b/foo to /a/b/c
      Path src = new Path("/a/b");
      Path dst = new Path("/a/b/c");
      createFile(fs, new Path(src, "foo"));
      // dst cannot be a file under src
      assertFalse(fs.rename(src, dst));
      // dst cannot be a directory under src
      assertFalse(fs.rename(src.getParent(), dst.getParent()));
    }
    {
      // dst can start with src, if it is not a directory or file under src
      // test rename /testPrefix to /testPrefixfile
      Path src = new Path("/testPrefix");
      Path dst = new Path("/testPrefixfile");
      createFile(fs, src);
      assertTrue(fs.rename(src, dst));
    }
    {
      // dst should not be the same as src
      // test rename /a/b/c to /a/b/c
      Path src = new Path("/a/b/c");
      createFile(fs, src);
      assertTrue(fs.rename(src, src));
      assertFalse(fs.rename(new Path("/a/b"), new Path("/a/b/")));
      assertTrue(fs.rename(src, new Path("/a/b/c/")));
    }
    fs.delete(dir, true);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
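
The last sub-test passes because org.apache.hadoop.fs.Path strips a trailing slash during normalization, so "/a/b/c/" and "/a/b/c" name the same path and the final rename is a successful self-rename, just like rename(src, src). A quick standalone check:

import org.apache.hadoop.fs.Path;

public class PathNormalizationSketch {
  public static void main(String[] args) {
    // Path normalization removes the trailing slash.
    System.out.println(new Path("/a/b/c/").equals(new Path("/a/b/c"))); // true
  }
}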
Use of java.io.DataOutputStream in project hadoop by apache.
In the class TextOutputFormat, the method getRecordWriter:
public RecordWriter<K, V> getRecordWriter(TaskAttemptContext job)
    throws IOException, InterruptedException {
  Configuration conf = job.getConfiguration();
  boolean isCompressed = getCompressOutput(job);
  // SEPERATOR (sic) is the constant's actual, historically misspelled name;
  // the default key/value separator is a tab.
  String keyValueSeparator = conf.get(SEPERATOR, "\t");
  CompressionCodec codec = null;
  String extension = "";
  if (isCompressed) {
    Class<? extends CompressionCodec> codecClass =
        getOutputCompressorClass(job, GzipCodec.class);
    codec = ReflectionUtils.newInstance(codecClass, conf);
    extension = codec.getDefaultExtension();
  }
  Path file = getDefaultWorkFile(job, extension);
  FileSystem fs = file.getFileSystem(conf);
  FSDataOutputStream fileOut = fs.create(file, false);
  if (isCompressed) {
    // The codec's stream is a plain OutputStream, so it is wrapped in a
    // DataOutputStream to match LineRecordWriter's constructor.
    return new LineRecordWriter<>(
        new DataOutputStream(codec.createOutputStream(fileOut)),
        keyValueSeparator);
  } else {
    // FSDataOutputStream already extends DataOutputStream.
    return new LineRecordWriter<>(fileOut, keyValueSeparator);
  }
}
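
The compressed branch works because createOutputStream returns a plain OutputStream, which is re-wrapped in a DataOutputStream to satisfy the type LineRecordWriter's constructor requires. The same wrapping pattern with java.util.zip instead of a Hadoop codec (the output file name here is illustrative):

import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.zip.GZIPOutputStream;

public class CompressedWriterSketch {
  public static void main(String[] args) throws IOException {
    // GZIPOutputStream only exposes raw write(); wrapping it in a
    // DataOutputStream regains writeBytes() and the other convenience methods.
    try (DataOutputStream out = new DataOutputStream(
        new GZIPOutputStream(new FileOutputStream("part-r-00000.gz")))) {
      out.writeBytes("key\tvalue\n");
    }
  }
}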