Use of java.io.BufferedWriter in project hadoop by apache.
From class TestAvroFSInput, method testAFSInput:
public void testAFSInput() throws Exception {
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.getLocal(conf);
  Path dir = getInputPath();
  if (!fs.exists(dir)) {
    fs.mkdirs(dir);
  }
  Path filePath = new Path(dir, "foo");
  if (fs.exists(filePath)) {
    fs.delete(filePath, false);
  }
  FSDataOutputStream ostream = fs.create(filePath);
  BufferedWriter w = new BufferedWriter(new OutputStreamWriter(ostream));
  w.write("0123456789");
  w.close();
  // Open the file as an AvroFSInput via FileContext.
  FileContext fc = FileContext.getFileContext(conf);
  AvroFSInput avroFSIn = new AvroFSInput(fc, filePath);
  assertEquals(10, avroFSIn.length());
  // Check the initial position.
  byte[] buf = new byte[1];
  assertEquals(0, avroFSIn.tell());
  // Check a read from that position.
  avroFSIn.read(buf, 0, 1);
  assertEquals(1, avroFSIn.tell());
  assertEquals('0', (char) buf[0]);
  // Check a seek followed by a read.
  avroFSIn.seek(4);
  assertEquals(4, avroFSIn.tell());
  avroFSIn.read(buf, 0, 1);
  assertEquals('4', (char) buf[0]);
  assertEquals(5, avroFSIn.tell());
  avroFSIn.close();
}
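Written today, the fixture setup and the stream under test would more likely use try-with-resources, so both get closed even if an assertion fails midway. A minimal sketch, assuming the same fs, filePath, and fc values as in the test above (AvroFSInput implements Closeable):

// Hypothetical try-with-resources variant of the fixture and stream setup;
// fs, filePath, and fc are assumed to be initialized as in the test above.
try (BufferedWriter w = new BufferedWriter(
    new OutputStreamWriter(fs.create(filePath)))) {
  w.write("0123456789");
}
try (AvroFSInput in = new AvroFSInput(fc, filePath)) {
  assertEquals(10, in.length());
  assertEquals(0, in.tell());
}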
Use of java.io.BufferedWriter in project hadoop by apache.
From class TestCodec, method testGzipCodecWrite:
private void testGzipCodecWrite(boolean useNative) throws IOException {
  // Create a gzipped file using a compressor from the CodecPool,
  // and try to read it back via the regular GZIPInputStream.
  // Use native libs per the parameter.
  Configuration conf = new Configuration();
  if (useNative) {
    assumeTrue(ZlibFactory.isNativeZlibLoaded(conf));
  } else {
    assertFalse("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
  // Ensure that ZlibFactory hands back the expected compressor implementation.
  Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
  assertNotNull("zlibCompressor is null!", zlibCompressor);
  assertTrue("ZlibFactory returned unexpected deflater",
      useNative
          ? zlibCompressor instanceof ZlibCompressor
          : zlibCompressor instanceof BuiltInZlibDeflater);
  CodecPool.returnCompressor(zlibCompressor);
  // Create a GZIP text file via the Compressor interface.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(new Path("foo.gz"));
  assertTrue("Codec for .gz file is not GzipCodec", codec instanceof GzipCodec);
  final String msg = "This is the message we are going to compress.";
  final String fileName = new Path(
      GenericTestUtils.getTempPath("testGzipCodecWrite.txt.gz")).toString();
  BufferedWriter w = null;
  Compressor gzipCompressor = CodecPool.getCompressor(codec);
  if (null != gzipCompressor) {
    // If the pool gives us back a Compressor, we should be able to use it
    // to write files we can then read back with Java's gzip tools.
    OutputStream os = new CompressorStream(new FileOutputStream(fileName),
        gzipCompressor);
    w = new BufferedWriter(new OutputStreamWriter(os));
    w.write(msg);
    w.close();
    CodecPool.returnCompressor(gzipCompressor);
    verifyGzipFile(fileName, msg);
  }
  // Create a gzip text file via codec.createOutputStream().
  w = new BufferedWriter(new OutputStreamWriter(
      codec.createOutputStream(new FileOutputStream(fileName))));
  w.write(msg);
  w.close();
  verifyGzipFile(fileName, msg);
}
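The verifyGzipFile helper used above is not shown in this snippet. A plausible reconstruction, reading the file back through the JDK's regular GZIPInputStream as the opening comment of the test describes (the upstream implementation may differ in detail):

// Hypothetical reconstruction of the verifyGzipFile helper referenced above;
// uses java.io.FileInputStream and java.util.zip.GZIPInputStream.
private static void verifyGzipFile(String fileName, String expected)
    throws IOException {
  try (BufferedReader r = new BufferedReader(new InputStreamReader(
      new GZIPInputStream(new FileInputStream(fileName))))) {
    assertEquals("Compressed file contents mismatch", expected, r.readLine());
  }
}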
Use of java.io.BufferedWriter in project hadoop by apache.
From class TestCodec, method testGzipCodecRead:
@Test
public void testGzipCodecRead() throws IOException {
  // Create a gzipped file and try to read it back, using a decompressor
  // from the CodecPool.
  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  ZlibFactory.setNativeZlibLoaded(false);
  // Ensure that the CodecPool has a BuiltInZlibInflater in it.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflater",
      zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);
  // Now create a GZip text file.
  Path f = new Path(GenericTestUtils.getTempPath("testGzipCodecRead.txt.gz"));
  BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
      new GZIPOutputStream(new FileOutputStream(f.toString()))));
  final String msg = "This is the message in the file!";
  bw.write(msg);
  bw.close();
  // Now read it back, using the CodecPool to supply the decompressor.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(f);
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  FileSystem fs = FileSystem.getLocal(conf);
  InputStream is = fs.open(f);
  is = codec.createInputStream(is, decompressor);
  BufferedReader br = new BufferedReader(new InputStreamReader(is));
  String line = br.readLine();
  assertEquals("Didn't get the same message back!", msg, line);
  br.close();
}
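One detail worth noting: the test borrows a decompressor from the CodecPool but never hands it back. In non-test code the borrow would normally be paired with a return; a sketch of the same read path with that pairing, reusing the codec, fs, f, and msg values assumed from the test above:

// Sketch of the read path with the pooled decompressor returned afterwards;
// codec, fs, f, and msg are assumed to be set up as in the test above.
Decompressor d = CodecPool.getDecompressor(codec);
try (BufferedReader r = new BufferedReader(new InputStreamReader(
    codec.createInputStream(fs.open(f), d)))) {
  assertEquals(msg, r.readLine());
} finally {
  CodecPool.returnDecompressor(d);
}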
Use of java.io.BufferedWriter in project hadoop by apache.
From class TestCodec, method testGzipLongOverflow:
@Test
public void testGzipLongOverflow() throws IOException {
  LOG.info("testGzipLongOverflow");
  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  ZlibFactory.setNativeZlibLoaded(false);
  assertFalse("ZlibFactory is using native libs against request",
      ZlibFactory.isNativeZlibLoaded(conf));
  // Ensure that the CodecPool has a BuiltInZlibInflater in it.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflater",
      zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);
  // Now create a GZip file whose uncompressed size exceeds 32 bits.
  Path f = new Path(GenericTestUtils.getTempPath("testGzipLongOverflow.bin.gz"));
  BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
      new GZIPOutputStream(new FileOutputStream(f.toString()))));
  final int NBUF = 1024 * 4 + 1;
  final char[] buf = new char[1024 * 1024];
  // (New char arrays are already zero-filled; this just makes it explicit.)
  for (int i = 0; i < buf.length; i++) {
    buf[i] = '\0';
  }
  for (int i = 0; i < NBUF; i++) {
    bw.write(buf);
  }
  bw.close();
  // Now read it back, using the CodecPool to supply the decompressor.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(f);
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  FileSystem fs = FileSystem.getLocal(conf);
  InputStream is = fs.open(f);
  is = codec.createInputStream(is, decompressor);
  BufferedReader br = new BufferedReader(new InputStreamReader(is));
  for (int j = 0; j < NBUF; j++) {
    int n = br.read(buf);
    // JUnit's assertEquals takes the expected value first.
    assertEquals("got wrong read length!", buf.length, n);
    for (int i = 0; i < buf.length; i++) {
      assertEquals("got wrong byte!", '\0', buf[i]);
    }
  }
  br.close();
}
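The sizes here are chosen so the uncompressed length no longer fits in 32 bits, which is the "long overflow" the test name refers to: the gzip trailer records the uncompressed size modulo 2^32, so a correct codec must not treat that field as an exact length. The arithmetic:

// Why NBUF = 4097: each of the 4097 writes emits a 1 MiB char buffer, and
// each '\0' encodes to a single byte, so the uncompressed size is
// 4097 * 1,048,576 = 4,296,015,872 bytes - past Integer.MAX_VALUE
// (2,147,483,647) and even past 2^32 (4,294,967,296).
long totalBytes = (1024L * 4 + 1) * 1024 * 1024;  // 4,296,015,872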
Use of java.io.BufferedWriter in project hadoop by apache.
From class TestProcfsBasedProcessTree, method writeStatFiles:
/**
 * Write stat files under the specified pid directories with data setup in
 * the corresponding ProcessStatInfo objects.
 *
 * @param procfsRootDir root directory of the procfs file system
 * @param pids the PID directories under which to create the stat files
 * @param procs corresponding ProcessStatInfo objects whose data should be
 *          written to the stat files
 * @param smaps corresponding ProcessTreeSmapMemInfo objects whose data
 *          should be written to the smaps files; may be null to skip
 *          writing smaps files
 * @throws IOException if the stat files could not be written
 */
public static void writeStatFiles(File procfsRootDir, String[] pids,
    ProcessStatInfo[] procs, ProcessTreeSmapMemInfo[] smaps)
    throws IOException {
  for (int i = 0; i < pids.length; i++) {
    File statFile = new File(new File(procfsRootDir, pids[i]),
        ProcfsBasedProcessTree.PROCFS_STAT_FILE);
    BufferedWriter bw = null;
    try {
      FileWriter fw = new FileWriter(statFile);
      bw = new BufferedWriter(fw);
      bw.write(procs[i].getStatLine());
      LOG.info("wrote stat file for " + pids[i] + " with contents: "
          + procs[i].getStatLine());
    } finally {
      // Not handling the exception - it will propagate and fail the test.
      if (bw != null) {
        bw.close();
      }
    }
    if (smaps != null) {
      File smapFile = new File(new File(procfsRootDir, pids[i]),
          ProcfsBasedProcessTree.SMAPS);
      bw = null;
      try {
        FileWriter fw = new FileWriter(smapFile);
        bw = new BufferedWriter(fw);
        bw.write(smaps[i].toString());
        bw.flush();
        LOG.info("wrote smap file for " + pids[i] + " with contents: "
            + smaps[i].toString());
      } finally {
        // Not handling the exception - it will propagate and fail the test.
        if (bw != null) {
          bw.close();
        }
      }
    }
  }
}
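The null-check-in-finally pattern above predates try-with-resources; a hypothetical modern rewrite of the stat-file write (the smaps branch would take the same shape):

// Hypothetical try-with-resources version of the stat-file write above;
// the writer is closed automatically and any IOException still fails the test.
try (BufferedWriter out = new BufferedWriter(new FileWriter(statFile))) {
  out.write(procs[i].getStatLine());
  LOG.info("wrote stat file for " + pids[i] + " with contents: "
      + procs[i].getStatLine());
}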