Use of java.io.OutputStream in project hadoop by apache.
From class FileUtil, method copy:
/** Copy files between FileSystems. */
public static boolean copy(FileSystem srcFS, FileStatus srcStatus,
    FileSystem dstFS, Path dst, boolean deleteSource,
    boolean overwrite, Configuration conf) throws IOException {
  Path src = srcStatus.getPath();
  dst = checkDest(src.getName(), dstFS, dst, overwrite);
  if (srcStatus.isDirectory()) {
    checkDependencies(srcFS, src, dstFS, dst);
    if (!dstFS.mkdirs(dst)) {
      return false;
    }
    FileStatus[] contents = srcFS.listStatus(src);
    for (int i = 0; i < contents.length; i++) {
      copy(srcFS, contents[i], dstFS,
          new Path(dst, contents[i].getPath().getName()),
          deleteSource, overwrite, conf);
    }
  } else {
    InputStream in = null;
    OutputStream out = null;
    try {
      in = srcFS.open(src);
      out = dstFS.create(dst, overwrite);
      IOUtils.copyBytes(in, out, conf, true);
    } catch (IOException e) {
      IOUtils.closeStream(out);
      IOUtils.closeStream(in);
      throw e;
    }
  }
  if (deleteSource) {
    return srcFS.delete(src, true);
  } else {
    return true;
  }
}
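A minimal usage sketch for this overload, assuming the usual org.apache.hadoop.fs imports and a default Configuration; the paths below are hypothetical and not taken from the Hadoop sources:

Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);       // default FileSystem (local unless fs.defaultFS says otherwise)
Path src = new Path("/tmp/source-dir");     // hypothetical source path
Path dst = new Path("/tmp/dest-dir");       // hypothetical destination path
FileStatus srcStatus = fs.getFileStatus(src);
// Copy within the same FileSystem, keeping the source and refusing to overwrite.
boolean copied = FileUtil.copy(fs, srcStatus, fs, dst, false, false, conf);

As the code above shows, directories are copied recursively, and the method returns false if the destination directory cannot be created.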
Use of java.io.OutputStream in project hadoop by apache.
From class FileUtil, method unZip:
/**
 * Given a File input, unzip it into the unzip directory
 * passed as the second parameter.
 * @param inFile The zip file as input
 * @param unzipDir The unzip directory where to unzip the zip file.
 * @throws IOException
 */
public static void unZip(File inFile, File unzipDir) throws IOException {
  Enumeration<? extends ZipEntry> entries;
  ZipFile zipFile = new ZipFile(inFile);
  try {
    entries = zipFile.entries();
    while (entries.hasMoreElements()) {
      ZipEntry entry = entries.nextElement();
      if (!entry.isDirectory()) {
        InputStream in = zipFile.getInputStream(entry);
        try {
          File file = new File(unzipDir, entry.getName());
          if (!file.getParentFile().mkdirs()) {
            if (!file.getParentFile().isDirectory()) {
              throw new IOException("Mkdirs failed to create " +
                  file.getParentFile().toString());
            }
          }
          OutputStream out = new FileOutputStream(file);
          try {
            byte[] buffer = new byte[8192];
            int i;
            while ((i = in.read(buffer)) != -1) {
              out.write(buffer, 0, i);
            }
          } finally {
            out.close();
          }
        } finally {
          in.close();
        }
      }
    }
  } finally {
    zipFile.close();
  }
}
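A hedged usage sketch with hypothetical file paths; unZip walks every non-directory entry and creates missing parent directories as it goes:

File zip = new File("/tmp/archive.zip");    // hypothetical input archive
File dest = new File("/tmp/unpacked");      // hypothetical target directory
FileUtil.unZip(zip, dest);                  // extracts each entry to dest/<entry name>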
Use of java.io.OutputStream in project hadoop by apache.
From class CryptoStreamsTestBase, method testSeek:
/** Test seeking to different positions. */
@Test(timeout = 120000)
public void testSeek() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);
  InputStream in = getInputStream(defaultBufferSize);
  // Pos: 1/3 dataLen
  seekCheck(in, dataLen / 3);
  // Pos: 0
  seekCheck(in, 0);
  // Pos: 1/2 dataLen
  seekCheck(in, dataLen / 2);
  final long pos = ((Seekable) in).getPos();
  // Pos: -3
  try {
    seekCheck(in, -3);
    Assert.fail("Seek to negative offset should fail.");
  } catch (EOFException e) {
    GenericTestUtils.assertExceptionContains(
        FSExceptionMessages.NEGATIVE_SEEK, e);
  }
  Assert.assertEquals(pos, ((Seekable) in).getPos());
  // Pos: dataLen + 3
  try {
    seekCheck(in, dataLen + 3);
    Assert.fail("Seek after EOF should fail.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot seek after EOF", e);
  }
  Assert.assertEquals(pos, ((Seekable) in).getPos());
  in.close();
}
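The negative-offset and past-EOF checks rely on the stream implementing Hadoop's Seekable interface, which is why the test casts the InputStream before calling getPos. A minimal sketch of that pattern, with a hypothetical stream variable:

if (in instanceof Seekable) {
  Seekable seekable = (Seekable) in;
  seekable.seek(0);                  // rewind to the start of the data
  long current = seekable.getPos();  // position after the seek, i.e. 0
}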
Use of java.io.OutputStream in project hadoop by apache.
From class CryptoStreamsTestBase, method testGetPos:
/** Test get position. */
@Test(timeout = 120000)
public void testGetPos() throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize);
  writeData(out);
  // Default buffer size
  InputStream in = getInputStream(defaultBufferSize);
  byte[] result = new byte[dataLen];
  int n1 = readAll(in, result, 0, dataLen / 3);
  Assert.assertEquals(n1, ((Seekable) in).getPos());
  int n2 = readAll(in, result, n1, dataLen - n1);
  Assert.assertEquals(n1 + n2, ((Seekable) in).getPos());
  in.close();
}
Use of java.io.OutputStream in project hadoop by apache.
From class CryptoStreamsTestBase, method cryptoCheck:
private void cryptoCheck(byte[] iv) throws Exception {
  OutputStream out = getOutputStream(defaultBufferSize, key, iv);
  writeData(out);
  InputStream in = getInputStream(defaultBufferSize, key, iv);
  readCheck(in);
  in.close();
}
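Here getOutputStream and getInputStream are abstract hooks supplied by concrete subclasses of CryptoStreamsTestBase. Purely as an illustration (not this test's actual implementation), a plain byte stream can be wrapped with Hadoop's CryptoOutputStream/CryptoInputStream; the 16-byte key and IV and the in-memory streams below are assumptions:

// Illustrative sketch: wrap in-memory streams with Hadoop's crypto streams.
Configuration conf = new Configuration();
CryptoCodec codec = CryptoCodec.getInstance(conf);  // AES/CTR/NoPadding by default
byte[] key = new byte[16];                          // hypothetical 128-bit key
byte[] iv = new byte[16];                           // hypothetical initialization vector
ByteArrayOutputStream encrypted = new ByteArrayOutputStream();
OutputStream out = new CryptoOutputStream(encrypted, codec, key, iv);
out.write("some plaintext".getBytes(StandardCharsets.UTF_8));
out.close();
InputStream in = new CryptoInputStream(
    new ByteArrayInputStream(encrypted.toByteArray()), codec, key, iv);

Writing through one stream and reading back through the other with the same key and IV mirrors the write-then-verify pattern that cryptoCheck drives through writeData and readCheck.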