Example 41 with DataOutputStream

Use of java.io.DataOutputStream in project hadoop by apache.

The touchFile method of the TestCopyMapper class.

private static void touchFile(String path, boolean createMultipleBlocks, ChecksumOpt checksumOpt) throws Exception {
    FileSystem fs;
    DataOutputStream outputStream = null;
    try {
        fs = cluster.getFileSystem();
        final Path qualifiedPath = new Path(path).makeQualified(fs.getUri(), fs.getWorkingDirectory());
        // Use the test's non-default block size when multiple blocks are wanted; otherwise twice the default.
        final long blockSize = createMultipleBlocks ? NON_DEFAULT_BLOCK_SIZE : fs.getDefaultBlockSize(qualifiedPath) * 2;
        FsPermission permission = FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(fs.getConf()));
        // The long create overload: explicit permission, flags, buffer size, replication, block size, and checksum options.
        outputStream = fs.create(qualifiedPath, permission, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 0, (short) (fs.getDefaultReplication(qualifiedPath) * 2), blockSize, null, checksumOpt);
        byte[] bytes = new byte[DEFAULT_FILE_SIZE];
        outputStream.write(bytes);
        long fileSize = DEFAULT_FILE_SIZE;
        if (createMultipleBlocks) {
            // Keep appending until the file spans at least two blocks.
            while (fileSize < 2 * blockSize) {
                outputStream.write(bytes);
                outputStream.flush();
                fileSize += DEFAULT_FILE_SIZE;
            }
        }
        pathList.add(qualifiedPath);
        ++nFiles;
        FileStatus fileStatus = fs.getFileStatus(qualifiedPath);
        System.out.println(fileStatus.getBlockSize());
        System.out.println(fileStatus.getReplication());
    } finally {
        IOUtils.cleanup(null, outputStream);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), CopyListingFileStatus (org.apache.hadoop.tools.CopyListingFileStatus), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DataOutputStream (java.io.DataOutputStream), FileSystem (org.apache.hadoop.fs.FileSystem), FsPermission (org.apache.hadoop.fs.permission.FsPermission)
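
The part worth noting is the long overload of FileSystem.create, which pins down permission, create flags, buffer size, replication, block size, and checksum options instead of accepting defaults. Below is a minimal, hedged sketch of the same call pattern as a standalone program; the path and sizes are hypothetical, and a reachable FileSystem (local or HDFS) is assumed.

import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class CreateWithOptions {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical target path.
        Path file = new Path("/tmp/example-file");
        FsPermission perm = FsPermission.getFileDefault()
                .applyUMask(FsPermission.getUMask(conf));
        // Same overload as above: flags, buffer size, replication,
        // block size, progress callback, checksum options.
        try (FSDataOutputStream out = fs.create(file, perm,
                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                4096, fs.getDefaultReplication(file),
                fs.getDefaultBlockSize(file), null, null)) {
            out.write(new byte[1024]);
        }
    }
}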

Example 42 with DataOutputStream

Use of java.io.DataOutputStream in project hadoop by apache.

The getPossiblyCompressedOutputStream method of the CompressionEmulationUtil class.

/**
   * Returns an {@link OutputStream} for a file that might need
   * compression.
   */
static OutputStream getPossiblyCompressedOutputStream(Path file, Configuration conf) throws IOException {
    FileSystem fs = file.getFileSystem(conf);
    JobConf jConf = new JobConf(conf);
    if (org.apache.hadoop.mapred.FileOutputFormat.getCompressOutput(jConf)) {
        // get the codec class
        Class<? extends CompressionCodec> codecClass = org.apache.hadoop.mapred.FileOutputFormat.getOutputCompressorClass(jConf, GzipCodec.class);
        // get the codec implementation
        CompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);
        // add the appropriate extension
        file = file.suffix(codec.getDefaultExtension());
        if (isCompressionEmulationEnabled(conf)) {
            FSDataOutputStream fileOut = fs.create(file, false);
            return new DataOutputStream(codec.createOutputStream(fileOut));
        }
    }
    // No compression (or emulation disabled): return a plain stream.
    return fs.create(file, false);
}
Also used: FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DataOutputStream (java.io.DataOutputStream), FileSystem (org.apache.hadoop.fs.FileSystem), CompressionCodec (org.apache.hadoop.io.compress.CompressionCodec), JobConf (org.apache.hadoop.mapred.JobConf)
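
The idiom to note is letting the codec supply both the file extension (codec.getDefaultExtension()) and the compressed stream wrapper (codec.createOutputStream(raw)). Here is a hedged sketch of the same pattern, writing a gzip-compressed file on the local filesystem; the path is hypothetical and GzipCodec is hard-wired rather than read from the job configuration.

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CompressedWrite {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
        // Append the codec's default extension (".gz") before creating the file.
        Path file = new Path("/tmp/demo").suffix(codec.getDefaultExtension());
        OutputStream raw = fs.create(file, false);
        try (DataOutputStream out = new DataOutputStream(codec.createOutputStream(raw))) {
            out.writeUTF("compressed payload");
        }
    }
}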

Example 43 with DataOutputStream

Use of java.io.DataOutputStream in project hadoop by apache.

The storeTokenMasterKey method of the HistoryServerFileSystemStateStoreService class.

@Override
public void storeTokenMasterKey(DelegationKey key) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Storing master key " + key.getKeyId());
    }
    Path keyPath = new Path(tokenKeysStatePath, TOKEN_MASTER_KEY_FILE_PREFIX + key.getKeyId());
    if (fs.exists(keyPath)) {
        throw new FileAlreadyExistsException(keyPath + " already exists");
    }
    // Serialize the key fully into memory first, so a failed write cannot leave a truncated key file.
    ByteArrayOutputStream memStream = new ByteArrayOutputStream();
    DataOutputStream dataStream = new DataOutputStream(memStream);
    try {
        key.write(dataStream);
        dataStream.close();
        // Null out so the cleanup in the finally block does not close the stream twice.
        dataStream = null;
    } finally {
        IOUtils.cleanup(LOG, dataStream);
    }
    createNewFile(keyPath, memStream.toByteArray());
}
Also used: Path (org.apache.hadoop.fs.Path), FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DataOutputStream (java.io.DataOutputStream), ByteArrayOutputStream (java.io.ByteArrayOutputStream)
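
The order of operations matters here: the DelegationKey is serialized completely into an in-memory buffer before anything is written to the filesystem, so a mid-serialization failure can never leave a truncated key file behind. A small hedged sketch of that buffer-first pattern for any Writable; the class and method names are hypothetical.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Writable;

public final class WritableBytes {
    // Hypothetical helper: serialize a Writable fully into memory,
    // leaving the caller to persist the bytes atomically.
    public static byte[] toBytes(Writable w) throws IOException {
        ByteArrayOutputStream mem = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(mem)) {
            w.write(out);
        }
        return mem.toByteArray();
    }
}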

Example 44 with DataOutputStream

Use of java.io.DataOutputStream in project hadoop by apache.

The storeToken method of the HistoryServerLeveldbStateStoreService class.

@Override
public void storeToken(MRDelegationTokenIdentifier tokenId, Long renewDate) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Storing token " + tokenId.getSequenceNumber());
    }
    ByteArrayOutputStream memStream = new ByteArrayOutputStream();
    DataOutputStream dataStream = new DataOutputStream(memStream);
    try {
        tokenId.write(dataStream);
        // The renew date is appended after the identifier; readers must use the same order.
        dataStream.writeLong(renewDate);
        dataStream.close();
        dataStream = null;
    } finally {
        IOUtils.cleanup(LOG, dataStream);
    }
    String dbKey = getTokenDatabaseKey(tokenId);
    try {
        db.put(bytes(dbKey), memStream.toByteArray());
    } catch (DBException e) {
        // LevelDB throws the unchecked DBException; rewrap it to honor the method's IOException contract.
        throw new IOException(e);
    }
}
Also used: DBException (org.iq80.leveldb.DBException), DataOutputStream (java.io.DataOutputStream), ByteArrayOutputStream (java.io.ByteArrayOutputStream), JniDBFactory.asString (org.fusesource.leveldbjni.JniDBFactory.asString), IOException (java.io.IOException)
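
The value stored under dbKey is a plain concatenation: the token identifier's own serialization followed by a single long for the renewal date. Below is a hedged sketch of the matching read side, mirroring the write order; the class and method names are hypothetical.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;

public final class TokenValueCodec {
    // Hypothetical inverse of the write path above: fields must be
    // read back in exactly the order they were written.
    public static long decode(byte[] value, MRDelegationTokenIdentifier tokenId)
            throws IOException {
        try (DataInputStream in =
                new DataInputStream(new ByteArrayInputStream(value))) {
            tokenId.readFields(in); // token identifier first
            return in.readLong();   // then the renew date
        }
    }
}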

Example 45 with DataOutputStream

Use of java.io.DataOutputStream in project hadoop by apache.

The testReduceOutOfDiskSpace method of the TestFetcher class.

@Test
public void testReduceOutOfDiskSpace() throws Throwable {
    LOG.info("testReduceOutOfDiskSpace");
    Fetcher<Text, Text> underTest = new FakeFetcher<Text, Text>(job, id, ss, mm, r, metrics, except, key, connection);
    String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
    ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    // Serialize the header so it can be replayed as the body of the mocked HTTP response.
    header.write(new DataOutputStream(bout));
    ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
    when(connection.getResponseCode()).thenReturn(200);
    when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME)).thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
    when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION)).thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
    when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH)).thenReturn(replyHash);
    when(connection.getInputStream()).thenReturn(in);
    when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt())).thenThrow(new DiskErrorException("No disk space available"));
    underTest.copyFromHost(host);
    verify(ss).reportLocalError(any(IOException.class));
}
Also used: ByteArrayInputStream (java.io.ByteArrayInputStream), DataOutputStream (java.io.DataOutputStream), TaskAttemptID (org.apache.hadoop.mapreduce.TaskAttemptID), DiskErrorException (org.apache.hadoop.util.DiskChecker.DiskErrorException), Text (org.apache.hadoop.io.Text), ByteArrayOutputStream (java.io.ByteArrayOutputStream), IOException (java.io.IOException), Test (org.junit.Test)
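
The test fabricates an entire shuffle HTTP exchange in memory: a ShuffleHeader is written through a DataOutputStream into a byte buffer, which the mocked connection then replays as its input stream. A hedged sketch of just that serialization round-trip, outside any mock; the map attempt string and lengths are hypothetical.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.mapreduce.task.reduce.ShuffleHeader;

public class ShuffleHeaderRoundTrip {
    public static void main(String[] args) throws IOException {
        // Hypothetical header: map attempt id, compressed and uncompressed
        // lengths, and the target reduce partition.
        ShuffleHeader header = new ShuffleHeader("attempt_0_0_m_0_0", 10, 10, 1);
        ByteArrayOutputStream bout = new ByteArrayOutputStream();
        header.write(new DataOutputStream(bout));
        // Read the header back exactly as a Fetcher would from the response body.
        ShuffleHeader parsed = new ShuffleHeader();
        parsed.readFields(new DataInputStream(
                new ByteArrayInputStream(bout.toByteArray())));
        System.out.println("round-trip ok, header is " + bout.size() + " bytes");
    }
}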

Aggregations

DataOutputStream (java.io.DataOutputStream): 2957
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 1309
IOException (java.io.IOException): 1019
Test (org.junit.Test): 633
DataInputStream (java.io.DataInputStream): 611
FileOutputStream (java.io.FileOutputStream): 426
ByteArrayInputStream (java.io.ByteArrayInputStream): 409
File (java.io.File): 279
BufferedOutputStream (java.io.BufferedOutputStream): 227
UnitTest (org.apache.geode.test.junit.categories.UnitTest): 172
URL (java.net.URL): 149
InputStreamReader (java.io.InputStreamReader): 144
BufferedReader (java.io.BufferedReader): 140
Path (org.apache.hadoop.fs.Path): 137
DataInput (java.io.DataInput): 124
ArrayList (java.util.ArrayList): 122
HttpURLConnection (java.net.HttpURLConnection): 121
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 117
FileInputStream (java.io.FileInputStream): 107
InputStream (java.io.InputStream): 107