Use of java.io.DataOutputStream in project hadoop by apache.
The class TestCopyMapper, method touchFile.
private static void touchFile(String path, boolean createMultipleBlocks,
    ChecksumOpt checksumOpt) throws Exception {
  FileSystem fs;
  DataOutputStream outputStream = null;
  try {
    fs = cluster.getFileSystem();
    final Path qualifiedPath =
        new Path(path).makeQualified(fs.getUri(), fs.getWorkingDirectory());
    // Use the small non-default block size when multiple blocks are requested,
    // otherwise twice the file system's default block size.
    final long blockSize = createMultipleBlocks
        ? NON_DEFAULT_BLOCK_SIZE
        : fs.getDefaultBlockSize(qualifiedPath) * 2;
    FsPermission permission = FsPermission.getFileDefault()
        .applyUMask(FsPermission.getUMask(fs.getConf()));
    // fs.create() returns an FSDataOutputStream, which is a DataOutputStream subclass.
    outputStream = fs.create(qualifiedPath, permission,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 0,
        (short) (fs.getDefaultReplication(qualifiedPath) * 2), blockSize,
        null, checksumOpt);
    byte[] bytes = new byte[DEFAULT_FILE_SIZE];
    outputStream.write(bytes);
    long fileSize = DEFAULT_FILE_SIZE;
    if (createMultipleBlocks) {
      // Keep appending until the file spans at least two blocks.
      while (fileSize < 2 * blockSize) {
        outputStream.write(bytes);
        outputStream.flush();
        fileSize += DEFAULT_FILE_SIZE;
      }
    }
    pathList.add(qualifiedPath);
    ++nFiles;
    FileStatus fileStatus = fs.getFileStatus(qualifiedPath);
    System.out.println(fileStatus.getBlockSize());
    System.out.println(fileStatus.getReplication());
  } finally {
    IOUtils.cleanup(null, outputStream);
  }
}
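A minimal usage sketch from a test body follows; the paths and the ChecksumOpt values are illustrative assumptions, not taken from the TestCopyMapper source (DataChecksum and Options.ChecksumOpt imports are assumed):

  // Hypothetical calls from a test setup; the paths are placeholders.
  touchFile("/tmp/source/1", false, null);  // single-block file, default checksum
  touchFile("/tmp/source/2", true,
      new ChecksumOpt(DataChecksum.Type.CRC32C, 512));  // file spanning multiple blocks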
Use of java.io.DataOutputStream in project hadoop by apache.
The class CompressionEmulationUtil, method getPossiblyCompressedOutputStream.
/**
 * Returns an {@link OutputStream} for a file that might need
 * compression.
 */
static OutputStream getPossiblyCompressedOutputStream(Path file,
    Configuration conf) throws IOException {
  FileSystem fs = file.getFileSystem(conf);
  JobConf jConf = new JobConf(conf);
  if (org.apache.hadoop.mapred.FileOutputFormat.getCompressOutput(jConf)) {
    // get the codec class
    Class<? extends CompressionCodec> codecClass =
        org.apache.hadoop.mapred.FileOutputFormat
            .getOutputCompressorClass(jConf, GzipCodec.class);
    // get the codec implementation
    CompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);
    // add the appropriate extension
    file = file.suffix(codec.getDefaultExtension());
    if (isCompressionEmulationEnabled(conf)) {
      FSDataOutputStream fileOut = fs.create(file, false);
      return new DataOutputStream(codec.createOutputStream(fileOut));
    }
  }
  return fs.create(file, false);
}
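A caller-side sketch of this helper, assuming output compression is switched on through the org.apache.hadoop.mapred.FileOutputFormat setters; the target path is a placeholder, and the compression-emulation enable call is assumed to be the corresponding setter in this class:

  // Sketch only: enable output compression, then write through the returned stream.
  JobConf jobConf = new JobConf(conf);
  org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(jobConf, true);
  org.apache.hadoop.mapred.FileOutputFormat.setOutputCompressorClass(jobConf, GzipCodec.class);
  setCompressionEmulationEnabled(jobConf, true);  // assumed setter in CompressionEmulationUtil
  OutputStream out = getPossiblyCompressedOutputStream(
      new Path("/tmp/gridmix/part-0"), jobConf);  // placeholder path
  try {
    out.write(new byte[1024]);  // bytes are gzip-compressed transparently
  } finally {
    out.close();
  }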
Use of java.io.DataOutputStream in project hadoop by apache.
The class HistoryServerFileSystemStateStoreService, method storeTokenMasterKey.
@Override
public void storeTokenMasterKey(DelegationKey key) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Storing master key " + key.getKeyId());
  }
  Path keyPath = new Path(tokenKeysStatePath,
      TOKEN_MASTER_KEY_FILE_PREFIX + key.getKeyId());
  if (fs.exists(keyPath)) {
    throw new FileAlreadyExistsException(keyPath + " already exists");
  }
  // Serialize the key into memory first, then write the file in one shot.
  ByteArrayOutputStream memStream = new ByteArrayOutputStream();
  DataOutputStream dataStream = new DataOutputStream(memStream);
  try {
    key.write(dataStream);
    dataStream.close();
    dataStream = null;
  } finally {
    // Only closes the stream if the explicit close above did not complete.
    IOUtils.cleanup(LOG, dataStream);
  }
  createNewFile(keyPath, memStream.toByteArray());
}
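The read side of this pattern is symmetric: wrap the stored bytes in a DataInputStream and let the Writable repopulate itself. A minimal sketch, assuming keyBytes already holds the contents read back from keyPath (the variable name is illustrative):

  // Sketch of the inverse operation; 'keyBytes' is assumed to hold the file contents.
  DelegationKey key = new DelegationKey();
  DataInputStream in = new DataInputStream(new ByteArrayInputStream(keyBytes));
  try {
    key.readFields(in);  // DelegationKey is a Writable, so it deserializes itself
  } finally {
    IOUtils.cleanup(LOG, in);
  }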
Use of java.io.DataOutputStream in project hadoop by apache.
The class HistoryServerLeveldbStateStoreService, method storeToken.
@Override
public void storeToken(MRDelegationTokenIdentifier tokenId, Long renewDate)
    throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Storing token " + tokenId.getSequenceNumber());
  }
  // Serialize the token identifier followed by its renew date into memory.
  ByteArrayOutputStream memStream = new ByteArrayOutputStream();
  DataOutputStream dataStream = new DataOutputStream(memStream);
  try {
    tokenId.write(dataStream);
    dataStream.writeLong(renewDate);
    dataStream.close();
    dataStream = null;
  } finally {
    IOUtils.cleanup(LOG, dataStream);
  }
  String dbKey = getTokenDatabaseKey(tokenId);
  try {
    db.put(bytes(dbKey), memStream.toByteArray());
  } catch (DBException e) {
    // Wrap leveldb failures in an IOException to satisfy the state-store contract.
    throw new IOException(e);
  }
}
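Loading the token back reverses the same layout: readFields for the identifier, then readLong for the renew date. A hedged sketch, assuming value holds the bytes fetched from the database for this key (the variable name is illustrative):

  // Sketch of the matching read path; 'value' is assumed to come from db.get(bytes(dbKey)).
  MRDelegationTokenIdentifier tokenId = new MRDelegationTokenIdentifier();
  DataInputStream in = new DataInputStream(new ByteArrayInputStream(value));
  try {
    tokenId.readFields(in);          // identifier first, exactly as it was written
    long renewDate = in.readLong();  // then the renew date
  } finally {
    IOUtils.cleanup(LOG, in);
  }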
Use of java.io.DataOutputStream in project hadoop by apache.
The class TestFetcher, method testReduceOutOfDiskSpace.
@Test
public void testReduceOutOfDiskSpace() throws Throwable {
  LOG.info("testReduceOutOfDiskSpace");
  Fetcher<Text, Text> underTest = new FakeFetcher<Text, Text>(job, id, ss, mm,
      r, metrics, except, key, connection);
  String replyHash = SecureShuffleUtils.generateHash(encHash.getBytes(), key);
  // Serialize a shuffle header into memory and serve it as the HTTP response body.
  ShuffleHeader header = new ShuffleHeader(map1ID.toString(), 10, 10, 1);
  ByteArrayOutputStream bout = new ByteArrayOutputStream();
  header.write(new DataOutputStream(bout));
  ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray());
  when(connection.getResponseCode()).thenReturn(200);
  when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_NAME))
      .thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_NAME);
  when(connection.getHeaderField(ShuffleHeader.HTTP_HEADER_VERSION))
      .thenReturn(ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
  when(connection.getHeaderField(SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH))
      .thenReturn(replyHash);
  when(connection.getInputStream()).thenReturn(in);
  // Simulate the merge manager running out of local disk during reserve().
  when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt()))
      .thenThrow(new DiskErrorException("No disk space available"));
  underTest.copyFromHost(host);
  // The fetcher should report a local error rather than penalizing the remote host.
  verify(ss).reportLocalError(any(IOException.class));
}
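The DataOutputStream usage here is the standard Writable round trip: ShuffleHeader writes itself to a DataOutput and can be rebuilt from the same bytes with readFields. A small stand-alone sketch of that round trip; the map ID string and the length/reduce values are placeholders:

  // Round-trip sketch; the field values are illustrative only.
  ShuffleHeader written = new ShuffleHeader("attempt_123_0001_m_000001_0", 10, 10, 1);
  ByteArrayOutputStream buf = new ByteArrayOutputStream();
  written.write(new DataOutputStream(buf));

  ShuffleHeader read = new ShuffleHeader();
  read.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));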