Use of java.io.DataOutputStream in project hadoop by apache.
The class LeveldbTimelineStateStore, method buildTokenMasterKeyData.
private static byte[] buildTokenMasterKeyData(DelegationKey key) throws IOException {
    ByteArrayOutputStream memStream = new ByteArrayOutputStream();
    DataOutputStream dataStream = new DataOutputStream(memStream);
    try {
        key.write(dataStream);
        dataStream.close();
    } finally {
        IOUtils.cleanup(LOG, dataStream);
    }
    return memStream.toByteArray();
}
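The same serialize-to-bytes pattern can be sketched with only JDK classes, with try-with-resources standing in for the explicit close()/IOUtils.cleanup() pair; the int/String payload here is illustrative, not the DelegationKey format:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class ToBytesSketch {
    // Serializes a version number and a name into a byte array.
    static byte[] buildKeyData(int version, String name) throws IOException {
        ByteArrayOutputStream memStream = new ByteArrayOutputStream();
        // try-with-resources closes the stream even if a write fails
        try (DataOutputStream dataStream = new DataOutputStream(memStream)) {
            dataStream.writeInt(version);
            dataStream.writeUTF(name);
        }
        return memStream.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        byte[] data = buildKeyData(1, "master-key");
        System.out.println(data.length + " bytes");
    }
}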
Use of java.io.DataOutputStream in project flink by apache.
The class MigrationV0ToV1Test, method testSavepointMigrationV0ToV1.
/**
* Simple test of savepoint methods.
*/
@Test
public void testSavepointMigrationV0ToV1() throws Exception {
    String target = tmp.getRoot().getAbsolutePath();
    assertEquals(0, tmp.getRoot().listFiles().length);
    long checkpointId = ThreadLocalRandom.current().nextLong(Integer.MAX_VALUE);
    int numTaskStates = 4;
    int numSubtaskStates = 16;
    Collection<org.apache.flink.migration.runtime.checkpoint.TaskState> expected = createTaskStatesOld(numTaskStates, numSubtaskStates);
    SavepointV0 savepoint = new SavepointV0(checkpointId, expected);
    assertEquals(SavepointV0.VERSION, savepoint.getVersion());
    assertEquals(checkpointId, savepoint.getCheckpointId());
    assertEquals(expected, savepoint.getOldTaskStates());
    assertFalse(savepoint.getOldTaskStates().isEmpty());
    Exception latestException = null;
    Path path = null;
    FSDataOutputStream fdos = null;
    FileSystem fs = null;
    try {
        // Try to create a FS output stream
        for (int attempt = 0; attempt < 10; attempt++) {
            path = new Path(target, FileUtils.getRandomFilename("savepoint-"));
            if (fs == null) {
                fs = FileSystem.get(path.toUri());
            }
            try {
                fdos = fs.create(path, false);
                break;
            } catch (Exception e) {
                latestException = e;
            }
        }
        if (fdos == null) {
            throw new IOException("Failed to create file output stream at " + path, latestException);
        }
        try (DataOutputStream dos = new DataOutputStream(fdos)) {
            dos.writeInt(SavepointStore.MAGIC_NUMBER);
            dos.writeInt(savepoint.getVersion());
            SavepointV0Serializer.INSTANCE.serializeOld(savepoint, dos);
        }
        ClassLoader cl = Thread.currentThread().getContextClassLoader();
        Savepoint sp = SavepointStore.loadSavepoint(path.toString(), cl);
        int t = 0;
        for (TaskState taskState : sp.getTaskStates()) {
            for (int p = 0; p < taskState.getParallelism(); ++p) {
                SubtaskState subtaskState = taskState.getState(p);
                ChainedStateHandle<StreamStateHandle> legacyOperatorState = subtaskState.getLegacyOperatorState();
                for (int c = 0; c < legacyOperatorState.getLength(); ++c) {
                    StreamStateHandle stateHandle = legacyOperatorState.get(c);
                    try (InputStream is = stateHandle.openInputStream()) {
                        Tuple4<Integer, Integer, Integer, Integer> expTestState = new Tuple4<>(0, t, p, c);
                        Tuple4<Integer, Integer, Integer, Integer> actTestState;
                        // check function state
                        if (p % 4 != 0) {
                            assertEquals(1, is.read());
                            actTestState = InstantiationUtil.deserializeObject(is, cl);
                            assertEquals(expTestState, actTestState);
                        } else {
                            assertEquals(0, is.read());
                        }
                        // check operator state
                        expTestState.f0 = 1;
                        actTestState = InstantiationUtil.deserializeObject(is, cl);
                        assertEquals(expTestState, actTestState);
                    }
                }
                // check keyed state
                KeyGroupsStateHandle keyGroupsStateHandle = subtaskState.getManagedKeyedState();
                if (t % 3 != 0) {
                    assertEquals(1, keyGroupsStateHandle.getNumberOfKeyGroups());
                    assertEquals(p, keyGroupsStateHandle.getGroupRangeOffsets().getKeyGroupRange().getStartKeyGroup());
                    ByteStreamStateHandle stateHandle = (ByteStreamStateHandle) keyGroupsStateHandle.getDelegateStateHandle();
                    HashMap<String, KvStateSnapshot<?, ?, ?, ?>> testKeyedState = MigrationInstantiationUtil.deserializeObject(stateHandle.getData(), cl);
                    assertEquals(2, testKeyedState.size());
                    for (KvStateSnapshot<?, ?, ?, ?> snapshot : testKeyedState.values()) {
                        MemValueState.Snapshot<?, ?, ?> castedSnapshot = (MemValueState.Snapshot<?, ?, ?>) snapshot;
                        byte[] data = castedSnapshot.getData();
                        assertEquals(t, data[0]);
                        assertEquals(p, data[1]);
                    }
                } else {
                    assertEquals(null, keyGroupsStateHandle);
                }
            }
            ++t;
        }
        savepoint.dispose();
    } finally {
        // Dispose
        SavepointStore.removeSavepointFile(path.toString());
    }
}
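The write side of this test follows a common framing pattern: a magic number and a format version are written through DataOutputStream before the payload, and the reader validates them first. A minimal, self-contained sketch of that round trip, where the constant value and payload are illustrative, not Flink's actual SavepointStore format:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class SavepointHeaderSketch {
    // Illustrative constant, not Flink's actual magic number
    static final int MAGIC_NUMBER = 0xCAFEBABE;

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (DataOutputStream dos = new DataOutputStream(buf)) {
            dos.writeInt(MAGIC_NUMBER);
            dos.writeInt(1); // format version
            // ... serializer-specific payload would follow here
        }
        try (DataInputStream dis = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()))) {
            if (dis.readInt() != MAGIC_NUMBER) {
                throw new IOException("Unexpected magic number");
            }
            int version = dis.readInt();
            System.out.println("format version " + version);
        }
    }
}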
Use of java.io.DataOutputStream in project hadoop by apache.
The class TestLogsCLI, method uploadEmptyContainerLogIntoRemoteDir.
private static void uploadEmptyContainerLogIntoRemoteDir(UserGroupInformation ugi, Configuration configuration, List<String> rootLogDirs, NodeId nodeId, ContainerId containerId, Path appDir, FileSystem fs) throws Exception {
    Path path = new Path(appDir, LogAggregationUtils.getNodeString(nodeId) + System.currentTimeMillis());
    AggregatedLogFormat.LogWriter writer = new AggregatedLogFormat.LogWriter(configuration, path, ugi);
    writer.writeApplicationOwner(ugi.getUserName());
    Map<ApplicationAccessType, String> appAcls = new HashMap<ApplicationAccessType, String>();
    appAcls.put(ApplicationAccessType.VIEW_APP, ugi.getUserName());
    writer.writeApplicationACLs(appAcls);
    DataOutputStream out = writer.getWriter().prepareAppendKey(-1);
    new AggregatedLogFormat.LogKey(containerId).write(out);
    out.close();
    out = writer.getWriter().prepareAppendValue(-1);
    new AggregatedLogFormat.LogValue(rootLogDirs, containerId, UserGroupInformation.getCurrentUser().getShortUserName()).write(out, new HashSet<File>());
    out.close();
    writer.close();
}
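The writer above appends a key, then the matching value, each through its own DataOutputStream. The key-then-value idea can be sketched with plain JDK streams as length-prefixed records; this is just the shape of the pattern, not the actual AggregatedLogFormat/TFile wire format:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class KeyValueAppendSketch {
    // Appends one length-prefixed key/value pair to the stream.
    static void append(DataOutputStream out, String key, byte[] value) throws IOException {
        out.writeUTF(key);          // length-prefixed UTF-8 key
        out.writeInt(value.length); // explicit value length
        out.write(value);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(buf)) {
            append(out, "container_01_000001", "log line\n".getBytes("UTF-8"));
        }
        System.out.println(buf.size() + " bytes written");
    }
}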
Use of java.io.DataOutputStream in project hadoop by apache.
The class TextOutputFormat, method getRecordWriter.
public RecordWriter<K, V> getRecordWriter(FileSystem ignored, JobConf job, String name, Progressable progress) throws IOException {
    boolean isCompressed = getCompressOutput(job);
    String keyValueSeparator = job.get("mapreduce.output.textoutputformat.separator", "\t");
    if (!isCompressed) {
        Path file = FileOutputFormat.getTaskOutputPath(job, name);
        FileSystem fs = file.getFileSystem(job);
        FSDataOutputStream fileOut = fs.create(file, progress);
        return new LineRecordWriter<K, V>(fileOut, keyValueSeparator);
    } else {
        Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(job, GzipCodec.class);
        // create the named codec
        CompressionCodec codec = ReflectionUtils.newInstance(codecClass, job);
        // build the filename including the extension
        Path file = FileOutputFormat.getTaskOutputPath(job, name + codec.getDefaultExtension());
        FileSystem fs = file.getFileSystem(job);
        FSDataOutputStream fileOut = fs.create(file, progress);
        return new LineRecordWriter<K, V>(new DataOutputStream(codec.createOutputStream(fileOut)), keyValueSeparator);
    }
}
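The compressed branch wraps the codec's output stream in a DataOutputStream so the record writer can keep using the same write methods regardless of compression. A JDK-only sketch of the same wrapping, using java.util.zip.GZIPOutputStream in place of a Hadoop CompressionCodec (the file name is illustrative):

import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.zip.GZIPOutputStream;

public class CompressedWriterSketch {
    public static void main(String[] args) throws IOException {
        File file = new File("part-00000.gz"); // illustrative output name
        try (DataOutputStream out = new DataOutputStream(
                new GZIPOutputStream(new FileOutputStream(file)))) {
            // same key<TAB>value line shape that LineRecordWriter produces
            out.writeBytes("key1\tvalue1\n");
            out.writeBytes("key2\tvalue2\n");
        }
        System.out.println("wrote " + file.length() + " compressed bytes");
    }
}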
Use of java.io.DataOutputStream in project hadoop by apache.
The class MapReduceTestUtil, method createJob.
public static Job createJob(Configuration conf, Path inDir, Path outDir, int numInputFiles, int numReds, String input) throws IOException {
    Job job = Job.getInstance(conf);
    FileSystem fs = FileSystem.get(conf);
    if (fs.exists(outDir)) {
        fs.delete(outDir, true);
    }
    if (fs.exists(inDir)) {
        fs.delete(inDir, true);
    }
    fs.mkdirs(inDir);
    for (int i = 0; i < numInputFiles; ++i) {
        DataOutputStream file = fs.create(new Path(inDir, "part-" + i));
        file.writeBytes(input);
        file.close();
    }
    FileInputFormat.setInputPaths(job, inDir);
    FileOutputFormat.setOutputPath(job, outDir);
    job.setNumReduceTasks(numReds);
    return job;
}
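A JDK-only sketch of the input-file loop above, assuming local java.io files instead of a Hadoop FileSystem, and closing each stream with try-with-resources rather than an explicit close():

import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

public class TestInputSketch {
    // Writes numFiles identical input files into dir, mirroring createJob's loop.
    static void writeInputFiles(File dir, int numFiles, String input) throws IOException {
        if (!dir.isDirectory() && !dir.mkdirs()) {
            throw new IOException("could not create " + dir);
        }
        for (int i = 0; i < numFiles; ++i) {
            try (DataOutputStream file = new DataOutputStream(
                    new FileOutputStream(new File(dir, "part-" + i)))) {
                file.writeBytes(input);
            }
        }
    }

    public static void main(String[] args) throws IOException {
        writeInputFiles(new File("test-input"), 4, "hello world\n");
    }
}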