Use of java.io.DataOutputStream in project pinot (by LinkedIn):
class DimensionKey, method toBytes().
/**
 * Serializes this key into a length-prefixed byte array.
 * Wire layout: an int count of dimensions, then for each dimension value an
 * int byte-length followed by that value's UTF-8 bytes. This is the inverse
 * of {@code fromBytes}.
 *
 * @return the serialized form of this key
 * @throws IOException if writing to the in-memory stream fails (not expected
 *         for a ByteArrayOutputStream)
 */
public byte[] toBytes() throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutput out = new DataOutputStream(baos);
    // write the number of dimensions
    out.writeInt(dimensionValues.length);
    // write each value as <length><utf-8 bytes>
    for (String dimensionValue : dimensionValues) {
        byte[] bytes = dimensionValue.getBytes(Charset.forName("utf-8"));
        out.writeInt(bytes.length);
        out.write(bytes);
    }
    baos.close(); // no-op for ByteArrayOutputStream; kept for symmetry
    byte[] byteArray = baos.toByteArray();
    // Sanity check: make sure the produced bytes round-trip through fromBytes.
    // The deserialized key itself is not needed, only that parsing succeeds.
    try {
        fromBytes(byteArray);
    } catch (Exception e) {
        LOGGER.info("input key:{}", Arrays.toString(dimensionValues));
        LOGGER.info("generated:{}", Arrays.toString(byteArray));
        throw new RuntimeException(e);
    }
    return byteArray;
}
Use of java.io.DataOutputStream in project jMonkeyEngine:
class AbstractHeightMap, method save().
/**
 * <code>save</code> writes the heightmap data into a new RAW file denoted by
 * the supplied filename. Each height is written as a single byte (the low
 * 8 bits of the height value), row by row.
 *
 * @param filename
 *            the file name to save the current data as.
 * @return true if the save was successful, false otherwise.
 * @throws Exception
 *             if filename is null.
 */
public boolean save(String filename) throws Exception {
    if (null == filename) {
        throw new Exception("Filename must not be null");
    }
    // Open the streams and send the height data to the file.
    // try-with-resources closes both streams exactly once and in the correct
    // order (inner before outer). The previous version closed them twice and
    // a close() failure in its finally block could mask the real outcome.
    try (FileOutputStream fos = new FileOutputStream(filename);
            DataOutputStream dos = new DataOutputStream(fos)) {
        for (int i = 0; i < size; i++) {
            for (int j = 0; j < size; j++) {
                // write(int) emits only the low byte — RAW files hold 8-bit samples
                dos.write((int) heightData[j + (i * size)]);
            }
        }
    } catch (FileNotFoundException e) {
        logger.log(Level.WARNING, "Error opening file {0}", filename);
        return false;
    } catch (IOException e) {
        logger.log(Level.WARNING, "Error writing to file {0}", filename);
        return false;
    }
    logger.log(Level.FINE, "Saved terrain to {0}", filename);
    return true;
}
Use of java.io.DataOutputStream in project Apache Hadoop:
class FileSystemRMStateStore, method storeRMDTMasterKeyState().
/**
 * Persists the given RM delegation-token master key under the secret-manager
 * root, serialized via {@link DelegationKey#write}. The node name is
 * DELEGATION_KEY_PREFIX + key id.
 *
 * @param masterKey the master key to store
 * @throws Exception if serialization or the (retried) file write fails
 */
@Override
public synchronized void storeRMDTMasterKeyState(DelegationKey masterKey) throws Exception {
    Path nodeCreatePath = getNodePath(rmDTSecretManagerRoot, DELEGATION_KEY_PREFIX + masterKey.getKeyId());
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    try (DataOutputStream fsOut = new DataOutputStream(os)) {
        LOG.info("Storing RMDelegationKey_" + masterKey.getKeyId());
        masterKey.write(fsOut);
    }
    // Snapshot the bytes only after the stream is closed, so any data still
    // held by the DataOutputStream has been flushed into the buffer.
    writeFileWithRetries(nodeCreatePath, os.toByteArray(), true);
}
Use of java.io.DataOutputStream in project Apache Hadoop:
class TestMapReduceChain, method writeFlag().
/**
 * Creates an empty flag file named {@code flag} under {@code flagDir},
 * failing the test if the flag already exists.
 *
 * @param conf job configuration used to obtain the FileSystem
 * @param flag name of the flag file to create
 * @throws IOException if the file system cannot be reached or the file
 *         cannot be created
 */
private static void writeFlag(Configuration conf, String flag) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    if (getFlag(conf, flag)) {
        fail("Flag " + flag + " already exists");
    }
    // try-with-resources guarantees the stream is closed even on failure.
    try (DataOutputStream file = fs.create(new Path(flagDir, flag))) {
        // Intentionally empty: the flag is a zero-length marker file.
    }
}
Use of java.io.DataOutputStream in project Apache Hadoop:
class TestJoinTupleWritable, method testSparseWideWritable().
/**
 * Verifies that a TupleWritable holding more than 64 values, with the
 * written positions spread far apart, survives a serialize/deserialize
 * round trip intact.
 */
@Test
public void testSparseWideWritable() throws Exception {
    Writable[] values = makeRandomWritables(131);
    TupleWritable written = new TupleWritable(values);
    // Mark every 65th slot so the set bits span multiple 64-bit words.
    for (int idx = 0; idx < values.length; idx += 65) {
        written.setWritten(idx);
    }
    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    written.write(new DataOutputStream(sink));
    ByteArrayInputStream source = new ByteArrayInputStream(sink.toByteArray());
    TupleWritable restored = new TupleWritable();
    restored.readFields(new DataInputStream(source));
    assertTrue("Failed to write/read tuple", written.equals(restored));
    assertEquals("All tuple data has not been read from the stream", -1, source.read());
}
Aggregations