Use of java.io.DataOutputStream in project hadoop by apache.
The class TypedBytesReduceApp, method go.
public void go() throws IOException {
  TypedBytesInput tbinput = new TypedBytesInput(new DataInputStream(System.in));
  TypedBytesOutput tboutput = new TypedBytesOutput(new DataOutputStream(System.out));
  Object prevKey = null;
  int sum = 0;
  Object key = tbinput.read();
  while (key != null) {
    if (prevKey != null && !key.equals(prevKey)) {
      // write key
      tboutput.write(prevKey);
      // write value
      tboutput.write(sum);
      sum = 0;
    }
    sum += (Integer) tbinput.read();
    prevKey = key;
    key = tbinput.read();
  }
  tboutput.write(prevKey);
  tboutput.write(sum);
  System.out.flush();
}
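The typed-bytes streams above are thin wrappers over DataInputStream and DataOutputStream. As a hedged, self-contained sketch of the same group-consecutive-keys-and-sum loop using only java.io (the writeUTF-key / writeInt-value record format here is an illustrative assumption, not the typed-bytes wire format):

import java.io.*;

public class SumByKeySketch {
  // Reads (UTF key, int value) pairs until EOF and emits one summed
  // (key, sum) pair per run of consecutive equal keys.
  public static void sum(DataInputStream in, DataOutputStream out) throws IOException {
    String prevKey = null;
    int sum = 0;
    while (true) {
      String key;
      try {
        key = in.readUTF();
      } catch (EOFException eof) {
        break; // end of input
      }
      int value = in.readInt();
      if (prevKey != null && !key.equals(prevKey)) {
        out.writeUTF(prevKey);
        out.writeInt(sum);
        sum = 0;
      }
      sum += value;
      prevKey = key;
    }
    if (prevKey != null) {
      out.writeUTF(prevKey);
      out.writeInt(sum);
    }
    out.flush();
  }
}

Like the original, the sketch assumes the input is sorted so that equal keys arrive adjacent to one another; that is what the streaming framework guarantees before the reduce side runs.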
Use of java.io.DataOutputStream in project hadoop by apache.
The class ZKRMStateStore, method storeRMDTMasterKeyState.
@Override
protected synchronized void storeRMDTMasterKeyState(DelegationKey delegationKey)
    throws Exception {
  String nodeCreatePath = getNodePath(dtMasterKeysRootPath,
      DELEGATION_KEY_PREFIX + delegationKey.getKeyId());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Storing RMDelegationKey_" + delegationKey.getKeyId());
  }
  ByteArrayOutputStream os = new ByteArrayOutputStream();
  try (DataOutputStream fsOut = new DataOutputStream(os)) {
    delegationKey.write(fsOut);
    safeCreate(nodeCreatePath, os.toByteArray(), zkAcl, CreateMode.PERSISTENT);
  }
}
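The try-with-resources block shows the standard idiom for turning any Hadoop Writable (DelegationKey implements it) into a byte[]: write through a DataOutputStream into a ByteArrayOutputStream, then take the buffer. A minimal sketch of that idiom as a reusable helper; the class and method names here are hypothetical:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Writable;

// Hypothetical helper; WritableBytes and toBytes are illustrative names.
final class WritableBytes {
  // Serialize any Writable (e.g. a DelegationKey) into a byte array.
  static byte[] toBytes(Writable writable) throws IOException {
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(os)) {
      writable.write(out); // Writable.write(DataOutput) defines the wire format
    }
    return os.toByteArray(); // all bytes are in the buffer once the stream closes
  }
}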
Use of java.io.DataOutputStream in project hadoop by apache.
The class ZKRMStateStore, method addStoreOrUpdateOps.
private void addStoreOrUpdateOps(SafeTransaction trx,
    RMDelegationTokenIdentifier rmDTIdentifier, Long renewDate,
    boolean isUpdate) throws Exception {
  // store RM delegation token
  String nodeCreatePath = getNodePath(delegationTokensRootPath,
      DELEGATION_TOKEN_PREFIX + rmDTIdentifier.getSequenceNumber());
  RMDelegationTokenIdentifierData identifierData =
      new RMDelegationTokenIdentifierData(rmDTIdentifier, renewDate);
  ByteArrayOutputStream seqOs = new ByteArrayOutputStream();
  try (DataOutputStream seqOut = new DataOutputStream(seqOs)) {
    if (isUpdate) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Updating RMDelegationToken_" + rmDTIdentifier.getSequenceNumber());
      }
      trx.setData(nodeCreatePath, identifierData.toByteArray(), -1);
    } else {
      trx.create(nodeCreatePath, identifierData.toByteArray(), zkAcl,
          CreateMode.PERSISTENT);
      // Update Sequence number only while storing DT
      seqOut.writeInt(rmDTIdentifier.getSequenceNumber());
      if (LOG.isDebugEnabled()) {
        LOG.debug("Storing " + dtSequenceNumberPath + ". SequenceNumber: "
            + rmDTIdentifier.getSequenceNumber());
      }
      trx.setData(dtSequenceNumberPath, seqOs.toByteArray(), -1);
    }
  }
}
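Because the sequence number is written with DataOutputStream.writeInt, the znode payload is exactly four big-endian bytes, and any reader must decode it with the matching DataInputStream.readInt. A minimal round-trip sketch under that assumption (class and method names hypothetical):

import java.io.*;

final class SeqNumCodec {
  // Encode a sequence number the way the transaction above stores it.
  static byte[] encode(int seq) throws IOException {
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(os)) {
      out.writeInt(seq); // four big-endian bytes
    }
    return os.toByteArray();
  }

  // Decode the znode payload back into an int.
  static int decode(byte[] data) throws IOException {
    try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(data))) {
      return in.readInt();
    }
  }
}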
Use of java.io.DataOutputStream in project flink by apache.
The class HDFSCopyUtilitiesTest, method testCopyToLocal.
/**
 * This test verifies that a Hadoop configuration is correctly read by the external
 * process copying tools.
 */
@Test
public void testCopyToLocal() throws Exception {
  File testFolder = tempFolder.newFolder();
  File originalFile = new File(testFolder, "original");
  File copyFile = new File(testFolder, "copy");
  try (DataOutputStream out = new DataOutputStream(new FileOutputStream(originalFile))) {
    out.writeUTF("Hello there, 42!");
  }
  HDFSCopyToLocal.copyToLocal(
      new Path("file://" + originalFile.getAbsolutePath()).toUri(), copyFile);
  try (DataInputStream in = new DataInputStream(new FileInputStream(copyFile))) {
    assertTrue(in.readUTF().equals("Hello there, 42!"));
  }
}
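The test pairs writeUTF with readUTF: writeUTF emits a two-byte length prefix followed by the string in modified UTF-8, so reading the file back any other way would see the prefix bytes too. A self-contained sketch of just that round trip, without the HDFS copy step:

import java.io.*;

class Utf8RoundTrip {
  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("utf-demo", ".bin");
    f.deleteOnExit();
    try (DataOutputStream out = new DataOutputStream(new FileOutputStream(f))) {
      out.writeUTF("Hello there, 42!"); // 2-byte length prefix + modified UTF-8
    }
    try (DataInputStream in = new DataInputStream(new FileInputStream(f))) {
      System.out.println(in.readUTF()); // prints: Hello there, 42!
    }
  }
}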
Use of java.io.DataOutputStream in project hadoop by apache.
The class SaslOutputStream, method write.
/**
 * Writes <code>len</code> bytes from the specified byte array starting at
 * offset <code>off</code> to this output stream.
 *
 * @param inBuf the data.
 * @param off the start offset in the data.
 * @param len the number of bytes to write.
 * @exception IOException if an I/O error occurs.
 */
@Override
public void write(byte[] inBuf, int off, int len) throws IOException {
  if (!useWrap) {
    outStream.write(inBuf, off, len);
    return;
  }
  try {
    if (saslServer != null) {
      // using saslServer
      saslToken = saslServer.wrap(inBuf, off, len);
    } else {
      // using saslClient
      saslToken = saslClient.wrap(inBuf, off, len);
    }
  } catch (SaslException se) {
    try {
      disposeSasl();
    } catch (SaslException ignored) {
    }
    throw se;
  }
  if (saslToken != null) {
    ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
    DataOutputStream dout = new DataOutputStream(byteOut);
    dout.writeInt(saslToken.length);
    outStream.write(byteOut.toByteArray());
    outStream.write(saslToken, 0, saslToken.length);
    saslToken = null;
  }
}
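The final block frames each wrapped token as a four-byte length (via writeInt) followed by the raw token bytes. A sketch of the matching read side is below; this standalone reader illustrates the framing and is not Hadoop's actual SaslInputStream implementation:

import java.io.DataInputStream;
import java.io.IOException;

final class FrameReader {
  // Reads one length-prefixed frame as written by the code above.
  static byte[] readFrame(DataInputStream in) throws IOException {
    int len = in.readInt();   // the 4-byte length written via writeInt
    byte[] token = new byte[len];
    in.readFully(token);      // the wrapped token bytes that follow
    return token;             // the caller would then unwrap via saslClient/saslServer
  }
}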