Use of java.io.DataOutputStream in project hadoop by Apache.
From the class ZKDelegationTokenSecretManager, method addOrUpdateToken.
private void addOrUpdateToken(TokenIdent ident,
    DelegationTokenInformation info, boolean isUpdate) throws Exception {
  String nodeCreatePath = getNodePath(ZK_DTSM_TOKENS_ROOT,
      DELEGATION_TOKEN_PREFIX + ident.getSequenceNumber());
  ByteArrayOutputStream tokenOs = new ByteArrayOutputStream();
  DataOutputStream tokenOut = new DataOutputStream(tokenOs);
  ByteArrayOutputStream seqOs = new ByteArrayOutputStream();
  try {
    // Serialize the token identifier, renew date, and length-prefixed
    // password into the in-memory buffer.
    ident.write(tokenOut);
    tokenOut.writeLong(info.getRenewDate());
    tokenOut.writeInt(info.getPassword().length);
    tokenOut.write(info.getPassword());
    if (LOG.isDebugEnabled()) {
      LOG.debug((isUpdate ? "Updating " : "Storing ")
          + "ZKDTSMDelegationToken_" + ident.getSequenceNumber());
    }
    // Persist the serialized bytes to ZooKeeper: update the existing znode,
    // or create a new persistent one.
    if (isUpdate) {
      // Note: Curator's forPath() returns a Stat, so this setVersion(-1)
      // mutates the returned Stat rather than the request; setData()
      // already defaults to version -1 (match any version).
      zkClient.setData().forPath(nodeCreatePath, tokenOs.toByteArray())
          .setVersion(-1);
    } else {
      zkClient.create().withMode(CreateMode.PERSISTENT)
          .forPath(nodeCreatePath, tokenOs.toByteArray());
    }
  } finally {
    // seqOs is never written to here; closing a ByteArrayOutputStream
    // is a no-op in any case.
    seqOs.close();
  }
}
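The znode payload written above has a fixed layout: the serialized identifier, an 8-byte renew date, a 4-byte password length, then the password bytes. Below is a minimal sketch of the matching decoder using mirror-image DataInputStream calls; the readTokenRecord helper is hypothetical and not Hadoop source, and it accepts any Writable for the identifier.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.io.Writable;

// Hypothetical helper (not Hadoop source): reads back the record
// written by addOrUpdateToken.
static void readTokenRecord(byte[] znodeData, Writable ident) throws IOException {
  DataInputStream in = new DataInputStream(new ByteArrayInputStream(znodeData));
  ident.readFields(in);                      // mirrors ident.write(tokenOut)
  long renewDate = in.readLong();            // mirrors writeLong(info.getRenewDate())
  byte[] password = new byte[in.readInt()];  // mirrors the writeInt length prefix
  in.readFully(password);                    // mirrors write(info.getPassword())
  // renewDate and password can now be used to rebuild the token information
}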
Use of java.io.DataOutputStream in project hadoop by Apache.
From the class TestBlockReplacement, method replaceBlock.
/*
* Replace block
*/
private boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
    DatanodeInfo sourceProxy, DatanodeInfo destination,
    StorageType targetStorageType, Status opStatus)
    throws IOException, SocketException {
  Socket sock = new Socket();
  try {
    sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
        HdfsConstants.READ_TIMEOUT);
    sock.setKeepAlive(true);
    // send request
    DataOutputStream out = new DataOutputStream(sock.getOutputStream());
    new Sender(out).replaceBlock(block, targetStorageType,
        BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(),
        sourceProxy);
    out.flush();
    // receive response
    DataInputStream reply = new DataInputStream(sock.getInputStream());
    BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
    // Keep reading delimited responses while the datanode reports IN_PROGRESS.
    while (proto.getStatus() == Status.IN_PROGRESS) {
      proto = BlockOpResponseProto.parseDelimitedFrom(reply);
    }
    return proto.getStatus() == opStatus;
  } finally {
    sock.close();
  }
}
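The out.flush() before reading the reply is the essential part of this pattern: when a DataOutputStream wraps a buffered or socket stream, the request must be pushed out before the caller blocks on the response. A minimal, self-contained illustration of the same write/flush/read exchange; the echo service at the given host and port is hypothetical, not part of the Hadoop test.

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.Socket;

// Hypothetical: the same write/flush/read pattern against a simple echo service.
static boolean sendAndCheck(String host, int port, String request,
    String expectedReply) throws IOException {
  try (Socket sock = new Socket(host, port)) {
    DataOutputStream out = new DataOutputStream(sock.getOutputStream());
    out.writeUTF(request);   // frame the request (2-byte length + UTF bytes)
    out.flush();             // push it out before blocking on the response
    DataInputStream in = new DataInputStream(sock.getInputStream());
    return expectedReply.equals(in.readUTF());
  }
}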
Use of java.io.DataOutputStream in project hadoop by Apache.
From the class TestCredentials, method testReadWriteStorage.
@SuppressWarnings("unchecked")
@Test
public <T extends TokenIdentifier> void testReadWriteStorage()
    throws IOException, NoSuchAlgorithmException {
  // create tokenStorage object
  Credentials ts = new Credentials();
  Token<T> token1 = new Token();
  Token<T> token2 = new Token();
  Text service1 = new Text("service1");
  Text service2 = new Text("service2");
  Collection<Text> services = new ArrayList<Text>();
  services.add(service1);
  services.add(service2);
  token1.setService(service1);
  token2.setService(service2);
  ts.addToken(new Text("sometoken1"), token1);
  ts.addToken(new Text("sometoken2"), token2);
  // create keys and put them in
  final KeyGenerator kg = KeyGenerator.getInstance(DEFAULT_HMAC_ALGORITHM);
  String alias = "alias";
  Map<Text, byte[]> m = new HashMap<Text, byte[]>(10);
  for (int i = 0; i < 10; i++) {
    Key key = kg.generateKey();
    m.put(new Text(alias + i), key.getEncoded());
    ts.addSecretKey(new Text(alias + i), key.getEncoded());
  }
  // create a file to store the credentials
  File tmpFileName = new File(tmpDir, "tokenStorageTest");
  DataOutputStream dos = new DataOutputStream(new FileOutputStream(tmpFileName));
  ts.write(dos);
  dos.close();
  // open and read it back
  DataInputStream dis = new DataInputStream(new FileInputStream(tmpFileName));
  ts = new Credentials();
  ts.readFields(dis);
  dis.close();
  // get the tokens and compare the services
  Collection<Token<? extends TokenIdentifier>> list = ts.getAllTokens();
  assertEquals("getAllTokens should return collection of size 2",
      2, list.size());
  boolean foundFirst = false;
  boolean foundSecond = false;
  for (Token<? extends TokenIdentifier> token : list) {
    if (token.getService().equals(service1)) {
      foundFirst = true;
    }
    if (token.getService().equals(service2)) {
      foundSecond = true;
    }
  }
  assertTrue("Tokens for services service1 and service2 must be present",
      foundFirst && foundSecond);
  // compare secret keys
  int mapLen = m.size();
  assertEquals("wrong number of keys in the storage", mapLen,
      ts.numberOfSecretKeys());
  for (Text a : m.keySet()) {
    byte[] kTS = ts.getSecretKey(a);
    byte[] kLocal = m.get(a);
    assertTrue("keys don't match for " + a,
        WritableComparator.compareBytes(kTS, 0, kTS.length,
            kLocal, 0, kLocal.length) == 0);
  }
  tmpFileName.delete();
}
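The same write/readFields round trip works without touching the filesystem, since Credentials implements Hadoop's Writable contract. A minimal sketch using an in-memory buffer; the roundTrip helper is illustrative, not part of the Hadoop test suite.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.security.Credentials;

// Illustrative helper: round-trips Credentials through memory instead of a file.
static Credentials roundTrip(Credentials original) throws IOException {
  ByteArrayOutputStream buf = new ByteArrayOutputStream();
  try (DataOutputStream dos = new DataOutputStream(buf)) {
    original.write(dos);  // same serialization the file-based test exercises
  }
  Credentials copy = new Credentials();
  copy.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
  return copy;
}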
Use of java.io.DataOutputStream in project hadoop by Apache.
From the class TestCredentials, method writeCredentialsProto.
private void writeCredentialsProto(Credentials creds, String filename)
    throws IOException, FileNotFoundException {
  // try-with-resources ensures the file handle is closed after the write
  try (DataOutputStream dos = new DataOutputStream(
      new FileOutputStream(new File(tmpDir, filename)))) {
    creds.writeProto(dos);
  }
}
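A matching reader would mirror this with a DataInputStream. A hedged sketch, assuming Credentials exposes a readProto(DataInput) counterpart to writeProto (the method name here is an assumption by analogy):

// Sketch only: reads back what writeCredentialsProto wrote, assuming a
// readProto counterpart to writeProto exists on Credentials.
private Credentials readCredentialsProto(String filename) throws IOException {
  try (DataInputStream dis = new DataInputStream(
      new FileInputStream(new File(tmpDir, filename)))) {
    Credentials creds = new Credentials();
    creds.readProto(dis);
    return creds;
  }
}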
Use of java.io.DataOutputStream in project hadoop by Apache.
From the class BlockReaderRemote, method newBlockReader.
/**
 * Create a new BlockReader specifically to satisfy a read.
 * This method also sends the OP_READ_BLOCK request.
 *
 * @param file File location
 * @param block The block object
 * @param blockToken The block token for security
 * @param startOffset The read offset, relative to block head
 * @param len The number of bytes to read
 * @param verifyChecksum Whether to verify checksum
 * @param clientName Client name
 * @param peer The Peer to use
 * @param datanodeID The DatanodeID this peer is connected to
 * @param peerCache Cache of peers for reuse
 * @param cachingStrategy The caching strategy to request
 * @param tracer Tracer for instrumentation
 * @param networkDistance Network distance to the datanode
 * @return New BlockReader instance; errors are reported by throwing
 *         IOException rather than by returning null
 */
public static BlockReader newBlockReader(String file, ExtendedBlock block,
    Token<BlockTokenIdentifier> blockToken, long startOffset, long len,
    boolean verifyChecksum, String clientName, Peer peer,
    DatanodeID datanodeID, PeerCache peerCache,
    CachingStrategy cachingStrategy, Tracer tracer,
    int networkDistance) throws IOException {
  // in and out will be closed when sock is closed (by the caller)
  final DataOutputStream out = new DataOutputStream(
      new BufferedOutputStream(peer.getOutputStream()));
  new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
      verifyChecksum, cachingStrategy);
  //
  // Get bytes in block
  //
  DataInputStream in = new DataInputStream(peer.getInputStream());
  BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
      PBHelperClient.vintPrefixed(in));
  checkSuccess(status, peer, block, file);
  ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo();
  DataChecksum checksum = DataTransferProtoUtil.fromProto(
      checksumInfo.getChecksum());
  // Warning when we get CHECKSUM_NULL?
  // Read the first chunk offset: the datanode must start the stream at a
  // checksum-chunk boundary no more than one chunk before startOffset.
  long firstChunkOffset = checksumInfo.getChunkOffset();
  if (firstChunkOffset < 0 || firstChunkOffset > startOffset
      || firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
    throw new IOException("BlockReader: error in first chunk offset ("
        + firstChunkOffset + ") startOffset is " + startOffset
        + " for file " + file);
  }
  return new BlockReaderRemote(file, block.getBlockId(), checksum,
      verifyChecksum, startOffset, firstChunkOffset, len, peer, datanodeID,
      peerCache, tracer, networkDistance);
}
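The offset sanity check above simply verifies chunk alignment: the stream must begin at the checksum-chunk boundary at or below the requested offset. A small self-contained illustration of the accepted window; the helper name is ours, not Hadoop's.

// Illustrative: the window accepted by the check in newBlockReader. For a
// request at startOffset with chunk size bytesPerChecksum, the server's
// firstChunkOffset must satisfy 0 <= firstChunkOffset <= startOffset and
// firstChunkOffset > startOffset - bytesPerChecksum.
static boolean isValidFirstChunkOffset(long firstChunkOffset,
    long startOffset, int bytesPerChecksum) {
  return firstChunkOffset >= 0
      && firstChunkOffset <= startOffset
      && firstChunkOffset > startOffset - bytesPerChecksum;
}

// e.g. startOffset = 5000 with 512-byte chunks: the only chunk-aligned value
// in the window (4488, 5000] is 4608, so the datanode should report 4608.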