Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
The class TestPBHelper, method createLocatedBlock.
private LocatedBlock createLocatedBlock() {
  DatanodeInfo[] dnInfos = {
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1",
          AdminStates.DECOMMISSION_INPROGRESS),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
          AdminStates.DECOMMISSIONED),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3",
          AdminStates.NORMAL),
      DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
          AdminStates.NORMAL)
  };
  String[] storageIDs = { "s1", "s2", "s3", "s4" };
  StorageType[] media = {
      StorageType.DISK, StorageType.SSD,
      StorageType.DISK, StorageType.RAM_DISK
  };
  LocatedBlock lb = new LocatedBlock(
      new ExtendedBlock("bp12", 12345, 10, 53),
      dnInfos, storageIDs, media, 5, false, new DatanodeInfo[] {});
  lb.setBlockToken(new Token<BlockTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(),
      new Text("kind"), new Text("service")));
  return lb;
}
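This fixture builds a fully populated LocatedBlock, complete with a block token, so that protobuf round-trip tests have non-trivial state to compare. A minimal sketch of how the fixture might be exercised follows; the conversion method names (convertLocatedBlock, convertLocatedBlockProto) are assumptions for illustration, not taken from the snippet above.

@Test
public void testConvertLocatedBlock() {
  LocatedBlock lb = createLocatedBlock();
  // Round-trip through the protobuf representation (method names assumed).
  LocatedBlockProto proto = PBHelperClient.convertLocatedBlock(lb);
  LocatedBlock lb2 = PBHelperClient.convertLocatedBlockProto(proto);
  // The block token set in the fixture should survive the round trip.
  assertEquals(lb.getBlockToken(), lb2.getBlockToken());
}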
Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
The class PBHelperClient, method convert.
public static BlockTokenSecretProto convert(
    BlockTokenIdentifier blockTokenSecret) {
  BlockTokenSecretProto.Builder builder = BlockTokenSecretProto.newBuilder();
  builder.setExpiryDate(blockTokenSecret.getExpiryDate());
  builder.setKeyId(blockTokenSecret.getKeyId());
  String userId = blockTokenSecret.getUserId();
  if (userId != null) {
    builder.setUserId(userId);
  }
  String blockPoolId = blockTokenSecret.getBlockPoolId();
  if (blockPoolId != null) {
    builder.setBlockPoolId(blockPoolId);
  }
  builder.setBlockId(blockTokenSecret.getBlockId());
  for (BlockTokenIdentifier.AccessMode aMode :
      blockTokenSecret.getAccessModes()) {
    builder.addModes(convert(aMode));
  }
  return builder.build();
}
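This converter copies each field of a BlockTokenIdentifier into its BlockTokenSecretProto counterpart, skipping the nullable userId and blockPoolId. A hedged sketch of calling it follows; the EnumSet-based constructor and the field values are illustrative assumptions, not verbatim from the snippet above.

// Illustrative values only; the constructor shown is an assumption about
// this Hadoop version's BlockTokenIdentifier API.
BlockTokenIdentifier id = new BlockTokenIdentifier(
    "testUser", "bp12", 12345L,
    EnumSet.of(BlockTokenIdentifier.AccessMode.READ,
        BlockTokenIdentifier.AccessMode.WRITE));
BlockTokenSecretProto proto = PBHelperClient.convert(id);
// Non-null fields are copied through verbatim.
assert proto.getBlockId() == 12345L;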
Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
The class TestBlockTokenWithDFS, method testWrite.
/**
 * Tests that a WRITE operation can handle token expiration when
 * re-establishing the pipeline is needed.
 */
@Test
public void testWrite() throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    final NameNode nn = cluster.getNameNode();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
    // set a short token lifetime (1 second)
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToWrite = new Path(FILE_TO_WRITE);
    FileSystem fs = cluster.getFileSystem();
    byte[] expected = generateBytes(FILE_SIZE);
    FSDataOutputStream stm =
        writeFile(fs, fileToWrite, (short) numDataNodes, BLOCK_SIZE);
    // write a partial block
    int mid = expected.length - 1;
    stm.write(expected, 0, mid);
    stm.hflush();
    // wait till the token used in stm expires
    Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }
    // remove a datanode to force re-establishing the pipeline
    cluster.stopDataNode(0);
    // write the rest of the file
    stm.write(expected, mid, expected.length - mid);
    stm.close();
    // check that the write succeeded
    FSDataInputStream in4 = fs.open(fileToWrite);
    assertTrue(checkFile1(in4, expected));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
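The test depends on block access tokens being enabled in the cluster configuration. The helper getConf(numDataNodes) is not shown above, so the sketch below is an assumption about what it plausibly sets, built around the standard dfs.block.access.token.enable switch.

private static Configuration getConf(int numDataNodes) {
  Configuration conf = new HdfsConfiguration();
  // Block tokens are only issued when explicitly enabled.
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  // Small blocks and replication matching the datanode count keep the
  // pipeline re-establishment path easy to trigger (values assumed).
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
  return conf;
}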
Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
The class DFSInputStream, method getBlockReader.
protected BlockReader getBlockReader(LocatedBlock targetBlock,
    long offsetInBlock, long length, InetSocketAddress targetAddr,
    StorageType storageType, DatanodeInfo datanode) throws IOException {
  ExtendedBlock blk = targetBlock.getBlock();
  Token<BlockTokenIdentifier> accessToken = targetBlock.getBlockToken();
  CachingStrategy curCachingStrategy;
  boolean shortCircuitForbidden;
  synchronized (infoLock) {
    curCachingStrategy = cachingStrategy;
    shortCircuitForbidden = shortCircuitForbidden();
  }
  return new BlockReaderFactory(dfsClient.getConf())
      .setInetSocketAddress(targetAddr)
      .setRemotePeerFactory(dfsClient)
      .setDatanodeInfo(datanode)
      .setStorageType(storageType)
      .setFileName(src)
      .setBlock(blk)
      .setBlockToken(accessToken)
      .setStartOffset(offsetInBlock)
      .setVerifyChecksum(verifyChecksum)
      .setClientName(dfsClient.clientName)
      .setLength(length)
      .setCachingStrategy(curCachingStrategy)
      .setAllowShortCircuitLocalReads(!shortCircuitForbidden)
      .setClientCacheContext(dfsClient.getClientContext())
      .setUserGroupInformation(dfsClient.ugi)
      .setConfiguration(dfsClient.getConfiguration())
      .setTracer(dfsClient.getTracer())
      .build();
}
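The factory assembles everything a reader needs: the target block, its access token (which the datanode verifies before serving data), offsets, and caching hints. A minimal sketch of how a caller might consume such a reader follows; the buffer size and loop structure are illustrative, not taken from DFSInputStream.

// Hypothetical caller; variable names mirror the method parameters above.
BlockReader reader = getBlockReader(targetBlock, 0,
    targetBlock.getBlockSize(), targetAddr, storageType, datanode);
try {
  byte[] buf = new byte[4096];
  int n;
  while ((n = reader.read(buf, 0, buf.length)) > 0) {
    // Consume n bytes of block data here.
  }
} finally {
  reader.close();
}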
Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
The class SaslDataTransferServer, method deserializeIdentifier.
/**
 * Deserializes a base64-encoded binary representation of a block access
 * token.
 *
 * @param str String to deserialize
 * @return BlockTokenIdentifier deserialized from str
 * @throws IOException if there is any I/O error
 */
private BlockTokenIdentifier deserializeIdentifier(String str)
    throws IOException {
  BlockTokenIdentifier identifier = new BlockTokenIdentifier();
  identifier.readFields(new DataInputStream(
      new ByteArrayInputStream(Base64.decodeBase64(str))));
  return identifier;
}
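For symmetry, the serializing direction is a short method over the same commons-codec Base64 helper; the sketch below relies on BlockTokenIdentifier inheriting Writable's write(DataOutput) via TokenIdentifier, and is an illustration rather than code from SaslDataTransferServer.

private String serializeIdentifier(BlockTokenIdentifier identifier)
    throws IOException {
  ByteArrayOutputStream buf = new ByteArrayOutputStream();
  // Writable counterpart of the readFields call above.
  identifier.write(new DataOutputStream(buf));
  return Base64.encodeBase64String(buf.toByteArray());
}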