use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
the class DataNode method checkBlockToken.
private void checkBlockToken(ExtendedBlock block,
    Token<BlockTokenIdentifier> token, AccessMode accessMode)
    throws IOException {
  if (isBlockTokenEnabled) {
    BlockTokenIdentifier id = new BlockTokenIdentifier();
    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
    DataInputStream in = new DataInputStream(buf);
    id.readFields(in);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Got: " + id.toString());
    }
    blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode);
  }
}
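The same deserialization pattern can be reused outside DataNode when a test or debugging utility needs to look inside a block token. Below is a minimal sketch that relies only on the calls already shown above (Token.getIdentifier() and the Writable readFields contract); the class and method names are illustrative, not part of Hadoop.
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

// Illustrative helper, not a Hadoop class: decodes the identifier carried by a
// block token so its fields can be inspected or logged.
public final class BlockTokenDecoder {
  private BlockTokenDecoder() {
  }

  static BlockTokenIdentifier decode(Token<BlockTokenIdentifier> token)
      throws IOException {
    BlockTokenIdentifier id = new BlockTokenIdentifier();
    try (DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(token.getIdentifier()))) {
      id.readFields(in);
    }
    return id;
  }
}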
use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
the class DFSTestUtil method transferRbw.
/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b,
    final DFSClient dfsClient, final DatanodeInfo... datanodes)
    throws IOException {
  assertEquals(2, datanodes.length);
  final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
  try (Socket s = DataStreamer.createSocketForPipeline(datanodes[0],
          datanodes.length, dfsClient);
      DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
          NetUtils.getOutputStream(s, writeTimeout),
          DFSUtilClient.getSmallBufferSize(dfsClient.getConfiguration())));
      DataInputStream in = new DataInputStream(NetUtils.getInputStream(s))) {
    // send the request
    new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(),
        dfsClient.clientName, new DatanodeInfo[] { datanodes[1] },
        new StorageType[] { StorageType.DEFAULT });
    out.flush();
    return BlockOpResponseProto.parseDelimitedFrom(in);
  }
}
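A hedged sketch of how a test such as TestTransferRbw might call this utility and check the datanode's reply. The MiniDFSCluster and replica setup are elided, and the Status/getStatus accessors are assumed from the DataTransferProtos protobuf classes rather than shown in the snippet above; the method would live inside a test class with the usual HDFS test imports.
// Illustrative only: assumes a running MiniDFSCluster has produced an RBW
// replica of block b on sourceDatanode; that setup is elided here.
static void verifyRbwTransfer(ExtendedBlock b, DFSClient dfsClient,
    DatanodeInfo sourceDatanode, DatanodeInfo targetDatanode) throws Exception {
  BlockOpResponseProto response =
      DFSTestUtil.transferRbw(b, dfsClient, sourceDatanode, targetDatanode);
  // A SUCCESS reply means the replica-being-written was copied to the target.
  Assert.assertEquals(Status.SUCCESS, response.getStatus());
}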
use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
the class TestDecommissionWithStriped method testDecommission.
private void testDecommission(int writeBytes, int storageCount,
    int decomNodeCount, String filename) throws IOException, Exception {
  Path ecFile = new Path(ecDir, filename);
  writeStripedFile(dfs, ecFile, writeBytes);
  List<DatanodeInfo> decommisionNodes = getDecommissionDatanode(dfs, ecFile,
      writeBytes, decomNodeCount);
  int deadDecomissioned = fsn.getNumDecomDeadDataNodes();
  int liveDecomissioned = fsn.getNumDecomLiveDataNodes();
  List<LocatedBlock> lbs = ((HdfsDataInputStream) dfs.open(ecFile))
      .getAllBlocks();
  // prepare expected block index and token list.
  List<HashMap<DatanodeInfo, Byte>> locToIndexList = new ArrayList<>();
  List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList =
      new ArrayList<>();
  prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
  // Decommission node. Verify that node is decommissioned.
  decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
  assertEquals(deadDecomissioned, fsn.getNumDecomDeadDataNodes());
  assertEquals(liveDecomissioned + decommisionNodes.size(),
      fsn.getNumDecomLiveDataNodes());
  // Ensure decommissioned datanode is not automatically shutdown
  DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
  assertEquals("All datanodes must be alive", numDNs,
      client.datanodeReport(DatanodeReportType.LIVE).length);
  assertNull(checkFile(dfs, ecFile, storageCount, decommisionNodes, numDNs));
  StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
      null, blockGroupSize);
  assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
  cleanupFile(dfs, ecFile);
}
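The helpers prepareBlockIndexAndTokenList and assertBlockIndexAndTokenPosition are defined elsewhere in TestDecommissionWithStriped and not shown in this snippet. The following is a hedged sketch of what the first one could look like, assuming each LocatedBlock of the striped file is a LocatedStripedBlock exposing getBlockIndices() and getBlockTokens(); it illustrates the intent of recording which block index and block token each datanode holds, and is not the test's actual code.
// Illustrative sketch, not the original helper: for each striped block group,
// remember the block index and block token held by every datanode location so
// the same mapping can be verified again after decommission.
private static void prepareBlockIndexAndTokenList(List<LocatedBlock> lbs,
    List<HashMap<DatanodeInfo, Byte>> locToIndexList,
    List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
  for (LocatedBlock lb : lbs) {
    HashMap<DatanodeInfo, Byte> locToIndex = new HashMap<>();
    HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken =
        new HashMap<>();
    LocatedStripedBlock stripedBlock = (LocatedStripedBlock) lb;
    DatanodeInfo[] locations = stripedBlock.getLocations();
    byte[] blockIndices = stripedBlock.getBlockIndices();
    Token<BlockTokenIdentifier>[] blockTokens = stripedBlock.getBlockTokens();
    for (int i = 0; i < locations.length; i++) {
      locToIndex.put(locations[i], blockIndices[i]);
      locToToken.put(locations[i], blockTokens[i]);
    }
    locToIndexList.add(locToIndex);
    locToTokenList.add(locToToken);
  }
}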
use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
the class TestBlockReaderLocalLegacy method testBlockReaderLocalLegacyWithAppend.
@Test(timeout = 20000)
public void testBlockReaderLocalLegacyWithAppend() throws Exception {
  final short REPL_FACTOR = 1;
  final HdfsConfiguration conf = getConfiguration(null);
  conf.setBoolean(
      HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  final DistributedFileSystem dfs = cluster.getFileSystem();
  final Path path = new Path("/testBlockReaderLocalLegacy");
  DFSTestUtil.createFile(dfs, path, 10, REPL_FACTOR, 0);
  DFSTestUtil.waitReplication(dfs, path, REPL_FACTOR);
  final ClientDatanodeProtocol proxy;
  final Token<BlockTokenIdentifier> token;
  final ExtendedBlock originalBlock;
  final long originalGS;
  {
    final LocatedBlock lb = cluster.getNameNode().getRpcServer()
        .getBlockLocations(path.toString(), 0, 1).get(0);
    proxy = DFSUtilClient.createClientDatanodeProtocolProxy(
        lb.getLocations()[0], conf, 60000, false);
    token = lb.getBlockToken();
    // get block and generation stamp
    final ExtendedBlock blk = new ExtendedBlock(lb.getBlock());
    originalBlock = new ExtendedBlock(blk);
    originalGS = originalBlock.getGenerationStamp();
    // test getBlockLocalPathInfo
    final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(blk, token);
    Assert.assertEquals(originalGS, info.getBlock().getGenerationStamp());
  }
  {
    // append one byte
    FSDataOutputStream out = dfs.append(path);
    out.write(1);
    out.close();
  }
  {
    // get new generation stamp
    final LocatedBlock lb = cluster.getNameNode().getRpcServer()
        .getBlockLocations(path.toString(), 0, 1).get(0);
    final long newGS = lb.getBlock().getGenerationStamp();
    Assert.assertTrue(newGS > originalGS);
    // getBlockLocalPathInfo using the original block.
    Assert.assertEquals(originalGS, originalBlock.getGenerationStamp());
    final BlockLocalPathInfo info =
        proxy.getBlockLocalPathInfo(originalBlock, token);
    Assert.assertEquals(newGS, info.getBlock().getGenerationStamp());
  }
  cluster.shutdown();
}
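The point of the final block is that the token obtained before the append still authorizes getBlockLocalPathInfo after the generation stamp has changed. A hedged fragment that could sit inside the same test to make that explicit; it reuses the token and originalBlock variables above, decodes the identifier the same way DataNode.checkBlockToken does, and assumes BlockTokenIdentifier exposes getBlockPoolId() and getBlockId().
// Illustrative check (not in the original test): the identifier inside the
// block token pins the block pool and block id, but carries no generation
// stamp, which is why the pre-append token remains usable after the GS bump.
final BlockTokenIdentifier id = new BlockTokenIdentifier();
try (DataInputStream idIn = new DataInputStream(
    new ByteArrayInputStream(token.getIdentifier()))) {
  id.readFields(idIn);
}
Assert.assertEquals(originalBlock.getBlockPoolId(), id.getBlockPoolId());
Assert.assertEquals(originalBlock.getBlockId(), id.getBlockId());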
use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
the class TestBlockTokenWithDFS method testAppend.
/**
 * Tests that the APPEND operation can handle token expiration when the
 * pipeline needs to be re-established.
 */
@Test
public void testAppend() throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
        .build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    final NameNode nn = cluster.getNameNode();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
    // set a short token lifetime (1 second)
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToAppend = new Path(FILE_TO_APPEND);
    FileSystem fs = cluster.getFileSystem();
    byte[] expected = generateBytes(FILE_SIZE);
    // write a one-byte file
    FSDataOutputStream stm = writeFile(fs, fileToAppend, (short) numDataNodes,
        BLOCK_SIZE);
    stm.write(expected, 0, 1);
    stm.close();
    // open the file again for append
    stm = fs.append(fileToAppend);
    int mid = expected.length - 1;
    stm.write(expected, 1, mid - 1);
    stm.hflush();
    // wait till the block token used by stm expires
    Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }
    // remove a datanode to force re-establishing pipeline
    cluster.stopDataNode(0);
    // append the rest of the file
    stm.write(expected, mid, expected.length - mid);
    stm.close();
    // check if append is successful
    FSDataInputStream in5 = fs.open(fileToAppend);
    assertTrue(checkFile1(in5, expected));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
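The busy-wait above spins until the token expires and has no upper bound of its own. A hedged alternative sketch using GenericTestUtils.waitFor from Hadoop's test utilities, assuming it is available on the test classpath; it reuses the token variable from the test and fails with a timeout instead of hanging if expiration never happens.
// Illustrative alternative to the while loop: poll every 10 ms, give up after
// 10 seconds. The IOException from isBlockTokenExpired is wrapped because the
// waitFor predicate cannot throw checked exceptions.
GenericTestUtils.waitFor(() -> {
  try {
    return SecurityTestUtil.isBlockTokenExpired(token);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}, 10, 10000);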
Aggregations