Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestBlockTokenWithDFS, method testWrite.
/**
* Test that the WRITE operation can handle token expiration when
* re-establishing the pipeline is needed.
*/
@Test
public void testWrite() throws Exception {
MiniDFSCluster cluster = null;
int numDataNodes = 2;
Configuration conf = getConf(numDataNodes);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
final NameNode nn = cluster.getNameNode();
final BlockManager bm = nn.getNamesystem().getBlockManager();
final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
// set a short token lifetime (1 second)
SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
Path fileToWrite = new Path(FILE_TO_WRITE);
FileSystem fs = cluster.getFileSystem();
byte[] expected = generateBytes(FILE_SIZE);
FSDataOutputStream stm = writeFile(fs, fileToWrite, (short) numDataNodes, BLOCK_SIZE);
// write a partial block
int mid = expected.length - 1;
stm.write(expected, 0, mid);
stm.hflush();
/*
* wait till token used in stm expires
*/
Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
while (!SecurityTestUtil.isBlockTokenExpired(token)) {
try {
Thread.sleep(10);
} catch (InterruptedException ignored) {
}
}
// remove a datanode to force re-establishing pipeline
cluster.stopDataNode(0);
// write the rest of the file
stm.write(expected, mid, expected.length - mid);
stm.close();
// check if write is successful
FSDataInputStream in4 = fs.open(fileToWrite);
assertTrue(checkFile1(in4, expected));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
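The helpers used above (writeFile, generateBytes, checkFile1) are defined elsewhere in TestBlockTokenWithDFS and are not part of this excerpt. As a rough, hypothetical sketch of the kind of sequential-read verification checkFile1 performs against an FSDataInputStream (the method name and details below are assumptions, not the project's actual helper):

  import java.io.IOException;
  import java.util.Arrays;
  import org.apache.hadoop.fs.FSDataInputStream;

  // Hypothetical stand-in for checkFile1: read the stream sequentially from
  // the beginning and compare the bytes against the expected content.
  static boolean checkFileSequential(FSDataInputStream in, byte[] expected)
      throws IOException {
    byte[] actual = new byte[expected.length];
    in.seek(0);                              // start from the beginning
    in.readFully(actual, 0, actual.length);  // sequential read path
    return Arrays.equals(actual, expected);
  }

In the real helper a read failure (for example, an expired token that is not transparently refreshed) has to surface as a negative result rather than an unhandled exception; the sketch above ignores that detail.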
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestBlockTokenWithDFS, method doTestRead.
protected void doTestRead(Configuration conf, MiniDFSCluster cluster, boolean isStriped) throws Exception {
final int numDataNodes = cluster.getDataNodes().size();
final NameNode nn = cluster.getNameNode();
final NamenodeProtocols nnProto = nn.getRpcServer();
final BlockManager bm = nn.getNamesystem().getBlockManager();
final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
// set a short token lifetime (1 second) initially
SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
Path fileToRead = new Path(FILE_TO_READ);
FileSystem fs = cluster.getFileSystem();
byte[] expected = generateBytes(FILE_SIZE);
createFile(fs, fileToRead, expected);
/*
* setup for testing expiration handling of cached tokens
*/
// read using blockSeekTo(). Acquired tokens are cached in in1
FSDataInputStream in1 = fs.open(fileToRead);
assertTrue(checkFile1(in1, expected));
// read using blockSeekTo(). Acquired tokens are cached in in2
FSDataInputStream in2 = fs.open(fileToRead);
assertTrue(checkFile1(in2, expected));
// read using fetchBlockByteRange(). Acquired tokens are cached in in3
FSDataInputStream in3 = fs.open(fileToRead);
assertTrue(checkFile2(in3, expected));
/*
* testing READ interface on DN using a BlockReader
*/
DFSClient client = null;
try {
client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
} finally {
if (client != null)
client.close();
}
List<LocatedBlock> locatedBlocks = nnProto.getBlockLocations(FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
// first block
LocatedBlock lblock = locatedBlocks.get(0);
// verify token is not expired
assertFalse(isBlockTokenExpired(lblock));
// read with valid token, should succeed
tryRead(conf, lblock, true);
while (!isBlockTokenExpired(lblock)) {
try {
Thread.sleep(10);
} catch (InterruptedException ignored) {
}
}
/*
* continue testing READ interface on DN using a BlockReader
*/
// verify token is expired
assertTrue(isBlockTokenExpired(lblock));
// read should fail
tryRead(conf, lblock, false);
// use a valid new token
bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
// read should succeed
tryRead(conf, lblock, true);
// use a token with wrong blockID
long rightId = lblock.getBlock().getBlockId();
long wrongId = rightId + 1;
lblock.getBlock().setBlockId(wrongId);
bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
lblock.getBlock().setBlockId(rightId);
// read should fail
tryRead(conf, lblock, false);
// use a token with wrong access modes
bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.WRITE);
// read should fail
tryRead(conf, lblock, false);
// set a long token lifetime for future tokens
SecurityTestUtil.setBlockTokenLifetime(sm, 600 * 1000L);
/*
* testing that when cached tokens are expired, DFSClient will re-fetch
* tokens transparently for READ.
*/
// confirm all tokens cached in in1 are expired by now
List<LocatedBlock> lblocks = DFSTestUtil.getAllBlocks(in1);
for (LocatedBlock blk : lblocks) {
assertTrue(isBlockTokenExpired(blk));
}
// verify blockSeekTo() is able to re-fetch token transparently
in1.seek(0);
assertTrue(checkFile1(in1, expected));
// confirm all tokens cached in in2 are expired by now
List<LocatedBlock> lblocks2 = DFSTestUtil.getAllBlocks(in2);
for (LocatedBlock blk : lblocks2) {
assertTrue(isBlockTokenExpired(blk));
}
// verify blockSeekTo() is able to re-fetch token transparently (testing via another interface method)
if (isStriped) {
// striped block doesn't support seekToNewSource
in2.seek(0);
} else {
assertTrue(in2.seekToNewSource(0));
}
assertTrue(checkFile1(in2, expected));
// confirm all tokens cached in in3 are expired by now
List<LocatedBlock> lblocks3 = DFSTestUtil.getAllBlocks(in3);
for (LocatedBlock blk : lblocks3) {
assertTrue(isBlockTokenExpired(blk));
}
// verify fetchBlockByteRange() is able to re-fetch token transparently
assertTrue(checkFile2(in3, expected));
/*
* testing that after datanodes are restarted on the same ports, cached
* tokens should still work and there is no need to fetch new tokens from
* namenode. This test should run while namenode is down (to make sure no
* new tokens can be fetched from namenode).
*/
// restart datanodes on the same ports that they currently use
assertTrue(cluster.restartDataNodes(true));
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
cluster.shutdownNameNode(0);
// confirm tokens cached in in1 are still valid
lblocks = DFSTestUtil.getAllBlocks(in1);
for (LocatedBlock blk : lblocks) {
assertFalse(isBlockTokenExpired(blk));
}
// verify blockSeekTo() still works (forced to use cached tokens)
in1.seek(0);
assertTrue(checkFile1(in1, expected));
// confirm tokens cached in in2 are still valid
lblocks2 = DFSTestUtil.getAllBlocks(in2);
for (LocatedBlock blk : lblocks2) {
assertFalse(isBlockTokenExpired(blk));
}
// verify blockSeekTo() still works (forced to use cached tokens)
if (isStriped) {
in2.seek(0);
} else {
in2.seekToNewSource(0);
}
assertTrue(checkFile1(in2, expected));
// confirm tokens cached in in3 are still valid
lblocks3 = DFSTestUtil.getAllBlocks(in3);
for (LocatedBlock blk : lblocks3) {
assertFalse(isBlockTokenExpired(blk));
}
// verify fetchBlockByteRange() still works (forced to use cached tokens)
assertTrue(checkFile2(in3, expected));
/*
* testing that when namenode is restarted, cached tokens should still
* work and there is no need to fetch new tokens from namenode. Like the
* previous test, this test should also run while namenode is down. The
* setup for this test depends on the previous test.
*/
// restart the namenode and then shut it down for test
cluster.restartNameNode(0);
cluster.shutdownNameNode(0);
// verify blockSeekTo() still works (forced to use cached tokens)
in1.seek(0);
assertTrue(checkFile1(in1, expected));
// verify again blockSeekTo() still works (forced to use cached tokens)
if (isStriped) {
in2.seek(0);
} else {
in2.seekToNewSource(0);
}
assertTrue(checkFile1(in2, expected));
// verify fetchBlockByteRange() still works (forced to use cached tokens)
assertTrue(checkFile2(in3, expected));
/*
* testing that after both namenode and datanodes got restarted (namenode
* first, followed by datanodes), DFSClient can't access DN without
* re-fetching tokens and is able to re-fetch tokens transparently. The
* setup of this test depends on the previous test.
*/
// restore the cluster and restart the datanodes for test
cluster.restartNameNode(0);
assertTrue(cluster.restartDataNodes(true));
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
// shutdown namenode so that DFSClient can't get new tokens from namenode
cluster.shutdownNameNode(0);
// verify blockSeekTo() fails (cached tokens become invalid)
in1.seek(0);
assertFalse(checkFile1(in1, expected));
// verify fetchBlockByteRange() fails (cached tokens become invalid)
assertFalse(checkFile2(in3, expected));
// restart the namenode to allow DFSClient to re-fetch tokens
cluster.restartNameNode(0);
// verify blockSeekTo() works again (by transparently re-fetching
// tokens from namenode)
in1.seek(0);
assertTrue(checkFile1(in1, expected));
if (isStriped) {
in2.seek(0);
} else {
in2.seekToNewSource(0);
}
assertTrue(checkFile1(in2, expected));
// verify fetchBlockByteRange() works again (by transparently
// re-fetching tokens from namenode)
assertTrue(checkFile2(in3, expected));
/*
* testing that when datanodes are restarted on different ports, DFSClient
* is able to re-fetch tokens transparently to connect to them
*/
// restart datanodes on newly assigned ports
assertTrue(cluster.restartDataNodes(false));
cluster.waitActive();
assertEquals(numDataNodes, cluster.getDataNodes().size());
// verify blockSeekTo() is able to re-fetch token transparently
in1.seek(0);
assertTrue(checkFile1(in1, expected));
// verify blockSeekTo() is able to re-fetch token transparently
if (isStriped) {
in2.seek(0);
} else {
in2.seekToNewSource(0);
}
assertTrue(checkFile1(in2, expected));
// verify fetchBlockByteRange() is able to re-fetch token transparently
assertTrue(checkFile2(in3, expected));
}
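doTestRead distinguishes two read paths of FSDataInputStream: checkFile1 exercises the sequential path (blockSeekTo), while checkFile2 exercises the positional-read path (fetchBlockByteRange). Complementing the sketch after testWrite, here is a hypothetical outline of a positional-read check; the method name and structure are assumptions, not the project's code:

  import java.io.IOException;
  import java.util.Arrays;
  import org.apache.hadoop.fs.FSDataInputStream;

  // Hypothetical stand-in for checkFile2: use the pread API, which goes
  // through fetchBlockByteRange() and does not move the stream's position.
  static boolean checkFilePositional(FSDataInputStream in, byte[] expected)
      throws IOException {
    byte[] actual = new byte[expected.length];
    int off = 0;
    while (off < actual.length) {
      // read(long position, byte[] buffer, int offset, int length)
      int n = in.read(off, actual, off, actual.length - off);
      if (n < 0) {
        return false;  // hit EOF before the expected number of bytes
      }
      off += n;
    }
    return Arrays.equals(actual, expected);
  }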
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestUpdatePipelineWithSnapshots, method testUpdatePipelineAfterDelete.
// Regression test for HDFS-6647.
@Test
public void testUpdatePipelineAfterDelete() throws Exception {
Configuration conf = new HdfsConfiguration();
Path file = new Path("/test-file");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
FileSystem fs = cluster.getFileSystem();
NamenodeProtocols namenode = cluster.getNameNodeRpc();
DFSOutputStream out = null;
try {
// Create a file and make sure a block is allocated for it.
out = (DFSOutputStream) (fs.create(file).getWrappedStream());
out.write(1);
out.hflush();
// Create a snapshot that includes the file.
SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs, new Path("/"), "s1");
// Grab the block info of this file for later use.
FSDataInputStream in = null;
ExtendedBlock oldBlock = null;
try {
in = fs.open(file);
oldBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
} finally {
IOUtils.closeStream(in);
}
// Allocate a new block ID/gen stamp so we can simulate pipeline
// recovery.
String clientName = ((DistributedFileSystem) fs).getClient().getClientName();
LocatedBlock newLocatedBlock = namenode.updateBlockForPipeline(oldBlock, clientName);
ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(), oldBlock.getBlockId(), oldBlock.getNumBytes(), newLocatedBlock.getBlock().getGenerationStamp());
// Delete the file from the present FS. It will still exist in the
// previously-created snapshot. This will log an OP_DELETE for the
// file in question.
fs.delete(file, true);
// Attempt to update the pipeline for the now-deleted file. The NameNode
// should reject the call rather than allow an OP_UPDATE_BLOCKS to be
// logged for the file in question.
try {
namenode.updatePipeline(clientName, oldBlock, newBlock, newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());
} catch (IOException ioe) {
// expected: the file no longer exists in the active namespace
assertExceptionContains("does not exist or it is not under construction", ioe);
}
// Make sure the NN can restart with the edit logs as we have them now.
cluster.restartNameNode(true);
} finally {
IOUtils.closeStream(out);
}
} finally {
cluster.shutdown();
}
}
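The block information is obtained here through DFSTestUtil.getAllBlocks(in). Outside of the test utilities, the same information is reachable from an FSDataInputStream opened on a DistributedFileSystem by casting it to HdfsDataInputStream; the small helper below is a sketch under that assumption, not code from this test:

  import java.io.IOException;
  import java.util.List;
  import org.apache.hadoop.fs.FSDataInputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
  import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
  import org.apache.hadoop.hdfs.protocol.LocatedBlock;
  import org.apache.hadoop.io.IOUtils;

  // Sketch: grab the first block of a file much like the test does, but
  // without going through DFSTestUtil. The cast only holds for streams
  // opened from a DistributedFileSystem.
  static ExtendedBlock firstBlockOf(FileSystem fs, Path file) throws IOException {
    FSDataInputStream in = null;
    try {
      in = fs.open(file);
      List<LocatedBlock> blocks = ((HdfsDataInputStream) in).getAllBlocks();
      return blocks.get(0).getBlock();
    } finally {
      IOUtils.closeStream(in);
    }
  }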
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestShortCircuitCache, method testShmBasedStaleness.
@Test(timeout = 60000)
public void testShmBasedStaleness() throws Exception {
BlockReaderTestUtil.enableShortCircuitShmTracing();
TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
Configuration conf = createShortCircuitConf("testShmBasedStaleness", sockDir);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
final ShortCircuitCache cache = fs.getClient().getClientContext().getShortCircuitCache();
String TEST_FILE = "/test_file";
final int TEST_FILE_LEN = 8193;
final int SEED = 0xFADED;
DFSTestUtil.createFile(fs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
FSDataInputStream fis = fs.open(new Path(TEST_FILE));
int first = fis.read();
final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path(TEST_FILE));
Assert.assertTrue(first != -1);
cache.accept(new CacheVisitor() {
@Override
public void visit(int numOutstandingMmaps, Map<ExtendedBlockId, ShortCircuitReplica> replicas, Map<ExtendedBlockId, InvalidToken> failedLoads, LinkedMap evictable, LinkedMap evictableMmapped) {
ShortCircuitReplica replica = replicas.get(ExtendedBlockId.fromExtendedBlock(block));
Assert.assertNotNull(replica);
Assert.assertTrue(replica.getSlot().isValid());
}
});
// Stop the DataNode. This will close the socket keeping the client's
// shared memory segment alive, and make it stale.
cluster.getDataNodes().get(0).shutdown();
cache.accept(new CacheVisitor() {
@Override
public void visit(int numOutstandingMmaps, Map<ExtendedBlockId, ShortCircuitReplica> replicas, Map<ExtendedBlockId, InvalidToken> failedLoads, LinkedMap evictable, LinkedMap evictableMmapped) {
ShortCircuitReplica replica = replicas.get(ExtendedBlockId.fromExtendedBlock(block));
Assert.assertNotNull(replica);
Assert.assertFalse(replica.getSlot().isValid());
}
});
cluster.shutdown();
sockDir.close();
}
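ShortCircuitCache.CacheVisitor has a single visit method, so, assuming that interface is unchanged, the two near-identical anonymous visitors above could be collapsed into one lambda-based helper on Java 8+. A possible refactoring sketch, not code from the test:

  import org.apache.hadoop.hdfs.ExtendedBlockId;
  import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
  import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
  import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
  import org.junit.Assert;

  // Sketch: one helper asserts the expected validity of the replica's shared
  // memory slot, replacing the two anonymous CacheVisitor classes.
  static void assertSlotValidity(ShortCircuitCache cache, ExtendedBlock block,
      boolean expectValid) {
    cache.accept((numOutstandingMmaps, replicas, failedLoads,
        evictable, evictableMmapped) -> {
      ShortCircuitReplica replica =
          replicas.get(ExtendedBlockId.fromExtendedBlock(block));
      Assert.assertNotNull(replica);
      Assert.assertEquals(expectValid, replica.getSlot().isValid());
    });
  }

The two cache.accept(...) calls in the test would then become assertSlotValidity(cache, block, true) before the DataNode shutdown and assertSlotValidity(cache, block, false) after it.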
Use of org.apache.hadoop.fs.FSDataInputStream in project hadoop by apache.
The class TestSnapshotFileLength, method testSnapshotfileLength.
/**
* Test that we cannot read a file beyond its snapshot length
* when accessing it via a snapshot path.
*
*/
@Test(timeout = 300000)
public void testSnapshotfileLength() throws Exception {
hdfs.mkdirs(sub);
int bytesRead;
byte[] buffer = new byte[BLOCKSIZE * 8];
int origLen = BLOCKSIZE + 1;
int toAppend = BLOCKSIZE;
FSDataInputStream fis = null;
FileStatus fileStatus = null;
// Create and write a file.
Path file1 = new Path(sub, file1Name);
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, 0, BLOCKSIZE, REPLICATION, SEED);
DFSTestUtil.appendFile(hdfs, file1, origLen);
// Create a snapshot on the parent directory.
hdfs.allowSnapshot(sub);
hdfs.createSnapshot(sub, snapshot1);
Path file1snap1 = SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
final FileChecksum snapChksum1 = hdfs.getFileChecksum(file1snap1);
assertThat("file and snapshot file checksums are not equal", hdfs.getFileChecksum(file1), is(snapChksum1));
// Append to the file.
FSDataOutputStream out = hdfs.append(file1);
// HDFS-8150: Fetching the checksum of a file under construction should fail.
try {
hdfs.getFileChecksum(file1);
fail("getFileChecksum should fail for files " + "with blocks under construction");
} catch (IOException ie) {
assertTrue(ie.getMessage().contains("Fail to get checksum, since file " + file1 + " is under construction."));
}
assertThat("snapshot checksum (post-open for append) has changed", hdfs.getFileChecksum(file1snap1), is(snapChksum1));
try {
AppendTestUtil.write(out, 0, toAppend);
// Test reading from snapshot of file that is open for append
byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs, file1snap1);
assertThat("Wrong data size in snapshot.", dataFromSnapshot.length, is(origLen));
// Verify that checksum didn't change
assertThat("snapshot checksum (post-append) has changed", hdfs.getFileChecksum(file1snap1), is(snapChksum1));
} finally {
out.close();
}
assertThat("file and snapshot file checksums (post-close) are equal", hdfs.getFileChecksum(file1), not(snapChksum1));
assertThat("snapshot file checksum (post-close) has changed", hdfs.getFileChecksum(file1snap1), is(snapChksum1));
// Make sure we can read the entire file via its non-snapshot path.
fileStatus = hdfs.getFileStatus(file1);
assertThat(fileStatus.getLen(), is((long) origLen + toAppend));
fis = hdfs.open(file1);
bytesRead = fis.read(0, buffer, 0, buffer.length);
assertThat(bytesRead, is(origLen + toAppend));
fis.close();
// Try to open the file via its snapshot path.
fis = hdfs.open(file1snap1);
fileStatus = hdfs.getFileStatus(file1snap1);
assertThat(fileStatus.getLen(), is((long) origLen));
// Make sure we can only read up to the snapshot length.
bytesRead = fis.read(0, buffer, 0, buffer.length);
assertThat(bytesRead, is(origLen));
fis.close();
byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs, file1snap1);
assertThat("Wrong data size in snapshot.", dataFromSnapshot.length, is(origLen));
}
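The snapshot-length check above relies on a single positional read returning exactly origLen bytes, which holds for this small, single-block-per-call setup. In general, FSDataInputStream#read(long, byte[], int, int) may return fewer bytes than requested, so a more defensive version of the check would drain the positional read until EOF. A sketch, not part of the test:

  import java.io.IOException;
  import org.apache.hadoop.fs.FSDataInputStream;

  // Sketch: keep issuing positional reads until EOF. For a snapshot path the
  // total comes out to the snapshot length, regardless of how many bytes any
  // single read() call returns.
  static int readAllFromPosition(FSDataInputStream in, long pos, byte[] buf)
      throws IOException {
    int total = 0;
    while (total < buf.length) {
      int n = in.read(pos + total, buf, total, buf.length - total);
      if (n < 0) {
        break;  // EOF reached
      }
      total += n;
    }
    return total;
  }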