use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.
the class TestDecommissionWithStriped method testDecommission.
private void testDecommission(int writeBytes, int storageCount, int decomNodeCount, String filename) throws IOException, Exception {
  Path ecFile = new Path(ecDir, filename);
  writeStripedFile(dfs, ecFile, writeBytes);
  List<DatanodeInfo> decommisionNodes = getDecommissionDatanode(dfs, ecFile, writeBytes, decomNodeCount);
  int deadDecomissioned = fsn.getNumDecomDeadDataNodes();
  int liveDecomissioned = fsn.getNumDecomLiveDataNodes();
  List<LocatedBlock> lbs = ((HdfsDataInputStream) dfs.open(ecFile)).getAllBlocks();
  // Prepare the expected block index and token list.
  List<HashMap<DatanodeInfo, Byte>> locToIndexList = new ArrayList<>();
  List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList = new ArrayList<>();
  prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
  // Decommission node. Verify that the node is decommissioned.
  decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
  assertEquals(deadDecomissioned, fsn.getNumDecomDeadDataNodes());
  assertEquals(liveDecomissioned + decommisionNodes.size(), fsn.getNumDecomLiveDataNodes());
  // Ensure the decommissioned datanode is not automatically shut down.
  DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
  assertEquals("All datanodes must be alive", numDNs, client.datanodeReport(DatanodeReportType.LIVE).length);
  assertNull(checkFile(dfs, ecFile, storageCount, decommisionNodes, numDNs));
  StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes, null, blockGroupSize);
  assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
  cleanupFile(dfs, ecFile);
}
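The pattern worth noting above is that DistributedFileSystem#open returns an FSDataInputStream which, on HDFS, can be cast to HdfsDataInputStream so that getAllBlocks() exposes the block locations of the file. A minimal, self-contained sketch of that pattern; the class and method names are illustrative, and an already-configured DistributedFileSystem instance is assumed:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class BlockLocationSnapshot {

  /** Print every replica location of a file, using HdfsDataInputStream#getAllBlocks(). */
  static void printBlockLocations(DistributedFileSystem dfs, Path file) throws IOException {
    // dfs.open() returns an FSDataInputStream; on HDFS it is backed by HdfsDataInputStream.
    try (HdfsDataInputStream in = (HdfsDataInputStream) dfs.open(file)) {
      List<LocatedBlock> blocks = in.getAllBlocks();
      for (LocatedBlock blk : blocks) {
        for (DatanodeInfo dn : blk.getLocations()) {
          System.out.println(blk.getBlock() + " -> " + dn.getXferAddr());
        }
      }
    }
  }
}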
use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.
the class TestDecommissionWithStriped method checkFile.
/**
 * Verify that the number of replicas is as expected for each block in the
 * given file. For blocks with a decommissioned node, verify that their
 * replication is one more than what is specified. For blocks without
 * decommissioned nodes, verify that their replication is equal to what is
 * specified.
 *
 * @param decommissionedNodes
 *          - if null, there is no decommissioned node for this file.
 * @return - null if no failure is found, else an error message string.
 */
private static String checkFile(FileSystem fileSys, Path name, int repl, List<DatanodeInfo> decommissionedNodes, int numDatanodes) throws IOException {
  boolean isNodeDown = decommissionedNodes.size() > 0;
  // Need a raw stream.
  assertTrue("Not HDFS:" + fileSys.getUri(), fileSys instanceof DistributedFileSystem);
  HdfsDataInputStream dis = (HdfsDataInputStream) fileSys.open(name);
  Collection<LocatedBlock> dinfo = dis.getAllBlocks();
  for (LocatedBlock blk : dinfo) {
    // For each block...
    int hasdown = 0;
    DatanodeInfo[] nodes = blk.getLocations();
    for (int j = 0; j < nodes.length; j++) {
      // ...and for each replica:
      LOG.info("Block Locations size={}, locs={}, j={}", nodes.length, nodes[j].toString(), j);
      boolean found = false;
      for (DatanodeInfo datanodeInfo : decommissionedNodes) {
        // Check against the decommissioned list.
        if (isNodeDown && nodes[j].getXferAddr().equals(datanodeInfo.getXferAddr())) {
          found = true;
          hasdown++;
          // A down node must actually be decommissioned.
          if (!nodes[j].isDecommissioned()) {
            return "For block " + blk.getBlock() + " replica on " + nodes[j] + " is given as downnode, but is not decommissioned";
          }
          // A decommissioned node (if any) should only appear at the end of the list.
          if (j < repl) {
            return "For block " + blk.getBlock() + " decommissioned node " + nodes[j] + " was not last node in list: " + (j + 1) + " of " + nodes.length;
          }
          LOG.info("Block " + blk.getBlock() + " replica on " + nodes[j] + " is decommissioned.");
        }
      }
      // Nodes that are not down must not be decommissioned.
      if (!found && nodes[j].isDecommissioned()) {
        return "For block " + blk.getBlock() + " replica on " + nodes[j] + " is unexpectedly decommissioned";
      }
    }
    LOG.info("Block " + blk.getBlock() + " has " + hasdown + " decommissioned replica.");
    if (Math.min(numDatanodes, repl + hasdown) != nodes.length) {
      return "Wrong number of replicas for block " + blk.getBlock() + ": " + nodes.length + ", expected " + Math.min(numDatanodes, repl + hasdown);
    }
  }
  return null;
}
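The nested loops above can be reduced to a small helper that counts, per block, how many replicas sit on decommissioned datanodes. A rough sketch under the same assumptions as before; the helper name and return shape are invented for illustration, not taken from the test:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class DecommissionedReplicaCounter {

  /** Map each block of a file to the number of its replicas hosted on decommissioned nodes. */
  static Map<String, Integer> countDecommissionedReplicas(DistributedFileSystem dfs, Path file)
      throws IOException {
    Map<String, Integer> counts = new HashMap<>();
    try (HdfsDataInputStream in = (HdfsDataInputStream) dfs.open(file)) {
      for (LocatedBlock blk : in.getAllBlocks()) {
        int down = 0;
        for (DatanodeInfo dn : blk.getLocations()) {
          if (dn.isDecommissioned()) {
            down++;
          }
        }
        counts.put(blk.getBlock().toString(), down);
      }
    }
    return counts;
  }
}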
use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.
the class TestExternalBlockReader method testExternalBlockReader.
@Test
public void testExternalBlockReader() throws Exception {
  Configuration conf = new Configuration();
  conf.set(HdfsClientConfigKeys.REPLICA_ACCESSOR_BUILDER_CLASSES_KEY, SyntheticReplicaAccessorBuilder.class.getName());
  conf.setLong(HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  String uuid = UUID.randomUUID().toString();
  conf.set(SYNTHETIC_BLOCK_READER_TEST_UUID_KEY, uuid);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).hosts(new String[] { NetUtils.getLocalHostname() }).build();
  final int TEST_LENGTH = 2047;
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    DFSTestUtil.createFile(dfs, new Path("/a"), TEST_LENGTH, (short) 1, SEED);
    HdfsDataInputStream stream = (HdfsDataInputStream) dfs.open(new Path("/a"));
    byte[] buf = new byte[TEST_LENGTH];
    stream.seek(1000);
    IOUtils.readFully(stream, buf, 1000, TEST_LENGTH - 1000);
    stream.seek(0);
    IOUtils.readFully(stream, buf, 0, 1000);
    byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_LENGTH);
    ReadStatistics stats = stream.getReadStatistics();
    Assert.assertEquals(1024, stats.getTotalShortCircuitBytesRead());
    Assert.assertEquals(2047, stats.getTotalLocalBytesRead());
    Assert.assertEquals(2047, stats.getTotalBytesRead());
    Assert.assertArrayEquals(expected, buf);
    stream.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, new Path("/a"));
    Assert.assertNotNull(block);
    LinkedList<SyntheticReplicaAccessor> accessorList = accessors.get(uuid);
    Assert.assertNotNull(accessorList);
    Assert.assertEquals(3, accessorList.size());
    SyntheticReplicaAccessor accessor = accessorList.get(0);
    Assert.assertTrue(accessor.builder.allowShortCircuit);
    Assert.assertEquals(block.getBlockPoolId(), accessor.builder.blockPoolId);
    Assert.assertEquals(block.getBlockId(), accessor.builder.blockId);
    Assert.assertEquals(dfs.getClient().clientName, accessor.builder.clientName);
    Assert.assertEquals("/a", accessor.builder.fileName);
    Assert.assertEquals(block.getGenerationStamp(), accessor.getGenerationStamp());
    Assert.assertTrue(accessor.builder.verifyChecksum);
    Assert.assertEquals(1024L, accessor.builder.visibleLength);
    Assert.assertEquals(24L, accessor.totalRead);
    Assert.assertEquals("", accessor.getError());
    Assert.assertEquals(1, accessor.numCloses);
    byte[] tempBuf = new byte[5];
    Assert.assertEquals(-1, accessor.read(TEST_LENGTH, tempBuf, 0, 0));
    Assert.assertEquals(-1, accessor.read(TEST_LENGTH, tempBuf, 0, tempBuf.length));
    accessors.remove(uuid);
  } finally {
    dfs.close();
    cluster.shutdown();
  }
}
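Beyond the synthetic accessor checks, the test demonstrates reading through HdfsDataInputStream and then inspecting ReadStatistics to see which code path served the bytes. A condensed sketch of that usage follows; the import of ReadStatistics from org.apache.hadoop.hdfs matches recent Hadoop 3.x releases but the class lives as a nested class of DFSInputStream in older versions, and the wrapper class and helper name are illustrative:

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
// In older Hadoop versions ReadStatistics is a nested class of DFSInputStream instead.
import org.apache.hadoop.hdfs.ReadStatistics;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.io.IOUtils;

public class ReadPathProbe {

  /** Read a whole file and report how many bytes were served locally or via short-circuit. */
  static void reportReadPath(DistributedFileSystem dfs, Path file, int length) throws IOException {
    byte[] buf = new byte[length];
    try (HdfsDataInputStream in = (HdfsDataInputStream) dfs.open(file)) {
      IOUtils.readFully(in, buf, 0, length);
      ReadStatistics stats = in.getReadStatistics();
      System.out.println("total=" + stats.getTotalBytesRead()
          + " local=" + stats.getTotalLocalBytesRead()
          + " shortCircuit=" + stats.getTotalShortCircuitBytesRead());
    }
  }
}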
use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.
the class TestScrLazyPersistFiles method testRamDiskShortCircuitRead.
/**
 * Read an in-memory block with short-circuit read.
 * Note: the test uses a faked RAM_DISK backed by physical disk.
 */
@Test
public void testRamDiskShortCircuitRead() throws IOException, InterruptedException, TimeoutException {
  getClusterBuilder().setUseScr(true).build();
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  final int SEED = 0xFADED;
  Path path = new Path("/" + METHOD_NAME + ".dat");
  // Create a file and wait till it is persisted.
  makeRandomTestFile(path, BLOCK_SIZE, true, SEED);
  ensureFileReplicasOnStorageType(path, RAM_DISK);
  waitForMetric("RamDiskBlocksLazyPersisted", 1);
  HdfsDataInputStream fis = (HdfsDataInputStream) fs.open(path);
  // Verify the SCR read counters.
  try {
    byte[] buf = new byte[BUFFER_LENGTH];
    fis.read(0, buf, 0, BUFFER_LENGTH);
    Assert.assertEquals(BUFFER_LENGTH, fis.getReadStatistics().getTotalBytesRead());
    Assert.assertEquals(BUFFER_LENGTH, fis.getReadStatistics().getTotalShortCircuitBytesRead());
  } finally {
    fis.close();
    fis = null;
  }
}
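The read here is a positional read: read(long, byte[], int, int) fetches bytes at an absolute offset without moving the stream position, and the short-circuit counter can then be checked on the same stream. A small sketch of that combination, with an invented helper name and the simplifying assumption that a single pread returns the full requested length:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class PositionalShortCircuitRead {

  /**
   * Positional read from offset 0, then return the short-circuit byte counter.
   * Assumes fs is a DistributedFileSystem; a production reader would loop until
   * the requested length is satisfied rather than failing on a short read.
   */
  static long preadAndCountShortCircuit(FileSystem fs, Path file, int length) throws IOException {
    byte[] buf = new byte[length];
    try (HdfsDataInputStream in = (HdfsDataInputStream) fs.open(file)) {
      int n = in.read(0, buf, 0, length);  // pread: does not move the stream position
      if (n < length) {
        throw new IOException("Short read: got " + n + " of " + length + " bytes");
      }
      return in.getReadStatistics().getTotalShortCircuitBytesRead();
    }
  }
}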
use of org.apache.hadoop.hdfs.client.HdfsDataInputStream in project hadoop by apache.
the class TestShortCircuitLocalRead method checkFileContentDirect.
/** Check the file content, reading as user {@code readingUser}. */
static void checkFileContentDirect(URI uri, Path name, byte[] expected, int readOffset, String readingUser, Configuration conf, boolean legacyShortCircuitFails) throws IOException, InterruptedException {
  // Ensure short circuit is enabled.
  DistributedFileSystem fs = getFileSystem(readingUser, uri, conf);
  ClientContext clientContext = ClientContext.getFromConf(conf);
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  HdfsDataInputStream stm = (HdfsDataInputStream) fs.open(name);
  ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
  IOUtils.skipFully(stm, readOffset);
  actual.limit(3);
  // Read a small number of bytes first.
  int nread = stm.read(actual);
  actual.limit(nread + 2);
  nread += stm.read(actual);
  // Read across a chunk boundary.
  actual.limit(Math.min(actual.capacity(), nread + 517));
  nread += stm.read(actual);
  checkData(arrayFromByteBuffer(actual), readOffset, expected, nread, "A few bytes");
  // Now read the rest of it.
  actual.limit(actual.capacity());
  while (actual.hasRemaining()) {
    int nbytes = stm.read(actual);
    if (nbytes < 0) {
      throw new EOFException("End of file reached before reading fully.");
    }
    nread += nbytes;
  }
  checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
  if (legacyShortCircuitFails) {
    assertTrue(clientContext.getDisableLegacyBlockReaderLocal());
  }
  stm.close();
}
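This test exercises the ByteBuffer read path, stm.read(ByteBuffer), which fills the buffer up to its limit and returns the number of bytes read (or -1 at end of file). A minimal sketch that drains a file into a direct buffer using the same call; the wrapper class and method names are illustrative only:

import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

public class DirectBufferRead {

  /** Fill a direct ByteBuffer of the given size from the start of an HDFS file. */
  static ByteBuffer readIntoDirectBuffer(DistributedFileSystem dfs, Path file, int length)
      throws IOException {
    ByteBuffer buf = ByteBuffer.allocateDirect(length);
    try (HdfsDataInputStream in = (HdfsDataInputStream) dfs.open(file)) {
      while (buf.hasRemaining()) {
        // read(ByteBuffer) fills up to buf's limit and returns -1 at end of file.
        if (in.read(buf) < 0) {
          throw new EOFException("End of file reached before the buffer was filled");
        }
      }
    }
    buf.flip();
    return buf;
  }
}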