use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class StripedFileTestUtil method waitForReconstructionFinished.
/**
 * Wait for the reconstruction to be finished when the file has
 * corrupted blocks.
 */
public static LocatedBlocks waitForReconstructionFinished(Path file,
    DistributedFileSystem fs, int groupSize) throws Exception {
  LOG.info("Waiting for reconstruction to be finished for the file:" + file
      + ", groupSize:" + groupSize);
  final int attempts = 60;
  for (int i = 0; i < attempts; i++) {
    LocatedBlocks locatedBlocks = getLocatedBlocks(file, fs);
    LocatedStripedBlock lastBlock =
        (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();
    DatanodeInfo[] storageInfos = lastBlock.getLocations();
    if (storageInfos.length >= groupSize) {
      return locatedBlocks;
    }
    Thread.sleep(1000);
  }
  throw new IOException("Time out waiting for EC block reconstruction.");
}
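For orientation, here is a minimal, hypothetical call-site sketch. It assumes a test that has already written an erasure-coded file ecFile and that holds cluster, fs, dataBlocks and parityBlocks fixtures; none of these names appear in the snippet above.

// Hypothetical usage sketch; ecFile, fs, cluster, dataBlocks and parityBlocks
// are assumed test fixtures, not part of the snippet above.
LocatedStripedBlock lastGroup = (LocatedStripedBlock)
    fs.getClient().getLocatedBlocks(ecFile.toString(), 0).getLastLocatedBlock();
// Stop one datanode that holds an internal block of the last block group.
cluster.stopDataNode(lastGroup.getLocations()[0].getXferAddr());
// Block until the NameNode has reconstructed the missing internal block and
// the group is back to its full size (data + parity blocks).
LocatedBlocks lbs = StripedFileTestUtil.waitForReconstructionFinished(
    ecFile, fs, dataBlocks + parityBlocks);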
use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class StripedFileTestUtil method waitBlockGroupsReported.
/**
 * Wait for all the internalBlocks of the blockGroups of the given file to be
 * reported.
 */
public static void waitBlockGroupsReported(DistributedFileSystem fs,
    String src, int numDeadDNs) throws Exception {
  boolean success;
  final int ATTEMPTS = 40;
  int count = 0;
  final ErasureCodingPolicy ecPolicy = fs.getErasureCodingPolicy(new Path(src));
  do {
    success = true;
    count++;
    LocatedBlocks lbs = fs.getClient().getLocatedBlocks(src, 0);
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
      short expected = (short) (getRealTotalBlockNum((int) lb.getBlockSize(),
          ecPolicy) - numDeadDNs);
      int reported = lb.getLocations().length;
      if (reported < expected) {
        success = false;
        LOG.info("blockGroup " + lb.getBlock() + " of file " + src
            + " has reported internalBlocks " + reported
            + " (desired " + expected + "); locations "
            + Joiner.on(' ').join(lb.getLocations()));
        Thread.sleep(1000);
        break;
      }
    }
    if (success) {
      LOG.info("All blockGroups of file " + src
          + " verified to have all internalBlocks.");
    }
  } while (!success && count < ATTEMPTS);
  if (count == ATTEMPTS) {
    throw new TimeoutException("Timed out waiting for " + src
        + " to have all the internalBlocks");
  }
}
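A hedged sketch of a typical call site follows; it assumes the test has already written an erasure-coded file at src and stopped one datanode (fs and src are assumed fixtures, not part of the snippet above).

// Assumed fixtures: fs (DistributedFileSystem) and src (path of an EC file);
// one datanode has been stopped earlier in the test.
final int numDeadDNs = 1;
// Wait until every block group of src reports all internal blocks that can
// still be reported, i.e. the full group size minus the stopped datanodes.
StripedFileTestUtil.waitBlockGroupsReported(fs, src, numDeadDNs);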
use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class TestAbandonBlock method testAbandonBlock.
/** Abandon a block while creating a file. */
@Test
public void testAbandonBlock() throws IOException {
  String src = FILE_NAME_PREFIX + "foo";
  // Start writing a file but do not close it.
  FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short) 1, 512L);
  for (int i = 0; i < 1024; i++) {
    fout.write(123);
  }
  fout.hflush();
  long fileId = ((DFSOutputStream) fout.getWrappedStream()).getFileId();
  // Now abandon the last block.
  DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
  LocatedBlocks blocks =
      dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
  int originalNumBlocks = blocks.locatedBlockCount();
  LocatedBlock b = blocks.getLastLocatedBlock();
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);
  // Call abandonBlock again to make sure the operation is idempotent.
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);
  // And close the file.
  fout.close();
  // Restart the NameNode and check that the block has been abandoned.
  cluster.restartNameNode();
  blocks = dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
  Assert.assertEquals("Block " + b + " has not been abandoned.",
      originalNumBlocks, blocks.locatedBlockCount() + 1);
}
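The test relies on cluster, fs and FILE_NAME_PREFIX fixtures defined elsewhere in TestAbandonBlock. The following is only a sketch of what that setup might look like; the configuration values, the min-block-size override, and the JUnit/Hadoop imports are assumptions, and the real class may differ.

// Hypothetical fixture sketch; the actual TestAbandonBlock setup may differ.
private static final String FILE_NAME_PREFIX = "/testAbandonBlock/";
private MiniDFSCluster cluster;
private DistributedFileSystem fs;

@Before
public void setUp() throws IOException {
  Configuration conf = new HdfsConfiguration();
  // Assumed: allow the tiny 512-byte block size used by the test above.
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}

@After
public void tearDown() throws IOException {
  if (cluster != null) {
    cluster.shutdown();
  }
}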
use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class TestClientReportBadBlock method testCorruptTwoOutOfThreeReplicas.
/**
 * This test creates a file with three block replicas and corrupts two of
 * them. It then makes the DFS client read the file; the corrupted blocks,
 * together with the data nodes that own them, should be reported to the
 * name node.
 */
@Test
public void testCorruptTwoOutOfThreeReplicas() throws Exception {
  final short repl = 3;
  final int corruptBlockReplicas = 2;
  for (int i = 0; i < 2; i++) {
    String fileName =
        "/tmp/testClientReportBadBlock/CorruptTwoOutOfThreeReplicas" + i;
    Path filePath = new Path(fileName);
    createAFileWithCorruptedBlockReplicas(filePath, repl, corruptBlockReplicas);
    int replicaCount = 0;
    /*
     * The data nodes in a LocatedBlock returned by the name node are sorted
     * by NetworkTopology#pseudoSortByDistance. In the current MiniDFSCluster
     * this ordering is effectively random, because the DFS client and the
     * simulated data nodes are considered to be neither on the same host nor
     * on the same rack. So even though we corrupted the first two block
     * replicas in order, there is no guarantee which replicas (good or bad)
     * the DFSClient will read first. Therefore we re-read the file until the
     * expected number of replicas is returned.
     */
    while (replicaCount != repl - corruptBlockReplicas) {
      if (i == 0) {
        dfsClientReadFile(filePath);
      } else {
        dfsClientReadFileFromPosition(filePath);
      }
      LocatedBlocks blocks = dfs.dfs.getNamenode().getBlockLocations(
          filePath.toString(), 0, Long.MAX_VALUE);
      replicaCount = blocks.get(0).getLocations().length;
    }
    verifyFirstBlockCorrupted(filePath, false);
    int expectedReplicaCount = repl - corruptBlockReplicas;
    verifyCorruptedBlockCount(filePath, expectedReplicaCount);
    verifyFsckHealth("Target Replicas is 3 but found 1 live replica");
    testFsckListCorruptFilesBlocks(filePath, 0);
  }
}
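createAFileWithCorruptedBlockReplicas is a private helper of the test class that is not shown here. A rough, hypothetical sketch of what it does follows; the BLOCK_SIZE constant, the corruption mechanism, and the assumption that every datanode of the mini cluster holds a replica are guesses, not the real implementation.

// Hypothetical sketch only; the real helper in TestClientReportBadBlock may
// corrupt replicas differently.
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
    int corruptBlockReplicas) throws Exception {
  // Write a single-block file and wait for full replication.
  DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
  DFSTestUtil.waitReplication(dfs, filePath, repl);
  // Locate the first (and only) block of the file.
  ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, filePath);
  // Corrupt the on-disk replica on the first corruptBlockReplicas datanodes;
  // this assumes the mini cluster runs exactly repl datanodes, so each one
  // stores a replica of the block.
  for (int dnIdx = 0; dnIdx < corruptBlockReplicas; dnIdx++) {
    cluster.corruptReplica(dnIdx, block);
  }
}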
use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.
the class TestDFSStripedInputStream method testPread.
@Test
public void testPread() throws Exception {
  final int numBlocks = 2;
  DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
      stripesPerBlock, false, ecPolicy);
  LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
      filePath.toString(), 0, blockGroupSize * numBlocks);
  int fileLen = blockGroupSize * numBlocks;
  byte[] expected = new byte[fileLen];
  assertEquals(numBlocks, lbs.getLocatedBlocks().size());
  for (int bgIdx = 0; bgIdx < numBlocks; bgIdx++) {
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(bgIdx));
    for (int i = 0; i < dataBlocks; i++) {
      Block blk = new Block(bg.getBlock().getBlockId() + i,
          stripesPerBlock * cellSize, bg.getBlock().getGenerationStamp());
      blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
      cluster.injectBlocks(i, Arrays.asList(blk), bg.getBlock().getBlockPoolId());
    }
    /**
     * A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks
     */
    for (int i = 0; i < stripesPerBlock; i++) {
      for (int j = 0; j < dataBlocks; j++) {
        for (int k = 0; k < cellSize; k++) {
          int posInBlk = i * cellSize + k;
          int posInFile = i * cellSize * dataBlocks + j * cellSize + k;
          expected[bgIdx * blockGroupSize + posInFile] =
              SimulatedFSDataset.simulatedByte(
                  new Block(bg.getBlock().getBlockId() + j), posInBlk);
        }
      }
    }
  }
  DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
      filePath.toString(), false, ecPolicy, null);
  int[] startOffsets = { 0, 1, cellSize - 102, cellSize, cellSize + 102,
      cellSize * dataBlocks, cellSize * dataBlocks + 102,
      blockGroupSize - 102, blockGroupSize, blockGroupSize + 102, fileLen - 1 };
  for (int startOffset : startOffsets) {
    startOffset = Math.max(0, Math.min(startOffset, fileLen - 1));
    int remaining = fileLen - startOffset;
    byte[] buf = new byte[fileLen];
    int ret = in.read(startOffset, buf, 0, fileLen);
    assertEquals(remaining, ret);
    for (int i = 0; i < remaining; i++) {
      Assert.assertEquals("Byte at " + (startOffset + i) + " should be the same",
          expected[startOffset + i], buf[i]);
    }
  }
  in.close();
}
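The test reads several fields (ecPolicy, dataBlocks, cellSize, stripesPerBlock, blockGroupSize, cluster, fs, filePath) from the class fixture. The sketch below spells out how these quantities relate, with illustrative values that are assumptions rather than the real TestDFSStripedInputStream setup.

// Illustrative relationships between the fixture fields used above; the
// concrete values are assumptions.
int dataBlocks = ecPolicy.getNumDataUnits();      // e.g. 6 for an RS(6,3) policy
int parityBlocks = ecPolicy.getNumParityUnits();  // e.g. 3
int cellSize = ecPolicy.getCellSize();            // size of one striping cell
int stripesPerBlock = 2;                          // assumed small test value
// Each block group holds stripesPerBlock full stripes of data cells:
int blockGroupSize = stripesPerBlock * cellSize * dataBlocks;
// Note: cluster.injectBlocks() as used above requires the datanodes to run
// SimulatedFSDataset (enabled via SimulatedFSDataset.setFactory(conf) before
// the MiniDFSCluster is built).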