use of org.apache.hadoop.hdfs.server.datanode.Replica in project hadoop by apache.
the class TestBlockListAsLongs method testFuzz.
@Test
public void testFuzz() throws InterruptedException {
  Replica[] replicas = new Replica[100000];
  Random rand = new Random(0);
  for (int i = 0; i < replicas.length; i++) {
    Block b = new Block(rand.nextLong(), i, i << 4);
    switch (rand.nextInt(2)) {
      case 0:
        replicas[i] = new FinalizedReplica(b, null, null);
        break;
      case 1:
        replicas[i] = new ReplicaBeingWritten(b, null, null, null);
        break;
      case 2:
        // rand.nextInt(2) only yields 0 or 1, so this branch is never taken
        replicas[i] = new ReplicaWaitingToBeRecovered(b, null, null);
        break;
    }
  }
  checkReport(replicas);
}
use of org.apache.hadoop.hdfs.server.datanode.Replica in project hadoop by apache.
the class TestBlockListAsLongs method checkReport.
private BlockListAsLongs checkReport(Replica... replicas) {
  Map<Long, Replica> expectedReplicas = new HashMap<>();
  for (Replica replica : replicas) {
    expectedReplicas.put(replica.getBlockId(), replica);
  }
  expectedReplicas = Collections.unmodifiableMap(expectedReplicas);
  // encode the blocks and extract the buffers
  BlockListAsLongs blocks =
      BlockListAsLongs.encode(expectedReplicas.values());
  List<ByteString> buffers = blocks.getBlocksBuffers();
  // convert to the old-style list of longs
  List<Long> longs = new ArrayList<Long>();
  for (long value : blocks.getBlockListAsLongs()) {
    longs.add(value);
  }
  // decode the buffers and verify their contents
  BlockListAsLongs decodedBlocks =
      BlockListAsLongs.decodeBuffers(expectedReplicas.size(), buffers);
  checkReplicas(expectedReplicas, decodedBlocks);
  // decode the longs and verify their contents
  BlockListAsLongs decodedList = BlockListAsLongs.decodeLongs(longs);
  checkReplicas(expectedReplicas, decodedList);
  return blocks;
}
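The checkReplicas helper called above is defined elsewhere in TestBlockListAsLongs and is not part of this excerpt. As a rough, hypothetical sketch of what such a verification helper might look like, assuming JUnit's static assert imports and that the decoded report can be iterated as BlockReportReplica entries (the assertions and message strings here are illustrative, not the project's exact code):

private void checkReplicas(Map<Long, Replica> expectedReplicas,
    BlockListAsLongs decodedBlocks) {
  // every decoded replica should match an expected replica by id, length, genstamp and state
  assertEquals(expectedReplicas.size(), decodedBlocks.getNumberOfBlocks());
  Map<Long, Replica> remaining = new HashMap<>(expectedReplicas);
  for (BlockListAsLongs.BlockReportReplica replica : decodedBlocks) {
    Replica expected = remaining.remove(replica.getBlockId());
    assertNotNull("unexpected block id " + replica.getBlockId(), expected);
    assertEquals("wrong bytes", expected.getNumBytes(), replica.getNumBytes());
    assertEquals("wrong genstamp",
        expected.getGenerationStamp(), replica.getGenerationStamp());
    assertEquals("wrong replica state", expected.getState(), replica.getState());
  }
  assertTrue("every expected replica should be reported", remaining.isEmpty());
}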
use of org.apache.hadoop.hdfs.server.datanode.Replica in project hadoop by apache.
the class TestInterDatanodeProtocol method testUpdateReplicaUnderRecovery.
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long, long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    //create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);
    //get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);
    //get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);
    //initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(
        new RecoveringBlock(b, null, recoveryid));
    //check replica
    final Replica replica =
        cluster.getFsDatasetTestUtils(datanode).fetchReplica(b);
    Assert.assertEquals(ReplicaState.RUR, replica.getState());
    //check meta data before update
    cluster.getFsDatasetTestUtils(datanode).checkStoredReplica(replica);
    //case "THIS IS NOT SUPPOSED TO HAPPEN"
    //with (block length) != (stored replica's on disk length).
    {
      //create a block with same id and gs but different length.
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(),
          rri.getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        //update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
            tmp.getBlockId(), newlength);
        Assert.fail();
      } catch (IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }
    //update
    final Replica r = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid,
        rri.getBlockId(), newlength);
    assertTrue(r != null);
    assertTrue(r.getStorageUuid() != null);
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
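The getLastLocatedBlock helper used at the start of the test is defined elsewhere in TestInterDatanodeProtocol and is not shown here. A plausible sketch, under the assumption that it simply asks the NameNode for every located block of the file and returns the last one:

public static LocatedBlock getLastLocatedBlock(ClientProtocol namenode, String src)
    throws IOException {
  // fetch all located blocks for the path and return the last one
  List<LocatedBlock> blocks =
      namenode.getBlockLocations(src, 0, Long.MAX_VALUE).getLocatedBlocks();
  Assert.assertTrue(blocks.size() > 0);
  return blocks.get(blocks.size() - 1);
}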
use of org.apache.hadoop.hdfs.server.datanode.Replica in project hadoop by apache.
the class TestPipelines method pipeline_01.
/**
 * Creates and closes a file of a certain length.
 * Calls append() so that the next write() operation adds data to the end of the file.
 * After the write() invocation, calls hflush() to make sure the data has made it through
 * the pipeline, then checks the state of the last block's replica.
 * It is expected to be in the RBW state.
 *
 * @throws IOException in case of an error
 */
@Test
public void pipeline_01() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + METHOD_NAME);
  }
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs, filePath, FILE_SIZE, REPL_FACTOR, rand.nextLong());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Invoking append but doing nothing otherwise...");
  }
  FSDataOutputStream ofs = fs.append(filePath);
  ofs.writeBytes("Some more stuff to write");
  ((DFSOutputStream) ofs.getWrappedStream()).hflush();
  List<LocatedBlock> lb = cluster.getNameNodeRpc().getBlockLocations(
      filePath.toString(), FILE_SIZE - 1, FILE_SIZE).getLocatedBlocks();
  for (DataNode dn : cluster.getDataNodes()) {
    Replica r = cluster.getFsDatasetTestUtils(dn).fetchReplica(lb.get(0).getBlock());
    assertTrue("Replica on DN " + dn + " shouldn't be null", r != null);
    assertEquals("Should be RBW replica on " + dn
        + " after sequence of calls append()/write()/hflush()",
        HdfsServerConstants.ReplicaState.RBW, r.getState());
  }
  ofs.close();
}
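The LOG, fs, cluster, rand, FILE_SIZE, and REPL_FACTOR references come from the enclosing TestPipelines class, which is not included in this excerpt. A minimal sketch of a fixture that would provide them, assuming a standard MiniDFSCluster setup (the field values and method names below are illustrative, not the project's exact code; the logger declaration is omitted):

private static final short REPL_FACTOR = 3;
private static final int FILE_SIZE = 10000;
private final Random rand = new Random();
private Configuration conf;
private MiniDFSCluster cluster;
private DistributedFileSystem fs;

@Before
public void startUpCluster() throws IOException {
  // bring up a small cluster with REPL_FACTOR datanodes before each test
  conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
}

@After
public void shutDownCluster() throws IOException {
  // release the client and the cluster after each test
  if (fs != null) {
    fs.close();
  }
  if (cluster != null) {
    cluster.shutdown();
  }
}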