Use of org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB in project hadoop by apache.
The class TestDnRespectsBlockReportSplitThreshold, method testAlwaysSplit.
/**
 * Test that if splitThreshold is zero, then we always get a separate
 * call per storage.
 */
@Test(timeout = 300000)
public void testAlwaysSplit() throws IOException, InterruptedException {
  startUpCluster(0);
  NameNode nn = cluster.getNameNode();
  DataNode dn = cluster.getDataNodes().get(0);

  // Create a file with a few blocks.
  createFile(GenericTestUtils.getMethodName(), BLOCKS_IN_FILE);

  // Insert a spy object for the NN RPC.
  DatanodeProtocolClientSideTranslatorPB nnSpy =
      InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);

  // Trigger a block report so there is an interaction with the spy object.
  DataNodeTestUtils.triggerBlockReport(dn);

  ArgumentCaptor<StorageBlockReport[]> captor =
      ArgumentCaptor.forClass(StorageBlockReport[].class);
  Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(
      any(DatanodeRegistration.class),
      anyString(),
      captor.capture(),
      Mockito.<BlockReportContext>anyObject());

  verifyCapturedArguments(captor, 1, BLOCKS_IN_FILE);
}
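The heavy lifting here is done by InternalDataNodeTestUtils.spyOnBposToNN, which swaps a Mockito spy in front of the DataNode's RPC translator to the NameNode so the test can count and capture the blockReport calls. Below is a minimal, self-contained sketch of that spy-and-capture pattern; the Reporter class and its argument types are simplified stand-ins invented for illustration, not Hadoop's real protocol classes.

import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import org.mockito.ArgumentCaptor;

public class SpyAndCaptureSketch {

  /** Simplified stand-in for the DN-to-NN block report RPC (not Hadoop's API). */
  public static class Reporter {
    public void blockReport(String blockPoolId, long[] blocksForOneStorage) {
      // A real implementation would issue the RPC here.
    }
  }

  public static void main(String[] args) {
    // Wrap a real object in a spy: calls still execute the real code,
    // but Mockito records every interaction for later verification.
    Reporter nnSpy = spy(new Reporter());

    // "Production" path: one call per storage, as with splitThreshold == 0.
    nnSpy.blockReport("bp-1", new long[] {1L, 2L});
    nnSpy.blockReport("bp-1", new long[] {3L});

    // Verify the expected number of calls and capture each call's arguments.
    ArgumentCaptor<long[]> captor = ArgumentCaptor.forClass(long[].class);
    verify(nnSpy, times(2)).blockReport(eq("bp-1"), captor.capture());
    System.out.println("captured calls: " + captor.getAllValues().size());
  }
}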
Use of org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB in project hadoop by apache.
The class TestStorageReport, method testStorageReportHasStorageTypeAndState.
/**
* Ensure that storage type and storage state are propagated
* in Storage Reports.
*/
@Test
public void testStorageReportHasStorageTypeAndState() throws IOException {
  // Make sure we are not testing with the default type, that would not
  // be a very good test.
  assertNotSame(storageType, StorageType.DEFAULT);
  NameNode nn = cluster.getNameNode();
  DataNode dn = cluster.getDataNodes().get(0);

  // Insert a spy object for the NN RPC.
  DatanodeProtocolClientSideTranslatorPB nnSpy =
      InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);

  // Trigger a heartbeat so there is an interaction with the spy object.
  DataNodeTestUtils.triggerHeartbeat(dn);

  // Verify that the callback passed in the expected parameters.
  ArgumentCaptor<StorageReport[]> captor =
      ArgumentCaptor.forClass(StorageReport[].class);
  Mockito.verify(nnSpy).sendHeartbeat(
      any(DatanodeRegistration.class),
      captor.capture(),
      anyLong(), anyLong(), anyInt(), anyInt(), anyInt(),
      Mockito.any(VolumeFailureSummary.class),
      Mockito.anyBoolean(),
      Mockito.any(SlowPeerReports.class));

  StorageReport[] reports = captor.getValue();
  for (StorageReport report : reports) {
    assertThat(report.getStorage().getStorageType(), is(storageType));
    assertThat(report.getStorage().getState(), is(DatanodeStorage.State.NORMAL));
  }
}
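The assertNotSame(storageType, StorageType.DEFAULT) guard only makes sense if the MiniDFSCluster was started with a non-default storage type. The sketch below shows one way such a setup typically looks; it is written from memory of the MiniDFSCluster builder API, and the exact builder methods and the package holding StorageType can differ between Hadoop versions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class NonDefaultStorageTypeClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Bring up a single DataNode whose two storage volumes are SSD rather than
    // the default DISK, so assertions on the reported storage type are meaningful.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .storageTypes(new StorageType[] {StorageType.SSD, StorageType.SSD})
        .build();
    try {
      cluster.waitActive();
      // ... trigger a heartbeat and inspect the captured StorageReport[] here ...
    } finally {
      cluster.shutdown();
    }
  }
}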
Use of org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB in project hadoop by apache.
The class TestDeleteRace, method testDeleteAndCommitBlockSynchronizationRace.
/**
 * Test the race between a delete operation and the commitBlockSynchronization
 * method. See HDFS-6825.
 * @param hasSnapshot whether to take a snapshot of the root before the delete
 * @throws Exception
 */
private void testDeleteAndCommitBlockSynchronizationRace(boolean hasSnapshot)
    throws Exception {
  LOG.info("Start testing, hasSnapshot: " + hasSnapshot);
  ArrayList<AbstractMap.SimpleImmutableEntry<String, Boolean>> testList =
      new ArrayList<AbstractMap.SimpleImmutableEntry<String, Boolean>>();
  testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>(
      "/test-file", false));
  testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>(
      "/test-file1", true));
  testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>(
      "/testdir/testdir1/test-file", false));
  testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>(
      "/testdir/testdir1/test-file1", true));
  final Path rootPath = new Path("/");
  final Configuration conf = new Configuration();
  // Disable permissions so that another user can recover the lease.
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  FSDataOutputStream stm = null;
  Map<DataNode, DatanodeProtocolClientSideTranslatorPB> dnMap =
      new HashMap<DataNode, DatanodeProtocolClientSideTranslatorPB>();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    int stId = 0;
    for (AbstractMap.SimpleImmutableEntry<String, Boolean> stest : testList) {
      String testPath = stest.getKey();
      Boolean mkSameDir = stest.getValue();
      LOG.info("test on " + testPath + " mkSameDir: " + mkSameDir
          + " snapshot: " + hasSnapshot);
      Path fPath = new Path(testPath);

      // Find the grandest non-root parent.
      Path grandestNonRootParent = fPath;
      while (!grandestNonRootParent.getParent().equals(rootPath)) {
        grandestNonRootParent = grandestNonRootParent.getParent();
      }
      stm = fs.create(fPath);
      LOG.info("test on " + testPath + " created " + fPath);

      // Write a half block.
      AppendTestUtil.write(stm, 0, BLOCK_SIZE / 2);
      stm.hflush();

      if (hasSnapshot) {
        SnapshotTestHelper.createSnapshot(fs, rootPath, "st" + String.valueOf(stId));
        ++stId;
      }

      // Look into the block manager on the active node for the block
      // under construction.
      NameNode nn = cluster.getNameNode();
      ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, fPath);
      DatanodeDescriptor expectedPrimary =
          DFSTestUtil.getExpectedPrimaryNode(nn, blk);
      LOG.info("Expecting block recovery to be triggered on DN " + expectedPrimary);

      // Find the corresponding DN daemon, and spy on its connection to the active.
      DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
      DatanodeProtocolClientSideTranslatorPB nnSpy = dnMap.get(primaryDN);
      if (nnSpy == null) {
        nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(primaryDN, nn);
        dnMap.put(primaryDN, nnSpy);
      }

      // Delay the commitBlockSynchronization call.
      DelayAnswer delayer = new DelayAnswer(LOG);
      Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
          Mockito.eq(blk),
          Mockito.anyInt(),                    // new genstamp
          Mockito.anyLong(),                   // new length
          Mockito.eq(true),                    // close file
          Mockito.eq(false),                   // delete block
          (DatanodeID[]) Mockito.anyObject(),  // new targets
          (String[]) Mockito.anyObject());     // new target storages
      fs.recoverLease(fPath);
      LOG.info("Waiting for commitBlockSynchronization call from primary");
      delayer.waitForCall();

      LOG.info("Deleting recursively " + grandestNonRootParent);
      fs.delete(grandestNonRootParent, true);
      if (mkSameDir && !grandestNonRootParent.toString().equals(testPath)) {
        LOG.info("Recreate dir " + grandestNonRootParent + " testpath: " + testPath);
        fs.mkdirs(grandestNonRootParent);
      }
      delayer.proceed();

      LOG.info("Now wait for result");
      delayer.waitForResult();
      Throwable t = delayer.getThrown();
      if (t != null) {
        LOG.info("Result exception (snapshot: " + hasSnapshot + "): " + t);
      }
    } // end of loop over each fPath

    LOG.info("Now check we can restart");
    cluster.restartNameNodes();
    LOG.info("Restart finished");
  } finally {
    if (stm != null) {
      IOUtils.closeStream(stm);
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
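The race is orchestrated by DelayAnswer: the spied commitBlockSynchronization call is parked until the test has issued the delete, then released. The class below is a simplified, self-contained stand-in for that idea (it is not Hadoop's GenericTestUtils.DelayAnswer), showing how a latch-based Mockito Answer supports the waitForCall/proceed/waitForResult/getThrown sequence used above.

import java.util.concurrent.CountDownLatch;

import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

/** Latch-based stand-in for a DelayAnswer-style Mockito Answer. */
public class DelayAnswerSketch implements Answer<Object> {

  private final CountDownLatch called = new CountDownLatch(1);   // released when the stubbed call arrives
  private final CountDownLatch allowed = new CountDownLatch(1);  // released by proceed()
  private final CountDownLatch finished = new CountDownLatch(1); // released when the real call returns
  private volatile Throwable thrown;

  @Override
  public Object answer(InvocationOnMock invocation) throws Throwable {
    called.countDown();                       // tell the test the spied method was invoked
    allowed.await();                          // park the caller until the test calls proceed()
    try {
      return invocation.callRealMethod();     // then let the real RPC go through
    } catch (Throwable t) {
      thrown = t;                             // remember any failure for the test to inspect
      throw t;
    } finally {
      finished.countDown();
    }
  }

  public void waitForCall() throws InterruptedException { called.await(); }

  public void proceed() { allowed.countDown(); }

  public void waitForResult() throws InterruptedException { finished.await(); }

  public Throwable getThrown() { return thrown; }
}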
Use of org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB in project hadoop by apache.
The class TestDiskspaceQuotaUpdate, method testQuotaIssuesWhileCommitting.
/**
* Test that the cached quota stays correct between the COMMIT
* and COMPLETE block steps, even if the replication factor is
* changed during this time.
*/
@Test(timeout = 60000)
public void testQuotaIssuesWhileCommitting() throws Exception {
  // We want a one-DN cluster so that we can force a lack of
  // commit by only instrumenting a single DN; we kill the other 3.
  List<MiniDFSCluster.DataNodeProperties> dnprops = new ArrayList<>();
  try {
    for (int i = REPLICATION - 1; i > 0; i--) {
      dnprops.add(cluster.stopDataNode(i));
    }

    DatanodeProtocolClientSideTranslatorPB nnSpy =
        InternalDataNodeTestUtils.spyOnBposToNN(
            cluster.getDataNodes().get(0), cluster.getNameNode());

    testQuotaIssuesWhileCommittingHelper(nnSpy, (short) 1, (short) 4);
    testQuotaIssuesWhileCommittingHelper(nnSpy, (short) 4, (short) 1);

    // Don't actually change replication; just check that the sizes
    // agree during the commit period.
    testQuotaIssuesWhileCommittingHelper(nnSpy, (short) 1, (short) 1);
  } finally {
    for (MiniDFSCluster.DataNodeProperties dnprop : dnprops) {
      cluster.restartDataNode(dnprop);
    }
    cluster.waitActive();
  }
}
Use of org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB in project hadoop by apache.
The class BlockRecoveryWorker, method getActiveNamenodeForBP.
/**
* Get the NameNode corresponding to the given block pool.
*
* @param bpid Block pool Id
* @return Namenode corresponding to the bpid
* @throws IOException if unable to get the corresponding NameNode
*/
DatanodeProtocolClientSideTranslatorPB getActiveNamenodeForBP(String bpid)
    throws IOException {
  BPOfferService bpos = datanode.getBPOfferService(bpid);
  if (bpos == null) {
    throw new IOException("No block pool offer service for bpid=" + bpid);
  }
  DatanodeProtocolClientSideTranslatorPB activeNN = bpos.getActiveNN();
  if (activeNN == null) {
    throw new IOException("Block pool " + bpid + " has not recognized an active NN");
  }
  return activeNN;
}
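A typical caller inside the recovery worker looks up the active NameNode this way and then reports the outcome of block recovery over the same translator. The sketch below illustrates that call sequence under stated assumptions: the helper name, the stand-in interface, and the argument values are illustrative, while the commitBlockSynchronization parameters mirror the signature exercised by the TestDeleteRace example above.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;

class CommitSynchronizationSketch {

  /** Stand-in for the worker above; only the lookup method is needed here. */
  interface BlockRecoveryWorkerLike {
    DatanodeProtocolClientSideTranslatorPB getActiveNamenodeForBP(String bpid)
        throws IOException;
  }

  // Hedged sketch: report a finished block recovery to the active NameNode.
  // The real worker derives these arguments from the replicas that took part
  // in recovery; here they are passed in for illustration.
  void reportRecoveryDone(BlockRecoveryWorkerLike worker, String bpid,
      ExtendedBlock block, long newGenStamp, long newLength,
      DatanodeID[] newTargets, String[] newTargetStorages) throws IOException {
    // Look up the translator for the block's pool, then commit the result.
    DatanodeProtocolClientSideTranslatorPB nn = worker.getActiveNamenodeForBP(bpid);
    nn.commitBlockSynchronization(block, newGenStamp, newLength,
        true /* closeFile */, false /* deleteBlock */, newTargets, newTargetStorages);
  }
}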