Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi in project hadoop by apache.
The class DataNode, method initStorage.
/**
* Initializes the {@link #data}. The initialization is done only once, when
* handshake with the first namenode is completed.
*/
private void initStorage(final NamespaceInfo nsInfo) throws IOException {
  final FsDatasetSpi.Factory<? extends FsDatasetSpi<?>> factory =
      FsDatasetSpi.Factory.getFactory(getConf());
  if (!factory.isSimulated()) {
    final StartupOption startOpt = getStartupOption(getConf());
    if (startOpt == null) {
      throw new IOException("Startup option not set.");
    }
    final String bpid = nsInfo.getBlockPoolID();
    // read storage info, lock data dirs and transition fs state if necessary
    synchronized (this) {
      storage.recoverTransitionRead(this, nsInfo, dataDirs, startOpt);
    }
    final StorageInfo bpStorage = storage.getBPStorage(bpid);
    LOG.info("Setting up storage: nsid=" + bpStorage.getNamespaceID()
        + ";bpid=" + bpid + ";lv=" + storage.getLayoutVersion()
        + ";nsInfo=" + nsInfo + ";dnuuid=" + storage.getDatanodeUuid());
  }
  // If this is a newly formatted DataNode then assign a new DatanodeUuid.
  checkDatanodeUuid();
  synchronized (this) {
    if (data == null) {
      data = factory.newInstance(this, storage, getConf());
    }
  }
}
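The factory lookup above is what makes the block storage layer pluggable: FsDatasetSpi.Factory.getFactory(conf) resolves the implementation from the dfs.datanode.fsdataset.factory key, defaulting to the on-disk FsDatasetImpl. A minimal sketch of swapping in the in-memory implementation, assuming the test-only SimulatedFSDataset shipped in Hadoop's test sources:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

// Sketch: route the factory lookup to the in-memory dataset used by tests.
static FsDatasetSpi.Factory<?> simulatedFactory() {
  Configuration conf = new Configuration();
  conf.setClass(DFSConfigKeys.DFS_DATANODE_FSDATASET_FACTORY_KEY,
      SimulatedFSDataset.Factory.class, FsDatasetSpi.Factory.class);
  FsDatasetSpi.Factory<?> factory = FsDatasetSpi.Factory.getFactory(conf);
  // A simulated factory makes initStorage() skip the on-disk storage
  // transition guarded by !factory.isSimulated() above.
  assert factory.isSimulated();
  return factory;
}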
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi in project hadoop by apache.
The class LazyPersistTestCase, method verifyDeletedBlocks.
protected final boolean verifyDeletedBlocks(LocatedBlocks locatedBlocks)
    throws IOException, InterruptedException {
  LOG.info("Verifying replica has no saved copy after deletion.");
  triggerBlockReport();
  while (cluster.getFsDatasetTestUtils(0).getPendingAsyncDeletions() > 0L) {
    Thread.sleep(1000);
  }
  final String bpid = cluster.getNamesystem().getBlockPoolId();
  final FsDatasetSpi<?> dataset =
      cluster.getDataNodes().get(0).getFSDataset();
  // Make sure the deleted replica has no copy left in either the finalized
  // dir of a transient volume or the lazy persist dir of a non-transient
  // volume.
  try (FsDatasetSpi.FsVolumeReferences volumes =
      dataset.getFsVolumeReferences()) {
    for (FsVolumeSpi vol : volumes) {
      FsVolumeImpl volume = (FsVolumeImpl) vol;
      File targetDir = volume.isTransientStorage()
          ? volume.getBlockPoolSlice(bpid).getFinalizedDir()
          : volume.getBlockPoolSlice(bpid).getLazypersistDir();
      if (!verifyBlockDeletedFromDir(targetDir, locatedBlocks)) {
        return false;
      }
    }
  }
  return true;
}
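The try-with-resources block is load-bearing here: FsDatasetSpi.FsVolumeReferences pins a reference on every volume so a concurrent hot-swap cannot remove one mid-iteration, and closing it releases those references. A minimal sketch of the same pattern, assuming a running MiniDFSCluster named cluster:

import java.io.IOException;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

// Sketch: iterate volumes while holding references, then release them.
static void listVolumes(MiniDFSCluster cluster) throws IOException {
  FsDatasetSpi<?> dataset = cluster.getDataNodes().get(0).getFSDataset();
  try (FsDatasetSpi.FsVolumeReferences refs =
      dataset.getFsVolumeReferences()) {
    for (FsVolumeSpi volume : refs) {
      // Each volume stays valid only while `refs` is open.
      System.out.println(volume.getStorageID() + " -> "
          + volume.getStorageType());
    }
  } // close() drops the per-volume reference counts.
}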
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi in project hadoop by apache.
The class TestTriggerBlockReport, method testTriggerBlockReport.
private void testTriggerBlockReport(boolean incremental) throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Set a really long value for dfs.blockreport.intervalMsec and
  // dfs.heartbeat.interval, so that incremental block reports and heartbeats
  // won't be sent during this test unless they're triggered manually.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10800000L);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1080L);
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  DatanodeProtocolClientSideTranslatorPB spy =
      InternalDataNodeTestUtils.spyOnBposToNN(
          cluster.getDataNodes().get(0), cluster.getNameNode());
  DFSTestUtil.createFile(fs, new Path("/abc"), 16, (short) 1, 1L);
  // We should get 1 incremental block report.
  Mockito.verify(spy, timeout(60000).times(1)).blockReceivedAndDeleted(
      any(DatanodeRegistration.class), anyString(),
      any(StorageReceivedDeletedBlocks[].class));
  // We should not receive any more incremental or full block reports,
  // since the interval we configured is so long.
  for (int i = 0; i < 3; i++) {
    Thread.sleep(10);
    Mockito.verify(spy, times(0)).blockReport(
        any(DatanodeRegistration.class), anyString(),
        any(StorageBlockReport[].class),
        Mockito.<BlockReportContext>anyObject());
    Mockito.verify(spy, times(1)).blockReceivedAndDeleted(
        any(DatanodeRegistration.class), anyString(),
        any(StorageReceivedDeletedBlocks[].class));
  }
  // Create a fake block deletion notification on the DataNode.
  // This will be sent with the next incremental block report.
  ReceivedDeletedBlockInfo rdbi = new ReceivedDeletedBlockInfo(
      new Block(5678, 512, 1000), BlockStatus.DELETED_BLOCK, null);
  DataNode datanode = cluster.getDataNodes().get(0);
  BPServiceActor actor =
      datanode.getAllBpOs().get(0).getBPServiceActors().get(0);
  final FsDatasetSpi<?> dataset = datanode.getFSDataset();
  final DatanodeStorage storage;
  try (FsDatasetSpi.FsVolumeReferences volumes =
      dataset.getFsVolumeReferences()) {
    storage = dataset.getStorage(volumes.get(0).getStorageID());
  }
  actor.getIbrManager().addRDBI(rdbi, storage);
  // Manually trigger a block report.
  datanode.triggerBlockReport(
      new BlockReportOptions.Factory().setIncremental(incremental).build());
  // triggerBlockReport returns before the block report is actually sent.
  // Wait for it to be sent here.
  if (incremental) {
    Mockito.verify(spy, timeout(60000).times(2)).blockReceivedAndDeleted(
        any(DatanodeRegistration.class), anyString(),
        any(StorageReceivedDeletedBlocks[].class));
  } else {
    Mockito.verify(spy, timeout(60000)).blockReport(
        any(DatanodeRegistration.class), anyString(),
        any(StorageBlockReport[].class),
        Mockito.<BlockReportContext>anyObject());
  }
  cluster.shutdown();
}
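BlockReportOptions is built through a small Factory builder, so the caller only states whether the trigger should flush pending incremental reports or force a full report; the same code path backs the hdfs dfsadmin -triggerBlockReport command. A minimal sketch, assuming a DataNode handle obtained from a MiniDFSCluster as above:

import java.io.IOException;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

// Sketch: request a full (non-incremental) block report from a DataNode.
static void triggerFullBlockReport(DataNode datanode) throws IOException {
  datanode.triggerBlockReport(
      new BlockReportOptions.Factory()
          .setIncremental(false) // true would only flush pending IBRs
          .build());
}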
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi in project hadoop by apache.
The class TestFsDatasetImpl, method testDeletingBlocks.
@Test
public void testDeletingBlocks() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetSpi<?> ds = DataNodeTestUtils.getFSDataset(dn);
    ds.addBlockPool(BLOCKPOOL, conf);
    FsVolumeImpl vol;
    try (FsDatasetSpi.FsVolumeReferences volumes =
        ds.getFsVolumeReferences()) {
      vol = (FsVolumeImpl) volumes.get(0);
    }
    ExtendedBlock eb;
    ReplicaInfo info;
    List<Block> blockList = new ArrayList<>();
    for (int i = 1; i <= 63; i++) {
      eb = new ExtendedBlock(BLOCKPOOL, i, 1, 1000 + i);
      cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
      blockList.add(eb.getLocalBlock());
    }
    ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Nothing to do
    }
    assertTrue(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
    blockList.clear();
    eb = new ExtendedBlock(BLOCKPOOL, 64, 1, 1064);
    cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
    blockList.add(eb.getLocalBlock());
    ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Nothing to do
    }
    assertFalse(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
  } finally {
    cluster.shutdown();
  }
}
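Note that invalidate() only queues replicas for asynchronous deletion, which is why the test sleeps before asserting. A less timing-sensitive variant, sketched below under the assumption of the same MiniDFSCluster and its test-only FsDatasetTestUtils helpers used above, polls the pending-deletion counter instead of sleeping a fixed second:

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

// Sketch: queue one block for deletion, then wait out the async worker.
static void invalidateAndWait(MiniDFSCluster cluster, String bpid, Block block)
    throws Exception {
  FsDatasetSpi<?> ds = cluster.getDataNodes().get(0).getFSDataset();
  ds.invalidate(bpid, new Block[] { block });
  while (cluster.getFsDatasetTestUtils(0).getPendingAsyncDeletions() > 0L) {
    Thread.sleep(100); // deletions run on the dataset's async disk service
  }
}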
Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi in project hadoop by apache.
The class TestDataNodeHotSwapVolumes, method testAddVolumeFailures.
@Test
public void testAddVolumeFailures() throws IOException {
  startDFSCluster(1, 1);
  final String dataDir = cluster.getDataDirectory();
  DataNode dn = cluster.getDataNodes().get(0);
  List<String> newDirs = Lists.newArrayList();
  final int NUM_NEW_DIRS = 4;
  for (int i = 0; i < NUM_NEW_DIRS; i++) {
    File newVolume = new File(dataDir, "new_vol" + i);
    newDirs.add(newVolume.toString());
    if (i % 2 == 0) {
      // Make addVolume() fail.
      newVolume.createNewFile();
    }
  }
  String newValue = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY) + ","
      + Joiner.on(",").join(newDirs);
  try {
    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newValue);
    fail("Expect to throw IOException.");
  } catch (ReconfigurationException e) {
    String errorMessage = e.getCause().getMessage();
    String[] messages = errorMessage.split("\\r?\\n");
    assertEquals(2, messages.length);
    assertThat(messages[0], containsString("new_vol0"));
    assertThat(messages[1], containsString("new_vol2"));
  }
  // Make sure that vol0 and vol2's metadata are not left in memory.
  FsDatasetSpi<?> dataset = dn.getFSDataset();
  try (FsDatasetSpi.FsVolumeReferences volumes =
      dataset.getFsVolumeReferences()) {
    for (FsVolumeSpi volume : volumes) {
      assertThat(new File(volume.getStorageLocation().getUri()).toString(),
          is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
    }
  }
  DataStorage storage = dn.getStorage();
  for (int i = 0; i < storage.getNumStorageDirs(); i++) {
    Storage.StorageDirectory sd = storage.getStorageDir(i);
    assertThat(sd.getRoot().toString(),
        is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
  }
  // The newly effective conf does not have vol0 and vol2.
  String[] effectiveVolumes =
      dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY).split(",");
  assertEquals(4, effectiveVolumes.length);
  for (String ev : effectiveVolumes) {
    assertThat(new File(StorageLocation.parse(ev).getUri()).getCanonicalPath(),
        is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
  }
}
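reconfigurePropertyImpl() is the hook behind DataNode volume hot-swap: the node diffs the old and new dfs.datanode.data.dir values and adds or removes volumes in place, reporting per-directory failures as the test above asserts. A minimal sketch of the public entry point (reconfigureProperty() on ReconfigurableBase, which delegates to reconfigurePropertyImpl()), assuming a DataNode handle dn and a hypothetical fresh directory newVolumeDir; operators reach the same path with hdfs dfsadmin -reconfig datanode <host:ipcport> start:

import java.io.File;
import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

// Sketch: append one data directory to a live DataNode.
static void addDataDir(DataNode dn, File newVolumeDir)
    throws ReconfigurationException {
  String current = dn.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
  dn.reconfigureProperty(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      current + "," + newVolumeDir.getAbsolutePath());
}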