Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
The class TestDataStorage, method testAddStorageDirectories.
@Test
public void testAddStorageDirectories() throws IOException, URISyntaxException {
  final int numLocations = 3;
  final int numNamespace = 3;
  List<StorageLocation> locations = createStorageLocations(numLocations);

  // Add volumes for multiple namespaces.
  List<NamespaceInfo> namespaceInfos = createNamespaceInfos(numNamespace);
  for (NamespaceInfo ni : namespaceInfos) {
    storage.addStorageLocations(mockDN, ni, locations, START_OPT);
    for (StorageLocation sl : locations) {
      checkDir(new File(sl.getUri()));
      checkDir(new File(sl.getUri()), ni.getBlockPoolID());
    }
  }
  assertEquals(numLocations, storage.getNumStorageDirs());

  locations = createStorageLocations(numLocations);
  List<StorageDirectory> addedLocation =
      storage.addStorageLocations(mockDN, namespaceInfos.get(0), locations, START_OPT);
  assertTrue(addedLocation.isEmpty());
  // The number of active storage dirs has not changed, since it tries to
  // add the storage dirs that are under service.
  assertEquals(numLocations, storage.getNumStorageDirs());

  // Add more directories.
  locations = createStorageLocations(6);
  storage.addStorageLocations(mockDN, nsInfo, locations, START_OPT);
  assertEquals(6, storage.getNumStorageDirs());
}
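The helpers createStorageLocations and createNamespaceInfos are fixtures of TestDataStorage that are not shown above, and the final addStorageLocations call uses nsInfo, presumably a NamespaceInfo field initialized in the test's setup. A minimal sketch of what such fixtures might look like, assuming a TEST_DIR root and illustrative cluster/block-pool IDs (not the actual test code):

// Hypothetical fixture sketch: create N fresh storage directories, and N
// NamespaceInfo objects that share one cluster ID but use distinct block pool
// IDs, so each namespace can claim the same locations independently.
private List<StorageLocation> createStorageLocations(int numLocs) throws IOException {
  List<StorageLocation> locations = new ArrayList<>(numLocs);
  for (int i = 0; i < numLocs; i++) {
    File dir = new File(TEST_DIR, "data" + i);   // TEST_DIR: assumed test root
    dir.mkdirs();
    locations.add(StorageLocation.parse(dir.getAbsolutePath()));
  }
  return locations;
}

private List<NamespaceInfo> createNamespaceInfos(int num) {
  List<NamespaceInfo> infos = new ArrayList<>(num);
  for (int i = 0; i < num; i++) {
    // nsID, clusterID, blockPoolID, cTime -- values are illustrative only.
    infos.add(new NamespaceInfo(i, "cluster-0", "bp-" + i, 0L));
  }
  return infos;
}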
Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
The class DataNode, method initBlockPool.
/**
* One of the Block Pools has successfully connected to its NN.
* This initializes the local storage for that block pool,
* checks consistency of the NN's cluster ID, etc.
*
* If this is the first block pool to register, this also initializes
* the datanode-scoped storage.
*
* @param bpos Block pool offer service
* @throws IOException if the NN is inconsistent with the local storage.
*/
void initBlockPool(BPOfferService bpos) throws IOException {
  NamespaceInfo nsInfo = bpos.getNamespaceInfo();
  if (nsInfo == null) {
    throw new IOException("NamespaceInfo not found: Block pool " + bpos
        + " should have retrieved namespace info before initBlockPool.");
  }
  setClusterId(nsInfo.clusterID, nsInfo.getBlockPoolID());

  // Register the new block pool with the BP manager.
  blockPoolManager.addBlockPool(bpos);

  // In the case that this is the first block pool to connect, initialize
  // the dataset, block scanners, etc.
  initStorage(nsInfo);

  // Exclude failed disks before initializing the block pools to avoid startup
  // failures.
  checkDiskError();
  data.addBlockPool(nsInfo.getBlockPoolID(), getConf());
  blockScanner.enableBlockPoolId(bpos.getBlockPoolId());
  initDirectoryScanner(getConf());
  initDiskBalancer(data, getConf());
}
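The call setClusterId(nsInfo.clusterID, nsInfo.getBlockPoolID()) is where the NameNode's cluster ID is checked against what the DataNode already knows. A minimal sketch of that kind of check, assuming a clusterId field on the DataNode (an illustration of the semantics, not the actual DataNode code):

// Sketch: all block pools served by one DataNode must report the same
// cluster ID; the first block pool to register establishes it.
private String clusterId;   // assumed field for this sketch

synchronized void setClusterIdSketch(String newClusterId, String blockPoolId)
    throws IOException {
  if (clusterId != null && !clusterId.equals(newClusterId)) {
    throw new IOException("Cluster IDs do not match: DataNode belongs to "
        + clusterId + " but block pool " + blockPoolId
        + " reported " + newClusterId);
  }
  clusterId = newClusterId;
}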
Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
The class DataNode, method refreshVolumes.
/**
* Attempts to reload data volumes with new configuration.
* @param newVolumes a comma separated string that specifies the data volumes.
* @throws IOException on error. If an IOException is thrown, some new volumes
* may have been successfully added and removed.
*/
private synchronized void refreshVolumes(String newVolumes) throws IOException {
  Configuration conf = getConf();
  conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
  ExecutorService service = null;
  int numOldDataDirs = dataDirs.size();
  ChangedVolumes changedVolumes = parseChangedVolumes(newVolumes);
  StringBuilder errorMessageBuilder = new StringBuilder();
  List<String> effectiveVolumes = Lists.newArrayList();
  for (StorageLocation sl : changedVolumes.unchangedLocations) {
    effectiveVolumes.add(sl.toString());
  }
  try {
    if (numOldDataDirs + getFSDataset().getNumFailedVolumes()
        + changedVolumes.newLocations.size()
        - changedVolumes.deactivateLocations.size() <= 0) {
      throw new IOException("Attempt to remove all volumes.");
    }
    if (!changedVolumes.newLocations.isEmpty()) {
      LOG.info("Adding new volumes: "
          + Joiner.on(",").join(changedVolumes.newLocations));

      // Add volumes for each Namespace.
      final List<NamespaceInfo> nsInfos = Lists.newArrayList();
      for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
        nsInfos.add(bpos.getNamespaceInfo());
      }
      service = Executors.newFixedThreadPool(changedVolumes.newLocations.size());
      List<Future<IOException>> exceptions = Lists.newArrayList();
      for (final StorageLocation location : changedVolumes.newLocations) {
        exceptions.add(service.submit(new Callable<IOException>() {
          @Override
          public IOException call() {
            try {
              data.addVolume(location, nsInfos);
            } catch (IOException e) {
              return e;
            }
            return null;
          }
        }));
      }
      for (int i = 0; i < changedVolumes.newLocations.size(); i++) {
        StorageLocation volume = changedVolumes.newLocations.get(i);
        Future<IOException> ioExceptionFuture = exceptions.get(i);
        try {
          IOException ioe = ioExceptionFuture.get();
          if (ioe != null) {
            errorMessageBuilder.append(
                String.format("FAILED TO ADD: %s: %s%n", volume, ioe.getMessage()));
            LOG.error("Failed to add volume: " + volume, ioe);
          } else {
            effectiveVolumes.add(volume.toString());
            LOG.info("Successfully added volume: " + volume);
          }
        } catch (Exception e) {
          errorMessageBuilder.append(
              String.format("FAILED TO ADD: %s: %s%n", volume, e.toString()));
          LOG.error("Failed to add volume: " + volume, e);
        }
      }
    }
    try {
      removeVolumes(changedVolumes.deactivateLocations);
    } catch (IOException e) {
      errorMessageBuilder.append(e.getMessage());
      LOG.error("Failed to remove volume: " + e.getMessage(), e);
    }
    if (errorMessageBuilder.length() > 0) {
      throw new IOException(errorMessageBuilder.toString());
    }
  } finally {
    if (service != null) {
      service.shutdown();
    }
    conf.set(DFS_DATANODE_DATA_DIR_KEY, Joiner.on(",").join(effectiveVolumes));
    dataDirs = getStorageLocations(conf);
  }
}
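The three ChangedVolumes lists drive the whole method, but the holder class itself is not shown. A rough sketch of the shape parseChangedVolumes returns, assuming the field names used above (the real class is an inner class of DataNode and may differ in detail):

// Sketch of the diff result consumed above: the requested volume list is
// partitioned against the volumes currently in service.
static class ChangedVolumes {
  List<StorageLocation> newLocations = Lists.newArrayList();        // to add in parallel
  List<StorageLocation> deactivateLocations = Lists.newArrayList(); // to remove afterwards
  List<StorageLocation> unchangedLocations = Lists.newArrayList();  // kept as-is
}

Note the error-handling pattern in the method: each new volume is added on its own thread by a Callable<IOException> that returns the failure instead of throwing it, so one bad disk does not abort the others; all failures are accumulated in errorMessageBuilder and surfaced as a single IOException at the end.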
Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
The class BPServiceActor, method retrieveNamespaceInfo.
/**
* Perform the first part of the handshake with the NameNode.
* This calls <code>versionRequest</code> to determine the NN's
* namespace and version info. It automatically retries until
* the NN responds or the DN is shutting down.
*
* @return the NamespaceInfo
*/
@VisibleForTesting
NamespaceInfo retrieveNamespaceInfo() throws IOException {
  NamespaceInfo nsInfo = null;
  while (shouldRun()) {
    try {
      nsInfo = bpNamenode.versionRequest();
      LOG.debug(this + " received versionRequest response: " + nsInfo);
      break;
    } catch (SocketTimeoutException e) {
      // namenode is busy
      LOG.warn("Problem connecting to server: " + nnAddr);
    } catch (IOException e) {
      // namenode is not available
      LOG.warn("Problem connecting to server: " + nnAddr);
    }
    // try again after a short pause (5 seconds)
    sleepAndLogInterrupts(5000, "requesting version info from NN");
  }
  if (nsInfo != null) {
    checkNNVersion(nsInfo);
  } else {
    throw new IOException("DN shut down before block pool connected");
  }
  return nsInfo;
}
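Because the method is marked @VisibleForTesting, the retry loop can be exercised directly by stubbing the NameNode proxy. A hypothetical Mockito sketch, assuming the proxy type used by BPServiceActor and an illustrative NamespaceInfo (not an actual Hadoop test):

// Hypothetical test sketch: stub versionRequest() so the loop exits on the
// first pass and the stubbed NamespaceInfo is returned.
DatanodeProtocolClientSideTranslatorPB mockNN =
    Mockito.mock(DatanodeProtocolClientSideTranslatorPB.class);
NamespaceInfo fakeNsInfo = new NamespaceInfo(1, "fake-cluster", "fake-bp", 0L);
Mockito.doReturn(fakeNsInfo).when(mockNN).versionRequest();
// With bpNamenode pointing at mockNN, retrieveNamespaceInfo() would return
// fakeNsInfo after a single versionRequest() call, assuming checkNNVersion passes.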
Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.
The class BPServiceActor, method connectToNNAndHandshake.
private void connectToNNAndHandshake() throws IOException {
  // get NN proxy
  bpNamenode = dn.connectToNN(nnAddr);

  // First phase of the handshake with NN - get the namespace info.
  NamespaceInfo nsInfo = retrieveNamespaceInfo();

  // Verify that this matches the other NN in this HA pair.
  // This also initializes our block pool in the DN if we are
  // the first NN connection for this BP.
  bpos.verifyAndSetNamespaceInfo(this, nsInfo);

  // Second phase of the handshake with the NN.
  register(nsInfo);
}
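Note how the snippets above fit together: verifyAndSetNamespaceInfo records the NamespaceInfo on the BPOfferService and, for the first actor in the block pool to complete the handshake, calls back into DataNode.initBlockPool (shown earlier) before this actor proceeds to register(nsInfo) with the NameNode.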