Example 16 with NamespaceInfo

Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.

From class TestDataStorage, method testAddStorageDirectories.

@Test
public void testAddStorageDirectories() throws IOException, URISyntaxException {
    final int numLocations = 3;
    final int numNamespace = 3;
    List<StorageLocation> locations = createStorageLocations(numLocations);
    // Add volumes for multiple namespaces.
    List<NamespaceInfo> namespaceInfos = createNamespaceInfos(numNamespace);
    for (NamespaceInfo ni : namespaceInfos) {
        storage.addStorageLocations(mockDN, ni, locations, START_OPT);
        for (StorageLocation sl : locations) {
            checkDir(new File(sl.getUri()));
            checkDir(new File(sl.getUri()), ni.getBlockPoolID());
        }
    }
    assertEquals(numLocations, storage.getNumStorageDirs());
    locations = createStorageLocations(numLocations);
    List<StorageDirectory> addedLocation = storage.addStorageLocations(mockDN, namespaceInfos.get(0), locations, START_OPT);
    assertTrue(addedLocation.isEmpty());
    // The number of active storage dirs has not changed: the call tried to
    // re-add storage dirs that are already in service, so they were skipped.
    assertEquals(numLocations, storage.getNumStorageDirs());
    // Add more directories. (nsInfo here is a NamespaceInfo fixture field of
    // the test class, set up elsewhere; it is not shown in this snippet.)
    locations = createStorageLocations(6);
    storage.addStorageLocations(mockDN, nsInfo, locations, START_OPT);
    assertEquals(6, storage.getNumStorageDirs());
}
Also used: StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) File(java.io.File) Test(org.junit.Test)
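
The helpers createStorageLocations and createNamespaceInfos belong to TestDataStorage but are not shown above. A minimal sketch of what they could look like, assuming a scratch directory under /tmp and illustrative NamespaceInfo constructor values (both the paths and the constructor arguments are assumptions, not the test's actual code):

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;

private List<StorageLocation> createStorageLocations(int numLocs)
        throws IOException {
    List<StorageLocation> locations = new ArrayList<>(numLocs);
    for (int i = 0; i < numLocs; i++) {
        // Assumed scratch layout; the real test derives this from its own constants.
        File dir = new File("/tmp/dstest/data" + i);
        dir.mkdirs();
        locations.add(StorageLocation.parse(dir.getAbsolutePath()));
    }
    return locations;
}

private List<NamespaceInfo> createNamespaceInfos(int num) {
    List<NamespaceInfo> nsInfos = new ArrayList<>(num);
    for (int i = 0; i < num; i++) {
        // Arguments: nsID, clusterID, blockPoolID, cTime; values illustrative only.
        nsInfos.add(new NamespaceInfo(i, "cluster0", "bp-" + i, 1));
    }
    return nsInfos;
}

Deterministic names matter here: the test calls createStorageLocations again with the same count and expects addStorageLocations to return an empty list, which only holds if the second call produces the same locations that are already in service.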

Example 17 with NamespaceInfo

Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.

From class DataNode, method initBlockPool.

/**
   * One of the Block Pools has successfully connected to its NN.
   * This initializes the local storage for that block pool,
   * checks consistency of the NN's cluster ID, etc.
   * 
   * If this is the first block pool to register, this also initializes
   * the datanode-scoped storage.
   * 
   * @param bpos Block pool offer service
   * @throws IOException if the NN is inconsistent with the local storage.
   */
void initBlockPool(BPOfferService bpos) throws IOException {
    NamespaceInfo nsInfo = bpos.getNamespaceInfo();
    if (nsInfo == null) {
        throw new IOException("NamespaceInfo not found: Block pool " + bpos + " should have retrieved namespace info before initBlockPool.");
    }
    setClusterId(nsInfo.clusterID, nsInfo.getBlockPoolID());
    // Register the new block pool with the BP manager.
    blockPoolManager.addBlockPool(bpos);
    // In the case that this is the first block pool to connect, initialize
    // the dataset, block scanners, etc.
    initStorage(nsInfo);
    // Exclude failed disks before initializing the block pools to avoid startup
    // failures.
    checkDiskError();
    data.addBlockPool(nsInfo.getBlockPoolID(), getConf());
    blockScanner.enableBlockPoolId(bpos.getBlockPoolId());
    initDirectoryScanner(getConf());
    initDiskBalancer(data, getConf());
}
Also used: IOException(java.io.IOException) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo)
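
initBlockPool is reached from BPOfferService once the first actor finishes its handshake with a NN. A condensed sketch of that call site, verifyAndSetNamespaceInfo from the same codebase, with locking, failure cleanup, and the additional namespace/cluster ID checks elided for brevity:

void verifyAndSetNamespaceInfo(BPServiceActor actor, NamespaceInfo nsInfo)
        throws IOException {
    if (this.bpNSInfo == null) {
        // First NN connection for this block pool: record the namespace
        // info and initialize the DN-side state for the pool.
        this.bpNSInfo = nsInfo;
        dn.initBlockPool(this);
    } else {
        // Subsequent NN (e.g. the HA standby): only verify consistency.
        checkNSEquality(bpNSInfo.getBlockPoolID(), nsInfo.getBlockPoolID(),
                "Blockpool ID");
    }
}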

Example 18 with NamespaceInfo

Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.

From class DataNode, method refreshVolumes.

/**
   * Attempts to reload data volumes with new configuration.
   * @param newVolumes a comma-separated string that specifies the data volumes.
   * @throws IOException on error. If an IOException is thrown, some new volumes
   * may already have been added and some old volumes removed.
   */
private synchronized void refreshVolumes(String newVolumes) throws IOException {
    Configuration conf = getConf();
    conf.set(DFS_DATANODE_DATA_DIR_KEY, newVolumes);
    ExecutorService service = null;
    int numOldDataDirs = dataDirs.size();
    ChangedVolumes changedVolumes = parseChangedVolumes(newVolumes);
    StringBuilder errorMessageBuilder = new StringBuilder();
    List<String> effectiveVolumes = Lists.newArrayList();
    for (StorageLocation sl : changedVolumes.unchangedLocations) {
        effectiveVolumes.add(sl.toString());
    }
    try {
        if (numOldDataDirs + getFSDataset().getNumFailedVolumes() + changedVolumes.newLocations.size() - changedVolumes.deactivateLocations.size() <= 0) {
            throw new IOException("Attempt to remove all volumes.");
        }
        if (!changedVolumes.newLocations.isEmpty()) {
            LOG.info("Adding new volumes: " + Joiner.on(",").join(changedVolumes.newLocations));
            // Add volumes for each Namespace
            final List<NamespaceInfo> nsInfos = Lists.newArrayList();
            for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
                nsInfos.add(bpos.getNamespaceInfo());
            }
            service = Executors.newFixedThreadPool(changedVolumes.newLocations.size());
            List<Future<IOException>> exceptions = Lists.newArrayList();
            for (final StorageLocation location : changedVolumes.newLocations) {
                exceptions.add(service.submit(new Callable<IOException>() {

                    @Override
                    public IOException call() {
                        try {
                            data.addVolume(location, nsInfos);
                        } catch (IOException e) {
                            return e;
                        }
                        return null;
                    }
                }));
            }
            for (int i = 0; i < changedVolumes.newLocations.size(); i++) {
                StorageLocation volume = changedVolumes.newLocations.get(i);
                Future<IOException> ioExceptionFuture = exceptions.get(i);
                try {
                    IOException ioe = ioExceptionFuture.get();
                    if (ioe != null) {
                        errorMessageBuilder.append(String.format("FAILED TO ADD: %s: %s%n", volume, ioe.getMessage()));
                        LOG.error("Failed to add volume: " + volume, ioe);
                    } else {
                        effectiveVolumes.add(volume.toString());
                        LOG.info("Successfully added volume: " + volume);
                    }
                } catch (Exception e) {
                    errorMessageBuilder.append(String.format("FAILED to ADD: %s: %s%n", volume, e.toString()));
                    LOG.error("Failed to add volume: " + volume, e);
                }
            }
        }
        try {
            removeVolumes(changedVolumes.deactivateLocations);
        } catch (IOException e) {
            errorMessageBuilder.append(e.getMessage());
            LOG.error("Failed to remove volume: " + e.getMessage(), e);
        }
        if (errorMessageBuilder.length() > 0) {
            throw new IOException(errorMessageBuilder.toString());
        }
    } finally {
        if (service != null) {
            service.shutdown();
        }
        conf.set(DFS_DATANODE_DATA_DIR_KEY, Joiner.on(",").join(effectiveVolumes));
        dataDirs = getStorageLocations(conf);
    }
}
Also used: Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) IOException(java.io.IOException) Callable(java.util.concurrent.Callable) DiskErrorException(org.apache.hadoop.util.DiskChecker.DiskErrorException) UnknownHostException(java.net.UnknownHostException) ExecutionException(java.util.concurrent.ExecutionException) InvalidChecksumSizeException(org.apache.hadoop.util.InvalidChecksumSizeException) ReconfigurationException(org.apache.hadoop.conf.ReconfigurationException) InvalidBlockTokenException(org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException) DiskBalancerException(org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException) EOFException(java.io.EOFException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) ExecutorService(java.util.concurrent.ExecutorService) Future(java.util.concurrent.Future) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo)
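
In practice this path is exercised by the DataNode's reconfiguration framework when dfs.datanode.data.dir changes, typically triggered with hdfs dfsadmin -reconfig datanode <host:ipc_port> start. The fan-out idiom above, one Callable per new volume whose Future carries the failure instead of throwing, keeps one bad volume from aborting the others. A self-contained sketch of the same idiom, with hypothetical volume names and a stand-in addVolume:

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FanOutExample {

    public static void main(String[] args) throws Exception {
        List<String> volumes = Arrays.asList("/data1", "/data2", "/data3");
        ExecutorService service = Executors.newFixedThreadPool(volumes.size());
        try {
            // One task per volume; each task returns its failure instead of
            // throwing, so the results can be collected after all tasks run.
            List<Future<IOException>> results = new ArrayList<>();
            for (final String volume : volumes) {
                results.add(service.submit(() -> {
                    try {
                        addVolume(volume); // stand-in for data.addVolume(...)
                        return null;       // success
                    } catch (IOException e) {
                        return e;          // failure, reported after all finish
                    }
                }));
            }
            // Join all tasks, then aggregate failures into a single exception.
            StringBuilder errors = new StringBuilder();
            for (int i = 0; i < volumes.size(); i++) {
                IOException ioe = results.get(i).get();
                if (ioe != null) {
                    errors.append(String.format("FAILED TO ADD: %s: %s%n",
                            volumes.get(i), ioe.getMessage()));
                }
            }
            if (errors.length() > 0) {
                throw new IOException(errors.toString());
            }
        } finally {
            service.shutdown();
        }
    }

    // Hypothetical stand-in for the real volume-add work.
    private static void addVolume(String volume) throws IOException {
        if (!volume.startsWith("/")) {
            throw new IOException("not an absolute path: " + volume);
        }
    }
}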

Example 19 with NamespaceInfo

Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.

From class BPServiceActor, method retrieveNamespaceInfo.

/**
   * Perform the first part of the handshake with the NameNode.
   * This calls <code>versionRequest</code> to determine the NN's
   * namespace and version info. It automatically retries until
   * the NN responds or the DN is shutting down.
   * 
   * @return the NamespaceInfo
   */
@VisibleForTesting
NamespaceInfo retrieveNamespaceInfo() throws IOException {
    NamespaceInfo nsInfo = null;
    while (shouldRun()) {
        try {
            nsInfo = bpNamenode.versionRequest();
            LOG.debug(this + " received versionRequest response: " + nsInfo);
            break;
        } catch (SocketTimeoutException e) {
            // namenode is busy
            LOG.warn("Problem connecting to server: " + nnAddr);
        } catch (IOException e) {
            // namenode is not available
            LOG.warn("Problem connecting to server: " + nnAddr);
        }
        // wait five seconds before retrying
        sleepAndLogInterrupts(5000, "requesting version info from NN");
    }
    if (nsInfo != null) {
        checkNNVersion(nsInfo);
    } else {
        throw new IOException("DN shut down before block pool connected");
    }
    return nsInfo;
}
Also used: SocketTimeoutException(java.net.SocketTimeoutException) IOException(java.io.IOException) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
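
The retry loop is straightforward to exercise against a mocked NN proxy. A hypothetical test sketch using Mockito; how the mock gets wired in as the actor's bpNamenode depends on test scaffolding not shown here, and the NamespaceInfo values are illustrative only:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.net.SocketTimeoutException;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.junit.Test;

@Test
public void testRetrieveNamespaceInfoRetriesOnTimeout() throws Exception {
    DatanodeProtocolClientSideTranslatorPB nn =
            mock(DatanodeProtocolClientSideTranslatorPB.class);
    // First call simulates a busy NN; the second returns valid namespace info.
    when(nn.versionRequest())
            .thenThrow(new SocketTimeoutException("NN busy"))
            .thenReturn(new NamespaceInfo(1, "cluster0", "bp-1", 1));
    // Wire `nn` in as the actor's bpNamenode, then assert that
    // retrieveNamespaceInfo() loops once and returns the second response.
}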

Example 20 with NamespaceInfo

Use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.

From class BPServiceActor, method connectToNNAndHandshake.

private void connectToNNAndHandshake() throws IOException {
    // get NN proxy
    bpNamenode = dn.connectToNN(nnAddr);
    // First phase of the handshake with the NN: get the namespace info.
    NamespaceInfo nsInfo = retrieveNamespaceInfo();
    // Verify that this matches the other NN in this HA pair.
    // This also initializes our block pool in the DN if we are
    // the first NN connection for this BP.
    bpos.verifyAndSetNamespaceInfo(this, nsInfo);
    // Second phase of the handshake with the NN.
    register(nsInfo);
}
Also used: NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo)
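
For completeness, the second phase invoked on the last line. This is a condensed sketch of BPServiceActor.register from the same class, with retries on other exception types and extra bookkeeping trimmed, so treat it as an outline rather than the exact method body:

void register(NamespaceInfo nsInfo) throws IOException {
    // Phase one initialized block pool storage, so the registration sent
    // here can carry the real storage info for this block pool.
    DatanodeRegistration bpRegistration = bpos.createRegistration();
    while (shouldRun()) {
        try {
            // The NN returns the registration with updated fields; keep it.
            bpRegistration = bpNamenode.registerDatanode(bpRegistration);
            break;
        } catch (SocketTimeoutException e) {
            // NN busy; pause, then retry.
            LOG.warn("Problem connecting to server: " + nnAddr);
            sleepAndLogInterrupts(1000, "connecting to server");
        }
    }
    bpos.registrationSucceeded(this, bpRegistration);
}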

Aggregations

NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo): 35 uses
IOException (java.io.IOException): 13 uses
Test (org.junit.Test): 13 uses
File (java.io.File): 8 uses
InetSocketAddress (java.net.InetSocketAddress): 7 uses
Storage (org.apache.hadoop.hdfs.server.common.Storage): 6 uses
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 6 uses
ArrayList (java.util.ArrayList): 5 uses
DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB): 5 uses
DataStorage (org.apache.hadoop.hdfs.server.datanode.DataStorage): 5 uses
Configuration (org.apache.hadoop.conf.Configuration): 4 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 4 uses
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 4 uses
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 4 uses
NNHAStatusHeartbeat (org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat): 4 uses
SlowPeerReports (org.apache.hadoop.hdfs.server.protocol.SlowPeerReports): 4 uses
VolumeFailureSummary (org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary): 4 uses
HeartbeatResponse (org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse): 3 uses
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 3 uses
Matchers.anyString (org.mockito.Matchers.anyString): 3 uses